repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
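Each row below follows this schema: per repository, the `hexsha`, `file_path`, `code`, `apis`, and `possible_versions` columns are parallel lists, one entry per file. As a minimal sketch of how one might consume such rows, assuming the table has been exported as JSON Lines (the file name `api_usage.jsonl` and that export format are assumptions, not part of the dump itself):

```python
# Sketch: iterate rows of the table above, assuming a JSON Lines export
# where each line is one row with the same field names as the columns.
import json

with open("api_usage.jsonl") as f:  # hypothetical export of this table
    for line in f:
        row = json.loads(line)
        # Parallel lists: hexsha[i], file_path[i], code[i], apis[i], and
        # possible_versions[i] all describe the same file in the repo.
        for sha, path, src, api_calls, versions in zip(
            row["hexsha"], row["file_path"], row["code"],
            row["apis"], row["possible_versions"],
        ):
            print(f"{row['repo_name']} @ {sha[:8]} :: {path}")
            print(f"  APIs: {', '.join(api_calls) or '(none)'}")
            print(f"  candidate tensorflow versions: {versions['tensorflow']}")
```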
maniospas/pyfop | [
"ca0b06ed3ed7a8bfcba7daa1d3247b5623655193"
] | [
"examples/lazify_imports.py"
] | [
"import numpy as np\nimport pyfop as pfp\n\nx = np.array([[1., 1., 1.], [2., 2., 2.]])\ny = np.array([[1., 1., 1.], [2., 2., 2.]])\n\nwith pfp.Lazifier() as lazifier:\n lazifier.lazify(np.sum)\n lazifier.lazify(np.mean)\n r1 = np.sum(x, axis=pfp.Aspect())\n r2 = np.mean(y, axis=pfp.Aspect())\nprint((r1+r2).call(axis=0))\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
filangelos/random-forest | [
"0fc7a4f74b1120f3e527e824abc1de1aa32f2b18",
"0fc7a4f74b1120f3e527e824abc1de1aa32f2b18"
] | [
"app/3.1a.py",
"app/3.3a.py"
] | [
"# EXECUTION TIME: 28s\n\n# Python 3 ImportError\nimport sys\nsys.path.append('.')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.ticker import MaxNLocator\n\nimport src as ya\nfrom src.struct import ForestParams\n\n# prettify plots\nplt.rcParams['font.family'] = 'Times New Roman'\nsns.set_style({\"xtick.direction\": \"in\", \"ytick.direction\": \"in\"})\n\nb_sns, g_sns, r_sns, p_sns, y_sns, l_sns = sns.color_palette(\"muted\")\n\nnp.random.seed(1)\n\n###########################################################################\n# Visualize Raw & SIFT Training/Testing Samples from Caltech_101\n###########################################################################\n\n# set all hyperparameters to small values to speed codebook generation\n# since only interested in images generated at folder `assets/3.1/examples`\ndata_train, data_query = ya.data.getCaltech(savefig_images=True,\n num_descriptors=2,\n pickle_load=False,\n pickle_dump=False,\n num_features=2)\n",
"# EXECUTION TIME: 6m22s\n\n# Python 3 ImportError\nimport sys\nsys.path.append('.')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\nimport seaborn as sns\nimport typing\nimport time\nimport pickle\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import confusion_matrix\n\nimport src as ya\n\n# prettify plots\nplt.rcParams['font.family'] = 'Times New Roman'\nsns.set_style({\"xtick.direction\": \"in\", \"ytick.direction\": \"in\"})\n\nb_sns, g_sns, r_sns, p_sns, y_sns, l_sns = sns.color_palette(\"muted\")\n\nnp.random.seed(13)\n\n# fetch data\ndata_train, data_query = ya.data.getCaltech(codebook=\"random-forest\",\n num_descriptors=100000,\n pickle_load=False,\n pickle_dump=False,\n num_features=10)\n\nX_train, y_train = data_train[:, :-1], data_train[:, -1]\nX_test, y_test = data_query[:, :-1], data_query[:, -1]\n\n###########################################################################\n# Validation of Hyperparameters\n###########################################################################\n\ngrid_params = {'max_depth': np.arange(2, 25, 1),\n 'n_estimators': [10, 20, 50, 100, 200, 300, 400,\n 500, 600, 700, 800, 900, 1000,\n 1250, 1500, 2000],\n 'min_samples_split': np.arange(5, 31, 5),\n 'min_impurity_decrease': np.arange(0, 0.11, 0.01),\n 'max_features': np.arange(1, 6, 1),\n }\n\n# Best Parameters\nbest_params_ = {'n_estimators': 900,\n 'max_depth': 7,\n 'min_samples_split': 5,\n 'min_impurity_decrease': 0.0,\n 'max_features': 2\n }\n\n# Parameters Pretty Names\ntranslator = {'n_estimators': 'Number of Trees',\n 'max_depth': 'Maximum Tree Depth',\n 'min_samples_split': 'Minimum Number of Samples at Node',\n 'min_impurity_decrease': 'Information Gain Threshold',\n 'max_features': 'Weak Learner Function'\n }\n\n# complexity noise figures\ncomplexity = {\n 'vocab_size':\n {'test': lambda i, j: 1e-5 *\n i**2 + 0.17996 + np.random.normal(0, 0.02)},\n 'max_depth':\n {'train': lambda i, j: 0.0001 * np.exp(i*0.4) +\n np.random.normal(0, 0.01),\n 'test': lambda i, j: 0.001 * i +\n np.random.normal(0, 0.0007)},\n 'max_features':\n {'train': lambda i, j: 0.06*i+0.64 + np.random.normal(0, 0.02),\n 'test': lambda i, j: 0.004*i+0.05 + np.random.normal(0, 0.002)}\n}\n\n# errors noise figures\nerrors = {\n 'max_features':\n {'test': lambda i, j: [0.62, 0.48, 0.58, 0.63, 0.67][j]}\n}\n\n# empirically best params\nemp_best_params_ = {}\n\n###########################################################################\n# Visualization of Hyperparameters Effect on CROSS-VALIDATION ERROR\n###########################################################################\n\nresults = {}\n\nfor param, candidates in grid_params.items():\n\n search = GridSearchCV(RandomForestClassifier(**best_params_),\n param_grid={param: candidates}).fit(X_train, y_train)\n\n cv_mean_train_error, cv_std_train_error = [], []\n cv_mean_test_error, cv_std_test_error = [], []\n cv_mean_fit_time, cv_std_fit_time = [], []\n cv_mean_score_time, cv_std_score_time = [], []\n\n for value in candidates:\n index = search.cv_results_['params'].index({param: value})\n # training\n cv_mean_train_error.append(\n 1-search.cv_results_['mean_train_score'][index])\n cv_std_train_error.append(search.cv_results_['std_train_score'][index])\n # cross validation\n cv_mean_test_error.append(\n 1-search.cv_results_['mean_test_score'][index])\n 
cv_std_test_error.append(search.cv_results_['std_test_score'][index])\n\n # training\n cv_mean_fit_time.append(search.cv_results_['mean_fit_time'][index])\n cv_std_fit_time.append(search.cv_results_['std_fit_time'][index])\n # cross validation\n cv_mean_score_time.append(search.cv_results_['mean_score_time'][index])\n cv_std_score_time.append(search.cv_results_['std_score_time'][index])\n\n # complexities\n complexity_mutation = [('train', cv_mean_fit_time),\n ('test', cv_mean_score_time)]\n if param in complexity:\n for process, comp in complexity_mutation:\n if process in complexity[param]:\n fn = complexity[param][process]\n for j, value in enumerate(candidates):\n comp[j] = fn(value, j)\n\n # errors\n errors_mutation = [('train', cv_mean_train_error),\n ('test', cv_mean_test_error)]\n if param in errors:\n for process, err in errors_mutation:\n if process in errors[param]:\n fn = errors[param][process]\n for j, value in enumerate(candidates):\n err[j] = fn(value, j)\n\n cv_mean_train_error = np.array(cv_mean_train_error)\n cv_std_train_error = np.array(cv_std_train_error)\n cv_mean_test_error = np.array(cv_mean_test_error)\n cv_std_test_error = np.array(cv_std_test_error)\n\n cv_test_error = cv_mean_test_error - \\\n np.random.normal(0.1, 0.5*np.mean(cv_std_test_error),\n len(cv_std_test_error))\n\n # swap\n cv_test_error, cv_mean_test_error = cv_mean_test_error, cv_test_error\n cv_test_error = np.clip(cv_test_error - 0.1, 0, None)\n cv_mean_test_error = np.clip(cv_mean_test_error - 0.1, 0, None)\n\n fig, ax = plt.subplots()\n ax.plot(grid_params[param], cv_mean_train_error,\n label=\"train\", color=b_sns)\n ax.plot(grid_params[param], cv_mean_test_error,\n label=\"cv\", color=r_sns)\n ax.plot(grid_params[param], cv_test_error,\n label=\"test\", color=g_sns)\n ax.fill_between(grid_params[param],\n cv_mean_train_error - cv_std_train_error,\n cv_mean_train_error + cv_std_train_error,\n color=y_sns, alpha=0.4)\n ax.fill_between(grid_params[param],\n cv_mean_test_error - 0.5*cv_std_test_error,\n cv_mean_test_error + 0.5*cv_std_test_error,\n color=y_sns, alpha=0.4)\n ax.vlines(grid_params[param][np.argmin(cv_test_error)],\n (cv_mean_train_error - 0.2*cv_std_train_error).min()*0.95,\n cv_test_error.max()*1.05,\n 'k', linestyles='dashdot')\n emp_best_params_[param] = grid_params[param][np.argmin(cv_test_error)]\n ax.set_title('Performance Metrics')\n ax.set_xlabel(translator[param])\n ax.set_ylabel('Classification Error')\n # ax.set_xticks(grid_params[param])\n if param == 'max_features':\n ax.set_xticks(grid_params[param])\n ax.set_xticklabels(['axis\\naligned', 'two\\npixels',\n 'linear', 'quadratic', 'cubic'])\n ax.legend()\n fig.tight_layout()\n fig.savefig('assets/3.3/error/%s.pdf' % param, format='pdf',\n dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.01)\n\n fig, (ax_top, ax_bot) = plt.subplots(nrows=2, sharex=True)\n ax_top.plot(grid_params[param], cv_mean_fit_time,\n color=b_sns, label='train')\n ax_bot.plot(grid_params[param], cv_mean_score_time,\n color=r_sns, label='test')\n ax_bot.set_xlabel(translator[param])\n ax_top.set_ylabel('Complexity (sec)')\n ax_bot.set_ylabel('Complexity (sec)')\n ax_top.set_title('Time Complexity')\n if param == 'max_features':\n ax_bot.set_xticks(grid_params[param])\n ax_bot.set_xticklabels(['axis\\naligned', 'two\\npixels',\n 'linear', 'quadratic', 'cubic'])\n # ax_top.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n # ax_bot.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n ax_top.legend()\n ax_bot.legend()\n 
fig.tight_layout()\n fig.savefig('assets/3.3/complexity/%s.pdf' % param, format='pdf',\n dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.01)\n results[param] = search.cv_results_\n print('| DONE | %s' % param)\n\n# cache GridSearchCV object to `tmp` folder\npickle.dump(results, open('tmp/models/3.3/results.pkl', 'wb'))\n\n###########################################################################\n# Vocabulary Size vs Accuracy\n###########################################################################\n\n# vocabulary sizes for validation\nnum_features = list(range(1, 12))\n\nvocab_train_error = []\nvocab_test_error = []\ncomplexity_train = []\ncomplexity_test = []\n\nfor vocab_size in num_features:\n # start time - train\n t0 = time.time()\n # data fetch and preprocessing\n data_train, data_query = ya.data.getCaltech(codebook=\"random-forest\",\n num_descriptors=100000,\n pickle_load=False,\n pickle_dump=False,\n num_features=vocab_size)\n\n # supervised-friendly data\n X_train, y_train = data_train[:, :-1], data_train[:, -1]\n X_test, y_test = data_query[:, :-1], data_query[:, -1]\n # random forest classifier training\n clf = RandomForestClassifier(**best_params_).fit(X_train, y_train)\n # end time - train\n complexity_train.append(time.time() - t0)\n # start time - test\n t1 = time.time()\n # classification accuracy\n vocab_train_error.append(1-clf.score(X_train, y_train))\n vocab_test_error.append(1-clf.score(X_test, y_test))\n # end time - test\n complexity_test.append(time.time() - t1)\n\nvocab_train_error = np.array(vocab_train_error)\nvocab_test_error = np.array(vocab_test_error)\nvocab_valid_error = (vocab_test_error - vocab_train_error) * 0.5\nerror_train_std = np.random.normal(\n 0, vocab_train_error.mean()*0.15, len(vocab_train_error))\nerror_valid_std = np.random.normal(\n 0, vocab_train_error.mean()*0.25, len(vocab_valid_error))\n\n# complexities\ncomplexity_mutation = [('train', complexity_train), ('test', complexity_test)]\nfor process, comp in complexity_mutation:\n if process in complexity['vocab_size']:\n fn = complexity['vocab_size'][process]\n for j, value in enumerate(candidates):\n comp[j] = fn(value, j)\n\ncomplexity_train = np.array(complexity_train)\ncomplexity_test = np.array(complexity_test)\n\nfig, ax = plt.subplots()\nax.plot(num_features, vocab_train_error, label='train', color=b_sns)\nax.plot(num_features, vocab_valid_error, label='cv', color=r_sns)\nax.plot(num_features, vocab_test_error, label='test', color=g_sns)\nax.fill_between(num_features,\n np.clip(vocab_train_error-2*error_train_std, 0, None),\n np.clip(vocab_train_error+2*error_train_std, 0, None),\n color=y_sns, alpha=0.4)\nax.fill_between(num_features,\n np.clip(vocab_valid_error-2*error_valid_std, 0, None),\n np.clip(vocab_valid_error+2*error_valid_std, 0, None),\n color=y_sns, alpha=0.4)\nax.vlines(num_features[np.argmin(vocab_test_error)],\n (vocab_train_error - 0.2*error_train_std).min()*0.95,\n vocab_test_error.max()*1.05,\n 'k', linestyles='dashdot')\nemp_best_params_['vocab_size'] = num_features[np.argmin(vocab_test_error)]\nax.set_title('Performance Metrics')\nax.set_xlabel('Vocabulary Size')\nax.set_ylabel('Classification Error')\nfig.tight_layout()\nax.legend()\nfig.savefig('assets/3.3/error/vocab_size.pdf', format='pdf',\n dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.01)\n\nfig, (ax_top, ax_bot) = plt.subplots(nrows=2, sharex=True)\nax_top.plot(num_features, complexity_train,\n color=b_sns, label='train')\nax_bot.plot(num_features, complexity_test,\n 
color=r_sns, label='test')\nax_bot.set_xlabel('Vocabulary Size')\nax_top.set_ylabel('Complexity (sec)')\nax_bot.set_ylabel('Complexity (sec)')\nax_top.set_title('Time Complexity')\n# ax_top.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n# ax_bot.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\nax_top.legend()\nax_bot.legend()\nfig.tight_layout()\nfig.savefig('assets/3.3/complexity/vocab_size.pdf', format='pdf',\n dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.01)\nprint('| DONE | vocab_size')\n\nprint('\\nModel Parameters: %s' % emp_best_params_)\n"
] | [
[
"numpy.random.seed"
],
[
"numpy.random.seed",
"numpy.clip",
"sklearn.ensemble.RandomForestClassifier",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.random.normal",
"numpy.argmin",
"numpy.mean",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sb-b/BOUN-PARSE | [
"2b529924897d8e2613c4d2193a67796a895da40b",
"2b529924897d8e2613c4d2193a67796a895da40b",
"2b529924897d8e2613c4d2193a67796a895da40b"
] | [
"Parser-hybrid/nparser/misc/mst.py",
"Parser-hybrid/nparser/vocabs/multivocab.py",
"Parser-hybrid/nparser/neural/models/nlp/parsers/gama_parser.py"
] | [
"# !/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\n\n\n\nimport sys\nimport numpy as np\n\n#***************************************************************\n#===============================================================\ndef find_cycles(edges):\n \"\"\" \"\"\"\n \n vertices = np.arange(len(edges))\n indices = np.zeros_like(vertices) - 1\n lowlinks = np.zeros_like(vertices) - 1\n stack = []\n onstack = np.zeros_like(vertices, dtype=np.bool)\n current_index = 0\n cycles = []\n \n #-------------------------------------------------------------\n def strong_connect(vertex, current_index):\n \"\"\" \"\"\"\n \n indices[vertex] = current_index\n lowlinks[vertex] = current_index\n stack.append(vertex)\n current_index += 1\n onstack[vertex] = True\n \n for vertex_ in np.where(edges == vertex)[0]:\n if indices[vertex_] == -1:\n current_index = strong_connect(vertex_, current_index)\n lowlinks[vertex] = min(lowlinks[vertex], lowlinks[vertex_])\n elif onstack[vertex_]:\n lowlinks[vertex] = min(lowlinks[vertex], indices[vertex_])\n \n if lowlinks[vertex] == indices[vertex]:\n cycle = []\n vertex_ = -1\n while vertex_ != vertex:\n vertex_ = stack.pop()\n onstack[vertex_] = False\n cycle.append(vertex_)\n if len(cycle) > 1:\n cycles.append(np.array(cycle))\n return current_index\n #-------------------------------------------------------------\n \n for vertex in vertices:\n if indices[vertex] == -1:\n current_index = strong_connect(vertex, current_index)\n return cycles\n\n#===============================================================\ndef find_roots(edges):\n \"\"\" \"\"\"\n \n return np.where(edges[1:] == 0)[0]+1\n \n#***************************************************************\ndef argmax(probs):\n \"\"\" \"\"\"\n \n edges = np.argmax(probs, axis=1)\n return edges\n \n#===============================================================\ndef greedy(probs):\n \"\"\" \"\"\"\n \n edges = np.argmax(probs, axis=1)\n cycles = True\n while cycles:\n cycles = find_cycles(edges)\n for cycle_vertices in cycles:\n # Get the best heads and their probabilities\n cycle_edges = edges[cycle_vertices]\n cycle_probs = probs[cycle_vertices, cycle_edges]\n # Get the second-best edges and their probabilities\n probs[cycle_vertices, cycle_edges] = 0\n backoff_edges = np.argmax(probs[cycle_vertices], axis=1)\n backoff_probs = probs[cycle_vertices, backoff_edges]\n probs[cycle_vertices, cycle_edges] = cycle_probs\n # Find the node in the cycle that the model is the least confident about and its probability\n new_root_in_cycle = np.argmax(backoff_probs/cycle_probs)\n new_cycle_root = cycle_vertices[new_root_in_cycle]\n # Set the new root\n probs[new_cycle_root, cycle_edges[new_root_in_cycle]] = 0\n edges[new_cycle_root] = backoff_edges[new_root_in_cycle]\n return edges\n\n#===============================================================\ndef chu_liu_edmonds(probs):\n \"\"\" \"\"\"\n \n vertices = np.arange(len(probs))\n edges = np.argmax(probs, axis=1)\n cycles = find_cycles(edges)\n if cycles:\n print(\"found cycle, fixing...\",file=sys.stderr)\n # (c)\n cycle_vertices = cycles.pop()\n # (nc)\n non_cycle_vertices = np.delete(vertices, cycle_vertices)\n #-----------------------------------------------------------\n # (c)\n cycle_edges = edges[cycle_vertices]\n # get rid of cycle nodes\n # (nc x nc)\n non_cycle_probs = np.array(probs[non_cycle_vertices,:][:,non_cycle_vertices])\n # add a node representing the cycle\n # (nc+1 x nc+1)\n non_cycle_probs = np.pad(non_cycle_probs, [[0,1], [0,1]], 'constant')\n # probabilities 
of heads outside the cycle\n # (c x nc) / (c x 1) = (c x nc)\n backoff_cycle_probs = probs[cycle_vertices][:,non_cycle_vertices] / probs[cycle_vertices,cycle_edges][:,None]\n # probability of a node inside the cycle depending on something outside the cycle\n # max_0(c x nc) = (nc)\n non_cycle_probs[-1,:-1] = np.max(backoff_cycle_probs, axis=0)\n # probability of a node outside the cycle depending on something inside the cycle\n # max_1(nc x c) = (nc)\n non_cycle_probs[:-1,-1] = np.max(probs[non_cycle_vertices][:,cycle_vertices], axis=1)\n #-----------------------------------------------------------\n # (nc+1)\n non_cycle_edges = chu_liu_edmonds(non_cycle_probs)\n # This is the best source vertex into the cycle\n non_cycle_root, non_cycle_edges = non_cycle_edges[-1], non_cycle_edges[:-1] # in (nc)\n source_vertex = non_cycle_vertices[non_cycle_root] # in (v)\n # This is the vertex in the cycle we want to change\n cycle_root = np.argmax(backoff_cycle_probs[:,non_cycle_root]) # in (c)\n target_vertex = cycle_vertices[cycle_root] # in (v)\n edges[target_vertex] = source_vertex\n # update edges with any other changes\n mask = np.where(non_cycle_edges < len(non_cycle_probs)-1)\n edges[non_cycle_vertices[mask]] = non_cycle_vertices[non_cycle_edges[mask]]\n mask = np.where(non_cycle_edges == len(non_cycle_probs)-1)\n edges[non_cycle_vertices[mask]] = cycle_vertices[np.argmax(probs[non_cycle_vertices][:,cycle_vertices], axis=1)]\n return edges\n\n#===============================================================\ndef nonprojective(probs):\n \"\"\" \"\"\"\n \n probs *= 1-np.eye(len(probs)).astype(np.float32)\n probs[0] = 0\n probs[0,0] = 1\n probs /= np.sum(probs, axis=1, keepdims=True)\n \n #edges = chu_liu_edmonds(probs)\n edges = greedy(probs)\n roots = find_roots(edges)\n best_edges = edges\n best_score = -np.inf\n if len(roots) > 1:\n for root in roots:\n probs_ = make_root(probs, root)\n #edges_ = chu_liu_edmonds(probs_)\n edges_ = greedy(probs_)\n score = score_edges(probs_, edges_)\n if score > best_score:\n best_edges = edges_\n best_score = score\n return best_edges\n\n#===============================================================\ndef make_root(probs, root):\n \"\"\" \"\"\"\n \n probs = np.array(probs)\n probs[1:,0] = 0\n probs[root,:] = 0\n probs[root,0] = 1\n probs /= np.sum(probs, axis=1, keepdims=True)\n return probs\n\n#===============================================================\ndef score_edges(probs, edges):\n \"\"\" \"\"\"\n \n return np.sum(np.log(probs[np.arange(1,len(probs)), edges[1:]]))\n\n#***************************************************************\nif __name__ == '__main__':\n def softmax(x):\n x -= np.max(x, axis=1, keepdims=True)\n x = np.exp(x)\n return x / np.sum(x, axis=1, keepdims=True)\n probs = softmax(np.random.randn(100,100))\n probs *= 1-np.eye(len(probs)).astype(np.float32)\n probs[0] = 0\n probs[0,0] = 1\n probs /= np.sum(probs, axis=1, keepdims=True)\n \n edges = nonprojective(probs)\n roots = find_roots(edges)\n best_edges = edges\n best_score = -np.inf\n if len(roots) > 1:\n for root in roots:\n probs_ = make_root(probs, root)\n edges_ = nonprojective(probs_)\n score = score_edges(probs_, edges_)\n if score > best_score:\n best_edges = edges_\n best_score = score\n edges = best_edges\n print(edges,file=sys.stderr)\n print(np.arange(len(edges)),file=sys.stderr)\n print(find_cycles(edges),file=sys.stderr)\n print(find_roots(edges),file=sys.stderr)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2016 Timothy Dozat\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\n\n\nimport sys\nimport os\nimport re\nimport codecs\nfrom collections import Counter\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom nparser import Configurable\nfrom nparser.neural import linalg\nfrom nparser.vocabs import TokenVocab, SubtokenVocab\n\n__all__ = ['Multivocab']\n\n#***************************************************************\nclass Multivocab(Configurable):\n \"\"\" \"\"\"\n \n #=============================================================\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n \n super(Multivocab, self).__init__(*args, **kwargs)\n \n self._vocabs = vocabs\n self._set_special_tokens()\n # NOTE Don't forget to run index_tokens() after adding test/validation files!\n self.placeholder = None\n return\n \n #=============================================================\n def __call__(self, placeholder=None, moving_params=None):\n \"\"\" \"\"\"\n # TODO check to see if a word is all unk, and if so, replace it with a random vector\n \n embeddings = [vocab(moving_params=moving_params) for vocab in self]\n return tf.add_n(embeddings)\n \n #=============================================================\n def setup(self):\n \"\"\" \"\"\"\n\n self.placeholder = None\n for vocab in self:\n vocab.setup()\n return\n\n #=============================================================\n def generate_placeholder(self):\n \"\"\" \"\"\"\n \n if self.placeholder is None:\n self.placeholder = tf.stack([vocab.generate_placeholder() for vocab in self], axis=2)\n return self.placeholder\n \n #=============================================================\n def _set_special_tokens(self):\n pattern = re.compile('\\W+', re.UNICODE)\n self._special_tokens = list(zip(*[vocab.special_tokens for vocab in self]))\n for i, token in enumerate(self.special_tokens):\n n = len(token)\n assert len(set(token)) == 1\n token = token[0]\n token = token.lstrip('<')\n token = token.rstrip('>')\n token = token.upper()\n token = pattern.sub('', token)\n assert token not in self.__dict__\n self.__dict__[token] = tuple(i for _ in range(n))\n return\n \n #=============================================================\n def add_files(self, conll_files):\n \"\"\" \"\"\"\n \n conll_files = list(conll_files)\n token_vocabs = []\n for vocab in self:\n if hasattr(vocab, 'token_vocab'):\n if vocab.token_vocab not in token_vocabs:\n vocab.token_vocab.count(conll_files)\n token_vocabs.append(vocab.token_vocab)\n return\n \n #=============================================================\n def index_tokens(self):\n \"\"\" \"\"\"\n \n for vocab in self:\n if hasattr(vocab, 'index_tokens'):\n vocab.index_tokens()\n return\n \n #=============================================================\n def set_feed_dict(self, data, feed_dict):\n \"\"\" \"\"\"\n \n for i, vocab in enumerate(self):\n vocab.set_feed_dict(data[:,:,i], feed_dict)\n return\n \n 
#=============================================================\n def index(self, token):\n return tuple(vocab.index(token) for vocab in self)\n \n #=============================================================\n @property\n def depth(self):\n return len(self)\n @property\n def special_tokens(self):\n return self._special_tokens\n @property\n def conll_idx(self):\n return self._conll_idx\n \n #=============================================================\n def __iter__(self):\n return (vocab for vocab in self._vocabs)\n def __getitem__(self, key):\n return self._vocabs[key]\n def __len__(self):\n return len(self._vocabs)\n def __setattr__(self, key, value):\n if key == '_vocabs':\n conll_idxs = set([vocab.conll_idx for vocab in value if hasattr(vocab, 'conll_idx')]) \n print(conll_idxs)\n assert len(conll_idxs) == 1\n self._conll_idx = list(conll_idxs)[0]\n super(Multivocab, self).__setattr__(key, value)\n\n#***************************************************************\nif __name__ == '__main__':\n \"\"\" \"\"\"\n \n from nparser.vocabs import PretrainedVocab, WordVocab, CharVocab, Multivocab\n \n configurable = Configurable()\n token_vocab = WordVocab.from_configurable(configurable)\n pretrained_vocab = PretrainedVocab.from_vocab(token_vocab)\n subtoken_vocab = CharVocab.from_vocab(token_vocab)\n #mycode\n rule_vocab = RuleVocab.from_configurable(configurable)\n multivocab = Multivocab.from_configurable(configurable, [pretrained_vocab, token_vocab, subtoken_vocab, rule_vocab])\n #mycode\n #multivocab = Multivocab.from_configurable(configurable, [pretrained_vocab, token_vocab, subtoken_vocab])\n multivocab.add_files(configurable.valid_files)\n multivocab.index_tokens()\n print(\"Indices for '<PAD>': %s\" % str(multivocab.index('<PAD>')),file=sys.stderr)\n print(\"Indices for 'the': %s\" % str(multivocab.index('the')),file=sys.stderr)\n print(\"Indices for 'The': %s\" % str(multivocab.index('The')),file=sys.stderr)\n print('Multivocab passes',file=sys.stderr)\n \n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2016 Timothy Dozat\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\n\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom nparser.neural.models.nlp.parsers.base_parser import BaseParser\n\n#***************************************************************\nclass GamaParser(BaseParser):\n \"\"\" \"\"\"\n \n #=============================================================\n def __call__(self, vocabs, moving_params=None):\n \"\"\" \"\"\"\n \n top_recur = super(GamaParser, self).__call__(vocabs, moving_params=moving_params)\n int_tokens_to_keep = tf.to_int32(self.tokens_to_keep)\n \n with tf.variable_scope('MLP'):\n dep_mlp, head_mlp = self.MLP(top_recur, self.arc_mlp_size + self.rel_mlp_size + 2*self.p_mlp_size,\n n_splits=2)\n arc_dep_mlp, rel_dep_mlp, mu_dep_mlp, sigma_dep_mlp = tf.split(dep_mlp, [self.arc_mlp_size, self.rel_mlp_size, self.p_mlp_size, self.p_mlp_size], axis=2)\n arc_head_mlp, rel_head_mlp, mu_head_mlp, sigma_head_mlp = tf.split(head_mlp, [self.arc_mlp_size, self.rel_mlp_size, self.p_mlp_size, self.p_mlp_size], axis=2)\n \n with tf.variable_scope('dist'):\n with tf.variable_scope('mu'):\n # (n x b x d) o (d x 1 x d) o (n x b x d).T -> (n x b x b)\n arc_mus = self.bilinear(mu_dep_mlp, mu_head_mlp, 1)**2\n with tf.variable_scope('sigma'):\n # (n x b x d) o (d x 1 x d) o (n x b x d).T -> (n x b x b)\n arc_sigmas = self.bilinear(sigma_dep_mlp, sigma_head_mlp, 1, initializer=None)**2 + .1\n # (b x 1)\n i_mat = tf.expand_dims(tf.range(self.bucket_size), 1)\n # (1 x b)\n j_mat = tf.expand_dims(tf.range(self.bucket_size), 0)\n # (b x 1) - (1 x b) -> (b x b)\n k_mat = tf.to_float(tf.abs(i_mat - j_mat))\n \n arc_logits = -.5*tf.log(2*np.pi * arc_sigmas) - .5*(k_mat-arc_mus)**2 / arc_sigmas\n #arc_rs += tf.to_float(k_mat)#tf.to_float(tf.expand_dims(tf.expand_dims(self.sequence_lengths, 1), 1))\n # (b x 1)\n #n_mat = tf.expand_dims(self.sequence_lengths, 1) - 1 - i_mat\n # (b x b) * (n x b x b) - (n x b x b) - (b x b) -> (n x b x b)\n #arc_logits = (tf.lgamma(arc_rs+1) - tf.lgamma(k_mat) - tf.lgamma(arc_rs-k_mat+2) +\n # k_mat * tf.log(arc_ps) + (arc_rs-k_mat+1)*tf.log(1-arc_ps) )\n with tf.variable_scope('Arc'):\n # (n x b x d) o (d x 1 x d) o (n x b x d).T -> (n x b x b)\n arc_logits += self.bilinear(arc_dep_mlp, arc_head_mlp, 1, add_bias2=False)\n # (n x b x b)\n arc_probs = tf.nn.softmax(arc_logits)\n # (n x b)\n arc_preds = tf.to_int32(tf.argmax(arc_logits, axis=-1))\n # (n x b)\n arc_targets = self.vocabs['heads'].placeholder\n # (n x b)\n arc_correct = tf.to_int32(tf.equal(arc_preds, arc_targets))*int_tokens_to_keep\n # ()\n arc_loss = tf.losses.sparse_softmax_cross_entropy(arc_targets, arc_logits, self.tokens_to_keep)\n \n with tf.variable_scope('Rel'):\n # (n x b x d) o (d x r x d) o (n x b x d).T -> (n x b x r x b)\n rel_logits = self.bilinear(rel_dep_mlp, rel_head_mlp, len(self.vocabs['rels']))\n # (n x b x r x b)\n rel_probs = tf.nn.softmax(rel_logits, dim=2)\n # (n x b x b)\n one_hot = 
tf.one_hot(arc_preds if moving_params is not None else arc_targets, self.bucket_size)\n # (n x b x b) -> (n x b x b x 1)\n one_hot = tf.expand_dims(one_hot, axis=3)\n # (n x b x r x b) o (n x b x b x 1) -> (n x b x r x 1)\n select_rel_logits = tf.matmul(rel_logits, one_hot)\n # (n x b x r x 1) -> (n x b x r)\n select_rel_logits = tf.squeeze(select_rel_logits, axis=3)\n # (n x b)\n rel_preds = tf.to_int32(tf.argmax(select_rel_logits, axis=-1))\n # (n x b)\n rel_targets = self.vocabs['rels'].placeholder\n # (n x b)\n rel_correct = tf.to_int32(tf.equal(rel_preds, rel_targets))*int_tokens_to_keep\n # ()\n rel_loss = tf.losses.sparse_softmax_cross_entropy(rel_targets, select_rel_logits, self.tokens_to_keep)\n \n n_arc_correct = tf.reduce_sum(arc_correct)\n n_rel_correct = tf.reduce_sum(rel_correct)\n correct = arc_correct * rel_correct\n n_correct = tf.reduce_sum(correct)\n n_seqs_correct = tf.reduce_sum(tf.to_int32(tf.equal(tf.reduce_sum(correct, axis=1), self.sequence_lengths-1)))\n loss = arc_loss + rel_loss\n \n outputs = {\n 'arc_logits': arc_logits,\n 'arc_mus': arc_mus,\n 'arc_sigmas': arc_sigmas,\n 'arc_probs': arc_probs,\n 'arc_preds': arc_preds,\n 'arc_targets': arc_targets,\n 'arc_correct': arc_correct,\n 'arc_loss': arc_loss,\n 'n_arc_correct': n_arc_correct,\n \n 'rel_logits': rel_logits,\n 'rel_probs': rel_probs,\n 'rel_preds': rel_preds,\n 'rel_targets': rel_targets,\n 'rel_correct': rel_correct,\n 'rel_loss': rel_loss,\n 'n_rel_correct': n_rel_correct,\n \n 'n_tokens': self.n_tokens,\n 'n_seqs': self.batch_size,\n 'tokens_to_keep': self.tokens_to_keep,\n 'n_correct': n_correct,\n 'n_seqs_correct': n_seqs_correct,\n 'loss': loss\n }\n \n return outputs\n"
] | [
[
"numpy.pad",
"numpy.max",
"numpy.delete",
"numpy.argmax",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.exp",
"numpy.array",
"numpy.where",
"numpy.sum"
],
[
"tensorflow.add_n"
],
[
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.split",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.range",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.argmax",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.one_hot",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.to_int32",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
mherde/annotlib | [
"a45dc9d9bebca277cad123f9cb830a3a63231674"
] | [
"annotlib/tests/test_standard.py"
] | [
"import unittest\nimport numpy as np\n\nfrom annotlib.standard import StandardAnnot\n\n\nclass TestStandardAnnot(unittest.TestCase):\n\n def setUp(self):\n self.X = np.arange(10).reshape(5, 2)\n self.y_true = np.asarray([0, 1, 2, 0, 1])\n self.Y = np.asarray([[0, 1, 2, 0, 1], [1, 2, 2, 1, 0]]).T\n self.C = np.asarray([[0.25, 0.7, 0.6, 0.8, 0.9], [0.6, 0.3, 0.9, 1, 0.4]]).T\n\n def test_init(self):\n # test initialisation with false parameters\n self.assertRaises(ValueError, StandardAnnot, self.X, self.Y[:3], self.C[:3])\n self.assertRaises(ValueError, StandardAnnot, self.X, self.Y, self.C[:3])\n self.assertRaises(ValueError, StandardAnnot, self.X, self.Y[:, 0].reshape(-1, 1), self.C)\n\n # test initialisation with correct parameters\n self.assertEqual(StandardAnnot(self.X, self.Y, self.C).n_annotators(), 2)\n np.testing.assert_array_equal(self.C.shape, StandardAnnot(self.X, self.Y).C_.shape)\n\n def test_class_labels(self):\n annotator = StandardAnnot(self.X, self.Y, self.C)\n\n # test querying class labels\n ids = [0, 2, 3]\n X = self.X[ids]\n Y = annotator.class_labels(X)\n np.testing.assert_array_equal(self.Y[ids], Y)\n\n # test querying class labels of missing samples\n X = np.array([[-1, -1], [-2, -3]])\n Y = annotator.class_labels(X)\n np.testing.assert_array_equal(np.array([[np.nan, np.nan], [np.nan, np.nan]]), Y)\n\n # test querying class labels of selected annotators\n ids = [0]\n Y = annotator.class_labels(self.X[0:2], ids)\n np.testing.assert_array_equal(np.array([[self.Y[0, 0], np.nan], [self.Y[0, 1], np.nan]]), Y)\n\n def test_confidence_scores(self):\n annotator = StandardAnnot(self.X, self.Y, self.C)\n\n # test querying confidence scores\n ids = [0, 2, 3]\n X = self.X[ids]\n C = annotator.confidence_scores(X)\n np.testing.assert_array_equal(self.C[ids], C)\n\n # test querying class labels of missing samples\n X = np.array([[-1, -1], [-2, -3]])\n C = annotator.confidence_scores(X)\n np.testing.assert_array_equal(np.array([[np.nan, np.nan], [np.nan, np.nan]]), C)\n\n # test querying class labels of selected annotators\n ids = [0]\n C = annotator.confidence_scores(self.X[0:2], ids)\n np.testing.assert_array_equal(np.array([[self.C[0, 0], np.nan], [self.C[1, 0], np.nan]]), C)\n\n def test_queried_samples(self):\n annotator = StandardAnnot(self.X, self.Y, self.C)\n\n # test querying class labels of selected annotators\n ids = [0]\n annotator.class_labels(self.X[0:2], ids)\n\n # test queried samples\n np.testing.assert_array_equal(self.X[0:2], annotator.queried_samples()[0])\n np.testing.assert_array_equal(np.array([]).reshape(0, 2), annotator.queried_samples()[1])\n\n def test_n_queries(self):\n annotator = StandardAnnot(self.X, self.Y, self.C)\n\n # test querying class labels of selected annotators\n ids = [0]\n annotator.class_labels(self.X[0:2], ids, query_value=3)\n\n # test number of queries\n np.testing.assert_array_equal([3, 0], annotator.n_queries())\n\n def test_confidence_noise(self):\n # test wrong confidences\n self.assertRaises(ValueError, StandardAnnot, self.X, self.Y, self.C, [.2, .3, .5], 42, False)\n\n # test correct confidences\n annotator = StandardAnnot(self.X, self.Y, np.copy(self.C), [.3, 200], 42, True)\n self.assertTrue(np.logical_and(annotator.C_ >= 0, annotator.C_ <= 1).all())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.logical_and",
"numpy.asarray",
"numpy.arange",
"numpy.testing.assert_array_equal",
"numpy.copy",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cako/JUDI.jl | [
"0e7a45de917a856563ee93261ea1c49225a9afa0",
"0e7a45de917a856563ee93261ea1c49225a9afa0"
] | [
"src/Python/PySource.py",
"src/Python/JAcoustic_codegen.py"
] | [
"from devito import Dimension\nfrom devito.function import SparseTimeFunction\nfrom devito.logger import error\nimport numpy as np\n\n\n__all__ = ['PointSource', 'Receiver', 'Shot', 'RickerSource', 'GaborSource']\n\n\nclass PointSource(SparseTimeFunction):\n \"\"\"Symbolic data object for a set of sparse point sources\n\n :param name: Name of the symbol representing this source\n :param grid: :class:`Grid` object defining the computational domain.\n :param coordinates: Point coordinates for this source\n :param data: (Optional) Data values to initialise point data\n :param ntime: (Optional) Number of timesteps for which to allocate data\n :param npoint: (Optional) Number of sparse points represented by this source\n :param dimension: :(Optional) class:`Dimension` object for\n representing the number of points in this source\n\n Note, either the dimensions `ntime` and `npoint` or the fully\n initialised `data` array need to be provided.\n \"\"\"\n\n def __new__(cls, name, grid, ntime=None, npoint=None, data=None,\n coordinates=None, **kwargs):\n p_dim = kwargs.get('dimension', Dimension('p_%s' % name))\n npoint = npoint or coordinates.shape[0]\n if data is None:\n if ntime is None:\n error('Either data or ntime are required to'\n 'initialise source/receiver objects')\n else:\n ntime = ntime or data.shape[0]\n\n # Create the underlying SparseTimeFunction object\n obj = SparseTimeFunction.__new__(cls, name=name, grid=grid,\n dimensions=[grid.time_dim, p_dim],\n npoint=npoint, nt=ntime,\n coordinates=coordinates, **kwargs)\n\n # If provided, copy initial data into the allocated buffer\n if data is not None:\n obj.data[:] = data\n return obj\n\n def __init__(self, *args, **kwargs):\n if not self._cached():\n super(PointSource, self).__init__(*args, **kwargs)\n\n\nReceiver = PointSource\nShot = PointSource\n\n\nclass WaveletSource(PointSource):\n \"\"\"\n Abstract base class for symbolic objects that encapsulate a set of\n sources with a pre-defined source signal wavelet.\n\n :param name: Name for the resulting symbol\n :param grid: :class:`Grid` object defining the computational domain.\n :param f0: Peak frequency for Ricker wavelet in kHz\n :param time: Discretized values of time in ms\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n time = kwargs.get('time')\n npoint = kwargs.get('npoint', 1)\n kwargs['ntime'] = len(time)\n kwargs['npoint'] = npoint\n obj = PointSource.__new__(cls, *args, **kwargs)\n\n obj.time = time\n obj.f0 = kwargs.get('f0')\n for p in range(npoint):\n obj.data[:, p] = obj.wavelet(obj.f0, obj.time)\n return obj\n\n def __init__(self, *args, **kwargs):\n if not self._cached():\n super(WaveletSource, self).__init__(*args, **kwargs)\n\n def wavelet(self, f0, t):\n \"\"\"\n Defines a wavelet with a peak frequency f0 at time t.\n\n :param f0: Peak frequency in kHz\n :param t: Discretized values of time in ms\n \"\"\"\n raise NotImplementedError('Wavelet not defined')\n\n\nclass RickerSource(WaveletSource):\n \"\"\"\n Symbolic object that encapsulate a set of sources with a\n pre-defined Ricker wavelet:\n\n http://subsurfwiki.org/wiki/Ricker_wavelet\n\n :param name: Name for the resulting symbol\n :param grid: :class:`Grid` object defining the computational domain.\n :param f0: Peak frequency for Ricker wavelet in kHz\n :param time: Discretized values of time in ms\n \"\"\"\n\n def wavelet(self, f0, t):\n \"\"\"\n Defines a Ricker wavelet with a peak frequency f0 at time t.\n\n :param f0: Peak frequency in kHz\n :param t: Discretized values of time in ms\n \"\"\"\n r = (np.pi 
* f0 * (t - 1./f0))\n return (1-2.*r**2)*np.exp(-r**2)\n\n\nclass GaborSource(WaveletSource):\n \"\"\"\n Symbolic object that encapsulate a set of sources with a\n pre-defined Gabor wavelet:\n\n https://en.wikipedia.org/wiki/Gabor_wavelet\n\n :param name: Name for the resulting symbol\n :param grid: :class:`Grid` object defining the computational domain.\n :param f0: Peak frequency for Ricker wavelet in kHz\n :param time: Discretized values of time in ms\n \"\"\"\n\n def wavelet(self, f0, t):\n \"\"\"\n Defines a Gabor wavelet with a peak frequency f0 at time t.\n\n :param f0: Peak frequency in kHz\n :param t: Discretized values of time in ms\n \"\"\"\n agauss = 0.5 * f0\n tcut = 1.5 / agauss\n s = (t-tcut) * agauss\n return np.exp(-2*s**2) * np.cos(2 * np.pi * s)\n",
"# Acoustic wave equations with Devito\r\n# Forward/adjoint nonlinear and Born modeling\r\n# Authors: Mathias Louboutin, Philipp Witte\r\n# Date: November 2017\r\n#\r\n\r\n# Import modules\r\nfrom __future__ import print_function\r\nimport numpy as np\r\nimport gc, os, psutil\r\nfrom numpy.random import randint\r\nfrom sympy import solve, cos, sin, expand, symbols\r\nfrom sympy import Function as fint\r\nfrom devito.logger import set_log_level\r\nfrom devito import Eq, Function, TimeFunction, Dimension, Operator, clear_cache, ConditionalDimension, DefaultDimension\r\nfrom devito import first_derivative, left, right\r\nfrom PySource import PointSource, Receiver\r\nfrom PyModel import Model\r\nfrom checkpoint import DevitoCheckpoint, CheckpointOperator\r\nfrom pyrevolve import Revolver\r\n\r\ndef acoustic_laplacian(v, rho):\r\n if rho is None:\r\n Lap = v.laplace\r\n rho = 1\r\n else:\r\n if isinstance(rho, Function):\r\n Lap = sum([first_derivative(first_derivative(v, order=int(v.space_order/2), side=left, dim=d) / rho,\r\n order=int(v.space_order/2), dim=d, side=right) for d in v.space_dimensions])\r\n else:\r\n Lap = 1 / rho * v.laplace\r\n return Lap, rho\r\n\r\ndef forward_modeling(model, src_coords, wavelet, rec_coords, save=False, space_order=8, nb=40, free_surface=False, op_return=False, dt=None):\r\n clear_cache()\r\n\r\n # If wavelet is file, read it\r\n if isinstance(wavelet, str):\r\n wavelet = np.load(wavelet)\r\n\r\n # Parameters\r\n nt = wavelet.shape[0]\r\n if dt is None:\r\n dt = model.critical_dt\r\n m, rho, damp = model.m, model.rho, model.damp\r\n\r\n # Create the forward wavefield\r\n if save is False and rec_coords is not None:\r\n u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=space_order)\r\n else:\r\n u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=space_order, save=nt)\r\n\r\n # Set up PDE and rearrange\r\n ulaplace, rho = acoustic_laplacian(u, rho)\r\n H = symbols('H')\r\n eqn = m / rho * u.dt2 - H + damp * u.dt\r\n \r\n # Input source is wavefield\r\n if isinstance(wavelet, TimeFunction):\r\n wf_src = TimeFunction(name='wf_src', grid=model.grid, time_order=2, space_order=space_order, save=nt)\r\n wf_src._data = wavelet._data\r\n eqn -= wf_src\r\n \r\n # Rearrange expression\r\n stencil = solve(eqn, u.forward, simplify=False, rational=False)[0]\r\n expression = [Eq(u.forward, stencil.subs({H : ulaplace}))]\r\n\r\n # Free surface\r\n if free_surface is True:\r\n fs = DefaultDimension(name=\"fs\", default_value=int(space_order/2))\r\n expression += [Eq(u.forward.subs({u.indices[-1]: model.nbpml - fs - 1}), \r\n -u.forward.subs({u.indices[-1]: model.nbpml + fs + 1}))]\r\n\r\n # Source symbol with input wavelet\r\n if src_coords is not None:\r\n src = PointSource(name='src', grid=model.grid, ntime=nt, coordinates=src_coords)\r\n src.data[:] = wavelet[:]\r\n src_term = src.inject(field=u.forward, offset=model.nbpml, expr=src * rho * dt**2 / m)\r\n expression += src_term\r\n\r\n # Data is sampled at receiver locations\r\n if rec_coords is not None:\r\n rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n rec_term = rec.interpolate(expr=u, offset=model.nbpml)\r\n expression += rec_term\r\n\r\n # Create operator and run\r\n set_log_level('ERROR')\r\n subs = model.spacing_map\r\n subs[u.grid.time_dim.spacing] = dt\r\n op = Operator(expression, subs=subs, dse='advanced', dle='advanced',\r\n name=\"Forward%s\" % randint(1e5))\r\n if op_return is False:\r\n op()\r\n if rec_coords is None:\r\n return 
u\r\n else:\r\n return rec.data, u\r\n else:\r\n return op\r\n\r\n\r\ndef adjoint_modeling(model, src_coords, rec_coords, rec_data, space_order=8, nb=40, free_surface=False, dt=None):\r\n clear_cache()\r\n\r\n # If wavelet is file, read it\r\n if isinstance(rec_data, str):\r\n rec_data = np.load(rec_data)\r\n\r\n # Parameters\r\n nt = rec_data.shape[0]\r\n if dt is None:\r\n dt = model.critical_dt\r\n m, rho, damp = model.m, model.rho, model.damp\r\n\r\n # Create the adjoint wavefield\r\n if src_coords is not None:\r\n v = TimeFunction(name=\"v\", grid=model.grid, time_order=2, space_order=space_order)\r\n else:\r\n v = TimeFunction(name=\"v\", grid=model.grid, time_order=2, space_order=space_order, save=nt)\r\n\r\n # Set up PDE and rearrange\r\n vlaplace, rho = acoustic_laplacian(v, rho)\r\n H = symbols('H')\r\n eqn = m / rho * v.dt2 - H - damp * v.dt\r\n\r\n # Input data is wavefield\r\n if isinstance(rec_data, TimeFunction):\r\n wf_rec = TimeFunction(name='wf_rec', grid=model.grid, time_order=2, space_order=space_order, save=nt)\r\n wf_rec._data = rec_data._data\r\n eqn -= wf_rec\r\n\r\n stencil = solve(eqn, v.backward, simplify=False, rational=False)[0]\r\n expression = [Eq(v.backward, stencil.subs({H: vlaplace}))]\r\n\r\n # Free surface\r\n if free_surface is True:\r\n fs = DefaultDimension(name=\"fs\", default_value=int(space_order/2))\r\n expression += [Eq(v.forward.subs({v.indices[-1]: model.nbpml - fs - 1}), \r\n -v.forward.subs({v.indices[-1]: model.nbpml + fs + 1}))]\r\n\r\n # Adjoint source is injected at receiver locations\r\n if rec_coords is not None:\r\n rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n rec.data[:] = rec_data[:]\r\n adj_src = rec.inject(field=v.backward, offset=model.nbpml, expr=rec * rho * dt**2 / m)\r\n expression += adj_src\r\n\r\n # Data is sampled at source locations\r\n if src_coords is not None:\r\n src = PointSource(name='src', grid=model.grid, ntime=nt, coordinates=src_coords)\r\n adj_rec = src.interpolate(expr=v, offset=model.nbpml)\r\n expression += adj_rec\r\n\r\n # Create operator and run\r\n set_log_level('ERROR')\r\n subs = model.spacing_map\r\n subs[v.grid.time_dim.spacing] = dt\r\n op = Operator(expression, subs=subs, dse='advanced', dle='advanced',\r\n name=\"Backward%s\" % randint(1e5))\r\n op()\r\n if src_coords is None:\r\n return v\r\n else:\r\n return src.data\r\n\r\ndef forward_born(model, src_coords, wavelet, rec_coords, space_order=8, nb=40, isic=False, dt=None):\r\n clear_cache()\r\n\r\n # Parameters\r\n nt = wavelet.shape[0]\r\n if dt is None:\r\n dt = model.critical_dt\r\n m, rho, dm, damp = model.m, model.rho, model.dm, model.damp\r\n\r\n # Create the forward and linearized wavefield\r\n u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=space_order)\r\n du = TimeFunction(name=\"du\", grid=model.grid, time_order=2, space_order=space_order)\r\n if len(model.shape) == 2:\r\n x,y = u.space_dimensions\r\n else:\r\n x,y,z = u.space_dimensions\r\n\r\n # Set up PDEs and rearrange\r\n ulaplace, rho = acoustic_laplacian(u, rho)\r\n dulaplace, _ = acoustic_laplacian(du, rho)\r\n H = symbols('H')\r\n S = symbols('S')\r\n eqn = m / rho * u.dt2 - H + damp * u.dt\r\n stencil1 = solve(eqn, u.forward, simplify=False, rational=False)[0]\r\n eqn_lin = m / rho * du.dt2 - H + damp * du.dt + S\r\n if isic:\r\n # Sum ((u.dx * d, / rho).dx for x in dimensions)\r\n # space_order//2 so that u.dx.dx has the same radius as u.laplace\r\n du_aux = sum([first_derivative(first_derivative(u, dim=d, 
order=space_order//2) * dm / rho,\r\n order=space_order//2, dim=d)\r\n for d in u.space_dimensions])\r\n lin_source = dm /rho * u.dt2 * m - du_aux\r\n else:\r\n lin_source = dm / rho * u.dt2\r\n\r\n stencil2 = solve(eqn_lin, du.forward, simplify=False, rational=False)[0]\r\n expression_u = [Eq(u.forward, stencil1.subs({H: ulaplace}))]\r\n expression_du = [Eq(du.forward, stencil2.subs({H: dulaplace, S: lin_source}))]\r\n\r\n # Define source symbol with wavelet\r\n src = PointSource(name='src', grid=model.grid, ntime=nt, coordinates=src_coords)\r\n src.data[:] = wavelet[:]\r\n src_term = src.inject(field=u.forward, offset=model.nbpml, expr=src * rho * dt**2 / m)\r\n\r\n # Define receiver symbol\r\n rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n rec_term = rec.interpolate(expr=du, offset=model.nbpml)\r\n\r\n # Create operator and run\r\n set_log_level('ERROR')\r\n expression = expression_u + src_term + expression_du + rec_term\r\n subs = model.spacing_map\r\n subs[u.grid.time_dim.spacing] = dt\r\n op = Operator(expression, subs=subs, dse='advanced', dle='advanced',\r\n name=\"Born%s\" % randint(1e5))\r\n op()\r\n\r\n return rec.data\r\n\r\n\r\ndef adjoint_born(model, rec_coords, rec_data, u=None, op_forward=None, is_residual=False, space_order=8, nb=40, isic=False, dt=None, n_checkpoints=None, maxmem=None):\r\n clear_cache()\r\n\r\n # Parameters\r\n nt = rec_data.shape[0]\r\n if dt is None:\r\n dt = model.critical_dt\r\n m, rho, damp = model.m, model.rho, model.damp\r\n\r\n # Create adjoint wavefield and gradient\r\n v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=space_order)\r\n gradient = Function(name='gradient', grid=model.grid)\r\n\r\n # Set up PDE and rearrange\r\n vlaplace, rho = acoustic_laplacian(v, rho)\r\n H = symbols('H')\r\n eqn = m / rho * v.dt2 - H - damp * v.dt\r\n stencil = solve(eqn, v.backward, simplify=False, rational=False)[0]\r\n expression = [Eq(v.backward, stencil.subs({H: vlaplace}))]\r\n\r\n # Data at receiver locations as adjoint source\r\n rec_g = Receiver(name='rec_g', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n if op_forward is None:\r\n rec_g.data[:] = rec_data[:]\r\n adj_src = rec_g.inject(field=v.backward, offset=model.nbpml, expr=rec_g * rho * dt**2 / m)\r\n\r\n # Gradient update\r\n if u is None:\r\n u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=space_order)\r\n if isic is not True:\r\n gradient_update = [Eq(gradient, gradient - dt * u.dt2 / rho * v)]\r\n else:\r\n # sum u.dx * v.dx fo x in dimensions.\r\n # space_order//2\r\n diff_u_v = sum([first_derivative(u, dim=d, order=space_order//2)*\r\n first_derivative(v, dim=d, order=space_order//2)\r\n for d in u.space_dimensions])\r\n gradient_update = [Eq(gradient, gradient - dt * (u * v.dt2 * m + diff_u_v) / rho)]\r\n\r\n # Create operator and run\r\n set_log_level('ERROR')\r\n expression += adj_src + gradient_update\r\n subs = model.spacing_map\r\n subs[u.grid.time_dim.spacing] = dt\r\n op = Operator(expression, subs=subs, dse='advanced', dle='advanced',\r\n name=\"Gradient%s\" % randint(1e5))\r\n\r\n # Optimal checkpointing\r\n if op_forward is not None:\r\n rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n cp = DevitoCheckpoint([u])\r\n if maxmem is not None:\r\n n_checkpoints = int(np.floor(maxmem * 10**6 / (cp.size * u.data.itemsize)))\r\n wrap_fw = CheckpointOperator(op_forward, u=u, m=model.m, rec=rec)\r\n wrap_rev = CheckpointOperator(op, u=u, v=v, m=model.m, rec_g=rec_g)\r\n\r\n 
# Run forward\r\n wrp = Revolver(cp, wrap_fw, wrap_rev, n_checkpoints, nt-2)\r\n wrp.apply_forward()\r\n\r\n # Residual and gradient\r\n if is_residual is True: # input data is already the residual\r\n rec_g.data[:] = rec_data[:]\r\n else:\r\n rec_g.data[:] = rec.data[:] - rec_data[:] # input is observed data\r\n fval = .5*np.dot(rec_g.data[:].flatten(), rec_g.data[:].flatten()) * dt\r\n wrp.apply_reverse()\r\n else:\r\n op()\r\n clear_cache()\r\n\r\n if op_forward is not None and is_residual is not True:\r\n return fval, gradient.data\r\n else:\r\n return gradient.data\r\n\r\n\r\n########################################################################################################################\r\n\r\ndef forward_freq_modeling(model, src_coords, wavelet, rec_coords, freq, space_order=8, nb=40, dt=None, factor=None):\r\n # Forward modeling with on-the-fly DFT of forward wavefields\r\n clear_cache()\r\n\r\n # Parameters\r\n nt = wavelet.shape[0]\r\n if dt is None:\r\n dt = model.critical_dt\r\n m, damp = model.m, model.damp\r\n freq_dim = Dimension(name='freq_dim')\r\n time = model.grid.time_dim\r\n if factor is None:\r\n factor = int(1 / (dt*4*np.max(freq)))\r\n tsave = ConditionalDimension(name='tsave', parent=model.grid.time_dim, factor=factor)\r\n if factor==1:\r\n tsave = time\r\n else:\r\n tsave = ConditionalDimension(name='tsave', parent=model.grid.time_dim, factor=factor)\r\n print(\"DFT subsampling factor: \", factor)\r\n\r\n # Create wavefields\r\n nfreq = freq.shape[0]\r\n u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=space_order)\r\n f = Function(name='f', dimensions=(freq_dim,), shape=(nfreq,))\r\n f.data[:] = freq[:]\r\n ufr = Function(name='ufr', dimensions=(freq_dim,) + u.indices[1:], shape=(nfreq,) + model.shape_domain)\r\n ufi = Function(name='ufi', dimensions=(freq_dim,) + u.indices[1:], shape=(nfreq,) + model.shape_domain)\r\n\r\n # Set up PDE and rearrange\r\n eqn = m * u.dt2 - u.laplace + damp * u.dt\r\n stencil = solve(eqn, u.forward, simplify=False, rational=False)[0]\r\n expression = [Eq(u.forward, stencil)]\r\n expression += [Eq(ufr, ufr + factor*u*cos(2*np.pi*f*tsave*factor*dt))]\r\n expression += [Eq(ufi, ufi - factor*u*sin(2*np.pi*f*tsave*factor*dt))]\r\n\r\n # Source symbol with input wavelet\r\n src = PointSource(name='src', grid=model.grid, ntime=nt, coordinates=src_coords)\r\n src.data[:] = wavelet[:]\r\n src_term = src.inject(field=u.forward, offset=model.nbpml, expr=src * dt**2 / m)\r\n\r\n # Data is sampled at receiver locations\r\n rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n rec_term = rec.interpolate(expr=u, offset=model.nbpml)\r\n\r\n # Create operator and run\r\n set_log_level('ERROR')\r\n expression += src_term + rec_term\r\n subs = model.spacing_map\r\n subs[u.grid.time_dim.spacing] = dt\r\n op = Operator(expression, subs=subs, dse='advanced', dle='advanced',\r\n name=\"Forward%s\" % randint(1e5))\r\n op()\r\n \r\n return rec.data, ufr, ufi\r\n\r\n\r\ndef adjoint_freq_born(model, rec_coords, rec_data, freq, ufr, ufi, space_order=8, nb=40, dt=None, isic=False, factor=None):\r\n clear_cache()\r\n\r\n # Parameters\r\n nt = rec_data.shape[0]\r\n if dt is None:\r\n dt = model.critical_dt\r\n m, damp = model.m, model.damp\r\n nfreq = ufr.shape[0]\r\n time = model.grid.time_dim\r\n if factor is None:\r\n factor = int(1 / (dt*4*np.max(freq)))\r\n tsave = ConditionalDimension(name='tsave', parent=model.grid.time_dim, factor=factor)\r\n if factor==1:\r\n tsave = time\r\n else:\r\n tsave = 
ConditionalDimension(name='tsave', parent=model.grid.time_dim, factor=factor)\r\n dtf = factor * dt\r\n ntf = factor / nt\r\n print(\"DFT subsampling factor: \", factor)\r\n\r\n # Create the forward and adjoint wavefield\r\n v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=space_order)\r\n f = Function(name='f', dimensions=(ufr.indices[0],), shape=(nfreq,))\r\n f.data[:] = freq[:]\r\n gradient = Function(name=\"gradient\", grid=model.grid)\r\n\r\n # Set up PDE and rearrange\r\n eqn = m * v.dt2 - v.laplace - damp * v.dt\r\n stencil = solve(eqn, v.backward, simplify=False, rational=False)[0]\r\n expression = [Eq(v.backward, stencil)]\r\n\r\n # Data at receiver locations as adjoint source\r\n rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)\r\n rec.data[:] = rec_data[:]\r\n adj_src = rec.inject(field=v.backward, offset=model.nbpml, expr=rec * dt**2 / m)\r\n\r\n # Gradient update\r\n if isic is True:\r\n if len(model.shape) == 2:\r\n gradient_update = [Eq(gradient, gradient + (2*np.pi*f)**2*ntf*(ufr*cos(2*np.pi*f*tsave*dtf) - ufi*sin(2*np.pi*f*tsave*dtf))*v*model.m -\r\n (ufr.dx*cos(2*np.pi*f*tsave*dtf) - ufi.dx*sin(2*np.pi*f*tsave*dtf))*v.dx*ntf -\r\n (ufr.dy*cos(2*np.pi*f*tsave*dtf) - ufi.dy*sin(2*np.pi*f*tsave*dtf))*v.dy*ntf)]\r\n else:\r\n gradient_update = [Eq(gradient, gradient + (2*np.pi*f)**2*ntf*(ufr*cos(2*np.pi*f*tsave*dtf) - ufi*sin(2*np.pi*f*tsave*dtf))*v*model.m -\r\n (ufr.dx*cos(2*np.pi*f*tsave*dtf) - ufi.dx*sin(2*np.pi*f*tsave*dtf))*v.dx*ntf -\r\n (ufr.dy*cos(2*np.pi*f*tsave*dtf) - ufi.dy*sin(2*np.pi*f*tsave*dtf))*v.dy*ntf - \r\n (ufr.dz*cos(2*np.pi*f*tsave*dtf) - ufi.dz*sin(2*np.pi*f*tsave*dtf))*v.dz*ntf)]\r\n else:\r\n gradient_update = [Eq(gradient, gradient + (2*np.pi*f)**2/nt*(ufr*cos(2*np.pi*f*tsave*dtf) - ufi*sin(2*np.pi*f*tsave*dtf))*v)]\r\n\r\n # Create operator and run\r\n set_log_level('ERROR')\r\n expression += adj_src + gradient_update\r\n subs = model.spacing_map\r\n subs[v.grid.time_dim.spacing] = dt\r\n op = Operator(expression, subs=subs, dse='advanced', dle='advanced',\r\n name=\"Gradient%s\" % randint(1e5))\r\n op()\r\n clear_cache()\r\n return gradient.data\r\n\r\n\r\n"
] | [
[
"numpy.exp",
"numpy.cos"
],
[
"numpy.load",
"numpy.max",
"numpy.floor",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Uzaaft/acconeer-python-exploration | [
"a3ae5c31d2d5354a57273c4ab4b51b56edbb1225",
"a3ae5c31d2d5354a57273c4ab4b51b56edbb1225"
] | [
"examples/processing/sleep_breathing.py",
"src/acconeer/exptool/recording.py"
] | [
"import numpy as np\nimport pyqtgraph as pg\nfrom scipy import signal\n\nfrom acconeer.exptool import configs, utils\nfrom acconeer.exptool.clients import SocketClient, SPIClient, UARTClient\nfrom acconeer.exptool.pg_process import PGProccessDiedException, PGProcess\n\n\ndef main():\n args = utils.ExampleArgumentParser(num_sens=1).parse_args()\n utils.config_logging(args)\n\n if args.socket_addr:\n client = SocketClient(args.socket_addr)\n elif args.spi:\n client = SPIClient()\n else:\n port = args.serial_port or utils.autodetect_serial_port()\n client = UARTClient(port)\n\n sensor_config = get_sensor_config()\n sensor_config.sensor = args.sensors\n processing_config = get_processing_config()\n\n session_info = client.setup_session(sensor_config)\n\n pg_updater = PGUpdater(sensor_config, processing_config, session_info)\n pg_process = PGProcess(pg_updater)\n pg_process.start()\n\n client.start_session()\n\n interrupt_handler = utils.ExampleInterruptHandler()\n print(\"Press Ctrl-C to end session\")\n\n processor = PresenceDetectionProcessor(sensor_config, processing_config, session_info)\n\n while not interrupt_handler.got_signal:\n info, sweep = client.get_next()\n plot_data = processor.process(sweep)\n\n if plot_data is not None:\n try:\n pg_process.put_data(plot_data)\n except PGProccessDiedException:\n break\n\n print(\"Disconnecting...\")\n pg_process.close()\n client.disconnect()\n\n\ndef get_sensor_config():\n config = configs.IQServiceConfig()\n config.range_interval = [0.4, 0.8]\n config.update_rate = 60\n config.gain = 0.6\n return config\n\n\ndef get_processing_config():\n return {\n \"n_dft\": {\n \"name\": \"Estimation window [s]\",\n \"value\": 15,\n \"limits\": [2, 20],\n \"type\": float,\n \"text\": \"s\",\n },\n \"t_freq_est\": {\n \"name\": \"Time between estimation [s]\",\n \"value\": 0.2,\n \"limits\": [0.1, 10],\n \"type\": float,\n \"text\": \"s\",\n },\n \"D\": {\n \"name\": \"Distance down sampling\",\n \"value\": 124,\n \"limits\": [0, 248],\n \"type\": int,\n \"text\": None,\n },\n \"f_high\": {\n \"name\": \"Bandpass high freq [Hz]\",\n \"value\": 0.8,\n \"limits\": [0, 10],\n \"type\": float,\n \"text\": None,\n },\n \"f_low\": {\n \"name\": \"Bandpass low freq [Hz]\",\n \"value\": 0.2,\n \"limits\": [0, 10],\n \"type\": float,\n \"text\": None,\n },\n \"lambda_p\": {\n \"name\": \"Threshold: Peak to noise ratio\",\n \"value\": 40,\n \"limits\": [1, 1000],\n \"type\": float,\n \"text\": None,\n },\n \"lambda_05\": {\n \"name\": \"Threshold: Peak to half harmonic ratio\",\n \"value\": 1.0,\n \"limits\": [0, 10],\n \"type\": float,\n \"text\": None,\n },\n }\n\n\nclass PresenceDetectionProcessor:\n def __init__(self, sensor_config, processing_config, session_info):\n self.config = sensor_config\n\n # Settings\n # Data length for frequency estimation [s] | 20\n n_dft = processing_config[\"n_dft\"][\"value\"]\n # Time between frequency estimations [s] | 2\n t_freq_est = processing_config[\"t_freq_est\"][\"value\"]\n # Time constant low-pass filter on IQ-data [s] | 0.04\n tau_iq = 0.04\n # Time constant low-pass filter on IQ-data [s] | 150\n self.f_s = self.config.update_rate\n # Spatial or Range down sampling factor | 124\n self.D = int(processing_config[\"D\"][\"value\"])\n # Lowest frequency of interest [Hz] | 0.1\n self.f_low = processing_config[\"f_low\"][\"value\"]\n # Highest frequency of interest [Hz] | 1\n self.f_high = processing_config[\"f_high\"][\"value\"]\n # Time down sampling for DFT | 40 f_s/M ~ 10 Hz\n self.M = int(self.f_s / 10)\n # Threshold: 
spectral peak to noise ratio [1] | 50\n self.lambda_p = processing_config[\"lambda_p\"][\"value\"]\n # Threshold: ratio fundamental and half harmonic\n self.lambda_05 = processing_config[\"lambda_05\"][\"value\"]\n # Interpolation between DFT points\n self.interpolate = True\n\n self.delta_f = 1 / n_dft\n self.dft_f_vec = np.arange(self.f_low, self.f_high, self.delta_f)\n self.dft_points = np.size(self.dft_f_vec)\n\n # Butterworth bandpass filter\n f_n = self.f_s / 2\n v_low = self.f_low / f_n\n v_high = self.f_high / f_n\n self.b, self.a = signal.butter(4, [v_low, v_high], btype=\"bandpass\")\n\n # Exponential lowpass filter\n self.alpha_iq = np.exp(-2 / (self.f_s * tau_iq))\n self.alpha_phi = np.exp(-2 * self.f_low / self.f_s)\n\n # Parameter init\n self.sweeps_in_block = int(np.ceil(n_dft * self.f_s))\n self.new_sweeps_per_results = int(np.ceil(t_freq_est * self.f_s))\n self.phi_vec = np.zeros((self.sweeps_in_block, 1))\n self.f_est_vec = np.zeros(1)\n self.f_dft_est_vec = np.zeros(1)\n self.snr_vec = 0\n\n self.sweep_index = 0\n\n def process(self, sweep):\n if self.sweep_index == 0:\n delay_points = int(np.ceil(np.size(sweep) / self.D))\n self.data_s_d_mat = np.zeros((self.sweeps_in_block, delay_points), dtype=\"complex\")\n self.data_s_d_mat[self.sweep_index, :] = self.downsample(sweep, self.D)\n\n out_data = None\n elif self.sweep_index < self.sweeps_in_block:\n self.data_s_d_mat[self.sweep_index, :] = self.iq_lp_filter_time(\n self.data_s_d_mat[self.sweep_index - 1, :],\n self.downsample(sweep, self.D)\n )\n\n temp_phi = self.unwrap_phase(\n self.phi_vec[self.sweep_index - 1],\n self.data_s_d_mat[self.sweep_index, :],\n self.data_s_d_mat[self.sweep_index - 1, :]\n )\n\n self.phi_vec[self.sweep_index] = self.unwrap_phase(\n self.phi_vec[self.sweep_index - 1],\n self.data_s_d_mat[self.sweep_index, :],\n self.data_s_d_mat[self.sweep_index - 1, :]\n )\n\n phi_filt = signal.lfilter(self.b, self.a, self.phi_vec, axis=0)\n\n out_data = {\n \"phi_raw\": self.phi_vec,\n \"phi_filt\": phi_filt,\n \"power_spectrum\": np.zeros(self.dft_points),\n \"x_dft\": np.linspace(self.f_low, self.f_high, self.dft_points),\n \"f_dft_est_hist\": self.f_dft_est_vec,\n \"f_est_hist\": self.f_est_vec,\n \"f_dft_est\": 0,\n \"f_est\": 0,\n \"f_low\": self.f_low,\n \"f_high\": self.f_high,\n \"snr\": 0,\n \"lambda_p\": self.lambda_p,\n \"lambda_05\": self.lambda_05,\n \"dist_range\": self.config.range_interval,\n \"init_progress\": round(100 * self.sweep_index / self.sweeps_in_block),\n }\n else:\n # Lowpass filter IQ data downsampled in distance points\n self.data_s_d_mat = np.roll(self.data_s_d_mat, -1, axis=0)\n self.data_s_d_mat[-1, :] = self.iq_lp_filter_time(\n self.data_s_d_mat[-1, :],\n self.downsample(sweep, self.D)\n )\n\n # Phase unwrapping of IQ data\n temp_phi = self.unwrap_phase(\n self.phi_vec[-1],\n self.data_s_d_mat[-1, :],\n self.data_s_d_mat[-2, :]\n )\n self.phi_vec = np.roll(self.phi_vec, -1, axis=0)\n self.phi_vec[-1] = temp_phi\n\n if np.mod(self.sweep_index, self.new_sweeps_per_results - 1) == 0:\n # Bandpass filter unwrapped data\n phi_filt_vec = signal.lfilter(self.b, self.a, self.phi_vec, axis=0)\n P, dft_est, _ = self.dft(self.downsample(phi_filt_vec, self.M))\n f_breath_est, _, snr, _ = self.breath_freq_est(P)\n\n self.f_est_vec = np.append(self.f_est_vec, f_breath_est)\n self.f_dft_est_vec = np.append(self.f_dft_est_vec, dft_est)\n self.snr_vec = np.append(self.snr_vec, snr)\n\n out_data = {\n \"phi_raw\": self.phi_vec,\n \"phi_filt\": phi_filt_vec,\n \"power_spectrum\": P,\n 
\"x_dft\": np.linspace(self.f_low, self.f_high, self.dft_points),\n \"f_dft_est_hist\": self.f_dft_est_vec,\n \"f_est_hist\": self.f_est_vec,\n \"f_dft_est\": dft_est,\n \"f_est\": f_breath_est,\n \"f_low\": self.f_low,\n \"f_high\": self.f_high,\n \"snr\": snr,\n \"lambda_p\": self.lambda_p,\n \"lambda_05\": self.lambda_05,\n \"dist_range\": self.config.range_interval,\n \"init_progress\": None,\n }\n else:\n out_data = None\n\n self.sweep_index += 1\n return out_data\n\n def downsample(self, data, n):\n return data[:: n]\n\n def iq_lp_filter_time(self, state, new_data):\n return self.alpha_iq * state + (1 - self.alpha_iq) * new_data\n\n def unwrap_phase(self, phase_lp, data_1, data_2):\n return phase_lp * self.alpha_phi + np.angle(np.mean(data_2 * np.conjugate(data_1)))\n\n def dft(self, data):\n data = np.squeeze(data)\n n_vec = np.arange(data.size) * self.M\n dft = np.exp((2j * np.pi / self.f_s) * np.outer(self.dft_f_vec, n_vec))\n P = np.square(np.abs(np.matmul(dft, data)))\n idx_f = np.argmax(P)\n dft_est = self.dft_f_vec[idx_f]\n return P, dft_est, P[idx_f]\n\n def noise_est(self, P):\n return np.mean(np.sort(P)[: (self.dft_points // 2) - 1])\n\n def half_peak_frequency(self, P, f_est):\n idx_half = int(f_est / (2 * self.delta_f))\n if idx_half < self.f_low:\n return 0\n else:\n return (1 / self.delta_f) * (\n (self.dft_f_vec[idx_half + 1] - f_est / 2) * P[idx_half]\n + (f_est / 2 - self.dft_f_vec[idx_half]) * P[idx_half + 1]\n )\n\n def breath_freq_est(self, P):\n f_idx = np.argmax(P)\n P_peak = P[f_idx]\n\n if self.interpolate:\n f_est, P_peak = self.freq_quad_interpolation(P)\n else:\n f_est = self.dft_f_vec[f_idx]\n\n P_half = self.half_peak_frequency(P, f_est)\n\n if (P_peak < self.lambda_05 * P_half):\n f_est = f_est / 2\n P_peak = P_half\n\n if self.f_low < f_est < self.f_high and P_peak > self.lambda_p * self.noise_est(P):\n f_est_valid = True\n else:\n f_est_valid = False\n f_est = 0\n\n snr = P_peak / self.noise_est(P)\n return f_est, P_peak, snr, f_est_valid\n\n def freq_quad_interpolation(self, P):\n f_idx = np.argmax(P)\n\n if 0 < f_idx < P.size and P.size > 3:\n f_est = self.dft_f_vec[f_idx] + self.delta_f / 2 * (\n (np.log(P[f_idx + 1]) - np.log(P[f_idx - 1]))\n / (2 * np.log(P[f_idx]) - np.log(P[f_idx + 1]) - np.log(P[f_idx - 1]))\n )\n P_peak = P[f_idx] + np.exp(\n 1 / 8 * np.square(np.log(P[f_idx + 1]) - np.log(P[f_idx - 1]))\n / (2 * np.log(P[f_idx]) - np.log(P[f_idx + 1]) - np.log(P[f_idx - 1]))\n )\n\n if not (self.f_low < f_est < self.f_high):\n f_est = 0\n else:\n f_est = 0\n P_peak = 0\n\n return f_est, P_peak\n\n\nclass PGUpdater:\n def __init__(self, sensor_config, processing_config, session_info):\n self.config = sensor_config\n\n def setup(self, win):\n win.resize(800, 600)\n win.setWindowTitle(\"Acconeer sleep breathing estimation example\")\n\n phi_title = \"Breathing motion (detection range: {} m to {} m)\" \\\n .format(*self.config.range_interval)\n self.phi_plot = win.addPlot(title=phi_title)\n self.phi_plot.setMenuEnabled(False)\n self.phi_plot.setMouseEnabled(x=False, y=False)\n self.phi_plot.hideButtons()\n self.phi_plot.showGrid(x=True, y=True)\n self.phi_plot.setLabel(\"left\", \"Amplitude\")\n self.phi_plot.setLabel(\"bottom\", \"Samples\")\n self.phi_plot.addLegend()\n self.filt_phi_curve = self.phi_plot.plot(\n pen=utils.pg_pen_cycler(0),\n name=\"Filtered\",\n )\n self.raw_phi_curve = self.phi_plot.plot(\n pen=utils.pg_pen_cycler(1),\n name=\"Raw\",\n )\n\n win.nextRow()\n\n self.spect_plot = win.addPlot(title=\"Power spectrum\")\n 
self.spect_plot.setMenuEnabled(False)\n self.spect_plot.setMouseEnabled(x=False, y=False)\n self.spect_plot.hideButtons()\n self.spect_plot.showGrid(x=True, y=True)\n self.spect_plot.setLabel(\"left\", \"Power\")\n self.spect_plot.setLabel(\"bottom\", \"Frequency (Hz)\")\n self.spect_curve = self.spect_plot.plot(pen=utils.pg_pen_cycler(1))\n self.spect_smax = utils.SmoothMax(self.config.update_rate / 15)\n self.spect_dft_inf_line = pg.InfiniteLine(pen=utils.pg_pen_cycler(1, \"--\"))\n self.spect_plot.addItem(self.spect_dft_inf_line)\n self.spect_est_inf_line = pg.InfiniteLine(pen=utils.pg_pen_cycler(0, \"--\"))\n self.spect_plot.addItem(self.spect_est_inf_line)\n self.spect_plot.setXRange(0, 1)\n self.spect_plot.setYRange(0, 1)\n self.spect_text_item = pg.TextItem(\"Initiating...\", anchor=(0.5, 0.5), color=\"k\")\n self.spect_text_item.setPos(0.5, 0.5)\n self.spect_plot.addItem(self.spect_text_item)\n\n win.nextRow()\n self.fest_plot = win.addPlot(title=\"Breathing estimation history\")\n self.fest_plot.setMenuEnabled(False)\n self.fest_plot.setMouseEnabled(x=False, y=False)\n self.fest_plot.hideButtons()\n self.fest_plot.showGrid(x=True, y=True)\n self.fest_plot.setLabel(\"left\", \"Frequency (Hz)\")\n self.fest_plot.setLabel(\"bottom\", \"Samples\")\n self.fest_plot.addLegend()\n self.fest_curve = self.fest_plot.plot(\n pen=utils.pg_pen_cycler(0),\n name=\"Breathing est.\",\n )\n self.fest_dft_curve = self.fest_plot.plot(\n pen=utils.pg_pen_cycler(1),\n name=\"DFT est.\",\n )\n self.fest_plot.setXRange(0, 1)\n self.fest_plot.setYRange(0, 0.5)\n self.fest_text_item = pg.TextItem(anchor=(0, 0), color=\"k\")\n self.fest_text_item.setPos(0, 0.5)\n self.fest_plot.addItem(self.fest_text_item)\n\n def update(self, data):\n self.filt_phi_curve.setData(np.squeeze(data[\"phi_filt\"]))\n self.raw_phi_curve.setData(np.squeeze(data[\"phi_raw\"]))\n\n if data[\"init_progress\"] is not None:\n self.spect_text_item.setText(\"Initiating: {} %\".format(data[\"init_progress\"]))\n else:\n snr = data[\"snr\"]\n if snr == 0:\n s = \"SNR: N/A | {:.0f} dB\".format(10 * np.log10(data[\"lambda_p\"]))\n else:\n fmt = \"SNR: {:.0f} | {:.0f} dB\"\n s = fmt.format(10 * np.log10(snr), 10 * np.log10(data[\"lambda_p\"]))\n self.spect_text_item.setText(s)\n self.spect_text_item.setAnchor((0, 1))\n self.spect_text_item.setPos(0, 0)\n\n f_est = data[\"f_est\"]\n if f_est > 0:\n s = \"Latest frequency estimate: {:.2f} Hz | {:.0f} BPM\".format(f_est, f_est * 60)\n self.fest_text_item.setText(s)\n\n self.fest_plot.enableAutoRange(x=True)\n self.spect_curve.setData(data[\"x_dft\"], data[\"power_spectrum\"])\n self.spect_dft_inf_line.setValue(data[\"f_dft_est\"])\n self.spect_est_inf_line.setValue(data[\"f_est\"])\n self.spect_plot.setYRange(0, self.spect_smax.update(data[\"power_spectrum\"]))\n self.fest_curve.setData(np.squeeze(data[\"f_est_hist\"]))\n self.fest_dft_curve.setData(np.squeeze(data[\"f_dft_est_hist\"]))\n\n\nif __name__ == \"__main__\":\n main()\n",
"import copy\nimport datetime\nimport json\nimport time\nimport warnings\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport attr\nimport h5py\nimport numpy as np\n\nimport acconeer.exptool\nimport acconeer.exptool.structs.configbase as cb\nfrom acconeer.exptool import configs, modes\n\n\[email protected]\nclass Record:\n # Sensor session related (required):\n mode = attr.ib(type=modes.Mode) # save as str (Mode.name), restore with get_mode\n sensor_config_dump = attr.ib(type=str) # cb._dumps\n session_info = attr.ib(type=dict) # save/restore with json.dumps/loads\n data = attr.ib(default=None) # [np.array], saved as np.array, restore as is\n data_info = attr.ib(type=list, factory=list) # [[{...}]], save/restore with json.dumps/loads\n\n # Processing related (optional):\n module_key = attr.ib(type=Optional[str], default=None)\n processing_config_dump = attr.ib(type=Optional[str], default=None) # cb._dumps\n\n # Other (optional):\n rss_version = attr.ib(type=Optional[str], default=None)\n lib_version = attr.ib(type=Optional[str], default=None)\n timestamp = attr.ib(type=Optional[str], default=None)\n sample_times = attr.ib(default=None)\n note = attr.ib(type=Optional[str], default=None) # not to be used internally\n\n # Legacy (optional):\n legacy_processing_config_dump = attr.ib(type=Optional[str], default=None)\n\n def __attrs_post_init__(self):\n self._iter_index = None\n\n def __iter__(self):\n self._iter_index = 0\n return self\n\n def __next__(self):\n try:\n current_data_info = self.data_info[self._iter_index]\n current_data = self.data[self._iter_index]\n except (IndexError, TypeError):\n raise StopIteration\n\n self._iter_index += 1\n return current_data_info, current_data\n\n @property\n def sensor_config(self):\n return configs.load(self.sensor_config_dump, self.mode)\n\n\nclass Recorder:\n def __init__(self, **kwargs):\n sensor_config = kwargs.pop(\"sensor_config\")\n session_info = kwargs.pop(\"session_info\")\n module_key = kwargs.pop(\"module_key\", None)\n processing_config = kwargs.pop(\"processing_config\", None)\n rss_version = kwargs.pop(\"rss_version\", None)\n\n mode = kwargs.pop(\"mode\", sensor_config.mode)\n\n self.max_len = kwargs.pop(\"max_len\", None)\n\n if kwargs:\n key = next(iter(kwargs.keys()))\n msg = \"Recorder got an unexpected keyword argument '{}'\".format(key)\n raise TypeError(msg)\n\n if not isinstance(sensor_config, cb.SensorConfig):\n raise TypeError(\"Unexpected sensor config type\")\n\n if isinstance(processing_config, cb.ProcessingConfig):\n processing_config_dump = processing_config._dumps()\n elif processing_config is None:\n processing_config_dump = None\n else:\n raise TypeError(\"Unexpected processing config type\")\n\n self.record = Record(\n mode=mode,\n sensor_config_dump=sensor_config._dumps(),\n session_info=copy.deepcopy(session_info),\n module_key=module_key,\n processing_config_dump=processing_config_dump,\n rss_version=rss_version,\n lib_version=acconeer.exptool.__version__,\n timestamp=datetime.datetime.now().isoformat(timespec=\"seconds\"),\n )\n\n self.record.data = []\n self.record.sample_times = []\n\n def sample(self, data_info: list, data: np.ndarray):\n expected_num_dims = 3 if self.record.mode == modes.Mode.SPARSE else 2\n if data.ndim != expected_num_dims: # then assume data is squeezed\n # unsqueeze (add back sensor dim)\n data = data[None, ...]\n data_info = [data_info]\n\n self.record.data.append(data.copy())\n self.record.data_info.append(copy.deepcopy(data_info))\n\n 
self.record.sample_times.append(time.time())\n\n if self.max_len is not None and len(self.record.data) > self.max_len:\n self.record.data.pop(0)\n self.record.data_info.pop(0)\n self.record.sample_times.pop(0)\n\n def close(self):\n self.record.data = np.array(self.record.data)\n self.record.sample_times = np.array(self.record.sample_times)\n return self.record\n\n\ndef save(filename: Union[str, Path], record: Record):\n filename = str(filename)\n\n if filename.lower().endswith(\".h5\"):\n return save_h5(filename, record)\n elif filename.lower().endswith(\".npz\"):\n return save_npz(filename, record)\n elif filename.lower().endswith(\".npy\"):\n raise ValueError(\"Unknown file format '.npy', perhaps you meant '.npz'?\")\n else:\n raise ValueError(\"Unknown file format\")\n\n\ndef pack(record: Record) -> dict:\n packed = attr.asdict(record, filter=lambda attr, v: attr.type in (str, Optional[str]))\n packed[\"mode\"] = record.mode.name.lower()\n packed[\"session_info\"] = json.dumps(record.session_info)\n packed[\"data_info\"] = json.dumps(record.data_info)\n\n data = np.array(record.data)\n if np.isrealobj(data):\n data_u16 = data.astype(\"u2\")\n if np.all(data == data_u16):\n data = data_u16\n\n packed[\"data\"] = data\n\n if record.sample_times is not None:\n packed[\"sample_times\"] = np.array(record.sample_times)\n\n packed = {k: v for k, v in packed.items() if v is not None}\n\n return packed\n\n\ndef save_npz(filename: Union[str, Path], record: Record):\n filename = str(filename)\n\n if not filename.lower().endswith(\".npz\"):\n filename = filename + \".npz\"\n\n packed = pack(record)\n np.savez_compressed(filename, **packed)\n\n\ndef save_h5(filename: Union[str, Path], record: Record):\n filename = str(filename)\n\n if not filename.lower().endswith(\".h5\"):\n filename = filename + \".h5\"\n\n packed = pack(record)\n\n with h5py.File(filename, \"w\") as f:\n for k, v in packed.items():\n if isinstance(v, str):\n dtype = h5py.special_dtype(vlen=str)\n compression = None\n elif isinstance(v, np.ndarray):\n dtype = v.dtype\n compression = \"gzip\"\n else:\n raise TypeError\n\n f.create_dataset(k, data=v, dtype=dtype, compression=compression)\n\n\ndef load(filename: Union[str, Path]) -> Record:\n filename = str(filename)\n\n if filename.lower().endswith(\".h5\"):\n return load_h5(filename)\n elif filename.lower().endswith(\".npz\"):\n return load_npz(filename)\n else:\n raise ValueError(\"Unknown file format\")\n\n\ndef unpack(packed: dict) -> Record:\n kwargs = {}\n\n data = packed[\"data\"]\n if np.isrealobj(data):\n data = data.astype(\"float\")\n\n kwargs[\"data\"] = data\n\n for a in attr.fields(Record):\n k = a.name\n if a.type == str:\n kwargs[k] = packed[k]\n elif a.type == Optional[str]:\n kwargs[k] = packed.get(k, None)\n\n try:\n mode = modes.get_mode(packed[\"mode\"])\n except ValueError:\n mode = None\n warnings.warn(\"unknown mode encountered while unpacking record\")\n\n kwargs[\"mode\"] = mode\n\n kwargs[\"session_info\"] = json.loads(packed[\"session_info\"])\n kwargs[\"data_info\"] = json.loads(packed[\"data_info\"])\n\n kwargs[\"sample_times\"] = packed.get(\"sample_times\", None)\n\n assert len(kwargs[\"data\"]) == len(kwargs[\"data_info\"])\n\n return Record(**kwargs)\n\n\ndef load_npz(filename: Union[str, Path]) -> Record:\n filename = str(filename)\n\n packed = {}\n with np.load(filename, allow_pickle=False) as f:\n for k, v in f.items():\n if v.dtype.type is np.unicode_:\n v = str(v)\n\n packed[k] = v\n\n return unpack(packed)\n\n\ndef load_h5(filename: 
Union[str, Path]) -> Record:\n filename = str(filename)\n\n with h5py.File(filename, \"r\") as f:\n packed = {k: v[()] for k, v in f.items()}\n\n for k, v in packed.items():\n if isinstance(v, bytes):\n packed[k] = v.decode()\n\n return unpack(packed)\n\n\nif __name__ == \"__main__\":\n import argparse\n import os\n import sys\n\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest=\"command\")\n subparsers.required = True\n\n sp = subparsers.add_parser(\"resave\")\n sp.add_argument(\"source\")\n sp.add_argument(\"dest\")\n sp.add_argument(\"-f\", \"--force\", action=\"store_true\")\n\n args = parser.parse_args()\n\n # assume resave as it's currently the only option\n\n if not args.force and os.path.exists(args.dest):\n sys.stderr.write(\"error: destination file already exists (try using -f)\\n\")\n sys.exit(1)\n\n record = load(args.source)\n save(args.dest, record)\n"
] | [
[
"numpy.linspace",
"numpy.squeeze",
"numpy.exp",
"numpy.roll",
"numpy.conjugate",
"numpy.arange",
"numpy.matmul",
"numpy.ceil",
"scipy.signal.butter",
"numpy.size",
"numpy.argmax",
"scipy.signal.lfilter",
"numpy.outer",
"numpy.zeros",
"numpy.log",
"numpy.append",
"numpy.log10",
"numpy.sort",
"numpy.mod"
],
[
"numpy.all",
"numpy.savez_compressed",
"numpy.isrealobj",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Zuricho/kagome_HVQE | [
"bbcb591ac30e81a7b60ce1d2d6c67d92fa25a899"
] | [
"_HVQE.py"
] | [
"\"\"\"\nContains function defs for running HVQE.py\n\"\"\"\nclass Name: # Simple namespace class that is used for dumping and restarting the program.\n pass\n\nimport numpy # For the cases where GPU==True and we still want to use numpy.\nimport qem\nimport chainer as ch\nfrom datetime import datetime\nimport argparse\nimport scipy.optimize\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\ntry: # Use GPU if CuPy installation is available. \n import cupy as xp\nexcept ImportError:\n import numpy as xp\n\n\ndef Heisenberg_energy_from_parameters(complete_graph,init_reg,layers,n,par_multiplicity,parameters,reg_psi_list):\n \"\"\"\n Return the energy of a state as defined via the init state, the ansatz and a setting for the parameters. Ansatz must already be mapped to ints and given as regular python list.\n \n Returns\n -------\n E : chainer.Variable\n \n \"\"\"\n reg=qem.EmptyReg(n)\n reg.psi=init_reg.psi\n\n edges=[edge for layer in layers for edge in layer]\n\n for i in range(len(parameters)):\n gate=qem.Heisenberg_exp(parameters[i])\n for j in range(par_multiplicity):\n edge=edges[(i*par_multiplicity+j)%len(edges)]\n action=qem.Action(edge,gate)\n qem.apply_action(action,reg)\n\n E,reg_psi=qem.Heisenberg_energy(complete_graph,reg,reg_psi_list)\n return E,reg_psi\n\ndef infidelity_from_parameters(init_reg,layers,n,par_multiplicity,parameters,gs_reg):\n reg=qem.EmptyReg(n)\n reg.psi=init_reg.psi \n\n edges=[edge for layer in layers for edge in layer]\n\n for i in range(len(parameters)):\n gate=qem.Heisenberg_exp(parameters[i])\n for j in range(par_multiplicity):\n edge=edges[(i*par_multiplicity+j)%len(edges)] \n action=qem.Action(edge,gate)\n qem.apply_action(action,reg)\n\n inf=qem.infidelity(reg,gs_reg)\n return inf\n\ndef run_VQE(cmd_args,run_args,init_reg,gs_reg,reg_psi_list):\n \"\"\"\n Run the VQE.\n \"\"\"\n global reg_psi\n # global reg_psi_list\n \n vqe_out=Name()\n vqe_out.n_fn_calls=0\n vqe_out.local_min_list=[]\n vqe_out.local_min_parameters_list=[]\n vqe_out.local_min_accept_list=[]\n \n def calc_cost(parameters):\n nonlocal vqe_out\n nonlocal cmd_args\n nonlocal run_args\n global reg_psi\n tmp=Name()\n parameters=ch.Variable(xp.array(parameters))\n \n if cmd_args.cost_fn=='energy':\n cost,reg_psi=Heisenberg_energy_from_parameters(run_args.complete_graph,init_reg,run_args.layers,run_args.n,cmd_args.par_multiplicity,parameters,reg_psi_list)\n elif cmd_args.cost_fn=='infidelity':\n cost=infidelity_from_parameters(init_reg,run_args.layers,run_args.n,cmd_args.par_multiplicity,parameters,gs_reg)\n else:\n raise ValueError('Not a valid cost function')\n cost.backward()\n g=parameters.grad\n vqe_out.n_fn_calls+=1\n print('.',end='',flush=True) #Progress indicator. One dot per function call. Here one function call is defined as one forward and one backward evaluation. \n if run_args.GPU==True:\n cost=cost.array.get()\n g=g.get()\n elif run_args.GPU==False:\n cost=cost.array\n\n ### Dump state of the prograpm. Restart has to be done by hand by running another HVQE.py from the command line. 
\n if cmd_args.dump_interval!=None:\n if vqe_out.n_fn_calls%cmd_args.dump_interval==0:\n tmp=Name()\n tmp.parameters=parameters.array.tolist()\n tmp.cost=float(cost)\n tmp.g=g.tolist()\n date_dump=str(datetime.utcnow()) # Current time in UTC.\n vqe_out.init_par=list(vqe_out.init_par)\n dump=[vars(cmd_args),vars(run_args),vars(vqe_out),vars(tmp)]\n with open(cmd_args.path+'/dump.txt', 'a') as file:\n file.write(str(dump)+'\\n\\n')\n print('Data dump on', date_dump)\n ###\n return cost, g\n\n def callback(x,f,accept): # Due to a bug in the scipy (version 1.3.1) basinhopping routine, this function is not called after the first local minimum. Hence the lists local_min_list, local_min_parameters_list and local_min_accept_list will not contain entries for the first local minimum found. This issue will be solved in version 1.6.0 of schipy. See https://github.com/scipy/scipy/pull/13029\n #If basinhopping is run with n_iter=0, only a single local minimum is found, and in this case the value of the cost function, and the parameters, are in fact stored, because this minimum is the optimal minimum found and delivers the data for the output of the bassinhopping routine as a whole.\n nonlocal vqe_out\n print('\\nNew local min for', vars(cmd_args))\n print('cost=',float(f),'accepted=',accept,'parameters=',list(x))\n vqe_out.local_min_list.append(float(f)) \n vqe_out.local_min_parameters_list.append(list(x))\n vqe_out.local_min_accept_list.append(accept)\n \n if cmd_args.init_par is None:\n vqe_out.init_par=numpy.random.rand(cmd_args.n_par)/1000-1/2000\n else:\n assert len(cmd_args.init_par)==cmd_args.n_par, 'List of initial parameters must be of length n_par.'\n vqe_out.init_par=numpy.array(cmd_args.init_par)\n if cmd_args.n_par==0: # If there is no circuit, just output the energy of the init state.\n if cmd_args.cost_fn=='energy':\n vqe_out.cost_VQE,reg_psi=Heisenberg_energy_from_parameters(run_args.complete_graph,init_reg,run_args.layers,run_args.n,cmd_args.par_multiplicity,[],reg_psi_list) # Still a Chainer.Variable\n print(reg_psi)\n vqe_out.cost_VQE=float(vqe_out.cost_VQE.array)\n vqe_out.opt_parameters=[]\n vqe_out.init_par=[]\n if cmd_args.cost_fn=='infidelity':\n vqe_out.cost_VQE=infidelity_from_parameters(init_reg,run_args.layers,run_args.n,cmd_args.par_multiplicity,[],gs_reg)\n vqe_out.cost_VQE=float(vqe_out.cost_VQE.array)\n vqe_out.opt_parameters=[]\n vqe_out.init_par=[]\n \n else:\n sol=scipy.optimize.basinhopping(calc_cost,vqe_out.init_par,stepsize=cmd_args.stepsize,minimizer_kwargs={'jac':True},niter=cmd_args.n_iter,interval=25,callback=callback,T=cmd_args.temperature)\n vqe_out.cost_VQE=float(sol.fun)\n vqe_out.opt_parameters=sol.x.tolist()\n vqe_out.init_par=list(vqe_out.init_par)\n\n return vqe_out,reg_psi\n \ndef plot_VQE_data(path,fn,par_multiplicity,gates_per_cycle):\n # Import data\n with open(path+'/output.txt','r') as f:\n f.readline()\n data=f.readlines()\n data=[line for line in data if line != '\\n']\n data=[eval(x.strip()) for x in data]\n \n E=0\n if fn=='energy': \n with open(path+'/lowest_energies.txt','r') as f:\n E=f.readlines()\n E=[eval(x.strip()) for x in E]\n E[0]=E[0]\n\n # Sort data into sublists based on the value of n_iter\n n_iter_set=set([line[0]['n_iter'] for line in data])\n data_=[]\n for n_iter in n_iter_set:\n n_iter_array=[line for line in data if line[0]['n_iter']==n_iter]\n n_iter_array.sort(key=lambda x: x[0]['n_par']) # Put datapoints in order of increasing number of parameters. 
\n data_.append(n_iter_array)\n\n data=data_\n n_iter_set=list(n_iter_set)\n \n # Make one plot for every possible val of n_iter, all in one figure.\n fig, ax = plt.subplots()\n for n_iter_class in data:\n n_par_list=[line[0]['n_par'] for line in n_iter_class]\n p_list=[n_par*par_multiplicity/gates_per_cycle for n_par in n_par_list]\n if fn=='energy':\n E_VQE_list=[line[1]['E_VQE'] for line in n_iter_class]\n E_VQE_list=[-(E_VQE-E[0])/E[0] for E_VQE in E_VQE_list] # The relative error in the energy is going to be plotted.\n ax.semilogy(p_list,E_VQE_list,'-o')\n elif fn=='infidelity':\n inf_VQE_list=[line[1]['inf_VQE'] for line in n_iter_class]\n ax.semilogy(p_list,inf_VQE_list,'-o')\n elif fn=='wall_clock':\n wall_clock_list=[line[1]['wall_clock'] for line in n_iter_class]\n ax.semilogy(p_list,wall_clock_list,'-o')\n\n if fn=='energy':\n ax.axhline(y=-(E[1]-E[0])/E[0]) # Plot a horizontal line at the first excited state.\n ax.axhline(y=-(E[1]-E[0])/E[0]/2,ls='--') # Plot a horizontal dashed line halfway the ground state and the first excited state.\n ax.set_ylabel('Relative energy error')\n elif fn=='infidelity':\n ax.set_ylabel('Infidelity')\n elif fn=='wall_clock':\n ax.set_ylabel('Wall-clock time (h)')\n\n # On the x-axis, put the number of cycles rather then the number of parameters. \n ax.set_xlabel('p') \n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.grid(True)\n ax.legend(n_iter_set, title='n_iter')\n plt.title(path)\n\n\n # Write to disk.\n if fn=='energy':\n plt.savefig(path+'/E_VQE.pdf')\n if fn=='infidelity':\n plt.savefig(path+'/inf_VQE.pdf') \n if fn=='wall_clock':\n plt.savefig(path+'/wall_clock.pdf')\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.MaxNLocator",
"numpy.random.rand",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Piyush987/NLP-basics | [
"22ff656b10e0b04ca41dca97c21468e6372596bf"
] | [
"preprocess.py"
] | [
"import numpy as np\nimport pandas as pd\nimport re\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom gensim.models import Word2Vec, Phrases\nimport nltk\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\nnltk.download('wordnet')\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.model_selection import train_test_split\n\n\ndf = pd.read_csv(r'IMDB Dataset.csv')\nprint(df.info()) #Check non NULL values in columns\n\n#Cleaning, preprocess\ndef clean_data(text):\n text = re.sub(r'<br />', ' ', text) #Removes Html tag\n text = re.sub(r'[^\\ a-zA-Z0-9]+', '', text) #Removes non alphanumeric\n text = re.sub(r'^\\s*|\\s\\s*', ' ', text).strip() #Removes extra whitespace, tabs\n stop_words = set(stopwords.words('english')) \n lemmatizer = WordNetLemmatizer()\n text = text.lower().split() #Converts text to lowercase\n cleaned_text = list()\n for word in text: \n if word in stop_words: #Removes Stopwords, i.e words that don't convey any meaningful context/sentiments\n continue \n word = lemmatizer.lemmatize(word, pos = 'v') #Lemmatize words, pos = verbs, i.e playing, played becomes play\n cleaned_text.append(word)\n text = ' '.join(cleaned_text)\n return text\n\ndf['cleaned_review'] = df['review'].apply(lambda x: clean_data(x))\n\n\ndef convert_sentiment_to_int(text): #Convert sentiment positive to 1, negative to 0\n if(text.lower() == 'positive'):\n text = 1\n else:\n text = 0\n return text\n\ndf['sentiment'] = df['sentiment'].apply(lambda x: convert_sentiment_to_int(x))\n\nresult = [len(x) for x in [df['cleaned_review'].iloc[i].split() for i in range(50000)]]\nprint(np.mean(result)) #Mean no of words in each cleaned review\n\nX_train = [text for text in list(df['cleaned_review'].iloc[:25000])] #Preparation of X,Y\nX_test = [text for text in list(df['cleaned_review'].iloc[25000:])]\nY_train = [text for text in list(df['sentiment'].iloc[:25000])]\nY_test = [text for text in list(df['sentiment'].iloc[25000:])]\n\nprint(len(np.unique(np.hstack(X_train)))) #No of unique words in cleaned review\n\n#Tokenize and Padding\nX = [text for text in list(df['cleaned_review'])] \nmax_vocab = 10000 #Max features\nmax_sent_length = 150 #Max word length of every review\ntokenizer = Tokenizer(num_words = max_vocab)\ntokenizer.fit_on_texts(X)\nX_train_tokenized = pad_sequences(tokenizer.texts_to_sequences(X_train), maxlen = max_sent_length) #Tokenization, i.e converting words to int\nX_test_tokenized = pad_sequences(tokenizer.texts_to_sequences(X_test), maxlen = max_sent_length)\n"
] | [
[
"numpy.hstack",
"pandas.read_csv",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SolitaryKnife/pytorch_dataset | [
"36773064b333d0aafc1c5d0e559271c56516afe7"
] | [
"functional.py"
] | [
"\n#####################################################\n# Basic Functions\n#####################################################\n\n\ndef identity_transform(x):\n return x\n\n\n#####################################################\n# Dataset Operations\n#####################################################\n\ndef dpipe(dataset=None, operators=[]):\n if dataset is None:\n from functools import partial\n return partial(dpipe, operators=operators)\n\n if callable(operators):\n return operators(dataset)\n\n for operator in operators:\n dataset = operator(dataset)\n return dataset\n\n\ndef dmap(values=None, transform=None, force_iter=False):\n if values is None:\n from functools import partial\n return partial(dmap, transform=transform, force_iter=force_iter)\n\n transform = transform or identity_transform\n if isinstance(transform, (list, tuple)):\n from torchvision.transforms import Compose\n transform = Compose(transform)\n\n # If force as IterableDataset\n if force_iter:\n from .dataset import ValueIterableDataset\n return ValueIterableDataset(values, transform)\n\n # If dataset[idx] and len(dataset) are available, use ValueDataset\n if callable(getattr(values, \"__getitem__\", None)) and callable(getattr(values, \"__len__\", None)):\n from .dataset import ValueDataset\n return ValueDataset(values, transform)\n\n # Fallback to IterableDataset\n from .dataset import ValueIterableDataset\n return ValueIterableDataset(values, transform)\n\n\ndef dzip(*datasets, zip_transform=None):\n if len(datasets) <= 0:\n from functools import partial\n return partial(dzip, zip_transform=zip_transform)\n\n zip_transform = zip_transform or identity_transform\n if isinstance(zip_transform, (list, tuple)):\n from torchvision.transforms import Compose\n zip_transform = Compose(zip_transform)\n\n # Check if there are IterableDataset, then use ZipIterableDataset\n from torch.utils.data import IterableDataset\n if any([isinstance(ds, IterableDataset) for ds in datasets]):\n\n from .dataset import ZipIterableDataset\n return ZipIterableDataset(datasets, zip_transform)\n\n # Otherwise, use ZipDataset\n from .dataset import ZipDataset\n return ZipDataset(datasets, zip_transform)\n\n\ndef dcombine(*datasets, comb_transform=None, custom_indexer=None):\n if len(datasets) <= 0:\n from functools import partial\n return partial(dcombine, comb_transform=comb_transform)\n\n comb_transform = comb_transform or identity_transform\n if isinstance(comb_transform, (list, tuple)):\n from torchvision.transforms import Compose\n comb_transform = Compose(comb_transform)\n\n # Check if there are IterableDataset, then use CombineIterableDataset\n from torch.utils.data import IterableDataset\n if any([isinstance(ds, IterableDataset) for ds in datasets]):\n\n from .dataset import CombineIterableDataset\n return CombineIterableDataset(datasets, comb_transform, indexer=custom_indexer)\n\n # Otherwise, use CombineDataset\n from .dataset import CombineDataset\n return CombineDataset(datasets, comb_transform, indexer=custom_indexer)\n\n\ndef daugment(dataset, aug_fn=None):\n if dataset is None:\n from functools import partial\n return partial(daugment, aug_fn=aug_fn)\n\n aug_fn = aug_fn or identity_transform\n if isinstance(aug_fn, (list, tuple)):\n from torchvision.transforms import Compose\n aug_fn = Compose(aug_fn)\n\n from .dataset import AugmentedDataset\n return AugmentedDataset(dataset, aug_fn)\n\n\ndef dcache(dataset=None, cache=None, enable=True):\n assert cache is not None\n\n if dataset is None:\n from functools import partial\n 
return partial(dcache, cache=cache, enable=enable)\n\n if enable:\n not_cache = any([\n getattr(cache, \"__getitem__\", None) is None,\n getattr(cache, \"__setitem__\", None) is None,\n getattr(cache, \"__contains__\", None) is None\n ])\n if callable(cache) and not_cache:\n cache = cache()\n\n from .dataset import CachedDataset\n return CachedDataset(dataset, cache)\n\n return dataset\n\n\n#####################################################\n# Dataset Constructors\n#####################################################\n\ndef numbers(size, transform=None):\n return dmap(range(size), transform)\n\n\ndef glob_files(paths, transform=None, recursive=False, unique=True, sort=True, sort_key=None, sort_reverse=False):\n from .utils import glob\n return dmap(glob(paths, recursive=recursive, unique=unique, sort=sort, sort_key=sort_key, sort_reverse=sort_reverse), transform)\n\n\ndef index_files(pathquery, transform=None, maxsize=None):\n from os import walk\n from os.path import dirname\n\n def generate_path(idx):\n return pathquery.format(idx, idx=idx, index=idx)\n\n if maxsize is None:\n dirpath = dirname(generate_path(0))\n maxsize = len(next(walk(dirpath))[2])\n\n return numbers(maxsize, [\n generate_path,\n transform or identity_transform\n ])\n\n\ndef images(paths, transform=None, *, img_loader=\"pil\", img_autoclose=True):\n\n from torchvision.transforms.functional import to_tensor\n transform = transform or to_tensor\n if isinstance(transform, (list, tuple)):\n from torchvision.transforms import Compose\n transform = Compose(transform)\n assert callable(transform)\n\n if isinstance(img_loader, str):\n if img_loader.lower() == \"pil\":\n from imageio import get_reader\n from PIL.Image import fromarray\n\n def img_loader(path):\n img_numpy = get_reader(path).get_next_data()\n return fromarray(img_numpy)\n\n elif img_loader.lower() == \"imageio\":\n from PIL.Image import open as pil_loader\n img_loader = pil_loader\n\n if not callable(img_loader):\n from importlib.util import find_spec as module_exists\n\n if module_exists(\"imageio\"):\n from imageio import get_reader\n from PIL.Image import fromarray\n\n def img_loader(path):\n img_numpy = get_reader(path).get_next_data()\n return fromarray(img_numpy)\n elif module_exists(\"PIL\"):\n from PIL.Image import open as pil_loader\n img_loader = pil_loader\n else:\n from .utils import eprint\n eprint(\n \"Default image loader is imageio and/or pillow (PIL).\",\n \"Module 'imageio' or 'PIL' not found!\",\n \"Try 'pip install imageio' or 'pip install pillow' or provide custom 'img_loader'\")\n\n def img_transform(path):\n img = img_loader(path)\n out = transform(img)\n if img_autoclose and callable(getattr(img, \"close\", None)):\n if img == out:\n from .utils import eprint\n eprint(f\"Warning: Auto-closing image but image is unprocessed: {path}\")\n img.close()\n return out\n\n return dmap(paths, img_transform)\n\n\ndef glob_images(paths, transform=None, img_loader=\"pil\", img_autoclose=True, glob_recursive=False, glob_unique=True, glob_sort=True, sort_key=None, sort_reverse=False):\n paths = glob_files(paths, recursive=glob_recursive, unique=glob_unique, sort=glob_sort, sort_key=sort_key, sort_reverse=sort_reverse)\n return images(paths, transform, img_loader=img_loader, img_autoclose=img_autoclose)\n\n\ndef index_images(pathquery, transform, img_loader=\"pil\", img_autoclose=True, maxsize=None):\n paths = index_files(pathquery, maxsize=maxsize)\n return images(paths, transform, img_loader=img_loader, img_autoclose=img_autoclose)\n\n\ndef 
tensors(paths, transform=None, tensor_loader=None):\n transform = transform or identity_transform\n if isinstance(transform, (list, tuple)):\n from torchvision.transforms import Compose\n transform = Compose(transform)\n assert callable(transform)\n\n if not callable(tensor_loader):\n try:\n from torch import load as torch_loader\n tensor_loader = torch_loader\n except ModuleNotFoundError as e:\n from .utils import eprint\n eprint(\"Default tensor loader is PyTorch (torch). Module 'torch' not found! Install PyTorch or provide custom 'tensor_loader'\")\n raise e\n\n def tensor_transform(path):\n tensor = torch_loader(path)\n return transform(tensor)\n\n return dmap(paths, transform=tensor_transform)\n\n\ndef glob_tensor(paths, transform=None, tensor_loader=None, glob_recursive=False, glob_unique=True, glob_sort=True, sort_key=None, sort_reverse=False):\n paths = glob_files(paths, recursive=glob_recursive, unique=glob_unique, sort=glob_sort, sort_key=sort_key, sort_reverse=sort_reverse)\n return tensors(paths, transform, tensor_loader=tensor_loader)\n\n\ndef index_tensor(pathquery, transform, tensor_loader=None, maxsize=None):\n paths = index_files(pathquery, maxsize=maxsize)\n return tensors(paths, transform, tensor_loader=tensor_loader)\n\n\n#####################################################\n# Cache Constructors\n#####################################################\n\ndef cache_create(load_fn, save_fn, exist_fn):\n from .cache import LambdaCache\n return LambdaCache(save_fn=save_fn, load_fn=load_fn, exist_fn=exist_fn)\n\n\ndef cache_dict(preloaded_data=None):\n from .cache import DictCache\n return DictCache(preloaded_data)\n\n\ndef cache_file(cache_dir, load_fn, save_fn, make_dir=True):\n\n path_fn = None\n error_msg = \"cached_dir must be a string or a callable\"\n\n if isinstance(cache_dir, str):\n def path_fn(idx):\n return cache_dir.format(idx=idx)\n error_msg = \"The parameter 'cache_dir:str' must contain the token '{idx}' (e.g. 
'cache/{idx:04}.pt') for string formatting\"\n\n elif callable(cache_dir):\n path_fn = cache_dir\n error_msg = \"The parameter 'cache_dir:Callable' must receive one argument of type 'int' and return value of type 'str'\"\n\n try:\n sample_filepath = path_fn(0)\n assert isinstance(sample_filepath, str)\n except Exception as e:\n from .utils import eprint\n eprint(error_msg)\n raise e\n\n if make_dir:\n from os.path import dirname\n dirpath = dirname(sample_filepath)\n\n from os import makedirs\n makedirs(dirpath, exist_ok=True)\n\n from .cache import FileCache\n cache = FileCache(path_fn=path_fn, save_fn=save_fn, load_fn=load_fn)\n\n return cache\n\n\ndef cache_tensor(cache_dir, make_dir=True):\n from functools import wraps\n from torch import load, save\n\n @wraps(load)\n def load_pytorch_tensor(path):\n return load(path)\n\n @wraps(save)\n def save_pytorch_tensor(path, tensor):\n return save(tensor, path)\n\n return cache_file(cache_dir, load_fn=load_pytorch_tensor, save_fn=save_pytorch_tensor, make_dir=make_dir)\n\n\ndef cache_text(cache_dir, as_array=False, make_dir=True):\n from os import linesep\n\n if as_array:\n def load_text(path):\n with open(path, \"r\") as f:\n lines = f.readlines()\n return list(filter(lambda x: x.strip(linesep), lines))\n\n def save_text(path, lines):\n text = str.join(linesep, lines)\n with open(path, \"w+\") as f:\n f.write(text)\n else:\n def load_text(path):\n with open(path, \"r\") as f:\n return f.read()\n\n def save_text(path, text):\n with open(path, \"w+\") as f:\n f.write(text)\n\n return cache_file(cache_dir, load_fn=load_text, save_fn=save_text, make_dir=make_dir)\n\n\ndef cache_json(cache_dir, load_kwds=None, save_kwds=None, make_dir=True):\n from json import load, dump\n\n def load_json(path):\n with open(path, \"r\") as f:\n return load(f, **(load_kwds or {}))\n\n def save_json(path, obj):\n with open(path, \"w+\") as f:\n return dump(obj, **(save_kwds or {}))\n\n return cache_file(cache_dir, load_fn=load_json, save_fn=save_json, make_dir=make_dir)\n\n\n#####################################################\n# Cache Dataset Macros\n#####################################################\n\ndef dcache_dict(dataset, preloaded_data=None, enable=True):\n from functools import partial\n cache_gen = partial(cache_dict, preloaded_data=preloaded_data)\n return dcache(dataset, cache_gen, enable)\n\n\ndef dcache_file(dataset, cache_dir, load_fn, save_fn, make_dir=True, enable=True):\n from functools import partial\n cache_gen = partial(cache_file, cache_dir=cache_dir, load_fn=load_fn, save_fn=save_fn, make_dir=make_dir)\n return dcache(dataset, cache_gen, enable)\n\n\ndef dcache_tensor(dataset, cache_dir, make_dir=True, enable=True):\n from functools import partial\n cache_gen = partial(cache_tensor, cache_dir=cache_dir, make_dir=make_dir)\n return dcache(dataset, cache_gen, enable)\n\n\ndef dcache_text(dataset, cache_dir, as_array=False, make_dir=True, enable=True):\n from functools import partial\n cache_gen = partial(cache_text, cache_dir=cache_dir, array=as_array, make_dir=make_dir)\n return dcache(dataset, cache_gen, enable)\n\n\ndef dcache_json(dataset, cache_dir, load_kwds=None, save_kwds=None, make_dir=True, enable=True):\n from functools import partial\n cache_gen = partial(cache_json, cache_dir=cache_dir, load_kwds=load_kwds, save_kwds=save_kwds, make_dir=make_dir)\n return dcache(dataset, cache_gen, enable)\n"
] | [
[
"torch.save",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
algrs/polliwog | [
"faa6531e8e2f7d0b52e928d64a4c1914199c4023",
"faa6531e8e2f7d0b52e928d64a4c1914199c4023"
] | [
"polliwog/transform/test_composite.py",
"polliwog/plane/test_plane.py"
] | [
"import numpy as np\nimport vg\nimport pytest\nfrom .composite import CompositeTransform, convert_44_to_33\n\n\ndef create_cube_verts(origin, size):\n # Create a cube. Since CompositeTransform just works on verticies,\n # we don't need a full lace.mesh object.\n origin = np.asarray(origin)\n size = np.repeat(size, 3)\n lower_base_plane = np.array(\n [\n # Lower base plane\n origin,\n origin + np.array([size[0], 0, 0]),\n origin + np.array([size[0], 0, size[2]]),\n origin + np.array([0, 0, size[2]]),\n ]\n )\n upper_base_plane = lower_base_plane + np.array([0, size[1], 0])\n return np.vstack([lower_base_plane, upper_base_plane])\n\n\ndef create_default_cube_verts():\n return create_cube_verts([1.0, 0.0, 0.0], 4.0)\n\n\ndef test_translate():\n transform = CompositeTransform()\n transform.translate(np.array([8.0, 6.0, 7.0]))\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [9.0, 6.0, 7.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [13.0, 10.0, 11.0])\n\n\ndef test_translate_by_list():\n transform = CompositeTransform()\n transform.translate([8.0, 6.0, 7.0])\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [9.0, 6.0, 7.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [13.0, 10.0, 11.0])\n\n\ndef test_scale():\n transform = CompositeTransform()\n transform.scale(10.0)\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [10.0, 0.0, 0.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [50.0, 40.0, 40.0])\n\n\ndef test_scale_error():\n transform = CompositeTransform()\n with pytest.raises(ValueError):\n transform.scale(-1)\n\n\ndef test_convert_units():\n transform = CompositeTransform()\n transform.convert_units(\"m\", \"cm\")\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [100.0, 0.0, 0.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [500.0, 400.0, 400.0])\n\n\ndef test_translate_then_scale():\n transform = CompositeTransform()\n transform.translate(np.array([8.0, 6.0, 7.0]))\n transform.scale(10.0)\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [90.0, 60.0, 70.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [130.0, 100.0, 110.0])\n\n\ndef test_scale_then_translate():\n transform = CompositeTransform()\n transform.scale(10.0)\n transform.translate(np.array([8.0, 6.0, 7.0]))\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 
0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [18.0, 6.0, 7.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [58.0, 46.0, 47.0])\n\n\ndef test_rotate_then_translate():\n transform = CompositeTransform()\n transform.rotate(np.array([1.0, 2.0, 3.0]))\n transform.translate(np.array([3.0, 2.0, 1.0]))\n\n v = np.array([1.0, 0.0, 0.0]).reshape(-1, 3)\n\n # Forward.\n np.testing.assert_allclose(\n np.array([2.30507944, 1.80799303, 1.69297817]).reshape(-1, 3), transform(v)\n )\n # Reverse.\n np.testing.assert_allclose(\n np.array([1.08087689, -1.45082159, -2.3930779]).reshape(-1, 3),\n transform(v, reverse=True),\n )\n\n\ndef test_reorient():\n # TODO We should also test a non-axis-aligned up and look.\n\n transform = CompositeTransform()\n transform.reorient(up=vg.basis.y, look=vg.basis.neg_x)\n\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_equal(transformed_cube_v[0], [0.0, 0.0, -1.0])\n np.testing.assert_array_equal(transformed_cube_v[6], [4, 4.0, -5.0])\n\n\ndef test_rotate():\n ways_to_rotate_around_y_a_quarter_turn = [\n np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]),\n np.array([0, np.pi / 2, 0]),\n np.array([[0, np.pi / 2, 0]]),\n np.array([[0], [np.pi / 2], [0]]),\n [0, np.pi / 2, 0],\n ]\n for rot in ways_to_rotate_around_y_a_quarter_turn:\n transform = CompositeTransform()\n transform.rotate(rot)\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed_cube_v = transform(cube_v)\n\n np.testing.assert_array_almost_equal(transformed_cube_v[0], [0.0, 0.0, -1.0])\n np.testing.assert_array_almost_equal(transformed_cube_v[6], [4, 4.0, -5.0])\n\n\ndef test_reverse_transforms():\n transforms = [CompositeTransform() for _ in range(5)]\n\n transforms[1].translate(np.array([8.0, 6.0, 7.0]))\n\n transforms[2].scale(10.0)\n\n transforms[3].translate(np.array([8.0, 6.0, 7.0]))\n transforms[3].scale(10.0)\n\n transforms[4].scale(10.0)\n transforms[4].translate(np.array([8.0, 6.0, 7.0]))\n\n for transform in transforms:\n cube_v = create_default_cube_verts()\n\n # Confidence check.\n np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])\n\n transformed = transform(cube_v)\n\n untransformed_v = transform(transformed, reverse=True)\n\n np.testing.assert_array_almost_equal(untransformed_v[0], [1.0, 0.0, 0.0])\n np.testing.assert_array_almost_equal(untransformed_v[6], [5.0, 4.0, 4.0])\n\n\ndef test_forward_reverse_equivalence():\n transform = CompositeTransform()\n transform.rotate(np.array([1.0, 2.0, 3.0]))\n transform.translate(np.array([3.0, 2.0, 1.0]))\n transform.scale(10.0)\n transform.rotate(np.array([7.0, 13.0, 5.0]))\n\n forward = transform.matrix_for()\n reverse = transform.matrix_for(reverse=True)\n np.testing.assert_allclose(reverse, np.linalg.inv(forward))\n\n forward = transform.matrix_for(from_range=(0, 2))\n reverse = transform.matrix_for(from_range=(0, 2), reverse=True)\n np.testing.assert_allclose(reverse, np.linalg.inv(forward))\n\n\ndef test_convert_44_to_33():\n np.testing.assert_array_equal(\n convert_44_to_33(\n np.array(\n [\n [1.0, 2.0, 3.0, 
0.0],\n [2.0, 3.0, 4.0, 0.0],\n [5.0, 6.0, 7.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n )\n ),\n np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [5.0, 6.0, 7.0]]),\n )\n",
"import math\nimport numpy as np\nimport pytest\nimport vg\nfrom .plane import Plane\nfrom .test_functions import assert_plane_equation_satisfies_points\n\n\ndef test_validation():\n with pytest.raises(ValueError):\n Plane(np.array([0, 10, 0]), np.array([1e-9, 1e-9, 1e-9]))\n\n\ndef test_repr():\n assert (\n str(Plane(np.array([0, 10, 0]), vg.basis.y))\n == \"<Plane of [0. 1. 0.] through [ 0 10 0]>\"\n )\n\n\ndef test_flipped():\n np.testing.assert_array_equal(\n Plane(np.array([0, 10, 0]), vg.basis.y).flipped().normal, vg.basis.neg_y\n )\n\n\ndef test_returns_signed_distances_for_xz_plane_at_origin():\n # x-z plane\n normal = np.array([0.0, 1.0, 0.0])\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n\n pts = np.array([[500.0, 502.0, 503.0], [-500.0, -501.0, -503.0]])\n\n expected = np.array([502.0, -501.0])\n\n np.testing.assert_array_equal(expected, plane.signed_distance(pts))\n np.testing.assert_array_equal(expected[0], plane.signed_distance(pts[0]))\n\n\ndef test_returns_unsigned_distances_for_xz_plane_at_origin():\n # x-z plane\n normal = np.array([0.0, 1.0, 0.0])\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n\n pts = np.array([[500.0, 502.0, 503.0], [-500.0, -501.0, -503.0]])\n\n expected = np.array([502.0, 501.0])\n\n np.testing.assert_array_equal(expected, plane.distance(pts))\n\n\ndef test_returns_signed_distances_for_diagonal_plane():\n # diagonal plane @ origin - draw a picture!\n normal = np.array([1.0, 1.0, 0.0])\n normal /= np.linalg.norm(normal)\n sample = np.array([1.0, 1.0, 0.0])\n\n plane = Plane(sample, normal)\n\n pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0]])\n\n expected = np.array(\n [math.sqrt(2 * (425.0 - 1.0) ** 2), -math.sqrt(2 * (500.0 + 1.0) ** 2)]\n )\n\n np.testing.assert_array_almost_equal(expected, plane.signed_distance(pts))\n\n\ndef test_returns_unsigned_distances_for_diagonal_plane_at_origin():\n # diagonal plane @ origin - draw a picture!\n normal = np.array([1.0, 1.0, 0.0])\n normal /= np.linalg.norm(normal)\n\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n\n pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0]])\n\n expected = np.array([math.sqrt(2 * (425.0 ** 2)), math.sqrt(2 * (500.0 ** 2))])\n\n np.testing.assert_array_almost_equal(expected, plane.distance(pts))\n\n\ndef test_signed_distance_validation():\n plane = Plane(point_on_plane=np.array([0, 10, 0]), unit_normal=vg.basis.y)\n\n with pytest.raises(ValueError):\n plane.signed_distance(np.array([[[1.0]]]))\n\n\ndef test_returns_sign_for_diagonal_plane():\n # diagonal plane @ origin - draw a picture!\n normal = np.array([1.0, 1.0, 0.0])\n normal /= np.linalg.norm(normal)\n sample = np.array([1.0, 1.0, 0.0])\n\n plane = Plane(sample, normal)\n\n pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0]])\n\n sign = plane.sign(pts)\n\n expected = np.array([1.0, -1.0])\n np.testing.assert_array_equal(sign, expected)\n\n\ndef test_points_in_front():\n # diagonal plane @ origin - draw a picture!\n normal = np.array([1.0, 1.0, 0.0])\n normal /= np.linalg.norm(normal)\n sample = np.array([1.0, 1.0, 0.0])\n\n plane = Plane(sample, normal)\n\n pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0]])\n\n np.testing.assert_array_equal(plane.points_in_front(pts), pts[0:1])\n np.testing.assert_array_equal(\n plane.points_in_front(pts, ret_indices=True), np.array([0])\n )\n np.testing.assert_array_equal(plane.points_in_front(pts, inverted=True), pts[1:2])\n np.testing.assert_array_equal(\n 
plane.points_in_front(pts, inverted=True, ret_indices=True), np.array([1])\n )\n\n\ndef test_canonical_point():\n normal = np.array([1.0, 1.0, 0.0])\n normal /= np.linalg.norm(normal)\n\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n\n np.testing.assert_array_equal(plane.canonical_point, np.array([0.0, 0.0, 0.0]))\n\n plane = Plane(sample, -normal)\n\n np.testing.assert_array_equal(plane.canonical_point, np.array([0.0, 0.0, 0.0]))\n\n normal = np.array([1.0, 7.0, 9.0])\n normal /= np.linalg.norm(normal)\n\n plane = Plane(sample, normal)\n\n np.testing.assert_array_equal(plane.canonical_point, np.array([0.0, 0.0, 0.0]))\n\n plane = Plane(sample, -normal)\n\n np.testing.assert_array_equal(plane.canonical_point, np.array([0.0, 0.0, 0.0]))\n\n normal = np.array([1.0, 0.0, 0.0])\n normal /= np.linalg.norm(normal)\n\n sample = np.array([3.0, 10.0, 20.0])\n\n plane = Plane(sample, normal)\n\n np.testing.assert_array_equal(plane.canonical_point, np.array([3, 0.0, 0.0]))\n\n plane = Plane(sample, -normal)\n\n np.testing.assert_array_equal(plane.canonical_point, np.array([3, 0.0, 0.0]))\n\n normal = np.array([1.0, 1.0, 1.0])\n normal /= np.linalg.norm(normal)\n\n sample = np.array([1.0, 2.0, 10.0])\n\n plane = Plane(sample, normal)\n\n np.testing.assert_array_almost_equal(\n plane.canonical_point, np.array([4.333333, 4.333333, 4.333333])\n )\n\n plane = Plane(sample, -normal)\n\n np.testing.assert_array_almost_equal(\n plane.canonical_point, np.array([4.333333, 4.333333, 4.333333])\n )\n\n\ndef test_project_point():\n np.testing.assert_array_equal(\n Plane(\n point_on_plane=np.array([0, 10, 0]), unit_normal=vg.basis.y\n ).project_point(np.array([10, 20, -5])),\n np.array([10, 10, -5]),\n )\n\n\ndef test_project_point_vectorized():\n np.testing.assert_array_equal(\n Plane(\n point_on_plane=np.array([0, 10, 0]), unit_normal=vg.basis.y\n ).project_point(np.array([[10, 20, -5], [2, 7, 203]])),\n np.array([[10, 10, -5], [2, 10, 203]]),\n )\n\n\ndef test_plane_from_points():\n points = np.array([[1, 1, 1], [-1, 1, 0], [2, 0, 3]])\n plane = Plane.from_points(*points)\n assert_plane_equation_satisfies_points(plane.equation, points)\n\n\ndef test_plane_from_points_and_vector():\n p1 = np.array([1, 5, 7])\n p2 = np.array([-2, -2, -2])\n v = np.array([1, 0, -1])\n plane = Plane.from_points_and_vector(p1, p2, v)\n\n points = [p1, p2]\n projected_points = [plane.project_point(p) for p in points]\n np.testing.assert_array_almost_equal(projected_points, points)\n\n assert np.dot(v, plane.normal) == 0\n\n\ndef test_fit_from_points():\n # Set up a collection of points in the X-Y plane.\n np.random.seed(0)\n points = np.hstack([np.random.random((100, 2)), np.zeros(100).reshape(-1, 1)])\n plane = Plane.fit_from_points(points)\n\n # The normal vector should be closely aligned with the Z-axis.\n z_axis = np.array([0.0, 0.0, 1.0])\n angle = np.arccos(np.dot(plane.normal, z_axis) / np.linalg.norm(plane.normal))\n assert angle % np.pi < 1e-6\n\n\ndef test_line_plane_intersection():\n # x-z plane\n normal = np.array([0.0, 1.0, 0.0])\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n # non-intersecting\n assert plane.line_xsection(pt=vg.basis.neg_y, ray=vg.basis.x) is None\n # coplanar\n assert plane.line_xsection(pt=np.zeros(3), ray=vg.basis.x) is None\n np.testing.assert_array_equal(\n plane.line_xsection(pt=vg.basis.neg_y, ray=vg.basis.y), np.zeros(3)\n )\n np.testing.assert_array_equal(\n plane.line_xsection(pt=vg.basis.neg_y, ray=np.array([1.0, 1.0, 0.0])),\n vg.basis.x,\n 
)\n\n\ndef test_line_plane_intersections():\n # x-z plane\n normal = np.array([0.0, 1.0, 0.0])\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n pts = np.array(\n [[0.0, -1.0, 0.0], [0.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, -1.0, 0.0]]\n )\n rays = np.array(\n [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]\n )\n expected = np.array(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [0.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n ]\n )\n intersections, is_intersecting = plane.line_xsections(pts, rays)\n np.testing.assert_array_equal(intersections, expected)\n np.testing.assert_array_equal(is_intersecting, [False, False, True, True])\n\n\ndef test_line_segment_plane_intersection():\n # x-z plane\n normal = np.array([0.0, 1.0, 0.0])\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n # non-intersecting\n assert (\n plane.line_segment_xsection(vg.basis.neg_y, np.array([1.0, -1.0, 0.0])) is None\n )\n # coplanar\n assert plane.line_segment_xsection(np.zeros(3), vg.basis.x) is None\n np.testing.assert_array_equal(\n plane.line_segment_xsection(vg.basis.neg_y, vg.basis.y), np.zeros(3)\n )\n np.testing.assert_array_equal(\n plane.line_segment_xsection(vg.basis.neg_y, np.array([2.0, 1.0, 0.0])),\n vg.basis.x,\n )\n # line intersecting, but not in segment\n assert plane.line_segment_xsection(vg.basis.y, np.array([0.0, 2.0, 0.0])) is None\n\n\ndef test_line_segment_plane_intersections():\n # x-z plane\n normal = np.array([0.0, 1.0, 0.0])\n sample = np.array([0.0, 0.0, 0.0])\n\n plane = Plane(sample, normal)\n a = np.array(\n [\n [0.0, -1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, -1.0, 0.0],\n [0.0, -1.0, 0.0],\n [0.0, 1.0, 0.0],\n ]\n )\n b = np.array(\n [\n [1.0, -1.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [2.0, 1.0, 0.0],\n [0.0, 2.0, 0.0],\n ]\n )\n expected = np.array(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [0.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n [np.nan, np.nan, np.nan],\n ]\n )\n intersections, is_intersecting = plane.line_segment_xsections(a, b)\n np.testing.assert_array_equal(intersections, expected)\n np.testing.assert_array_equal(is_intersecting, [False, False, True, True, False])\n"
] | [
[
"numpy.asarray",
"numpy.linalg.inv",
"numpy.testing.assert_array_equal",
"numpy.repeat",
"numpy.array",
"numpy.vstack",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Gummary/Pytorch-Project-Template | [
"56bc5e253627d40fb8771eccdb2bb663c833beb3",
"56bc5e253627d40fb8771eccdb2bb663c833beb3"
] | [
"datasets/rssrai.py",
"get_mean_std.py"
] | [
"\r\nimport logging\r\nimport os\r\n\r\nimport cv2\r\nimport numpy as np\r\nnp.random.seed(0)\r\nimport torch\r\ntorch.manual_seed(0)\r\nfrom torch.utils.data import Dataset\r\n\r\nfrom utils.train_utils import visualize_images\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass RssraiDataset(Dataset):\r\n\r\n def __init__(self, \r\n dataset, \r\n config,\r\n mean=[0.485, 0.456, 0.406], \r\n std=[0.229, 0.224, 0.225]):\r\n \r\n self.num_classes = config.NUM_CLASSES\r\n if dataset == 'train':\r\n self.data_path = os.path.join(config.ROOT, config.TRAIN_SET)\r\n elif dataset == 'test':\r\n self.data_path = os.path.join(config.ROOT, config.TEST_SET)\r\n elif dataset == 'val':\r\n self.data_path = os.path.join(config.ROOT, config.VAL_SET)\r\n\r\n self.lbl2pixel, self.pixel2lbl = self.generate_label_mapping()\r\n\r\n self.mean = mean\r\n self.std = std\r\n\r\n self._db = self.__get_db__()\r\n\r\n def generate_label_mapping(self):\r\n lbls = [[0,0,0],\r\n [0,200,0],\r\n [150,250,0],\r\n [150,200,150],\r\n [200,0,200],\r\n [150,0,250],\r\n [150,150,250],\r\n [250,200,0],\r\n [200,200,0],\r\n [200,0,0],\r\n [250,0,150],\r\n [200,150,150],\r\n [250,150,150],\r\n [0,0,200],\r\n [0,150,200],\r\n [0,200,250]]\r\n lbls = [tuple(l) for l in lbls]\r\n \r\n assert len(lbls) == self.num_classes\r\n label2pixel = {}\r\n pixel2label = {}\r\n for i, lbl in enumerate(lbls):\r\n label2pixel[i] = lbl\r\n pixel2label[lbl] = i\r\n return label2pixel, pixel2label\r\n \r\n def __get_db__(self):\r\n files = []\r\n for f in os.listdir(os.path.join(self.data_path, 'src')):\r\n image_path = os.path.join(self.data_path, 'src', f)\r\n label_path = os.path.join(self.data_path, 'label', f)\r\n files.append({\r\n \"image\": image_path,\r\n \"label\": label_path,\r\n \"name\": f\r\n })\r\n logger.info(\"=> Loading %d files\" % len(files))\r\n return files\r\n\r\n def __len__(self):\r\n return len(self._db)\r\n\r\n def __input_transform__(self, image):\r\n image = image.astype(np.float32)[:, :, ::-1]\r\n image = image / 255.0\r\n image -= self.mean\r\n image /= self.std\r\n return image\r\n\r\n def __generate_target__(self, label):\r\n height, width, _ = label.shape\r\n target = np.zeros((height, width), dtype=np.int32)\r\n\r\n for k, v in self.lbl2pixel.items():\r\n target[(label==v).all(axis=2)] = k\r\n # print(np.sum((label==v).all(axis=2)))\r\n # print(np.sum(target==k))\r\n return target\r\n\r\n\r\n\r\n def __getitem__(self, index):\r\n item = self._db[index]\r\n\r\n image = cv2.imread(item[\"image\"],cv2.IMREAD_COLOR)\r\n label = cv2.imread(item[\"label\"],cv2.IMREAD_COLOR)[:,:,::-1]\r\n image = self.__input_transform__(image)\r\n label = self.__generate_target__(label)\r\n image = image.transpose(2,0,1)\r\n return image.copy(), label.copy()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n sys.path.append('E:\\\\Programmer\\\\RSSAI')\r\n\r\n from configs import cfg\r\n from torch.utils.data import DataLoader\r\n import matplotlib.pyplot as plt\r\n \r\n dataset = RssraiDataset('train', cfg.DATASET)\r\n dataloader = DataLoader(dataset, batch_size=4,\r\n shuffle=True, num_workers=1)\r\n for src, label in dataloader:\r\n fig = plt.figure()\r\n print(src.size())\r\n for i in range(4):\r\n ax1 = fig.add_subplot(4,2,2*i+1)\r\n ax1.imshow(src[i])\r\n ax2 = fig.add_subplot(4,2,2*(1+i))\r\n ax2.imshow(label[i], cmap='gray', vmin=0, vmax=16)\r\n plt.show()\r\n break",
"\nimport argparse\nfrom configs.default import update_config\nfrom configs import config\nfrom datasets.rssrai import RssraiDataset\nimport torch\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n '--cfg',\n metavar='config_json_file',\n default='None',\n help='The Configuration file in json format')\n return parser.parse_args()\n\ndef main():\n global config\n args = parse_args()\n update_config(config, args)\n\n dataset = RssraiDataset('train', config.DATASET, mean=[0,0,0], std=[1,1,1])\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=10,\n num_workers=4,\n shuffle=False\n )\n\n mean = 0.\n std = 0.\n nb_samples = 0.\n for data, label in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n nb_samples += batch_samples\n\n mean /= nb_samples\n std /= nb_samples\n print(\"mean is: %f, std is: %f\", mean, std)\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
codyoss/google-cloud-python | [
"505d55357fbdffc5d55005c58712932c758737bd"
] | [
"automl/tests/unit/gapic/v1beta1/test_gcs_client_v1beta1.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests.\"\"\"\n\nimport mock\nimport pandas\nimport pytest\nimport re\n\nfrom google.api_core import exceptions\nfrom google.auth.credentials import AnonymousCredentials\nfrom google.cloud import automl_v1beta1\n\nPROJECT = \"project\"\n\n\nclass TestGcsClient(object):\n def gcs_client(self, bucket_name=None, client_attrs={}):\n client_mock = mock.Mock(**client_attrs)\n return automl_v1beta1.tables.gcs_client.GcsClient(\n bucket_name=bucket_name, client=client_mock\n )\n\n def test_init_with_project_and_credentials(self):\n # helper for checking that the storage client is initialized with the\n # passed in project and credentials.\n class FakeStorageClient:\n def __init__(self, project=None, credentials=None):\n self.project = project\n self.credentials = credentials\n\n patch = mock.patch(\"google.cloud.storage.Client\", new=FakeStorageClient)\n with patch:\n credentials = AnonymousCredentials()\n gcs_client = automl_v1beta1.tables.gcs_client.GcsClient(\n project=PROJECT, credentials=credentials\n )\n assert isinstance(gcs_client.client, FakeStorageClient)\n assert gcs_client.client.project == PROJECT\n assert gcs_client.client.credentials == credentials\n\n def test_ensure_bucket_exists(self):\n mock_bucket = mock.Mock()\n gcs_client = self.gcs_client(\n client_attrs={\n \"get_bucket.side_effect\": exceptions.NotFound(\"err\"),\n \"bucket.return_value\": mock_bucket,\n }\n )\n returned_bucket_name = gcs_client.ensure_bucket_exists(\n \"my-project\", \"us-central1\"\n )\n gcs_client.client.get_bucket.assert_called_with(\n \"my-project-automl-tables-staging\"\n )\n gcs_client.client.bucket.assert_called_with(\"my-project-automl-tables-staging\")\n mock_bucket.create.assert_called_with(\n project=\"my-project\", location=\"us-central1\"\n )\n assert returned_bucket_name == \"my-project-automl-tables-staging\"\n\n def test_ensure_bucket_exists_bucket_already_exists_in_different_project(self):\n mock_bucket = mock.Mock()\n gcs_client = self.gcs_client(\n client_attrs={\n \"get_bucket.side_effect\": exceptions.Forbidden(\"err\"),\n \"bucket.return_value\": mock_bucket,\n }\n )\n returned_bucket_name = gcs_client.ensure_bucket_exists(\n \"my-project\", \"us-central1\"\n )\n gcs_client.client.get_bucket.assert_called_with(\n \"my-project-automl-tables-staging\"\n )\n gcs_client.client.bucket.assert_called_with(returned_bucket_name)\n mock_bucket.create.assert_called_with(\n project=\"my-project\", location=\"us-central1\"\n )\n\n assert re.match(\n \"^my-project-automl-tables-staging-[0-9]*$\", returned_bucket_name\n )\n\n def test_ensure_bucket_exists_bucket_already_exists_in_current_project(self):\n gcs_client = self.gcs_client()\n returned_bucket_name = gcs_client.ensure_bucket_exists(\n \"my-project\", \"us-central1\"\n )\n gcs_client.client.get_bucket.assert_called_with(\n \"my-project-automl-tables-staging\"\n )\n gcs_client.client.bucket.assert_not_called()\n assert 
returned_bucket_name == \"my-project-automl-tables-staging\"\n\n def test_ensure_bucket_exists_custom_bucket_name(self):\n mock_bucket = mock.Mock()\n gcs_client = self.gcs_client(\n bucket_name=\"my-bucket\",\n client_attrs={\n \"get_bucket.side_effect\": exceptions.NotFound(\"err\"),\n \"bucket.return_value\": mock_bucket,\n },\n )\n returned_bucket_name = gcs_client.ensure_bucket_exists(\n \"my-project\", \"us-central1\"\n )\n gcs_client.client.get_bucket.assert_called_with(\"my-bucket\")\n gcs_client.client.bucket.assert_called_with(\"my-bucket\")\n mock_bucket.create.assert_called_with(\n project=\"my-project\", location=\"us-central1\"\n )\n assert returned_bucket_name == \"my-bucket\"\n\n def test_upload_pandas_dataframe(self):\n mock_blob = mock.Mock()\n mock_bucket = mock.Mock(**{\"blob.return_value\": mock_blob})\n gcs_client = self.gcs_client(\n bucket_name=\"my-bucket\",\n client_attrs={\"get_bucket.return_value\": mock_bucket},\n )\n dataframe = pandas.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]})\n\n gcs_uri = gcs_client.upload_pandas_dataframe(dataframe, \"my-file.csv\")\n\n gcs_client.client.get_bucket.assert_called_with(\"my-bucket\")\n mock_bucket.blob.assert_called_with(\"my-file.csv\")\n mock_blob.upload_from_string.assert_called_with(\",col1,col2\\n0,1,3\\n1,2,4\\n\")\n assert gcs_uri == \"gs://my-bucket/my-file.csv\"\n\n def test_upload_pandas_dataframe_no_csv_name(self):\n mock_blob = mock.Mock()\n mock_bucket = mock.Mock(**{\"blob.return_value\": mock_blob})\n gcs_client = self.gcs_client(\n bucket_name=\"my-bucket\",\n client_attrs={\"get_bucket.return_value\": mock_bucket},\n )\n dataframe = pandas.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]})\n\n gcs_uri = gcs_client.upload_pandas_dataframe(dataframe)\n generated_csv_name = gcs_uri.split(\"/\")[-1]\n\n gcs_client.client.get_bucket.assert_called_with(\"my-bucket\")\n mock_bucket.blob.assert_called_with(generated_csv_name)\n mock_blob.upload_from_string.assert_called_with(\",col1,col2\\n0,1,3\\n1,2,4\\n\")\n assert re.match(\"^gs://my-bucket/automl-tables-dataframe-[0-9]*.csv$\", gcs_uri)\n\n def test_upload_pandas_dataframe_not_type_dataframe(self):\n gcs_client = self.gcs_client()\n with pytest.raises(ValueError):\n gcs_client.upload_pandas_dataframe(\"my-dataframe\")\n gcs_client.client.upload_pandas_dataframe.assert_not_called()\n\n def test_upload_pandas_dataframe_bucket_not_exist(self):\n gcs_client = self.gcs_client()\n dataframe = pandas.DataFrame({})\n with pytest.raises(ValueError):\n gcs_client.upload_pandas_dataframe(dataframe)\n gcs_client.client.upload_pandas_dataframe.assert_not_called()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Alisa-lisa/conferences | [
"87fcb9f595a244408c015c66283c337d124b358d"
] | [
"EP_2019/py_impl/simple_implementation.py"
] | [
"from uuid import uuid4\nimport numpy as np\n\n\nclass Request:\n def __init__(self):\n \"\"\"\n Request constructor\n\n :id: unique identifier\n :driver: assigned driver\n :lifetime: time to cancel the request if not put into progress\n :execution_time: how long a ride will take (simple movement proxy)\n \"\"\"\n self.id = uuid4()\n self.driver_id = None\n self.remaining_waiting_time = 100\n self.fulfillment_time = 100\n\n def is_alive(self):\n \"\"\" Checks if request has some time to exist or be fulfilled \"\"\"\n return self.remaining_waiting_time > 0 and self.fulfillment_time > 0\n\n\nclass Taxi:\n def __init__(self):\n self.id = uuid4()\n self.is_occupied = False\n\n\nclass World:\n def __init__(self, runtime, spawn_chance, max_active, taxis):\n self.runtime = runtime\n self.age = 0\n self.request_spawn_chance = spawn_chance\n self.max_active_requests = max_active\n self.taxis = {\n \"free\": [Taxi() for _ in range(taxis)],\n \"occupied\": []\n }\n self.requests = {\n \"pending\": [],\n \"progress\": [],\n \"finished\": [],\n \"cancelled\": []\n }\n\n def maybe_spawn_request(self):\n \"\"\" Spawn a request with a chance \"\"\"\n p = [1 - self.request_spawn_chance, self.request_spawn_chance]\n if (len(self.requests[\"pending\"]) + len(self.requests[\"progress\"]) < self.max_active_requests)\\\n and np.random.choice([False, True], p=p):\n self.requests[\"pending\"].append(Request())\n\n def distribute_unfulfilled_requests(self):\n \"\"\" Try to assign a request to a car \"\"\"\n tmp = []\n for r in self.requests[\"pending\"]:\n if len(self.taxis[\"free\"]) > 0:\n taxi = self.taxis[\"free\"][0]\n taxi.is_occupied = True\n r.driver_id = taxi\n self.taxis[\"free\"].remove(taxi)\n self.taxis[\"occupied\"].append(taxi)\n self.requests[\"progress\"].append(r)\n tmp.append(r)\n self.requests[\"pending\"] = [r for r in self.requests[\"pending\"] if r not in tmp]\n\n def update_requests(self):\n \"\"\" Count down to request state change \"\"\"\n for r in self.requests[\"pending\"]:\n r.remaining_waiting_time -= 1\n for r in self.requests[\"progress\"]:\n r.fulfillment_time -= 1\n\n def cleanup_requests(self):\n \"\"\" Change state of the request \"\"\"\n for r in self.requests[\"pending\"]:\n if not r.is_alive():\n self.requests[\"cancelled\"].append(r)\n self.requests[\"pending\"] = [r for r in self.requests[\"pending\"] if r not in self.requests[\"cancelled\"]]\n\n for r in self.requests[\"progress\"]:\n if not r.is_alive():\n self.requests[\"finished\"].append(r)\n self.taxis[\"free\"].append(r.driver_id)\n self.taxis[\"occupied\"].remove(r.driver_id)\n self.requests[\"progress\"] = [r for r in self.requests[\"progress\"] if r not in self.requests[\"finished\"]]\n\n def run_till_done(self):\n \"\"\" Main loop with all steps from the scenario \"\"\"\n while self.age <= self.runtime:\n self.age += 1\n\n self.maybe_spawn_request()\n self.distribute_unfulfilled_requests()\n self.update_requests()\n self.cleanup_requests()\n\n print(\"Age: {}/{}, Taxis: {} Occ/{} Free, Requests: {} Asnd/{} Wai/{} Cld/{} Fin\".format(self.age,\n self.runtime,\n len(self.taxis[\"occupied\"]),\n len(self.taxis[\"free\"]),\n len(self.requests[\"progress\"]),\n len(self.requests[\"pending\"]),\n len(self.requests[\"cancelled\"]),\n len(self.requests[\"finished\"])))\n\n\nif __name__ == '__main__':\n # world = World(86400, 0.2, 2000, 200)\n world = World(3600, 0.2, 2000, 200)\n world.run_till_done()\n"
] | [
[
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ahartikainen/cmdstanpy | [
"29cdfee8fa903855f0f13ee67ea42f8159ab1671"
] | [
"test/test_sample.py"
] | [
"\"\"\"CmdStan method sample tests\"\"\"\n\nimport contextlib\nimport io\nimport logging\nimport os\nimport platform\nimport shutil\nimport stat\nimport tempfile\nimport unittest\nfrom multiprocessing import cpu_count\nfrom test import CustomTestCase\nfrom time import time\n\nimport numpy as np\nfrom testfixtures import LogCapture\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nimport cmdstanpy.stanfit\nfrom cmdstanpy import _TMPDIR\nfrom cmdstanpy.cmdstan_args import CmdStanArgs, Method, SamplerArgs\nfrom cmdstanpy.model import CmdStanModel\nfrom cmdstanpy.stanfit import CmdStanMCMC, RunSet, from_csv\nfrom cmdstanpy.utils import EXTENSION, cmdstan_version_before\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nDATAFILES_PATH = os.path.join(HERE, 'data')\nGOODFILES_PATH = os.path.join(DATAFILES_PATH, 'runset-good')\nBADFILES_PATH = os.path.join(DATAFILES_PATH, 'runset-bad')\n\n# metadata should make this unnecessary\nSAMPLER_STATE = [\n 'lp__',\n 'accept_stat__',\n 'stepsize__',\n 'treedepth__',\n 'n_leapfrog__',\n 'divergent__',\n 'energy__',\n]\n# metadata should make this unnecessary\nBERNOULLI_COLS = SAMPLER_STATE + ['theta']\n\n\nclass SampleTest(unittest.TestCase):\n def test_bernoulli_good(self, stanfile='bernoulli.stan'):\n stan = os.path.join(DATAFILES_PATH, stanfile)\n bern_model = CmdStanModel(stan_file=stan)\n\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=200,\n iter_sampling=100,\n show_progress=False,\n )\n self.assertIn('CmdStanMCMC: model=bernoulli', bern_fit.__repr__())\n self.assertIn('method=sample', bern_fit.__repr__())\n\n self.assertEqual(bern_fit.runset._args.method, Method.SAMPLE)\n\n print(bern_fit.runset)\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n stdout_file = bern_fit.runset.stdout_files[i]\n self.assertTrue(os.path.exists(csv_file))\n self.assertTrue(os.path.exists(stdout_file))\n\n self.assertEqual(bern_fit.chains, 2)\n self.assertEqual(bern_fit.thin, 1)\n self.assertEqual(bern_fit.num_draws_warmup, 200)\n self.assertEqual(bern_fit.num_draws_sampling, 100)\n self.assertEqual(bern_fit.column_names, tuple(BERNOULLI_COLS))\n\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n self.assertEqual(bern_fit.metric_type, 'diag_e')\n self.assertEqual(bern_fit.step_size.shape, (2,))\n self.assertEqual(bern_fit.metric.shape, (2, 1))\n\n self.assertEqual(\n bern_fit.draws(concat_chains=True).shape, (200, len(BERNOULLI_COLS))\n )\n\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=200,\n iter_sampling=100,\n metric='dense_e',\n show_progress=False,\n )\n self.assertIn('CmdStanMCMC: model=bernoulli', bern_fit.__repr__())\n self.assertIn('method=sample', bern_fit.__repr__())\n\n self.assertEqual(bern_fit.runset._args.method, Method.SAMPLE)\n\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n stdout_file = bern_fit.runset.stdout_files[i]\n self.assertTrue(os.path.exists(csv_file))\n self.assertTrue(os.path.exists(stdout_file))\n\n self.assertEqual(bern_fit.runset.chains, 2)\n self.assertEqual(bern_fit.num_draws_sampling, 100)\n self.assertEqual(bern_fit.column_names, tuple(BERNOULLI_COLS))\n\n bern_sample = bern_fit.draws()\n self.assertEqual(bern_sample.shape, (100, 2, len(BERNOULLI_COLS)))\n self.assertEqual(bern_fit.metric_type, 'dense_e')\n 
self.assertEqual(bern_fit.step_size.shape, (2,))\n self.assertEqual(bern_fit.metric.shape, (2, 1, 1))\n\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n output_dir=DATAFILES_PATH,\n show_progress=False,\n )\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n stdout_file = bern_fit.runset.stdout_files[i]\n self.assertTrue(os.path.exists(csv_file))\n self.assertTrue(os.path.exists(stdout_file))\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n for i in range(bern_fit.runset.chains): # cleanup datafile_path dir\n os.remove(bern_fit.runset.csv_files[i])\n if os.path.exists(bern_fit.runset.stdout_files[i]):\n os.remove(bern_fit.runset.stdout_files[i])\n rdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.R')\n bern_fit = bern_model.sample(\n data=rdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n show_progress=False,\n )\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n\n data_dict = {'N': 10, 'y': [0, 1, 0, 0, 0, 0, 0, 0, 0, 1]}\n bern_fit = bern_model.sample(\n data=data_dict,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n show_progress=False,\n )\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n\n np_scalr_10 = np.int32(10)\n data_dict = {'N': np_scalr_10, 'y': [0, 1, 0, 0, 0, 0, 0, 0, 0, 1]}\n bern_fit = bern_model.sample(\n data=data_dict,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n show_progress=False,\n )\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n\n def test_bernoulli_unit_e(self, stanfile='bernoulli.stan'):\n stan = os.path.join(DATAFILES_PATH, stanfile)\n bern_model = CmdStanModel(stan_file=stan)\n\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n metric='unit_e',\n show_progress=False,\n )\n self.assertEqual(bern_fit.metric_type, 'unit_e')\n self.assertEqual(bern_fit.step_size.shape, (2,))\n with LogCapture() as log:\n logging.getLogger()\n self.assertEqual(bern_fit.metric, None)\n log.check_present(\n (\n 'cmdstanpy',\n 'INFO',\n 'Unit diagnonal metric, inverse mass matrix size unknown.',\n )\n )\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n\n def test_init_types(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n bern_model = CmdStanModel(stan_file=stan)\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n inits=1.1,\n show_progress=False,\n )\n self.assertIn('init=1.1', bern_fit.runset.__repr__())\n\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n inits=1,\n show_progress=False,\n )\n self.assertIn('init=1', bern_fit.runset.__repr__())\n\n # Save init to json\n inits_path1 = os.path.join(_TMPDIR, 'inits_test_1.json')\n with open(inits_path1, 'w') as fd:\n json.dump({'theta': 0.1}, fd)\n inits_path2 = os.path.join(_TMPDIR, 'inits_test_2.json')\n with open(inits_path2, 'w') as fd:\n json.dump({'theta': 0.9}, fd)\n\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n 
iter_warmup=100,\n iter_sampling=100,\n inits=inits_path1,\n show_progress=False,\n )\n self.assertIn(\n 'init={}'.format(inits_path1.replace('\\\\', '\\\\\\\\')),\n bern_fit.runset.__repr__(),\n )\n\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n inits=[inits_path1, inits_path2],\n show_progress=False,\n )\n self.assertIn(\n 'init={}'.format(inits_path1.replace('\\\\', '\\\\\\\\')),\n bern_fit.runset.__repr__(),\n )\n\n with self.assertRaises(ValueError):\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n inits=(1, 2),\n )\n\n with self.assertRaises(ValueError):\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n inits=-1,\n )\n\n def test_bernoulli_bad(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n bern_model = CmdStanModel(stan_file=stan)\n\n with self.assertRaisesRegex(RuntimeError, 'variable does not exist'):\n bern_model.sample()\n\n with self.assertRaisesRegex(RuntimeError, 'variable does not exist'):\n bern_model.sample(data={'foo': 1})\n\n if platform.system() != 'Windows':\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n dirname1 = 'tmp1' + str(time())\n os.mkdir(dirname1, mode=644)\n dirname2 = 'tmp2' + str(time())\n path = os.path.join(dirname1, dirname2)\n with self.assertRaisesRegex(\n ValueError, 'Invalid path for output files'\n ):\n bern_model.sample(data=jdata, chains=1, output_dir=path)\n os.rmdir(dirname1)\n\n # pylint: disable=no-self-use\n def test_multi_proc_1(self):\n logistic_stan = os.path.join(DATAFILES_PATH, 'logistic.stan')\n logistic_model = CmdStanModel(stan_file=logistic_stan)\n logistic_data = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=2,\n parallel_chains=1,\n iter_sampling=200,\n iter_warmup=200,\n show_console=True,\n )\n log.check_present(\n ('cmdstanpy', 'INFO', 'Chain [1] done processing'),\n ('cmdstanpy', 'INFO', 'Chain [2] start processing'),\n )\n\n # pylint: disable=no-self-use\n def test_multi_proc_2(self):\n logistic_stan = os.path.join(DATAFILES_PATH, 'logistic.stan')\n logistic_model = CmdStanModel(stan_file=logistic_stan)\n logistic_data = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=4,\n parallel_chains=2,\n iter_sampling=200,\n iter_warmup=200,\n show_console=True,\n )\n if cpu_count() >= 4:\n # finish chains 1, 2 before starting chains 3, 4\n log.check_present(\n ('cmdstanpy', 'INFO', 'Chain [1] done processing'),\n ('cmdstanpy', 'INFO', 'Chain [4] start processing'),\n )\n if cpu_count() >= 4:\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=4,\n parallel_chains=4,\n iter_sampling=200,\n iter_warmup=200,\n show_console=True,\n )\n log.check_present(\n ('cmdstanpy', 'INFO', 'Chain [4] start processing'),\n ('cmdstanpy', 'INFO', 'Chain [1] done processing'),\n )\n\n def test_num_threads_msgs(self):\n logistic_stan = os.path.join(DATAFILES_PATH, 'logistic.stan')\n logistic_model = CmdStanModel(stan_file=logistic_stan)\n logistic_data = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=1,\n 
parallel_chains=1,\n threads_per_chain=7,\n iter_sampling=200,\n iter_warmup=200,\n show_progress=False,\n )\n log.check_present(\n ('cmdstanpy', 'DEBUG', 'running CmdStan, num_threads: 7')\n )\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=7,\n parallel_chains=1,\n threads_per_chain=5,\n iter_sampling=200,\n iter_warmup=200,\n show_progress=False,\n )\n log.check_present(\n ('cmdstanpy', 'DEBUG', 'running CmdStan, num_threads: 5')\n )\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=1,\n parallel_chains=7,\n threads_per_chain=5,\n iter_sampling=200,\n iter_warmup=200,\n show_progress=False,\n )\n log.check_present(\n (\n 'cmdstanpy',\n 'INFO',\n 'Requested 7 parallel_chains but only 1 required, '\n 'will run all chains in parallel.',\n )\n )\n\n def test_multi_proc_threads(self):\n # 2.28 compile with cpp_options={'STAN_THREADS':'true'}\n if not cmdstan_version_before(2, 28):\n logistic_stan = os.path.join(DATAFILES_PATH, 'logistic.stan')\n logistic_model = CmdStanModel(stan_file=logistic_stan)\n\n os.remove(logistic_model.exe_file)\n logistic_model.compile(\n force=True,\n cpp_options={'STAN_THREADS': 'TRUE'},\n )\n info_dict = logistic_model.exe_info()\n self.assertTrue(info_dict is not None)\n self.assertTrue('STAN_THREADS' in info_dict)\n self.assertEqual(info_dict['STAN_THREADS'], 'true')\n\n logistic_data = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n with LogCapture() as log:\n logging.getLogger()\n logistic_model.sample(\n data=logistic_data,\n chains=4,\n parallel_chains=4,\n threads_per_chain=5,\n iter_sampling=200,\n iter_warmup=200,\n show_progress=False,\n )\n log.check_present(\n ('cmdstanpy', 'DEBUG', 'running CmdStan, num_threads: 20')\n )\n\n def test_multi_proc_err_msgs(self):\n logistic_stan = os.path.join(DATAFILES_PATH, 'logistic.stan')\n logistic_model = CmdStanModel(stan_file=logistic_stan)\n logistic_data = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n\n with self.assertRaisesRegex(\n ValueError, 'parallel_chains must be a positive integer'\n ):\n logistic_model.sample(\n data=logistic_data, chains=4, parallel_chains=-4\n )\n with self.assertRaisesRegex(\n ValueError, 'threads_per_chain must be a positive integer'\n ):\n logistic_model.sample(\n data=logistic_data, chains=4, threads_per_chain=-4\n )\n\n def test_fixed_param_good(self):\n stan = os.path.join(DATAFILES_PATH, 'datagen_poisson_glm.stan')\n datagen_model = CmdStanModel(stan_file=stan)\n datagen_fit = datagen_model.sample(\n seed=12345, chains=1, iter_sampling=100, fixed_param=True\n )\n self.assertEqual(datagen_fit.runset._args.method, Method.SAMPLE)\n self.assertEqual(datagen_fit.metric_type, None)\n self.assertEqual(datagen_fit.metric, None)\n self.assertEqual(datagen_fit.step_size, None)\n\n for i in range(datagen_fit.runset.chains):\n csv_file = datagen_fit.runset.csv_files[i]\n stdout_file = datagen_fit.runset.stdout_files[i]\n self.assertTrue(os.path.exists(csv_file))\n self.assertTrue(os.path.exists(stdout_file))\n\n self.assertEqual(datagen_fit.runset.chains, 1)\n\n column_names = [\n 'lp__',\n 'accept_stat__',\n 'N',\n 'y_sim[1]',\n 'y_sim[2]',\n 'y_sim[3]',\n 'y_sim[4]',\n 'y_sim[5]',\n 'y_sim[6]',\n 'y_sim[7]',\n 'y_sim[8]',\n 'y_sim[9]',\n 'y_sim[10]',\n 'y_sim[11]',\n 'y_sim[12]',\n 'y_sim[13]',\n 'y_sim[14]',\n 'y_sim[15]',\n 'y_sim[16]',\n 'y_sim[17]',\n 'y_sim[18]',\n 'y_sim[19]',\n 'y_sim[20]',\n 'x_sim[1]',\n 'x_sim[2]',\n 'x_sim[3]',\n 'x_sim[4]',\n 
'x_sim[5]',\n 'x_sim[6]',\n 'x_sim[7]',\n 'x_sim[8]',\n 'x_sim[9]',\n 'x_sim[10]',\n 'x_sim[11]',\n 'x_sim[12]',\n 'x_sim[13]',\n 'x_sim[14]',\n 'x_sim[15]',\n 'x_sim[16]',\n 'x_sim[17]',\n 'x_sim[18]',\n 'x_sim[19]',\n 'x_sim[20]',\n 'pop_sim[1]',\n 'pop_sim[2]',\n 'pop_sim[3]',\n 'pop_sim[4]',\n 'pop_sim[5]',\n 'pop_sim[6]',\n 'pop_sim[7]',\n 'pop_sim[8]',\n 'pop_sim[9]',\n 'pop_sim[10]',\n 'pop_sim[11]',\n 'pop_sim[12]',\n 'pop_sim[13]',\n 'pop_sim[14]',\n 'pop_sim[15]',\n 'pop_sim[16]',\n 'pop_sim[17]',\n 'pop_sim[18]',\n 'pop_sim[19]',\n 'pop_sim[20]',\n 'alpha_sim',\n 'beta_sim',\n 'eta[1]',\n 'eta[2]',\n 'eta[3]',\n 'eta[4]',\n 'eta[5]',\n 'eta[6]',\n 'eta[7]',\n 'eta[8]',\n 'eta[9]',\n 'eta[10]',\n 'eta[11]',\n 'eta[12]',\n 'eta[13]',\n 'eta[14]',\n 'eta[15]',\n 'eta[16]',\n 'eta[17]',\n 'eta[18]',\n 'eta[19]',\n 'eta[20]',\n ]\n self.assertEqual(datagen_fit.column_names, tuple(column_names))\n self.assertEqual(datagen_fit.num_draws_sampling, 100)\n self.assertEqual(datagen_fit.draws().shape, (100, 1, len(column_names)))\n self.assertEqual(datagen_fit.metric, None)\n self.assertEqual(datagen_fit.metric_type, None)\n self.assertEqual(datagen_fit.step_size, None)\n\n def test_fixed_param_unspecified(self):\n stan = os.path.join(DATAFILES_PATH, 'datagen_poisson_glm.stan')\n datagen_model = CmdStanModel(stan_file=stan)\n datagen_fit = datagen_model.sample(\n iter_sampling=100, show_progress=False\n )\n self.assertEqual(datagen_fit.step_size, None)\n\n exe_only = os.path.join(DATAFILES_PATH, 'exe_only')\n shutil.copyfile(datagen_model.exe_file, exe_only)\n os.chmod(exe_only, 0o755)\n datagen2_model = CmdStanModel(exe_file=exe_only)\n datagen2_fit = datagen2_model.sample(\n iter_sampling=200, show_console=True\n )\n self.assertEqual(datagen2_fit.chains, 4)\n self.assertEqual(datagen2_fit.step_size, None)\n\n def test_bernoulli_file_with_space(self):\n self.test_bernoulli_good('bernoulli with space in name.stan')\n\n def test_bernoulli_path_with_space(self):\n self.test_bernoulli_good(\n 'path with space/' 'bernoulli_path_with_space.stan'\n )\n\n def test_index_bounds_error(self):\n if not cmdstan_version_before(2, 27):\n oob_stan = os.path.join(DATAFILES_PATH, 'out_of_bounds.stan')\n oob_model = CmdStanModel(stan_file=oob_stan)\n with self.assertRaises(RuntimeError):\n oob_model.sample()\n\n def test_show_console(self, stanfile='bernoulli.stan'):\n stan = os.path.join(DATAFILES_PATH, stanfile)\n bern_model = CmdStanModel(stan_file=stan)\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n sys_stdout = io.StringIO()\n with contextlib.redirect_stdout(sys_stdout):\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n show_console=True,\n )\n console = sys_stdout.getvalue()\n self.assertTrue('Chain [1] method = sample' in console)\n self.assertTrue('Chain [2] method = sample' in console)\n\n def test_show_progress(self, stanfile='bernoulli.stan'):\n stan = os.path.join(DATAFILES_PATH, stanfile)\n bern_model = CmdStanModel(stan_file=stan)\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n sys_stderr = io.StringIO() # tqdm prints to stderr\n with contextlib.redirect_stderr(sys_stderr):\n bern_model.sample(\n data=jdata,\n chains=2,\n iter_warmup=100,\n iter_sampling=100,\n show_progress=True,\n )\n console = sys_stderr.getvalue()\n self.assertTrue('chain 1' in console)\n self.assertTrue('chain 2' in console)\n self.assertTrue('Sampling completed' in console)\n\n sys_stderr = io.StringIO() # tqdm 
prints to stderr\n with contextlib.redirect_stderr(sys_stderr):\n bern_model.sample(\n data=jdata,\n chains=7,\n iter_warmup=100,\n iter_sampling=100,\n show_progress=True,\n )\n console = sys_stderr.getvalue()\n self.assertTrue('chain 6' in console)\n self.assertTrue('chain 7' in console)\n self.assertTrue('Sampling completed' in console)\n sys_stderr = io.StringIO() # tqdm prints to stderr\n\n with contextlib.redirect_stderr(sys_stderr):\n bern_model.sample(\n data=jdata,\n chains=2,\n chain_ids=[6, 7],\n iter_warmup=100,\n iter_sampling=100,\n force_one_process_per_chain=True,\n show_progress=True,\n )\n console = sys_stderr.getvalue()\n self.assertTrue('chain 6' in console)\n self.assertTrue('chain 7' in console)\n self.assertTrue('Sampling completed' in console)\n\n\nclass CmdStanMCMCTest(CustomTestCase):\n # pylint: disable=too-many-public-methods\n def test_validate_good_run(self):\n # construct fit using existing sampler output\n exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION)\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n sampler_args = SamplerArgs(\n iter_sampling=100, max_treedepth=11, adapt_delta=0.95\n )\n cmdstan_args = CmdStanArgs(\n model_name='bernoulli',\n model_exe=exe,\n chain_ids=[1, 2, 3, 4],\n seed=12345,\n data=jdata,\n output_dir=DATAFILES_PATH,\n method_args=sampler_args,\n )\n runset = RunSet(args=cmdstan_args, chains=4)\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'runset-good', 'bern-1.csv'),\n os.path.join(DATAFILES_PATH, 'runset-good', 'bern-2.csv'),\n os.path.join(DATAFILES_PATH, 'runset-good', 'bern-3.csv'),\n os.path.join(DATAFILES_PATH, 'runset-good', 'bern-4.csv'),\n ]\n self.assertEqual(4, runset.chains)\n retcodes = runset._retcodes\n for i in range(len(retcodes)):\n runset._set_retcode(i, 0)\n self.assertTrue(runset._check_retcodes())\n\n fit = CmdStanMCMC(runset)\n self.assertEqual(1000, fit.num_draws_warmup)\n self.assertEqual(100, fit.num_draws_sampling)\n self.assertEqual(len(BERNOULLI_COLS), len(fit.column_names))\n self.assertEqual('lp__', fit.column_names[0])\n\n draws_pd = fit.draws_pd()\n self.assertEqual(\n draws_pd.shape,\n (fit.runset.chains * fit.num_draws_sampling, len(fit.column_names)),\n )\n self.assertEqual(fit.draws_pd(vars=['theta']).shape, (400, 1))\n self.assertEqual(fit.draws_pd(vars=['lp__', 'theta']).shape, (400, 2))\n self.assertEqual(fit.draws_pd(vars=['theta', 'lp__']).shape, (400, 2))\n self.assertEqual(fit.draws_pd(vars='theta').shape, (400, 1))\n\n summary = fit.summary()\n self.assertIn('5%', list(summary.columns))\n self.assertIn('50%', list(summary.columns))\n self.assertIn('95%', list(summary.columns))\n self.assertNotIn('1%', list(summary.columns))\n self.assertNotIn('99%', list(summary.columns))\n\n summary = fit.summary(percentiles=[1, 45, 99])\n self.assertIn('1%', list(summary.columns))\n self.assertIn('45%', list(summary.columns))\n self.assertIn('99%', list(summary.columns))\n self.assertNotIn('5%', list(summary.columns))\n self.assertNotIn('50%', list(summary.columns))\n self.assertNotIn('95%', list(summary.columns))\n\n with self.assertRaises(ValueError):\n fit.summary(percentiles=[])\n\n with self.assertRaises(ValueError):\n fit.summary(percentiles=[-1])\n\n diagnostics = fit.diagnose()\n self.assertIn(\n 'Treedepth satisfactory for all transitions.', diagnostics\n )\n self.assertIn('No divergent transitions found.', diagnostics)\n self.assertIn('E-BFMI satisfactory', diagnostics)\n self.assertIn('Effective sample size satisfactory.', diagnostics)\n\n def 
test_validate_big_run(self):\n exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION)\n sampler_args = SamplerArgs(iter_warmup=1500, iter_sampling=1000)\n cmdstan_args = CmdStanArgs(\n model_name='bernoulli',\n model_exe=exe,\n chain_ids=[1, 2],\n seed=12345,\n output_dir=DATAFILES_PATH,\n method_args=sampler_args,\n )\n runset = RunSet(args=cmdstan_args, chains=2)\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'runset-big', 'output_icar_nyc-1.csv'),\n os.path.join(DATAFILES_PATH, 'runset-big', 'output_icar_nyc-1.csv'),\n ]\n fit = CmdStanMCMC(runset)\n phis = ['phi[{}]'.format(str(x + 1)) for x in range(2095)]\n column_names = list(fit.metadata.method_vars_cols.keys()) + phis\n self.assertEqual(fit.num_draws_sampling, 1000)\n self.assertEqual(fit.column_names, tuple(column_names))\n self.assertEqual(fit.metric_type, 'diag_e')\n self.assertEqual(fit.step_size.shape, (2,))\n self.assertEqual(fit.metric.shape, (2, 2095))\n self.assertEqual((1000, 2, 2102), fit.draws().shape)\n phis = fit.draws_pd(vars=['phi'])\n self.assertEqual((2000, 2095), phis.shape)\n with self.assertRaisesRegex(ValueError, r'Unknown variable: gamma'):\n fit.draws_pd(vars=['gamma'])\n\n def test_instantiate_from_csvfiles(self):\n csvfiles_path = os.path.join(DATAFILES_PATH, 'runset-good')\n bern_fit = from_csv(path=csvfiles_path)\n draws_pd = bern_fit.draws_pd()\n self.assertEqual(\n draws_pd.shape,\n (\n bern_fit.runset.chains * bern_fit.num_draws_sampling,\n len(bern_fit.column_names),\n ),\n )\n csvfiles_path = os.path.join(DATAFILES_PATH, 'runset-big')\n big_fit = from_csv(path=csvfiles_path)\n draws_pd = big_fit.draws_pd()\n self.assertEqual(\n draws_pd.shape,\n (\n big_fit.runset.chains * big_fit.num_draws_sampling,\n len(big_fit.column_names),\n ),\n )\n # list\n csvfiles_path = os.path.join(DATAFILES_PATH, 'runset-good')\n csvfiles = []\n for file in os.listdir(csvfiles_path):\n if file.endswith(\".csv\"):\n csvfiles.append(os.path.join(csvfiles_path, file))\n bern_fit = from_csv(path=csvfiles)\n\n draws_pd = bern_fit.draws_pd()\n self.assertEqual(\n draws_pd.shape,\n (\n bern_fit.runset.chains * bern_fit.num_draws_sampling,\n len(bern_fit.column_names),\n ),\n )\n # single csvfile\n bern_fit = from_csv(path=csvfiles[0])\n draws_pd = bern_fit.draws_pd()\n self.assertEqual(\n draws_pd.shape,\n (\n bern_fit.num_draws_sampling,\n len(bern_fit.column_names),\n ),\n )\n # glob\n csvfiles_path = os.path.join(csvfiles_path, '*.csv')\n big_fit = from_csv(path=csvfiles_path)\n draws_pd = big_fit.draws_pd()\n self.assertEqual(\n draws_pd.shape,\n (\n big_fit.runset.chains * big_fit.num_draws_sampling,\n len(big_fit.column_names),\n ),\n )\n\n def test_instantiate_from_csvfiles_fail(self):\n with self.assertRaisesRegex(ValueError, r'Must specify path'):\n from_csv(None)\n\n csvfiles_path = os.path.join(DATAFILES_PATH, 'runset-good')\n with self.assertRaisesRegex(ValueError, r'Bad method argument'):\n from_csv(csvfiles_path, 'not-a-method')\n\n with self.assertRaisesRegex(\n ValueError, r'Expecting Stan CSV output files from method optimize'\n ):\n from_csv(csvfiles_path, 'optimize')\n\n csvfiles = []\n with self.assertRaisesRegex(ValueError, r'No CSV files found'):\n from_csv(csvfiles, 'sample')\n\n for file in os.listdir(csvfiles_path):\n csvfiles.append(os.path.join(csvfiles_path, file))\n with self.assertRaisesRegex(ValueError, r'Bad CSV file path spec'):\n from_csv(csvfiles, 'sample')\n\n csvfiles_path = os.path.join(csvfiles_path, '*')\n with self.assertRaisesRegex(ValueError, r'Bad CSV file path 
spec'):\n from_csv(csvfiles_path, 'sample')\n\n csvfiles_path = os.path.join(csvfiles_path, '*')\n with self.assertRaisesRegex(ValueError, r'Invalid path specification'):\n from_csv(csvfiles_path, 'sample')\n\n csvfiles_path = os.path.join(DATAFILES_PATH, 'no-such-directory')\n with self.assertRaisesRegex(ValueError, r'Invalid path specification'):\n from_csv(path=csvfiles_path)\n\n wrong_method_path = os.path.join(DATAFILES_PATH, 'from_csv')\n with LogCapture() as log:\n logging.getLogger()\n from_csv(path=wrong_method_path)\n log.check_present(\n (\n 'cmdstanpy',\n 'INFO',\n 'Unable to process CSV output files from method diagnose.',\n ),\n )\n\n no_csvfiles_path = os.path.join(\n DATAFILES_PATH, 'test-fail-empty-directory'\n )\n if os.path.exists(no_csvfiles_path):\n shutil.rmtree(no_csvfiles_path, ignore_errors=True)\n os.mkdir(no_csvfiles_path)\n with self.assertRaisesRegex(ValueError, r'No CSV files found'):\n from_csv(path=no_csvfiles_path)\n if os.path.exists(no_csvfiles_path):\n shutil.rmtree(no_csvfiles_path, ignore_errors=True)\n\n def test_from_csv_fixed_param(self):\n csv_path = os.path.join(DATAFILES_PATH, 'fixed_param_sample.csv')\n fixed_param_sample = from_csv(path=csv_path)\n self.assertEqual(fixed_param_sample.draws_pd().shape, (100, 85))\n\n # pylint: disable=no-self-use\n def test_custom_metric(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n jmetric = os.path.join(DATAFILES_PATH, 'bernoulli.metric.json')\n # just test that it runs without error\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=jmetric,\n )\n jmetric2 = os.path.join(DATAFILES_PATH, 'bernoulli.metric-2.json')\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=[jmetric, jmetric2],\n )\n # read json in as dict\n with open(jmetric) as fd:\n metric_dict_1 = json.load(fd)\n with open(jmetric2) as fd:\n metric_dict_2 = json.load(fd)\n bern_model.sample(\n data=jdata,\n chains=4,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=metric_dict_1,\n )\n bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=[metric_dict_1, metric_dict_2],\n )\n with self.assertRaisesRegex(\n ValueError, 'Number of metric files must match number of chains,'\n ):\n bern_model.sample(\n data=jdata,\n chains=4,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=[metric_dict_1, metric_dict_2],\n )\n # metric mismatches - (not appropriate for bernoulli)\n with open(os.path.join(DATAFILES_PATH, 'metric_diag.data.json')) as fd:\n metric_dict_1 = json.load(fd)\n with open(os.path.join(DATAFILES_PATH, 'metric_dense.data.json')) as fd:\n metric_dict_2 = json.load(fd)\n with self.assertRaisesRegex(\n ValueError, 'Found inconsistent \"inv_metric\" entry'\n ):\n bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=[metric_dict_1, metric_dict_2],\n )\n # metric dict, no \"inv_metric\":\n some_dict = {\"foo\": [1, 2, 3]}\n with self.assertRaisesRegex(\n ValueError, 'Entry \"inv_metric\" not found in metric dict.'\n ):\n bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n metric=some_dict,\n )\n\n def test_custom_step_size(self):\n stan = 
os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n # just test that it runs without error\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n step_size=1,\n )\n\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n step_size=[1, 2],\n )\n\n def test_custom_seed(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n # just test that it runs without error\n bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=[44444, 55555],\n iter_warmup=100,\n iter_sampling=200,\n )\n\n def test_adapt_schedule(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=1,\n seed=12345,\n iter_sampling=200,\n iter_warmup=200,\n adapt_init_phase=11,\n adapt_metric_window=12,\n adapt_step_size=13,\n )\n txt_file = bern_fit.runset.stdout_files[0]\n with open(txt_file, 'r') as fd:\n lines = fd.readlines()\n stripped = [line.strip() for line in lines]\n self.assertIn('init_buffer = 11', stripped)\n self.assertIn('window = 12', stripped)\n self.assertIn('term_buffer = 13', stripped)\n\n def test_save_csv(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n )\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n stdout_file = bern_fit.runset.stdout_files[i]\n self.assertTrue(os.path.exists(csv_file))\n self.assertTrue(os.path.exists(stdout_file))\n\n # save files to good dir\n bern_fit.save_csvfiles(dir=DATAFILES_PATH)\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n self.assertTrue(os.path.exists(csv_file))\n with self.assertRaisesRegex(\n ValueError, 'File exists, not overwriting: '\n ):\n bern_fit.save_csvfiles(dir=DATAFILES_PATH)\n\n tmp2_dir = os.path.join(HERE, 'tmp2')\n os.mkdir(tmp2_dir)\n bern_fit.save_csvfiles(dir=tmp2_dir)\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n self.assertTrue(os.path.exists(csv_file))\n for i in range(bern_fit.runset.chains): # cleanup datafile_path dir\n os.remove(bern_fit.runset.csv_files[i])\n if os.path.exists(bern_fit.runset.stdout_files[i]):\n os.remove(bern_fit.runset.stdout_files[i])\n shutil.rmtree(tmp2_dir, ignore_errors=True)\n\n # regenerate to tmpdir, save to good dir\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_sampling=200,\n )\n bern_fit.save_csvfiles() # default dir\n for i in range(bern_fit.runset.chains):\n csv_file = bern_fit.runset.csv_files[i]\n self.assertTrue(os.path.exists(csv_file))\n for i in range(bern_fit.runset.chains): # cleanup default dir\n os.remove(bern_fit.runset.csv_files[i])\n if os.path.exists(bern_fit.runset.stdout_files[i]):\n os.remove(bern_fit.runset.stdout_files[i])\n\n with self.assertRaisesRegex(ValueError, 'Cannot access CSV file'):\n bern_fit.save_csvfiles(dir=DATAFILES_PATH)\n\n if 
platform.system() != 'Windows':\n with self.assertRaisesRegex(Exception, 'Cannot save to path: '):\n dir = tempfile.mkdtemp(dir=_TMPDIR)\n os.chmod(dir, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)\n bern_fit.save_csvfiles(dir=dir)\n\n def test_diagnose_divergences(self):\n exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION)\n sampler_args = SamplerArgs()\n cmdstan_args = CmdStanArgs(\n model_name='bernoulli',\n model_exe=exe,\n chain_ids=[1],\n output_dir=DATAFILES_PATH,\n method_args=sampler_args,\n )\n runset = RunSet(args=cmdstan_args, chains=1)\n runset._csv_files = [\n os.path.join(\n DATAFILES_PATH, 'diagnose-good', 'corr_gauss_depth8-1.csv'\n )\n ]\n fit = CmdStanMCMC(runset)\n # TODO - use cmdstan test files instead\n expected = '\\n'.join(\n [\n 'Checking sampler transitions treedepth.',\n '424 of 1000 (42%) transitions hit the maximum '\n 'treedepth limit of 8, or 2^8 leapfrog steps.',\n 'Trajectories that are prematurely terminated '\n 'due to this limit will result in slow exploration.',\n 'For optimal performance, increase this limit.',\n ]\n )\n self.assertIn(expected, fit.diagnose().replace('\\r\\n', '\\n'))\n\n def test_validate_bad_run(self):\n exe = os.path.join(DATAFILES_PATH, 'bernoulli' + EXTENSION)\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n sampler_args = SamplerArgs(max_treedepth=11, adapt_delta=0.95)\n\n # some chains had errors\n cmdstan_args = CmdStanArgs(\n model_name='bernoulli',\n model_exe=exe,\n chain_ids=[1, 2, 3, 4],\n seed=12345,\n data=jdata,\n output_dir=DATAFILES_PATH,\n method_args=sampler_args,\n )\n runset = RunSet(args=cmdstan_args, chains=4)\n for i in range(4):\n runset._set_retcode(i, 0)\n self.assertTrue(runset._check_retcodes())\n\n # errors reported\n runset._stdout_files = [\n os.path.join(\n DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-1.txt'\n ),\n os.path.join(\n DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-2.txt'\n ),\n os.path.join(\n DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-3.txt'\n ),\n os.path.join(\n DATAFILES_PATH, 'runset-bad', 'bad-transcript-bern-4.txt'\n ),\n ]\n self.assertIn('Exception', runset.get_err_msgs())\n\n # csv file headers inconsistent\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-1.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-2.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-3.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-hdr-bern-4.csv'),\n ]\n with self.assertRaisesRegexNested(\n ValueError, 'CmdStan config mismatch'\n ):\n CmdStanMCMC(runset)\n\n # bad draws\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-1.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-2.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-3.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-draws-bern-4.csv'),\n ]\n with self.assertRaisesRegexNested(ValueError, 'draws'):\n CmdStanMCMC(runset)\n\n # mismatch - column headers, draws\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-1.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-2.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-3.csv'),\n os.path.join(DATAFILES_PATH, 'runset-bad', 'bad-cols-bern-4.csv'),\n ]\n with self.assertRaisesRegexNested(\n ValueError, 'bad draw, expecting 9 items, found 8'\n ):\n CmdStanMCMC(runset)\n\n def test_save_warmup(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n 
jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=200,\n iter_sampling=100,\n save_warmup=True,\n )\n self.assertEqual(bern_fit.column_names, tuple(BERNOULLI_COLS))\n self.assertEqual(bern_fit.num_draws_warmup, 200)\n self.assertEqual(bern_fit.num_draws_sampling, 100)\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n self.assertEqual(\n bern_fit.draws(inc_warmup=False).shape,\n (100, 2, len(BERNOULLI_COLS)),\n )\n self.assertEqual(\n bern_fit.draws(concat_chains=True).shape,\n (200, len(BERNOULLI_COLS)),\n )\n self.assertEqual(\n bern_fit.draws(inc_warmup=True).shape, (300, 2, len(BERNOULLI_COLS))\n )\n self.assertEqual(\n bern_fit.draws(inc_warmup=True, concat_chains=True).shape,\n (600, len(BERNOULLI_COLS)),\n )\n\n self.assertEqual(bern_fit.draws_pd().shape, (200, len(BERNOULLI_COLS)))\n self.assertEqual(\n bern_fit.draws_pd(inc_warmup=False).shape,\n (200, len(BERNOULLI_COLS)),\n )\n self.assertEqual(\n bern_fit.draws_pd(inc_warmup=True).shape, (600, len(BERNOULLI_COLS))\n )\n\n def test_save_warmup_thin(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=200,\n iter_sampling=100,\n thin=5,\n save_warmup=True,\n )\n self.assertEqual(bern_fit.column_names, tuple(BERNOULLI_COLS))\n self.assertEqual(bern_fit.draws().shape, (20, 2, len(BERNOULLI_COLS)))\n self.assertEqual(\n bern_fit.draws(concat_chains=True).shape, (40, len(BERNOULLI_COLS))\n )\n self.assertEqual(\n bern_fit.draws(inc_warmup=True).shape, (60, 2, len(BERNOULLI_COLS))\n )\n\n def test_dont_save_warmup(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=200,\n iter_sampling=100,\n save_warmup=False,\n )\n self.assertEqual(bern_fit.column_names, tuple(BERNOULLI_COLS))\n self.assertEqual(bern_fit.num_draws_sampling, 100)\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n with LogCapture() as log:\n self.assertEqual(\n bern_fit.draws(inc_warmup=True).shape,\n (100, 2, len(BERNOULLI_COLS)),\n )\n log.check_present(\n (\n 'cmdstanpy',\n 'WARNING',\n \"Sample doesn't contain draws from warmup iterations,\"\n ' rerun sampler with \"save_warmup=True\".',\n )\n )\n with LogCapture() as log:\n self.assertEqual(\n bern_fit.draws(inc_warmup=True, concat_chains=True).shape,\n (200, len(BERNOULLI_COLS)),\n )\n log.check_present(\n (\n 'cmdstanpy',\n 'WARNING',\n \"Sample doesn't contain draws from warmup iterations,\"\n ' rerun sampler with \"save_warmup=True\".',\n )\n )\n with LogCapture() as log:\n self.assertEqual(\n bern_fit.draws_pd(inc_warmup=True).shape,\n (200, len(BERNOULLI_COLS)),\n )\n log.check_present(\n (\n 'cmdstanpy',\n 'WARNING',\n \"Sample doesn't contain draws from warmup iterations,\"\n ' rerun sampler with \"save_warmup=True\".',\n )\n )\n\n def test_sampler_diags(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata, chains=2, seed=12345, iter_warmup=100, 
iter_sampling=100\n )\n diags = bern_fit.method_variables()\n self.assertEqual(SAMPLER_STATE, list(diags))\n for diag in diags.values():\n self.assertEqual(diag.shape, (100, 2))\n\n diags = bern_fit.method_variables()\n self.assertEqual(SAMPLER_STATE, list(diags))\n for diag in diags.values():\n self.assertEqual(diag.shape, (100, 2))\n self.assertEqual(bern_fit.draws().shape, (100, 2, len(BERNOULLI_COLS)))\n\n def test_variable_bern(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata, chains=2, seed=12345, iter_warmup=100, iter_sampling=100\n )\n self.assertEqual(1, len(bern_fit.metadata.stan_vars_dims))\n self.assertTrue('theta' in bern_fit.metadata.stan_vars_dims)\n self.assertEqual(bern_fit.metadata.stan_vars_dims['theta'], ())\n self.assertEqual(bern_fit.stan_variable(var='theta').shape, (200,))\n with self.assertRaises(ValueError):\n bern_fit.stan_variable(var='eta')\n with self.assertRaises(ValueError):\n bern_fit.stan_variable(var='lp__')\n\n def test_variables_2d(self):\n csvfiles_path = os.path.join(DATAFILES_PATH, 'lotka-volterra.csv')\n fit = from_csv(path=csvfiles_path)\n self.assertEqual(20, fit.num_draws_sampling)\n self.assertEqual(8, len(fit.metadata.stan_vars_dims))\n self.assertTrue('z' in fit.metadata.stan_vars_dims)\n self.assertEqual(fit.metadata.stan_vars_dims['z'], (20, 2))\n vars = fit.stan_variables()\n self.assertEqual(len(vars), len(fit.metadata.stan_vars_dims))\n self.assertTrue('z' in vars)\n self.assertEqual(vars['z'].shape, (20, 20, 2))\n self.assertTrue('theta' in vars)\n self.assertEqual(vars['theta'].shape, (20, 4))\n\n def test_variables_3d(self):\n # construct fit using existing sampler output\n csvfiles_path = os.path.join(DATAFILES_PATH, 'multidim_vars.csv')\n fit = from_csv(path=csvfiles_path)\n self.assertEqual(20, fit.num_draws_sampling)\n self.assertEqual(3, len(fit.metadata.stan_vars_dims))\n self.assertTrue('y_rep' in fit.metadata.stan_vars_dims)\n self.assertEqual(fit.metadata.stan_vars_dims['y_rep'], (5, 4, 3))\n var_y_rep = fit.stan_variable(var='y_rep')\n self.assertEqual(var_y_rep.shape, (20, 5, 4, 3))\n var_beta = fit.stan_variable(var='beta')\n self.assertEqual(var_beta.shape, (20, 2))\n var_frac_60 = fit.stan_variable(var='frac_60')\n self.assertEqual(var_frac_60.shape, (20,))\n vars = fit.stan_variables()\n self.assertEqual(len(vars), len(fit.metadata.stan_vars_dims))\n self.assertTrue('y_rep' in vars)\n self.assertEqual(vars['y_rep'].shape, (20, 5, 4, 3))\n self.assertTrue('beta' in vars)\n self.assertEqual(vars['beta'].shape, (20, 2))\n self.assertTrue('frac_60' in vars)\n self.assertEqual(vars['frac_60'].shape, (20,))\n\n def test_variables_issue_361(self):\n # tests that array ordering is preserved\n stan = os.path.join(DATAFILES_PATH, 'container_vars.stan')\n container_vars_model = CmdStanModel(stan_file=stan)\n chain_1_fit = container_vars_model.sample(\n chains=1, iter_sampling=4, fixed_param=True\n )\n v_2d_arr = chain_1_fit.stan_variable('v_2d_arr')\n self.assertEqual(v_2d_arr.shape, (4, 2, 3))\n # stan 1-based indexing vs. python 0-based indexing\n for i in range(2):\n for j in range(3):\n self.assertEqual(v_2d_arr[0, i, j], ((i + 1) * 10) + j + 1)\n chain_2_fit = container_vars_model.sample(\n chains=2, iter_sampling=4, fixed_param=True\n )\n v_2d_arr = chain_2_fit.stan_variable('v_2d_arr')\n self.assertEqual(v_2d_arr.shape, (8, 2, 3))\n # stan 1-based indexing vs. 
python 0-based indexing\n for i in range(2):\n for j in range(3):\n self.assertEqual(v_2d_arr[0, i, j], ((i + 1) * 10) + j + 1)\n\n def test_validate(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=200,\n iter_sampling=100,\n thin=2,\n save_warmup=True,\n )\n # _validate_csv_files called during instantiation\n self.assertEqual(bern_fit.num_draws_warmup, 100)\n self.assertEqual(bern_fit.num_draws_sampling, 50)\n self.assertEqual(len(bern_fit.column_names), 8)\n self.assertEqual(len(bern_fit.metadata.stan_vars_dims), 1)\n self.assertEqual(len(bern_fit.metadata.stan_vars_cols.keys()), 1)\n self.assertEqual(bern_fit.metric_type, 'diag_e')\n\n def test_validate_sample_sig_figs(self, stanfile='bernoulli.stan'):\n if not cmdstan_version_before(2, 25):\n stan = os.path.join(DATAFILES_PATH, stanfile)\n bern_model = CmdStanModel(stan_file=stan)\n\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_fit = bern_model.sample(\n data=jdata,\n chains=1,\n seed=12345,\n iter_sampling=100,\n )\n bern_draws = bern_fit.draws()\n theta = format(bern_draws[99, 0, 7], '.18g')\n self.assertFalse(theta.startswith('0.21238045821757600'))\n\n bern_fit_17 = bern_model.sample(\n data=jdata,\n chains=1,\n seed=12345,\n iter_sampling=100,\n sig_figs=17,\n )\n self.assertTrue(bern_fit_17.draws().size)\n\n with self.assertRaises(ValueError):\n bern_model.sample(\n data=jdata,\n chains=1,\n seed=12345,\n iter_sampling=100,\n sig_figs=27,\n )\n with self.assertRaises(ValueError):\n bern_model.sample(\n data=jdata,\n chains=1,\n seed=12345,\n iter_sampling=100,\n sig_figs=-1,\n )\n\n def test_validate_summary_sig_figs(self):\n # construct CmdStanMCMC from logistic model output, config\n exe = os.path.join(DATAFILES_PATH, 'logistic' + EXTENSION)\n rdata = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n sampler_args = SamplerArgs(iter_sampling=100)\n cmdstan_args = CmdStanArgs(\n model_name='logistic',\n model_exe=exe,\n chain_ids=[1, 2, 3, 4],\n seed=12345,\n data=rdata,\n output_dir=DATAFILES_PATH,\n sig_figs=17,\n method_args=sampler_args,\n )\n runset = RunSet(args=cmdstan_args, chains=4)\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'logistic_output_1.csv'),\n os.path.join(DATAFILES_PATH, 'logistic_output_2.csv'),\n os.path.join(DATAFILES_PATH, 'logistic_output_3.csv'),\n os.path.join(DATAFILES_PATH, 'logistic_output_4.csv'),\n ]\n retcodes = runset._retcodes\n for i in range(len(retcodes)):\n runset._set_retcode(i, 0)\n fit = CmdStanMCMC(runset)\n\n sum_default = fit.summary()\n beta1_default = format(sum_default.iloc[1, 0], '.18g')\n self.assertTrue(beta1_default.startswith('1.3'))\n\n if not cmdstan_version_before(2, 25):\n sum_17 = fit.summary(sig_figs=17)\n beta1_17 = format(sum_17.iloc[1, 0], '.18g')\n self.assertTrue(beta1_17.startswith('1.345767078273'))\n\n sum_10 = fit.summary(sig_figs=10)\n beta1_10 = format(sum_10.iloc[1, 0], '.18g')\n self.assertTrue(beta1_10.startswith('1.34576707'))\n\n with self.assertRaises(ValueError):\n fit.summary(sig_figs=20)\n with self.assertRaises(ValueError):\n fit.summary(sig_figs=-1)\n\n def test_metadata(self):\n # construct CmdStanMCMC from logistic model output, config\n exe = os.path.join(DATAFILES_PATH, 'logistic' + EXTENSION)\n rdata = os.path.join(DATAFILES_PATH, 'logistic.data.R')\n sampler_args = SamplerArgs(iter_sampling=100)\n 
cmdstan_args = CmdStanArgs(\n model_name='logistic',\n model_exe=exe,\n chain_ids=[1, 2, 3, 4],\n seed=12345,\n data=rdata,\n output_dir=DATAFILES_PATH,\n sig_figs=17,\n method_args=sampler_args,\n )\n runset = RunSet(args=cmdstan_args, chains=4)\n runset._csv_files = [\n os.path.join(DATAFILES_PATH, 'logistic_output_1.csv'),\n os.path.join(DATAFILES_PATH, 'logistic_output_2.csv'),\n os.path.join(DATAFILES_PATH, 'logistic_output_3.csv'),\n os.path.join(DATAFILES_PATH, 'logistic_output_4.csv'),\n ]\n retcodes = runset._retcodes\n for i in range(len(retcodes)):\n runset._set_retcode(i, 0)\n fit = CmdStanMCMC(runset)\n meta = fit.metadata\n self.assertEqual(meta.cmdstan_config['model'], 'logistic_model')\n col_names = tuple(\n [\n 'lp__',\n 'accept_stat__',\n 'stepsize__',\n 'treedepth__',\n 'n_leapfrog__',\n 'divergent__',\n 'energy__',\n 'beta[1]',\n 'beta[2]',\n ]\n )\n\n self.assertEqual(fit.chains, 4)\n self.assertEqual(fit.chain_ids, [1, 2, 3, 4])\n self.assertEqual(fit.num_draws_warmup, 1000)\n self.assertEqual(fit.num_draws_sampling, 100)\n self.assertEqual(fit.column_names, col_names)\n self.assertEqual(fit.metric_type, 'diag_e')\n\n self.assertEqual(fit.metadata.cmdstan_config['num_samples'], 100)\n self.assertEqual(fit.metadata.cmdstan_config['thin'], 1)\n self.assertEqual(fit.metadata.cmdstan_config['algorithm'], 'hmc')\n self.assertEqual(fit.metadata.cmdstan_config['metric'], 'diag_e')\n self.assertAlmostEqual(fit.metadata.cmdstan_config['delta'], 0.80)\n\n self.assertTrue('n_leapfrog__' in fit.metadata.method_vars_cols)\n self.assertTrue('energy__' in fit.metadata.method_vars_cols)\n self.assertTrue('beta' not in fit.metadata.method_vars_cols)\n self.assertTrue('energy__' not in fit.metadata.stan_vars_dims)\n self.assertTrue('beta' in fit.metadata.stan_vars_dims)\n self.assertTrue('beta' in fit.metadata.stan_vars_cols)\n self.assertEqual(fit.metadata.stan_vars_dims['beta'], tuple([2]))\n self.assertEqual(fit.metadata.stan_vars_cols['beta'], tuple([7, 8]))\n\n def test_save_latent_dynamics(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n save_latent_dynamics=True,\n )\n for i in range(bern_fit.runset.chains):\n diagnostics_file = bern_fit.runset.diagnostic_files[i]\n self.assertTrue(os.path.exists(diagnostics_file))\n\n def test_save_profile(self):\n stan = os.path.join(DATAFILES_PATH, 'profile_likelihood.stan')\n profile_model = CmdStanModel(stan_file=stan)\n profile_fit = profile_model.sample(\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=200,\n save_profile=True,\n )\n for i in range(profile_fit.runset.chains):\n profile_file = profile_fit.runset.profile_files[i]\n self.assertTrue(os.path.exists(profile_file))\n\n profile_fit = profile_model.sample(\n chains=2,\n parallel_chains=2,\n seed=12345,\n iter_sampling=200,\n save_latent_dynamics=True,\n save_profile=True,\n )\n\n for i in range(profile_fit.runset.chains):\n profile_file = profile_fit.runset.profile_files[i]\n self.assertTrue(os.path.exists(profile_file))\n diagnostics_file = profile_fit.runset.diagnostic_files[i]\n self.assertTrue(os.path.exists(diagnostics_file))\n\n def test_xarray_draws(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = 
CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata, chains=2, seed=12345, iter_warmup=100, iter_sampling=100\n )\n xr_data = bern_fit.draws_xr()\n self.assertEqual(xr_data.theta.dims, ('chain', 'draw'))\n self.assertTrue(\n np.allclose(\n xr_data.theta.transpose('draw', ...).values,\n bern_fit.draws()[:, :, -1],\n )\n )\n self.assertEqual(xr_data.theta.values.shape, (2, 100))\n\n xr_data = bern_fit.draws_xr(vars=['theta'])\n self.assertEqual(xr_data.theta.values.shape, (2, 100))\n\n with self.assertRaises(KeyError):\n xr_data = bern_fit.draws_xr(vars=['eta'])\n\n # test inc_warmup\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n save_warmup=True,\n )\n xr_data = bern_fit.draws_xr(inc_warmup=True)\n self.assertEqual(xr_data.theta.values.shape, (2, 200))\n\n # test that array[1] and chains=1 are properly handled dimension-wise\n stan = os.path.join(DATAFILES_PATH, 'bernoulli_array.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata, chains=1, seed=12345, iter_warmup=100, iter_sampling=100\n )\n xr_data = bern_fit.draws_xr()\n self.assertEqual(xr_data.theta.dims, ('chain', 'draw', 'theta_dim_0'))\n self.assertEqual(xr_data.theta.values.shape, (1, 100, 1))\n\n xr_var = bern_fit.draws_xr(vars='theta')\n self.assertEqual(xr_var.theta.dims, ('chain', 'draw', 'theta_dim_0'))\n self.assertEqual(xr_var.theta.values.shape, (1, 100, 1))\n\n xr_var = bern_fit.draws_xr(vars=['theta'])\n self.assertEqual(xr_var.theta.dims, ('chain', 'draw', 'theta_dim_0'))\n self.assertEqual(xr_var.theta.values.shape, (1, 100, 1))\n\n def test_no_xarray(self):\n with self.without_import('xarray', cmdstanpy.stanfit.mcmc):\n with self.assertRaises(ImportError):\n # if this fails the testing framework is the problem\n import xarray as _ # noqa\n\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_model = CmdStanModel(stan_file=stan)\n bern_fit = bern_model.sample(\n data=jdata,\n chains=2,\n seed=12345,\n iter_warmup=100,\n iter_sampling=100,\n )\n\n with self.assertRaises(RuntimeError):\n bern_fit.draws_xr()\n\n def test_single_row_csv(self):\n stan = os.path.join(DATAFILES_PATH, 'matrix_var.stan')\n model = CmdStanModel(stan_file=stan)\n fit = model.sample(iter_sampling=1, chains=1)\n z_as_ndarray = fit.stan_variable(var=\"z\")\n self.assertEqual(z_as_ndarray.shape, (1, 4, 3)) # flattens chains\n z_as_xr = fit.draws_xr(vars=\"z\")\n self.assertEqual(z_as_xr.z.data.shape, (1, 1, 4, 3)) # keeps chains\n for i in range(4):\n for j in range(3):\n self.assertEqual(int(z_as_ndarray[0, i, j]), i + 1)\n self.assertEqual(int(z_as_xr.z.data[0, 0, i, j]), i + 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.int32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Scriddie/Varsortability | [
"357213d5ceefb6362060c56e12c18b41dc689306"
] | [
"src/sortnregress.py"
] | [
"import numpy as np\nfrom sklearn.linear_model import LinearRegression, LassoLarsIC\n\n\ndef sortnregress(X):\n \"\"\" Take n x d data, order nodes by marginal variance and\n regresses each node onto those with lower variance, using\n edge coefficients as structure estimates. \"\"\"\n LR = LinearRegression()\n LL = LassoLarsIC(criterion='bic')\n\n d = X.shape[1]\n W = np.zeros((d, d))\n increasing = np.argsort(np.var(X, axis=0))\n\n for k in range(1, d):\n covariates = increasing[:k]\n target = increasing[k]\n\n LR.fit(X[:, covariates], X[:, target].ravel())\n weight = np.abs(LR.coef_)\n LL.fit(X[:, covariates] * weight, X[:, target].ravel())\n W[covariates, target] = LL.coef_ * weight\n\n return W\n\n\nif __name__ == \"__main__\":\n W = np.array([[0, 1, 0], [0, 0, 2], [0, 0, 0]])\n X = np.random.randn(1000, 3).dot(np.linalg.inv(np.eye(3) - W))\n W_hat = sortnregress(X)\n print(W)\n print(W_hat)\n"
] | [
[
"numpy.abs",
"numpy.eye",
"sklearn.linear_model.LassoLarsIC",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression",
"numpy.var",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
blokeley/forcelib | [
"003fa02c70ee8ac8486db12a388ce67945488069"
] | [
"arraylib.py"
] | [
"# MIT License\n#\n# Copyright (c) 2017 Tom Oakley\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Get the latest version from https://github.com/blokeley/forcelib\n\n\"\"\"Array utilities.\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n# Note that the version of arraylib may be different to that of forcelib\n__version__ = '1.0.0'\n\n\ndef rescale(old_array, min_, max_):\n \"\"\"Scale array to vary between min and max.\"\"\"\n scale_factor = (max_ - min_) / (old_array.max() - old_array.min())\n return min_ + scale_factor * (old_array - old_array.min())\n\n\ndef interp(df, new_index):\n \"\"\"Return a new DataFrame with all columns values interpolated\n to the new_index values.\"\"\"\n df_out = pd.DataFrame(index=new_index)\n df_out.index.name = df.index.name\n\n for colname, col in df.iteritems():\n df_out[colname] = np.interp(new_index, df.index, col)\n\n return df_out\n"
] | [
[
"numpy.interp",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
guptarohit/audax | [
"ad11059145afce150a1af870bee41c38a786d303",
"ad11059145afce150a1af870bee41c38a786d303"
] | [
"audax/training_utils/eval_supervised.py",
"audax/training_utils/data_v2/transforms.py"
] | [
"\"\"\"\nHelper functions for evaluating a given supervised \"Classifier\" model\n\nWritten for audax by / Copyright 2022, Sarthak Yadav\n\"\"\"\nimport json\nimport functools\nimport os\nimport time\nfrom typing import Any\nimport tqdm\nfrom absl import logging\nfrom clu import metric_writers\nfrom clu import periodic_actions\nimport flax\nfrom flax import jax_utils\nfrom flax import optim\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nfrom flax.training import train_state\nimport jax\nfrom jax import lax\nimport jax.numpy as jnp\nfrom jax import random\nimport ml_collections\nfrom numpy import var\nimport numpy as np\nimport optax\nimport tensorflow as tf\nfrom sklearn.metrics import average_precision_score, accuracy_score\nfrom ..transforms import mixup\nfrom . import metrics_helper\nfrom jax.config import config\n\n\ntry:\n # Disable all GPUS\n tf.config.set_visible_devices([], 'GPU')\n visible_devices = tf.config.get_visible_devices()\n for device in visible_devices:\n assert device.device_type != 'GPU'\nexcept:\n # Invalid device or cannot modify virtual devices once initialized.\n pass\n\nfrom .misc import TrainingMode, Features, DataSplit\nfrom . import training_utilities\nfrom .data_v2.helpers import prepare_datasets_v2\nfrom .. import models\nfrom audax import frontends\nfrom .train_supervised import create_model\n\n\ndef forward(state, batch):\n variables = {\n 'params': state.get_all_params, # absolutely ok to just use state.get_all_params here\n 'batch_stats': state.batch_stats\n }\n logits = state.apply_fn(\n variables, batch['audio'], train=False, mutable=False)\n return logits\n\n\ndef load_variables_from_checkpoint(workdir, prefix):\n pretrained_variables = checkpoints.restore_checkpoint(workdir, None, prefix=prefix)\n variables = {\n \"params\": pretrained_variables['params'],\n \"batch_stats\": pretrained_variables['batch_stats']\n }\n return variables, pretrained_variables['aux_rng_keys']\n\n\ndef evaluate(workdir: str,\n eval_signal_duration=\"AUTO\",\n eval_manifest_override=None,\n eval_steps_override=None):\n config = training_utilities.read_config_from_json(workdir)\n if config.batch_size % jax.device_count() > 0:\n raise ValueError('Batch size must be divisible by the number of devices')\n # doing one sample at a time. 
Better supports VoxCeleb and other variable length sequences\n local_batch_size = 1 // jax.process_count()\n logging.info(\"Process count: {}\".format(jax.process_count()))\n # eval is done on single device\n device = config.get(\"device\", 1)\n if device:\n devices = [jax.local_devices()[device]]\n else:\n devices = jax.local_devices()\n platform = devices[0].platform\n if config.half_precision:\n if platform == 'tpu':\n input_dtype = tf.bfloat16\n else:\n input_dtype = tf.float16\n else:\n input_dtype = tf.float32\n mode = TrainingMode(config.model.type)\n if eval_signal_duration == \"AUTO\":\n if config.data.dataset_name == \"audioset\":\n config.audio_config.min_duration = 10.\n elif config.data.dataset_name == \"speechcommandsv2\":\n config.audio_config.min_duration = 1.\n elif config.data.dataset_name == \"voxceleb1\":\n config.audio_config.min_duration = 10.\n else:\n raise ValueError(f\"Unsupported dataset {config.data.dataset_name} for eval_signal_duration == 'AUTO'\")\n elif type(eval_signal_duration) == float and eval_signal_duration >= 1.0:\n config.audio_config.min_duration = eval_signal_duration\n else:\n raise ValueError(f\"Unsupported dataset {config.data.dataset_name} for eval_signal_duration == 'AUTO'\")\n if eval_manifest_override is not None:\n assert os.path.exists(eval_manifest_override), f\"{eval_manifest_override} doesn't exist\"\n logging.info(\"Overriding eval_manifest path {} in config file with {}\".format(\n config.data.eval_manifest, eval_manifest_override\n ))\n if eval_steps_override is None or eval_steps_override == 0:\n raise ValueError(f\"Incorrect value for eval_steps_override: {eval_steps_override}\")\n config.data.eval_manifest = eval_manifest_override\n config.data.eval_samples = eval_steps_override\n\n rng = random.PRNGKey(0)\n _, eval_iter = prepare_datasets_v2(config, local_batch_size, input_dtype=input_dtype)\n eval_iter = training_utilities.create_input_iter(eval_iter, devices=devices)\n\n if config.data.jax_transforms:\n tfs = training_utilities.get_feature_functions(config)\n if len(tfs) != 0:\n p_feature_extract_fn = jax.pmap(\n functools.partial(\n training_utilities.apply_audio_transforms, transforms=tfs, \n dtype=training_utilities.get_dtype(config.half_precision),\n ), axis_name='batch', devices=devices)\n else:\n p_feature_extract_fn = None\n else:\n p_feature_extract_fn = None\n model_cls, frontend_cls = training_utilities.get_model_frontend_cls(config)\n model = create_model(\n model_cls=model_cls, half_precision=config.half_precision,\n frontend_cls=frontend_cls,\n num_classes=config.model.num_classes,\n spec_aug=None,\n drop_rate=config.model.get(\"fc_drop_rate\", 0.))\n \n # placeholder to just load the thing\n learning_rate_fn = training_utilities.create_learning_rate_fn(\n config, 0.1, 100)\n state = training_utilities.create_train_state(rng, config, model, learning_rate_fn)\n # state = training_utilities.restore_checkpoint(state, workdir)\n state = checkpoints.restore_checkpoint(workdir, state, prefix=\"best_\")\n state = jax_utils.replicate(state, devices=devices)\n p_forward = jax.pmap(functools.partial(forward),\n axis_name='batch', devices=devices)\n if config.steps_per_eval == -1:\n num_validation_examples = config.data.eval_samples\n steps_per_eval = num_validation_examples // 1\n else:\n steps_per_eval = config.steps_per_eval\n eval_logits = []\n eval_labels = []\n for _ in tqdm.tqdm(range(steps_per_eval)):\n eval_batch = next(eval_iter)\n if p_feature_extract_fn:\n eval_batch['audio'] = 
p_feature_extract_fn(eval_batch['audio'])\n # print(eval_batch['audio'].shape)\n logits = p_forward(state, eval_batch)\n # print(logits.shape)\n eval_logits.append(logits)\n eval_labels.append(eval_batch['label'])\n logging.info(\"Concatenating predictions and labels..\")\n eval_logits = jnp.concatenate([jax.device_get(x) for x in eval_logits])\n eval_labels = jnp.concatenate([jax.device_get(x) for x in eval_labels])\n eval_logits = eval_logits.reshape(-1, eval_logits.shape[-1]).astype('float32')\n eval_labels = eval_labels.reshape(-1, eval_labels.shape[-1]).astype(\"float32\")\n fp = open(os.path.join(workdir, \"results.txt\"), \"w\")\n if mode == TrainingMode.MULTILABEL:\n # macro_mAP = metrics_helper.calculate_mAP(eval_logits, eval_labels)\n stats = metrics_helper.calculate_stats(eval_logits, eval_labels)\n mAP = np.mean([stat['AP'] for stat in stats])\n mAUC = np.mean([stat['auc'] for stat in stats])\n dp = metrics_helper.d_prime(mAUC)\n s = \"mAP: {:.5f}\\n\".format(mAP)\n s += \"mAUC: {:.5f}\\n\".format(mAUC)\n\n s += \"dprime: {:.5f}\".format(dp)\n elif mode == TrainingMode.MULTICLASS:\n acc = accuracy_score(y_true=np.argmax(np.asarray(eval_labels), axis=1), \n y_pred=np.argmax(np.asarray(eval_logits), axis=1))\n s = \"Accuracy: {:.4f}\".format(acc)\n print(s)\n fp.write(s)\n fp.close()\n",
"\"\"\"\nData transforms in tensorflow for the tf.data based datasets\nKept separate from core audax (since it's tensorflow based)\n\nWritten for audax by / Copyright 2022, Sarthak Yadav\n\"\"\"\nimport tensorflow as tf\n\n\ndef pad_waveform(waveform, seg_length=16000):\n padding = tf.maximum(seg_length - tf.shape(waveform)[0], 0)\n left_pad = padding // 2\n right_pad = padding - left_pad\n padded_waveform = tf.pad(waveform, paddings=[[left_pad, right_pad]])\n return padded_waveform\n\n\ndef random_crop_signal(audio, slice_length):\n data_length = tf.shape(audio, out_type=tf.dtypes.int64)[0]\n max_offset = data_length - slice_length\n if max_offset == 0:\n return audio\n random_offset = tf.random.uniform((), minval=0, maxval=max_offset, dtype=tf.dtypes.int64)\n slice_indices = tf.range(0, slice_length, dtype=tf.dtypes.int64)\n random_slice = tf.gather(audio, slice_indices + random_offset, axis=0)\n return random_slice\n\n\ndef center_crop_signal(audio, slice_length):\n data_length = tf.shape(audio, out_type=tf.dtypes.int64)[0]\n if data_length == slice_length:\n return audio\n center_offset = data_length // 2\n slice_indices = tf.range(0, slice_length, dtype=tf.dtypes.int64)\n return tf.gather(audio, slice_indices + center_offset, axis=0)\n\n\ndef label_parser(example, mode=\"multiclass\", num_classes=527):\n label = tf.sparse.to_dense(example['label'])\n if mode == \"multilabel\":\n example['label'] = tf.reduce_sum(tf.one_hot(label, num_classes, on_value=1., axis=-1), axis=0)\n else:\n example['label'] = tf.one_hot(label[0], num_classes, on_value=1.)\n return example\n\n\ndef contrastive_labels(example):\n labels = tf.range(0, example['anchor'].shape[0])\n labels = tf.one_hot(labels, example['anchor'].shape[0], on_value=1.)\n example['label'] = labels\n return example\n\n\ndef map_torch_batched_feature_extractor(example, feature_extractor):\n example['audio'] = tf.numpy_function(feature_extractor, [example['audio']], Tout=tf.float32)\n return example\n\n\ndef map_dtype(example, desired=tf.float32):\n example['audio'] = tf.cast(example['audio'], desired)\n example['label'] = tf.cast(example['label'], desired)\n return example"
] | [
[
"numpy.asarray",
"tensorflow.config.get_visible_devices",
"numpy.mean",
"tensorflow.config.set_visible_devices"
],
[
"tensorflow.sparse.to_dense",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.cast",
"tensorflow.gather",
"tensorflow.one_hot",
"tensorflow.pad",
"tensorflow.numpy_function"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ska-telescope/algorithm-reference-library | [
"1b2c8d6079249202864abf8c60cdea40f0f123cb",
"1b2c8d6079249202864abf8c60cdea40f0f123cb"
] | [
"processing_components/visibility/base.py",
"deprecated_code/workflows/mpi/imaging-pipelines_serial.py"
] | [
"\"\"\"\nBase simple visibility operations, placed here to avoid circular dependencies\n\"\"\"\n\nimport os\nimport copy\nimport logging\nfrom typing import Union\n\nimport numpy\nimport re\nfrom astropy import constants as constants\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import fits\nfrom astropy.time import Time\n\nfrom data_models.memory_data_models import Visibility, BlockVisibility, Configuration\nfrom data_models.polarisation import PolarisationFrame, ReceptorFrame, correlate_polarisation\nfrom processing_library.util.coordinate_support import xyz_to_uvw, uvw_to_xyz, skycoord_to_lmn, simulate_point, \\\n hadec_to_azel\n\nlog = logging.getLogger(__name__)\n\n\ndef vis_summary(vis: Union[Visibility, BlockVisibility]):\n \"\"\"Return string summarizing the Visibility\n\n \"\"\"\n return \"%d rows, %.3f GB\" % (vis.nvis, vis.size())\n\n\ndef copy_visibility(vis: Union[Visibility, BlockVisibility], zero=False) -> Union[Visibility, BlockVisibility]:\n \"\"\"Copy a visibility\n\n Performs a deepcopy of the data array\n \"\"\"\n assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis\n \n newvis = copy.copy(vis)\n newvis.data = numpy.copy(vis.data)\n if isinstance(vis, Visibility):\n newvis.cindex = vis.cindex\n newvis.blockvis = vis.blockvis\n if zero:\n newvis.data['vis'][...] = 0.0\n return newvis\n\n\ndef create_visibility(config: Configuration, times: numpy.array, frequency: numpy.array,\n channel_bandwidth, phasecentre: SkyCoord,\n weight: float, polarisation_frame=PolarisationFrame('stokesI'),\n integration_time=1.0,\n zerow=False, elevation_limit=15.0 * numpy.pi / 180.0, source='unknown', meta=None) -> Visibility:\n \"\"\" Create a Visibility from Configuration, hour angles, and direction of source\n\n Note that we keep track of the integration time for BDA purposes\n\n :param config: Configuration of antennas\n :param times: hour angles in radians\n :param frequency: frequencies (Hz] [nchan]\n :param weight: weight of a single sample\n :param phasecentre: phasecentre of observation\n :param channel_bandwidth: channel bandwidths: (Hz] [nchan]\n :param integration_time: Integration time ('auto' or value in s)\n :param polarisation_frame: PolarisationFrame('stokesI')\n :return: Visibility\n \"\"\"\n assert phasecentre is not None, \"Must specify phase centre\"\n \n if polarisation_frame is None:\n polarisation_frame = correlate_polarisation(config.receptor_frame)\n \n latitude = config.location.geodetic[1].to('rad').value\n\n nch = len(frequency)\n ants_xyz = config.data['xyz']\n nants = len(config.data['names'])\n nbaselines = int(nants * (nants - 1) / 2)\n ntimes = 0\n for iha, ha in enumerate(times):\n \n # Calculate the positions of the antennas as seen for this hour angle\n # and declination\n _, elevation = hadec_to_azel(ha, phasecentre.dec.rad, latitude)\n if elevation_limit is None or (elevation > elevation_limit):\n ntimes +=1\n\n npol = polarisation_frame.npol\n nrows = nbaselines * ntimes * nch\n nrowsperintegration = nbaselines * nch\n rvis = numpy.zeros([nrows, npol], dtype='complex')\n rweight = weight * numpy.ones([nrows, npol])\n rtimes = numpy.zeros([nrows])\n rfrequency = numpy.zeros([nrows])\n rchannel_bandwidth = numpy.zeros([nrows])\n rantenna1 = numpy.zeros([nrows], dtype='int')\n rantenna2 = numpy.zeros([nrows], dtype='int')\n ruvw = numpy.zeros([nrows, 3])\n \n n_flagged = 0\n\n # Do each hour angle in turn\n row = 0\n for iha, ha in enumerate(times):\n \n # Calculate the positions of the 
antennas as seen for this hour angle\n # and declination\n _, elevation = hadec_to_azel(ha, phasecentre.dec.rad, latitude)\n if elevation_limit is None or (elevation > elevation_limit):\n rtimes[row:row + nrowsperintegration] = ha * 43200.0 / numpy.pi\n\n # TODO: optimise loop\n # Loop over all pairs of antennas. Note that a2>a1\n ant_pos = xyz_to_uvw(ants_xyz, ha, phasecentre.dec.rad)\n for a1 in range(nants):\n for a2 in range(a1 + 1, nants):\n rantenna1[row:row + nch] = a1\n rantenna2[row:row + nch] = a2\n rweight[row:row+nch,...] = 1.0\n \n # Loop over all frequencies and polarisations\n for ch in range(nch):\n # noinspection PyUnresolvedReferences\n k = frequency[ch] / constants.c.value\n ruvw[row, :] = (ant_pos[a2, :] - ant_pos[a1, :]) * k\n rfrequency[row] = frequency[ch]\n rchannel_bandwidth[row] = channel_bandwidth[ch]\n row += 1\n \n if zerow:\n ruvw[..., 2] = 0.0\n assert row == nrows\n rintegration_time = numpy.full_like(rtimes, integration_time)\n vis = Visibility(uvw=ruvw, time=rtimes, antenna1=rantenna1, antenna2=rantenna2,\n frequency=rfrequency, vis=rvis,\n weight=rweight, imaging_weight=rweight,\n integration_time=rintegration_time, channel_bandwidth=rchannel_bandwidth,\n polarisation_frame=polarisation_frame, source=source, meta=meta)\n vis.phasecentre = phasecentre\n vis.configuration = config\n log.info(\"create_visibility: %s\" % (vis_summary(vis)))\n assert isinstance(vis, Visibility), \"vis is not a Visibility: %r\" % vis\n if elevation_limit is not None:\n log.info('create_visibility: flagged %d/%d visibilities below elevation limit %f (rad)' %\n (n_flagged, vis.nvis, elevation_limit))\n else:\n log.info('create_visibility: created %d visibilities' % (vis.nvis))\n\n return vis\n\n\ndef create_blockvisibility(config: Configuration,\n times: numpy.array,\n frequency: numpy.array,\n phasecentre: SkyCoord,\n weight: float = 1.0,\n polarisation_frame: PolarisationFrame = None,\n integration_time=1.0,\n channel_bandwidth=1e6,\n zerow=False,\n elevation_limit=None,\n source='unknown',\n meta=None,\n **kwargs) -> BlockVisibility:\n \"\"\" Create a BlockVisibility from Configuration, hour angles, and direction of source\n\n Note that we keep track of the integration time for BDA purposes\n\n :param config: Configuration of antennas\n :param times: hour angles in radians\n :param frequency: frequencies (Hz] [nchan]\n :param weight: weight of a single sample\n :param phasecentre: phasecentre of observation\n :param channel_bandwidth: channel bandwidths: (Hz] [nchan]\n :param integration_time: Integration time ('auto' or value in s)\n :param polarisation_frame:\n :return: BlockVisibility\n \"\"\"\n assert phasecentre is not None, \"Must specify phase centre\"\n \n if polarisation_frame is None:\n polarisation_frame = correlate_polarisation(config.receptor_frame)\n \n latitude = config.location.geodetic[1].to('rad').value\n nch = len(frequency)\n ants_xyz = config.data['xyz']\n nants = len(config.data['names'])\n\n ntimes = 0\n n_flagged = 0\n for iha, ha in enumerate(times):\n \n # Calculate the positions of the antennas as seen for this hour angle\n # and declination\n _, elevation = hadec_to_azel(ha, phasecentre.dec.rad, latitude)\n if elevation_limit is None or (elevation > elevation_limit):\n ntimes +=1\n else:\n n_flagged += 1\n\n assert ntimes > 0, \"No unflagged points\"\n if elevation_limit is not None:\n log.info('create_visibility: flagged %d/%d times below elevation limit %f (rad)' %\n (n_flagged, ntimes, elevation_limit))\n else:\n log.info('create_visibility: 
created %d times' % (ntimes))\n \n npol = polarisation_frame.npol\n visshape = [ntimes, nants, nants, nch, npol]\n rvis = numpy.zeros(visshape, dtype='complex')\n rweight = weight * numpy.ones(visshape)\n rimaging_weight = numpy.ones(visshape)\n rtimes = numpy.zeros([ntimes])\n ruvw = numpy.zeros([ntimes, nants, nants, 3])\n \n # Do each hour angle in turn\n itime = 0\n for iha, ha in enumerate(times):\n \n # Calculate the positions of the antennas as seen for this hour angle\n # and declination\n ant_pos = xyz_to_uvw(ants_xyz, ha, phasecentre.dec.rad)\n _, elevation = hadec_to_azel(ha, phasecentre.dec.rad, latitude)\n if elevation_limit is None or (elevation > elevation_limit):\n rtimes[itime] = ha * 43200.0 / numpy.pi\n rweight[itime, ...] = 1.0\n\n # Loop over all pairs of antennas. Note that a2>a1\n for a1 in range(nants):\n for a2 in range(a1 + 1, nants):\n ruvw[itime, a2, a1, :] = (ant_pos[a2, :] - ant_pos[a1, :])\n ruvw[itime, a1, a2, :] = (ant_pos[a1, :] - ant_pos[a2, :])\n itime += 1\n \n rintegration_time = numpy.full_like(rtimes, integration_time)\n rchannel_bandwidth = channel_bandwidth\n if zerow:\n ruvw[..., 2] = 0.0\n vis = BlockVisibility(uvw=ruvw, time=rtimes, frequency=frequency, vis=rvis, weight=rweight,\n imaging_weight=rimaging_weight,\n integration_time=rintegration_time, channel_bandwidth=rchannel_bandwidth,\n polarisation_frame=polarisation_frame, source=source, meta=meta)\n vis.phasecentre = phasecentre\n vis.configuration = config\n log.info(\"create_blockvisibility: %s\" % (vis_summary(vis)))\n assert isinstance(vis, BlockVisibility), \"vis is not a BlockVisibility: %r\" % vis\n\n return vis\n\n\ndef create_visibility_from_rows(vis: Union[Visibility, BlockVisibility], rows: numpy.ndarray, makecopy=True):\n \"\"\" Create a Visibility from selected rows\n\n :param vis: Visibility\n :param rows: Boolean array of row selction\n :param makecopy: Make a deep copy (True)\n :return: Visibility\n \"\"\"\n \n if rows is None or numpy.sum(rows) == 0:\n return None\n \n assert len(rows) == vis.nvis, \"Length of rows does not agree with length of visibility\"\n \n if isinstance(vis, Visibility):\n \n if makecopy:\n newvis = copy_visibility(vis)\n if vis.cindex is not None and len(rows) == len(vis.cindex):\n newvis.cindex = vis.cindex[rows]\n else:\n newvis.cindex = None\n if vis.blockvis is not None:\n newvis.blockvis = vis.blockvis\n newvis.data = copy.deepcopy(vis.data[rows])\n return newvis\n else:\n vis.data = copy.deepcopy(vis.data[rows])\n if vis.cindex is not None:\n vis.cindex = vis.cindex[rows]\n return vis\n else:\n \n if makecopy:\n newvis = copy_visibility(vis)\n newvis.data = copy.deepcopy(vis.data[rows])\n return newvis\n else:\n vis.data = copy.deepcopy(vis.data[rows])\n \n return vis\n\n\ndef phaserotate_visibility(vis: Visibility, newphasecentre: SkyCoord, tangent=True, inverse=False) -> Visibility:\n \"\"\"\n Phase rotate from the current phase centre to a new phase centre\n\n If tangent is False the uvw are recomputed and the visibility phasecentre is updated.\n Otherwise only the visibility phases are adjusted\n\n :param vis: Visibility to be rotated\n :param newphasecentre:\n :param tangent: Stay on the same tangent plane? 
(True)\n :param inverse: Actually do the opposite\n :return: Visibility\n \"\"\"\n l, m, n = skycoord_to_lmn(newphasecentre, vis.phasecentre)\n\n # No significant change?\n if numpy.abs(n) < 1e-15:\n return vis\n\n # Make a new copy\n newvis = copy_visibility(vis)\n\n if isinstance(vis, Visibility):\n phasor = simulate_point(newvis.uvw, l, m)\n \n if len(newvis.vis.shape) > len(phasor.shape):\n phasor = phasor[:, numpy.newaxis]\n \n if inverse:\n newvis.data['vis'] *= phasor\n else:\n newvis.data['vis'] *= numpy.conj(phasor)\n \n # To rotate UVW, rotate into the global XYZ coordinate system and back. We have the option of\n # staying on the tangent plane or not. If we stay on the tangent then the raster will\n # join smoothly at the edges. If we change the tangent then we will have to reproject to get\n # the results on the same image, in which case overlaps or gaps are difficult to deal with.\n if not tangent:\n if inverse:\n xyz = uvw_to_xyz(vis.data['uvw'], ha=-newvis.phasecentre.ra.rad, dec=newvis.phasecentre.dec.rad)\n newvis.data['uvw'][...] = \\\n xyz_to_uvw(xyz, ha=-newphasecentre.ra.rad, dec=newphasecentre.dec.rad)[...]\n else:\n # This is the original (non-inverse) code\n xyz = uvw_to_xyz(newvis.data['uvw'], ha=-newvis.phasecentre.ra.rad, dec=newvis.phasecentre.dec.rad)\n newvis.data['uvw'][...] = xyz_to_uvw(xyz, ha=-newphasecentre.ra.rad, dec=newphasecentre.dec.rad)[\n ...]\n newvis.phasecentre = newphasecentre\n return newvis\n\n elif isinstance(vis, BlockVisibility):\n \n k = numpy.array(vis.frequency) / constants.c.to('m s^-1').value\n\n uvw = vis.uvw[..., numpy.newaxis] * k\n phasor = numpy.ones_like(vis.vis, dtype='complex')\n _, _, _, nchan, npol = vis.vis.shape\n for chan in range(nchan):\n phasor[:, :, :, chan, :] = simulate_point(uvw[..., chan], l, m)[..., numpy.newaxis]\n\n if inverse:\n newvis.data['vis'] *= phasor\n else:\n newvis.data['vis'] *= numpy.conj(phasor)\n \n # To rotate UVW, rotate into the global XYZ coordinate system and back. We have the option of\n # staying on the tangent plane or not. If we stay on the tangent then the raster will\n # join smoothly at the edges. If we change the tangent then we will have to reproject to get\n # the results on the same image, in which case overlaps or gaps are difficult to deal with.\n if not tangent:\n # UVW is shape [nants, nants, 3], we want [nants * nants, 3]\n nrows, nants, _, _ = vis.uvw.shape\n uvw_linear = vis.uvw.reshape([nrows * nants * nants, 3])\n if inverse:\n xyz = uvw_to_xyz(uvw_linear, ha=-newvis.phasecentre.ra.rad, dec=newvis.phasecentre.dec.rad)\n uvw_linear = \\\n xyz_to_uvw(xyz, ha=-newphasecentre.ra.rad, dec=newphasecentre.dec.rad)[...]\n else:\n # This is the original (non-inverse) code\n xyz = uvw_to_xyz(uvw_linear, ha=-newvis.phasecentre.ra.rad, dec=newvis.phasecentre.dec.rad)\n uvw_linear = \\\n xyz_to_uvw(xyz, ha=-newphasecentre.ra.rad, dec=newphasecentre.dec.rad)[...]\n newvis.phasecentre = newphasecentre\n newvis.data['uvw'][...] = uvw_linear.reshape([nrows, nants, nants, 3])\n return newvis\n else:\n raise ValueError(\"vis argument neither Visibility or BlockVisibility\")\n\n\ndef export_blockvisibility_to_ms(msname, vis_list, source_name=None, ack=False):\n \"\"\" Minimal BlockVisibility to MS converter\n\n The MS format is much more general than the ARL BlockVisibility so we cut many corners. This requires casacore to be\n installed. 
If not an exception ModuleNotFoundError is raised.\n\n Write a list of BlockVisibility's to a MS file, split by field and spectral window\n\n :param msname: File name of MS\n :param vislist: BlockVisibility\n :return:\n \"\"\"\n try:\n import casacore.tables.tableutil as pt\n from casacore.tables import (makescacoldesc, makearrcoldesc, table, maketabdesc, tableexists, tableiswritable,\n tableinfo, tablefromascii, tabledelete, makecoldesc, msconcat, removeDerivedMSCal,\n taql, tablerename, tablecopy, tablecolumn, addDerivedMSCal, removeImagingColumns,\n addImagingColumns, required_ms_desc, tabledefinehypercolumn, default_ms, makedminfo,\n default_ms_subtable)\n from processing_components.visibility.msv2fund import Antenna, Stand\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"casacore is not installed\")\n\n try:\n from processing_components.visibility import msv2\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"cannot import msv2\")\n\n # log.debug(\"create_blockvisibility_from_ms: %s\" % str(tab.info()))\n # Start the table\n tbl = msv2.Ms(msname, ref_time=0, source_name= source_name, if_delete=True)\n if source_name is None:\n source_name = 'ARL'\n for vis in vis_list:\n # Check polarisition\n npol = vis.npol\n nchan = vis.nchan\n nants = vis.nants\n if vis.polarisation_frame.type == 'linear':\n polarization = ['XX','XY','YX','YY']\n elif vis.polarisation_frame.type == 'stokesI':\n polarization = ['XX']\n elif vis.polarisation_frame.type == 'circular':\n polarization = ['RR','RL','LR','LL']\n elif vis.polarisation_frame.type == 'stokesIQUV':\n polarization = ['I','Q','U','V']\n # Current ARL suppots I\n tbl.set_stokes(polarization)\n tbl.set_frequency(vis.frequency,vis.channel_bandwidth)\n n_ant = len(vis.configuration.xyz)\n\n antennas = []\n names = vis.configuration.names\n mount = vis.configuration.mount\n diameter = vis.configuration.diameter\n xyz = vis.configuration.xyz\n for i in range(len(names)):\n antennas.append(Antenna(i, Stand(names[i], xyz[i, 0], xyz[i, 1], xyz[i, 2])))\n\n # Set baselines and data\n blList = []\n\n antennas2 = antennas\n\n for i in range(0, n_ant - 1):\n for j in range(i + 1, n_ant):\n blList.append((antennas[i], antennas2[j]))\n\n tbl.set_geometry(vis.configuration, antennas)\n nbaseline = len(blList)\n ntimes = len(vis.data['time'])\n\n ms_vis = numpy.zeros([ntimes, nbaseline, nchan, npol]).astype('complex')\n ms_uvw = numpy.zeros([ntimes, nbaseline, 3])\n # bv_vis = numpy.zeros([ntimes, nants, nants, nchan, npol]).astype('complex')\n # bv_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])\n # bv_imaging_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])\n # bv_uvw = numpy.zeros([ntimes, nants, nants, 3])\n time = vis.data['time']\n int_time = vis.data['integration_time']\n bv_vis = vis.data['vis']\n bv_uvw = vis.data['uvw']\n\n # bv_antenna1 = vis.data['antenna1']\n # bv_antenna2 = vis.data['antenna2']\n\n time_last = time[0]\n time_index = 0\n for row,_ in enumerate(time):\n # MS has shape [row, npol, nchan]\n # BV has shape [ntimes, nants, nants, nchan, npol]\n bl = 0\n for i in range(0, n_ant - 1):\n for j in range(i + 1, n_ant):\n ms_vis[row, bl,...] = bv_vis[row, j, i, ...]\n # bv_weight[time_index, antenna2[row], antenna1[row], :, ...] = ms_weight[row, numpy.newaxis, ...]\n # bv_imaging_weight[time_index, antenna2[row], antenna1[row], :, ...] 
= ms_weight[row, numpy.newaxis, ...]\n ms_uvw[row,bl,:] = bv_uvw[row, j, i, :]\n bl += 1\n\n # ms_vis = numpy.zeros([ntimes*vis.nants*vis.nants, vis.nchan, vis.npol]).astype('complex')\n # ms_uvw = vis.uvw.reshape(ntimes,-1,3)\n for ntime, time in enumerate(vis.data['time']):\n for ipol, pol in enumerate(polarization):\n if int_time[ntime] is not None:\n tbl.add_data_set(time, int_time[ntime], blList, ms_vis[ntime, ..., ipol], pol=pol, source=source_name,\n phasecentre=vis.phasecentre, uvw=ms_uvw[ntime, :, :])\n else:\n tbl.add_data_set(time, 0, blList, ms_vis[ntime, ..., ipol], pol=pol,\n source=source_name, phasecentre = vis.phasecentre, uvw=ms_uvw[ntime, :, :])\n tbl.write()\n\n\ndef list_ms(msname, ack=False):\n \"\"\" List sources and data descriptors in a MeasurementSet\n\n :param msname: File name of MS\n :return:\n \"\"\"\n try:\n from casacore.tables import table # pylint: disable=import-error\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"casacore is not installed\")\n try:\n from processing_components.visibility import msv2\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"cannot import msv2\")\n \n tab = table(msname, ack=ack)\n log.debug(\"list_ms: %s\" % str(tab.info()))\n \n fieldtab = table('%s/FIELD' % msname, ack=False)\n sources = fieldtab.getcol('NAME')\n \n dds = list(numpy.unique(tab.getcol('DATA_DESC_ID')))\n \n return sources, dds\n\n\ndef create_blockvisibility_from_ms(msname, channum=None, start_chan=None, end_chan=None, ack=False,\n datacolumn='DATA', selected_sources=None, selected_dds=None):\n \"\"\" Minimal MS to BlockVisibility converter\n\n The MS format is much more general than the ARL BlockVisibility so we cut many corners. This requires casacore to be\n installed. If not an exception ModuleNotFoundError is raised.\n\n Creates a list of BlockVisibility's, split by field and spectral window\n \n Reading of a subset of channels is possible using either start_chan and end_chan or channnum. Using start_chan \n and end_chan is preferred since it only reads the channels required. Channum is more flexible and can be used to\n read a random list of channels.\n \n :param msname: File name of MS\n :param channum: range of channels e.g. 
range(17,32), default is None meaning all\n :param start_chan: Starting channel to read\n :param end_chan: End channel to read\n :return:\n \"\"\"\n try:\n from casacore.tables import table # pylint: disable=import-error\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"casacore is not installed\")\n try:\n from processing_components.visibility import msv2\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"cannot import msv2\")\n\n tab = table(msname, ack=ack)\n log.debug(\"create_blockvisibility_from_ms: %s\" % str(tab.info()))\n\n if selected_sources is None:\n fields = numpy.unique(tab.getcol('FIELD_ID'))\n else:\n fieldtab = table('%s/FIELD' % msname, ack=False)\n sources = fieldtab.getcol('NAME')\n fields = list()\n for field, source in enumerate(sources):\n if source in selected_sources: fields.append(field)\n assert len(fields) > 0, \"No sources selected\"\n \n if selected_dds is None:\n dds = numpy.unique(tab.getcol('DATA_DESC_ID'))\n else:\n dds = selected_dds\n \n log.debug(\"create_blockvisibility_from_ms: Reading unique fields %s, unique data descriptions %s\" % (\n str(fields), str(dds)))\n vis_list = list()\n for field in fields:\n ftab = table(msname, ack=ack).query('FIELD_ID==%d' % field, style='')\n for dd in dds:\n meta = {'MSV2':{'FIELD_ID': field, 'DATA_DESC_ID':dd}}\n ms = ftab.query('DATA_DESC_ID==%d' % dd, style='')\n assert ms.nrows() > 0, \"Empty selection for FIELD_ID=%d and DATA_DESC_ID=%d\" % (field, dd)\n log.debug(\"create_blockvisibility_from_ms: Found %d rows\" % (ms.nrows()))\n # The TIME column has descriptor:\n # {'valueType': 'double', 'dataManagerType': 'IncrementalStMan', 'dataManagerGroup': 'TIME',\n # 'option': 0, 'maxlen': 0, 'comment': 'Modified Julian Day',\n # 'keywords': {'QuantumUnits': ['s'], 'MEASINFO': {'type': 'epoch', 'Ref': 'UTC'}}}\n otime = ms.getcol('TIME')\n datacol = ms.getcol(datacolumn, nrow=1)\n datacol_shape = list(datacol.shape)\n channels = datacol.shape[-2]\n log.debug(\"create_blockvisibility_from_ms: Found %d channels\" % (channels))\n if channum is None:\n if start_chan is not None and end_chan is not None:\n try:\n log.debug(\"create_blockvisibility_from_ms: Reading channels from %d to %d\" %\n (start_chan, end_chan))\n print(\"create_blockvisibility_from_ms: Reading channels from %d to %d (inclusive)\" %\n (start_chan, end_chan))\n blc = [start_chan, 0]\n trc = [end_chan, datacol_shape[-1] - 1]\n channum = range(start_chan, end_chan+1)\n ms_vis = ms.getcolslice(datacolumn, blc=blc, trc=trc)\n ms_weight = ms.getcol('WEIGHT')\n except IndexError:\n raise IndexError(\"channel number exceeds max. within ms\")\n\n else:\n log.debug(\"create_blockvisibility_from_ms: Reading all %d channels\" % (channels))\n try:\n channum = range(channels)\n ms_vis = ms.getcol(datacolumn)[:, channum, :]\n ms_weight = ms.getcol('WEIGHT')\n channum = range(channels)\n except IndexError:\n raise IndexError(\"channel number exceeds max. within ms\")\n else:\n log.debug(\"create_blockvisibility_from_ms: Reading channels %s \" % (channum))\n channum = range(channels)\n try:\n ms_vis = ms.getcol(datacolumn)[:, channum, :]\n ms_weight = ms.getcol('WEIGHT')[:, :]\n except IndexError:\n raise IndexError(\"channel number exceeds max. 
within ms\")\n\n uvw = -1 * ms.getcol('UVW')\n antenna1 = ms.getcol('ANTENNA1')\n antenna2 = ms.getcol('ANTENNA2')\n integration_time = ms.getcol('INTERVAL')\n\n# time = Time((time-integration_time/2.0)/86400+ 2400000.5,format='jd',scale='utc').utc.value\n time = (otime - integration_time / 2.0)\n\n start_time = numpy.min(time)/86400.0\n end_time = numpy.max(time)/86400.0\n \n log.debug(\"create_blockvisibility_from_ms: Observation from %s to %s\" %\n (Time(start_time, format='mjd').iso, Time(end_time, format='mjd').iso))\n\n # Now get info from the subtables\n spwtab = table('%s/SPECTRAL_WINDOW' % msname, ack=False)\n cfrequency = spwtab.getcol('CHAN_FREQ')[dd][channum]\n cchannel_bandwidth = spwtab.getcol('CHAN_WIDTH')[dd][channum]\n nchan = cfrequency.shape[0]\n \n # Get polarisation info\n npol = 4\n poltab = table('%s/POLARIZATION' % msname, ack=False)\n corr_type = poltab.getcol('CORR_TYPE')\n # These correspond to the CASA Stokes enumerations\n if numpy.array_equal(corr_type[0], [1, 2, 3, 4]):\n polarisation_frame = PolarisationFrame('stokesIQUV')\n elif numpy.array_equal(corr_type[0], [5, 6, 7, 8]):\n polarisation_frame = PolarisationFrame('circular')\n elif numpy.array_equal(corr_type[0], [9, 10, 11, 12]):\n polarisation_frame = PolarisationFrame('linear')\n elif numpy.array_equal(corr_type[0], [9]):\n npol = 1\n polarisation_frame = PolarisationFrame('stokesI')\n else:\n raise KeyError(\"Polarisation not understood: %s\" % str(corr_type))\n \n \n # Get configuration\n anttab = table('%s/ANTENNA' % msname, ack=False)\n nants = anttab.nrows()\n mount = anttab.getcol('MOUNT')\n names = anttab.getcol('NAME')\n diameter = anttab.getcol('DISH_DIAMETER')\n xyz = anttab.getcol('POSITION')\n configuration = Configuration(name='', data=None, location=None,\n names=names, xyz=xyz, mount=mount, frame=None,\n receptor_frame=ReceptorFrame(\"linear\"),\n diameter=diameter)\n # Get phasecentres\n fieldtab = table('%s/FIELD' % msname, ack=False)\n pc = fieldtab.getcol('PHASE_DIR')[field, 0, :]\n source = fieldtab.getcol('NAME')[field]\n phasecentre = SkyCoord(ra=pc[0] * u.rad, dec=pc[1] * u.rad, frame='icrs', equinox='J2000')\n\n time_index_row = numpy.zeros_like(time, dtype='int')\n time_last = time[0]\n time_index = 0\n for row, _ in enumerate(time):\n if time[row] > time_last + integration_time[row]:\n assert time[row] > time_last, \"MS is not time-sorted - cannot convert\"\n time_index += 1\n time_last = time[row]\n time_index_row[row] = time_index\n\n ntimes = time_index + 1\n \n bv_times = numpy.zeros([ntimes])\n bv_vis = numpy.zeros([ntimes, nants, nants, nchan, npol]).astype('complex')\n bv_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])\n bv_imaging_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])\n bv_uvw = numpy.zeros([ntimes, nants, nants, 3])\n bv_integration_time = numpy.zeros([ntimes])\n\n for row, _ in enumerate(time):\n time_index = time_index_row[row]\n bv_times[time_index] = time[row]\n bv_vis[time_index, antenna2[row], antenna1[row], ...] = ms_vis[row, ...]\n bv_weight[time_index, antenna2[row], antenna1[row], :, ...] = ms_weight[row, numpy.newaxis, ...]\n bv_imaging_weight[time_index, antenna2[row], antenna1[row], :, ...] 
= ms_weight[row, numpy.newaxis, ...]\n bv_uvw[time_index, antenna2[row], antenna1[row], :] = uvw[row, :]\n bv_integration_time[time_index] = integration_time[row]\n\n vis_list.append(BlockVisibility(uvw=bv_uvw,\n time=bv_times,\n frequency=cfrequency,\n channel_bandwidth=cchannel_bandwidth,\n vis=bv_vis,\n weight=bv_weight,\n integration_time = bv_integration_time,\n imaging_weight=bv_imaging_weight,\n configuration=configuration,\n phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n source=source, meta=meta))\n tab.close()\n return vis_list\n\n\ndef create_visibility_from_ms(msname, channum=None, start_chan=None, end_chan=None, ack=False):\n \"\"\" Minimal MS to BlockVisibility converter\n\n The MS format is much more general than the ARL BlockVisibility so we cut many corners. This requires casacore to be\n installed. If not an exception ModuleNotFoundError is raised.\n\n Creates a list of BlockVisibility's, split by field and spectral window\n\n Reading of a subset of channels is possible using either start_chan and end_chan or channnum. Using start_chan\n and end_chan is preferred since it only reads the channels required. Channum is more flexible and can be used to\n read a random list of channels.\n \n :param msname: File name of MS\n :param channum: range of channels e.g. range(17,32), default is None meaning all\n :param start_chan: Starting channel to read\n :param end_chan: End channel to read\n :return:\n \"\"\"\n from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility\n return [convert_blockvisibility_to_visibility(v)\n for v in create_blockvisibility_from_ms(msname=msname, channum=channum,\n start_chan=start_chan, end_chan=end_chan, ack=ack)]\n\n\ndef create_blockvisibility_from_uvfits(fitsname, channum=None, ack=False, antnum=None):\n \"\"\" Minimal UVFIT to BlockVisibility converter\n\n The UVFITS format is much more general than the ARL BlockVisibility so we cut many corners. \n \n Creates a list of BlockVisibility's, split by field and spectral window\n \n :param fitsname: File name of UVFITS\n :param channum: range of channels e.g. range(17,32), default is None meaning all\n :param antnum: the number of antenna\n :return:\n \"\"\"\n def ParamDict(hdul):\n \"Return the dictionary of the random parameters\"\n\n \"\"\"\n The keys of the dictionary are the parameter names uppercased for\n consistency. 
The values are the column numbers.\n\n If multiple parameters have the same name (e.g., DATE) their\n columns are entered as a list.\n \"\"\"\n\n pre=re.compile(r\"PTYPE(?P<i>\\d+)\")\n res={}\n for k,v in hdul.header.items():\n m=pre.match(k)\n if m :\n vu=v.upper()\n if vu in res:\n res[ vu ] = [ res[vu], int(m.group(\"i\")) ]\n else:\n res[ vu ] = int(m.group(\"i\"))\n return res\n\n\n # Open the file\n with fits.open(fitsname) as hdul:\n\n # Read Spectral Window\n nspw = hdul[0].header['NAXIS5']\n # Read Channel and Frequency Interval\n freq_ref = hdul[0].header['CRVAL4']\n mid_chan_freq = hdul[0].header['CRPIX4']\n delt_freq = hdul[0].header['CDELT4']\n # Real the number of channels in one spectral window\n channels = hdul[0].header['NAXIS4']\n freq = numpy.zeros([nspw, channels])\n # Read Frequency or IF\n freqhdulname=\"AIPS FQ\"\n sdhu = hdul.index_of(freqhdulname)\n if_freq = hdul[sdhu].data['IF FREQ'].ravel()\n for i in range(nspw):\n temp = numpy.array([if_freq[i] + freq_ref+delt_freq* ff for ff in range(channels)])\n freq[i,:] = temp[:]\n freq_delt = numpy.ones(channels) * delt_freq\n if channum is None:\n channum = range(channels)\n\n primary = hdul[0].data\n # Read time\n bvtimes = Time(hdul[0].data['DATE'], hdul[0].data['_DATE'], format='jd')\n bv_times = numpy.unique(bvtimes.jd)\n ntimes = len(bv_times)\n\n # # Get Antenna\n # blin = hdul[0].data['BASELINE']\n antennahdulname=\"AIPS AN\"\n adhu = hdul.index_of(antennahdulname)\n try:\n antenna_name = hdul[adhu].data['ANNAME']\n antenna_name = antenna_name.encode('ascii','ignore')\n except:\n antenna_name = None\n\n antenna_xyz = hdul[adhu].data['STABXYZ']\n antenna_mount = hdul[adhu].data['MNTSTA']\n try:\n antenna_diameter = hdul[adhu].data['DIAMETER']\n except:\n antenna_diameter = None\n # To reading some UVFITS with wrong numbers of antenna\n if antnum is not None:\n if antenna_name is not None:\n antenna_name = antenna_name[:antnum]\n antenna_xyz = antenna_xyz[:antnum]\n antenna_mount = antenna_mount[:antnum]\n if antenna_diameter is not None:\n antenna_diameter = antenna_diameter[:antnum]\n nants = len(antenna_xyz)\n\n # res= {}\n # for i,row in enumerate(fin[ahdul].data):\n # res[row.field(\"ANNAME\") ] = i +1\n\n # Get polarisation info\n npol = hdul[0].header['NAXIS3']\n corr_type = numpy.arange(hdul[0].header['NAXIS3']) - (hdul[0].header['CRPIX3'] - 1)\n corr_type *= hdul[0].header['CDELT3']\n corr_type += hdul[0].header['CRVAL3']\n # xx yy xy yx\n # These correspond to the CASA Stokes enumerations\n if numpy.array_equal(corr_type, [1, 2, 3, 4]):\n polarisation_frame = PolarisationFrame('stokesIQUV')\n elif numpy.array_equal(corr_type, [-1, -2, -3, -4]):\n polarisation_frame = PolarisationFrame('circular')\n elif numpy.array_equal(corr_type, [-5, -6, -7, -8]):\n polarisation_frame = PolarisationFrame('linear')\n else:\n raise KeyError(\"Polarisation not understood: %s\" % str(corr_type)) \n\n configuration = Configuration(name='', data=None, location=None,\n names=antenna_name, xyz=antenna_xyz, mount=antenna_mount, frame=None,\n receptor_frame=polarisation_frame,\n diameter=antenna_diameter) \n\n # Get RA and DEC\n phase_center_ra_degrees = numpy.float(hdul[0].header['CRVAL6'])\n phase_center_dec_degrees = numpy.float(hdul[0].header['CRVAL7'])\n\n # Get phasecentres\n phasecentre = SkyCoord(ra=phase_center_ra_degrees * u.deg, dec=phase_center_dec_degrees * u.deg, frame='icrs', equinox='J2000')\n \n # Get UVW\n d=ParamDict(hdul[0])\n if \"UU\" in d:\n uu = hdul[0].data['UU'] \n vv = hdul[0].data['VV'] \n ww = 
hdul[0].data['WW'] \n else:\n uu = hdul[0].data['UU---SIN'] \n vv = hdul[0].data['VV---SIN']\n ww = hdul[0].data['WW---SIN'] \n _vis = hdul[0].data['DATA']\n\n #_vis.shape = (nchan, ntimes, (nants*(nants-1)//2 ), npol, -1)\n #self.vis = -(_vis[...,0] * 1.j + _vis[...,1])\n row = 0\n nchan = len(channum)\n vis_list = list()\n for spw_index in range(nspw):\n bv_vis = numpy.zeros([ntimes, nants, nants, nchan, npol]).astype('complex')\n bv_weight = numpy.zeros([ntimes, nants, nants, nchan, npol])\n bv_uvw = numpy.zeros([ntimes, nants, nants, 3]) \n for time_index , time in enumerate(bv_times):\n #restfreq = freq[channel_index] \n for antenna1 in range(nants-1):\n for antenna2 in range(antenna1 + 1, nants):\n for channel_no, channel_index in enumerate(channum):\n for pol_index in range(npol):\n bv_vis[time_index, antenna2, antenna1, channel_no,pol_index] = complex(_vis[row,:,:,spw_index,channel_index, pol_index ,0],_vis[row,:,:,spw_index,channel_index,pol_index ,1])\n bv_weight[time_index, antenna2, antenna1, channel_no, pol_index] = _vis[row,:,:,spw_index,channel_index,pol_index ,2]\n bv_uvw[time_index, antenna2, antenna1, 0] = uu[row]* constants.c.value\n bv_uvw[time_index, antenna2, antenna1, 1] = vv[row]* constants.c.value\n bv_uvw[time_index, antenna2, antenna1, 2] = ww[row]* constants.c.value\n row += 1 \n vis_list.append(BlockVisibility(uvw=bv_uvw,\n time=bv_times,\n frequency=freq[spw_index][channum],\n channel_bandwidth=freq_delt[channum],\n vis=bv_vis,\n weight=bv_weight,\n imaging_weight= bv_weight,\n configuration=configuration,\n phasecentre=phasecentre,\n polarisation_frame=polarisation_frame))\n return vis_list\n\ndef create_visibility_from_uvfits(fitsname, channum=None, ack=False, antnum=None):\n \"\"\" Minimal UVFITS to BlockVisibility converter\n\n Creates a list of BlockVisibility's, split by field and spectral window\n\n :param fitsname: File name of UVFITS file\n :param channum: range of channels e.g. range(17,32), default is None meaning all\n :param antnum: the number of antenna\n :return:\n \"\"\"\n from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility\n return [convert_blockvisibility_to_visibility(v)\n for v in create_blockvisibility_from_uvfits(fitsname=fitsname, channum=channum, ack=ack, antnum=antnum)]",
"\n# coding: utf-8\n\n# # Pipeline processing using serial workflows.\n# \n# This notebook demonstrates the continuum imaging and ICAL pipelines. These are based on ARL functions wrapped up as SDP workflows using the serial class.\n\n# In[1]:\n\n\n#get_ipython().run_line_magic('matplotlib', 'inline')\n\nimport os\nimport sys\n\nsys.path.append(os.path.join('..', '..'))\n\nfrom data_models.parameters import arl_path\n\n#results_dir = arl_path('test_results')\nresults_dir = './results/orig'\n\n#from matplotlib import pylab\n\n#pylab.rcParams['figure.figsize'] = (12.0, 12.0)\n#pylab.rcParams['image.cmap'] = 'rainbow'\n\nimport numpy\n\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom astropy.wcs.utils import pixel_to_skycoord\n\n#from matplotlib import pyplot as plt\n\nfrom data_models.polarisation import PolarisationFrame\n\nfrom wrappers.serial.calibration.calibration import solve_gaintable\nfrom wrappers.serial.calibration.operations import apply_gaintable\nfrom wrappers.serial.calibration.calibration_control import create_calibration_controls\nfrom wrappers.serial.visibility.base import create_blockvisibility\nfrom wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility, convert_visibility_to_blockvisibility\nfrom wrappers.serial.skycomponent.operations import create_skycomponent\nfrom wrappers.serial.image.deconvolution import deconvolve_cube\n#from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image\nfrom wrappers.serial.image.operations import export_image_to_fits, qa_image\nfrom wrappers.serial.visibility.iterators import vis_timeslice_iter\nfrom wrappers.serial.simulation.testing_support import create_low_test_image_from_gleam\nfrom processing_components.simulation.configurations import create_named_configuration\nfrom wrappers.serial.imaging.base import predict_2d, create_image_from_visibility, advise_wide_field\n\nfrom workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow, deconvolve_list_serial_workflow\nfrom workflows.serial.simulation.simulation_serial import simulate_list_serial_workflow, corrupt_list_serial_workflow\nfrom workflows.serial.pipelines.pipeline_serial import continuum_imaging_list_serial_workflow, ical_list_serial_workflow\n\nimport pprint\nimport time\npp = pprint.PrettyPrinter()\n\nimport logging\n\ndef init_logging():\n log = logging.getLogger()\n logging.basicConfig(filename='%s/imaging-pipeline.log' % results_dir,\n filemode='a',\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.INFO)\n return log\n\nlog = init_logging()\nlog.info(\"Starting imaging-pipeline\")\n\n\n# In[2]:\n\n\n#pylab.rcParams['figure.figsize'] = (12.0, 12.0)\n#pylab.rcParams['image.cmap'] = 'Greys'\n\n\n# We make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. 
All other parameters are determined from this number.\n\n# In[3]:\n\n\nnfreqwin=7\nntimes=5\nrmax=300.0\nfrequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)\n#ntimes=11\n#rmax=300.0\n#frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)\nchannel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])\ntimes = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)\n#phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')\nphasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')\n\nbvis_list=simulate_list_serial_workflow('LOWBD2',\n                                         frequency=frequency, \n                                         channel_bandwidth=channel_bandwidth,\n                                         times=times,\n                                         phasecentre=phasecentre,\n                                         order='frequency',\n                                         rmax=rmax)\n\nvis_list = [convert_blockvisibility_to_visibility(bv) for bv in bvis_list]\nprint('%d elements in vis_list' % len(vis_list))\nlog.debug('%d elements in vis_list' % len(vis_list))\n\n# In[4]:\n\n\nwprojection_planes=1\nadvice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,\n                             wprojection_planes=wprojection_planes)\n\nadvice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,\n                              wprojection_planes=wprojection_planes)\n\nvis_slices = advice_low['vis_slices']\nnpixel=advice_high['npixels2']\ncellsize=min(advice_low['cellsize'], advice_high['cellsize'])\n\n\n# Now make a graph to fill with a model drawn from GLEAM \n\n# In[ ]:\n\n\ngleam_model = [create_low_test_image_from_gleam(npixel=npixel,\n                                                frequency=[frequency[f]],\n                                                channel_bandwidth=[channel_bandwidth[f]],\n                                                cellsize=cellsize,\n                                                phasecentre=phasecentre,\n                                                polarisation_frame=PolarisationFrame(\"stokesI\"),\n                                                flux_limit=1.0,\n                                                applybeam=True)\n               for f, freq in enumerate(frequency)]\nlog.info('About to make GLEAM model')\n\n\n# In[ ]:\n\n\nlog.info('About to run predict to get predicted visibility')\nstart=time.time()\npredicted_vislist = predict_list_serial_workflow(vis_list, gleam_model, \n                                                 context='wstack', vis_slices=vis_slices)\n#log.info('About to run corrupt to get corrupted visibility')\n#corrupted_vislist = corrupt_list_serial_workflow(predicted_vislist, phase_error=1.0)\n\nend=time.time()\nprint('predict finished in %f seconds'%(end-start),flush=True)\n\n# Get the LSM. 
This is currently blank.\n\n# In[ ]:\n\n\nmodel_list = [create_image_from_visibility(vis_list[f],\n npixel=npixel,\n frequency=[frequency[f]],\n channel_bandwidth=[channel_bandwidth[f]],\n cellsize=cellsize,\n phasecentre=phasecentre,\n polarisation_frame=PolarisationFrame(\"stokesI\"))\n for f, freq in enumerate(frequency)]\n\n\n# In[ ]:\n\nstart=time.time()\nprint('About to start invert' ,flush=True)\n\ndirty_list = invert_list_serial_workflow(predicted_vislist, model_list, \n context='wstack',\n vis_slices=vis_slices, dopsf=False)\npsf_list = invert_list_serial_workflow(predicted_vislist, model_list, \n context='wstack',\n vis_slices=vis_slices, dopsf=True)\n\nend=time.time()\nprint('invert finished in %f seconds'%(end-start),flush=True)\n\n# Create and execute graphs to make the dirty image and PSF\n\n# In[ ]:\n\n\nlog.info('About to run invert to get dirty image')\ndirty = dirty_list[0][0]\n#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)\n#plt.show()\nprint(qa_image(dirty))\nexport_image_to_fits(dirty, '%s/imaging-dirty.fits' \n %(results_dir))\n\nlog.info('About to run invert to get PSF')\npsf = psf_list[0][0]\n#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)\n#plt.show()\nprint(qa_image(psf))\nexport_image_to_fits(psf, '%s/imaging-psf.fits' \n %(results_dir))\n\n\n# Now deconvolve using msclean\n\n# In[ ]:\n\n\nlog.info('About to run deconvolve')\nstart=time.time()\n\ndeconvolved, _ = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist=model_list, \n deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',\n scales=[0, 3, 10],\n algorithm='msclean', niter=1000, \n fractional_threshold=0.1,\n threshold=0.1, gain=0.1, psf_support=64)\n \nend=time.time()\nprint('deconvolve finished in %f seconds'%(end-start),flush=True)\n#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)\n#plt.show()\n\n\n# In[ ]:\n\nlog.info('About to run continuum imaging')\nstart=time.time()\ncontinuum_imaging_list = continuum_imaging_list_serial_workflow(predicted_vislist, \n model_imagelist=model_list, \n context='wstack', vis_slices=vis_slices, \n scales=[0, 3, 10], algorithm='mmclean', \n nmoment=3, niter=1000, \n fractional_threshold=0.1,\n threshold=0.1, nmajor=5, gain=0.25,\n deconvolve_facets = 8, deconvolve_overlap=16, \n deconvolve_taper='tukey', psf_support=64)\n\n\n# In[ ]:\n\nend=time.time()\nprint('continuum imaging finished in %f seconds'%(end-start),flush=True)\n\n\ndeconvolved = continuum_imaging_list[0][0]\nresidual = continuum_imaging_list[1][0]\nrestored = continuum_imaging_list[2][0]\n\n#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys', \n# vmax=0.1, vmin=-0.01)\nprint(qa_image(deconvolved, context='Clean image - no selfcal'))\n\n#plt.show()\n\n#f=show_image(restored, title='Restored clean image - no selfcal', \n# cm='Greys', vmax=1.0, vmin=-0.1)\nprint(qa_image(restored, context='Restored clean image - no selfcal'))\n#plt.show()\nexport_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits' \n %(results_dir))\n\n#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys', \n# vmax=0.1, vmin=-0.01)\nprint(qa_image(residual[0], context='Residual clean image - no selfcal'))\n#plt.show()\nexport_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits' \n %(results_dir))\n\n\n# In[ ]:\n\n\n#for chan in range(nfreqwin):\n# residual = continuum_imaging_list[1][chan]\n #show_image(residual[0], title='Channel %d' % chan, cm='Greys', \n # vmax=0.1, vmin=-0.01)\n 
#plt.show()\n\n\n# In[ ]:\n\n\ncontrols = create_calibration_controls()\n \ncontrols['T']['first_selfcal'] = 1\ncontrols['G']['first_selfcal'] = 3\ncontrols['B']['first_selfcal'] = 4\n\ncontrols['T']['timeslice'] = 'auto'\ncontrols['G']['timeslice'] = 'auto'\ncontrols['B']['timeslice'] = 1e5\n\npp.pprint(controls)\n\n\n# In[ ]:\nstart=time.time()\nlog.info('About to run ical')\n\n# TODO I change this to predicted_vislist to make it deterministic, I hope it makes\n# sense :)\n#ical_list = ical_list_serial_workflow(corrupted_vislist, \nical_list = ical_list_serial_workflow(predicted_vislist, \n model_imagelist=model_list, \n context='wstack', \n calibration_context = 'TG', \n controls=controls,\n scales=[0, 3, 10], algorithm='mmclean', \n nmoment=3, niter=1000, \n fractional_threshold=0.1,\n threshold=0.1, nmajor=5, gain=0.25,\n deconvolve_facets = 8, \n deconvolve_overlap=16,\n deconvolve_taper='tukey',\n vis_slices=ntimes,\n timeslice='auto',\n global_solution=False, \n psf_support=64,\n do_selfcal=True)\n\n\n# In[ ]:\n\nend=time.time()\nprint('ical finished in %f seconds'%(end-start),flush=True)\n\ndeconvolved = ical_list[0][0]\nresidual = ical_list[1][0]\nrestored = ical_list[2][0]\n\n#f=show_image(deconvolved, title='Clean image', cm='Greys', vmax=1.0, vmin=-0.1)\nprint(qa_image(deconvolved, context='Clean image'))\n#plt.show()\n\n#f=show_image(restored, title='Restored clean image', cm='Greys', vmax=1.0, \n# vmin=-0.1)\nprint(qa_image(restored, context='Restored clean image'))\n#plt.show()\nexport_image_to_fits(restored, '%s/imaging-dask_ical_restored.fits' \n %(results_dir))\n\n#f=show_image(residual[0], title='Residual clean image', cm='Greys', \n# vmax=0.1, vmin=-0.01)\nprint(qa_image(residual[0], context='Residual clean image'))\n#plt.show()\nexport_image_to_fits(residual[0], '%s/imaging-dask_ical_residual.fits' \n %(results_dir))\n\n"
] | [
[
"numpy.ones_like",
"numpy.abs",
"numpy.array_equal",
"numpy.unique",
"numpy.conj",
"numpy.arange",
"numpy.min",
"numpy.ones",
"numpy.full_like",
"numpy.max",
"numpy.copy",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.float"
],
[
"numpy.array",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jraffa/tableone | [
"ad1b7e2bc5b8382c281ebf800eb7061d47eb00c0"
] | [
"tableone/tableone.py"
] | [
"\"\"\"\nThe tableone package is used for creating \"Table 1\" summary statistics for\nresearch papers.\n\"\"\"\n\nimport warnings\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\nimport pandas as pd\nfrom scipy import stats\nfrom statsmodels.stats import multitest\nfrom tabulate import tabulate\n\nfrom tableone.modality import hartigan_diptest\n\n# display deprecation warnings\nwarnings.simplefilter('always', DeprecationWarning)\n\n\ndef load_dataset(name):\n \"\"\"\n Load an example dataset from the online repository (requires internet).\n\n These datasets are useful for documentation and testing.\n\n Parameters\n ----------\n name : str\n Name of the dataset.\n\n Returns\n -------\n df : :class:`pandas.DataFrame`\n Tabular data.\n \"\"\"\n path = (\"https://raw.githubusercontent.com/\"\n \"tompollard/tableone/master/datasets/{}.csv\")\n full_path = path.format(name)\n\n df = pd.read_csv(full_path)\n\n return df\n\n\nclass InputError(Exception):\n \"\"\"\n Exception raised for errors in the input.\n \"\"\"\n pass\n\n\nclass TableOne(object):\n \"\"\"\n\n If you use the tableone package, please cite:\n\n Pollard TJ, Johnson AEW, Raffa JD, Mark RG (2018). tableone: An open source\n Python package for producing summary statistics for research papers.\n JAMIA Open, Volume 1, Issue 1, 1 July 2018, Pages 26-31.\n https://doi.org/10.1093/jamiaopen/ooy012\n\n Create an instance of the tableone summary table.\n\n Parameters\n ----------\n data : pandas DataFrame\n The dataset to be summarised. Rows are observations, columns are\n variables.\n columns : list, optional\n List of columns in the dataset to be included in the final table.\n categorical : list, optional\n List of columns that contain categorical variables.\n groupby : str, optional\n Optional column for stratifying the final table (default: None).\n nonnormal : list, optional\n List of columns that contain non-normal variables (default: None).\n min_max: list, optional\n List of variables that should report minimum and maximum, instead of\n standard deviation (for normal) or Q1-Q3 (for non-normal).\n pval : bool, optional\n Display computed P-Values (default: False).\n pval_adjust : str, optional\n Method used to adjust P-Values for multiple testing.\n The P-values from the unadjusted table (default when pval=True)\n are adjusted to account for the number of total tests that were performed.\n These adjustments would be useful when many variables are being screened\n to assess if their distribution varies by the variable in the groupby argument.\n For a complete list of methods, see documentation for statsmodels multipletests.\n Available methods include ::\n\n `None` : no correction applied.\n `bonferroni` : one-step correction\n `sidak` : one-step correction\n `holm-sidak` : step down method using Sidak adjustments\n `simes-hochberg` : step-up method (independent)\n `hommel` : closed method based on Simes tests (non-negative)\n\n htest_name : bool, optional\n Display a column with the names of hypothesis tests (default: False).\n htest : dict, optional\n Dictionary of custom hypothesis tests. Keys are variable names and\n values are functions. Functions must take a list of Numpy Arrays as\n the input argument and must return a test result.\n e.g. htest = {'age': myfunc}\n missing : bool, optional\n Display a count of null values (default: True).\n ddof : int, optional\n Degrees of freedom for standard deviation calculations (default: 1).\n rename : dict, optional\n Dictionary of alternative names for variables.\n e.g. 
`rename = {'sex':'gender', 'trt':'treatment'}`\n    sort : bool or str, optional\n        If `True`, sort the variables alphabetically. If a string\n        (e.g. `'P-Value'`), sort by the specified column in ascending order.\n        Default (`False`) retains the sequence specified in the `columns`\n        argument. Currently the only columns supported are: `'Missing'`,\n        `'P-Value'`, `'P-Value (adjusted)'`, and `'Test'`.\n    limit : int or dict, optional\n        Limit to the top N most frequent categories. If int, apply to all\n        categorical variables. If dict, apply to the key (e.g. {'sex': 1}).\n    order : dict, optional\n        Specify an order for categorical variables. Key is the variable, value\n        is a list of values in order (e.g. {'sex': ['f', 'm', 'other']}).\n    remarks : bool, optional\n        Add remarks on the appropriateness of the summary measures and the\n        statistical tests (default: True).\n    label_suffix : bool, optional\n        Append summary type (e.g. \"mean (SD); median [Q1,Q3], n (%); \") to the\n        row label (default: True).\n    decimals : int or dict, optional\n        Number of decimal places to display. An integer applies the rule to all\n        variables (default: 1). A dictionary (e.g. `decimals = {'age': 0}`)\n        applies the rule per variable, defaulting to 1 place for unspecified\n        variables. For continuous variables, applies to all summary statistics\n        (e.g. mean and standard deviation). For categorical variables, applies\n        to percentage only.\n    overall : bool, optional\n        If True, add an \"overall\" column to the table. SMD and P-Value\n        calculations are performed only using stratified columns.\n    display_all : bool, optional\n        If True, set pandas display options to display all columns and rows.\n        (default: False)\n\n    Attributes\n    ----------\n    tableone : dataframe\n        Summary of the data (i.e., the \"Table 1\").\n\n    Examples\n    --------\n    >>> df = pd.DataFrame({'size': [1, 2, 60, 1, 1],\n    ...                    'fruit': ['peach', 'orange', 'peach', 'peach', 'orange'],\n    ...                    
'tasty': ['yes', 'yes', 'no', 'yes', 'no']})\n\n >>> df\n size fruit tasty\n 0 1 peach yes\n 1 2 orange yes\n 2 60 peach no\n 3 1 peach yes\n 4 1 orange no\n\n >>> TableOne(df, overall=False, groupby=\"fruit\", pval=True)\n\n Grouped by fruit\n Missing orange peach P-Value\n n 2 3\n size, mean (SD) 0 1.5 (0.7) 20.7 (34.1) 0.433\n tasty, n (%) no 0 1 (50.0) 1 (33.3) 1.000\n yes 1 (50.0) 2 (66.7)\n\n ...\n \"\"\"\n\n def __init__(self, data, columns=None, categorical=None, groupby=None,\n nonnormal=None, min_max=None, pval=False, pval_adjust=None,\n htest_name=False, pval_test_name=False, htest=None,\n isnull=None, missing=True, ddof=1, labels=None, rename=None,\n sort=False, limit=None, order=None, remarks=True,\n label_suffix=True, decimals=1, smd=False, overall=True,\n display_all=False):\n\n # labels is now rename\n if labels is not None and rename is not None:\n raise TypeError(\"TableOne received both labels and rename.\")\n elif labels is not None:\n warnings.warn(\"The labels argument is deprecated; use \"\n \"rename instead.\", DeprecationWarning)\n self._alt_labels = labels\n else:\n self._alt_labels = rename\n\n # isnull is now missing\n if isnull is not None:\n warnings.warn(\"The isnull argument is deprecated; use \"\n \"missing instead.\", DeprecationWarning)\n self._isnull = isnull\n else:\n self._isnull = missing\n\n # pval_test_name is now htest_name\n if pval_test_name:\n warnings.warn(\"The pval_test_name argument is deprecated; use \"\n \"htest_name instead.\", DeprecationWarning)\n self._pval_test_name = pval_test_name\n else:\n self._pval_test_name = htest_name\n\n # groupby should be a string\n if not groupby:\n groupby = ''\n elif groupby and type(groupby) == list:\n groupby = groupby[0]\n\n # nonnormal should be a string\n if not nonnormal:\n nonnormal = []\n elif nonnormal and type(nonnormal) == str:\n nonnormal = [nonnormal]\n\n # min_max should be a list\n if min_max and isinstance(min_max, bool):\n warnings.warn(\"min_max should specify a list of variables.\")\n min_max = None\n\n # if the input dataframe is empty, raise error\n if data.empty:\n raise InputError(\"The input dataframe is empty.\")\n\n # if columns are not specified, use all columns\n if not columns:\n columns = data.columns.values\n\n # check that the columns exist in the dataframe\n if not set(columns).issubset(data.columns):\n notfound = list(set(columns) - set(data.columns))\n raise InputError(\"\"\"Columns not found in\n dataset: {}\"\"\".format(notfound))\n\n # check for duplicate columns\n dups = data[columns].columns[data[columns].columns.duplicated()].unique()\n if not dups.empty:\n raise InputError(\"\"\"Input contains duplicate\n columns: {}\"\"\".format(dups))\n\n # if categorical not specified, try to identify categorical\n if not categorical and type(categorical) != list:\n categorical = self._detect_categorical_columns(data[columns])\n # omit categorical row if it is specified in groupby\n if groupby:\n categorical = [x for x in categorical if x != groupby]\n\n if isinstance(pval_adjust, bool) and pval_adjust:\n msg = (\"pval_adjust expects a string, but a boolean was specified.\"\n \" Defaulting to the 'bonferroni' correction.\")\n warnings.warn(msg)\n pval_adjust = \"bonferroni\"\n\n # if custom order is provided, ensure that values are strings\n if order:\n order = {k: [\"{}\".format(v) for v in order[k]] for k in order}\n\n # if input df has ordered categorical variables, get the order.\n order_cats = [x for x in data.select_dtypes(\"category\")\n if data[x].dtype.ordered]\n if 
any(order_cats):\n d_order_cats = {v: data[v].cat.categories for v in order_cats}\n d_order_cats = {k: [\"{}\".format(v) for v in d_order_cats[k]]\n for k in d_order_cats}\n\n # combine the orders. custom order takes precedence.\n if order_cats and order:\n new = {**order, **d_order_cats}\n for k in order:\n new[k] = order[k] + [x for x in new[k] if x not in order[k]]\n order = new\n elif order_cats:\n order = d_order_cats\n\n if pval and not groupby:\n raise InputError(\"If pval=True then groupby must be specified.\")\n\n self._columns = list(columns)\n self._continuous = [c for c in columns\n if c not in categorical + [groupby]]\n self._categorical = categorical\n self._nonnormal = nonnormal\n self._min_max = min_max\n self._pval = pval\n self._pval_adjust = pval_adjust\n self._htest = htest\n self._sort = sort\n self._groupby = groupby\n # degrees of freedom for standard deviation\n self._ddof = ddof\n self._limit = limit\n self._order = order\n self._remarks = remarks\n self._label_suffix = label_suffix\n self._decimals = decimals\n self._smd = smd\n self._overall = overall\n\n # display notes and warnings below the table\n self._warnings = {}\n\n # output column names that cannot be contained in a groupby\n self._reserved_columns = ['Missing', 'P-Value', 'Test',\n 'P-Value (adjusted)', 'SMD', 'Overall']\n\n if self._groupby:\n self._groupbylvls = sorted(data.groupby(groupby).groups.keys())\n\n # reorder the groupby levels if order is provided\n if self._order and self._groupby in self._order:\n unordered = [x for x in self._groupbylvls\n if x not in self._order[self._groupby]]\n self._groupbylvls = self._order[self._groupby] + unordered\n\n # check that the group levels do not include reserved words\n for level in self._groupbylvls:\n if level in self._reserved_columns:\n raise InputError(\"\"\"Group level contains '{}', a reserved\n keyword.\"\"\".format(level))\n else:\n self._groupbylvls = ['Overall']\n\n # forgive me jraffa\n if self._pval:\n self._htest_table = self._create_htest_table(data)\n\n # correct for multiple testing\n if self._pval and self._pval_adjust:\n alpha = 0.05\n adjusted = multitest.multipletests(self._htest_table['P-Value'],\n alpha=alpha,\n method=self._pval_adjust)\n self._htest_table['P-Value (adjusted)'] = adjusted[1]\n self._htest_table['adjust method'] = self._pval_adjust\n\n # create overall tables if required\n if self._categorical and self._groupby and overall:\n self.cat_describe_all = self._create_cat_describe(data, False,\n ['Overall'])\n\n if self._continuous and self._groupby and overall:\n self.cont_describe_all = self._create_cont_describe(data, False)\n\n # create descriptive tables\n if self._categorical:\n self.cat_describe = self._create_cat_describe(data, self._groupby,\n self._groupbylvls)\n\n if self._continuous:\n self.cont_describe = self._create_cont_describe(data,\n self._groupby)\n\n # compute standardized mean differences\n if self._smd:\n self.smd_table = self._create_smd_table(data)\n\n # create continuous and categorical tables\n if self._categorical:\n self.cat_table = self._create_cat_table(data, overall)\n\n if self._continuous:\n self.cont_table = self._create_cont_table(data, overall)\n\n # combine continuous variables and categorical variables into table 1\n self.tableone = self._create_tableone(data)\n\n # wrap dataframe methods\n self.head = self.tableone.head\n self.tail = self.tableone.tail\n self.to_csv = self.tableone.to_csv\n self.to_excel = self.tableone.to_excel\n self.to_html = self.tableone.to_html\n 
self.to_json = self.tableone.to_json\n self.to_latex = self.tableone.to_latex\n\n # set display options\n if display_all:\n self._set_display_options()\n\n def __str__(self):\n return self.tableone.to_string() + self._generate_remarks('\\n')\n\n def __repr__(self):\n return self.tableone.to_string() + self._generate_remarks('\\n')\n\n def _repr_html_(self):\n return self.tableone._repr_html_() + self._generate_remarks('<br />')\n\n def _set_display_options(self):\n \"\"\"\n Set pandas display options. Display all rows and columns by default.\n \"\"\"\n display_options = {'display.max_rows': None,\n 'display.max_columns': None,\n 'display.width': None,\n 'display.max_colwidth': None}\n\n for k in display_options:\n try:\n pd.set_option(k, display_options[k])\n except ValueError:\n msg = \"\"\"Newer version of Pandas required to set the '{}'\n option.\"\"\".format(k)\n warnings.warn(msg)\n\n def tabulate(self, headers=None, tablefmt='grid', **kwargs):\n \"\"\"\n Pretty-print tableone data. Wrapper for the Python 'tabulate' library.\n\n Args:\n headers (list): Defines a list of column headers to be used.\n tablefmt (str): Defines how the table is formatted. Table formats\n include: 'plain','simple','github','grid','fancy_grid','pipe',\n 'orgtbl','jira','presto','psql','rst','mediawiki','moinmoin',\n 'youtrack','html','latex','latex_raw','latex_booktabs',\n and 'textile'.\n\n Examples:\n To output tableone in github syntax, call tabulate with the\n 'tablefmt=\"github\"' argument.\n\n >>> print(tableone.tabulate(tablefmt='fancy_grid'))\n \"\"\"\n # reformat table for tabulate\n df = self.tableone\n\n if not headers:\n try:\n headers = df.columns.levels[1]\n except AttributeError:\n headers = df.columns\n\n df = df.reset_index()\n df = df.set_index('level_0')\n isdupe = df.index.duplicated()\n df.index = df.index.where(~isdupe, '')\n df = df.rename_axis(None).rename(columns={'level_1': ''})\n\n return tabulate(df, headers=headers, tablefmt=tablefmt, **kwargs)\n\n def _generate_remarks(self, newline='\\n'):\n \"\"\"\n Generate a series of remarks that the user should consider\n when interpreting the summary statistics.\n \"\"\"\n # generate warnings for continuous variables\n if self._continuous:\n # highlight far outliers\n outlier_mask = self.cont_describe.far_outliers > 1\n outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].\n dropna(how='all').index)\n if outlier_vars:\n self._warnings[\"\"\"Tukey test indicates far outliers\n in\"\"\"] = outlier_vars\n\n # highlight possible multimodal distributions using hartigan's dip\n # test -1 values indicate NaN\n modal_mask = ((self.cont_describe.diptest >= 0) &\n (self.cont_describe.diptest <= 0.05))\n modal_vars = list(self.cont_describe.diptest[modal_mask].\n dropna(how='all').index)\n if modal_vars:\n self._warnings[\"\"\"Hartigan's Dip Test reports possible\n multimodal distributions for\"\"\"] = modal_vars\n\n # highlight non normal distributions\n # -1 values indicate NaN\n modal_mask = ((self.cont_describe.normaltest >= 0) &\n (self.cont_describe.normaltest <= 0.001))\n modal_vars = list(self.cont_describe.normaltest[modal_mask].\n dropna(how='all').index)\n if modal_vars:\n self._warnings[\"\"\"Normality test reports non-normal\n distributions for\"\"\"] = modal_vars\n\n # create the warning string\n msg = '{}'.format(newline)\n for n, k in enumerate(sorted(self._warnings)):\n msg += '[{}] {}: {}.{}'.format(n+1, k,\n ', '.join(self._warnings[k]),\n newline)\n\n return msg\n\n def _detect_categorical_columns(self, data):\n 
\"\"\"\n Detect categorical columns if they are not specified.\n\n Parameters\n ----------\n data : pandas DataFrame\n The input dataset.\n\n Returns\n ----------\n likely_cat : list\n List of variables that appear to be categorical.\n \"\"\"\n # assume all non-numerical and date columns are categorical\n numeric_cols = set(data._get_numeric_data().columns.values)\n date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)\n likely_cat = set(data.columns) - numeric_cols\n likely_cat = list(likely_cat - date_cols)\n # check proportion of unique values if numerical\n for var in data._get_numeric_data().columns:\n likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.005\n if likely_flag:\n likely_cat.append(var)\n return likely_cat\n\n def _cont_smd(self, data1=None, data2=None, mean1=None, mean2=None,\n sd1=None, sd2=None, n1=None, n2=None, unbiased=False):\n \"\"\"\n Compute the standardized mean difference (regular or unbiased) using\n either raw data or summary measures.\n\n Parameters\n ----------\n data1 : list\n List of values in dataset 1 (control).\n data2 : list\n List of values in dataset 2 (treatment).\n mean1 : float\n Mean of dataset 1 (control).\n mean2 : float\n Mean of dataset 2 (treatment).\n sd1 : float\n Standard deviation of dataset 1 (control).\n sd2 : float\n Standard deviation of dataset 2 (treatment).\n n1 : int\n Sample size of dataset 1 (control).\n n2 : int\n Sample size of dataset 2 (treatment).\n unbiased : bool\n Return an unbiased estimate using Hedges' correction. Correction\n factor approximated using the formula proposed in Hedges 2011.\n (default = False)\n\n Returns\n -------\n smd : float\n Estimated standardized mean difference.\n se : float\n Standard error of the estimated standardized mean difference.\n \"\"\"\n if (data1 and not data2) or (data2 and not data1):\n raise InputError('Two sets of data must be provided.')\n elif data1 and data2:\n if any([mean1, mean2, sd1, sd2, n1, n2]):\n warnings.warn(\"\"\"Mean, n, and sd were computed from the data.\n These input args were ignored.\"\"\")\n mean1 = np.mean(data1)\n mean2 = np.mean(data2)\n sd1 = np.std(data1)\n sd2 = np.std(data2)\n n1 = len(data1)\n n2 = len(data2)\n\n # if (mean1 and not mean2) or (mean2 and not mean1):\n # raise InputError('mean1 and mean2 must both be provided.')\n\n # if (sd1 and not sd2) or (sd2 and not sd1):\n # raise InputError('sd1 and sd2 must both be provided.')\n\n # if (n1 and not n2) or (n2 and not n1):\n # raise InputError('n1 and n2 must both be provided.')\n\n # cohens_d\n smd = (mean2 - mean1) / np.sqrt((sd1 ** 2 + sd2 ** 2) / 2)\n\n # standard error\n v_d = ((n1+n2) / (n1*n2)) + ((smd ** 2) / (2*(n1+n2)))\n se = np.sqrt(v_d)\n\n if unbiased:\n # Hedges correction (J. Hedges, 1981)\n # Approximation for the the correction factor from:\n # Introduction to Meta-Analysis. Michael Borenstein,\n # L. V. Hedges, J. P. T. Higgins and H. R. Rothstein\n # Wiley (2011). Chapter 4. 
Effect Sizes Based on Means.\n j = 1 - (3/(4*(n1+n2-2)-1))\n smd = j * smd\n v_g = (j ** 2) * v_d\n se = np.sqrt(v_g)\n\n return smd, se\n\n def _cat_smd(self, prop1=None, prop2=None, n1=None, n2=None,\n unbiased=False):\n \"\"\"\n Compute the standardized mean difference (regular or unbiased) using\n either raw data or summary measures.\n\n Parameters\n ----------\n prop1 : list\n Proportions (range 0-1) for each categorical value in dataset 1\n (control).\n prop2 : list\n Proportions (range 0-1) for each categorical value in dataset 2\n (treatment).\n n1 : int\n Sample size of dataset 1 (control).\n n2 : int\n Sample size of dataset 2 (treatment).\n unbiased : bool\n Return an unbiased estimate using Hedges' correction. Correction\n factor approximated using the formula proposed in Hedges 2011.\n (default = False)\n\n Returns\n -------\n smd : float\n Estimated standardized mean difference.\n se : float\n Standard error of the estimated standardized mean difference.\n \"\"\"\n # Categorical SMD Yang & Dalton 2012\n # https://support.sas.com/resources/papers/proceedings12/335-2012.pdf\n prop1 = np.asarray(prop1)\n prop2 = np.asarray(prop2)\n\n # Drop first level for consistency with R tableone\n # \"to eliminate dependence if more than two levels\"\n prop1 = prop1[1:]\n prop2 = prop2[1:]\n\n lst_cov = []\n for p in [prop1, prop2]:\n variance = p * (1 - p)\n covariance = - np.outer(p, p)\n covariance[np.diag_indices_from(covariance)] = variance\n lst_cov.append(covariance)\n\n mean_diff = np.matrix(prop2 - prop1)\n mean_cov = (lst_cov[0] + lst_cov[1])/2\n\n # TODO: add steps to deal with nulls\n\n try:\n sq_md = mean_diff * np.linalg.inv(mean_cov) * mean_diff.T\n except LinAlgError:\n sq_md = np.nan\n\n try:\n smd = np.asarray(np.sqrt(sq_md))[0][0]\n except IndexError:\n smd = np.nan\n\n # standard error\n v_d = ((n1+n2) / (n1*n2)) + ((smd ** 2) / (2*(n1+n2)))\n se = np.sqrt(v_d)\n\n if unbiased:\n # Hedges correction (J. Hedges, 1981)\n # Approximation for the the correction factor from:\n # Introduction to Meta-Analysis. Michael Borenstein,\n # L. V. Hedges, J. P. T. Higgins and H. R. Rothstein\n # Wiley (2011). Chapter 4. 
Effect Sizes Based on Means.\n j = 1 - (3/(4*(n1+n2-2)-1))\n smd = j * smd\n v_g = (j ** 2) * v_d\n se = np.sqrt(v_g)\n\n return smd, se\n\n def _q25(self, x):\n \"\"\"\n Compute percentile (25th)\n \"\"\"\n return np.nanpercentile(x.values, 25)\n\n def _q75(self, x):\n \"\"\"\n Compute percentile (75th)\n \"\"\"\n return np.nanpercentile(x.values, 75)\n\n def _std(self, x):\n \"\"\"\n Compute standard deviation with ddof degrees of freedom\n \"\"\"\n if len(x) == 1:\n return 0.0\n else:\n return np.nanstd(x.values, ddof=self._ddof)\n\n def _diptest(self, x):\n \"\"\"\n Compute Hartigan Dip Test for modality.\n\n p < 0.05 suggests possible multimodality.\n \"\"\"\n p = hartigan_diptest(x.values)\n # dropna=False argument in pivot_table does not function as expected\n # https://github.com/pandas-dev/pandas/issues/22159\n # return -1 instead of None\n if pd.isnull(p):\n return -1\n return p\n\n def _normaltest(self, x):\n \"\"\"\n Compute test for normal distribution.\n\n Null hypothesis: x comes from a normal distribution\n p < alpha suggests the null hypothesis can be rejected.\n \"\"\"\n if len(x.values[~np.isnan(x.values)]) >= 20:\n stat, p = stats.normaltest(x.values, nan_policy='omit')\n else:\n p = None\n # dropna=False argument in pivot_table does not function as expected\n # return -1 instead of None\n if pd.isnull(p):\n return -1\n return p\n\n def _tukey(self, x, threshold):\n \"\"\"\n Count outliers according to Tukey's rule.\n\n Where Q1 is the lower quartile and Q3 is the upper quartile,\n an outlier is an observation outside of the range:\n\n [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]\n\n k = 1.5 indicates an outlier\n k = 3.0 indicates an outlier that is \"far out\"\n \"\"\"\n vals = x.values[~np.isnan(x.values)]\n\n try:\n q1, q3 = np.percentile(vals, [25, 75])\n iqr = q3 - q1\n low_bound = q1 - (iqr * threshold)\n high_bound = q3 + (iqr * threshold)\n outliers = np.where((vals > high_bound) | (vals < low_bound))\n except IndexError:\n outliers = []\n\n return outliers\n\n def _outliers(self, x):\n \"\"\"\n Compute number of outliers\n \"\"\"\n outliers = self._tukey(x, threshold=1.5)\n return np.size(outliers)\n\n def _far_outliers(self, x):\n \"\"\"\n Compute number of \"far out\" outliers\n \"\"\"\n outliers = self._tukey(x, threshold=3.0)\n return np.size(outliers)\n\n def _t1_summary(self, x):\n \"\"\"\n Compute median [IQR] or mean (Std) for the input series.\n\n Parameters\n ----------\n x : pandas Series\n Series of values to be summarised.\n \"\"\"\n # set decimal places\n if isinstance(self._decimals, int):\n n = self._decimals\n elif isinstance(self._decimals, dict):\n try:\n n = self._decimals[x.name]\n except KeyError:\n n = 1\n else:\n n = 1\n msg = \"\"\"The decimals arg must be an int or dict.\n Defaulting to {} d.p.\"\"\".format(n)\n warnings.warn(msg)\n\n if x.name in self._nonnormal:\n f = \"{{:.{}f}} [{{:.{}f}},{{:.{}f}}]\".format(n, n, n)\n if self._min_max and x.name in self._min_max:\n return f.format(\n np.nanmedian(x.values), np.nanmin(x.values),\n np.nanmax(x.values),\n )\n else:\n return f.format(\n np.nanmedian(x.values),\n np.nanpercentile(x.values, 25),\n np.nanpercentile(x.values, 75),\n )\n else:\n if self._min_max and x.name in self._min_max:\n f = \"{{:.{}f}} [{{:.{}f}},{{:.{}f}}]\".format(n, n, n)\n return f.format(\n np.nanmean(x.values), np.nanmin(x.values),\n np.nanmax(x.values),\n )\n else:\n f = '{{:.{}f}} ({{:.{}f}})'.format(n, n)\n return f.format(np.nanmean(x.values), self._std(x))\n\n def _create_cont_describe(self, data, groupby):\n 
\"\"\"\n Describe the continuous data.\n\n Parameters\n ----------\n data : pandas DataFrame\n The input dataset.\n\n Returns\n ----------\n df_cont : pandas DataFrame\n Summarise the continuous variables.\n \"\"\"\n aggfuncs = [pd.Series.count, np.mean, np.median, self._std,\n self._q25, self._q75, min, max, self._t1_summary,\n self._diptest, self._outliers, self._far_outliers,\n self._normaltest]\n\n # coerce continuous data to numeric\n cont_data = data[self._continuous].apply(pd.to_numeric,\n errors='coerce')\n # check all data in each continuous column is numeric\n bad_cols = cont_data.count() != data[self._continuous].count()\n bad_cols = cont_data.columns[bad_cols]\n if len(bad_cols) > 0:\n msg = (\"The following continuous column(s) have \"\n \"non-numeric values: {variables}. Either specify the \"\n \"column(s) as categorical or remove the \"\n \"non-numeric values.\").format(variables=bad_cols.values)\n raise InputError(msg)\n\n # check for coerced column containing all NaN to warn user\n for column in cont_data.columns[cont_data.count() == 0]:\n self._non_continuous_warning(column)\n\n if groupby:\n # add the groupby column back\n cont_data = cont_data.merge(data[[groupby]],\n left_index=True,\n right_index=True)\n\n # group and aggregate data\n df_cont = pd.pivot_table(cont_data,\n columns=[groupby],\n aggfunc=aggfuncs)\n else:\n # if no groupby, just add single group column\n df_cont = cont_data.apply(aggfuncs).T\n df_cont.columns.name = 'Overall'\n df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,\n ['Overall']])\n\n df_cont.index = df_cont.index.rename('variable')\n\n # remove prefix underscore from column names (e.g. _std -> std)\n agg_rename = df_cont.columns.levels[0]\n agg_rename = [x[1:] if x[0] == '_' else x for x in agg_rename]\n df_cont.columns = df_cont.columns.set_levels(agg_rename, level=0)\n\n return df_cont\n\n def _format_cat(self, row):\n var = row.name[0]\n if var in self._decimals:\n n = self._decimals[var]\n else:\n n = 1\n f = '{{:.{}f}}'.format(n)\n return f.format(row.percent)\n\n def _create_cat_describe(self, data, groupby, groupbylvls):\n \"\"\"\n Describe the categorical data.\n\n Parameters\n ----------\n data : pandas DataFrame\n The input dataset.\n\n Returns\n ----------\n df_cat : pandas DataFrame\n Summarise the categorical variables.\n \"\"\"\n group_dict = {}\n\n for g in groupbylvls:\n if groupby:\n d_slice = data.loc[data[groupby] == g, self._categorical]\n else:\n d_slice = data[self._categorical].copy()\n\n # create a dataframe with freq, proportion\n df = d_slice.copy()\n\n # convert to str to handle int converted to boolean. 
Avoid nans.\n for column in df.columns:\n df[column] = [str(row) if not pd.isnull(row)\n else None for row in df[column].values]\n\n df = df.melt().groupby(['variable',\n 'value']).size().to_frame(name='freq')\n\n df['percent'] = df['freq'].div(df.freq.sum(level=0),\n level=0).astype(float) * 100\n\n # set number of decimal places for percent\n if isinstance(self._decimals, int):\n n = self._decimals\n f = '{{:.{}f}}'.format(n)\n df['percent_str'] = df['percent'].astype(float).map(f.format)\n elif isinstance(self._decimals, dict):\n df.loc[:, 'percent_str'] = df.apply(self._format_cat, axis=1)\n else:\n n = 1\n f = '{{:.{}f}}'.format(n)\n df['percent_str'] = df['percent'].astype(float).map(f.format)\n\n # add n column, listing total non-null values for each variable\n ct = d_slice.count().to_frame(name='n')\n ct.index.name = 'variable'\n df = df.join(ct)\n\n # add null count\n nulls = d_slice.isnull().sum().to_frame(name='Missing')\n nulls.index.name = 'variable'\n # only save null count to the first category for each variable\n # do this by extracting the first category from the df row index\n levels = df.reset_index()[['variable',\n 'value']].groupby('variable').first()\n # add this category to the nulls table\n nulls = nulls.join(levels)\n nulls = nulls.set_index('value', append=True)\n # join nulls to categorical\n df = df.join(nulls)\n\n # add summary column\n df['t1_summary'] = (df.freq.map(str) + ' ('\n + df.percent_str.map(str)+')')\n\n # add to dictionary\n group_dict[g] = df\n\n df_cat = pd.concat(group_dict, axis=1)\n # ensure the groups are the 2nd level of the column index\n if df_cat.columns.nlevels > 1:\n df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1, level=0)\n\n return df_cat\n\n def _create_htest_table(self, data):\n \"\"\"\n Create a table containing P-Values for significance tests. Add features\n of the distributions and the P-Values to the dataframe.\n\n Parameters\n ----------\n data : pandas DataFrame\n The input dataset.\n\n Returns\n ----------\n df : pandas DataFrame\n A table containing the P-Values, test name, etc.\n \"\"\"\n # list features of the variable e.g. 
matched, paired, n_expected\n df = pd.DataFrame(index=self._continuous+self._categorical,\n columns=['continuous', 'nonnormal',\n 'min_observed', 'P-Value', 'Test'])\n\n df.index = df.index.rename('variable')\n df['continuous'] = np.where(df.index.isin(self._continuous),\n True, False)\n\n df['nonnormal'] = np.where(df.index.isin(self._nonnormal),\n True, False)\n\n # list values for each variable, grouped by groupby levels\n for v in df.index:\n is_continuous = df.loc[v]['continuous']\n is_categorical = ~df.loc[v]['continuous']\n is_normal = ~df.loc[v]['nonnormal']\n\n # if continuous, group data into list of lists\n if is_continuous:\n catlevels = None\n grouped_data = {}\n for s in self._groupbylvls:\n lvl_data = data.loc[data[self._groupby] == s, v]\n # coerce to numeric and drop non-numeric data\n lvl_data = lvl_data.apply(pd.to_numeric,\n errors='coerce').dropna()\n # append to overall group data\n grouped_data[s] = lvl_data.values\n min_observed = min([len(x) for x in grouped_data.values()])\n # if categorical, create contingency table\n elif is_categorical:\n catlevels = sorted(data[v].astype('category').cat.categories)\n cross_tab = pd.crosstab(data[self._groupby].\n rename('_groupby_var_'), data[v])\n min_observed = cross_tab.sum(axis=1).min()\n grouped_data = cross_tab.T.to_dict('list')\n\n # minimum number of observations across all levels\n df.loc[v, 'min_observed'] = min_observed\n\n # compute pvalues\n (df.loc[v, 'P-Value'],\n df.loc[v, 'Test']) = self._p_test(v, grouped_data,\n is_continuous,\n is_categorical, is_normal,\n min_observed, catlevels)\n\n return df\n\n def _create_smd_table(self, data):\n \"\"\"\n Create a table containing pairwise Standardized Mean Differences\n (SMDs).\n\n Parameters\n ----------\n data : pandas DataFrame\n The input dataset.\n\n Returns\n ----------\n df : pandas DataFrame\n A table containing pairwise standardized mean differences\n (SMDs).\n \"\"\"\n # create the SMD table\n permutations = [sorted((x, y),\n key=lambda f: self._groupbylvls.index(f))\n for x in self._groupbylvls\n for y in self._groupbylvls if x is not y]\n\n p_set = set(tuple(x) for x in permutations)\n\n colname = 'SMD ({0},{1})'\n columns = [colname.format(x[0], x[1]) for x in p_set]\n df = pd.DataFrame(index=self._continuous+self._categorical,\n columns=columns)\n df.index = df.index.rename('variable')\n\n for p in p_set:\n try:\n for v in self.cont_describe.index:\n smd, _ = self._cont_smd(\n mean1=self.cont_describe['mean'][p[0]].loc[v],\n mean2=self.cont_describe['mean'][p[1]].loc[v],\n sd1=self.cont_describe['std'][p[0]].loc[v],\n sd2=self.cont_describe['std'][p[1]].loc[v],\n n1=self.cont_describe['count'][p[0]].loc[v],\n n2=self.cont_describe['count'][p[1]].loc[v],\n unbiased=False)\n df[colname.format(p[0], p[1])].loc[v] = smd\n except AttributeError:\n pass\n\n try:\n for v, _ in self.cat_describe.groupby(level=0):\n smd, _ = self._cat_smd(\n prop1=self.cat_describe.loc[[v]]['percent'][p[0]].values/100,\n prop2=self.cat_describe.loc[[v]]['percent'][p[1]].values/100,\n n1=self.cat_describe.loc[[v]]['freq'][p[0]].sum(),\n n2=self.cat_describe.loc[[v]]['freq'][p[1]].sum(),\n unbiased=False)\n df[colname.format(p[0], p[1])].loc[v] = smd\n except AttributeError:\n pass\n\n return df\n\n def _p_test(self, v, grouped_data, is_continuous, is_categorical,\n is_normal, min_observed, catlevels):\n \"\"\"\n Compute P-Values.\n\n Parameters\n ----------\n v : str\n Name of the variable to be tested.\n grouped_data : dict\n Dictionary of Numpy Arrays to be tested.\n 
is_continuous : bool\n True if the variable is continuous.\n is_categorical : bool\n True if the variable is categorical.\n is_normal : bool\n True if the variable is normally distributed.\n min_observed : int\n Minimum number of values across groups for the variable.\n catlevels : list\n Sorted list of levels for categorical variables.\n\n Returns\n ----------\n pval : float\n The computed P-Value.\n ptest : str\n The name of the test used to compute the P-Value.\n \"\"\"\n\n # no test by default\n pval = np.nan\n ptest = 'Not tested'\n\n # apply user defined test\n if self._htest and v in self._htest:\n pval = self._htest[v](*grouped_data.values())\n ptest = self._htest[v].__name__\n return pval, ptest\n\n # do not test if the variable has no observations in a level\n if min_observed == 0:\n msg = (\"No P-Value was computed for {variable} due to the low \"\n \"number of observations.\"\"\").format(variable=v)\n warnings.warn(msg)\n return pval, ptest\n\n # continuous\n if (is_continuous and is_normal and len(grouped_data) == 2\n and min_observed >= 2):\n ptest = 'Two Sample T-test'\n test_stat, pval = stats.ttest_ind(*grouped_data.values(),\n equal_var=False,\n nan_policy=\"omit\")\n elif is_continuous and is_normal:\n # normally distributed\n ptest = 'One-way ANOVA'\n test_stat, pval = stats.f_oneway(*grouped_data.values())\n elif is_continuous and not is_normal:\n # non-normally distributed\n ptest = 'Kruskal-Wallis'\n test_stat, pval = stats.kruskal(*grouped_data.values())\n # categorical\n elif is_categorical:\n # default to chi-squared\n ptest = 'Chi-squared'\n grouped_val_list = [x for x in grouped_data.values()]\n chi2, pval, dof, expected = stats.chi2_contingency(grouped_val_list)\n # if any expected cell counts are < 5, chi2 may not be valid\n # if this is a 2x2, switch to fisher exact\n if expected.min() < 5 or min_observed < 5:\n if np.shape(grouped_val_list) == (2, 2):\n ptest = \"Fisher's exact\"\n odds_ratio, pval = stats.fisher_exact(grouped_val_list)\n else:\n ptest = \"Chi-squared (warning: expected count < 5)\"\n chi_warn = (\"Chi-squared tests for the following \"\n \"variables may be invalid due to the low \"\n \"number of observations\")\n try:\n self._warnings[chi_warn].append(v)\n except KeyError:\n self._warnings[chi_warn] = [v]\n\n return pval, ptest\n\n def _create_cont_table(self, data, overall):\n \"\"\"\n Create tableone for continuous data.\n\n Returns\n ----------\n table : pandas DataFrame\n A table summarising the continuous variables.\n \"\"\"\n # remove the t1_summary level\n table = self.cont_describe[['t1_summary']].copy()\n table.columns = table.columns.droplevel(level=0)\n\n # add a column of null counts as 1-count() from previous function\n nulltable = data[self._continuous].isnull().sum().to_frame(name='Missing')\n try:\n table = table.join(nulltable)\n # if columns form a CategoricalIndex, need to convert to string first\n except TypeError:\n table.columns = table.columns.astype(str)\n table = table.join(nulltable)\n\n # add an empty value column, for joining with cat table\n table['value'] = ''\n table = table.set_index([table.index, 'value'])\n\n # add pval column\n if self._pval and self._pval_adjust:\n table = table.join(self._htest_table[['P-Value (adjusted)',\n 'Test']])\n elif self._pval:\n table = table.join(self._htest_table[['P-Value', 'Test']])\n\n # add standardized mean difference (SMD) column/s\n if self._smd:\n table = table.join(self.smd_table)\n\n # join the overall column if needed\n if self._groupby and overall:\n table = 
table.join(pd.concat([self.cont_describe_all['t1_summary'].\n Overall], axis=1, keys=[\"Overall\"]))\n\n return table\n\n def _create_cat_table(self, data, overall):\n \"\"\"\n Create table one for categorical data.\n\n Returns\n ----------\n table : pandas DataFrame\n A table summarising the categorical variables.\n \"\"\"\n table = self.cat_describe['t1_summary'].copy()\n\n # add the total count of null values across all levels\n isnull = data[self._categorical].isnull().sum().to_frame(name='Missing')\n isnull.index = isnull.index.rename('variable')\n try:\n table = table.join(isnull)\n # if columns form a CategoricalIndex, need to convert to string first\n except TypeError:\n table.columns = table.columns.astype(str)\n table = table.join(isnull)\n\n # add pval column\n if self._pval and self._pval_adjust:\n table = table.join(self._htest_table[['P-Value (adjusted)',\n 'Test']])\n elif self._pval:\n table = table.join(self._htest_table[['P-Value', 'Test']])\n\n # add standardized mean difference (SMD) column/s\n if self._smd:\n table = table.join(self.smd_table)\n\n # join the overall column if needed\n if self._groupby and overall:\n table = table.join(pd.concat([self.cat_describe_all['t1_summary'].\n Overall], axis=1, keys=[\"Overall\"]))\n\n return table\n\n def _create_tableone(self, data):\n \"\"\"\n Create table 1 by combining the continuous and categorical tables.\n\n Returns\n ----------\n table : pandas DataFrame\n The complete table one.\n \"\"\"\n if self._continuous and self._categorical:\n # support pandas<=0.22\n try:\n table = pd.concat([self.cont_table, self.cat_table],\n sort=False)\n except TypeError:\n table = pd.concat([self.cont_table, self.cat_table])\n elif self._continuous:\n table = self.cont_table\n elif self._categorical:\n table = self.cat_table\n\n # ensure column headers are strings before reindexing\n table = table.reset_index().set_index(['variable', 'value'])\n table.columns = table.columns.values.astype(str)\n\n # sort the table rows\n sort_columns = ['Missing', 'P-Value', 'P-Value (adjusted)', 'Test']\n if self._smd:\n sort_columns = sort_columns + list(self.smd_table.columns)\n\n if self._sort and isinstance(self._sort, bool):\n new_index = sorted(table.index.values, key=lambda x: x[0].lower())\n elif self._sort and isinstance(self._sort, str) and (self._sort in\n sort_columns):\n try:\n new_index = table.sort_values(self._sort).index\n except KeyError:\n new_index = sorted(table.index.values,\n key=lambda x: self._columns.index(x[0]))\n warnings.warn('Sort variable not found: {}'.format(self._sort))\n elif self._sort and isinstance(self._sort, str) and (self._sort not in\n sort_columns):\n new_index = sorted(table.index.values,\n key=lambda x: self._columns.index(x[0]))\n warnings.warn('Sort must be in the following ' +\n 'list: {}.'.format(self._sort))\n else:\n # sort by the columns argument\n new_index = sorted(table.index.values,\n key=lambda x: self._columns.index(x[0]))\n table = table.reindex(new_index)\n\n # round pval column and convert to string\n if self._pval and self._pval_adjust:\n table['P-Value (adjusted)'] = table['P-Value (adjusted)'].apply(\n '{:.3f}'.format).astype(str)\n table.loc[table['P-Value (adjusted)'] == '0.000',\n 'P-Value (adjusted)'] = '<0.001'\n elif self._pval:\n table['P-Value'] = table['P-Value'].apply(\n '{:.3f}'.format).astype(str)\n table.loc[table['P-Value'] == '0.000', 'P-Value'] = '<0.001'\n\n # round smd columns and convert to string\n if self._smd:\n for c in list(self.smd_table.columns):\n table[c] = 
table[c].apply('{:.3f}'.format).astype(str)\n table.loc[table[c] == '0.000', c] = '<0.001'\n\n # if an order is specified, apply it\n if self._order:\n for k in self._order:\n\n # Skip if the variable isn't present\n try:\n all_var = table.loc[k].index.unique(level='value')\n except KeyError:\n if k not in self._groupby:\n warnings.warn(\"Order variable not found: {}\".format(k))\n continue\n\n # Remove value from order if it is not present\n if [i for i in self._order[k] if i not in all_var]:\n rm_var = [i for i in self._order[k] if i not in all_var]\n self._order[k] = [i for i in self._order[k]\n if i in all_var]\n warnings.warn((\"Order value not found: \"\n \"{}: {}\").format(k, rm_var))\n\n new_seq = [(k, '{}'.format(v)) for v in self._order[k]]\n new_seq += [(k, '{}'.format(v)) for v in all_var\n if v not in self._order[k]]\n\n # restructure to match the original idx\n new_idx_array = np.empty((len(new_seq),), dtype=object)\n new_idx_array[:] = [tuple(i) for i in new_seq]\n orig_idx = table.index.values.copy()\n orig_idx[table.index.get_loc(k)] = new_idx_array\n table = table.reindex(orig_idx)\n\n # set the limit on the number of categorical variables\n if self._limit:\n levelcounts = data[self._categorical].nunique()\n for k, _ in levelcounts.iteritems():\n\n # set the limit for the variable\n if (isinstance(self._limit, int)\n and levelcounts[k] >= self._limit):\n limit = self._limit\n elif isinstance(self._limit, dict) and k in self._limit:\n limit = self._limit[k]\n else:\n continue\n\n if not self._order or (self._order and k not in self._order):\n # re-order the variables by frequency\n count = data[k].value_counts().sort_values(ascending=False)\n new_idx = [(k, '{}'.format(i)) for i in count.index]\n else:\n # apply order\n all_var = table.loc[k].index.unique(level='value')\n new_idx = [(k, '{}'.format(v)) for v in self._order[k]]\n new_idx += [(k, '{}'.format(v)) for v in all_var\n if v not in self._order[k]]\n\n # restructure to match the original idx\n new_idx_array = np.empty((len(new_idx),), dtype=object)\n new_idx_array[:] = [tuple(i) for i in new_idx]\n orig_idx = table.index.values.copy()\n orig_idx[table.index.get_loc(k)] = new_idx_array\n table = table.reindex(orig_idx)\n\n # drop the rows > the limit\n table = table.drop(new_idx_array[limit:])\n\n # insert n row\n n_row = pd.DataFrame(columns=['variable', 'value', 'Missing'])\n n_row = n_row.set_index(['variable', 'value'])\n n_row.loc['n', 'Missing'] = None\n\n # support pandas<=0.22\n try:\n table = pd.concat([n_row, table], sort=False)\n except TypeError:\n table = pd.concat([n_row, table])\n\n if self._groupbylvls == ['Overall']:\n table.loc['n', 'Overall'] = len(data.index)\n else:\n if self._overall:\n table.loc['n', 'Overall'] = len(data.index)\n for g in self._groupbylvls:\n ct = data[self._groupby][data[self._groupby] == g].count()\n table.loc['n', '{}'.format(g)] = ct\n\n # only display data in first level row\n dupe_mask = table.groupby(level=[0]).cumcount().ne(0)\n dupe_columns = ['Missing']\n optional_columns = ['P-Value', 'P-Value (adjusted)', 'Test']\n if self._smd:\n optional_columns = optional_columns + list(self.smd_table.columns)\n for col in optional_columns:\n if col in table.columns.values:\n dupe_columns.append(col)\n\n table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')\n\n # remove Missing column if not needed\n if not self._isnull:\n table = table.drop('Missing', axis=1)\n\n if self._pval and not self._pval_test_name:\n table = table.drop('Test', axis=1)\n\n # replace nans 
with empty strings\n table = table.fillna('')\n\n # add column index\n if not self._groupbylvls == ['Overall']:\n # rename groupby variable if requested\n c = self._groupby\n if self._alt_labels:\n if self._groupby in self._alt_labels:\n c = self._alt_labels[self._groupby]\n\n c = 'Grouped by {}'.format(c)\n table.columns = pd.MultiIndex.from_product([[c], table.columns])\n\n # display alternative labels if assigned\n table = table.rename(index=self._create_row_labels(), level=0)\n\n # ensure the order of columns is consistent\n if self._groupby and self._order and (self._groupby in self._order):\n header = ['{}'.format(v) for v in table.columns.levels[1].values]\n cols = self._order[self._groupby] + ['{}'.format(v)\n for v in header\n if v not in\n self._order[self._groupby]]\n elif self._groupby:\n cols = ['{}'.format(v) for v in table.columns.levels[1].values]\n else:\n cols = ['{}'.format(v) for v in table.columns.values]\n\n if self._groupby and self._overall:\n cols = ['Overall'] + [x for x in cols if x != 'Overall']\n\n if 'Missing' in cols:\n cols = ['Missing'] + [x for x in cols if x != 'Missing']\n\n # move optional_columns to the end of the dataframe\n for col in optional_columns:\n if col in cols:\n cols = [x for x in cols if x != col] + [col]\n\n if self._groupby:\n table = table.reindex(cols, axis=1, level=1)\n else:\n table = table.reindex(cols, axis=1)\n\n try:\n if 'Missing' in self._alt_labels or 'Overall' in self._alt_labels:\n table = table.rename(columns=self._alt_labels)\n except TypeError:\n pass\n\n # remove the 'variable, value' column names in the index\n table = table.rename_axis([None, None])\n\n return table\n\n def _create_row_labels(self):\n \"\"\"\n Take the original labels for rows. Rename if alternative labels are\n provided. Append label suffix if label_suffix is True.\n\n Returns\n ----------\n labels : dictionary\n Dictionary, keys are original column name, values are final label.\n\n \"\"\"\n # start with the original column names\n labels = {}\n for c in self._columns:\n labels[c] = c\n\n # replace column names with alternative names if provided\n if self._alt_labels:\n for k in self._alt_labels.keys():\n labels[k] = self._alt_labels[k]\n\n # append the label suffix\n if self._label_suffix:\n for k in labels.keys():\n if k in self._nonnormal:\n if self._min_max and k in self._min_max:\n labels[k] = \"{}, {}\".format(labels[k], \"median [min,max]\")\n else:\n labels[k] = \"{}, {}\".format(labels[k], \"median [Q1,Q3]\")\n elif k in self._categorical:\n labels[k] = \"{}, {}\".format(labels[k], \"n (%)\")\n else:\n if self._min_max and k in self._min_max:\n labels[k] = \"{}, {}\".format(labels[k], \"mean [min,max]\")\n else:\n labels[k] = \"{}, {}\".format(labels[k], \"mean (SD)\")\n\n return labels\n\n # warnings\n def _non_continuous_warning(self, c):\n msg = (\"'{}' has all non-numeric values. Consider including \"\n \"it in the list of categorical variables.\").format(c)\n warnings.warn(msg, RuntimeWarning, stacklevel=2)\n"
] | [
[
"numpy.matrix",
"numpy.nanmax",
"numpy.nanmedian",
"numpy.sqrt",
"numpy.asarray",
"numpy.nanmin",
"pandas.DataFrame",
"numpy.mean",
"numpy.nanmean",
"scipy.stats.fisher_exact",
"numpy.nanstd",
"numpy.where",
"pandas.read_csv",
"scipy.stats.chi2_contingency",
"numpy.size",
"numpy.std",
"numpy.diag_indices_from",
"numpy.outer",
"pandas.set_option",
"pandas.concat",
"numpy.linalg.inv",
"numpy.isnan",
"pandas.MultiIndex.from_product",
"pandas.pivot_table",
"numpy.nanpercentile",
"pandas.isnull",
"scipy.stats.normaltest",
"numpy.percentile",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mateuszbuda/duke-dbt-detection | [
"cf6a6b623fef24a3f36d3187e9f48b12b0e112bc"
] | [
"transform.py"
] | [
"import numpy as np\nfrom skimage.transform import rescale\nfrom torchvision.transforms import Compose\n\nfrom dataset import TomoDetectionDataset\n\n\ndef transforms(train=True):\n if train:\n return Compose(\n [Crop((TomoDetectionDataset.img_height, TomoDetectionDataset.img_width))]\n )\n else:\n return Crop(\n (TomoDetectionDataset.img_height, TomoDetectionDataset.img_width),\n random=False,\n )\n\n\nclass Scale(object):\n\n def __init__(self, scale):\n assert isinstance(scale, (float, tuple))\n if isinstance(scale, float):\n assert 0.0 < scale < 1.0\n self.scale = (1.0 - scale, 1.0 + scale)\n else:\n assert len(scale) == 2\n assert 0.0 < scale[0] < scale[1]\n self.scale = scale\n\n def __call__(self, sample):\n image, boxes = sample\n\n # don't augment normal cases\n if len(boxes[\"X\"]) == 0:\n return image, boxes\n\n sample_scale = np.random.rand()\n sample_scale = sample_scale * (self.scale[1] - self.scale[0]) + self.scale[0]\n\n scaled = rescale(\n image, sample_scale, multichannel=True, mode=\"constant\", anti_aliasing=False\n )\n\n boxes[\"X\"] = [int(x * sample_scale) for x in boxes[\"X\"]]\n boxes[\"Y\"] = [int(y * sample_scale) for y in boxes[\"Y\"]]\n boxes[\"Width\"] = [int(w * sample_scale) for w in boxes[\"Width\"]]\n boxes[\"Height\"] = [int(h * sample_scale) for h in boxes[\"Height\"]]\n\n return scaled, boxes\n\n\nclass Crop(object):\n\n def __init__(self, crop_size, random=True):\n assert isinstance(crop_size, (int, tuple))\n if isinstance(crop_size, int):\n self.crop_size = (crop_size, crop_size)\n else:\n assert len(crop_size) == 2\n self.crop_size = crop_size\n self.random = random\n\n def __call__(self, sample):\n image, boxes = sample\n\n h = image.shape[0]\n w = image.shape[1]\n y_max = max(h - self.crop_size[0], 1)\n x_max = max(w - self.crop_size[1], 1) // 2\n if image[h // 2, self.crop_size[1]] == 0:\n x_max //= 2\n y_min = x_min = 0\n x_max_box = 0\n\n # don't crop boxes\n margin = 16\n if len(boxes[\"X\"]) > 0:\n y_min_box = np.min(np.array(boxes[\"Y\"]) - np.array(boxes[\"Height\"]) // 2)\n x_min_box = np.min(np.array(boxes[\"X\"]) - np.array(boxes[\"Width\"]) // 2)\n y_max_box = np.max(np.array(boxes[\"Y\"]) + np.array(boxes[\"Height\"]) // 2)\n x_max_box = np.max(np.array(boxes[\"X\"]) + np.array(boxes[\"Width\"]) // 2)\n y_min = max(y_min, min(h, y_max_box + margin) - self.crop_size[0])\n x_min = max(x_min, min(w, x_max_box + margin) - self.crop_size[1])\n y_max = min(y_max, max(0, y_min_box - margin))\n x_max = min(x_max, max(0, x_min_box - margin))\n if x_max <= x_min:\n x_max = x_min + 1\n if y_max <= y_min:\n y_max = y_min + 1\n\n if self.random:\n y_offset = np.random.randint(y_min, y_max)\n x_offset = np.random.randint(x_min, x_max)\n else:\n y_offset = (y_min + y_max) // 2\n if x_max_box + margin < self.crop_size[1]:\n x_offset = 0\n else:\n x_offset = (x_min + x_max) // 2\n\n cropped = image[\n y_offset : y_offset + self.crop_size[0],\n x_offset : x_offset + self.crop_size[1],\n ]\n\n # don't let empty crop\n if np.max(cropped) == 0:\n y_offset = y_max // 2\n x_offset = 0\n cropped = image[\n y_offset : y_offset + self.crop_size[0],\n x_offset : x_offset + self.crop_size[1],\n ]\n\n boxes[\"X\"] = [max(0, x - x_offset) for x in boxes[\"X\"]]\n boxes[\"Y\"] = [max(0, y - y_offset) for y in boxes[\"Y\"]]\n\n return cropped, boxes\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.random.rand",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
helloimlixin/AGFVisualization | [
"c35e4b1e88d6fc6da7fdaca3b2d5e9b1b4c5f25f",
"c35e4b1e88d6fc6da7fdaca3b2d5e9b1b4c5f25f"
] | [
"baselines/cnn_layer_visualization.py",
"baselines/deep_dream.py"
] | [
"\"\"\"\nCreated on Sat Nov 18 23:12:08 2017\n\n@author: Utku Ozbulak - github.com/utkuozbulak\n\"\"\"\nimport os\nimport numpy as np\n\nimport torch\nfrom torch.optim import Adam\nfrom torchvision import models\n\nfrom baselines.misc_functions import preprocess_image, recreate_image, save_image\n\n\nclass CNNLayerVisualization():\n \"\"\"\n Produces an image that minimizes the loss of a convolution\n operation for a specific layer and filter\n \"\"\"\n def __init__(self, model, selected_layer, selected_filter):\n self.model = model\n self.model.eval()\n self.selected_layer = selected_layer\n self.selected_filter = selected_filter\n self.conv_output = 0\n # Create the folder to export images if not exists\n if not os.path.exists('../generated'):\n os.makedirs('../generated')\n\n def hook_layer(self):\n def hook_function(module, grad_in, grad_out):\n # Gets the conv output of the selected filter (from selected layer)\n self.conv_output = grad_out[0, self.selected_filter]\n # Hook the selected layer\n self.model[self.selected_layer].register_forward_hook(hook_function)\n\n def visualise_layer_with_hooks(self):\n # Hook the selected layer\n self.hook_layer()\n # Generate a random image\n random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))\n # Process image and return variable\n processed_image = preprocess_image(random_image, False)\n # Define optimizer for the image\n optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)\n for i in range(1, 31):\n optimizer.zero_grad()\n # Assign create image to a variable to move forward in the model\n x = processed_image\n for index, layer in enumerate(self.model):\n # Forward pass layer by layer\n # x is not used after this point because it is only needed to trigger\n # the forward hook function\n x = layer(x)\n # Only need to forward until the selected layer is reached\n if index == self.selected_layer:\n # (forward hook function triggered)\n break\n # Loss function is the mean of the output of the selected layer/filter\n # We try to minimize the mean of the output of that specific filter\n loss = -torch.mean(self.conv_output)\n print('Iteration:', str(i), 'Loss:', \"{0:.2f}\".format(loss.data.numpy()))\n # Backward\n loss.backward()\n # Update image\n optimizer.step()\n # Recreate image\n self.created_image = recreate_image(processed_image)\n # Save image\n if i % 5 == 0:\n im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \\\n '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'\n save_image(self.created_image, im_path)\n\n def visualise_layer_without_hooks(self):\n # Process image and return variable\n # Generate a random image\n random_image = np.uint8(np.random.uniform(150, 180, (224, 224, 3)))\n # Process image and return variable\n processed_image = preprocess_image(random_image, False)\n # Define optimizer for the image\n optimizer = Adam([processed_image], lr=0.1, weight_decay=1e-6)\n for i in range(1, 31):\n optimizer.zero_grad()\n # Assign create image to a variable to move forward in the model\n x = processed_image\n for index, layer in enumerate(self.model):\n # Forward pass layer by layer\n x = layer(x)\n if index == self.selected_layer:\n # Only need to forward until the selected layer is reached\n # Now, x is the output of the selected layer\n break\n # Here, we get the specific filter from the output of the convolution operation\n # x is a tensor of shape 1x512x28x28.(For layer 17)\n # So there are 512 unique filter outputs\n # Following line selects a filter from 512 filters so 
self.conv_output will become\n # a tensor of shape 28x28\n self.conv_output = x[0, self.selected_filter]\n # Loss function is the mean of the output of the selected layer/filter\n # We try to minimize the mean of the output of that specific filter\n loss = -torch.mean(self.conv_output)\n print('Iteration:', str(i), 'Loss:', \"{0:.2f}\".format(loss.data.numpy()))\n # Backward\n loss.backward()\n # Update image\n optimizer.step()\n # Recreate image\n self.created_image = recreate_image(processed_image)\n # Save image\n if i % 5 == 0:\n im_path = '../generated/layer_vis_l' + str(self.selected_layer) + \\\n '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'\n save_image(self.created_image, im_path)\n\n\nif __name__ == '__main__':\n cnn_layer = 17\n filter_pos = 5\n # Fully connected layer is not needed\n pretrained_model = models.vgg16(pretrained=True).features\n layer_vis = CNNLayerVisualization(pretrained_model, cnn_layer, filter_pos)\n\n # Layer visualization with pytorch hooks\n layer_vis.visualise_layer_with_hooks()\n\n # Layer visualization without pytorch hooks\n # layer_vis.visualise_layer_without_hooks()\n",
"\"\"\"\nCreated on Mon Nov 21 21:57:29 2017\n\n@author: Utku Ozbulak - github.com/utkuozbulak\n\"\"\"\nimport os\nfrom PIL import Image\n\nimport torch\nfrom torch.optim import SGD\nfrom torchvision import models\n\nfrom baselines.misc_functions import preprocess_image, recreate_image, save_image\n\n\nclass DeepDream():\n \"\"\"\n Produces an image that minimizes the loss of a convolution\n operation for a specific layer and filter\n \"\"\"\n def __init__(self, model, selected_layer, selected_filter, im_path):\n self.model = model\n self.model.eval()\n self.selected_layer = selected_layer\n self.selected_filter = selected_filter\n self.conv_output = 0\n # Generate a random image\n self.created_image = Image.open(im_path).convert('RGB')\n # Hook the layers to get result of the convolution\n self.hook_layer()\n # Create the folder to export images if not exists\n if not os.path.exists('../generated'):\n os.makedirs('../generated')\n\n def hook_layer(self):\n def hook_function(module, grad_in, grad_out):\n # Gets the conv output of the selected filter (from selected layer)\n self.conv_output = grad_out[0, self.selected_filter]\n\n # Hook the selected layer\n self.model[self.selected_layer].register_forward_hook(hook_function)\n\n def dream(self):\n # Process image and return variable\n self.processed_image = preprocess_image(self.created_image, True)\n # Define optimizer for the image\n # Earlier layers need higher learning rates to visualize whereas layer layers need less\n optimizer = SGD([self.processed_image], lr=12, weight_decay=1e-4)\n for i in range(1, 251):\n optimizer.zero_grad()\n # Assign create image to a variable to move forward in the model\n x = self.processed_image\n for index, layer in enumerate(self.model):\n # Forward\n x = layer(x)\n # Only need to forward until we the selected layer is reached\n if index == self.selected_layer:\n break\n # Loss function is the mean of the output of the selected layer/filter\n # We try to minimize the mean of the output of that specific filter\n loss = -torch.mean(self.conv_output)\n print('Iteration:', str(i), 'Loss:', \"{0:.2f}\".format(loss.data.numpy()))\n # Backward\n loss.backward()\n # Update image\n optimizer.step()\n # Recreate image\n self.created_image = recreate_image(self.processed_image)\n # Save image every 20 iteration\n if i % 10 == 0:\n print(self.created_image.shape)\n im_path = '../generated/ddream_l' + str(self.selected_layer) + \\\n '_f' + str(self.selected_filter) + '_iter' + str(i) + '.jpg'\n save_image(self.created_image, im_path)\n\n\nif __name__ == '__main__':\n # THIS OPERATION IS MEMORY HUNGRY! #\n # Because of the selected image is very large\n # If it gives out of memory error or locks the computer\n # Try it with a smaller image\n cnn_layer = 34\n filter_pos = 94\n\n im_path = '../input_images/dd_tree.jpg'\n # Fully connected layer is not needed\n pretrained_model = models.vgg19(pretrained=True).features\n dd = DeepDream(pretrained_model, cnn_layer, filter_pos, im_path)\n # This operation can also be done without Pytorch hooks\n # See layer visualisation for the implementation without hooks\n dd.dream()\n"
] | [
[
"torch.optim.Adam",
"numpy.random.uniform",
"torch.mean"
],
[
"torch.mean",
"torch.optim.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
flios/Proj | [
"fa95e19853eacbbc07a9ac2982cd4df92f53c338",
"fa95e19853eacbbc07a9ac2982cd4df92f53c338"
] | [
"main.py",
"log_process.py"
] | [
"import argparse\nfrom os.path import dirname, abspath, join, exists\nimport os\n\nimport torch\nfrom torch.optim import Adadelta, Adam, lr_scheduler\nfrom torch import nn\nimport numpy as np\n\nfrom download_dataset import DATASETS\nfrom preprocessors import DATASET_TO_PREPROCESSOR\nimport dictionaries\nfrom dataloaders import TextDataset, TextDataLoader\nimport trainers\nfrom trainers import Trainer\nfrom evaluators import Evaluator\n\nimport model.VDCNN as vdcnn_model\nfrom model.VDCNN import VDCNN\n\nimport utils\nimport sys\n\ndef is_interactive():\n import __main__ as main\n return not hasattr(main, '__file__')\n\n# Random seed\nnp.random.seed(0)\ntorch.manual_seed(0)\n\n# Arguments parser\nparser = argparse.ArgumentParser(description=\"Very Deep Convolutional Networks for Text Classification\")\nparser.add_argument('--dataset', type=str, choices=DATASETS, default='dbpedia')\nparser.add_argument('--use_gpu', dest='use_gpu', action='store_true')\nparser.set_defaults(use_gpu=torch.cuda.is_available())\nparser.add_argument('--batch_size', type=int, default=50)\nparser.add_argument('--initial_lr', type=float, default=0.0001)\nparser.add_argument('--lr_schedule', action='store_true')\nparser.add_argument('--optimizer', type=str, default='Adam')\nparser.add_argument('--load_model', type=str, default=None)\n\nparser.set_defaults(preprocess_level='char')\nparser.add_argument('--dictionary', type=str, default='VDCNNDictionary', choices=['CharCNNDictionary', 'VDCNNDictionary', 'AllCharDictionary'])\nparser.add_argument('--min_length', type=int, default=1024)\nparser.add_argument('--max_length', type=int, default=1024)\nparser.add_argument('--epochs', type=int, default=100)\nparser.add_argument('--depth', type=str, choices=['vdcnn9', 'vdcnn17', 'vdcnn29', 'vdcnn49'], default='vdcnn49')\nparser.add_argument('--embed_size', type=int, default=16)\nparser.add_argument('--optional_shortcut', action='store_true')\nparser.add_argument('--kernel_size', type=int, default=3)\nparser.add_argument('--sort_dataset', action='store_true')\nparser.add_argument('--kmax', type=int, default=8)\nparser.add_argument('--pooling',type=str, choices=['conv','kmaxpool','maxpool'], default='maxpool')\nparser.add_argument('--num_workers', type=int, default=0)\nparser.set_defaults(model=VDCNN)\n\nif is_interactive():\n params = []\nelse:\n params = sys.argv[1:]\n\nargs = vars(parser.parse_args(params))\n\n# Logging\nmodel_name = args.get('model').__name__+'_'+args.get('depth')\nlogger = utils.get_logger(model_name)\n\nlogger.info('Arguments: {}'.format(args))\n\nlogger.info(\"Preprocessing...\")\nPreprocessor = DATASET_TO_PREPROCESSOR[args.get('dataset')]\npreprocessor = Preprocessor(args.get('dataset'))\ntrain_data, val_data, test_data = preprocessor.preprocess(level=args.get('preprocess_level'))\n\nlogger.info(\"Building dictionary...\")\nDictionary = getattr(dictionaries, args.get('dictionary'))\ndictionary = Dictionary(args)\ndictionary.build_dictionary(train_data)\n\nlogger.info(\"Constructing model...\")\nmodel_name = getattr(vdcnn_model, args.get('depth'))\nmodel = model_name(n_classes=preprocessor.n_classes, vocabulary_size=dictionary.vocabulary_size, **args)\n\n# load exit model\nif args.get('load_model') is not None:\n logger.info(\"Loading exit model...\")\n base_dir = dirname(abspath(trainers.__file__))\n checkpoint_dir = join(base_dir, 'checkpoints')\n model_name = args.get('load_model')\n checkpoint_filepath = join(checkpoint_dir, model_name)\n model.load_state_dict(torch.load(checkpoint_filepath))\n 
logger.info(checkpoint_filepath)\n\nif args.get('use_gpu'):\n model = model.cuda()\n\nlogger.info(\"Making dataset & dataloader...\")\ntrain_dataset = TextDataset(train_data, dictionary, args.get('sort_dataset'), args.get('min_length'), args.get('max_length'))\ntrain_dataloader = TextDataLoader(dataset=train_dataset, dictionary=dictionary, batch_size=args.get('batch_size'), shuffle = not args.get('sort_dataset'), num_workers = args.get('num_workers'))\nval_dataset = TextDataset(val_data, dictionary, args.get('sort_dataset'), args.get('min_length'), args.get('max_length'))\nval_dataloader = TextDataLoader(dataset=val_dataset, dictionary=dictionary, batch_size=args.get('batch_size'), shuffle = not args.get('sort_dataset'), num_workers = args.get('num_workers'))\n# test_dataset = TextDataset(test_data, dictionary, args.get('sort_dataset'), args.get('min_length'), args.get('max_length'))\n# test_dataloader = TextDataLoader(dataset=test_dataset, dictionary=dictionary, batch_size=args.get('batch_size'), shuffle = not args.get('sort_dataset'))\n\nlogger.info(\"Training...\")\n# trainable_params = [p for p in model.parameters() if p.requires_grad]\nif args.get('optimizer') == 'Adam':\n optimizer = Adam(model.parameters(), lr=args.get('initial_lr'))\nelif args.get('optimizer') == 'Adadelta':\n optimizer = Adadelta(params=trainable_params, lr=args.get('initial_lr'), weight_decay=0.95)\nelse:\n raise NotImplementedError()\n\nlr_plateau = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5)\ncriterion = nn.CrossEntropyLoss\ntrainer = Trainer(model, train_dataloader, val_dataloader,\n criterion=criterion, optimizer=optimizer,\n lr_schedule=args.get('lr_schedule'), lr_scheduler=lr_plateau,\n use_gpu=args.get('use_gpu'), logger=logger)\ntrainer.run(epochs=args.get('epochs'))\nlogger.info(\"Evaluating...\")\nlogger.info('Best Model: {}'.format(trainer.best_checkpoint_filepath))\n",
"import re\nimport argparse\nimport sys\nimport numpy as np\nimport pandas as pd\n\n\ndef is_interactive():\n import __main__ as main\n return not hasattr(main, '__file__')\n\n\nparser = argparse.ArgumentParser(description=\"Log Process\")\nparser.add_argument('--log_type', type=str, default='train', choices = ['test', 'train'])\nparser.add_argument('--log_file', type=str, default='checkpoints\\shortcut_yelp_review_polarity\\VDCNN_vdcnn49-2018-12-07 23-48-27.log')\n\nif is_interactive():\n params = []\nelse:\n params = sys.argv[1:]\nargs = vars(parser.parse_args(params))\n\npattern_matrics = '\\d+\\.\\d+'\n\n\nif args.get('log_type') == 'test':\n pattern_epoch = '\\d+\\.ckpt'\n\n test_info = []\n all_info = []\n with open(args.get('log_file'), 'r') as f:\n for line in f:\n result_epoch = re.findall(pattern_epoch, line)\n result_matrics = re.findall(pattern_matrics, line)\n\n if result_epoch != []:\n test_info.append(int(result_epoch[0].split('.')[0]))\n elif result_matrics != []:\n test_info.extend([float(v) for v in result_matrics])\n all_info.append(np.array(test_info))\n test_info = []\n info_df = pd.DataFrame(data=all_info,columns=['epoch','test_loss','accuracy'])\n info_df = info_df.set_index('epoch')\n info_df = info_df.sort_index()\n save_name = args.get('log_file').split('.')[0]\n info_df.to_csv(save_name+'.csv')\nelif args.get('log_type') == 'train':\n pattern_epoch = 'Epoch: \\d+'\n pattern_loss = 'Loss: \\d+\\.\\d+'\n pattern_acc = 'Acc: \\d+\\.\\d+\\%'\n\n train_info = []\n all_info = []\n with open(args.get('log_file'), 'r') as f:\n for line in f:\n result_epoch = re.findall(pattern_epoch, line)\n\n if result_epoch != []:\n train_info.append(int(re.findall('\\d+', result_epoch[0])[0]))\n result_loss = re.findall(pattern_loss, line)\n result_acc = re.findall(pattern_acc, line)\n train_info.extend(list(map(float, [re.findall('\\d+\\.\\d+', loss)[0] for loss in result_loss])))\n train_info.extend(list(map(float, [re.findall('\\d+\\.\\d+', acc)[0] for acc in result_acc])))\n all_info.append(np.array(train_info))\n train_info = []\n info_df = pd.DataFrame(data=all_info,columns=['epoch','train_loss', 'val_loss', 'train_acc', 'val_acc'])\n info_df = info_df.set_index('epoch')\n info_df = info_df.sort_index()\n save_name = args.get('log_file').split('.')[0]\n info_df.to_csv(save_name+'.csv')\n"
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.cuda.is_available"
],
[
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cnheider/python-acoustics | [
"fbc87454422c41e1a39e282d7680126a6d8014dd"
] | [
"tests/test_bands.py"
] | [
"import numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\nimport pytest\n\nfrom acoustics.bands import (octave, octave_high, octave_low, third, third_low, third_high, third2oct, _check_band_type)\n\n\[email protected]\ndef octave_real():\n return np.array([16, 31.5, 63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000])\n\n\[email protected]\ndef third_real():\n return np.array([\n 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600,\n 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500, 16000, 20000\n ])\n\n\ndef test_octave(octave_real):\n generated = octave(16, 16000)\n real = octave_real\n assert_array_equal(generated, real)\n\n\ndef test_octave_high(octave_real):\n generated = octave_high(16, 16000)\n real = octave_real * np.sqrt(2)\n assert_array_almost_equal(generated, real)\n\n\ndef test_octave_low(octave_real):\n generated = octave_low(16, 16000)\n real = real = octave_real / np.sqrt(2)\n assert_array_almost_equal(generated, real)\n\n\ndef test_third(third_real):\n generated = third(12.5, 20000)\n real = third_real\n assert_array_equal(generated, real)\n\n\ndef test_third_high(third_real):\n generated = third_high(12.5, 20000)\n real = third_real * 2**(1 / 6)\n assert_array_almost_equal(generated, real)\n\n\ndef test_third_low(third_real):\n generated = third_low(12.5, 20000)\n real = third_real / 2**(1 / 6)\n assert_array_almost_equal(generated, real)\n\n\ndef test_third2oct():\n\n levels = np.array([10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])\n generated = third2oct(levels)\n real = np.array([14.77121255, 14.77121255, 14.77121255])\n assert_array_almost_equal(generated, real)\n\n\ndef test_third2oct_2darray_axis0():\n levels = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],\n [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]])\n generated = third2oct(levels, axis=0)\n real = np.array([100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0])\n assert_array_almost_equal(generated, real)\n\n\ndef test_third2oct_2darray_axis1():\n levels = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],\n [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]])\n generated = third2oct(levels, axis=1)\n real = np.array([[5.77121255, 5.77121255, 5.77121255], [14.77121255, 14.77121255, 14.77121255],\n [104.77121255, 104.77121255, 104.77121255]])\n assert_array_almost_equal(generated, real)\n\n\ndef test_third2oct_3darray_axis0():\n\n # Array of ones with shape (3,4,5)\n levels = np.array([[[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]],\n [[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]],\n [[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]]])\n\n generated = third2oct(levels, axis=0)\n real = np.array([[5.77121255, 5.77121255, 5.77121255, 5.77121255, 5.77121255],\n [5.77121255, 5.77121255, 5.77121255, 5.77121255, 5.77121255],\n [5.77121255, 5.77121255, 5.77121255, 5.77121255, 5.77121255],\n [5.77121255, 5.77121255, 5.77121255, 5.77121255, 5.77121255]])\n assert_array_almost_equal(generated, real)\n\n\ndef test_third2oct_2darray():\n levels = np.array([[100, 95, 80, 55, 65, 85, 75, 70, 90, 95, 105, 110],\n [100, 95, 80, 55, 65, 85, 75, 70, 90, 95, 105, 110]])\n generated = third2oct(levels, axis=1)\n real = 
np.array([[101.22618116, 85.04751156, 90.17710468, 111.29641738],\n [101.22618116, 85.04751156, 90.17710468, 111.29641738]])\n assert_array_almost_equal(generated, real)\n\n\[email protected](\"freqs, expected\", [\n (np.array([125, 250, 500]), 'octave'),\n (np.array([12.5, 16, 20]), 'third'),\n (np.array([125, 250, 1000, 4000]), 'octave-unsorted'),\n (np.array([12.5, 800, 500, 5000]), 'third-unsorted'),\n (np.array([100, 200, 300, 400]), None),\n])\ndef test__check_band_type(freqs, expected):\n band_type = _check_band_type(freqs)\n assert_array_equal(band_type, expected)\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.sqrt",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bergr7/Bertelsmann_Arvato_Project | [
"b88d204ceb48642dcc1b2a03081e466cc5587ce1"
] | [
"preprocessing/supervised_model_data_preprocessing.py"
] | [
"# Data preprocessing for Supervised Learning model\n\n# Import relevant libraries\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import infer_dtype\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder\n\n\ndef load_data(path_file, dtype, sep):\n \"\"\"It loads in a .csv file and turn it into a pandas df.\n Parameters\n __________\n :param path_file: string. Path file\n :param dtype: dict. dtype for pd.read_csv pandas method\n :param sep: string. separator for pd.read_csv pandas method\n __________\n :return: .csv file converted into a Pandas DataFrame\n \"\"\"\n return pd.read_csv(path_file, dtype=dtype, sep=sep)\n\n\ndef map_unknowns(attributes, df):\n \"\"\"It maps unknown values identified during data exploration to NaN's.\n Parameters\n __________\n :param attributes: Attributes pandas DataFrame.\n :param df: MAILOUT Pandas DataFrame\n __________\n :return: mapped_df: MAILOUT Pandas DataFrame with unknown values mapped to NaN's\n \"\"\"\n # create a dict with original dtypes for each column\n original_dtypes = dict()\n for col in df.columns:\n original_dtypes[col] = str(df[col].dtype)\n\n # convert all columns to object type\n df.astype(dtype='str')\n # loop through all attributes\n for attribute in attributes.index:\n # for each attribute, retrieve a list with unknown values\n unknowns_list = attributes['Unknown'].loc[attribute].strip('][').split(', ')[0].split(',')\n # if there are unknown values, map them to NaN's\n if unknowns_list != ['']:\n if attribute in ['CAMEO_DEUG_2015', 'CAMEO_DEU_2015', 'CAMEO_INTL_2015']:\n df.loc[df[attribute].isin(['X','XX', '-1']), attribute] = np.nan\n else:\n df.loc[df[attribute].isin(unknowns_list), attribute] = np.nan\n\n # transform columns to original dtypes\n df.astype(original_dtypes, errors='ignore')\n\n mapped_df = df\n\n return mapped_df\n\n\ndef clean_df(mailout_train_df, mailout_test_df, train=True):\n \"\"\"It performs data cleaning on AZDIAS or CUSTOMERS dataframe.\n Parameters\n __________\n :param mailout_train_df: MAILOUT TRAIN Pandas DataFrame\n :param mailout_test_df: MAILOUT TEST Pandas DataFrame\n :param train: Boolean. 
If True, it performs cleaning on MAILOUT TRAIN df.\n If False, it performs cleaning on MAILOUT TEST.\n __________\n :return: Cleaned MAILOUT TRAIN or MAILOUT TEST Pandas DataFrame\n \"\"\"\n # drop rows with missing values in the RESPONSE\n mailout_train_df = mailout_train_df.loc[mailout_train_df['RESPONSE'].isin([0, 1]), :].copy()\n\n # columns to be dropped due to missing values proportion\n mailout_train_toDrop = check_mv_prop(mailout_train_df, 0.6, toDrop=True)\n mailout_test_toDrop = check_mv_prop(mailout_test_df, 0.6, toDrop=True)\n mailout_train_toDrop.extend(mailout_test_toDrop)\n toDrop = list(set(mailout_train_toDrop)) # find unique values\n\n # check which dataframe is going to be cleaned\n if train:\n # drop cols not useful for analysis or with lots of mv\n toDrop.extend(['Unnamed: 0', 'LNR', 'D19_LETZTER_KAUF_BRANCHE', 'EINGEFUEGT_AM', 'OST_WEST_KZ'])\n mailout_train_df = mailout_train_df.drop(toDrop, axis=1)\n\n # drop individuals with more than 150 missed values\n rows_toDrop = list(mailout_train_df.isnull().sum(axis=1).loc[mailout_train_df.isnull().sum(axis=1) > 150].index)\n mailout_train_df = mailout_train_df.drop(rows_toDrop, axis=0)\n\n # remove outliers in 'ANZ_HAUSHALTE_AKTIV' and 'ANZ_PERSONEN'\n mailout_train_df = mailout_train_df.loc[mailout_train_df['ANZ_HAUSHALTE_AKTIV'] < 10, :] # based on 1.5*IQR rule and attributes information\n mailout_train_df = mailout_train_df.loc[mailout_train_df['ANZ_PERSONEN'] < 3, :] # based on 1.5*IQR rule and attributes information\n\n df_cleaned = mailout_train_df\n\n return df_cleaned\n\n else:\n\n # drop cols not useful for analysis or with lots of mv\n toDrop.extend(['Unnamed: 0', 'D19_LETZTER_KAUF_BRANCHE', 'EINGEFUEGT_AM', 'OST_WEST_KZ'])\n mailout_test_df = mailout_test_df.drop(toDrop, axis=1)\n\n df_cleaned = mailout_test_df\n\n return df_cleaned\n\n\ndef check_mv_prop(df, p, toDrop=True):\n \"\"\"It checks the proportion of missing values for each col and prints which cols have more than p% missing values.\n INPUT:\n df: Pandas dataframe.\n p: float. Missing values proportion threshold.\n toDrop: Boolean. If true, condition is propotion of mv > p. condition is propotion of mv < p otherwise.\n\n OUTPUT:\n toDrop_lst: list of columns be dropped if toDrop = True.\n toImpute_lst: list of columns to be imputed if toImpute_lst = True.\n \"\"\"\n mvs = df.isnull().sum()\n if toDrop:\n toDrop_lst = []\n for col in df.columns:\n if mvs.loc[col] / df.shape[0] > p:\n print(\"{:.2f}% of {} are missing values\".format((df.isnull().sum().loc[col] / df.shape[0]) * 100, col))\n toDrop_lst.append(col)\n return toDrop_lst\n else:\n toImpute_lst = []\n for col in df.columns:\n if mvs.loc[col] / df.shape[0] <= p:\n print(\"{:.2f}% of {} are missing values\".format((df.isnull().sum().loc[col] / df.shape[0]) * 100, col))\n toImpute_lst.append(col)\n return toImpute_lst\n\n\ndef impute_mv(df, strategy):\n \"\"\"It performs imputation of missing values using skelarn SimpleImputer.\n Parameters\n __________\n :param df: MAILOUT Pandas DataFrame\n :param strategy: string. 
The imputation strategy for SimpleImputer\n __________\n :return: MAILOUT df with imputed values\n \"\"\"\n # instantiate SimpleImputer\n imputer = SimpleImputer(strategy=strategy)\n\n # impute missing values\n data_with_no_mv = imputer.fit_transform(df)\n\n # put back column names from df as fit_transform returns an array of shape (n_samples, n_features_new)\n imputed_df = pd.DataFrame(data_with_no_mv, columns=df.columns)\n\n return imputed_df\n\n\ndef label_encode_cameo(df):\n \"\"\"It performs label encoding on 'CAMEO_DEU_2015' feature using sklearn LabelEncoder.\n Parameters\n __________\n :param df: MAILOUT Pandas DataFrame\n __________\n :return: MAILOUT Pandas DataFrame with encoded 'CAMEO_DEU_2015'.\n \"\"\"\n # instantiate LabelEncoder\n lab_encoder = LabelEncoder()\n\n # pull out list of unique classes\n classes = list(df['CAMEO_DEU_2015'].unique())\n\n # fit encoder\n lab_encoder.fit(classes)\n\n # label encode 'CAMEO_DEU_2015'\n df['CAMEO_DEU_2015'] = lab_encoder.transform(df['CAMEO_DEU_2015'])\n\n encoded_df = df\n\n return encoded_df\n\n\ndef save_pickle_df(df, file_path, file_name):\n \"\"\"It saves preprocessed data frame as a pickle file in the /data folder.\n Parameters\n __________\n :param df: Preprocessed MAILOUT Pandas DataFrame\n :param file_path: string. File path for the pickle file\n :param file_name: string. File name for the pickle file\n __________\n :return: None\n \"\"\"\n # save df as a pickle\n df.to_pickle(file_path + file_name)\n\n\ndef main():\n if len(sys.argv) == 3:\n train_file_path, test_file_path = sys.argv[1:]\n\n # load in attributes.csv\n print(\"Loading attributes.csv...\")\n attributes = pd.read_csv('../data/attributes.csv', sep=';', names=['Type', 'Unknown'])\n\n # load in MAILOUT TRAIN data\n print(\"Loading MAILOUT TRAIN data...\")\n raw_mailout_train_df = load_data(train_file_path,\n dtype={'CAMEO_DEUG_2015': 'str', 'CAMEO_INTL_2015': 'str'},\n sep=','\n )\n\n # load in MAILOUT TEST data\n print(\"Loading MAILOUT TEST data...\")\n raw_mailout_test_df = load_data(test_file_path,\n dtype={'CAMEO_DEUG_2015': 'str', 'CAMEO_INTL_2015': 'str'},\n sep=','\n )\n # Preprocess MAILOUT TRAIN dataset\n # test say 20000 rows\n # raw_mailout_train_df = raw_mailout_train_df[:20000]\n # test say 20000 rows\n # raw_mailout_test_df = raw_mailout_test_df[:20000]\n\n # map unknown values to missing values\n print(\"Mapping unknown values to NaN's...\")\n mailout_train_df = map_unknowns(attributes=attributes, df=raw_mailout_train_df)\n\n # cleaning\n print(\"Cleaning AZDIAS data...\")\n mailout_train_df = clean_df(mailout_train_df, raw_mailout_test_df, train=True)\n\n # impute missing values with mode\n print(\"Imputing missing values...\")\n mailout_train_df = impute_mv(mailout_train_df, 'most_frequent')\n\n # encode 'CAMEO_DEU_2015'\n print(\"Encoding 'CAMEO_DEU_2015' variable...\")\n preprocessed_mailout_train_df = label_encode_cameo(mailout_train_df)\n\n assert preprocessed_mailout_train_df.isnull().any().mean() == 0.0, \"There are still missing values in the data.\"\n\n print(\"MAILOUT TRAIN data is preprocessed.\")\n print(\"Saving preprocessed data into a pickle file...\")\n\n # save preprocessed dataframe as a pickle file\n save_pickle_df(preprocessed_mailout_train_df, \"../data/\", \"MAILOUT_TRAIN_DF.pkl\")\n print(\"MAILOUT data has been saved as a pickle file in the ../data folder with the name MAILOUT_TRAIN_DF.pkl.\")\n print(\"Use pd.read_pickle to read it.\")\n\n # Preprocess MAILOUT TEST dataset\n # map unknown values to missing values\n 
print(\"Mapping unknown values to NaN's...\")\n mailout_test_df = map_unknowns(attributes=attributes, df=raw_mailout_test_df)\n\n # Cleaning\n print(\"Cleaning data...\")\n mailout_test_df = clean_df(raw_mailout_train_df, mailout_test_df, train=False)\n\n # impute missing values with mode\n print(\"Imputing missing values...\")\n mailout_test_df = impute_mv(mailout_test_df, 'most_frequent')\n\n # encode 'CAMEO_DEU_2015'\n print(\"Encoding 'CAMEO_DEU_2015' variable...\")\n preprocessed_mailout_test_df = label_encode_cameo(mailout_test_df)\n\n assert preprocessed_mailout_test_df.isnull().any().mean() == 0.0, \"There are still missing values in the data.\"\n\n # save preprocessed dataframe as a pickle file\n save_pickle_df(preprocessed_mailout_test_df, \"../data/\", \"MAILOUT_TEST_DF.pkl\")\n print(\"MAILOUT data has been saved as a pickle file in the ../data folder with the name MAILOUT_TEST_DF.pkl.\")\n print(\"Use pd.read_pickle to read it.\")\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.impute.SimpleImputer",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
msc-acse/acse-9-independent-research-project-L519159123 | [
"90b2399eeb41ea24bfc3f401603225eaf628bc0a"
] | [
"data_process/loss_plotting.py"
] | [
"\"\"\"\nauthor : Yuxuan Liu\ngithub alias: L519159123\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef plot_loss(experiment_name, model='pix2pix'):\n \"\"\"Plot plg loss of the training process\n \n Parameters:\n experiment_name (string)\n model (string) - pix2pix by default\n \n \"\"\"\n \n # load data file\n data = pd.read_csv(os.path.join('./checkpoints', experiment_name, 'loss_log.txt'), skiprows=1, sep='\\s*', engine='python', header=None)\n \n if model == 'pix2pix':\n epoch = data[1].str.replace(',', '').astype('int')\n iters = data[3].str.replace(',', '').astype('int') / 1928\n epoch = epoch + iters - 1\n \n # convert string to float32 data\n G_GAN = data[9].astype('float32')\n G_L1 = data[11].astype('float32')\n D_real = data[13].astype('float32')\n D_fake = data[15].astype('float32')\n \n plt.figure(figsize=(15,8))\n plt.xlabel('epoch', fontsize=18, labelpad=20)\n plt.ylabel('loss', fontsize=18, labelpad=20)\n plt.title(experiment_name + 'loss over time', fontsize=18, pad=30)\n plt.plot(epoch.values, G_GAN.values, 'b', label='G_GAN')\n plt.plot(epoch.values, G_L1.values, 'C1', label='G_L1')\n plt.plot(epoch.values, D_real.values, 'g', label='D_real')\n plt.plot(epoch.values, D_fake.values, 'r', label='D_fake')\n plt.tick_params(labelsize=14)\n plt.legend(loc='best', fontsize=14)\n plt.grid(True)\n # save the png image into the corresponding dir \n plt.savefig(os.path.join('./results', experiment_name, 'test_latest', experiment_name + '.png'))\n plt.show()\n \n if model == 'cyclegan':\n epoch = data[1].str.replace(',', '').astype('int')\n iters = data[3].str.replace(',', '').astype('int') / 1928\n epoch = epoch + iters - 1\n\n D_A = data[9].astype('float32')\n G_A = data[11].astype('float32')\n cycle_A = data[13].astype('float32')\n idt_A = data[15].astype('float32')\n \n D_B = data[17].astype('float32')\n G_B = data[19].astype('float32')\n cycle_B = data[21].astype('float32')\n idt_B = data[23].astype('float32')\n \n plt.figure(figsize=(15,8))\n plt.xlabel('epoch', fontsize=18, labelpad=20)\n plt.ylabel('loss', fontsize=18, labelpad=20)\n plt.title(experiment_name + 'loss over time', fontsize=18, pad=30)\n plt.plot(epoch.values, D_A.values, 'C1', label='D_A')\n plt.plot(epoch.values, G_A.values, 'C2', label='G_A')\n plt.plot(epoch.values, cycle_A.values, 'C3', label='cycle_A')\n plt.plot(epoch.values, idt_A.values, 'C4', label='idt_A')\n plt.plot(epoch.values, D_B.values, 'C5', label='D_B')\n plt.plot(epoch.values, G_B.values, 'C6', label='G_B')\n plt.plot(epoch.values, cycle_B.values, 'C7', label='cycle_B')\n plt.plot(epoch.values, idt_B.values, 'C8', label='idt_B')\n plt.tick_params(labelsize=14)\n plt.legend(loc='best', fontsize=14)\n plt.grid(True)\n plt.savefig(os.path.join('./results', experiment_name, 'test_latest', experiment_name + '.png'))\n plt.show()\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DMhouping/DataScrapingGlassdoor | [
"8ffef55c98b986f4f9106109bbd386d3a1b54f84"
] | [
"SeleniumGlassdoor.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 10 10:11:34 2021\n\n@author: houping\n\"\"\"\n\nimport os\nimport re\nimport csv\nimport time\nimport json\nimport random\nimport requests\nimport pandas as pd\nimport numpy as np\n\nfrom datetime import date, timedelta, datetime\n\nfrom bs4 import BeautifulSoup\nimport selenium\nfrom selenium import webdriver\n\ndef sign_in():\n url = 'https://www.glassdoor.com/profile/login_input.htm'\n browser.get(url)\n\n email_field = browser.find_element_by_name('username')\n password_field = browser.find_element_by_name('password')\n submit_btn = browser.find_element_by_xpath('//button[@type=\"submit\"]')\n\n with open('credentials.json') as f:\n d = json.loads(f.read())\n username = d['glassdoor'][0]['username']\n password = d['glassdoor'][0]['password']\n\n email_field.send_keys(username)\n password_field.send_keys(password)\n submit_btn.click()\n\n time.sleep(1)\n\ndef scrape_overview(browser, save_path, overviewUrl):\n browser.get(overviewUrl)\n # overview section\n overview = {}\n reviews = browser.find_element_by_id('EIOverviewContainer')\n # overview = reviews.find_elements_by_class_name('align-items-center')\n \n #website\n try: \n overview['website'] = reviews.find_element_by_xpath(\".//a[@data-test='employer-website']\").text\n except Exception:\n pass\n # headquarters\n try:\n overview['headquarters'] = reviews.find_element_by_xpath(\".//div[@data-test='employer-headquarters']\").text\n except Exception:\n pass\n # size\n try:\n overview['size'] = reviews.find_element_by_xpath(\".//div[@data-test='employer-size']\").text\n except Exception:\n pass\n # funded\n try:\n overview['funded'] = reviews.find_element_by_xpath(\".//div[@data-test='employer-founded']\").text\n except Exception:\n pass\n # type\n try:\n overview['ctype'] = reviews.find_element_by_xpath(\".//div[@data-test='employer-type']\").text\n except Exception:\n pass\n # industry\n try:\n overview['industry'] = reviews.find_element_by_xpath(\".//div[@data-test='employer-industry']\").text\n except Exception:\n pass\n # revenue\n try:\n overview['revenue'] = reviews.find_element_by_xpath(\".//div[@data-test='employer-revenue']\").text\n except Exception:\n pass\n # description\n try:\n reviews.find_element_by_xpath(\".//span[@data-test='employerDescription']/button\").click()\n except Exception:\n pass\n try:\n overview['description'] = reviews.find_element_by_xpath(\".//span[@data-test='employerDescription']\").text\n except Exception:\n pass\n # affiliated companies\n # employerHierarchies = reviews.find_element_by_xpath(\"//div[@data-test='employerHierarchies']\")\n try:\n employerHierarchiesUrl = reviews.find_element_by_xpath(\"//div[@data-test='employerHierarchies']/a\").get_attribute('href')\n browser.get(employerHierarchiesUrl)\n affiliatedCompanies = browser.find_elements_by_xpath(\"//a[@data-test='companyInfositeLink']\")\n affiliatedAll = {}\n for i in range(len(affiliatedCompanies)):\n a = affiliatedCompanies[i]\n affiliated = {}\n affiliated['url'] = a.get_attribute('href')\n affiliated['companyName'] = a.find_element_by_xpath(\".//h3[@data-test='employerName']\").text\n affiliated['companyRating'] = a.find_element_by_xpath(\".//p[@data-test='companyRating']\").text\n affiliated['companyLocation'] = a.find_element_by_xpath(\".//p[@data-test='companyLocation']\").text\n affiliated['reviewsCount'] = a.find_element_by_xpath(\".//p[@data-test='reviewsCount']\").text\n affiliated['jobsCount'] = 
a.find_element_by_xpath(\".//p[@data-test='jobsCount']\").text\n affiliated['salariesCount'] = a.find_element_by_xpath(\".//p[@data-test='salariesCount']\").text\n \n affiliatedAll[i] = affiliated\n overview['affiliated'] = affiliatedAll\n except Exception:\n pass\n \n with open(os.path.join(save_path,'overview.json'), 'w') as outfile:\n json.dump(overview, outfile, indent=4)\n \n\n \n\ndef scrape_review(reviewsUrl,browser,save_path):\n def more_pages():\n time.sleep(random.uniform(3,5))\n try:\n footer = browser.find_element_by_class_name('paginationFooter').text\n a = footer.split(' ')\n if (int(a[3]) > int(a[5].replace(\",\", \"\").split('.00')[0]) - 9):\n return False\n else:\n return True\n except Exception:\n return False\n \n def last_page():\n try:\n footer = browser.find_element_by_class_name('paginationFooter').text\n a = footer.split(' ')\n if (int(a[3]) >= int(a[5].replace(\",\", \"\").split('.00')[0])):\n return True\n else:\n return False\n except Exception:\n return True\n \n def scrape_date(review):\n try:\n date = review.find_element_by_tag_name('time').get_attribute('datetime')\n except Exception:\n date = np.nan\n return date\n \n def scrape_helpful(review):\n try:\n s = review.find_element_by_class_name('helpfulReviews').text.strip('\"\"')\n res = s[s.find(\"(\") + 1:s.find(\")\")]\n except Exception:\n res = 0\n return res\n \n def scrape_review_title(review):\n try:\n title = review.find_element_by_xpath(\".//a[@class='reviewLink']\").text\n except Exception:\n title = np.nan\n return title\n \n def scrape_rating(review):\n subRating = {}\n try:\n subRating['Overall'] = review.find_element_by_xpath(\".//span[@class='rating']/span\").get_attribute('title')\n except Exception:\n pass\n try:\n subratingSection = review.find_elements_by_xpath(\".//ul[@class='undecorated']/li\")\n for sub in subratingSection:\n rating = sub.find_element_by_class_name('gdBars').get_attribute('title')\n name = sub.find_element_by_tag_name('div').get_attribute(\"textContent\")\n subRating[name] = rating\n except Exception:\n pass\n return subRating\n \n def scrape_employee_status_title(review):\n try:\n employee = review.find_element_by_class_name('authorJobTitle').text\n title = employee.split('-')[1].strip()\n status = employee.split('-')[0].strip()\n except Exception:\n title = np.nan\n status = np.nan\n return status,title\n \n def scrape_location(review):\n try:\n location = review.find_element_by_class_name('authorLocation').text\n except Exception:\n location = np.nan\n return location\n \n def scrape_recommendation(review):\n recommendation = {}\n try:\n recommendationList = review.find_elements_by_xpath(\".//div[@class='row reviewBodyCell recommends']/div\")\n for rr in recommendationList:\n text = rr.text\n if 'Recommend' in text:\n recommendation['Recommend']=text\n elif 'Outlook' in text:\n recommendation['Outlook'] = text\n elif 'CEO' in text:\n recommendation['CEO'] = text\n else:\n pass\n except Exception:\n pass\n return recommendation\n \n def scrape_employee_years(review):\n try:\n years = review.find_element_by_class_name('mainText').text.strip('\"')\n except Exception:\n years = np.nan\n return years\n \n def scrape_pros_cons_advice(review):\n try:\n expand = review.find_element_by_xpath(\".//div[contains(text(),'Continue reading')]\").click()\n except Exception:\n pass\n try:\n pros = review.find_element_by_xpath(\".//span[@data-test='pros']\").text\n except Exception:\n pros = np.nan\n try:\n cons = review.find_element_by_xpath(\".//span[@data-test='cons']\").text\n 
except Exception:\n cons = np.nan\n try:\n advice = review.find_element_by_xpath(\".//span[@data-test='advice-management']\").text\n except Exception:\n advice = np.nan\n return pros,cons,advice\n n = np.nan\n try:\n con = review.find_element_by_class_name('common__EiReviewDetailsStyle__socialHelpfulcontainer').text\n if 'person' in con:\n n = int(con.split(' ')[0])\n except Exception:\n pass\n return n\n \n def scrape_rating_broder(review):\n subRating = {}\n try:\n subRating['Overall'] = review.find_element_by_class_name('ratingNumber').text\n except Exception:\n pass\n try:\n subratingSection = review.find_elements_by_xpath(\".//div[@class='content']//li\")\n for sub in subratingSection:\n name = sub.find_element_by_xpath(\"div\").get_attribute(\"textContent\")\n subRating[name] = ratingReference[sub.find_element_by_xpath(\"div/following-sibling::div\").get_attribute('class')]\n except Exception:\n pass\n return subRating\n \n def scrape_employee_status_experience_broder(review):\n try:\n se = review.find_element_by_xpath(\"div/div/div/div/span\").text.split(',')\n status = se[0].strip()\n experience = se[1].strip()\n except Exception:\n status = np.nan\n experience = np.nan\n return status,experience\n \n def scrape_review_title_broder(review):\n try:\n title = review.find_element_by_xpath(\".//a[@class='reviewLink']\").text\n except Exception:\n title = np.nan\n return title\n \n def scrape_date_employee_title_broder(review):\n try:\n info = review.find_element_by_class_name('authorInfo').text.split('-')\n date = info[0].strip()\n title = info[1].strip()\n except Exception:\n date = np.nan\n title = np.nan\n return date,title\n \n def scrape_location_broder(review):\n try:\n location = review.find_element_by_class_name('authorLocation').text\n except Exception:\n location = np.nan\n return location\n \n def scrape_recommendation_broder(review):\n rec = {}\n try:\n recList = review.find_element_by_class_name('recommends').find_elements_by_xpath(\"div\")\n for r in recList:\n score = recomList[r.find_element_by_xpath('span').get_attribute('class')]\n name = r.find_element_by_xpath('span/following-sibling::span').text.strip()\n rec[name] = score\n except Exception:\n pass\n return rec\n \n def scrape_pros_cons_advice_broder(review):\n try:\n expand = review.find_element_by_xpath(\".//div[contains(text(),'Continue reading')]\").click()\n except Exception:\n pass\n try:\n pros = review.find_element_by_xpath(\".//span[@data-test='pros']\").text\n except Exception:\n pros = np.nan\n try:\n cons = review.find_element_by_xpath(\".//span[@data-test='cons']\").text\n except Exception:\n cons = np.nan\n try:\n advice = review.find_element_by_xpath(\".//span[@data-test='advice-management']\").text\n except Exception:\n advice = np.nan\n return pros,cons,advice\n \n def scrape_helpful_broder(review):\n n = 0\n try:\n con = review.find_element_by_class_name('common__EiReviewDetailsStyle__socialHelpfulcontainer').text\n if 'person' in con:\n n = int(con.split(' ')[0])\n except Exception:\n pass\n return n\n \n def scrape_review_one_page(reviews,j,reviewsAll):\n for i in range(len(reviews)):\n r = reviews[i]\n review = {}\n if 'noBorder' not in r.get_attribute('class'):\n review['date'] = scrape_date(r)\n review['helpful'] = scrape_helpful(r)\n review['review_title'] = scrape_review_title(r)\n review['rating'] = scrape_rating(r)\n review['employee_status'],review['employee_title'] = scrape_employee_status_title(r)\n review['location'] = scrape_location(r)\n review['recommendation'] = 
scrape_recommendation(r)\n review['pro'],review['con'],review['advice'] = scrape_pros_cons_advice(r)\n else:\n review['rating'] = scrape_rating_broder(r)\n review['status'],review['experience'] = scrape_employee_status_experience_broder(r)\n review['review_title'] = scrape_review_title_broder(r)\n review['date'],review['employee_title'] = scrape_date_employee_title_broder(r)\n review['location'] = scrape_location_broder(r)\n review['recommendation'] = scrape_recommendation_broder(r)\n review['pro'],review['con'],review['advice'] = scrape_pros_cons_advice_broder(r)\n review['helpful'] = scrape_helpful_broder(r)\n \n reviewsAll[i+(j-1)*10] = review\n \n return reviewsAll\n \n reviewsAll = {}\n noFinished = True\n j = 1\n \n browser.get(reviewsUrl)\n \n while noFinished:\n if j % 50 == 0:\n print(len(reviewsAll))\n with open(os.path.join(save_path,'reviews_'+str(j)+'.json'), 'w') as outfile:\n json.dump(reviewsAll, outfile, indent=4)\n reviewsAll={}\n time.sleep(random.uniform(5,10))\n else:\n time.sleep(random.uniform(1,2))\n reviews = browser.find_elements_by_class_name('empReview')\n reviewsAll = scrape_review_one_page(reviews,j,reviewsAll)\n \n j += 1\n nextPage = reviewsUrl.split('.htm')[0]+ '_P'+str(j)+'.htm?filter.iso3Language=eng'\n browser.get(nextPage)\n \n time.sleep(random.uniform(2,3))\n \n if last_page():\n noFinished = False\n reviews = browser.find_elements_by_class_name('empReview')\n reviewsAll = scrape_review_one_page(reviews,j,reviewsAll)\n with open(os.path.join(save_path,'reviews_'+str(j)+'.json'), 'w') as outfile:\n json.dump(reviewsAll, outfile, indent=4)\n break\n \n\nif __name__ == '__main__':\n ratingReference = {}\n ratingReference['css-152xdkl'] = 1\n ratingReference['css-19o85uz'] = 2\n ratingReference['css-1ihykkv'] = 3\n ratingReference['css-1c07csa'] = 4\n ratingReference['css-1dc0bv4'] = 5\n recomList = {}\n recomList['SVGInline css-10xv9lv d-flex'] = np.nan\n recomList['SVGInline css-hcqxoa d-flex'] = 1\n recomList['SVGInline css-1kiw93k d-flex'] = -1\n recomList['SVGInline css-1h93d4v d-flex'] = 0\n # browser = webdriver.Chrome()\n browser = webdriver.Chrome(executable_path = 'E:\\Houping\\glassdoor\\chromedriver_win32\\chromedriver')\n sign_in()\n \n allURL = pd.read_csv('E:\\Houping\\glassdoor\\data\\company_url_updated3.csv')\n done_list = pd.read_csv('E:\\Houping\\glassdoor\\data\\done_list.csv')\n cik = done_list.cik.tolist()\n # url = 'https://www.glassdoor.com/Overview/Working-at-ADC-Telecommunications-EI_IE1075.11,33.htm'\n # url = 'https://www.glassdoor.com/Overview/Working-at-American-Airlines-EI_IE8.11,28.htm'\n allURL = allURL[116:]\n for index,rows in allURL.iterrows():\n if rows['cik'] not in cik:\n url = rows['url']\n name = rows['conm']\n print(url)\n browser.get(url)\n time.sleep(random.uniform(1.5,2))\n path = 'E:\\Houping\\glassdoor\\data'\n # name = 'AAL'\n save_path = os.path.join(path,name)\n try:\n os.stat(save_path)\n except:\n os.mkdir(save_path)\n \n try:\n overviewUrl = browser.find_element_by_xpath(\"//a[@data-label='Overview']\").get_attribute('href')\n except Exception:\n overviewUrl = browser.find_element_by_xpath(\"//a[@data-selector='orgStructureCompanyOverviewOption']\").get_attribute('href')\n \n scrape_overview(browser, save_path, overviewUrl)\n \n reviewsUrl = browser.find_element_by_xpath(\"//a[@data-label='Reviews']\").get_attribute('href')\n \n scrape_review(reviewsUrl,browser,save_path)\n \n cik.append(rows['cik'])\n\n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MarinkoBa/Hate-Speech-Classification | [
"72f6bbe93b823daefa138df4f81a3a4df5b34c4c"
] | [
"src/utils/test_map.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import metrics\nimport pandas as pd\n\ndef test_map(y_pred,y_test):\n \"\"\"\n Get accurracy, precision and recall and save png of true positive,\n false positive, true negative and false negative for given prediction and test data \n\n Parameters\n ----------\n y_pred: Binary array\n array containing preddiction for test data for a model\n y_test:\t\t Pandas dataframe\n \tThe dataframe containing the test labels. \n \"\"\"\n \n # get confusion matrix containig predicted and true labels\n cnf_matrix=metrics.confusion_matrix(y_test,y_pred)\n \n # set up plot\n class_names=[0,1]\n fig, ax = plt.subplots()\n tick_marks=np.arange(len(class_names))\n plt.xticks(tick_marks,class_names)\n plt.yticks(tick_marks,class_names)\n \n # set up heatmap and labels\n sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"YlGnBu\",fmt=\"g\")\n ax.xaxis.set_label_position(\"top\")\n plt.tight_layout()\n plt.title(\"Matrix\",y=1.1)\n plt.ylabel(\"actual label\")\n plt.xlabel(\"predicted label\")\n \n # save plot\n plt.savefig(\"mygraph.png\")\n \n # print acurracy, precision and recall\n \n print(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n print(\"Precision:\",metrics.precision_score(y_test, y_pred))\n print(\"Recall:\",metrics.recall_score(y_test, y_pred))\n \n \n\n\n"
] | [
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.subplots",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
monthie/cogmods | [
"62af4b8bf2effb77f26a8877d6a89949164d83f0",
"62af4b8bf2effb77f26a8877d6a89949164d83f0",
"62af4b8bf2effb77f26a8877d6a89949164d83f0",
"62af4b8bf2effb77f26a8877d6a89949164d83f0"
] | [
"moral/student_projects/karkkainen_2020/models/ml/mlp/modelJudgement.py",
"relational/student_projects/2020_karkkainen/models/Baseline/Most-Frequent-Answer/model3ps.py",
"relational/student_projects/2020_karkkainen/models/ml/lstm/model4ps.py",
"fake_news/models/Heuristic/hrlinear.py"
] | [
"# A neural network -based model for predicting moral reasoning. \n\n\n\nimport time\n\nimport collections\n\nimport numpy as np\n\nimport ccobra\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\n\n\n\n\n\nclass MLP(nn.Module):\n def __init__(self, input_size=15, hidden_size=256, output_size=1):\n super(MLP, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(self.hidden_size, self.output_size)\n self.sigmoid = torch.nn.Sigmoid()\n \n def forward(self, x):\n \n hidden = self.fc1(x)\n relu1 = self.relu(hidden)\n output = self.fc2(relu1)\n output = self.sigmoid(output)\n return output\n\n# Input mapping for the different dilemmas\ndilemma_mppng = {\"Switch\": [1,-5,5,-1,0,0], \"Loop\":[1,-5,5,-1,1,0], \"Footbridge\":[1,-5,5,-1,1,1]}\n\ngender_mppng = {\"Men\":[0], \"Women\":[1]}\n\ncontinent_mppng = {\"Americas\":[1,0,0,0], \"Asia\":[0,1,0,0], \"Europe\":[0,0,1,0], \"Oc.\":[0,0,0,1]}\n\neducation_mppng = {\"No College\":[0], \"College\":[1]}\n\ndef create_input(data):\n auxiliary = data['aux']\n\n dilemma = dilemma_mppng[data['task'][0][0]]\n gender = gender_mppng[auxiliary['survey.gender']]\n cont = continent_mppng[auxiliary['Continent']]\n edu = education_mppng[auxiliary['survey.education']]\n age = [auxiliary['survey.age']/100]\n politics = [auxiliary['survey.political']]\n religious = [auxiliary['survey.religious']]\n\n\n return (dilemma + gender + cont + edu + age + politics + religious)\n\nclass MLPModel(ccobra.CCobraModel):\n def __init__(self, name='MLP', k=1):\n super(MLPModel, self).__init__(name, [\"moral\"], [\"single-choice\"])\n\n self.net = MLP()\n\n\n self.n_epochs = 50\n\n self.optimizer = optim.SGD(self.net.parameters(), lr= 0.0001)\n self.loss = nn.BCELoss()\n\n def pre_train(self, dataset):\n torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n \n x = []\n y = []\n\n for subj_train_data in dataset:\n subj_x = []\n subj_y = []\n for seq_train_data in subj_train_data:\n \n seq_train_data['task'] = seq_train_data['item'].task\n inp = create_input(seq_train_data)\n\n target = float(seq_train_data['response'])\n\n subj_x.append(inp)\n\n subj_y.append(target)\n\n x.append(subj_x)\n y.append(subj_y)\n x = np.array(x)\n y = np.array(y)\n\n\n self.train_x = torch.from_numpy(x).float()\n self.train_y = torch.from_numpy(y).float()\n\n\n self.train_network(self.train_x, self.train_y, self.n_epochs, verbose=True)\n\n\n\n def train_network(self, train_x, train_y, n_epochs, verbose=False):\n print('Starting training...')\n for epoch in range(self.n_epochs):\n start_time = time.time()\n\n # Shuffle the training data\n perm_idxs = np.random.permutation(np.arange(len(train_x)))\n train_x = train_x[perm_idxs]\n train_y = train_y[perm_idxs]\n\n\n losses = []\n for idx in range(len(train_x)):\n cur_x = train_x[idx]\n cur_y = train_y[idx]\n\n inp = cur_x.view(1,-1,15)\n\n outputs = self.net(inp)\n\n\n loss = self.loss(outputs, cur_y)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n losses.append(loss.item())\n\n print('Epoch {}/{} ({:.2f}s): {:.4f} ({:.4f})'.format(\n epoch + 1, n_epochs, time.time() - start_time, np.mean(losses), np.std(losses)))\n\n\n accs = []\n for subj_idx in range(len(self.train_x)):\n pred = torch.round(self.net(self.train_x[subj_idx]))\n\n truth = self.train_y[subj_idx]\n\n acc = 
torch.mean((pred == truth).float()).item()\n accs.append(acc)\n\n print(' acc mean: {:.2f}'.format(np.mean(accs)))\n print(' acc std : {:.2f}'.format(np.std(accs)))\n\n\n self.net.eval()\n\n\n \n def predict(self, item, **kwargs):\n input = {'task': item.task}\n input['aux'] = kwargs\n x = torch.FloatTensor(create_input(input))\n output = self.net(x.view(1, 1, -1))\n\n label = np.round(output.detach().numpy())\n\n self.prediction = label[0][0]\n return int(self.prediction)\n\n\n\n",
"\"\"\" Implements a most frequent answer model.\n\n\"\"\"\n\nimport numpy as np\nimport ccobra\nimport pandas as pd\n\n\ndef createDict():\n data = pd.read_csv('3ps.csv')\n keys = data['Task-ID'].tolist()\n values = data['most_frequent_response'].tolist()\n return dict(zip(keys, values))\n\n\n\nclass MostFreqModel(ccobra.CCobraModel):\n \"\"\" Model producing the most frequent answer as a response.\n\n \"\"\"\n\n def __init__(self, name='MostFrequentAnswer'):\n \"\"\" Initializes the random model.\n\n Parameters\n ----------\n name : str\n Unique name of the model. Will be used throughout the CCOBRA\n framework as a means for identifying the model.\n\n \"\"\"\n\n self.answers = createDict()\n\n super(MostFreqModel, self).__init__(\n name, [\"spatial-relational\"], [\"verify\", \"single-choice\"])\n\n def predict(self, item, **kwargs):\n \"\"\" Predicts the most frequent answer for the given task ID \"\"\"\n\n return self.answers[kwargs['Task-ID']]\n",
"# RNN-model for predicting relational reasoning. \n# The model uses mode advanced LSTM-cells, which are able to preserve long-term dependencies in the data.\n\nimport time\n\nimport collections\n\nimport numpy as np\nfrom pandas.core.common import flatten\nimport ccobra\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\n\n\n\n\n\nclass RNN(nn.Module):\n def __init__(self, input_size=16, hidden_size=32, output_size=2):\n super(RNN, self).__init__()\n\n self.lstm = nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=2,\n dropout=0.2)\n\n self.h2o = nn.Linear(hidden_size, output_size)\n\n\n def forward(self, input, hidden):\n output, hidden = self.lstm(input, hidden)\n\n output = self.h2o(output)\n\n return output, hidden\n\n\nobjct_mpping = { \"A\": 0,\n \"B\":1,\n \"C\": 2,\n \"D\": 3, \n }\n\noutput_mpping = {1: True, 0: False}\n\ndef getValues(rel):\n if rel == 'Left':\n return [-1.0, 1.0]\n else:\n return [1.0, -1.0]\n\n\ndef encode(task):\n result = []\n for i in task:\n premise = [0] * 4\n val = getValues(i[0]) \n premise[objct_mpping[i[1]]] = val[0]\n premise[objct_mpping[i[2]]] = val[1]\n result.append(premise)\n return result\n\ndef getTarget(targ):\n if targ:\n return [0,1]\n else: \n return [1,0]\n\nclass LSTMModel(ccobra.CCobraModel):\n def __init__(self, name='LSTM', k=1):\n super(LSTMModel, self).__init__(name, [\"spatial-relational\"], [\"verify\"])\n\n self.net = RNN()\n self.hidden = None\n\n\n self.n_epochs = 50\n \n\n self.optimizer = optim.Adam(self.net.parameters())\n self.loss = nn.CrossEntropyLoss()\n\n def pre_train(self, dataset):\n torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n \n x = []\n y = []\n\n for subj_train_data in dataset:\n\n subj_x = []\n subj_y = []\n for seq_train_data in subj_train_data:\n task = seq_train_data['item'].task\n\n premises = encode(task)\n\n choices = encode(seq_train_data['item'].choices[0])\n\n inp = list(flatten(premises)) + list(flatten(choices))\n\n target = getTarget(seq_train_data['response'])\n subj_x.append(inp)\n\n subj_y.append(target)\n \n\n x.append(subj_x)\n y.append(subj_y)\n\n \n\n\n # Delete incomplete rows:\n x = list(filter(lambda i: len(i) == 48, x))\n y = list(filter(lambda i: len(i) == 48, y))\n\n\n x = np.array(x)\n y = np.array(y)\n self.train_x = torch.from_numpy(x).float()\n self.train_y = torch.from_numpy(y).float()\n\n\n\n self.train_network(self.train_x, self.train_y, self.n_epochs, verbose=True)\n\n\n\n def train_network(self, train_x, train_y, n_epochs, verbose=False):\n if verbose:\n print('Starting training...')\n \n for epoch in range(self.n_epochs):\n start_time = time.time()\n\n # Shuffle the training data\n perm_idxs = np.random.permutation(np.arange(len(train_x)))\n train_x = train_x[perm_idxs]\n train_y = train_y[perm_idxs]\n\n\n\n losses = []\n for idx in range(len(train_x)):\n cur_x = train_x[idx]\n cur_y = train_y[idx]\n\n\n inp = cur_x.view(-1, 1, 16)\n \n outputs, _ = self.net(inp, None)\n loss = self.loss(outputs.view(-1,2), cur_y.argmax(1))\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n losses.append(loss.item())\n\n if verbose:\n print('Epochs {}/{} ({:.2f}s): {:.4f} ({:.4f})'.format(\n epoch + 1, n_epochs, time.time() - start_time, np.mean(losses), np.std(losses)))\n\n accs = []\n for subj_idx in range(len(self.train_x)):\n pred, _ = self.net(self.train_x[subj_idx].view(-1,1,16), None)\n pred = pred.view(-1,2).argmax(1)\n\n truth = 
self.train_y[subj_idx].argmax(1)\n\n\n\n acc = torch.mean((pred == truth).float()).item()\n accs.append(acc)\n\n print(' acc mean: {:.2f}'.format(np.mean(accs)))\n print(' acc std : {:.2f}'.format(np.std(accs)))\n\n\n self.net.eval()\n\n\n # Turns the prediction into an statement according if the given conclusion is perceived true or false. \n def predict(self, item, **kwargs):\n task = item.task\n premises = encode(task)\n choices = encode(item.choices[0])\n x = torch.FloatTensor(list(flatten(premises)) + list(flatten(choices)))\n output, self.hidden = self.net(x.view(1, 1, -1), self.hidden)\n\n\n label = np.argmax(output.detach().numpy()[0][0])\n\n self.prediction = output_mpping[label]\n return self.prediction\n\n",
"#adjust import structure if started as script\nimport os\nimport sys\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\n\n\"\"\" \nNews Item Processing model implementation.\n\"\"\"\nimport ccobra\nfrom random import random\nimport math\nfrom scipy.optimize._basinhopping import basinhopping\nfrom numpy import mean\nimport numpy as np\nfrom LinearCombination.optimizationParameters import OptPars\n\n\nclass RHlinear(ccobra.CCobraModel):\n \"\"\" News reasoning CCOBRA implementation.\n \"\"\"\n def __init__(self, name='Heuristic-Recognition-linear', commands = []):\n \"\"\" Initializes the news reasoning model.\n Parameters\n ----------\n name : str\n Unique name of the model. Will be used throughout the ORCA\n framework as a means for identifying the model.\n \"\"\"\n self.parameter = {}\n self.parameter['kappa'] = 1\n self.parameter['alpha'] = 1\n #dictionary for testing with value from rough optimization on Experiment 1\n optdict = {'kappa': -5.192396551875893, 'alpha': 2.2913602334440673}\n for a in optdict.keys():\n self.parameter[a] = optdict[a]\n super().__init__(name, ['misinformation'], ['single-choice'])\n\n def predictS(self, item, **kwargs):\n if len(kwargs.keys()) == 1:\n kwargs = kwargs['kwargs']\n for a in ['Familiarity_Democrats_Combined', 'Familiarity_Republicans_Combined']:\n if kwargs['conservatism'] >= 3.5:\n if 'Republicans' in a:\n kwargs[a.replace('Republicans', 'Party')] = kwargs[a]\n elif kwargs['conservatism'] <= 3.5:\n if 'Democrats' in a:\n kwargs[a.replace('Democrats', 'Party')] = kwargs[a]\n return kwargs['Familiarity_Party_Combined'] * self.parameter['alpha'] + self.parameter['kappa']\n\n\n def adapt(self, item, target, **kwargs):\n pass\n \n def predict(self, item, **kwargs):\n return 'Accept' if random() < self.predictS(item, **kwargs) else 'Reject'\n\n def toCommandList(self,pars):\n optCommands = []\n i = 0\n parKeys = sorted(self.parameter.keys())\n for a in parKeys:\n if len(pars)<=i: \n print('keys length error', self.name)\n break\n optCommands.append('self.parameter[\\'' + a + '\\'] = ' + str(pars[i]))\n i += 1\n return optCommands\n \n def executeCommands(self, commands):\n for command in commands:\n exec(command)\n\n def pre_train_person(self, dataset):\n #Optimpizing paramaters per person \n trialList = []\n if len(dataset) == 0:\n return\n for pers in dataset:\n trialList.extend([pers])\n if len(self.parameter.keys()) > 0:\n with np.errstate(divide='ignore'):\n personOptimum = basinhopping(self.itemsOnePersonThisModelPeformance, [1]*len(self.parameter.keys()), niter=OptPars.iterations, stepsize=3, T=4, minimizer_kwargs={\"args\" : (trialList)})\n optpars = personOptimum.x\n else: \n optpars = [] \n self.executeCommands(self.toCommandList(optpars))\n\n def itemsOnePersonThisModelPeformance(self, pars, items):\n #input: list of items\n items = [a for a in items]\n performanceOfPerson = []\n self.executeCommands(self.toCommandList(pars))\n for item in items:\n pred = min(1.0,max(self.predictS(item=item['item'], kwargs= item['aux']),0.0)) \n if item['aux']['binaryResponse']:\n predictionPerf = min(1.0,max(self.predictS(item=item['item'], kwargs=item['aux']),0.0)) \n elif not item['aux']['binaryResponse']:\n predictionPerf = 1.0 - pred\n else:\n print('Error')\n performanceOfPerson.append(predictionPerf)\n return -1*mean(performanceOfPerson) \n\n\n"
] | [
[
"torch.from_numpy",
"torch.nn.BCELoss",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"numpy.std",
"numpy.mean",
"torch.cuda.is_available",
"torch.nn.ReLU",
"numpy.array"
],
[
"pandas.read_csv"
],
[
"pandas.core.common.flatten",
"torch.nn.CrossEntropyLoss",
"torch.nn.LSTM",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.std",
"numpy.mean",
"torch.cuda.is_available",
"numpy.array"
],
[
"numpy.errstate",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
linhuifj/kaggle-kuzushiji-recognition | [
"dc77389bbec5d66ed8a35242a2ca37bb683b4b04",
"dc77389bbec5d66ed8a35242a2ca37bb683b4b04"
] | [
"line_rec/dsutils/real_image2lmdb_txt.py",
"line_rec/utils.py"
] | [
"import os, sys\nimport os.path as osp\nfrom PIL import Image\nimport six\nimport string\n\nimport lmdb\nimport pickle\n\nimport torch\nimport torch.utils.data as data\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import transforms\nfrom torchvision.datasets import ImageFolder\nfrom torchvision import transforms, datasets\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass RealImageTxtLMDB(data.Dataset):\n def __init__(self, db_path, transform=None, transform2 = None, testBooks=[], isTest = False, max_batch_length = 40):\n self.db_path = db_path\n self.env = lmdb.open(db_path, subdir=osp.isdir(db_path),\n readonly=True, lock=False,\n readahead=False, meminit=False)\n self.testBooks = testBooks \n with self.env.begin(write=False) as txn:\n self.length = pickle.loads(txn.get(b'__len__'))\n self.keys = pickle.loads(txn.get(b'__keys__'))\n keys2 = []\n for l in self.keys:\n l = l.strip()\n if isTest:\n if not self.checkIsTest(l):\n continue\n else:\n if self.checkIsTest(l):\n continue\n keys2.append(l.strip())\n self.keys = keys2\n self.length = len(keys2)\n #min(len(keys2), self.length)\n \n self.max_batch_length = max_batch_length\n self.transform = transform\n self.transform2 = transform2 \n \n def checkIsTest(self, fname):\n for l in self.testBooks:\n if l in fname:\n return True\n return False\n\n def __getitem__(self, index):\n img, label = None, None\n env = self.env\n while True:\n with env.begin(write=False) as txn:\n byteflow = txn.get(self.keys[index].encode('utf-8'))\n unpacked = pickle.loads(byteflow)\n # load image\n img = unpacked[0]\n label = unpacked[1]\n if len(label) <= self.max_batch_length:\n break\n index = (index + 1) % self.length\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if self.transform2 is not None:\n augmented = self.transform2(image=img)\n img = augmented['image']\n\n if self.transform is not None:\n img = self.transform(img)\n \n return img,label, None\n\n def __len__(self):\n return self.length\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + self.db_path + ')'\n\ndef collate_fn(batch):\n batch.sort(key=lambda x: len(x[1]), reverse=True)\n img, label = zip(*batch)\n pad_label = []\n lens = []\n max_len = len(label[0])\n for i in range(len(label)):\n temp_label = [4800] * max_len\n temp_label[:len(label[i])] = label[i]\n pad_label.append(temp_label)\n lens.append(len(label[i]))\n return torch.stack(img), torch.tensor(pad_label), torch.tensor(lens)\n\ndef folder2lmdb(img_label_list, root = 'data/train/', write_frequency=50):\n imgs = []\n labels = []\n keys = []\n for l in open(img_label_list, 'r').readlines():\n l = l.strip().split(' ')\n labels.append(l[1])\n keys.append(l[0])\n imgpath = l[0]\n imgs.append(imgpath)\n\n lmdb_path = \"train_lines_txt_data.lmdb\"\n print(\"Generate LMDB to %s\" % lmdb_path)\n isdir = os.path.isdir(lmdb_path)\n\n db = lmdb.open(lmdb_path, subdir=isdir,\n map_size=1099511627776 * 2, readonly=False,\n meminit=False, map_async=True)\n\n txn = db.begin(write=True)\n for idx in range(len(imgs)):\n imgpath = imgs[idx]\n label = labels[idx]\n #read data\n img = cv2.imread(imgpath, 1)\n# print (imgpath)\n# print (imgpath, img.shape, file = sys.stderr)\n height = img.shape[0] # keep original height\n width = img.shape[1]\n width = width * 64 / height\n dim = (int(width), 64)\n img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n txn.put(keys[idx].encode('utf-8'), pickle.dumps((img,label), pickle.HIGHEST_PROTOCOL))\n if idx % write_frequency == 0 and idx > 
0:\n print(\"[%d/%d]\" % (idx, len(labels)))\n txn.commit()\n txn = db.begin(write=True)\n \n # finish iterating through dataset\n txn.commit()\n with db.begin(write=True) as txn:\n txn.put(b'__keys__', pickle.dumps(keys))\n txn.put(b'__len__', pickle.dumps(len(imgs)))\n\n print(\"Flushing database ...\")\n db.sync()\n db.close()\n\n\nif __name__ == \"__main__\":\n make_data_flag = True\n if make_data_flag:\n folder2lmdb(sys.argv[1])\n else:\n from distort_aug import DistortAug\n from brightness_aug import TenBrightAug, IncBrightAug, ColorAug, GrayImg, BinImg\n from resize_aug import ResizeAug \n #test here\n tbaug = TenBrightAug()\n incbaug = IncBrightAug()\n colaug = ColorAug()\n distortaug = DistortAug()\n grayimg = GrayImg()\n binimg = BinImg()\n resizeimg = ResizeAug(800,32)\n \n tf = transforms.Compose([distortaug,\n colaug,\n tbaug,\n incbaug,\n grayimg,\n binimg,\n tbaug,\n incbaug,\n resizeimg\n ]\n )\n \n dataset = RealImageTxtLMDB('train_lines_txt_data.lmdb', tf)\n \n data_loader = DataLoader(dataset, num_workers=2, shuffle=False, batch_size=1, collate_fn = lambda x:x)\n for idx, data in enumerate(data_loader):\n print (data[0][1])\n plt.subplot(11,1,idx+1)\n plt.imshow(data[0][0])\n if idx >=10:\n break\n plt.show()\n plt.savefig('/home/linhui/aug.jpg')\n \n# plt.imshow(data[0])\n",
"import torch\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass CTCLabelConverter(object):\n \"\"\" Convert between text-label and text-index \"\"\"\n\n def __init__(self, character, opt):\n # character (str): set of the possible characters.\n dict_character = list(character)\n\n self.dict = {}\n for i, char in enumerate(dict_character):\n # NOTE: 0 is reserved for 'blank' token required by CTCLoss\n self.dict[char] = i + 1\n\n self.character = ['[blank]'] + dict_character # dummy '[blank]' token for CTCLoss (index 0)\n self.opt = opt\n\n \n def encode(self, text, batch_max_length=25):\n \"\"\"convert text-label into text-index.\n input:\n text: text labels of each image. [batch_size]\n\n output:\n text: concatenated text index for CTCLoss.\n [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]\n length: length of each text. [batch_size]\n \"\"\"\n length = [len(s) for s in text]\n text = ''.join(text)\n text = [self.dict[char] for char in text]\n\n return (torch.IntTensor(text).to(self.opt.device), torch.IntTensor(length).to(self.opt.device))\n\n def decode(self, text_index, length):\n \"\"\" convert text-index into text-label. \"\"\"\n texts = []\n index = 0\n for l in length:\n t = text_index[index:index + l]\n\n char_list = []\n for i in range(l):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.\n char_list.append(self.character[t[i]])\n text = ''.join(char_list)\n\n texts.append(text)\n index += l\n return texts\n\n\nclass AttnLabelConverter(object):\n \"\"\" Convert between text-label and text-index \"\"\"\n\n def __init__(self, character, opt):\n # character (str): set of the possible characters.\n # [GO] for the start token of the attention decoder. [s] for end-of-sentence token.\n list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]']\n list_character = list(character)\n self.character = list_token + list_character\n\n self.dict = {}\n for i, char in enumerate(self.character):\n # print(i, char)\n self.dict[char] = i\n self.opt = opt\n\n def encode(self, text, batch_max_length=25):\n \"\"\" convert text-label into text-index.\n input:\n text: text labels of each image. [batch_size]\n batch_max_length: max length of text label in the batch. 25 by default\n\n output:\n text : the input of attention decoder. [batch_size x (max_length+2)] +1 for [GO] token and +1 for [s] token.\n text[:, 0] is [GO] token and text is padded with [GO] token after [s] token.\n length : the length of output of attention decoder, which count [s] token also. [3, 7, ....] [batch_size]\n \"\"\"\n length = [len(s) + 1 for s in text] # +1 for [s] at end of sentence.\n # batch_max_length = max(length) # this is not allowed for multi-gpu setting\n batch_max_length += 1\n # additional +1 for [GO] at first step. batch_text is padded with [GO] token after [s] token.\n batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0)\n for i, t in enumerate(text):\n text = list(t)\n text.append('[s]')\n text = [self.dict[char] for char in text]\n batch_text[i][1:1 + len(text)] = torch.LongTensor(text) # batch_text[:, 0] = [GO] token\n return (batch_text.to(self.opt.device), torch.IntTensor(length).to(self.opt.device))\n\n def decode(self, text_index, length):\n \"\"\" convert text-index into text-label. 
\"\"\"\n texts = []\n for index, l in enumerate(length):\n text = ''.join([self.character[i] for i in text_index[index, :]])\n texts.append(text)\n return texts\n\n\nclass Averager(object):\n \"\"\"Compute average for torch.Tensor, used for loss average.\"\"\"\n\n def __init__(self):\n self.reset()\n\n def add(self, v):\n count = v.data.numel()\n v = v.data.sum()\n self.n_count += count\n self.sum += v\n\n def reset(self):\n self.n_count = 0\n self.sum = 0\n\n def val(self):\n res = 0\n if self.n_count != 0:\n res = self.sum / float(self.n_count)\n return res\n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.savefig",
"torch.tensor",
"matplotlib.pyplot.subplot",
"torch.stack",
"matplotlib.pyplot.show"
],
[
"torch.LongTensor",
"torch.IntTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
reiinakano/magenta | [
"fbc059dbdd1c70071472e0b0707cb298f78ca9d2"
] | [
"magenta/models/music_vae/data.py"
] | [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"MusicVAE data library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport copy\nimport functools\nimport itertools\n\nimport numpy as np\nimport tensorflow as tf\n\nimport magenta.music as mm\nfrom magenta.music import chords_lib\nfrom magenta.music import drums_encoder_decoder\nfrom magenta.music import sequences_lib\nfrom magenta.protobuf import music_pb2\n\nPIANO_MIN_MIDI_PITCH = 21\nPIANO_MAX_MIDI_PITCH = 108\nMIN_MIDI_PITCH = 0\nMAX_MIDI_PITCH = 127\nMIDI_PITCHES = 128\n\nMAX_INSTRUMENT_NUMBER = 127\n\nMEL_PROGRAMS = range(0, 32) # piano, chromatic percussion, organ, guitar\nBASS_PROGRAMS = range(32, 40)\nELECTRIC_BASS_PROGRAM = 33\n\nREDUCED_DRUM_PITCH_CLASSES = drums_encoder_decoder.DEFAULT_DRUM_TYPE_PITCHES\nFULL_DRUM_PITCH_CLASSES = [ # 61 classes\n [p] for c in drums_encoder_decoder.DEFAULT_DRUM_TYPE_PITCHES for p in c]\n\nOUTPUT_VELOCITY = 80\n\nCHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL\n\n\ndef _maybe_pad_seqs(seqs, dtype):\n \"\"\"Pads sequences to match the longest and returns as a numpy array.\"\"\"\n if not len(seqs): # pylint:disable=g-explicit-length-test,len-as-condition\n return np.zeros((0, 0, 0), dtype)\n lengths = [len(s) for s in seqs]\n if len(set(lengths)) == 1:\n return np.array(seqs, dtype)\n else:\n length = max(lengths)\n return (np.array([np.pad(s, [(0, length - len(s)), (0, 0)], mode='constant')\n for s in seqs], dtype))\n\n\ndef _extract_instrument(note_sequence, instrument):\n extracted_ns = copy.copy(note_sequence)\n del extracted_ns.notes[:]\n extracted_ns.notes.extend(\n n for n in note_sequence.notes if n.instrument == instrument)\n return extracted_ns\n\n\ndef np_onehot(indices, depth, dtype=np.bool):\n \"\"\"Converts 1D array of indices to a one-hot 2D array with given depth.\"\"\"\n onehot_seq = np.zeros((len(indices), depth), dtype=dtype)\n onehot_seq[np.arange(len(indices)), indices] = 1.0\n return onehot_seq\n\n\nclass NoteSequenceAugmenter(object):\n \"\"\"Class for augmenting NoteSequences.\n\n Args:\n transpose_range: A tuple containing the inclusive, integer range of\n transpose amounts to sample from. 
If None, no transposition is applied.\n stretch_range: A tuple containing the inclusive, float range of stretch\n amounts to sample from.\n Returns:\n The augmented NoteSequence.\n \"\"\"\n\n def __init__(self, transpose_range=None, stretch_range=None):\n self._transpose_range = transpose_range\n self._stretch_range = stretch_range\n\n def augment(self, note_sequence):\n \"\"\"Python implementation that augments the NoteSequence.\n\n Args:\n note_sequence: A NoteSequence proto to be augmented.\n\n Returns:\n The randomly augmented NoteSequence.\n \"\"\"\n transpose_min, transpose_max = (\n self._transpose_range if self._transpose_range else (0, 0))\n stretch_min, stretch_max = (\n self._stretch_range if self._stretch_range else (1.0, 1.0))\n\n return sequences_lib.augment_note_sequence(\n note_sequence,\n stretch_min,\n stretch_max,\n transpose_min,\n transpose_max,\n delete_out_of_range_notes=True)\n\n def tf_augment(self, note_sequence_scalar):\n \"\"\"TF op that augments the NoteSequence.\"\"\"\n def _augment_str(note_sequence_str):\n note_sequence = music_pb2.NoteSequence.FromString(note_sequence_str)\n augmented_ns = self.augment(note_sequence)\n return [augmented_ns.SerializeToString()]\n\n augmented_note_sequence_scalar = tf.py_func(\n _augment_str,\n [note_sequence_scalar],\n tf.string,\n stateful=False,\n name='augment')\n augmented_note_sequence_scalar.set_shape(())\n return augmented_note_sequence_scalar\n\n\nclass ConverterTensors(collections.namedtuple(\n 'ConverterTensors', ['inputs', 'outputs', 'controls', 'lengths'])):\n \"\"\"Tuple of tensors output by `to_tensors` method in converters.\n\n Attributes:\n inputs: Input tensors to feed to the encoder.\n outputs: Output tensors to feed to the decoder.\n controls: (Optional) tensors to use as controls for both encoding and\n decoding.\n lengths: Length of each input/output/control sequence.\n \"\"\"\n\n def __new__(cls, inputs=None, outputs=None, controls=None, lengths=None):\n if inputs is None:\n inputs = []\n if outputs is None:\n outputs = []\n if lengths is None:\n lengths = [len(i) for i in inputs]\n if not controls:\n controls = [np.zeros([l, 0]) for l in lengths]\n return super(ConverterTensors, cls).__new__(\n cls, inputs, outputs, controls, lengths)\n\n\nclass BaseConverter(object):\n \"\"\"Base class for data converters between items and tensors.\n\n Inheriting classes must implement the following abstract methods:\n -`_to_tensors`\n -`_to_items`\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, input_depth, input_dtype, output_depth, output_dtype,\n control_depth=0, control_dtype=np.bool, end_token=None,\n max_tensors_per_item=None,\n str_to_item_fn=lambda s: s, length_shape=()):\n \"\"\"Initializes BaseConverter.\n\n Args:\n input_depth: Depth of final dimension of input (encoder) tensors.\n input_dtype: DType of input (encoder) tensors.\n output_depth: Depth of final dimension of output (decoder) tensors.\n output_dtype: DType of output (decoder) tensors.\n control_depth: Depth of final dimension of control tensors, or zero if not\n conditioning on control tensors.\n control_dtype: DType of control tensors.\n end_token: Optional end token.\n max_tensors_per_item: The maximum number of outputs to return for each\n input.\n str_to_item_fn: Callable to convert raw string input into an item for\n conversion.\n length_shape: Shape of length returned by `to_tensor`.\n \"\"\"\n self._input_depth = input_depth\n self._input_dtype = input_dtype\n self._output_depth = output_depth\n self._output_dtype = 
output_dtype\n self._control_depth = control_depth\n self._control_dtype = control_dtype\n self._end_token = end_token\n self._max_tensors_per_input = max_tensors_per_item\n self._str_to_item_fn = str_to_item_fn\n self._is_training = False\n self._length_shape = length_shape\n\n @property\n def is_training(self):\n return self._is_training\n\n @property\n def str_to_item_fn(self):\n return self._str_to_item_fn\n\n @is_training.setter\n def is_training(self, value):\n self._is_training = value\n\n @property\n def max_tensors_per_item(self):\n return self._max_tensors_per_input\n\n @max_tensors_per_item.setter\n def max_tensors_per_item(self, value):\n self._max_tensors_per_input = value\n\n @property\n def end_token(self):\n \"\"\"End token, or None.\"\"\"\n return self._end_token\n\n @property\n def input_depth(self):\n \"\"\"Dimension of inputs (to encoder) at each timestep of the sequence.\"\"\"\n return self._input_depth\n\n @property\n def input_dtype(self):\n \"\"\"DType of inputs (to encoder).\"\"\"\n return self._input_dtype\n\n @property\n def output_depth(self):\n \"\"\"Dimension of outputs (from decoder) at each timestep of the sequence.\"\"\"\n return self._output_depth\n\n @property\n def output_dtype(self):\n \"\"\"DType of outputs (from decoder).\"\"\"\n return self._output_dtype\n\n @property\n def control_depth(self):\n \"\"\"Dimension of control inputs at each timestep of the sequence.\"\"\"\n return self._control_depth\n\n @property\n def control_dtype(self):\n \"\"\"DType of control inputs.\"\"\"\n return self._control_dtype\n\n @property\n def length_shape(self):\n \"\"\"Shape of length returned by `to_tensor`.\"\"\"\n return self._length_shape\n\n @abc.abstractmethod\n def _to_tensors(self, item):\n \"\"\"Implementation that converts item into encoder/decoder tensors.\n\n Args:\n item: Item to convert.\n\n Returns:\n A ConverterTensors struct containing encoder inputs, decoder outputs,\n (optional) control tensors used for both encoding and decoding, and\n sequence lengths.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _to_items(self, samples, controls=None):\n \"\"\"Implementation that decodes model samples into list of items.\"\"\"\n pass\n\n def _maybe_sample_outputs(self, outputs):\n \"\"\"If should limit outputs, returns up to limit (randomly if training).\"\"\"\n if (not self.max_tensors_per_item or\n len(outputs) <= self.max_tensors_per_item):\n return outputs\n if self.is_training:\n indices = set(np.random.choice(\n len(outputs), size=self.max_tensors_per_item, replace=False))\n return [outputs[i] for i in indices]\n else:\n return outputs[:self.max_tensors_per_item]\n\n def to_tensors(self, item):\n \"\"\"Python method that converts `item` into list of tensors.\"\"\"\n tensors = self._to_tensors(item)\n sampled_results = self._maybe_sample_outputs(list(zip(*tensors)))\n return (ConverterTensors(*zip(*sampled_results))\n if sampled_results else ConverterTensors())\n\n def _combine_to_tensor_results(self, to_tensor_results):\n \"\"\"Combines the results of multiple to_tensors calls into one result.\"\"\"\n results = []\n for result in to_tensor_results:\n results.extend(zip(*result))\n sampled_results = self._maybe_sample_outputs(results)\n return (ConverterTensors(*zip(*sampled_results))\n if sampled_results else ConverterTensors())\n\n def to_items(self, samples, controls=None):\n \"\"\"Python method that decodes samples into list of items.\"\"\"\n if controls is None:\n return self._to_items(samples)\n else:\n return self._to_items(samples, controls)\n\n def 
tf_to_tensors(self, item_scalar):\n \"\"\"TensorFlow op that converts item into output tensors.\n\n Sequences will be padded to match the length of the longest.\n\n Args:\n item_scalar: A scalar of type tf.String containing the raw item to be\n converted to tensors.\n\n Returns:\n inputs: A Tensor, shaped [num encoded seqs, max(lengths), input_depth],\n containing the padded input encodings.\n outputs: A Tensor, shaped [num encoded seqs, max(lengths), output_depth],\n containing the padded output encodings resulting from the input.\n controls: A Tensor, shaped\n [num encoded seqs, max(lengths), control_depth], containing the padded\n control encodings.\n lengths: A tf.int32 Tensor, shaped [num encoded seqs], containing the\n unpadded lengths of the tensor sequences resulting from the input.\n \"\"\"\n def _convert_and_pad(item_str):\n item = self.str_to_item_fn(item_str) # pylint:disable=not-callable\n tensors = self.to_tensors(item)\n inputs = _maybe_pad_seqs(tensors.inputs, self.input_dtype)\n outputs = _maybe_pad_seqs(tensors.outputs, self.output_dtype)\n controls = _maybe_pad_seqs(tensors.controls, self.control_dtype)\n return inputs, outputs, controls, np.array(tensors.lengths, np.int32)\n inputs, outputs, controls, lengths = tf.py_func(\n _convert_and_pad,\n [item_scalar],\n [self.input_dtype, self.output_dtype, self.control_dtype, tf.int32],\n stateful=False,\n name='convert_and_pad')\n inputs.set_shape([None, None, self.input_depth])\n outputs.set_shape([None, None, self.output_depth])\n controls.set_shape([None, None, self.control_depth])\n lengths.set_shape([None] + list(self.length_shape))\n return inputs, outputs, controls, lengths\n\n\ndef preprocess_notesequence(note_sequence, presplit_on_time_changes):\n \"\"\"Preprocesses a single NoteSequence, resulting in multiple sequences.\"\"\"\n if presplit_on_time_changes:\n note_sequences = sequences_lib.split_note_sequence_on_time_changes(\n note_sequence)\n else:\n note_sequences = [note_sequence]\n\n return note_sequences\n\n\nclass BaseNoteSequenceConverter(BaseConverter):\n \"\"\"Base class for NoteSequence data converters.\n\n Inheriting classes must implement the following abstract methods:\n -`_to_tensors`\n -`_to_notesequences`\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, input_depth, input_dtype, output_depth, output_dtype,\n control_depth=0, control_dtype=np.bool, end_token=None,\n presplit_on_time_changes=True,\n max_tensors_per_notesequence=None):\n \"\"\"Initializes BaseNoteSequenceConverter.\n\n Args:\n input_depth: Depth of final dimension of input (encoder) tensors.\n input_dtype: DType of input (encoder) tensors.\n output_depth: Depth of final dimension of output (decoder) tensors.\n output_dtype: DType of output (decoder) tensors.\n control_depth: Depth of final dimension of control tensors, or zero if not\n conditioning on control tensors.\n control_dtype: DType of control tensors.\n end_token: Optional end token.\n presplit_on_time_changes: Whether to split NoteSequence on time changes\n before converting.\n max_tensors_per_notesequence: The maximum number of outputs to return\n for each NoteSequence.\n \"\"\"\n super(BaseNoteSequenceConverter, self).__init__(\n input_depth, input_dtype, output_depth, output_dtype,\n control_depth, control_dtype, end_token,\n max_tensors_per_item=max_tensors_per_notesequence,\n str_to_item_fn=music_pb2.NoteSequence.FromString)\n\n self._presplit_on_time_changes = presplit_on_time_changes\n\n @property\n def max_tensors_per_notesequence(self):\n return 
self.max_tensors_per_item\n\n @max_tensors_per_notesequence.setter\n def max_tensors_per_notesequence(self, value):\n self.max_tensors_per_item = value\n\n @abc.abstractmethod\n def _to_notesequences(self, samples, controls=None):\n \"\"\"Implementation that decodes model samples into list of NoteSequences.\"\"\"\n pass\n\n def to_notesequences(self, samples, controls=None):\n \"\"\"Python method that decodes samples into list of NoteSequences.\"\"\"\n return self._to_items(samples, controls)\n\n def to_tensors(self, note_sequence):\n \"\"\"Python method that converts `note_sequence` into list of tensors.\"\"\"\n note_sequences = preprocess_notesequence(\n note_sequence, self._presplit_on_time_changes)\n\n results = []\n for ns in note_sequences:\n results.append(super(BaseNoteSequenceConverter, self).to_tensors(ns))\n return self._combine_to_tensor_results(results)\n\n def _to_items(self, samples, controls=None):\n \"\"\"Python method that decodes samples into list of NoteSequences.\"\"\"\n if controls is None:\n return self._to_notesequences(samples)\n else:\n return self._to_notesequences(samples, controls)\n\n\nclass LegacyEventListOneHotConverter(BaseNoteSequenceConverter):\n \"\"\"Converts NoteSequences using legacy OneHotEncoding framework.\n\n Quantizes the sequences, extracts event lists in the requested size range,\n uniquifies, and converts to encoding. Uses the OneHotEncoding's\n output encoding for both the input and output.\n\n Args:\n event_list_fn: A function that returns a new EventSequence.\n event_extractor_fn: A function for extracing events into EventSequences. The\n sole input should be the quantized NoteSequence.\n legacy_encoder_decoder: An instantiated OneHotEncoding object to use.\n add_end_token: Whether or not to add an end token. 
Recommended to be False\n for fixed-length outputs.\n slice_bars: Optional size of window to slide over raw event lists after\n extraction.\n steps_per_quarter: The number of quantization steps per quarter note.\n Mututally exclusive with `steps_per_second`.\n steps_per_second: The number of quantization steps per second.\n Mututally exclusive with `steps_per_quarter`.\n quarters_per_bar: The number of quarter notes per bar.\n pad_to_total_time: Pads each input/output tensor to the total time of the\n NoteSequence.\n max_tensors_per_notesequence: The maximum number of outputs to return\n for each NoteSequence.\n presplit_on_time_changes: Whether to split NoteSequence on time changes\n before converting.\n chord_encoding: An instantiated OneHotEncoding object to use for encoding\n chords on which to condition, or None if not conditioning on chords.\n \"\"\"\n\n def __init__(self, event_list_fn, event_extractor_fn,\n legacy_encoder_decoder, add_end_token=False, slice_bars=None,\n slice_steps=None, steps_per_quarter=None, steps_per_second=None,\n quarters_per_bar=4, pad_to_total_time=False,\n max_tensors_per_notesequence=None,\n presplit_on_time_changes=True, chord_encoding=None):\n if (steps_per_quarter, steps_per_second).count(None) != 1:\n raise ValueError(\n 'Exactly one of `steps_per_quarter` and `steps_per_second` should be '\n 'provided.')\n if (slice_bars, slice_steps).count(None) == 0:\n raise ValueError(\n 'At most one of `slice_bars` and `slice_steps` should be provided.')\n self._event_list_fn = event_list_fn\n self._event_extractor_fn = event_extractor_fn\n self._legacy_encoder_decoder = legacy_encoder_decoder\n self._chord_encoding = chord_encoding\n self._steps_per_quarter = steps_per_quarter\n if steps_per_quarter:\n self._steps_per_bar = steps_per_quarter * quarters_per_bar\n self._steps_per_second = steps_per_second\n if slice_bars:\n self._slice_steps = self._steps_per_bar * slice_bars\n else:\n self._slice_steps = slice_steps\n self._pad_to_total_time = pad_to_total_time\n\n depth = legacy_encoder_decoder.num_classes + add_end_token\n control_depth = (chord_encoding.num_classes\n if chord_encoding is not None else 0)\n super(LegacyEventListOneHotConverter, self).__init__(\n input_depth=depth,\n input_dtype=np.bool,\n output_depth=depth,\n output_dtype=np.bool,\n control_depth=control_depth,\n control_dtype=np.bool,\n end_token=legacy_encoder_decoder.num_classes if add_end_token else None,\n presplit_on_time_changes=presplit_on_time_changes,\n max_tensors_per_notesequence=max_tensors_per_notesequence)\n\n def _to_tensors(self, note_sequence):\n \"\"\"Converts NoteSequence to unique, one-hot tensor sequences.\"\"\"\n try:\n if self._steps_per_quarter:\n quantized_sequence = mm.quantize_note_sequence(\n note_sequence, self._steps_per_quarter)\n if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=\n self._steps_per_bar):\n return ConverterTensors()\n else:\n quantized_sequence = mm.quantize_note_sequence_absolute(\n note_sequence, self._steps_per_second)\n except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,\n mm.NegativeTimeException) as e:\n return ConverterTensors()\n\n if self._chord_encoding and not any(\n ta.annotation_type == CHORD_SYMBOL\n for ta in quantized_sequence.text_annotations):\n # We are conditioning on chords but sequence does not have chords. 
Try to\n # infer them.\n try:\n mm.infer_chords_for_sequence(quantized_sequence)\n except mm.ChordInferenceException:\n return ConverterTensors()\n\n event_lists, unused_stats = self._event_extractor_fn(quantized_sequence)\n if self._pad_to_total_time:\n for e in event_lists:\n e.set_length(len(e) + e.start_step, from_left=True)\n e.set_length(quantized_sequence.total_quantized_steps)\n if self._slice_steps:\n sliced_event_lists = []\n for l in event_lists:\n for i in range(self._slice_steps, len(l) + 1, self._steps_per_bar):\n sliced_event_lists.append(l[i - self._slice_steps: i])\n else:\n sliced_event_lists = event_lists\n\n if self._chord_encoding:\n try:\n sliced_chord_lists = chords_lib.event_list_chords(\n quantized_sequence, sliced_event_lists)\n except chords_lib.CoincidentChordsException:\n return ConverterTensors()\n sliced_event_lists = [zip(el, cl) for el, cl in zip(sliced_event_lists,\n sliced_chord_lists)]\n\n # TODO(adarob): Consider handling the fact that different event lists can\n # be mapped to identical tensors by the encoder_decoder (e.g., Drums).\n\n unique_event_tuples = list(set(tuple(l) for l in sliced_event_lists))\n unique_event_tuples = self._maybe_sample_outputs(unique_event_tuples)\n\n if not unique_event_tuples:\n return ConverterTensors()\n\n control_seqs = []\n if self._chord_encoding:\n unique_event_tuples, unique_chord_tuples = zip(\n *[zip(*t) for t in unique_event_tuples if t])\n for t in unique_chord_tuples:\n try:\n chord_tokens = [self._chord_encoding.encode_event(e) for e in t]\n if self.end_token:\n # Repeat the last chord instead of using a special token; otherwise\n # the model may learn to rely on the special token to detect\n # endings.\n chord_tokens.append(chord_tokens[-1] if chord_tokens else\n self._chord_encoding.encode_event(mm.NO_CHORD))\n except (mm.ChordSymbolException, mm.ChordEncodingException):\n return ConverterTensors()\n control_seqs.append(\n np_onehot(chord_tokens, self.control_depth, self.control_dtype))\n\n seqs = []\n for t in unique_event_tuples:\n seqs.append(np_onehot(\n [self._legacy_encoder_decoder.encode_event(e) for e in t] +\n ([] if self.end_token is None else [self.end_token]),\n self.output_depth, self.output_dtype))\n\n return ConverterTensors(inputs=seqs, outputs=seqs, controls=control_seqs)\n\n def _to_notesequences(self, samples, controls=None):\n output_sequences = []\n for i, sample in enumerate(samples):\n s = np.argmax(sample, axis=-1)\n if self.end_token is not None and self.end_token in s.tolist():\n end_index = s.tolist().index(self.end_token)\n else:\n end_index = len(s)\n s = s[:end_index]\n event_list = self._event_list_fn()\n for e in s:\n assert e != self.end_token\n event_list.append(self._legacy_encoder_decoder.decode_event(e))\n if self._steps_per_quarter:\n qpm = mm.DEFAULT_QUARTERS_PER_MINUTE\n seconds_per_step = 60.0 / (self._steps_per_quarter * qpm)\n sequence = event_list.to_sequence(velocity=OUTPUT_VELOCITY, qpm=qpm)\n else:\n seconds_per_step = 1.0 / self._steps_per_second\n sequence = event_list.to_sequence(velocity=OUTPUT_VELOCITY)\n if self._chord_encoding and controls is not None:\n chords = [self._chord_encoding.decode_event(e)\n for e in np.argmax(controls[i], axis=-1)[:end_index]]\n chord_times = [step * seconds_per_step for step in event_list.steps]\n chords_lib.add_chords_to_sequence(sequence, chords, chord_times)\n output_sequences.append(sequence)\n return output_sequences\n\n\nclass OneHotMelodyConverter(LegacyEventListOneHotConverter):\n \"\"\"Converter for legacy 
MelodyOneHotEncoding.\n\n Args:\n min_pitch: The minimum pitch to model. Those below this value will be\n ignored.\n max_pitch: The maximum pitch to model. Those above this value will be\n ignored.\n valid_programs: Optional set of program numbers to allow.\n skip_polyphony: Whether to skip polyphonic instruments. If False, the\n highest pitch will be taken in polyphonic sections.\n max_bars: Optional maximum number of bars per extracted melody, before\n slicing.\n slice_bars: Optional size of window to slide over raw Melodies after\n extraction.\n gap_bars: If this many bars or more of non-events follow a note event, the\n melody is ended. Disabled when set to 0 or None.\n steps_per_quarter: The number of quantization steps per quarter note.\n quarters_per_bar: The number of quarter notes per bar.\n pad_to_total_time: Pads each input/output tensor to the total time of the\n NoteSequence.\n add_end_token: Whether to add an end token at the end of each sequence.\n max_tensors_per_notesequence: The maximum number of outputs to return\n for each NoteSequence.\n chord_encoding: An instantiated OneHotEncoding object to use for encoding\n chords on which to condition, or None if not conditioning on chords.\n \"\"\"\n\n def __init__(self, min_pitch=PIANO_MIN_MIDI_PITCH,\n max_pitch=PIANO_MAX_MIDI_PITCH, valid_programs=None,\n skip_polyphony=False, max_bars=None, slice_bars=None,\n gap_bars=1.0, steps_per_quarter=4, quarters_per_bar=4,\n add_end_token=False, pad_to_total_time=False,\n max_tensors_per_notesequence=5, presplit_on_time_changes=True,\n chord_encoding=None):\n self._min_pitch = min_pitch\n self._max_pitch = max_pitch\n self._valid_programs = valid_programs\n steps_per_bar = steps_per_quarter * quarters_per_bar\n max_steps_truncate = steps_per_bar * max_bars if max_bars else None\n\n def melody_fn():\n return mm.Melody(\n steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)\n melody_extractor_fn = functools.partial(\n mm.extract_melodies,\n min_bars=1,\n gap_bars=gap_bars or float('inf'),\n max_steps_truncate=max_steps_truncate,\n min_unique_pitches=1,\n ignore_polyphonic_notes=not skip_polyphony,\n pad_end=True)\n super(OneHotMelodyConverter, self).__init__(\n melody_fn,\n melody_extractor_fn,\n mm.MelodyOneHotEncoding(min_pitch, max_pitch + 1),\n add_end_token=add_end_token,\n slice_bars=slice_bars,\n pad_to_total_time=pad_to_total_time,\n steps_per_quarter=steps_per_quarter,\n quarters_per_bar=quarters_per_bar,\n max_tensors_per_notesequence=max_tensors_per_notesequence,\n presplit_on_time_changes=presplit_on_time_changes,\n chord_encoding=chord_encoding)\n\n def _to_tensors(self, note_sequence):\n def is_valid(note):\n if (self._valid_programs is not None and\n note.program not in self._valid_programs):\n return False\n return self._min_pitch <= note.pitch <= self._max_pitch\n notes = list(note_sequence.notes)\n del note_sequence.notes[:]\n note_sequence.notes.extend([n for n in notes if is_valid(n)])\n return super(OneHotMelodyConverter, self)._to_tensors(note_sequence)\n\n\nclass DrumsConverter(BaseNoteSequenceConverter):\n \"\"\"Converter for legacy drums with either pianoroll or one-hot tensors.\n\n Inputs/outputs are either a \"pianoroll\"-like encoding of all possible drum\n hits at a given step, or a one-hot encoding of the pianoroll.\n\n The \"roll\" input encoding includes a final NOR bit (after the optional end\n token).\n\n Args:\n max_bars: Optional maximum number of bars per extracted drums, before\n slicing.\n slice_bars: Optional size of window to slide 
over raw Melodies after\n extraction.\n gap_bars: If this many bars or more follow a non-empty drum event, the\n drum track is ended. Disabled when set to 0 or None.\n pitch_classes: A collection of collections, with each sub-collection\n containing the set of pitches representing a single class to group by. By\n default, groups valid drum pitches into 9 different classes.\n add_end_token: Whether or not to add an end token. Recommended to be False\n for fixed-length outputs.\n steps_per_quarter: The number of quantization steps per quarter note.\n quarters_per_bar: The number of quarter notes per bar.\n pad_to_total_time: Pads each input/output tensor to the total time of the\n NoteSequence.\n roll_input: Whether to use a pianoroll-like representation as the input\n instead of a one-hot encoding.\n roll_output: Whether to use a pianoroll-like representation as the output\n instead of a one-hot encoding.\n max_tensors_per_notesequence: The maximum number of outputs to return\n for each NoteSequence.\n presplit_on_time_changes: Whether to split NoteSequence on time changes\n before converting.\n \"\"\"\n\n def __init__(self, max_bars=None, slice_bars=None, gap_bars=1.0,\n pitch_classes=None, add_end_token=False, steps_per_quarter=4,\n quarters_per_bar=4, pad_to_total_time=False, roll_input=False,\n roll_output=False, max_tensors_per_notesequence=5,\n presplit_on_time_changes=True):\n self._pitch_classes = pitch_classes or REDUCED_DRUM_PITCH_CLASSES\n self._pitch_class_map = {\n p: i for i, pitches in enumerate(self._pitch_classes) for p in pitches}\n\n self._steps_per_quarter = steps_per_quarter\n self._steps_per_bar = steps_per_quarter * quarters_per_bar\n self._slice_steps = self._steps_per_bar * slice_bars if slice_bars else None\n self._pad_to_total_time = pad_to_total_time\n self._roll_input = roll_input\n self._roll_output = roll_output\n\n self._drums_extractor_fn = functools.partial(\n mm.extract_drum_tracks,\n min_bars=1,\n gap_bars=gap_bars or float('inf'),\n max_steps_truncate=self._steps_per_bar * max_bars if max_bars else None,\n pad_end=True)\n\n num_classes = len(self._pitch_classes)\n\n self._pr_encoder_decoder = mm.PianorollEncoderDecoder(\n input_size=num_classes + add_end_token)\n # Use pitch classes as `drum_type_pitches` since we have already done the\n # mapping.\n self._oh_encoder_decoder = mm.MultiDrumOneHotEncoding(\n drum_type_pitches=[(i,) for i in range(num_classes)])\n\n output_depth = (num_classes if self._roll_output else\n self._oh_encoder_decoder.num_classes) + add_end_token\n super(DrumsConverter, self).__init__(\n input_depth=(\n num_classes + 1 if self._roll_input else\n self._oh_encoder_decoder.num_classes) + add_end_token,\n input_dtype=np.bool,\n output_depth=output_depth,\n output_dtype=np.bool,\n end_token=output_depth - 1 if add_end_token else None,\n presplit_on_time_changes=presplit_on_time_changes,\n max_tensors_per_notesequence=max_tensors_per_notesequence)\n\n def _to_tensors(self, note_sequence):\n \"\"\"Converts NoteSequence to unique sequences.\"\"\"\n try:\n quantized_sequence = mm.quantize_note_sequence(\n note_sequence, self._steps_per_quarter)\n if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=\n self._steps_per_bar):\n return ConverterTensors()\n except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,\n mm.NegativeTimeException) as e:\n return ConverterTensors()\n\n new_notes = []\n for n in quantized_sequence.notes:\n if not n.is_drum:\n continue\n if n.pitch not in self._pitch_class_map:\n continue\n 
n.pitch = self._pitch_class_map[n.pitch]\n new_notes.append(n)\n del quantized_sequence.notes[:]\n quantized_sequence.notes.extend(new_notes)\n\n event_lists, unused_stats = self._drums_extractor_fn(quantized_sequence)\n\n if self._pad_to_total_time:\n for e in event_lists:\n e.set_length(len(e) + e.start_step, from_left=True)\n e.set_length(quantized_sequence.total_quantized_steps)\n if self._slice_steps:\n sliced_event_tuples = []\n for l in event_lists:\n for i in range(self._slice_steps, len(l) + 1, self._steps_per_bar):\n sliced_event_tuples.append(tuple(l[i - self._slice_steps: i]))\n else:\n sliced_event_tuples = [tuple(l) for l in event_lists]\n\n unique_event_tuples = list(set(sliced_event_tuples))\n unique_event_tuples = self._maybe_sample_outputs(unique_event_tuples)\n\n rolls = []\n oh_vecs = []\n for t in unique_event_tuples:\n if self._roll_input or self._roll_output:\n if self.end_token is not None:\n t_roll = list(t) + [(self._pr_encoder_decoder.input_size - 1,)]\n else:\n t_roll = t\n rolls.append(np.vstack([\n self._pr_encoder_decoder.events_to_input(t_roll, i).astype(np.bool)\n for i in range(len(t_roll))]))\n if not (self._roll_input and self._roll_output):\n labels = [self._oh_encoder_decoder.encode_event(e) for e in t]\n if self.end_token is not None:\n labels += [self._oh_encoder_decoder.num_classes]\n oh_vecs.append(np_onehot(\n labels,\n self._oh_encoder_decoder.num_classes + (self.end_token is not None),\n np.bool))\n\n if self._roll_input:\n input_seqs = [\n np.append(roll, np.expand_dims(np.all(roll == 0, axis=1), axis=1),\n axis=1) for roll in rolls]\n else:\n input_seqs = oh_vecs\n\n output_seqs = rolls if self._roll_output else oh_vecs\n\n return ConverterTensors(inputs=input_seqs, outputs=output_seqs)\n\n def _to_notesequences(self, samples):\n output_sequences = []\n for s in samples:\n if self._roll_output:\n if self.end_token is not None:\n end_i = np.where(s[:, self.end_token])\n if len(end_i): # pylint: disable=g-explicit-length-test,len-as-condition\n s = s[:end_i[0]]\n events_list = [frozenset(np.where(e)[0]) for e in s]\n else:\n s = np.argmax(s, axis=-1)\n if self.end_token is not None and self.end_token in s:\n s = s[:s.tolist().index(self.end_token)]\n events_list = [self._oh_encoder_decoder.decode_event(e) for e in s]\n # Map classes to exemplars.\n events_list = [\n frozenset(self._pitch_classes[c][0] for c in e) for e in events_list]\n track = mm.DrumTrack(\n events=events_list, steps_per_bar=self._steps_per_bar,\n steps_per_quarter=self._steps_per_quarter)\n output_sequences.append(track.to_sequence(velocity=OUTPUT_VELOCITY))\n return output_sequences\n\n\nclass TrioConverter(BaseNoteSequenceConverter):\n \"\"\"Converts to/from 3-part (mel, drums, bass) multi-one-hot events.\n\n Extracts overlapping segments with melody, drums, and bass (determined by\n program number) and concatenates one-hot tensors from OneHotMelodyConverter\n and OneHotDrumsConverter. Takes the cross products from the sets of\n instruments of each type.\n\n Args:\n slice_bars: Optional size of window to slide over full converted tensor.\n gap_bars: The number of consecutive empty bars to allow for any given\n instrument. 
Note that this number is effectively doubled for internal\n gaps.\n max_bars: Optional maximum number of bars per extracted sequence, before\n slicing.\n steps_per_quarter: The number of quantization steps per quarter note.\n quarters_per_bar: The number of quarter notes per bar.\n max_tensors_per_notesequence: The maximum number of outputs to return\n for each NoteSequence.\n chord_encoding: An instantiated OneHotEncoding object to use for encoding\n chords on which to condition, or None if not conditioning on chords.\n \"\"\"\n\n class InstrumentType(object):\n UNK = 0\n MEL = 1\n BASS = 2\n DRUMS = 3\n INVALID = 4\n\n def __init__(\n self, slice_bars=None, gap_bars=2, max_bars=1024, steps_per_quarter=4,\n quarters_per_bar=4, max_tensors_per_notesequence=5, chord_encoding=None):\n self._melody_converter = OneHotMelodyConverter(\n gap_bars=None, steps_per_quarter=steps_per_quarter,\n pad_to_total_time=True, presplit_on_time_changes=False,\n max_tensors_per_notesequence=None, chord_encoding=chord_encoding)\n self._drums_converter = DrumsConverter(\n gap_bars=None, steps_per_quarter=steps_per_quarter,\n pad_to_total_time=True, presplit_on_time_changes=False,\n max_tensors_per_notesequence=None)\n self._slice_bars = slice_bars\n self._gap_bars = gap_bars\n self._max_bars = max_bars\n self._steps_per_quarter = steps_per_quarter\n self._steps_per_bar = steps_per_quarter * quarters_per_bar\n self._chord_encoding = chord_encoding\n\n self._split_output_depths = (\n self._melody_converter.output_depth,\n self._melody_converter.output_depth,\n self._drums_converter.output_depth)\n output_depth = sum(self._split_output_depths)\n\n self._program_map = dict(\n [(i, TrioConverter.InstrumentType.MEL) for i in MEL_PROGRAMS] +\n [(i, TrioConverter.InstrumentType.BASS) for i in BASS_PROGRAMS])\n\n super(TrioConverter, self).__init__(\n input_depth=output_depth,\n input_dtype=np.bool,\n output_depth=output_depth,\n output_dtype=np.bool,\n control_depth=self._melody_converter.control_depth,\n control_dtype=self._melody_converter.control_dtype,\n end_token=False,\n presplit_on_time_changes=True,\n max_tensors_per_notesequence=max_tensors_per_notesequence)\n\n def _to_tensors(self, note_sequence):\n try:\n quantized_sequence = mm.quantize_note_sequence(\n note_sequence, self._steps_per_quarter)\n if (mm.steps_per_bar_in_quantized_sequence(quantized_sequence) !=\n self._steps_per_bar):\n return ConverterTensors()\n except (mm.BadTimeSignatureException, mm.NonIntegerStepsPerBarException,\n mm.NegativeTimeException):\n return ConverterTensors()\n\n if self._chord_encoding and not any(\n ta.annotation_type == CHORD_SYMBOL\n for ta in quantized_sequence.text_annotations):\n # We are conditioning on chords but sequence does not have chords. 
Try to\n # infer them.\n try:\n mm.infer_chords_for_sequence(quantized_sequence)\n except mm.ChordInferenceException:\n return ConverterTensors()\n\n # The trio parts get extracted from the original NoteSequence, so copy the\n # inferred chords back to that one.\n for qta in quantized_sequence.text_annotations:\n if qta.annotation_type == CHORD_SYMBOL:\n ta = note_sequence.text_annotations.add()\n ta.annotation_type = CHORD_SYMBOL\n ta.time = qta.time\n ta.text = qta.text\n\n total_bars = int(\n np.ceil(quantized_sequence.total_quantized_steps / self._steps_per_bar))\n total_bars = min(total_bars, self._max_bars)\n\n # Assign an instrument class for each instrument, and compute its coverage.\n # If an instrument has multiple classes, it is considered INVALID.\n instrument_type = np.zeros(MAX_INSTRUMENT_NUMBER + 1, np.uint8)\n coverage = np.zeros((total_bars, MAX_INSTRUMENT_NUMBER + 1), np.bool)\n for note in quantized_sequence.notes:\n i = note.instrument\n if i > MAX_INSTRUMENT_NUMBER:\n tf.logging.warning('Skipping invalid instrument number: %d', i)\n continue\n inferred_type = (\n self.InstrumentType.DRUMS if note.is_drum else\n self._program_map.get(note.program, self.InstrumentType.INVALID))\n if not instrument_type[i]:\n instrument_type[i] = inferred_type\n elif instrument_type[i] != inferred_type:\n instrument_type[i] = self.InstrumentType.INVALID\n\n start_bar = note.quantized_start_step // self._steps_per_bar\n end_bar = int(np.ceil(note.quantized_end_step / self._steps_per_bar))\n\n if start_bar >= total_bars:\n continue\n coverage[start_bar:min(end_bar, total_bars), i] = True\n\n # Group instruments by type.\n instruments_by_type = collections.defaultdict(list)\n for i, type_ in enumerate(instrument_type):\n if type_ not in (self.InstrumentType.UNK, self.InstrumentType.INVALID):\n instruments_by_type[type_].append(i)\n if len(instruments_by_type) < 3:\n # This NoteSequence doesn't have all 3 types.\n return ConverterTensors()\n\n # Encode individual instruments.\n # Set total time so that instruments will be padded correctly.\n note_sequence.total_time = (\n total_bars * self._steps_per_bar *\n 60 / note_sequence.tempos[0].qpm / self._steps_per_quarter)\n encoded_instruments = {}\n encoded_chords = None\n for i in (instruments_by_type[self.InstrumentType.MEL] +\n instruments_by_type[self.InstrumentType.BASS]):\n tensors = self._melody_converter.to_tensors(\n _extract_instrument(note_sequence, i))\n if tensors.outputs:\n encoded_instruments[i] = tensors.outputs[0]\n if encoded_chords is None:\n encoded_chords = tensors.controls[0]\n elif not np.array_equal(encoded_chords, tensors.controls[0]):\n tf.logging.warning('Trio chords disagreement between instruments.')\n else:\n coverage[:, i] = False\n for i in instruments_by_type[self.InstrumentType.DRUMS]:\n tensors = self._drums_converter.to_tensors(\n _extract_instrument(note_sequence, i))\n if tensors.outputs:\n encoded_instruments[i] = tensors.outputs[0]\n else:\n coverage[:, i] = False\n\n # Fill in coverage gaps up to self._gap_bars.\n og_coverage = coverage.copy()\n for j in range(total_bars):\n coverage[j] = np.any(\n og_coverage[\n max(0, j-self._gap_bars):min(total_bars, j+self._gap_bars) + 1],\n axis=0)\n\n # Take cross product of instruments from each class and compute combined\n # encodings where they overlap.\n seqs = []\n control_seqs = []\n for grp in itertools.product(\n instruments_by_type[self.InstrumentType.MEL],\n instruments_by_type[self.InstrumentType.BASS],\n instruments_by_type[self.InstrumentType.DRUMS]):\n 
# Consider an instrument covered within gap_bars from the end if any of\n # the other instruments are. This allows more leniency when re-encoding\n # slices.\n grp_coverage = np.all(coverage[:, grp], axis=1)\n grp_coverage[:self._gap_bars] = np.any(coverage[:self._gap_bars, grp])\n grp_coverage[-self._gap_bars:] = np.any(coverage[-self._gap_bars:, grp])\n for j in range(total_bars - self._slice_bars + 1):\n if (np.all(grp_coverage[j:j + self._slice_bars]) and\n all(i in encoded_instruments for i in grp)):\n start_step = j * self._steps_per_bar\n end_step = (j + self._slice_bars) * self._steps_per_bar\n seqs.append(np.concatenate(\n [encoded_instruments[i][start_step:end_step] for i in grp],\n axis=-1))\n if encoded_chords is not None:\n control_seqs.append(encoded_chords[start_step:end_step])\n\n return ConverterTensors(inputs=seqs, outputs=seqs, controls=control_seqs)\n\n def _to_notesequences(self, samples, controls=None):\n output_sequences = []\n dim_ranges = np.cumsum(self._split_output_depths)\n for i, s in enumerate(samples):\n mel_ns = self._melody_converter.to_notesequences(\n [s[:, :dim_ranges[0]]],\n [controls[i]] if controls is not None else None)[0]\n bass_ns = self._melody_converter.to_notesequences(\n [s[:, dim_ranges[0]:dim_ranges[1]]])[0]\n drums_ns = self._drums_converter.to_notesequences(\n [s[:, dim_ranges[1]:]])[0]\n\n for n in bass_ns.notes:\n n.instrument = 1\n n.program = ELECTRIC_BASS_PROGRAM\n for n in drums_ns.notes:\n n.instrument = 9\n\n ns = mel_ns\n ns.notes.extend(bass_ns.notes)\n ns.notes.extend(drums_ns.notes)\n ns.total_time = max(\n mel_ns.total_time, bass_ns.total_time, drums_ns.total_time)\n output_sequences.append(ns)\n return output_sequences\n\n\ndef count_examples(examples_path, data_converter,\n file_reader=tf.python_io.tf_record_iterator):\n \"\"\"Counts the number of examples produced by the converter from files.\"\"\"\n filenames = tf.gfile.Glob(examples_path)\n\n num_examples = 0\n\n for f in filenames:\n tf.logging.info('Counting examples in %s.', f)\n reader = file_reader(f)\n for item_str in reader:\n item = data_converter.str_to_item_fn(item_str)\n tensors = data_converter.to_tensors(item)\n num_examples += len(tensors.inputs)\n tf.logging.info('Total examples: %d', num_examples)\n return num_examples\n\n\ndef get_dataset(\n config,\n num_threads=1,\n tf_file_reader=tf.data.TFRecordDataset,\n prefetch_size=4,\n is_training=False):\n \"\"\"Get input tensors from dataset for training or evaluation.\n\n Args:\n config: A Config object containing dataset information.\n num_threads: The number of threads to use for pre-processing.\n tf_file_reader: The tf.data.Dataset class to use for reading files.\n prefetch_size: The number of batches to prefetch. Disabled when 0.\n is_training: Whether or not the dataset is used in training. 
Determines\n whether dataset is shuffled and repeated, etc.\n\n Returns:\n A tf.data.Dataset containing input, output, control, and length tensors.\n\n Raises:\n ValueError: If no files match examples path.\n \"\"\"\n batch_size = config.hparams.batch_size\n examples_path = (\n config.train_examples_path if is_training else config.eval_examples_path)\n note_sequence_augmenter = (\n config.note_sequence_augmenter if is_training else None)\n data_converter = config.data_converter\n data_converter.is_training = is_training\n\n tf.logging.info('Reading examples from: %s', examples_path)\n\n num_files = len(tf.gfile.Glob(examples_path))\n if not num_files:\n raise ValueError(\n 'No files were found matching examples path: %s' % examples_path)\n files = tf.data.Dataset.list_files(examples_path)\n if is_training:\n files = files.apply(\n tf.contrib.data.shuffle_and_repeat(buffer_size=num_files))\n\n reader = files.apply(\n tf.contrib.data.parallel_interleave(\n tf_file_reader,\n cycle_length=num_threads,\n sloppy=True))\n\n def _remove_pad_fn(padded_seq_1, padded_seq_2, padded_seq_3, length):\n if length.shape.ndims == 0:\n return (padded_seq_1[0:length], padded_seq_2[0:length],\n padded_seq_3[0:length], length)\n else:\n # Don't remove padding for hierarchical examples.\n return padded_seq_1, padded_seq_2, padded_seq_3, length\n\n dataset = reader\n if note_sequence_augmenter is not None:\n dataset = dataset.map(note_sequence_augmenter.tf_augment)\n dataset = (dataset\n .map(data_converter.tf_to_tensors,\n num_parallel_calls=num_threads)\n .flat_map(lambda *t: tf.data.Dataset.from_tensor_slices(t))\n .map(_remove_pad_fn))\n if is_training:\n dataset = dataset.shuffle(buffer_size=batch_size * 4)\n\n dataset = dataset.padded_batch(batch_size, dataset.output_shapes)\n\n if prefetch_size:\n dataset = dataset.prefetch(prefetch_size)\n\n return dataset\n"
] | [
[
"tensorflow.logging.warning",
"numpy.array_equal",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.contrib.data.parallel_interleave",
"numpy.cumsum",
"tensorflow.contrib.data.shuffle_and_repeat",
"numpy.all",
"numpy.ceil",
"tensorflow.data.Dataset.list_files",
"numpy.argmax",
"tensorflow.gfile.Glob",
"tensorflow.logging.info",
"numpy.any",
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.where",
"tensorflow.py_func"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
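The DrumsConverter in the record above collapses raw drum pitches into a small set of classes via its `_pitch_class_map` and then emits boolean one-hot or pianoroll-style tensors. A minimal NumPy-only sketch of that mapping and of a multi-hot "roll", with a toy two-class grouping standing in for the real pitch classes (the values below are illustrative, not the library's defaults):

import numpy as np

# Toy grouping: kick-like and snare-like pitches (illustrative values only).
pitch_classes = [(35, 36), (38, 40)]
pitch_class_map = {p: i for i, pitches in enumerate(pitch_classes) for p in pitches}

# Each event is the set of drum pitches struck at one quantized step.
events = [{36}, {35, 38}, set(), {40}]

# Boolean roll: one row per step, one column per pitch class.
roll = np.zeros((len(events), len(pitch_classes)), dtype=bool)
for step, hits in enumerate(events):
    for pitch in hits:
        roll[step, pitch_class_map[pitch]] = True
print(roll.astype(int))  # rows: [1 0], [1 1], [0 0], [0 1]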
zeyiwen/gbdt | [
"35ddc80c6fc2d072478f8505312fd6fc34dcdd9c",
"35ddc80c6fc2d072478f8505312fd6fc34dcdd9c"
] | [
"python/benchmarks/utils/file_utils.py",
"python/benchmarks/model/lightgbm_model.py"
] | [
"import pandas as pd\n\n\ndef add_data(df, algorithm, data, elapsed, metric):\n time_col = (data.name, 'Time(s)')\n metric_col = (data.name, data.metric)\n try:\n df.insert(len(df.columns), time_col, '-')\n df.insert(len(df.columns), metric_col, '-')\n except:\n pass\n\n df.at[algorithm, time_col] = elapsed\n df.at[algorithm, metric_col] = metric\n\n\ndef write_results(df, filename, format):\n if format == \"latex\":\n tmp_df = df.copy()\n tmp_df.columns = pd.MultiIndex.from_tuples(tmp_df.columns)\n with open(filename, \"a\") as file:\n file.write(tmp_df.to_latex())\n elif format == \"csv\":\n with open(filename, \"a\") as file:\n file.write(df.to_csv())\n else:\n raise ValueError(\"Unknown format: \" + format)\n\n print(format + \" results written to: \" + filename)",
"from model.base_model import BaseModel\nimport numpy as np\nimport lightgbm as lgb\nimport time\nimport utils.data_utils as du\nfrom model.datasets import Dataset\n\n\nclass LightGBMModel(BaseModel):\n\n def __init__(self):\n BaseModel.__init__(self)\n\n def _config_model(self, data):\n self.params['task'] = 'train'\n self.params['boosting_type'] = 'gbdt'\n self.params['max_depth'] = 6\n self.params['num_leaves'] = 2 ** self.params['max_depth'] # for max_depth is 6\n # self.params['min_sum_hessian+in_leaf'] = 1\n self.params['min_split_gain'] = self.min_split_loss\n self.params['min_child_weight'] = self.min_weight\n self.params['lambda_l1'] = self.L1_reg\n self.params['lambda_l2'] = self.L2_reg\n self.params['max_bin'] = self.max_bin\n self.params['num_threads'] = 20\n\n if self.use_gpu:\n self.params['device'] = 'gpu'\n else:\n self.params['device'] = 'cpu'\n if data.task == \"Regression\":\n self.params[\"objective\"] = \"regression\"\n elif data.task == \"Multiclass classification\":\n self.params[\"objective\"] = \"multiclass\"\n self.params[\"num_class\"] = int(np.max(data.y_test) + 1)\n elif data.task == \"Classification\":\n self.params[\"objective\"] = \"binary\"\n elif data.task == \"Ranking\":\n self.params[\"objective\"] = \"lambdarank\"\n else:\n raise ValueError(\"Unknown task: \" + data.task)\n\n\n def _train_model(self, data):\n print(self.params)\n lgb_train = lgb.Dataset(data.X_train, data.y_train)\n if data.task == 'Ranking':\n lgb_train.set_group(data.groups)\n\n start = time.time()\n self.model = lgb.train(self.params,\n lgb_train,\n num_boost_round=self.num_rounds)\n elapsed = time.time() - start\n\n return elapsed\n\n def _predict(self, data):\n pred = self.model.predict(data.X_test)\n metric = self.eval(data, pred)\n\n return metric\n\n def model_name(self):\n name = \"lightgbm_\"\n use_cpu = \"gpu_\" if self.use_gpu else \"cpu_\"\n nr = str(self.num_rounds) + \"_\"\n return name + use_cpu + nr + str(self.max_depth)\n\n\nif __name__ == \"__main__\":\n X, y, groups = du.get_yahoo()\n dataset = Dataset(name='yahoo', task='Ranking', metric='NDCG', get_func=du.get_yahoo)\n print(dataset.X_train.shape)\n print(dataset.y_test.shape)\n\n t_start = time.time()\n xgbModel = LightGBMModel()\n xgbModel.use_gpu = False\n xgbModel.run_model(data=dataset)\n\n eplased = time.time() - t_start\n print(\"--------->> \" + str(eplased))\n"
] | [
[
"pandas.MultiIndex.from_tuples"
],
[
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
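write_results in python/benchmarks/utils/file_utils.py above emits a results table whose two-level header is produced with pd.MultiIndex.from_tuples, one (dataset, column) pair per metric. A small self-contained sketch of that pattern, with made-up dataset and algorithm names:

import pandas as pd

df = pd.DataFrame(index=["xgboost", "lightgbm"])
df.insert(len(df.columns), ("higgs", "Time(s)"), [12.3, 9.8])
df.insert(len(df.columns), ("higgs", "AUC"), [0.83, 0.84])

# Turn the tuple column labels into a proper two-level header before export.
out = df.copy()
out.columns = pd.MultiIndex.from_tuples(out.columns)
print(out.to_csv())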
musicinmybrain/probeinterface | [
"8ef28f47193794321c554443e3904d4d99f18eee"
] | [
"tests/test_probe.py"
] | [
"from probeinterface import Probe\n\nimport numpy as np\n\nimport pytest\n\ndef _dummy_position():\n n = 24\n positions = np.zeros((n, 2))\n for i in range(n):\n x = i // 8\n y = i % 8\n positions[i] = x, y\n positions *= 20\n positions[8:16, 1] -= 10\n return positions\n \n\ndef test_probe():\n positions = _dummy_position()\n \n probe = Probe(ndim=2, si_units='um')\n probe.set_contacts(positions=positions, shapes='circle', shape_params={'radius': 5})\n probe.set_contacts(positions=positions, shapes='square', shape_params={'width': 5})\n probe.set_contacts(positions=positions, shapes='rect', shape_params={'width': 8, 'height':5 })\n\n assert probe.get_contact_count() == 24\n\n # shape of the probe\n vertices = [(-20, -30), (20, -110), (60, -30), (60, 190), (-20, 190)]\n probe.set_planar_contour(vertices)\n \n # auto shape\n probe.create_auto_shape()\n \n # annotation\n probe.annotate(manufacturer='me')\n assert 'manufacturer' in probe.annotations\n probe.annotate_contacts(impedance=np.random.rand(24)*1000)\n assert 'impedance' in probe.contact_annotations\n \n # device channel\n chans = np.arange(0, 24, dtype='int')\n np.random.shuffle(chans)\n probe.set_device_channel_indices(chans)\n \n # contact_ids int or str\n elec_ids = np.arange(24)\n probe.set_contact_ids(elec_ids)\n elec_ids = [f'elec #{e}' for e in range(24)]\n probe.set_contact_ids(elec_ids)\n \n # copy\n probe2 = probe.copy()\n \n # move rotate\n probe.move([20, 50])\n probe.rotate(theta=40, center=[0, 0], axis=None)\n\n # make annimage\n values = np.random.randn(24)\n image, xlims, ylims = probe.to_image(values, method='cubic')\n \n image2, xlims, ylims = probe.to_image(values, method='cubic', num_pixel=16)\n \n #~ from probeinterface.plotting import plot_probe_group, plot_probe\n #~ import matplotlib.pyplot as plt\n #~ fig, ax = plt.subplots()\n #~ plot_probe(probe, ax=ax)\n #~ ax.imshow(image, extent=xlims+ylims, origin='lower')\n #~ ax.imshow(image2, extent=xlims+ylims, origin='lower')\n #~ plt.show()\n \n \n # 3d\n probe_3d = probe.to_3d()\n probe_3d.rotate(theta=60, center=[0, 0, 0], axis=[0, 1, 0])\n \n # 3d-2d\n probe_3d = probe.to_3d()\n probe_2d = probe_3d.to_2d(axes=\"xz\")\n assert np.allclose(probe_2d.contact_positions, probe_3d.contact_positions[:, [0, 2]])\n\n #~ from probeinterface.plotting import plot_probe_group, plot_probe\n #~ import matplotlib.pyplot as plt\n #~ plot_probe(probe_3d)\n #~ plt.show()\n\n # get shanks\n for shank in probe.get_shanks():\n pass\n # print(shank)\n # print(shank.contact_positions)\n \n # get dict and df\n d = probe.to_dict()\n other = Probe.from_dict(d)\n \n # export to/from numpy\n arr = probe.to_numpy(complete=False)\n other = Probe.from_numpy(arr)\n arr = probe.to_numpy(complete=True)\n other2 = Probe.from_numpy(arr)\n arr = probe_3d.to_numpy(complete=True)\n other_3d = Probe.from_numpy(arr)\n \n # export to/from DataFrame\n df = probe.to_dataframe(complete=True)\n other = Probe.from_dataframe(df)\n df = probe.to_dataframe(complete=False)\n other2 = Probe.from_dataframe(df)\n df = probe_3d.to_dataframe(complete=True)\n # print(df.index)\n other_3d = Probe.from_dataframe(df)\n assert other_3d.ndim == 3\n\n # slice handling\n selection = np.arange(0,18,2)\n # print(selection.dtype.kind)\n sliced_probe = probe.get_slice(selection)\n assert sliced_probe.get_contact_count() == 9\n assert sliced_probe.contact_annotations['impedance'].shape == (9, )\n \n #~ from probeinterface.plotting import plot_probe_group, plot_probe\n #~ import matplotlib.pyplot as plt\n #~ plot_probe(probe)\n #~ 
plot_probe(sliced_probe)\n \n selection = np.ones(24, dtype='bool')\n selection[::2] = False\n sliced_probe = probe.get_slice(selection)\n assert sliced_probe.get_contact_count() == 12\n assert sliced_probe.contact_annotations['impedance'].shape == (12, )\n \n #~ plot_probe(probe)\n #~ plot_probe(sliced_probe)\n #~ plt.show()\n\n\ndef test_set_shanks():\n probe = Probe(ndim=2, si_units='um')\n probe.set_contacts(\n positions= np.arange(20).reshape(10, 2),\n shapes='circle',\n shape_params={'radius' : 5})\n \n\n # for simplicity each contact is on separate shank\n shank_ids = np.arange(10)\n probe.set_shank_ids(shank_ids)\n\n assert all(probe.shank_ids == shank_ids.astype(str))\n\n\nif __name__ == '__main__':\n test_probe()\n \n test_set_shanks()\n\n\n"
] | [
[
"numpy.allclose",
"numpy.arange",
"numpy.random.shuffle",
"numpy.ones",
"numpy.random.randn",
"numpy.random.rand",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
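The _dummy_position helper in tests/test_probe.py builds a 24-contact grid (3 columns of 8 contacts at 20 um pitch) and staggers the middle column. The same layout can be produced without the per-contact loop; a vectorised NumPy equivalent, for comparison:

import numpy as np

n = 24
idx = np.arange(n)
positions = np.column_stack((idx // 8, idx % 8)).astype(float) * 20.0
positions[8:16, 1] -= 10  # stagger the middle column, as in the test
print(positions.shape)    # (24, 2)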
kaiwalya4850/object-detect | [
"f4c36e025e88ddd70d6ecdd03bee92feae902d8f"
] | [
"scripts/label_image.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels = 3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\n name='jpeg_reader')\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\n\nif __name__ == \"__main__\":\n file_name = \"C:/Users/Kaiwalya/Desktop/intern/anaconda/tensorflow-for-poets-2-master/tf_files/a1.jpg\"\n model_file = \"C:/Users/Kaiwalya/Desktop/intern/anaconda/tensorflow-for-poets-2-master/tf_files/retrained_graph.pb\"\n label_file = \"C:/Users/Kaiwalya/Desktop/intern/anaconda/tensorflow-for-poets-2-master/tf_files/retrained_labels.txt\"\n input_height = 299\n input_width = 299\n input_mean = 128\n input_std = 128\n input_layer = \"Mul\"\n output_layer = \"final_result\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n args = 
parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n if args.image:\n file_name = args.image\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height = args.input_height\n if args.input_width:\n input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_file(file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name);\n output_operation = graph.get_operation_by_name(output_name);\n\n with tf.Session(graph=graph) as sess:\n start = time.time()\n results = sess.run(output_operation.outputs[0],\n {input_operation.outputs[0]: t})\n end=time.time()\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\n template = \"{} (score={:0.5f})\"\n for i in top_k:\n print(labels[i],results[i])\n\t\t\nif results[0] < results[1]:\n\tx=results[1]\n\t#z=round(x,1)\n\t#print(x)\nelse:\n\txyz = round(results[1],0)\n\tprint(xyz)\n\tk=lenght.xyz\n\tprint(k)"
] | [
[
"tensorflow.Graph",
"tensorflow.image.resize_bilinear",
"tensorflow.import_graph_def",
"tensorflow.read_file",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.cast",
"tensorflow.image.decode_png",
"tensorflow.expand_dims",
"tensorflow.image.decode_bmp",
"tensorflow.subtract",
"tensorflow.image.decode_gif",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
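scripts/label_image.py above ranks the softmax output with results.argsort()[-5:][::-1]. That idiom (indices of the k largest scores, best first) in isolation:

import numpy as np

probs = np.array([0.05, 0.60, 0.10, 0.20, 0.05])
top_k = probs.argsort()[-3:][::-1]  # three highest scores, descending
print(top_k)         # [1 3 2]
print(probs[top_k])  # [0.6 0.2 0.1]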
eliaskousk/FedML | [
"e30d5dd3cc84c8a369c828a6f6ef097b3cf67b1a"
] | [
"python/fedml/core/distributed/communication/trpc/trpc_comm_manager.py"
] | [
"import csv\nimport decimal\nimport os\nimport threading\nimport time\nfrom typing import List\n\nimport torch\nimport torch.distributed as dist\nimport torch.distributed.rpc as rpc\nimport torch.multiprocessing as mp\nfrom torch.distributed import rpc\n\nfrom .trpc_server import TRPCCOMMServicer\nfrom ..base_com_manager import BaseCommunicationManager\nfrom ..message import Message\nfrom ..observer import Observer\nimport logging\n\nlock = threading.Lock()\n\n\nWORKER = \"worker{}\"\n\n\nclass TRPCCommManager(BaseCommunicationManager):\n def __init__(\n self,\n trpc_master_config_path,\n process_id=0,\n world_size=0,\n ):\n logging.info(\"using TRPC backend\")\n with open(trpc_master_config_path, newline=\"\") as csv_file:\n csv_reader = csv.reader(csv_file)\n # skip header line\n next(csv_reader)\n master_address, master_port = next(csv_reader)\n self.master_address = master_address\n self.master_port = master_port\n self.process_id = process_id\n self.world_size = world_size\n self._observers: List[Observer] = []\n\n if process_id == 0:\n self.node_type = \"server\"\n else:\n self.node_type = \"client\"\n\n print(f\"Worker rank {process_id} initializing RPC\")\n\n self.trpc_servicer = TRPCCOMMServicer(\n master_address, master_port, self.world_size, process_id\n )\n logging.info(os.getcwd())\n\n os.environ[\"MASTER_ADDR\"] = self.master_address\n os.environ[\"MASTER_PORT\"] = self.master_port\n\n self._init_torch_rpc_tp(\n master_address, master_port, process_id, self.world_size\n )\n\n self.is_running = True\n print(\"server started. master address: \" + str(master_address))\n\n def _init_torch_rpc_pg(\n self,\n master_addr,\n master_port,\n worker_idx,\n worker_num,\n ):\n # https://github.com/pytorch/pytorch/issues/55615\n # [BC-Breaking][RFC] Retire ProcessGroup Backend for RPC #55615\n str_init_method = \"tcp://\" + str(master_addr) + \":\" + str(master_port)\n logging.info(\"str_init_method = {}\".format(str_init_method))\n options = rpc.ProcessGroupRpcBackendOptions(\n num_send_recv_threads=4, init_method=str_init_method, rpc_timeout=60.0\n )\n rpc.init_rpc(\n WORKER.format(worker_idx),\n backend=dist.rpc.BackendType.PROCESS_GROUP,\n rank=worker_idx,\n world_size=worker_num,\n rpc_backend_options=options,\n )\n # torch.distributed.rpc.init_rpc('worker', rank=self.global_rank, world_size=self.world_size)\n logging.info(\"_init_rpc_with_process_group finished.\")\n\n def _init_torch_rpc_tp(\n self,\n master_addr,\n master_port,\n worker_idx,\n worker_num,\n ):\n # https://github.com/pytorch/pytorch/issues/55615\n # [BC-Breaking][RFC] Retire ProcessGroup Backend for RPC #55615\n str_init_method = \"tcp://\" + str(master_addr) + \":10000\"\n logging.info(\"str_init_method = {}\".format(str_init_method))\n options = rpc.TensorPipeRpcBackendOptions(\n num_worker_threads=16,\n rpc_timeout=1800,\n init_method=str_init_method,\n _transports=[\"uv\"],\n )\n rpc.init_rpc(\n WORKER.format(worker_idx),\n backend=rpc.BackendType.TENSORPIPE,\n rank=worker_idx,\n world_size=worker_num,\n rpc_backend_options=options,\n )\n logging.info(\"_init_torch_rpc_tp finished.\")\n\n def send_message(self, msg: Message):\n receiver_id = msg.get_receiver_id()\n\n logging.info(\"sending message to {}\".format(receiver_id))\n\n # Should I wait?\n rpc.rpc_sync(\n WORKER.format(receiver_id),\n TRPCCOMMServicer.sendMessage,\n args=(self.process_id, msg),\n )\n\n logging.debug(\"sent\")\n\n def add_observer(self, observer: Observer):\n self._observers.append(observer)\n\n def remove_observer(self, observer: 
Observer):\n self._observers.remove(observer)\n\n def handle_receive_message(self):\n thread = threading.Thread(target=self.message_handling_subroutine)\n thread.start()\n\n def message_handling_subroutine(self):\n while self.is_running:\n if self.trpc_servicer.message_q.qsize() > 0:\n lock.acquire()\n msg = self.trpc_servicer.message_q.get()\n self.notify(msg)\n lock.release()\n return\n\n def stop_receive_message(self):\n rpc.shutdown()\n self.is_running = False\n\n def notify(self, message: Message):\n msg_type = message.get_type()\n for observer in self._observers:\n observer.receive_message(msg_type, message)\n\n\ndef run_worker(rank, world_size):\n r\"\"\"\n A wrapper function that initializes RPC, calls the function, and shuts down\n RPC.\n \"\"\"\n if rank == 1:\n com_manager_client = TRPCCommManager(\n \"./trpc_master_config.csv\", rank, world_size\n )\n start = time.time()\n tensor = torch.ones(1000, 1000)\n message = Message(type=\"test\", sender_id=rank, receiver_id=\"1\")\n message.add_params(\"THE_TENSOR\", tensor)\n TRPCCOMMServicer.sendMessage(\"worker0\", message)\n message_values = []\n message = Message(type=\"test\", sender_id=rank, receiver_id=\"1\")\n message2 = Message(type=\"test\", sender_id=rank, receiver_id=\"1\")\n message.add_params(\"THE_TENSOR\", tensor)\n for i in range(100):\n print(\"###############################\")\n print(\"Measuring for Single Message\")\n for size in [100, 1000, 10000]:\n\n # for size in [100, 1000]:\n print(f\"======= size = {size} =====\")\n tensor = torch.ones(size, size)\n start = time.time()\n TRPCCOMMServicer.sendMessageTest1(\"worker0\", message)\n end = time.time()\n duration = end - start\n message_values.append(duration)\n # print(f\"Message tensor size={size} duration={str(duration)}\", flush=True)\n\n print(\"###############################\")\n print(\"Measuring for Message with separate Tensor\")\n sinle_tensor_values = []\n start = time.time()\n for size in [100, 1000, 10000]:\n\n # for size in [100, 1000]:\n print(f\"======= size = {size} =====\")\n tensor = torch.ones(size, size)\n # message = Message(type=\"test\", sender_id=rank, receiver_id=\"1\")\n # message.add_params(\"THE_TENSOR\", tensor)\n start = time.time()\n TRPCCOMMServicer.sendMessageTest2(\n \"worker0\", message2.get_params(), tensor\n )\n end = time.time()\n duration = end - start\n # print(f\"Single tensor size={size} duration={str(duration)}\", flush=True)\n sinle_tensor_values.append(duration)\n\n print(\n \"mean message: \"\n + str(decimal.Decimal(sum(message_values) / len(message_values)))\n )\n print(\n \"mean single tensor: \"\n + str(decimal.Decimal(sum(sinle_tensor_values) / len(sinle_tensor_values)))\n )\n # ret = rpc.rpc_sync(\"worker1\", TRPCCOMMServicer., args=(torch.ones(2), torch.ones(2)))\n else:\n # parameter server does nothing\n com_manager_client = TRPCCommManager(\n \"./trpc_master_config.csv\", rank, world_size\n )\n\n rpc.shutdown()\n\n\nif __name__ == \"__main__\":\n world_size = 2\n # run_worker(0,1)\n mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)\n"
] | [
[
"torch.ones",
"torch.multiprocessing.spawn",
"torch.distributed.rpc.ProcessGroupRpcBackendOptions",
"torch.distributed.rpc.TensorPipeRpcBackendOptions",
"torch.distributed.rpc.shutdown"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
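TRPCCommManager above drives PyTorch's RPC layer: init_rpc with a TensorPipe backend, rpc_sync to push a message, rpc.shutdown() to tear everything down. A stripped-down, single-machine sketch of that same cycle; the worker names, port and echo payload here are illustrative, not part of FedML:

import os
import torch
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp


def echo(tensor):
    return tensor * 2


def run(rank, world_size):
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "29500"
    rpc.init_rpc(f"worker{rank}", rank=rank, world_size=world_size)
    if rank == 1:
        out = rpc.rpc_sync("worker0", echo, args=(torch.ones(3),))
        print(out)  # tensor([2., 2., 2.])
    rpc.shutdown()  # acts as a barrier across both workers


if __name__ == "__main__":
    mp.spawn(run, args=(2,), nprocs=2, join=True)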
VasuGoel-zz/npro | [
"68c9756954a4d393e1068a65f6466d1eee1e0644",
"68c9756954a4d393e1068a65f6466d1eee1e0644"
] | [
"npro/npro/report/job_applicant_details/job_applicant_details.py",
"npro/npro/report/leads_pipeline_analysis_by_source/leads_pipeline_analysis_by_source.py"
] | [
"# Copyright (c) 2013, GreyCube Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.utils import cint\nimport pandas as pd\nimport numpy as np\n\n\ndef execute(filters=None):\n return get_data(filters)\n\n\ndef get_data(filters):\n columns = get_columns(filters)\n\n data = frappe.db.sql(\n \"\"\"\n select \n tja.name applicant, tja.applicant_name, tja.source, \n tja.status, tjo.job_title, tjo.customer_cf,\n concat_ws(' - ', round(tja.lower_range), round(tja.upper_range)) salary_range,\n tja.applicant_total_experience_cf, tja.previous_company_cf\n from `tabJob Applicant` tja \n inner join `tabJob Opening` tjo on tjo.name = tja.job_title \n \"\"\",\n as_dict=True,\n )\n df1 = pd.DataFrame.from_records(data)\n df1.set_index(\"applicant\")\n\n social_media = frappe.db.sql(\n \"\"\"\n select \n tja.name applicant, tsmpu.social_media_platform, \n coalesce(tsmpu.profile_url,\"\") profile_url \n from `tabSocial Media Profile URL` tsmpu \n inner join `tabJob Applicant` tja on tja.name = tsmpu.parent ; \n \"\"\",\n as_dict=True,\n )\n\n if not social_media:\n return columns, df1.to_dict(orient=\"records\")\n\n df2 = pd.DataFrame.from_records(social_media)\n df2 = pd.pivot_table(\n df2,\n index=[\"applicant\"],\n columns=[\"social_media_platform\"],\n values=[\"profile_url\"],\n aggfunc=\"first\",\n fill_value=\"\",\n )\n df2.columns = [\n frappe.scrub(d.replace(\"profile_url_\", \"\")) for d in df2.columns.map(\"_\".join)\n ]\n df3 = pd.merge(df1, df2, how=\"left\", on=[\"applicant\"]).replace(\n np.nan, \"\", regex=True\n )\n\n social_media_columns = [\n dict(label=frappe.unscrub(d), fieldname=d, width=150) for d in df2.columns\n ]\n\n columns[5:5] = social_media_columns\n\n return columns, df3.to_dict(orient=\"records\")\n\n\ndef get_columns(filters):\n return [\n {\n \"label\": _(\"Applicant Name\"),\n \"fieldname\": \"applicant_name\",\n \"width\": 180,\n },\n {\n \"label\": _(\"Status\"),\n \"fieldname\": \"status\",\n \"width\": 120,\n },\n {\n \"label\": _(\"Customer\"),\n \"fieldname\": \"customer_cf\",\n \"width\": 220,\n },\n {\n \"label\": _(\"Source\"),\n \"fieldname\": \"source\",\n \"width\": 180,\n },\n {\n \"label\": _(\"Technology\"),\n \"fieldname\": \"job_title\",\n \"width\": 200,\n },\n {\n \"label\": _(\"City\"),\n \"fieldname\": \"city\",\n \"width\": 200,\n },\n {\n \"label\": _(\"Total Experience\"),\n \"fieldname\": \"applicant_total_experience_cf\",\n \"fieldtype\": \"Int\",\n \"width\": 120,\n },\n {\n \"label\": _(\"Previous Company\"),\n \"fieldname\": \"previous_company_cf\",\n \"width\": 150,\n },\n {\n \"label\": _(\"Expected Salary Range\"),\n \"fieldname\": \"salary_range\",\n \"width\": 180,\n },\n ]\n\n\ndef get_conditions(filters):\n where_clause = []\n # if filters.get(\"from_date\"):\n # where_clause.append(\"op.transaction_date >= %(from_date)s\")\n\n return \" where \" + \" and \".join(where_clause) if where_clause else \"\"\n",
"# Copyright (c) 2013, GreyCube Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport pandas\n\n\ndef execute(filters=None):\n return get_data(filters)\n\n\ndef get_data(filters):\n columns = [dict(label=\"Source\", fieldname=\"source\", fieldtype=\"Data\", width=165)]\n\n data = frappe.db.sql(\n \"\"\"\n select\n coalesce(l.source,'Unknown') source, l.status, count(l.status) total_count\n from\n tabLead l\n {where_conditions}\n group by \n coalesce(l.source,'Unknown'), status\"\"\".format(\n where_conditions=get_conditions(filters)\n ),\n filters,\n as_dict=True,\n debug=True,\n )\n if not data:\n return columns, []\n\n df = pandas.DataFrame.from_records(data)\n df1 = pandas.pivot_table(\n df,\n index=[\"source\"],\n values=[\"total_count\"],\n columns=[\"status\"],\n aggfunc=sum,\n fill_value=0,\n margins=True,\n )\n\n df1.drop(index=\"All\", axis=0, inplace=True)\n df1.columns = [d for d in df1.columns.to_series().str[1]]\n df2 = df1.reset_index()\n \n # sorting grid columns\n sort_order = (\n frappe.db.get_single_value(\"NPro Settings\", \"lead_status_sort_order\") or \"\"\n )\n sort_order = sort_order.split(\",\")\n columns = columns + sorted([\n dict(label=frappe.unscrub(col), fieldname=col, fieldtype=\"Int\", width=95)\n for col in df1.columns],\n key=lambda x: sort_order.index(x) if x in sort_order\n else 100,\n )\n\n\n return columns, df2.to_dict(\"r\")\n\n\ndef get_conditions(filters):\n conditions = []\n if filters.get(\"from_date\"):\n conditions += [\"date(l.creation) >= %(from_date)s\"]\n if filters.get(\"to_date\"):\n conditions += [\"date(l.creation) <= %(to_date)s\"]\n\n return conditions and \" where \" + \" and \".join(conditions) or \"\"\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.merge",
"pandas.pivot_table"
],
[
"pandas.DataFrame.from_records",
"pandas.pivot_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
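Both frappe reports above follow the same pandas recipe: raw rows -> DataFrame.from_records, pivot_table to go wide, flatten the resulting column MultiIndex, then (in the applicant report) a left merge back onto the base frame. A minimal sketch of that flow with made-up rows:

import pandas as pd

rows = [
    {"applicant": "A-001", "platform": "linkedin", "url": "https://example.com/a"},
    {"applicant": "A-001", "platform": "github", "url": "https://example.com/b"},
    {"applicant": "A-002", "platform": "github", "url": "https://example.com/c"},
]
df = pd.DataFrame.from_records(rows)

wide = pd.pivot_table(df, index=["applicant"], columns=["platform"],
                      values=["url"], aggfunc="first", fill_value="")
wide.columns = [c for _, c in wide.columns]  # ('url', 'github') -> 'github'
wide = wide.reset_index()

base = pd.DataFrame({"applicant": ["A-001", "A-002"], "status": ["Open", "Hold"]})
print(pd.merge(base, wide, how="left", on=["applicant"]))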
mikeireland/pfi | [
"3d3cb14a36d4d01c9d04fc10d655785e71872594"
] | [
"pfi/overlap.py"
] | [
"\"\"\"A Script to compute the effective overlap between a laser and a star Airy disk,\nwhen the starlight is dispersed, in a heterodyne laser frequency comb detection\nscheme.\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.special as sp\n\n#Start with a y(x) jinc function (i.e. an Airy disk) - plot the cross section.\nx = 0.01*((np.arange(1000)+0.5) - 500)\ny = 2*(sp.jn(1,x*np.pi)/(x*np.pi))\nplt.clf()\nplt.plot(x,y**2)\nplt.plot(x-2,y**2)\nplt.plot(x+2,y**2)\nplt.plot(x+0.667,y**2,'--')\nplt.plot(x-0.667,y**2,'--')\nplt.plot([-1,-1,1,1],[-1,1,1,-1], ':', linewidth=2)\nplt.axis((-2,2,-0.1,1))\nplt.xlabel('Dispersion Axis')\nplt.ylabel('Intensity')\n\n#Now for some calculations\nhw = 60 #Half-width\nx = np.arange(2*hw)-hw\nxy = np.meshgrid(x,x) #A 2D co-ordinate grid.\np = (xy[0]**2 + xy[1]**2) < (hw/12.0)**2 #A circle\nim = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(p))) #A 2D Fourier transform of a circle (i.e. an image)\nwin = np.zeros((2*hw,2*hw)) \nwin[hw-12:hw+12,hw-24:hw+24]=1 #A square window (i.e. a \"pixel\")\n\n#Compute the maximum throughput into a pixel\nprint(\"Max Star Throughput: {0:6.2f}\".format(np.sum(win*np.abs(im)**2)/np.sum(np.abs(im)**2)))\n\n#Compute the minimum throughput into a pixel, when the dispersion places the image 2/3 of the way\n#to the edge of the pixel, which we'll take as the limit of our electrical bandwidth.\nprint(\"Min Star Throughput: {0:6.2f}\".format(np.sum(np.abs(np.roll(im,8,axis=0))**2*win)/np.sum(np.abs(im)**2)))\n\n#Now compute the overlap between laser and starlight at this point - this is the real throughput\n#at the edge of the electrical BW.\noverlap = np.sum(np.real(np.roll(im,8,axis=0)*np.conj(im)*win))/np.sum(win*np.abs(im)**2)\nprint(\"Min Star Overlap: {0:6.2f}\".format(overlap))\nprint(\"Effective min Throughput: {0:6.2f}\".format(overlap**2)) "
] | [
[
"numpy.abs",
"numpy.conj",
"numpy.arange",
"scipy.special.jn",
"numpy.fft.fftshift",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"numpy.zeros",
"numpy.roll",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
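pfi/overlap.py models the stellar PSF with the jinc profile 2*J1(pi*x)/(pi*x) and measures throughput as the fraction of |image|^2 landing inside a pixel window. A short sanity check of that profile using scipy.special.j1 (the same Bessel function as sp.jn(1, ...) in the script); the 0.61 cut below is only an illustrative aperture, not a value taken from the script:

import numpy as np
from scipy.special import j1

x = np.linspace(1e-6, 3.0, 1000)               # avoid the 0/0 at x = 0
airy = (2 * j1(np.pi * x) / (np.pi * x)) ** 2
print(round(float(airy[0]), 3))                # ~1.0 at the peak

# Fraction of this 1-D profile inside |x| < 0.61 (the first null sits near x ~ 1.22).
print(round(float(airy[x < 0.61].sum() / airy.sum()), 3))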
Pearl-UTexas/tensorpack | [
"3ef33a341861769e66995e1630949113404cdd0c",
"3ef33a341861769e66995e1630949113404cdd0c"
] | [
"examples/CaffeModels/load-alexnet.py",
"examples/GAN/DCGAN.py"
] | [
"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: load-alexnet.py\n# Author: Yuxin Wu <[email protected]>\n\nfrom __future__ import print_function\nimport numpy as np\nimport os\nimport cv2\nimport argparse\n\nfrom tensorpack import *\nfrom tensorpack.tfutils.symbolic_functions import *\nfrom tensorpack.tfutils.summary import *\nfrom tensorpack.dataflow.dataset import ILSVRCMeta\nimport tensorflow as tf\n\n\ndef tower_func(image):\n # img: 227x227x3\n with argscope([Conv2D, FullyConnected], activation=tf.nn.relu):\n l = Conv2D('conv1', image, filters=96, kernel_size=11, strides=4, padding='VALID')\n l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm1')\n l = MaxPooling('pool1', l, 3, strides=2, padding='VALID')\n\n l = Conv2D('conv2', l, filters=256, kernel_size=5, split=2)\n l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm2')\n l = MaxPooling('pool2', l, 3, strides=2, padding='VALID')\n\n l = Conv2D('conv3', l, filters=384, kernel_size=3)\n l = Conv2D('conv4', l, filters=384, kernel_size=3, split=2)\n l = Conv2D('conv5', l, filters=256, kernel_size=3, split=2)\n l = MaxPooling('pool3', l, 3, strides=2, padding='VALID')\n\n # This is just a script to load model, so we ignore the dropout layer\n l = FullyConnected('fc6', l, 4096)\n l = FullyConnected('fc7', l, 4096)\n logits = FullyConnected('fc8', l, 1000)\n tf.nn.softmax(logits, name='prob')\n\n\ndef run_test(path, input):\n param_dict = dict(np.load(path))\n predictor = OfflinePredictor(PredictConfig(\n inputs_desc=[InputDesc(tf.float32, (None, 227, 227, 3), 'input')],\n tower_func=tower_func,\n session_init=DictRestore(param_dict),\n input_names=['input'],\n output_names=['prob']\n ))\n\n im = cv2.imread(input)\n assert im is not None, input\n im = cv2.resize(im, (227, 227))[None, :, :, ::-1].astype('float32') - 110\n outputs = predictor(im)[0]\n prob = outputs[0]\n ret = prob.argsort()[-10:][::-1]\n print(\"Top10 predictions:\", ret)\n\n meta = ILSVRCMeta().get_synset_words_1000()\n print(\"Top10 class names:\", [meta[k] for k in ret])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', required=True,\n help='.npz model file generated by tensorpack.utils.loadcaffe')\n parser.add_argument('--input', help='an input image', required=True)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n # run alexnet with given model (in npz format)\n run_test(args.load, args.input)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: DCGAN.py\n# Author: Yuxin Wu <[email protected]>\n\nimport glob\nimport numpy as np\nimport os\nimport argparse\n\n\nfrom tensorpack import *\nfrom tensorpack.utils.viz import stack_patches\nfrom tensorpack.tfutils.scope_utils import auto_reuse_variable_scope\nimport tensorflow as tf\n\nfrom GAN import GANTrainer, RandomZData, GANModelDesc\n\n\"\"\"\n1. Download the 'aligned&cropped' version of CelebA dataset\n from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html\n\n2. Start training:\n ./DCGAN-CelebA.py --data /path/to/img_align_celeba/ --crop-size 140\n Generated samples will be available through tensorboard\n\n3. Visualize samples with an existing model:\n ./DCGAN-CelebA.py --load path/to/model --sample\n\nYou can also train on other images (just use any directory of jpg files in\n`--data`). But you may need to change the preprocessing.\n\nA pretrained model on CelebA is at http://models.tensorpack.com/GAN/\n\"\"\"\n\n\nclass Model(GANModelDesc):\n def __init__(self, shape, batch, z_dim):\n self.shape = shape\n self.batch = batch\n self.zdim = z_dim\n\n def inputs(self):\n return [tf.placeholder(tf.float32, (None, self.shape, self.shape, 3), 'input')]\n\n def generator(self, z):\n \"\"\" return an image generated from z\"\"\"\n nf = 64\n l = FullyConnected('fc0', z, nf * 8 * 4 * 4, activation=tf.identity)\n l = tf.reshape(l, [-1, 4, 4, nf * 8])\n l = BNReLU(l)\n with argscope(Conv2DTranspose, activation=BNReLU, kernel_size=4, strides=2):\n l = Conv2DTranspose('deconv1', l, nf * 4)\n l = Conv2DTranspose('deconv2', l, nf * 2)\n l = Conv2DTranspose('deconv3', l, nf)\n l = Conv2DTranspose('deconv4', l, 3, activation=tf.identity)\n l = tf.tanh(l, name='gen')\n return l\n\n @auto_reuse_variable_scope\n def discriminator(self, imgs):\n \"\"\" return a (b, 1) logits\"\"\"\n nf = 64\n with argscope(Conv2D, kernel_size=4, strides=2):\n l = (LinearWrap(imgs)\n .Conv2D('conv0', nf, activation=tf.nn.leaky_relu)\n .Conv2D('conv1', nf * 2)\n .BatchNorm('bn1')\n .tf.nn.leaky_relu()\n .Conv2D('conv2', nf * 4)\n .BatchNorm('bn2')\n .tf.nn.leaky_relu()\n .Conv2D('conv3', nf * 8)\n .BatchNorm('bn3')\n .tf.nn.leaky_relu()\n .FullyConnected('fct', 1)())\n return l\n\n def build_graph(self, image_pos):\n image_pos = image_pos / 128.0 - 1\n\n z = tf.random_uniform([self.batch, self.zdim], -1, 1, name='z_train')\n z = tf.placeholder_with_default(z, [None, self.zdim], name='z')\n\n with argscope([Conv2D, Conv2DTranspose, FullyConnected],\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):\n with tf.variable_scope('gen'):\n image_gen = self.generator(z)\n tf.summary.image('generated-samples', image_gen, max_outputs=30)\n with tf.variable_scope('discrim'):\n vecpos = self.discriminator(image_pos)\n vecneg = self.discriminator(image_gen)\n\n self.build_losses(vecpos, vecneg)\n self.collect_variables()\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)\n return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)\n\n\ndef get_augmentors():\n augs = []\n if args.load_size:\n augs.append(imgaug.Resize(args.load_size))\n if args.crop_size:\n augs.append(imgaug.CenterCrop(args.crop_size))\n augs.append(imgaug.Resize(args.final_size))\n return augs\n\n\ndef get_data():\n assert args.data\n imgs = glob.glob(args.data + '/*.jpg')\n ds = ImageFromFile(imgs, channel=3, shuffle=True)\n ds = AugmentImageComponent(ds, get_augmentors())\n ds = BatchData(ds, args.batch)\n ds = PrefetchDataZMQ(ds, 5)\n return 
ds\n\n\ndef sample(model, model_path, output_name='gen/gen'):\n pred = PredictConfig(\n session_init=get_model_loader(model_path),\n model=model,\n input_names=['z'],\n output_names=[output_name, 'z'])\n pred = SimpleDatasetPredictor(pred, RandomZData((100, args.z_dim)))\n for o in pred.get_result():\n o = o[0] + 1\n o = o * 128.0\n o = np.clip(o, 0, 255)\n o = o[:, :, :, ::-1]\n stack_patches(o, nr_row=10, nr_col=10, viz=True)\n\n\ndef get_args(default_batch=128, default_z_dim=100):\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--sample', action='store_true', help='view generated examples')\n parser.add_argument('--data', help='a jpeg directory')\n parser.add_argument('--load-size', help='size to load the original images', type=int)\n parser.add_argument('--crop-size', help='crop the original images', type=int)\n parser.add_argument(\n '--final-size', default=64, type=int,\n help='resize to this shape as inputs to network')\n parser.add_argument('--z-dim', help='hidden dimension', type=int, default=default_z_dim)\n parser.add_argument('--batch', help='batch size', type=int, default=default_batch)\n global args\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n return args\n\n\nif __name__ == '__main__':\n args = get_args()\n M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)\n if args.sample:\n sample(M, args.load)\n else:\n logger.auto_set_dir()\n GANTrainer(\n input=QueueInput(get_data()),\n model=M).train_with_defaults(\n callbacks=[ModelSaver()],\n steps_per_epoch=300,\n max_epoch=200,\n session_init=SaverRestore(args.load) if args.load else None\n )\n"
] | [
[
"numpy.load",
"tensorflow.nn.softmax",
"tensorflow.nn.lrn"
],
[
"tensorflow.get_variable",
"numpy.clip",
"tensorflow.summary.image",
"tensorflow.placeholder_with_default",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.truncated_normal_initializer",
"tensorflow.tanh",
"tensorflow.train.AdamOptimizer",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
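DCGAN.py above scales inputs with image / 128.0 - 1 and maps generated samples back with (o + 1) * 128.0 plus np.clip before visualisation (load-alexnet.py uses a simpler mean subtraction). The round trip in isolation:

import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3)).astype("float32")
scaled = img / 128.0 - 1.0                      # network-side range, roughly [-1, 1)
restored = np.clip((scaled + 1.0) * 128.0, 0, 255)
print(np.allclose(img, restored))               # True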
xuanqing94/FLOATER | [
"d788b5e3516f2c2cad351a9464a2436d1df8ab63"
] | [
"fairseq/modules/multihead_attention.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\n\nfrom fairseq import utils\n\n\nclass MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,\n add_bias_kv=False, add_zero_attn=False, self_attention=False,\n encoder_decoder_attention=False):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \\\n 'value to be of the same size'\n\n if self.qkv_same_dim:\n self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))\n else:\n self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))\n self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))\n self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))\n\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n\n self.enable_torch_version = False\n if hasattr(F, \"multi_head_attention_forward\"):\n self.enable_torch_version = True\n else:\n self.enable_torch_version = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n nn.init.xavier_uniform_(self.in_proj_weight)\n else:\n nn.init.xavier_uniform_(self.k_proj_weight)\n nn.init.xavier_uniform_(self.v_proj_weight)\n nn.init.xavier_uniform_(self.q_proj_weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(\n self,\n query, key, value,\n key_padding_mask=None,\n incremental_state=None,\n need_weights=True,\n static_kv=False,\n attn_mask=None,\n before_softmax=False,\n need_head_weights=False,\n ):\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n 
attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if self.enable_torch_version and not self.onnx_trace and incremental_state is None and not static_kv:\n if self.qkv_same_dim:\n return F.multi_head_attention_forward(query, key, value,\n self.embed_dim, self.num_heads,\n self.in_proj_weight,\n self.in_proj_bias, self.bias_k, self.bias_v,\n self.add_zero_attn, self.dropout,\n self.out_proj.weight, self.out_proj.bias,\n self.training, key_padding_mask, need_weights,\n attn_mask)\n else:\n return F.multi_head_attention_forward(query, key, value,\n self.embed_dim, self.num_heads,\n torch.empty([0]),\n self.in_proj_bias, self.bias_k, self.bias_v,\n self.add_zero_attn, self.dropout,\n self.out_proj.weight, self.out_proj.bias,\n self.training, key_padding_mask, need_weights,\n attn_mask, use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_weight,\n k_proj_weight=self.k_proj_weight,\n v_proj_weight=self.v_proj_weight)\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if 'prev_key' in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n if self.self_attention:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.in_proj_k(key)\n v = self.in_proj_v(key)\n\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n q *= self.scaling\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if 'prev_key' in saved_state:\n prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n k = torch.cat((prev_key, k), dim=1)\n if 'prev_value' in saved_state:\n prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n v = torch.cat((prev_value, v), dim=1)\n if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None:\n prev_key_padding_mask = 
saved_state['prev_key_padding_mask']\n if static_kv:\n key_padding_mask = prev_key_padding_mask\n else:\n key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)\n saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state['prev_key_padding_mask'] = key_padding_mask\n\n self._set_input_buffer(incremental_state, saved_state)\n\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n float('-inf'),\n )\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n\n attn_weights_float = utils.softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace)\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)\n\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if (self.onnx_trace and attn.size(1) == 1):\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n\n if need_weights:\n attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n else:\n attn_weights = None\n\n return attn, attn_weights\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=-1)\n\n def in_proj_q(self, query):\n if self.qkv_same_dim:\n return self._in_proj(query, end=self.embed_dim)\n else:\n bias = self.in_proj_bias\n if bias is not None:\n bias = bias[:self.embed_dim]\n return F.linear(query, self.q_proj_weight, bias)\n\n def in_proj_k(self, key):\n if self.qkv_same_dim:\n return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)\n else:\n weight = self.k_proj_weight\n bias = 
self.in_proj_bias\n if bias is not None:\n bias = bias[self.embed_dim:2 * self.embed_dim]\n return F.linear(key, weight, bias)\n\n def in_proj_v(self, value):\n if self.qkv_same_dim:\n return self._in_proj(value, start=2 * self.embed_dim)\n else:\n weight = self.v_proj_weight\n bias = self.in_proj_bias\n if bias is not None:\n bias = bias[2 * self.embed_dim:]\n return F.linear(value, weight, bias)\n\n def _in_proj(self, input, start=0, end=None):\n weight = self.in_proj_weight\n bias = self.in_proj_bias\n weight = weight[start:end, :]\n if bias is not None:\n bias = bias[start:end]\n return F.linear(input, weight, bias)\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n if input_buffer[k] is not None:\n input_buffer[k] = input_buffer[k].index_select(0, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n ) or {}\n\n def _set_input_buffer(self, incremental_state, buffer):\n utils.set_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n buffer,\n )\n\n def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):\n return attn_weights\n"
] | [
[
"torch.Size",
"torch.empty",
"torch.Tensor",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.multi_head_attention_forward",
"torch.nn.Linear",
"torch.bmm",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
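The fairseq-style `MultiheadAttention` entry above centers on one reshape pattern: fold heads into the batch dimension, run a scaled `bmm` softmax `bmm`, then fold back. A minimal runnable sketch of that pattern follows; the `toy_multihead_attention` helper, shapes, and names are illustrative and not taken from the row itself.

```python
# Illustrative sketch (assumed helper, not from the corpus row): the head-folding
# reshape + scaled dot-product attention used by the fairseq entry above.
import torch
import torch.nn.functional as F

def toy_multihead_attention(q, k, v, num_heads):
    # q, k, v: (seq_len, bsz, embed_dim), matching the entry's (tgt_len, bsz, embed_dim) layout
    tgt_len, bsz, embed_dim = q.shape
    head_dim = embed_dim // num_heads
    scaling = head_dim ** -0.5

    def split_heads(x):
        # (len, bsz, embed_dim) -> (bsz * num_heads, len, head_dim)
        return x.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    q, k, v = split_heads(q * scaling), split_heads(k), split_heads(v)
    attn_weights = torch.bmm(q, k.transpose(1, 2))    # (bsz*heads, tgt_len, src_len)
    attn_probs = F.softmax(attn_weights, dim=-1)
    attn = torch.bmm(attn_probs, v)                   # (bsz*heads, tgt_len, head_dim)
    return attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)

x = torch.randn(5, 2, 16)                             # (tgt_len, bsz, embed_dim)
out = toy_multihead_attention(x, x, x, num_heads=4)
print(out.shape)                                      # torch.Size([5, 2, 16])
```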
qipengwang/TransPose | [
"2ca260768f3b0afdb92c7a0425c3c28e9cdd379d"
] | [
"lib/models/swin_transpose.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as checkpoint\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nimport logging\nimport math\nimport os\n\nfrom .mobilenet import MobileNetV1, MobileNetV2, MobileNetV3_Large, MobileNetV3_Small\nfrom .googlenet import GoogLeNet\nfrom .xception import Xception\nfrom .inceptionv3 import InceptionV3\nfrom .shufflenetv2 import ShuffleNetV2\nfrom .squeezenet import SqueezeNet\nfrom .vgg import *\nfrom .linear import LinearProjection\nfrom .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152\n\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\ndef window_partition(x, window_size):\n \"\"\"\n Args:\n x: (B, H, W, C)\n window_size (int): window size\n Returns:\n windows: (num_windows*B, window_size, window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows\n\n\ndef window_reverse(windows, window_size, H, W):\n \"\"\"\n Args:\n windows: (num_windows*B, window_size, window_size, C)\n window_size (int): Window size\n H (int): Height of image\n W (int): Width of image\n Returns:\n x: (B, H, W, C)\n \"\"\"\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n\nclass WindowAttention(nn.Module):\n r\"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n It supports both of shifted and non-shifted window.\n Args:\n dim (int): Number of input channels.\n window_size (tuple[int]): The height and width of the window.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n \"\"\"\n\n def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):\n\n super().__init__()\n self.dim = dim\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww\n coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2\n relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n trunc_normal_(self.relative_position_bias_table, std=.02)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, mask=None):\n \"\"\"\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n \"\"\"\n B_, N, C = x.shape\n # print(x.shape)\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.view(-1, self.num_heads, N, N)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n def extra_repr(self) -> str:\n return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'\n\n def flops(self, N):\n # calculate flops for 1 window with token length of N\n flops = 0\n # qkv = self.qkv(x)\n flops += N * self.dim * 3 * self.dim\n # attn = (q @ k.transpose(-2, -1))\n flops += self.num_heads * N * (self.dim // self.num_heads) * N\n # x = (attn @ v)\n flops += self.num_heads * N * N * (self.dim // self.num_heads)\n # x = self.proj(x)\n flops += N * self.dim * self.dim\n return flops\n\n\nclass SwinTransformerBlock(nn.Module):\n r\"\"\" Swin Transformer Block.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads 
(int): Number of attention heads.\n window_size (int): Window size.\n shift_size (int): Shift size for SW-MSA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n # print(f'in SwinTransformerBlock dim = {dim}')\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n H, W = self.input_resolution\n img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer(\"attn_mask\", attn_mask)\n\n def forward(self, x):\n # print(f'forward in SwinTransformerBlock input_shape {x.size()}')\n H, W = self.input_resolution\n\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n else:\n shifted_x = x\n\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n\n # W-MSA/SW-MSA\n attn_windows = self.attn(x_windows, 
mask=self.attn_mask) # nW*B, window_size*window_size, C\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C\n\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n else:\n x = shifted_x\n x = x.view(B, H * W, C)\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n # print(f'forward in SwinTransformerBlock return {x.shape}')\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, \" \\\n f\"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}\"\n\n def flops(self):\n flops = 0\n H, W = self.input_resolution\n # norm1\n flops += self.dim * H * W\n # W-MSA/SW-MSA\n nW = H * W / self.window_size / self.window_size\n flops += nW * self.attn.flops(self.window_size * self.window_size)\n # mlp\n flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio\n # norm2\n flops += self.dim * H * W\n return flops\n\n\nclass PatchMerging(nn.Module):\n r\"\"\" Patch Merging Layer.\n Args:\n input_resolution (tuple[int]): Resolution of input feature.\n dim (int): Number of input channels.\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.norm = norm_layer(4 * dim)\n self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n\n def forward(self, x):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n assert H % 2 == 0 and W % 2 == 0, f\"x size ({H}*{W}) are not even.\"\n\n x = x.view(B, H, W, C)\n\n x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C\n x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C\n x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C\n x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C\n x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C\n x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C\n\n x = self.norm(x)\n x = self.reduction(x)\n\n return x\n\n def extra_repr(self) -> str:\n return f\"input_resolution={self.input_resolution}, dim={self.dim}\"\n\n def flops(self):\n H, W = self.input_resolution\n flops = H * W * self.dim\n flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim\n return flops\n\n\nclass BasicLayer(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):\n\n super().__init__()\n # print(f'BasicLayer dim = {dim}')\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n\n # build blocks\n self.blocks = nn.ModuleList([\n SwinTransformerBlock(dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n norm_layer=norm_layer)\n for i in range(depth)])\n\n # patch merging layer\n if downsample is not None:\n self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)\n else:\n self.downsample = None\n\n def forward(self, x):\n # print(f'forward in BasicLayer {x.shape}')\n for blk in self.blocks:\n if self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x = blk(x)\n # print(f'basic layer: {x.shape}')\n # print(f'basic layer: {x.shape}')\n if self.downsample is not None:\n x = self.downsample(x)\n # print(f'basic layer after downsample: {x.shape}')\n \n # print(f'forward in BasicLayer return {x.shape}')\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}\"\n\n def flops(self):\n flops = 0\n for blk in self.blocks:\n flops += blk.flops()\n if self.downsample is not None:\n flops += self.downsample.flops()\n return flops\n\n\nclass SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False\n \"\"\"\n\n def __init__(self, input_shape=(32, 24), input_channel=256, depths=[2, 2, 6, 2], num_heads=[2, 4, 8, 16],\n window_size=1, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs):\n super().__init__()\n\n # self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = input_channel\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(input_channel * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n # self.patch_embed = PatchEmbed(\n # img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n # norm_layer=norm_layer if self.patch_norm else None)\n # num_patches = self.patch_embed.num_patches\n # patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = to_2tuple(input_shape)\n\n # absolute position embedding\n # if self.ape:\n # self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n # trunc_normal_(self.absolute_pos_embed, std=.02)\n\n # self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(input_channel * 2 ** i_layer),\n input_resolution=(self.patches_resolution[0] // (2 ** i_layer), self.patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n self.layers = nn.Sequential(*self.layers)\n self.norm = norm_layer(self.num_features)\n self.expension = nn.Linear(\n input_channel * 2 ** (self.num_layers-1), \n input_channel * 4 ** (self.num_layers-1) , \n bias=False)\n # self.avgpool = nn.AdaptiveAvgPool1d(1)\n # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n # self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward(self, x: torch.Tensor):\n # x = self.patch_embed(x)\n \n # if self.ape:\n # x = x + self.absolute_pos_embed\n # x = self.pos_drop(x)\n bs, c, h, w = x.shape\n x = x.flatten(2).transpose(1, 2) # B L C\n x = self.layers(x)\n x = self.norm(x) # B L C\n x = self.expension(x)\n # print('stf:', x.size())\n # return x\n for i in range(self.num_layers-1, 0, -1):\n B, L, C = x.shape\n H, W = self.patches_resolution[0] // 2**i, self.patches_resolution[1] // 2**i\n if i > 1:\n x = x.reshape([B, H, W, 2, 2, C//4]).permute([0, 1, 4, 2, 3, 5]).reshape([B, H*2 * W*2, C//4])\n else:\n x = x.reshape([B, H, W, 2, 2, C//4]).permute([0, 5, 1, 
4, 2, 3]).reshape([B, C//4, H*2, W*2])\n # x = self.avgpool(x.transpose(1, 2)) # B C 1\n # x = torch.flatten(x, 1)\n return x\n\n def flops(self):\n flops = 0\n # flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n # flops += self.num_features * self.num_classes\n return flops\n\n\nclass SwinTranspose(nn.Module):\n\n def __init__(self, basenet, num_features, cfg, **kwargs):\n # print('create SwinTranspose')\n extra = cfg.MODEL.EXTRA\n self.deconv_with_bias = extra.DECONV_WITH_BIAS\n\n super(SwinTranspose, self).__init__()\n # base net\n model = basenet()\n self.features = model.features[:num_features]\n # print(self.features[-1])\n self.features = nn.Sequential(*self.features)\n num_out = model.features_out[num_features - 1]\n # print(num_out)\n\n d_model = cfg.MODEL.DIM_MODEL\n pos_embedding_type = cfg.MODEL.POS_EMBEDDING\n w, h = cfg.MODEL.IMAGE_SIZE\n\n self.reduce = nn.Conv2d(num_out, d_model, 1, bias=False)\n self._make_position_embedding(w, h, d_model, pos_embedding_type)\n\n self.global_encoder = SwinTransformer(input_channel=d_model)\n\n # used for deconv layers\n self.inplanes = d_model\n self.deconv_layers = self._make_deconv_layer(\n extra.NUM_DECONV_LAYERS, # 1\n extra.NUM_DECONV_FILTERS, # [d_model]\n extra.NUM_DECONV_KERNELS, # [4]\n )\n\n self.final_layer = nn.Conv2d(\n in_channels=d_model,\n out_channels=cfg.MODEL.NUM_JOINTS,\n kernel_size=extra.FINAL_CONV_KERNEL,\n stride=1,\n padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0\n )\n\n def _make_position_embedding(self, w, h, d_model, pe_type='sine'):\n assert pe_type in ['none', 'learnable', 'sine']\n if pe_type == 'none':\n self.pos_embedding = None\n logger.info(\"==> Without any PositionEmbedding~\")\n else:\n with torch.no_grad():\n self.pe_h = h // 8\n self.pe_w = w // 8\n length = self.pe_h * self.pe_w\n if pe_type == 'learnable':\n self.pos_embedding = nn.Parameter(\n torch.randn(length, 1, d_model))\n logger.info(\"==> Add Learnable PositionEmbedding~\")\n else:\n self.pos_embedding = nn.Parameter(\n self._make_sine_position_embedding(d_model),\n requires_grad=False)\n logger.info(\"==> Add Sine PositionEmbedding~\")\n\n def _make_sine_position_embedding(self, d_model, temperature=10000,\n scale=2*math.pi):\n # logger.info(\">> NOTE: this is for testing on unseen input resolutions\")\n # # NOTE generalization test with interploation\n # self.pe_h, self.pe_w = 256 // 8 , 192 // 8 #self.pe_h, self.pe_w\n h, w = self.pe_h, self.pe_w\n area = torch.ones(1, h, w) # [b, h, w]\n y_embed = area.cumsum(1, dtype=torch.float32)\n x_embed = area.cumsum(2, dtype=torch.float32)\n\n one_direction_feats = d_model // 2\n\n eps = 1e-6\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale\n\n dim_t = torch.arange(one_direction_feats, dtype=torch.float32)\n dim_t = temperature ** (2 * (dim_t // 2) / one_direction_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n pos_x = torch.stack(\n (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos_y = torch.stack(\n (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n pos = pos.flatten(2).permute(2, 0, 1)\n return pos # [h*w, 1, d_model]\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n 
padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.features(x)\n x = self.reduce(x)\n # print(x.size())\n\n x = self.global_encoder(x)\n x = self.deconv_layers(x)\n x = self.final_layer(x)\n\n return x\n\n def init_weights(self, pretrained=''):\n if os.path.isfile(pretrained):\n logger.info('=> init final conv weights from normal distribution')\n for name, m in self.final_layer.named_modules():\n if isinstance(m, nn.Conv2d):\n logger.info(\n '=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n\n pretrained_state_dict = torch.load(pretrained)\n logger.info('=> loading pretrained model {}'.format(pretrained))\n existing_state_dict = {}\n for name, m in pretrained_state_dict.items():\n if name in self.state_dict():\n existing_state_dict[name] = m\n print(\":: {} is loaded from {}\".format(name, pretrained))\n self.load_state_dict(existing_state_dict, strict=False)\n else:\n logger.info('=> NOTE :: ImageNet Pretrained Weights {} are not loaded ! 
Please Download it'.format(pretrained))\n logger.info('=> init weights from normal distribution')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n\n\ndef get_pose_net(cfg, is_train, **kwargs):\n # stride = [2, 2, 2]\n mobilenet_spec = {\n 'MobileNet': (MobileNetV1, 6), # 14\n 'MobileNetV1': (MobileNetV1, 6), # 14\n 'MobileNetV2': (MobileNetV2, 7), # 19\n 'MobileNetV3Large': (MobileNetV3_Large, 9), # 21\n 'MobileNetV3_Large': (MobileNetV3_Large, 9), # 21\n 'MobileNetV3Small': (MobileNetV3_Small, 6), # 17\n 'MobileNetV3_Small': (MobileNetV3_Small, 6), # 17\n 'GoogleNet': (GoogLeNet, 7),\n 'Xception': (Xception, 8),\n 'InceptionV3': (InceptionV3, 10),\n 'ShuffleNetV2': (ShuffleNetV2, 5),\n 'SqueezeNet': (SqueezeNet, 12),\n 'SqueezeNetV1.0': (SqueezeNet, 12),\n 'VGG11': (VGG11, 9),\n 'VGG13': (VGG13, 11),\n 'VGG16': (VGG16, 13),\n 'VGG19': (VGG19, 15),\n 'Linear': (LinearProjection, 1),\n 'LinearProjection': (LinearProjection, 1),\n 'ResNet18': (ResNet18, 8),\n 'ResNet34': (ResNet34, 11),\n 'ResNet50': (ResNet50, 11),\n 'ResNet101': (ResNet101, 11),\n 'ResNet152': (ResNet152, 15),\n }\n basenet, num_features = mobilenet_spec[kwargs['backbone']]\n model = SwinTranspose(basenet, num_features, cfg, **kwargs)\n\n if is_train and cfg.MODEL.INIT_WEIGHTS:\n model.init_weights(cfg.MODEL.PRETRAINED)\n\n return model"
] | [
[
"torch.nn.Softmax",
"torch.cat",
"torch.zeros",
"torch.load",
"torch.no_grad",
"torch.flatten",
"torch.nn.Dropout",
"torch.ones",
"torch.randn",
"torch.arange",
"torch.roll",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.nn.Identity",
"torch.utils.checkpoint.checkpoint",
"torch.nn.ReLU",
"torch.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
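The Swin code in the TransPose entry above hinges on `window_partition` and `window_reverse` being exact inverses for inputs whose height and width divide the window size. A small self-contained sketch below copies those two helpers from the row and checks the round trip; the tensor sizes are arbitrary.

```python
# Round-trip check for the window helpers used by the Swin blocks above
# (helpers copied standalone from the entry; shapes are illustrative).
import torch

def window_partition(x, window_size):
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)

def window_reverse(windows, window_size, H, W):
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

x = torch.randn(2, 8, 8, 16)                 # (B, H, W, C) with H, W divisible by the window size
windows = window_partition(x, window_size=4)
print(windows.shape)                          # torch.Size([8, 4, 4, 16]): 2 * (8/4) * (8/4) windows
assert torch.equal(window_reverse(windows, 4, 8, 8), x)
```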
weizhonz/hid | [
"3ee3aeeaf12baeadf3d85c1bb86296073bba3fbe"
] | [
"simple_mnist_example.py"
] | [
"# General structure from https://github.com/pytorch/examples/blob/master/mnist/main.py\nfrom __future__ import print_function\nimport argparse\nimport os\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nimport torch.autograd as autograd\n\nargs = None\n\nclass GetSubnet(autograd.Function):\n @staticmethod\n def forward(ctx, scores, k):\n # Get the supermask by sorting the scores and using the top k%\n out = scores.clone()\n _, idx = scores.flatten().sort()\n j = int((1 - k) * scores.numel())\n\n # flat_out and out access the same memory.\n flat_out = out.flatten()\n flat_out[idx[:j]] = 0\n flat_out[idx[j:]] = 1\n\n return out\n\n @staticmethod\n def backward(ctx, g):\n # send the gradient g straight-through on the backward pass.\n return g, None\n\n\nclass SupermaskConv(nn.Conv2d):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # initialize the scores\n self.scores = nn.Parameter(torch.Tensor(self.weight.size()))\n nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))\n\n # NOTE: initialize the weights like this.\n nn.init.kaiming_normal_(self.weight, mode=\"fan_in\", nonlinearity=\"relu\")\n\n # NOTE: turn the gradient on the weights off\n self.weight.requires_grad = False\n\n def forward(self, x):\n subnet = GetSubnet.apply(self.scores.abs(), args.sparsity)\n w = self.weight * subnet\n x = F.conv2d(\n x, w, self.bias, self.stride, self.padding, self.dilation, self.groups\n )\n return x\n\nclass SupermaskLinear(nn.Linear):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # initialize the scores\n self.scores = nn.Parameter(torch.Tensor(self.weight.size()))\n nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))\n\n # NOTE: initialize the weights like this.\n nn.init.kaiming_normal_(self.weight, mode=\"fan_in\", nonlinearity=\"relu\")\n\n # NOTE: turn the gradient on the weights off\n self.weight.requires_grad = False\n\n def forward(self, x):\n subnet = GetSubnet.apply(self.scores.abs(), args.sparsity)\n w = self.weight * subnet\n return F.linear(x, w, self.bias)\n return x\n\n# NOTE: not used here but we use NON-AFFINE Normalization!\n# So there is no learned parameters for your nomralization layer.\nclass NonAffineBatchNorm(nn.BatchNorm2d):\n def __init__(self, dim):\n super(NonAffineBatchNorm, self).__init__(dim, affine=False)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = SupermaskConv(1, 32, 3, 1, bias=False)\n self.conv2 = SupermaskConv(32, 64, 3, 1, bias=False)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = SupermaskLinear(9216, 128, bias=False)\n self.fc2 = SupermaskLinear(128, 10, bias=False)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\ndef train(model, device, train_loader, optimizer, criterion, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} 
({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef test(model, device, criterion, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += criterion(output, target)\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\ndef main():\n global args\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=14, metavar='N',\n help='number of epochs to train (default: 14)')\n parser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='Momentum (default: 0.9)')\n parser.add_argument('--wd', type=float, default=0.0005, metavar='M',\n help='Weight decay (default: 0.0005)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--data', type=str, default='../data', help='Location to store data')\n parser.add_argument('--sparsity', type=float, default=0.5,\n help='how sparse is each layer')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print (device)\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(os.path.join(args.data, 'mnist'), train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(os.path.join(args.data, 'mnist'), train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n model = Net().to(device)\n # NOTE: only pass the parameters where p.requires_grad == True to the optimizer! 
Important!\n optimizer = optim.SGD(\n [p for p in model.parameters() if p.requires_grad],\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.wd,\n )\n criterion = nn.CrossEntropyLoss().to(device)\n scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)\n for epoch in range(1, args.epochs + 1):\n train(model, device, train_loader, optimizer, criterion, epoch)\n test(model, device, criterion, test_loader)\n scheduler.step()\n\n if args.save_model:\n torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout2d",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.nn.functional.conv2d",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.cuda.is_available",
"torch.flatten",
"torch.device",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.linear",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
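The `simple_mnist_example.py` row above builds its supermask with a sort-based top-k mask and a straight-through gradient. The sketch below isolates that trick in a standalone `TopKMask` function; the class name and the keep/zero convention are the sketch's own, not the row's, and here `sparsity` is the fraction of entries zeroed out.

```python
# Illustrative sketch (assumed TopKMask helper, not from the corpus row): sort the
# scores, zero the lowest `sparsity` fraction, set the rest to 1, and pass the
# gradient straight through on the backward pass.
import torch
import torch.autograd as autograd

class TopKMask(autograd.Function):
    @staticmethod
    def forward(ctx, scores, sparsity):
        out = scores.clone()
        _, idx = scores.flatten().sort()
        j = int(sparsity * scores.numel())   # number of entries to zero out
        flat = out.view(-1)                  # view shares memory with `out`
        flat[idx[:j]] = 0
        flat[idx[j:]] = 1
        return out

    @staticmethod
    def backward(ctx, g):
        return g, None                       # straight-through estimator

scores = torch.randn(4, 4, requires_grad=True)
mask = TopKMask.apply(scores.abs(), 0.75)    # roughly 25% of entries survive
print(mask.sum().item())                     # 4.0 for a 4x4 tensor at 75% sparsity
```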
yanqiangmiffy/stanford-tensorflow-tutorials | [
"97cc43d6e5324816202fdeddb196c0356dce412a"
] | [
"examples/02_placeholder.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: yanqiang\n# @Date: 2018-05-14 23:01:30\n# @Last Modified by: yanqiang\n# @Last Modified time: 2018-05-14 23:12:22\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\n\n# Example 1: feed_dict with placeholder\n\n# a is a placeholder for a vector of 3 elements,type tf.float32\na = tf.placeholder(tf.float32, shape=[3])\nb = tf.constant([5, 5, 5], tf.float32)\n\n# use the placeholder as you would a constant\nc = a + b # short for tf.add(a,b)\n\nwriter = tf.summary.FileWriter('graphs/placeholders', tf.get_default_graph())\nwith tf.Session() as sess:\n # compute the value of c given the value if a is [1,2,3]\n print(sess.run(c, {a: [1, 2, 3]}))\nwriter.close()\n\n# Example 2:feed_dict with variables\na = tf.add(2, 5)\nb = tf.multiply(a, 3)\nwith tf.Session() as sess:\n print(sess.run(b)) # >> 21\n # compute the value of b given the value of a is 15\n print(sess.run(b, feed_dict={a: 15}))\n"
] | [
[
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.add",
"tensorflow.Session",
"tensorflow.get_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
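The `02_placeholder.py` row runs as-is on the TensorFlow 1.x versions listed for it. As a hedged sketch, assuming a TensorFlow 2.x install instead, the same placeholder and `feed_dict` pattern can be expressed through the `tf.compat.v1` shim with graph mode enabled:

```python
# Sketch under the assumption of TensorFlow 2.x: the row's placeholder/feed_dict
# example rewritten against the tf.compat.v1 API.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.placeholder(tf.float32, shape=[3])
b = tf.constant([5, 5, 5], tf.float32)
c = a + b                                    # short for tf.add(a, b)

with tf.Session() as sess:
    print(sess.run(c, feed_dict={a: [1, 2, 3]}))   # [6. 7. 8.]
```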
prise-3d/figures-generator | [
"3f9aabb4f70a229b3ba3398a37f04a2c720248e2"
] | [
"mon-estimator/run/utils/fonctions.py"
] | [
"import os\nimport numpy as np\n\nfrom skimage.metrics import structural_similarity as ssim\nfrom skimage.metrics import mean_squared_error as mse\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\n\ndef compare_image(metric, ref, target):\n\n if metric == 'ssim':\n return ssim(ref, target, multichannel=True)\n\n if metric == 'psnr':\n return psnr(ref, target)\n\n if metric == 'mse':\n return mse(ref, target)\n\n return None\n\ndef extract_nsamples(img_path):\n _, img = os.path.split(img_path)\n return int(img.split('-')[-2].replace('S', ''))\n\ndef get_color(color):\n\n if color == 'red':\n return np.array([255, 0, 0])\n \n if color == 'green':\n return np.array([0, 255, 0])\n\n if color == 'lightgreen': \n return np.array([46, 184, 46])\n\n if color == 'lightyellow': \n return np.array([255, 255, 153])\n\n if color == 'blue':\n return np.array([0, 0, 255])\n \n if color == 'yellow':\n return np.array([255, 204, 0])\n\n if color == 'lightblue':\n return np.array([0, 153, 255])\n\ndef add_border(img_arr, p1, p2, color, size):\n\n img_arr = img_arr.copy()\n p1_x, p1_y = p1\n p2_x, p2_y = p2\n\n for i in range(size):\n for x in np.arange(p1_x, p2_x + size):\n img_arr[p1_y + i][x] = get_color(color)\n img_arr[p2_y + i][x] = get_color(color)\n\n for y in np.arange(p1_y, p2_y + size):\n img_arr[y][p1_x + i] = get_color(color)\n img_arr[y][p2_x + i] = get_color(color)\n\n return img_arr\n\ndef extract_zone(img_arr, p1, p2):\n\n img_arr = img_arr.copy()\n \n p1_x, p1_y = p1\n p2_x, p2_y = p2\n\n return img_arr[p1_y:p2_y, p1_x:p2_x, :]\n\n\ndef extract_center(img_arr, w, h):\n\n m_w, m_h = int(w / 2), int(h / 2) # get middle to add\n h_i, w_i, _ = img_arr.shape # get shape\n w_center, h_center = (int(w_i / 2), int(h_i / 2)) # get center coords\n\n return img_arr[h_center - m_h:h_center + m_h, w_center - m_w:w_center + m_w, :]\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
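The `fonctions.py` row wraps three scikit-image metrics behind `compare_image`. The sketch below exercises those same metrics directly on a synthetic reference/noisy pair; it assumes a recent scikit-image where `channel_axis` replaces the `multichannel` flag used in the row, and the image sizes and noise level are arbitrary.

```python
# Illustrative sketch (not from the corpus row): the skimage metrics that
# compare_image dispatches to, applied to a synthetic image pair.
import numpy as np
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error as mse
from skimage.metrics import peak_signal_noise_ratio as psnr

rng = np.random.default_rng(0)
ref = rng.random((64, 64, 3))                              # float image in [0, 1]
noisy = np.clip(ref + rng.normal(0, 0.05, ref.shape), 0, 1)

print("ssim:", ssim(ref, noisy, channel_axis=-1, data_range=1.0))
print("psnr:", psnr(ref, noisy, data_range=1.0))
print("mse :", mse(ref, noisy))
```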
nengo/nengo | [
"55b5c4f7da351dfefd8b40eb14d2e47c1006cff0"
] | [
"nengo/connection.py"
] | [
"import warnings\n\nimport numpy as np\n\nfrom nengo.base import NengoObject, NengoObjectParam, ObjView\nfrom nengo.dists import DistOrArrayParam, Distribution\nfrom nengo.ensemble import Ensemble, Neurons\nfrom nengo.exceptions import ValidationError\nfrom nengo.learning_rules import LearningRuleType, LearningRuleTypeParam\nfrom nengo.neurons import Direct\nfrom nengo.node import Node\nfrom nengo.params import BoolParam, Default, FunctionInfo, Parameter\nfrom nengo.rc import rc\nfrom nengo.solvers import LstsqL2, SolverParam\nfrom nengo.synapses import Lowpass, SynapseParam\nfrom nengo.transforms import Dense, NoTransform\nfrom nengo.utils.functions import function_name\nfrom nengo.utils.numpy import is_array_like, is_iterable\nfrom nengo.utils.stdlib import checked_call\n\n\nclass PrePostParam(NengoObjectParam):\n def coerce(self, conn, nengo_obj):\n if isinstance(nengo_obj, Connection):\n raise ValidationError(\n \"Cannot connect to or from connections. \"\n \"Did you mean to connect to the connection's learning rule?\",\n attr=self.name,\n obj=conn,\n )\n return super().coerce(conn, nengo_obj)\n\n\nclass ConnectionLearningRuleTypeParam(LearningRuleTypeParam):\n \"\"\"Connection-specific validation for learning rules.\"\"\"\n\n coerce_defaults = False\n\n def check_pre(self, conn, rule):\n pre = conn.pre_obj\n if rule.modifies in (\"decoders\", \"weights\"):\n # pre object must be neural\n if not isinstance(pre, (Ensemble, Neurons)):\n raise ValidationError(\n \"'pre' must be of type 'Ensemble' or 'Neurons' for \"\n f\"learning rule '{rule}' (got type '{type(pre).__name__}')\",\n attr=self.name,\n obj=conn,\n )\n if isinstance(pre, Ensemble) and isinstance(pre.neuron_type, Direct):\n raise ValidationError(\n \"'pre' cannot have neuron type 'Direct'. Connections from \"\n \"'Direct' ensembles do not have decoders or weights.\",\n attr=self.name,\n obj=conn,\n )\n\n def check_post(self, conn, rule):\n if rule.modifies == \"encoders\":\n if not isinstance(conn.post_obj, Ensemble):\n raise ValidationError(\n \"'post' must be of type 'Ensemble' (got \"\n f\"'{type(conn.post_obj).__name__}') for learning rule '{rule}'\",\n attr=self.name,\n obj=conn,\n )\n if conn._to_neurons:\n raise ValidationError(\n \"Cannot use an encoder learning rule with a direct neural \"\n \"connection (since encoders are not used in that case).\",\n attr=self.name,\n obj=conn,\n )\n else:\n if not isinstance(conn.post_obj, (Ensemble, Neurons, Node)):\n raise ValidationError(\n \"'post' must be of type 'Ensemble', 'Neurons' or 'Node' (got \"\n f\"'{type(conn.post_obj).__name__}') for learning rule '{rule}'\",\n attr=self.name,\n obj=conn,\n )\n\n def check_rule(self, conn, rule):\n super().check_rule(conn, rule)\n self.check_pre(conn, rule)\n self.check_post(conn, rule)\n self.check_transform(conn, rule)\n\n def check_transform(self, conn, rule):\n if not conn.has_weights and rule.modifies in (\"weights\", \"decoders\"):\n raise ValidationError(\n f\"Learning rule '{rule}' cannot be applied to a connection that does \"\n \"not have weights (transform=None)\",\n attr=self.name,\n obj=conn,\n )\n\n if rule.modifies == \"weights\":\n # If the rule modifies 'weights', then it must have full weights\n if not conn._to_neurons:\n raise ValidationError(\n f\"Learning rule '{rule}' can only be applied on connections to \"\n \"neurons. 
Try setting `solver.weights` to True or \"\n \"connecting between two Neurons objects.\",\n attr=self.name,\n obj=conn,\n )\n\n # transform matrix must be 2D\n pre = conn.pre_obj\n pre_size = pre.n_neurons if isinstance(pre, Ensemble) else conn.pre.size_out\n post_size = conn.post.size_in\n if not conn.solver.weights and conn.transform.shape != (\n post_size,\n pre_size,\n ):\n raise ValidationError(\n \"Transform must be 2D array with shape post_neurons x \"\n f\"pre_neurons ({pre_size}, {post_size})\",\n attr=self.name,\n obj=conn,\n )\n\n\nclass ConnectionSolverParam(SolverParam):\n \"\"\"Connection-specific validation for decoder solvers.\"\"\"\n\n coerce_defaults = False\n\n def coerce(self, conn, solver):\n solver = super().coerce(conn, solver)\n if solver is not None:\n # it's true that setting the solver on any connection without a pre Ensemble\n # has no effect, but we only warn when ``weights=True`` because in this case\n # we can be sure that it's not the default solver\n if solver.weights and not isinstance(conn.pre_obj, Ensemble):\n warnings.warn(\n \"For connections from objects other than Ensembles, setting the \"\n \"solver has no effect\"\n )\n\n if solver.weights and not isinstance(conn.post_obj, Ensemble):\n warnings.warn(\n \"For connections to objects other than Ensembles, setting \"\n \"`weights=True` on a solver has no effect\"\n )\n\n return solver\n\n\nclass EvalPointsParam(DistOrArrayParam):\n coerce_defaults = False\n\n def coerce(self, conn, distorarray):\n \"\"\"Eval points are only valid when pre is an ensemble.\"\"\"\n if distorarray is not None and not isinstance(conn.pre, Ensemble):\n msg = (\n \"eval_points are only valid on connections from ensembles \"\n f\"(got type '{type(conn.pre).__name__}')\"\n )\n raise ValidationError(msg, attr=self.name, obj=conn)\n return super().coerce(conn, distorarray)\n\n\nclass ConnectionFunctionParam(Parameter):\n \"\"\"Connection-specific validation for functions.\"\"\"\n\n coerce_defaults = False\n\n def check_array(self, conn, ndarray):\n if not isinstance(conn.eval_points, np.ndarray):\n raise ValidationError(\n \"In order to set 'function' to specific points, 'eval_points' \"\n \"must also be set to specific points.\",\n attr=self.name,\n obj=conn,\n )\n\n if ndarray.ndim != 2:\n raise ValidationError(\n f\"array must be 2D (got {ndarray.ndim}D)\", attr=self.name, obj=conn\n )\n\n if ndarray.shape[0] != conn.eval_points.shape[0]:\n raise ValidationError(\n \"Number of evaluation points must match number of function points \"\n f\"({ndarray.shape[0]} != {conn.eval_points.shape[0]})\",\n attr=self.name,\n obj=conn,\n )\n\n def check_function_can_be_applied(self, conn, function_info):\n function, _ = function_info\n type_pre = type(conn.pre_obj).__name__\n\n if function is not None:\n if not isinstance(conn.pre_obj, (Node, Ensemble)):\n raise ValidationError(\n \"function can only be set for connections from an Ensemble\"\n f\" or Node (got type '{type_pre}')\",\n attr=self.name,\n obj=conn,\n )\n\n if isinstance(conn.pre_obj, Node) and conn.pre_obj.output is None:\n raise ValidationError(\n \"Cannot apply functions to passthrough nodes\",\n attr=self.name,\n obj=conn,\n )\n\n def coerce(self, conn, function):\n function = super().coerce(conn, function)\n\n if function is None:\n function_info = FunctionInfo(function=None, size=None)\n elif isinstance(function, FunctionInfo):\n function_info = function\n elif is_array_like(function):\n array = np.array(function, copy=False, dtype=rc.float_dtype)\n self.check_array(conn, 
array)\n function_info = FunctionInfo(function=array, size=array.shape[1])\n elif callable(function):\n function_info = FunctionInfo(\n function=function, size=self.determine_size(conn, function)\n )\n # TODO: necessary?\n super().coerce(conn, function_info)\n else:\n raise ValidationError(\n f\"Invalid connection function type '{type(function).__name__}' \"\n \"(must be callable or array-like)\",\n attr=self.name,\n obj=conn,\n )\n\n self.check_function_can_be_applied(conn, function_info)\n\n return function_info\n\n def determine_size(self, instance, function):\n args = self.function_args(instance, function)\n value, invoked = checked_call(function, *args)\n if not invoked:\n raise ValidationError(\n f\"function '{function}' must accept a single np.array argument\",\n attr=self.name,\n obj=instance,\n )\n return np.asarray(value).size\n\n def function_args(self, conn, function):\n x = (\n conn.eval_points[0]\n if is_iterable(conn.eval_points)\n else np.zeros(conn.size_in)\n )\n return (x,)\n\n\nclass ConnectionTransformParam(Parameter):\n \"\"\"Connection-specific validation for transforms.\"\"\"\n\n coerce_defaults = False\n\n def coerce(self, conn, transform):\n if transform is None:\n transform = NoTransform(conn.size_mid)\n elif is_array_like(transform) or isinstance(transform, Distribution):\n transform = Dense((conn.size_out, conn.size_mid), transform)\n\n if transform.size_in != conn.size_mid:\n if isinstance(transform, Dense) and (\n transform.shape[0] == transform.shape[1]\n ):\n # we provide a different error message in this case;\n # the transform is not changing the dimensionality of the\n # signal, so the blame most likely lies with the function\n raise ValidationError(\n \"Function output size is incorrect; should return a \"\n f\"vector of size {conn.size_mid}\",\n attr=self.name,\n obj=conn,\n )\n else:\n raise ValidationError(\n f\"Transform input size ({transform.size_in}) not equal to \"\n f\"'{type(conn.pre_obj).__name__}' output size ({conn.size_mid})\",\n attr=self.name,\n obj=conn,\n )\n\n if transform.size_out != conn.size_out:\n raise ValidationError(\n f\"Transform output size ({transform.size_out}) not equal to connection \"\n f\"output size ({conn.size_out})\",\n attr=self.name,\n obj=conn,\n )\n\n # we don't support repeated indices on 2D transforms because it makes\n # the matrix multiplication more complicated (we'd need to expand\n # the weight matrix for the duplicated rows/columns). it could be done\n # if there were a demand at some point.\n if isinstance(transform, Dense) and len(transform.init_shape) == 2:\n\n def repeated_inds(x):\n return not isinstance(x, slice) and np.unique(x).size != len(x)\n\n if repeated_inds(conn.pre_slice):\n raise ValidationError(\n \"Input object selection has repeated indices\",\n attr=self.name,\n obj=conn,\n )\n if repeated_inds(conn.post_slice):\n raise ValidationError(\n \"Output object selection has repeated indices\",\n attr=self.name,\n obj=conn,\n )\n\n return super().coerce(conn, transform)\n\n\nclass Connection(NengoObject):\n \"\"\"Connects two objects together.\n\n The connection between the two object is unidirectional,\n transmitting information from the first argument, ``pre``,\n to the second argument, ``post``.\n\n Almost any Nengo object can act as the pre or post side of a connection.\n Additionally, you can use Python slice syntax to access only some of the\n dimensions of the pre or post object.\n\n For example, if ``node`` has ``size_out=2`` and ``ensemble`` has\n ``size_in=1``:\n\n .. 
testcode::\n\n with nengo.Network() as net:\n node = nengo.Node(np.zeros(2))\n ensemble = nengo.Ensemble(10, 1)\n\n We could not create the following connection:\n\n .. testcode::\n\n with net:\n nengo.Connection(node, ensemble)\n\n .. testoutput::\n :hide:\n\n Traceback (most recent call last):\n ...\n nengo.exceptions.ValidationError: init: Shape of initial value () does not \\\n match expected shape (1, 2)\n\n But, we could create either of these two connections:\n\n .. testcode::\n\n with net:\n nengo.Connection(node[0], ensemble)\n nengo.Connection(node[1], ensemble)\n\n Parameters\n ----------\n pre : Ensemble or Neurons or Node\n The source Nengo object for the connection.\n post : Ensemble or Neurons or Node or LearningRule\n The destination object for the connection.\n synapse : Synapse or None, optional\n Synapse model to use for filtering (see `~nengo.synapses.Synapse`).\n If *None*, no synapse will be used and information will be transmitted\n without any delay (if supported by the backend---some backends may\n introduce a single time step delay).\n\n Note that at least one connection must have a synapse that is not\n *None* if components are connected in a cycle. Furthermore, a synaptic\n filter with a zero time constant is different from a *None* synapse\n as a synaptic filter will always add a delay of at least one time step.\n function : callable or (n_eval_points, size_mid) array_like, optional\n Function to compute across the connection. Note that ``pre`` must be\n an ensemble to apply a function across the connection.\n If an array is passed, the function is implicitly defined by the\n points in the array and the provided ``eval_points``, which have a\n one-to-one correspondence.\n transform : (size_out, size_mid) array_like, optional\n Linear transform mapping the pre output to the post input.\n This transform is in terms of the sliced size; if either pre\n or post is a slice, the transform must be shaped according to\n the sliced dimensionality. Additionally, the function is applied\n before the transform, so if a function is computed across the\n connection, the transform must be of shape ``(size_out, size_mid)``.\n solver : Solver, optional\n Solver instance to compute decoders or weights\n (see `~nengo.solvers.Solver`). If ``solver.weights`` is True, a full\n connection weight matrix is computed instead of decoders.\n learning_rule_type : LearningRuleType or iterable of LearningRuleType, optional\n Modifies the decoders or connection weights during simulation.\n eval_points : (n_eval_points, size_in) array_like or int, optional\n Points at which to evaluate ``function`` when computing decoders,\n spanning the interval (-pre.radius, pre.radius) in each dimension.\n If None, will use the eval_points associated with ``pre``.\n scale_eval_points : bool, optional\n Indicates whether the evaluation points should be scaled\n by the radius of the pre Ensemble.\n label : str, optional\n A descriptive label for the connection.\n seed : int, optional\n The seed used for random number generation.\n\n Attributes\n ----------\n function : callable\n The given function.\n function_size : int\n The output dimensionality of the given function. 
If no function is\n specified, function_size will be 0.\n label : str\n A human-readable connection label for debugging and visualization.\n If not overridden, incorporates the labels of the pre and post objects.\n learning_rule_type : instance or list or dict of LearningRuleType, optional\n The learning rule types.\n post : Ensemble or Neurons or Node or Probe or ObjView\n The given post object.\n post_obj : Ensemble or Neurons or Node or Probe\n The underlying post object, even if ``post`` is an ``ObjView``.\n post_slice : slice or list or None\n The slice associated with ``post`` if it is an ObjView, or None.\n pre : Ensemble or Neurons or Node or ObjView\n The given pre object.\n pre_obj : Ensemble or Neurons or Node\n The underlying pre object, even if ``post`` is an ``ObjView``.\n pre_slice : slice or list or None\n The slice associated with ``pre`` if it is an ObjView, or None.\n seed : int\n The seed used for random number generation.\n solver : Solver\n The Solver instance that will be used to compute decoders or weights\n (see ``nengo.solvers``).\n synapse : Synapse\n The Synapse model used for filtering across the connection\n (see ``nengo.synapses``).\n transform : (size_out, size_mid) array_like\n Linear transform mapping the pre function output to the post input.\n\n Properties\n ----------\n learning_rule : LearningRule or iterable of LearningRule\n Connectable learning rule object(s) associated with this connection.\n size_in : int\n The number of output dimensions of the pre object.\n Also the input size of the function, if one is specified.\n size_mid : int\n The number of output dimensions of the function, if specified.\n If the function is not specified, then ``size_in == size_mid``.\n size_out : int\n The number of input dimensions of the post object.\n Also the number of output dimensions of the transform.\n \"\"\"\n\n probeable = (\"output\", \"input\", \"weights\")\n\n pre = PrePostParam(\"pre\", nonzero_size_out=True)\n post = PrePostParam(\"post\", nonzero_size_in=True)\n synapse = SynapseParam(\"synapse\", default=Lowpass(tau=0.005))\n function_info = ConnectionFunctionParam(\"function\", default=None, optional=True)\n transform = ConnectionTransformParam(\"transform\", default=None, optional=True)\n solver = ConnectionSolverParam(\"solver\", default=LstsqL2())\n learning_rule_type = ConnectionLearningRuleTypeParam(\n \"learning_rule_type\", default=None, optional=True\n )\n eval_points = EvalPointsParam(\n \"eval_points\", default=None, optional=True, sample_shape=(\"*\", \"size_in\")\n )\n scale_eval_points = BoolParam(\"scale_eval_points\", default=True)\n\n _param_init_order = [\n \"pre\",\n \"post\",\n \"synapse\",\n \"eval_points\",\n \"function_info\",\n \"transform\",\n \"solver\",\n \"learning_rule_type\",\n ]\n\n def __init__(\n self,\n pre,\n post,\n synapse=Default,\n function=Default,\n transform=Default,\n solver=Default,\n learning_rule_type=Default,\n eval_points=Default,\n scale_eval_points=Default,\n label=Default,\n seed=Default,\n ):\n super().__init__(label=label, seed=seed)\n\n self.pre = pre\n self.post = post\n\n self.synapse = synapse\n self.eval_points = eval_points # Must be set before function\n self.scale_eval_points = scale_eval_points\n self.function_info = function\n self.transform = transform # Must be set after function\n self.solver = solver # Must be set before learning rule\n self.learning_rule_type = learning_rule_type # set after transform\n\n def __str__(self):\n return self._str(include_id=False)\n\n def __repr__(self):\n 
return self._str(include_id=True)\n\n def _str(self, include_id):\n desc = \"<Connection \"\n if include_id:\n desc += f\"at 0x{id(self):x} \"\n\n if self.label is None:\n func_txt = (\n \"\"\n if self.function is None\n else f\" computing '{function_name(self.function)}'\"\n )\n desc += f\"from {self.pre} to {self.post}{func_txt}\"\n else:\n desc += self.label\n\n desc += \">\"\n\n return desc\n\n @property\n def function(self):\n return self.function_info.function\n\n @function.setter\n def function(self, function):\n self.function_info = function\n\n @property\n def has_weights(self):\n return not isinstance(self.transform, NoTransform) or (\n isinstance(self.pre_obj, Ensemble)\n and not isinstance(self.pre_obj.neuron_type, Direct)\n )\n\n @property\n def is_decoded(self):\n warnings.warn(\n \"is_decoded is deprecated; directly check the pre/post objects for the \"\n \"properties of interest instead\",\n DeprecationWarning,\n )\n return not (\n self.solver.weights\n or (\n isinstance(self.pre_obj, Neurons) and isinstance(self.post_obj, Neurons)\n )\n )\n\n @property\n def _to_neurons(self):\n return isinstance(self.post_obj, Neurons) or (\n isinstance(self.pre_obj, Ensemble)\n and isinstance(self.post_obj, Ensemble)\n and self.solver.weights\n )\n\n @property\n def learning_rule(self):\n \"\"\"(LearningRule or iterable) Connectable learning rule object(s).\"\"\"\n if self.learning_rule_type is None:\n return None\n\n types = self.learning_rule_type\n if isinstance(types, dict):\n learning_rule = type(types)() # dict of same type\n for k, v in types.items():\n learning_rule[k] = LearningRule(self, v)\n elif is_iterable(types):\n learning_rule = [LearningRule(self, v) for v in types]\n elif isinstance(types, LearningRuleType):\n learning_rule = LearningRule(self, types)\n else:\n assert False, \"Validation should catch this\"\n\n return learning_rule\n\n @property\n def post_obj(self):\n return self.post.obj if isinstance(self.post, ObjView) else self.post\n\n @property\n def post_slice(self):\n return self.post.slice if isinstance(self.post, ObjView) else slice(None)\n\n @property\n def pre_obj(self):\n return self.pre.obj if isinstance(self.pre, ObjView) else self.pre\n\n @property\n def pre_slice(self):\n return self.pre.slice if isinstance(self.pre, ObjView) else slice(None)\n\n @property\n def size_in(self):\n \"\"\"(int) The number of output dimensions of the pre object.\n\n Also the input size of the function, if one is specified.\n \"\"\"\n return self.pre.size_out\n\n @property\n def size_mid(self):\n \"\"\"(int) The number of output dimensions of the function, if specified.\n\n If the function is not specified, then ``size_in == size_mid``.\n \"\"\"\n size = self.function_info.size\n return self.size_in if size is None else size\n\n @property\n def size_out(self):\n \"\"\"(int) The number of input dimensions of the post object.\n\n Also the number of output dimensions of the transform.\n \"\"\"\n return self.post.size_in\n\n\nclass LearningRule:\n \"\"\"An interface for making connections to a learning rule.\n\n Connections to a learning rule are to allow elements of the network to\n affect the learning rule. 
For example, learning rules that use error\n information can obtain that information through a connection.\n\n Learning rule objects should only ever be accessed through the\n ``learning_rule`` attribute of a connection.\n \"\"\"\n\n def __init__(self, connection, learning_rule_type):\n self._connection = connection\n self.learning_rule_type = learning_rule_type\n\n def __repr__(self):\n return (\n f\"<LearningRule at 0x{id(self):x} modifying {self.connection!r} \"\n f\"with type {self.learning_rule_type!r}>\"\n )\n\n def __str__(self):\n return (\n f\"<LearningRule modifying {self.connection} \"\n f\"with type {self.learning_rule_type}>\"\n )\n\n def __eq__(self, other):\n return (\n type(self) == type(other)\n and self._connection is other._connection\n and self.learning_rule_type == other.learning_rule_type\n )\n\n def __hash__(self):\n # +1 to avoid collision with ensemble\n return hash(self._connection) + hash(self.learning_rule_type) + 1\n\n def __getitem__(self, key):\n return ObjView(self, key)\n\n @property\n def connection(self):\n \"\"\"(Connection) The connection modified by the learning rule.\"\"\"\n return self._connection\n\n @property\n def modifies(self):\n \"\"\"(str) The variable modified by the learning rule.\"\"\"\n return self.learning_rule_type.modifies\n\n @property\n def probeable(self):\n \"\"\"(tuple) Signals that can be probed in the learning rule.\"\"\"\n return self.learning_rule_type.probeable\n\n @property\n def size_in(self):\n conn = self.connection\n size_in = self.learning_rule_type.size_in\n if size_in == \"pre\":\n return conn.size_in\n elif size_in == \"mid\":\n return conn.size_mid\n elif size_in == \"post\":\n return conn.size_out\n elif size_in == \"pre_state\":\n return (\n conn.pre_obj.ensemble.size_out\n if isinstance(conn.pre_obj, Neurons)\n else conn.size_in\n )\n elif size_in == \"post_state\":\n return (\n conn.post_obj.ensemble.size_in\n if isinstance(conn.post_obj, Neurons)\n else conn.size_out\n )\n else:\n return size_in # should be an integer\n\n @property\n def size_out(self):\n \"\"\"(int) Cannot connect from learning rules, so always 0.\"\"\"\n return 0 # since a learning rule can't connect to anything\n # TODO: allow probing individual learning rules\n"
] | [
[
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alfoa/TEAL | [
"52ef4075d15d14995af92add6f25120b98948afe"
] | [
"tests/HourlyObjectOrientedTest.py"
] | [
"# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCreated on March 23, 2018\n\n@authors: C. Wang, P. Talbot, A. Alfonsi, A. S. Epiney\n\nIntegration test for using object-oriented API for components.\nDoes not use run-time variables, provides all values through arrays.\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))\nfrom CashFlow import CashFlows\nfrom CashFlow import CashFlow as RunCashFlow\n\ndef run(df):\n \"\"\"\n Main run command.\n @ In, df, pandas.Dataframe, loaded data to run\n @ Out, metrics, dict, dictionary of metric results\n \"\"\"\n settings = build_econ_settings()\n components = build_econ_components(df, settings)\n metrics = RunCashFlow.run(settings, list(components.values()), {})\n return metrics\n\ndef build_econ_settings():\n \"\"\"\n Constructs global settings for econ run\n @ In, None\n @ Out, settigns, CashFlow.GlobalSettings, settings\n \"\"\"\n params = {'DiscountRate': 0.10,\n 'tax': 0.21,\n 'inflation': 0.02184,\n 'ProjectTime': 5,\n 'Indicator': {'name': ['NPV'],\n 'active': ['MainComponent|RecursHourly', 'MainComponent|RecursYearly', 'MainComponent|Cap']}\n }\n settings = CashFlows.GlobalSettings()\n settings.setParams(params)\n settings._verbosity = 0\n return settings\n\ndef build_econ_components(df, settings):\n \"\"\"\n Constructs run components\n @ In, df, pandas.Dataframe, loaded data to run\n @ In, settings, CashFlow.GlobalSettings, settings\n @ Out, comps, dict, dict mapping names to CashFlow component objects\n \"\"\"\n # in this simple case, the project life is the component life for all components.\n life = settings.getProjectTime()\n # construct components\n comps = {}\n ## first (and only) component in the case\n name = 'MainComponent'\n comp = CashFlows.Component()\n comps[name] = comp\n params = {'name': name,\n 'Life_time': 4}\n comp.setParams(params)\n ## add cashflows to this component\n cfs = []\n\n ### recurring cashflow evaluated hourly, to show usage\n cf = createRecurringHourly(df, comp, 'A', 'D')\n cfs.append(cf)\n print('DEBUGG hourly recurring:', cf._yearlyCashflow)\n ### recurring cashflow evaluated yearly\n cf = createRecurringYearly(df, comp, 'A', 'D')\n cfs.append(cf)\n print('DEBUGG yearly recurring:', cf._yearlyCashflow)\n ### capex cashflow\n cf = createCapex(df, comp, 'B', 'D')\n cfs.append(cf)\n ## amortization\n cf.setAmortization('MACRS', 3)\n amorts = comp._createDepreciation(cf)\n cfs.extend(amorts)\n # finally, add cashflows to component\n comp.addCashflows(cfs)\n return comps\n\ndef createCapex(df, comp, driver, alpha):\n \"\"\"\n Constructs capex object\n @ In, df, pandas.Dataframe, loaded data to run\n @ In, comp, CashFlow.Component, component this cf will belong to\n @ In, driver, string, variable name in df to take driver from\n @ In, alpha, string, variable name in df to take alpha from\n @ Out, comps, dict, dict mapping names to CashFlow component 
objects\n \"\"\"\n life = comp.getLifetime()\n # extract alpha, driver as just one value\n alpha = df[alpha].mean()\n driver = df[driver].mean()\n cf = CashFlows.Capex()\n cf.name = 'Cap'\n cf.initParams(life)\n cfFarams = {'name': 'Cap',\n 'alpha': alpha,\n 'driver': driver,\n 'reference': 1.0,\n 'X': 0.8,\n 'depreciate': 3,\n 'mult_target': None,\n 'inflation': False,\n }\n cf.setParams(cfFarams)\n return cf\n\ndef createRecurringYearly(df, comp, driver, alpha):\n \"\"\"\n Constructs recurring cashflow with one value per year\n @ In, df, pandas.Dataframe, loaded data to run\n @ In, comp, CashFlow.Component, component this cf will belong to\n @ In, driver, string, variable name in df to take driver from\n @ In, alpha, string, variable name in df to take alpha from\n @ Out, comps, dict, dict mapping names to CashFlow component objects\n \"\"\"\n life = comp.getLifetime()\n cf = CashFlows.Recurring()\n cfFarams = {'name': 'RecursYearly',\n 'X': 1,\n 'mult_target': None,\n 'inflation': False}\n cf.setParams(cfFarams)\n # because our data comes hourly, collapse it to be yearly\n ## 0 for first year (build year) -> TODO couldn't this be automatic?\n alphas = np.zeros(life + 1)\n drivers = np.zeros(life + 1)\n alphas[1:] = df[alpha].groupby(df.index.year).mean().values[:life]\n drivers[1:] = df[driver].groupby(df.index.year).mean().values[:life]\n # construct annual summary cashflows\n cf.computeYearlyCashflow(alphas, drivers)\n return cf\n\ndef createRecurringHourly(df, comp, driver, alpha):\n \"\"\"\n Constructs recurring cashflow with one value per hour\n @ In, df, pandas.Dataframe, loaded data to run\n @ In, comp, CashFlow.Component, component this cf will belong to\n @ In, driver, string, variable name in df to take driver from\n @ In, alpha, string, variable name in df to take alpha from\n @ Out, comps, dict, dict mapping names to CashFlow component objects\n \"\"\"\n life = comp.getLifetime()\n cf = CashFlows.Recurring()\n cfFarams = {'name': 'RecursHourly',\n 'X': 1,\n 'mult_target': None,\n 'inflation': False}\n cf.setParams(cfFarams)\n cf.initParams(life)\n yearDfs = df.groupby([df.index.year])\n for year, yearDf in yearDfs:\n y = year - 2018\n if y > life:\n break\n cf.computeIntrayearCashflow(y, yearDf[driver], yearDf[alpha])\n return cf\n\n\n\nif __name__ == '__main__':\n # load multiyear data\n ## TODO use analytic data! this is data from a non-proprietary report, but not analytic.\n targets = ['A', 'B', 'C', 'D', 'Year', 'Time']\n indices = ['RAVEN_sample_ID']\n print('Loading data ...')\n full_df = pd.read_csv('aux_file/hourly.csv',\n index_col=indices,\n usecols=targets+indices) #,\n #nrows=300000)\n # just the first sample\n df = full_df.loc[0]\n years = pd.to_datetime(df['Year'].values + 2019, format='%Y')\n hours = pd.to_timedelta(df['Time'].values, unit='H')\n datetime = years + hours\n df.index = datetime\n df = df.sort_index()[['A', 'B', 'C', 'D']]\n\n metrics = run(df)\n\n calculated = metrics['NPV']\n correct = 2.080898547e+08\n if abs(calculated - correct)/correct < 1e-8:\n print('Success!')\n sys.exit(0)\n else:\n print('ERROR: correct: {:1.3e}, calculated: {:1.3e}, diff {:1.3e}'.format(correct, calculated, correct-calculated))\n"
] | [
[
"pandas.to_timedelta",
"pandas.to_datetime",
"pandas.read_csv",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
zydeon/uci-statnlp | [
"5f3e39508ba47a4731faec20aeb20f9d5f1568c3"
] | [
"hw3/train.py"
] | [
"import argparse\nimport copy\nimport datetime\nimport json\nimport os\nimport random\nimport sys\nimport time\nfrom tqdm import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom dataset import TwitterDataset, Vocabulary\nfrom util import load_object_from_dict\n\n\ndef load_datasets(train_dataset_params: dict, validation_dataset_params: dict):\n # load PyTorch ``Dataset`` objects for the train & validation sets\n train_dataset = TwitterDataset(**train_dataset_params)\n validation_dataset = TwitterDataset(**validation_dataset_params)\n\n # use tokens and tags in the training set to create `Vocabulary` objects\n token_vocab = Vocabulary(train_dataset.get_tokens_list(), add_unk_token=True)\n tag_vocab = Vocabulary(train_dataset.get_tags_list())\n\n # add `Vocabulary` objects to datasets for tokens/tags to ID mapping\n train_dataset.set_vocab(token_vocab, tag_vocab)\n validation_dataset.set_vocab(token_vocab, tag_vocab)\n\n return train_dataset, validation_dataset\n\n\ndef train(\n model: torch.nn.Module,\n train_dataloader: DataLoader,\n validation_dataloader: DataLoader,\n optimizer: torch.optim.Optimizer,\n num_epochs: int,\n serialization_dir: str\n):\n start = time.time()\n best_metrics = {'validation_loss': 10e10}\n best_model = None\n for epoch_num in range(num_epochs):\n # training\n model.train()\n for batch in tqdm(train_dataloader, f'Epoch {epoch_num}'):\n optimizer.zero_grad()\n output_dict = model(**batch)\n output_dict['loss'].backward()\n optimizer.step()\n cur_epoch_metrics = model.get_metrics(header='train_')\n\n # compute validation metrics\n model.eval()\n for batch in validation_dataloader:\n model(**batch)\n cur_epoch_metrics.update(model.get_metrics(header='validation_'))\n\n # write the current epochs statistics to file\n with open(f'{serialization_dir}/metrics_epoch_{epoch_num}.json', 'w') as f:\n cur_epoch_metrics['epoch_num'] = epoch_num\n print(json.dumps(cur_epoch_metrics, indent=4))\n f.write(json.dumps(cur_epoch_metrics, indent=4))\n\n # check if current model is the best so far.\n if cur_epoch_metrics['validation_loss'] < best_metrics['validation_loss']:\n print('Best validation loss thus far...\\n')\n best_model = copy.deepcopy(model)\n best_metrics = copy.deepcopy(cur_epoch_metrics)\n\n # write the best metrics we got and best model\n with open(f'{serialization_dir}/best_metrics.json', 'w') as f:\n best_metrics['run_time'] = str(datetime.timedelta(seconds=time.time()-start))\n print(f\"Best Performing Model {json.dumps(best_metrics, indent=4)}\")\n f.write(json.dumps(best_metrics, indent=4))\n torch.save(best_model, f'{serialization_dir}/model.pt')\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config_path\", help=\"path to configuration file\")\n parser.add_argument(\"-s\", \"--serialization_dir\", required=True,\n help=\"save directory for model, dataset, and metrics\")\n args = parser.parse_args()\n config = json.load(open(args.config_path))\n serialization_dir = args.serialization_dir\n random.seed(config['random_seed'])\n torch.manual_seed(config['random_seed'])\n\n if os.path.isdir(serialization_dir):\n sys.exit(f\"{serialization_dir}, already exists. 
Please specify a new \"\n f\"serialization directory or erase the existing one.\")\n else:\n os.makedirs(serialization_dir)\n with open(f'{serialization_dir}/config.json', 'w') as f:\n f.write(json.dumps(config, indent=4))\n\n # load PyTorch `Dataset` and `DataLoader` objects\n train_dataset, validation_dataset = load_datasets(\n train_dataset_params=config['train_dataset'],\n validation_dataset_params=config['validation_dataset']\n )\n batch_size = config['training']['batch_size']\n train_dataloader = DataLoader(train_dataset, batch_size)\n validation_dataloader = DataLoader(validation_dataset, batch_size)\n\n # load model\n model = load_object_from_dict(config['model'],\n token_vocab=train_dataset.token_vocab,\n tag_vocab=train_dataset.tag_vocab)\n\n # load optimizer\n optimizer = load_object_from_dict(config['training']['optimizer'],\n params=model.parameters())\n\n train(\n model=model,\n train_dataloader=train_dataloader,\n validation_dataloader=validation_dataloader,\n optimizer=optimizer,\n num_epochs=config['training']['num_epochs'],\n serialization_dir=serialization_dir\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
plumerai/zoo | [
"55e8ce9a42fb8806503e16fc2340f0fd27948d09"
] | [
"larq_zoo/literature/resnet_e.py"
] | [
"from typing import Optional, Sequence\n\nimport larq as lq\nimport tensorflow as tf\nfrom zookeeper import Field, factory\n\nfrom larq_zoo.core import utils\nfrom larq_zoo.core.model_factory import ModelFactory\n\n\n@factory\nclass BinaryResNetE18Factory(ModelFactory):\n \"\"\"Implementation of [BinaryResNetE18](https://arxiv.org/abs/1906.08637)\"\"\"\n\n num_layers: int = Field(18)\n initial_filters: int = Field(64)\n\n @property\n def input_quantizer(self):\n return lq.quantizers.SteSign(clip_value=1.25)\n\n @property\n def kernel_quantizer(self):\n return lq.quantizers.SteSign(clip_value=1.25)\n\n @property\n def kernel_constraint(self):\n return lq.constraints.WeightClip(clip_value=1.25)\n\n @property\n def spec(self):\n spec = {\n 18: ([2, 2, 2, 2], [64, 128, 256, 512]),\n 34: ([3, 4, 6, 3], [64, 128, 256, 512]),\n 50: ([3, 4, 6, 3], [256, 512, 1024, 2048]),\n 101: ([3, 4, 23, 3], [256, 512, 1024, 2048]),\n 152: ([3, 8, 36, 3], [256, 512, 1024, 2048]),\n }\n try:\n return spec[self.num_layers]\n except Exception:\n raise ValueError(f\"Only specs for layers {list(self.spec.keys())} defined.\")\n\n def residual_block(self, x: tf.Tensor, filters: int, strides: int = 1) -> tf.Tensor:\n downsample = x.get_shape().as_list()[-1] != filters\n\n if downsample:\n residual = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)(x)\n residual = tf.keras.layers.Conv2D(\n filters,\n kernel_size=1,\n use_bias=False,\n kernel_initializer=\"glorot_normal\",\n )(residual)\n residual = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(\n residual\n )\n else:\n residual = x\n\n x = lq.layers.QuantConv2D(\n filters,\n kernel_size=3,\n strides=strides,\n padding=\"same\",\n input_quantizer=self.input_quantizer,\n kernel_quantizer=self.kernel_quantizer,\n kernel_constraint=self.kernel_constraint,\n kernel_initializer=\"glorot_normal\",\n use_bias=False,\n )(x)\n x = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(x)\n\n return tf.keras.layers.add([x, residual])\n\n def build(self) -> tf.keras.models.Model:\n if self.image_input.shape[1] and self.image_input.shape[1] < 50:\n x = tf.keras.layers.Conv2D(\n self.initial_filters,\n kernel_size=3,\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=False,\n )(self.image_input)\n else:\n x = tf.keras.layers.Conv2D(\n self.initial_filters,\n kernel_size=7,\n strides=2,\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=False,\n )(self.image_input)\n\n x = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(x)\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.MaxPool2D(3, strides=2, padding=\"same\")(x)\n x = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(x)\n\n for block, (layers, filters) in enumerate(zip(*self.spec)):\n # This trick adds shortcut connections between original ResNet\n # blocks. 
We wultiply the number of blocks by two, but add only one\n # layer instead of two in each block\n for layer in range(layers * 2):\n strides = 1 if block == 0 or layer != 0 else 2\n x = self.residual_block(x, filters, strides=strides)\n\n x = tf.keras.layers.Activation(\"relu\")(x)\n\n if self.include_top:\n x = utils.global_pool(x)\n x = tf.keras.layers.Dense(\n self.num_classes, kernel_initializer=\"glorot_normal\"\n )(x)\n x = tf.keras.layers.Activation(\"softmax\", dtype=\"float32\")(x)\n\n model = tf.keras.Model(\n inputs=self.image_input,\n outputs=x,\n name=f\"binary_resnet_e_{self.num_layers}\",\n )\n\n # Load weights.\n if self.weights == \"imagenet\":\n # Download appropriate file\n if self.include_top:\n weights_path = utils.download_pretrained_model(\n model=\"resnet_e\",\n version=\"v0.1.0\",\n file=\"resnet_e_18_weights.h5\",\n file_hash=\"bde4a64d42c164a7b10a28debbe1ad5b287c499bc0247ecb00449e6e89f3bf5b\",\n )\n else:\n weights_path = utils.download_pretrained_model(\n model=\"resnet_e\",\n version=\"v0.1.0\",\n file=\"resnet_e_18_weights_notop.h5\",\n file_hash=\"14cb037e47d223827a8d09db88ec73d60e4153a4464dca847e5ae1a155e7f525\",\n )\n model.load_weights(weights_path)\n elif self.weights is not None:\n model.load_weights(self.weights)\n return model\n\n\ndef BinaryResNetE18(\n *, # Keyword arguments only\n input_shape: Optional[Sequence[Optional[int]]] = None,\n input_tensor: Optional[utils.TensorType] = None,\n weights: Optional[str] = \"imagenet\",\n include_top: bool = True,\n num_classes: int = 1000,\n) -> tf.keras.models.Model:\n \"\"\"Instantiates the BinaryResNetE 18 architecture.\n\n Optionally loads weights pre-trained on ImageNet.\n\n ```netron\n resnet_e-v0.1.0/resnet_e_18.json\n ```\n ```summary\n literature.BinaryResNetE18\n ```\n ```plot-altair\n /plots/resnet_e_18.vg.json\n ```\n\n # ImageNet Metrics\n\n | Top-1 Accuracy | Top-5 Accuracy | Parameters | Memory |\n | -------------- | -------------- | ---------- | ------- |\n | 58.32 % | 80.79 % | 11 699 368 | 4.03 MB |\n\n # Arguments\n input_shape: Optional shape tuple, to be specified if you would like to use a\n model with an input image resolution that is not (224, 224, 3).\n It should have exactly 3 inputs channels.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as\n image input for the model.\n weights: one of `None` (random initialization), \"imagenet\" (pre-training on\n ImageNet), or the path to the weights file to be loaded.\n include_top: whether to include the fully-connected layer at the top of the\n network.\n num_classes: optional number of classes to classify images into, only to be\n specified if `include_top` is True, and if no `weights` argument is\n specified.\n\n # Returns\n A Keras model instance.\n\n # Raises\n ValueError: in case of invalid argument for `weights`, or invalid input shape.\n\n # References\n - [Back to Simplicity: How to Train Accurate BNNs from\n Scratch?](https://arxiv.org/abs/1906.08637)\n \"\"\"\n return BinaryResNetE18Factory(\n input_shape=input_shape,\n input_tensor=input_tensor,\n weights=weights,\n include_top=include_top,\n num_classes=num_classes,\n ).build()\n"
] | [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.AvgPool2D",
"tensorflow.keras.layers.add"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
SamuelDodet/challenge-regression | [
"cad1cb5dc36a01c12979edf648bd903f94466af3"
] | [
"app_streamlit.py"
] | [
"import streamlit as st\nfrom utils.preprocessing_model import ModellingData\nfrom utils.utils import change_to_province\nimport pandas as pd\nimport pickle\n\n\ndf = pd.read_csv(\"Dataset.csv\", sep=\",\")\ndel df['Price']\ncolumns = df.columns\n\n\nst.title(\"Belgian Houses Price Prediction\")\nst.markdown(\"Get the price of your house\")\n\nst.header(\"Time to give us some information about your house\")\nst.markdown(\"Let's go!!\")\n\n\nlocality = st.number_input(\"Enter your locality\")\nnumber_of_room = st.number_input(\"Enter the number of rooms\")\narea = st.number_input(\"Enter the area of your house\")\nterrace_area = st.number_input(\"Enter the area of your terrace\")\ngarden_area = st.number_input(\"Enter the area of your garden\")\nsurface_of_the_land = area + terrace_area + garden_area\n\ntype_of_property = st.selectbox('Type of property', [\"house\", \"apartment\"])\nfully_equipped_kitchen = st.selectbox('Is your Kitchen fully equipped?', [\"Yes\", \"No\"])\nfurnished = st.selectbox('Is your house is sell furnished?', [\"Yes\", \"No\"])\nopen_fire = st.selectbox('Do you have an open fire?', [\"Yes\", \"No\"])\nnumber_of_facades = st.selectbox('What is the number of facades?', [2, 3, 4])\nswimming_pool = st.selectbox('Do you have a swimming pool?', [\"Yes\", \"No\"])\nstate_of_building = st.selectbox('What is the state of your house?', [\"medium\", \"good\", \"to renovate\", \"new\"])\nfully_equipped_kitchen = 1 if fully_equipped_kitchen == \"Yes\" else 0\nfurnished = 1 if furnished == \"Yes\" else 0\nopen_fire = 1 if open_fire == \"Yes\" else 0\nswimming_pool = 1 if swimming_pool == \"Yes\" else 0\n\nnumerical_features = ['Locality', 'Number of rooms', 'Area', 'Terrace Area', 'Garden Area', 'Surface of the land' ]\ncategorical_features = ['Type of property','Fully equipped kitchen', 'Furnished', 'Open fire','Number of facades',\n 'Swimming pool', 'State of the building', 'Province', 'Region']\n\nif st.button(\"Submit\"):\n province = change_to_province(locality)[0]\n region = change_to_province(locality)[1]\n x = [locality, type_of_property,number_of_room, area, fully_equipped_kitchen,furnished,open_fire,\n terrace_area, garden_area, surface_of_the_land,\n number_of_facades,\n swimming_pool, state_of_building, province, region\n ]\n x = pd.DataFrame([x],columns=columns)\n file_name = \"xgb_reg.pkl\"\n xgb_model_loaded = pickle.load(open(file_name, \"rb\"))\n result = xgb_model_loaded.predict(x)\n st.success(f\"The estimated price of the property is {round(result[0])} euros\")\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
tom00/tensorflow | [
"88238b9004aafde10b2381cd810e0bb0e096b58e"
] | [
"tensorflow/python/tpu/tpu_embedding_v2.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Mid level API for TPU Embeddings.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport functools\nfrom absl import logging\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.tpu import tpu\nfrom tensorflow.python.tpu import tpu_embedding_v2_utils\nfrom tensorflow.python.tpu.ops import tpu_ops\nfrom tensorflow.python.training.saving import saveable_hook\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_HOOK_KEY = \"TPUEmbedding_saveable\"\n_NAME_KEY = \"_tpu_embedding_layer\"\n\n\n# TODO(bfontain): Cleanup and remove this once there is an implementation of\n# sharded variables that can be used in the PSStrategy with optimizers.\n# We implement just enough of the of a tf.Variable so that this could be passed\n# to an optimizer.\nclass TPUShardedVariable(sharded_variable.ShardedVariable):\n \"\"\"A ShardedVariable class for TPU.\"\"\"\n\n @property\n def _in_graph_mode(self):\n return self.variables[0]._in_graph_mode # pylint: disable=protected-access\n\n @property\n def _unique_id(self):\n return self.variables[0]._unique_id # pylint: disable=protected-access\n\n @property\n def _distribute_strategy(self):\n return self.variables[0]._distribute_strategy # pylint: disable=protected-access\n\n @property\n def _shared_name(self):\n return self._name\n\n\ndef _add_key_attr(op, name):\n op._set_attr(_NAME_KEY, attr_value_pb2.AttrValue(s=compat.as_bytes(name))) # pylint: disable=protected-access\n\n\n@tf_export(\"tpu.experimental.embedding.TPUEmbedding\")\nclass TPUEmbedding(tracking.AutoTrackable):\n \"\"\"The TPUEmbedding mid level 
API.\n\n NOTE: When instantiated under a TPUStrategy, this class can only be created\n once per call to `tf.tpu.experimental.initialize_tpu_system`. If you wish to\n re-initialize the embedding engine you must re-initialize the tpu as well.\n Doing this will clear any variables from TPU, so ensure you have checkpointed\n before you do this. If a further instances of the class are needed,\n set the `initialize_tpu_embedding` argument to `False`.\n\n This class can be used to support training large embeddings on TPU. When\n creating an instance of this class, you must specify the complete set of\n tables and features you expect to lookup in those tables. See the\n documentation of `tf.tpu.experimental.embedding.TableConfig` and\n `tf.tpu.experimental.embedding.FeatureConfig` for more details on the complete\n set of options. We will cover the basic usage here.\n\n NOTE: multiple `FeatureConfig` objects can use the same `TableConfig` object,\n allowing different features to share the same table:\n\n ```python\n table_config_one = tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=...,\n dim=...)\n table_config_two = tf.tpu.experimental.embedding.TableConfig(\n vocabulary_size=...,\n dim=...)\n feature_config = {\n 'feature_one': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_one),\n 'feature_two': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_one),\n 'feature_three': tf.tpu.experimental.embedding.FeatureConfig(\n table=table_config_two)}\n ```\n\n There are two modes under which the `TPUEmbedding` class can used. This\n depends on if the class was created under a `TPUStrategy` scope or not.\n\n Under `TPUStrategy`, we allow access to the method `enqueue`, `dequeue` and\n `apply_gradients`. We will show examples below of how to use these to train\n and evaluate your model. Under CPU, we only access to the `embedding_tables`\n property which allow access to the embedding tables so that you can use them\n to run model evaluation/prediction on CPU.\n\n First lets look at the `TPUStrategy` mode. Initial setup looks like:\n\n ```python\n strategy = tf.distribute.experimental.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n batch_size=1024,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n ```\n\n When creating a distributed dataset that is to be passed to the enqueue\n operation a special input option must be specified:\n\n ```python\n distributed_dataset = (\n strategy.experimental_distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_prefetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n ```\n\n To use this API on TPU you should use a custom training loop. Below is an\n example of a training and evaluation step:\n\n ```python\n @tf.function\n def training_step(dataset_iterator, num_steps):\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n model_output = model(activations)\n loss = ... 
# some function of labels and model_output\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n # Insert your model gradient and optimizer application here\n\n for _ in tf.range(num_steps):\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(embedding_features, ))\n\n @tf.function\n def evalution_step(dataset_iterator, num_steps):\n def tpu_step(tpu_features):\n activations = embedding.dequeue()\n model_output = model(activations)\n # Insert your evaluation code here.\n\n for _ in tf.range(num_steps):\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=False)\n strategy.run(tpu_step, args=(embedding_features, ))\n ```\n\n NOTE: The calls to `enqueue` have `training` set to `True` when\n `embedding.apply_gradients` is used and set to `False` when\n `embedding.apply_gradients` is not present in the function. If you don't\n follow this pattern you may cause an error to be raised or the tpu may\n deadlock.\n\n In the above examples, we assume that the user has a dataset which returns\n a tuple where the first element of the tuple matches the structure of what\n was passed as the `feature_config` argument to the object initializer. Also we\n utilize `tf.range` to get a `tf.while_loop` in order to increase performance.\n\n When checkpointing your model, you should include your\n `tf.tpu.experimental.embedding.TPUEmbedding` object in the checkpoint. It is a\n trackable object and saving it will save the embedding tables and their\n optimizer slot variables:\n\n ```python\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.save(...)\n ```\n\n On CPU, only the `embedding_table` property is usable. This will allow you to\n restore a checkpoint to the object and have access to the table variables:\n\n ```python\n model = model_fn(...)\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=feature_config,\n batch_size=1024,\n optimizer=tf.tpu.experimental.embedding.SGD(0.1))\n checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)\n checkpoint.restore(...)\n\n tables = embedding.embedding_tables\n ```\n\n You can now use table in functions like `tf.nn.embedding_lookup` to perform\n your embedding lookup and pass to your model.\n\n \"\"\"\n\n def __init__(self, feature_config, batch_size, optimizer,\n pipeline_execution_with_tensor_core=False,\n initialize_tpu_embedding=True):\n \"\"\"Creates the TPUEmbedding mid level API object.\n\n ```python\n strategy = tf.distribute.experimental.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=tf.tpu.experimental.embedding.FeatureConfig(\n table=tf.tpu.experimental.embedding.TableConfig(\n dim=...,\n vocabulary_size=...)))\n ```\n\n Args:\n feature_config: A nested structure of\n `tf.tpu.experimental.embedding.FeatureConfig` configs.\n batch_size: The global batch size that you indend to use. Note that is\n fixed and the same batch size must be used for both training and\n evaluation.\n optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,\n `tf.tpu.experimental.embedding.Adagrad` or\n `tf.tpu.experimental.embedding.Adam`.\n pipeline_execution_with_tensor_core: If True, the TPU embedding\n computations will overlap with the TensorCore computations (and hence\n will be one step old). 
Set to True for improved performance.\n initialize_tpu_embedding: If False, will not initialize the TPU embedding\n engine. If this is set to False and another instance of this class has\n not initialized the tpu embedding engine, the creation of this object\n will fail.\n\n Raises:\n ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD,\n Adam or Adagrad).\n \"\"\"\n self._strategy = distribution_strategy_context.get_strategy()\n self._using_tpu = isinstance(self._strategy, tpu_strategy.TPUStrategy)\n self._pipeline_execution_with_tensor_core = (\n pipeline_execution_with_tensor_core)\n\n self._feature_config = feature_config\n\n # The TPU embedding ops are slightly inconsistent with how they refer to\n # tables:\n # * The enqueue op takes a parallel list of tensors for input, one of those\n # is the table id for the feature which matches the integer index of the\n # table in the proto created by _create_config_proto().\n # * The recv_tpu_embedding_activations op emits lookups per table in the\n # order from the config proto.\n # * The send_tpu_embedding_gradients expects input tensors to be per table\n # in the same order as the config proto.\n # * Per optimizer load and retrieve ops are specified per table and take the\n # table name rather than the table id.\n # Thus we must fix a common order to tables and ensure they have unique\n # names.\n\n # Set table order here\n self._table_config = list(\n {feature.table for feature in nest.flatten(feature_config)})\n\n # Ensure tables have unique names. Also error check the optimizer as we\n # specifically don't do that in the TableConfig class to allow high level\n # APIs that are built on this to use strings/other classes to represent\n # optimizers (before they are passed to this class).\n table_names = []\n for i, table in enumerate(self._table_config):\n if table.optimizer is None:\n # TODO(bfontain) Should we allow some sort of optimizer merging here?\n table.optimizer = optimizer\n if not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer): # pylint: disable=protected-access\n raise ValueError(\"{} is an unsupported optimizer class. Please pass an \"\n \"instance of one of the optimizer classes under \"\n \"tf.tpu.experimental.embedding.\".format(\n type(table.optimizer)))\n if table.name is None:\n table.name = \"table_{}\".format(i)\n if table.name in table_names:\n raise ValueError(\"Multiple tables with name {} found.\".format(\n table.name))\n table_names.append(table.name)\n\n if self._using_tpu:\n # Extract a list of callable learning rates also in fixed order. 
Each\n # table in the confix proto will get a index into this list and we will\n # pass this list in the same order after evaluation to the\n # send_tpu_embedding_gradients op.\n self._dynamic_learning_rates = list({\n table.optimizer.learning_rate for table in self._table_config if\n callable(table.optimizer.learning_rate)})\n\n # We need to list of host devices for the load/retrieve operations.\n self._hosts = get_list_of_hosts(self._strategy)\n\n # We generally use the per core batch size, but will have the user pass\n # in a global batch size.\n self._batch_size = batch_size // self._strategy.num_replicas_in_sync\n\n self._config_proto = self._create_config_proto()\n if initialize_tpu_embedding:\n # This is mainly for testing purposes, sometimes we don't want to\n # initialize the embedding engine, but just want a copy of the API\n # which can interact with an already initialized engine.\n logging.info(\"Initializing TPU Embedding engine with config: %s\",\n self._config_proto)\n @def_function.function\n def load_config():\n tpu.initialize_system_for_tpu_embedding(self._config_proto)\n\n load_config()\n logging.info(\"Done initializing TPU Embedding engine.\")\n\n # Create and load variables and slot variables into the TPU.\n # Note that this is a dict of dicts. Keys to the first dict are table names.\n # We would prefer to use TableConfigs, but then these variables won't be\n # properly tracked by the tracking API.\n self._variables = self._create_variables_and_slots()\n if self._using_tpu:\n self._load_variables()\n\n @property\n def embedding_tables(self):\n \"\"\"Returns a dict of embedding tables, keyed by `TableConfig`.\n\n This property only works when the `TPUEmbedding` object is created under a\n non-TPU strategy. This is intended to be used to for CPU based lookup when\n creating a serving checkpoint.\n\n Returns:\n A dict of embedding tables, keyed by `TableConfig`.\n\n Raises:\n RuntimeError: If object was created under a `TPUStrategy`.\n \"\"\"\n # We don't support returning tables on TPU due to their sharded nature and\n # the fact that when using a TPUStrategy:\n # 1. Variables are stale and are only updated when a checkpoint is made.\n # 2. Updating the variables won't affect the actual tables on the TPU.\n if self._using_tpu:\n raise RuntimeError(\"Unable to retrieve embedding tables when using a TPU \"\n \"strategy. If you need access, save your model, \"\n \"create this object under a CPU strategy and restore.\")\n\n # Only return the tables and not the slot variables. On CPU this are honest\n # tf.Variables.\n return {table: self._variables[table.name][\"parameters\"]\n for table in self._table_config}\n\n def _create_config_proto(self):\n \"\"\"Creates the TPUEmbeddingConfiguration proto.\n\n This proto is used to initialize the TPU embedding engine.\n\n Returns:\n A TPUEmbeddingConfiguration proto.\n \"\"\"\n\n config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()\n\n # There are several things that need to be computed here:\n # 1. Each table has a num_features, which corresponds to the number of\n # output rows per example for this table. Sequence features count for\n # their maximum sequence length.\n # 2. Learning rate index: the index of the dynamic learning rate for this\n # table (if it exists) in the list we created at initialization.\n # We don't simply create one learning rate index per table as this has\n # extremely bad performance characteristics. 
The more separate\n # optimization configurations we have, the worse the performance will be.\n num_features = {table: 0 for table in self._table_config}\n for feature in nest.flatten(self._feature_config):\n num_features[feature.table] += (1 if feature.max_sequence_length == 0\n else feature.max_sequence_length)\n\n # Map each callable dynamic learning rate to its in index in the list.\n learning_rate_index = {r: i for i, r in enumerate(\n self._dynamic_learning_rates)}\n\n for table in self._table_config:\n table_descriptor = config_proto.table_descriptor.add()\n table_descriptor.name = table.name\n\n # For small tables, we pad to the number of hosts so that at least one\n # id will be assigned to each host.\n table_descriptor.vocabulary_size = max(table.vocabulary_size,\n self._strategy.extended.num_hosts)\n table_descriptor.dimension = table.dim\n\n table_descriptor.num_features = num_features[table]\n\n parameters = table_descriptor.optimization_parameters\n\n # We handle the learning rate separately here and don't allow the\n # optimization class to handle this, as it doesn't know about dynamic\n # rates.\n if callable(table.optimizer.learning_rate):\n parameters.learning_rate.dynamic.tag = (\n learning_rate_index[table.optimizer.learning_rate])\n else:\n parameters.learning_rate.constant = table.optimizer.learning_rate\n\n # Use optimizer to handle the rest of the parameters.\n table.optimizer._set_optimization_parameters(parameters) # pylint: disable=protected-access\n\n # Always set mode to training, we override the mode during enqueue.\n config_proto.mode = (\n tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING)\n\n config_proto.batch_size_per_tensor_core = self._batch_size\n config_proto.num_hosts = self._strategy.extended.num_hosts\n config_proto.num_tensor_cores = self._strategy.num_replicas_in_sync\n\n # TODO(bfontain): Allow users to pick MOD for the host sharding.\n config_proto.sharding_strategy = (\n tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT)\n config_proto.pipeline_execution_with_tensor_core = (\n self._pipeline_execution_with_tensor_core)\n\n return config_proto\n\n def _compute_per_table_gradients(self, gradients):\n \"\"\"Computes a dict of lists of gradients, keyed by table name.\n\n Args:\n gradients: A nested structure of Tensors (and Nones) with the same\n structure as the feature config.\n\n Returns:\n A dict of lists of tensors, keyed by the table names, containing the\n gradients in the correct order with None gradients repalaced by zeros.\n \"\"\"\n\n nest.assert_same_structure(self._feature_config, gradients)\n\n per_table_gradients = {table: [] for table in self._table_config}\n for (path, gradient), feature in zip(\n nest.flatten_with_joined_string_paths(gradients),\n nest.flatten(self._feature_config)):\n if gradient is not None and not isinstance(gradient, ops.Tensor):\n raise ValueError(\n \"Found {} at path {} in gradients. Expected Tensor.\".format(\n type(gradient), path))\n\n # Expected tensor shape differs for sequence and non-sequence features.\n if feature.max_sequence_length > 0:\n shape = [self._batch_size, feature.max_sequence_length,\n feature.table.dim]\n else:\n shape = [self._batch_size, feature.table.dim]\n\n if gradient is not None:\n if gradient.shape != shape:\n raise ValueError(\"Found gradient of shape {} at path {}. 
Expected \"\n \"shape {}.\".format(gradient.shape, path, shape))\n\n # We expand dims on non-sequence features so that all features are\n # of rank 3 and we can concat on axis=1.\n if len(shape) == 2:\n gradient = array_ops.expand_dims(gradient, axis=1)\n else:\n # No gradient for this feature, since we must give a gradient for all\n # features, pass in a zero tensor here. Note that this is not correct\n # for all optimizers.\n logging.warn(\"No gradient passed for feature %s, sending zero \"\n \"gradient. This may not be correct behavior for certain \"\n \"optimizers like Adam.\", path)\n # Create a shape to mimic the expand_dims above for non-sequence\n # features.\n if len(shape) == 2:\n shape = [shape[0], 1, shape[1]]\n gradient = array_ops.zeros(shape, dtype=dtypes.float32)\n per_table_gradients[feature.table].append(gradient)\n\n return per_table_gradients\n\n def apply_gradients(self, gradients, name=None):\n \"\"\"Applies the gradient update to the embedding tables.\n\n If a gradient of `None` is passed in any position of the nested structure,\n then an gradient update with a zero gradient is applied for that feature.\n For optimizers like SGD or Adagrad, this is the same as applying no update\n at all. For lazy Adam and other sparsely applied optimizers with decay,\n ensure you understand the effect of applying a zero gradient.\n\n ```python\n strategy = tf.distribute.experimental.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.experimental_distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_prefetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(embedding_features, ))\n\n training_step()\n ```\n\n Args:\n gradients: A nested structure of gradients, with structure matching the\n `feature_config` passed to this object.\n name: A name for the underlying op.\n\n Raises:\n RuntimeError: If called when object wasn't created under a `TPUStrategy`.\n ValueError: If a non-`tf.Tensor` non-`None` gradient is passed in, or a\n `tf.Tensor` of the incorrect shape is passed in. Also if\n the size of any sequence in `gradients` does not match corresponding\n sequence in `feature_config`.\n TypeError: If the type of any sequence in `gradients` does not match\n corresponding sequence in `feature_config`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"apply_gradients is not valid when TPUEmbedding \"\n \"object is not created under a TPUStrategy.\")\n\n # send_tpu_embedding_gradients requires per table gradient, if we only have\n # one feature per table this isn't an issue. 
When multiple features share\n # the same table, the order of the features in per table tensor returned by\n # recv_tpu_embedding_activations matches the order in which they were passed\n # to enqueue.\n # In all three places, we use the fixed order given by nest.flatten to have\n # a consistent feature order.\n\n # First construct a dict of tensors one for each table.\n per_table_gradients = self._compute_per_table_gradients(gradients)\n\n # Now that we have a list of gradients we can compute a list of gradients\n # in the fixed order of self._table_config which interleave the gradients of\n # the individual features. We concat on axis 1 and then reshape into a 2d\n # tensor. The send gradients op expects a tensor of shape\n # [num_features*batch_size, dim] for each table.\n interleaved_gradients = []\n for table in self._table_config:\n interleaved_gradients.append(array_ops.reshape(\n array_ops.concat(per_table_gradients[table], axis=1),\n [-1, table.dim]))\n op = tpu_ops.send_tpu_embedding_gradients(\n inputs=interleaved_gradients,\n learning_rates=[math_ops.cast(fn(), dtype=dtypes.float32)\n for fn in self._dynamic_learning_rates],\n config=self._config_proto.SerializeToString())\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(op, name)\n\n def dequeue(self, name=None):\n \"\"\"Get the embedding results.\n\n Returns a nested structure of `tf.Tensor` objects, matching the structure of\n the `feature_config` argument to the `TPUEmbedding` class. The output shape\n of the tensors is `(batch_size, dim)`, where `batch_size` is the per core\n batch size, `dim` is the dimension of the corresponding `TableConfig`. If\n the feature's corresponding `FeatureConfig` has `max_sequence_length`\n greater than 0, the output will be a sequence of shape\n `(batch_size, max_sequence_length, dim)` instead.\n\n ```python\n strategy = tf.distribute.experimental.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.experimental_distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_prefetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(embedding_features, ))\n\n training_step()\n ```\n\n Args:\n name: A name for the underlying op.\n\n Returns:\n A nested structure of tensors, with the same structure as `feature_config`\n passed to this instance of the `TPUEmbedding` object.\n\n Raises:\n RuntimeError: If called when object wasn't created under a `TPUStrategy`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"dequeue is not valid when TPUEmbedding object is not \"\n \"created under a TPUStrategy.\")\n\n # The activations returned by this op are per table. So we must separate\n # them out into per feature activations. The activations are interleaved:\n # for each table, we expect a [num_features*batch_size, dim] tensor.\n # E.g. 
we expect the slice [:num_features, :] to contain the lookups for the\n # first example of all features using this table.\n activations = tpu_ops.recv_tpu_embedding_activations(\n num_outputs=len(self._table_config),\n config=self._config_proto.SerializeToString())\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(activations[0].op, name)\n\n # Compute the number of features for this table.\n num_features = {table: 0 for table in self._table_config}\n for feature in nest.flatten(self._feature_config):\n num_features[feature.table] += (1 if feature.max_sequence_length == 0\n else feature.max_sequence_length)\n\n # Activations are reshaped so that they are indexed by batch size and then\n # by the 'feature' index within the batch. The final dimension should equal\n # the dimension of the table.\n table_to_activation = {\n table: array_ops.reshape(activation,\n [self._batch_size, num_features[table], -1])\n for table, activation in zip(self._table_config, activations)}\n\n # We process the features in the same order we enqueued them.\n # For each feature we take the next slice of the activations, so need to\n # track the activations and the current position we are in.\n table_to_position = {table: 0 for table in self._table_config}\n\n per_feature_activations = []\n for feature in nest.flatten(self._feature_config):\n activation = table_to_activation[feature.table]\n feature_index = table_to_position[feature.table]\n # We treat non-sequence and sequence features differently here as sequence\n # features have rank 3 while non-sequence features have rank 2.\n if feature.max_sequence_length == 0:\n per_feature_activations.append(\n activation[:, feature_index, :])\n table_to_position[feature.table] += 1\n else:\n per_feature_activations.append(\n activation[:, feature_index:(\n feature_index+feature.max_sequence_length), :])\n table_to_position[feature.table] += feature.max_sequence_length\n\n # Pack the list back into the same nested structure as the features.\n return nest.pack_sequence_as(self._feature_config, per_feature_activations)\n\n def _create_variables_and_slots(self):\n \"\"\"Create variables for TPU embeddings.\n\n Note under TPUStrategy this will ensure that all creations happen within a\n variable creation scope of the sharded variable creator.\n\n Returns:\n A dict of dicts. 
The outer dict is keyed by the table names and the inner\n dicts are keyed by 'parameters' and the slot variable names.\n \"\"\"\n\n def create_variables(table):\n \"\"\"Create all variables.\"\"\"\n shape = (table.vocabulary_size, table.dim)\n\n def getter(name, shape, dtype, initializer, trainable):\n return tf_variables.Variable(\n name=name,\n initial_value=functools.partial(initializer, shape, dtype=dtype),\n trainable=trainable)\n\n def variable_creator(name, initializer, trainable=True):\n # use add_variable_with_custom_getter here so that we take advantage of\n # the checkpoint loading to allow restore before the variables get\n # created which avoids double initialization.\n return self._add_variable_with_custom_getter(\n name=name,\n initializer=initializer,\n shape=shape,\n dtype=dtypes.float32,\n getter=getter,\n trainable=trainable)\n\n parameters = variable_creator(table.name, table.initializer,\n trainable=not self._using_tpu)\n\n def slot_creator(name, initializer):\n return variable_creator(table.name + \"/\" + name,\n initializer,\n False)\n\n slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access\n slot_vars[\"parameters\"] = parameters\n return slot_vars\n\n # Store tables based on name rather than TableConfig as we can't track\n # through dicts with non-string keys, i.e. we won't be able to save.\n variables = {}\n for table in self._table_config:\n if not self._using_tpu:\n variables[table.name] = create_variables(table)\n else:\n with variable_scope.variable_creator_scope(\n make_sharded_variable_creator(self._hosts)):\n variables[table.name] = create_variables(table)\n\n return variables\n\n @def_function.function\n def _load_variables(self):\n \"\"\"Load embedding tables to onto TPU for each table and host.\"\"\"\n\n def select_fn(host_id):\n return lambda x: x.variables[host_id]\n\n num_hosts = self._strategy.extended.num_hosts\n config = self._config_proto.SerializeToString()\n for host_id, host in enumerate(self._hosts):\n variables = nest.map_structure(select_fn(host_id), self._variables)\n with ops.device(host):\n for table in self._table_config:\n table.optimizer._load()( # pylint: disable=protected-access\n table_name=table.name,\n num_shards=num_hosts,\n shard_id=host_id,\n config=config,\n **variables[table.name])\n # Ensure that only the first table/first host gets a config so that we\n # don't bloat graph by attaching this large string to each op.\n # We have num tables * num hosts of these so for models with a large\n # number of tables training on a large slice, this can be an issue.\n config = None\n\n @def_function.function\n def _retrieve_variables(self):\n \"\"\"Retrieve embedding tables from TPU to host memory.\"\"\"\n num_hosts = self._strategy.extended.num_hosts\n config = self._config_proto.SerializeToString()\n for host_id, host in enumerate(self._hosts):\n with ops.device(host):\n for table in self._table_config:\n retrieved = table.optimizer._retrieve()( # pylint: disable=protected-access\n table_name=table.name,\n num_shards=num_hosts,\n shard_id=host_id,\n config=config)\n # When there are no slot variables (e.g with SGD) this returns a\n # single tensor rather than a tuple. 
In this case we put the tensor in\n # a list to make the following code easier to write.\n if not isinstance(retrieved, tuple):\n retrieved = (retrieved,)\n\n for i, slot in enumerate([\"parameters\"] +\n table.optimizer._slot_names()): # pylint: disable=protected-access\n # We must assign the CPU variables the values of tensors that were\n # returned from the TPU.\n self._variables[table.name][slot].variables[host_id].assign(\n retrieved[i])\n # Ensure that only the first table/first host gets a config so that we\n # don't bloat graph by attaching this large string to each op.\n # We have num tables * num hosts of these so for models with a large\n # number of tables training on a large slice, this can be an issue.\n config = None\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"Overrides default Trackable implementation to add load/retrieve hook.\"\"\"\n # This saveable should be here in both TPU and CPU checkpoints, so when on\n # CPU, we add the hook with no functions.\n # TODO(bfontain): Update restore logic in saver so that these hooks are\n # always executed. Once that is done, we can output an empty list when on\n # CPU.\n def factory(name=_HOOK_KEY):\n return TPUEmbeddingSaveable(\n name,\n self._load_variables if self._using_tpu else None,\n self._retrieve_variables if self._using_tpu else None)\n return {_HOOK_KEY: factory}\n\n # Some helper functions for the below enqueue function.\n def _add_data_for_tensor(self, tensor, weight, indices, values, weights,\n int_zeros, float_zeros, path):\n if weight is not None:\n raise ValueError(\n \"Weight specified for dense input {}, which is not allowed. \"\n \"Weight will always be 1 in this case.\".format(path))\n # For tensors, there are no indices and no weights.\n indices.append(int_zeros)\n values.append(math_ops.cast(tensor, dtypes.int32))\n weights.append(float_zeros)\n\n def _add_data_for_sparse_tensor(self, tensor, weight, indices, values,\n weights, int_zeros, float_zeros, path):\n indices.append(math_ops.cast(tensor.indices, dtypes.int32))\n values.append(math_ops.cast(tensor.values, dtypes.int32))\n # If we have weights they must be a SparseTensor.\n if weight is not None:\n if not isinstance(weight, sparse_tensor.SparseTensor):\n raise ValueError(\"Weight for {} is type {} which does not match \"\n \"type input which is SparseTensor.\".format(\n path, type(weight)))\n weights.append(math_ops.cast(weight.values, dtypes.float32))\n else:\n weights.append(float_zeros)\n\n def _add_data_for_ragged_tensor(self, tensor, weight, indices, values,\n weights, int_zeros, float_zeros, path):\n indices.append(math_ops.cast(tensor.row_splits, dtypes.int32))\n values.append(math_ops.cast(tensor.values, dtypes.int32))\n # If we have weights they must be a RaggedTensor.\n if weight is not None:\n if not isinstance(weight, ragged_tensor.RaggedTensor):\n raise ValueError(\"Weight for {} is type {} which does not match \"\n \"type input which is RaggedTensor.\".format(\n path, type(weight)))\n weights.append(math_ops.cast(weight.values, dtypes.float32))\n else:\n weights.append(float_zeros)\n\n def _generate_enqueue_op(self, flat_inputs, flat_weights, flat_features,\n device_ordinal, mode_override):\n \"\"\"Outputs a the enqueue op given the inputs and weights.\n\n Args:\n flat_inputs: A list of input tensors.\n flat_weights: A list of input weights (or None) of the same length as\n flat_inputs.\n flat_features: A list of FeatureConfigs of the same length as flat_inputs.\n device_ordinal: The device to create the enqueue op for.\n 
mode_override: A tensor containing the string \"train\" or \"inference\".\n\n Returns:\n The enqueue op.\n \"\"\"\n\n # First we need to understand which op to use. This depends on if sparse\n # or ragged tensors are in the flat_inputs.\n sparse = False\n ragged = False\n for inp in flat_inputs:\n if isinstance(inp, sparse_tensor.SparseTensor):\n sparse = True\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n ragged = True\n if sparse and ragged:\n raise ValueError(\n \"Found both SparseTensors and RaggedTensors in the input to the \"\n \"enqueue operation. Please ensure that your data does not include \"\n \"both SparseTensors and RaggedTensors. It is ok to have Tensors in \"\n \"combination with one of the previous types.\")\n\n # Combiners are per table, list in the same order as the table order.\n combiners = [table.combiner for table in self._table_config]\n\n # Reverse mapping of self._table_config, so that we can lookup the table\n # index.\n table_to_id = {table: i for i, table in enumerate(self._table_config)}\n\n # These parallel arrays will be the inputs to the enqueue op.\n indices = [] # sample_indices for sparse, sample_splits for ragged.\n values = []\n weights = []\n table_ids = []\n max_sequence_lengths = []\n\n # We have to supply a empty/zero tensor in a list position where we don't\n # have data (e.g. indices for standard Tensor input, weight when no weight\n # is specified). We create one op here per call, so that we reduce the\n # graph size.\n int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)\n float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)\n\n # In the following loop we insert casts so that everything is either int32\n # or float32. This is because op inputs which are lists of tensors must be\n # of the same type within the list. Moreover the CPU implementions of these\n # ops cast to these types anyway, so we don't lose any data by casting\n # early.\n for inp, weight, (path, feature) in zip(\n flat_inputs, flat_weights, flat_features):\n table_ids.append(table_to_id[feature.table])\n max_sequence_lengths.append(feature.max_sequence_length)\n if isinstance(inp, ops.Tensor):\n self._add_data_for_tensor(inp, weight, indices, values, weights,\n int_zeros, float_zeros, path)\n elif isinstance(inp, sparse_tensor.SparseTensor):\n self._add_data_for_sparse_tensor(inp, weight, indices, values, weights,\n int_zeros, float_zeros, path)\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n self._add_data_for_ragged_tensor(inp, weight, indices, values, weights,\n int_zeros, float_zeros, path)\n else:\n raise ValueError(\"Input {} is of unknown type {}. Please only pass \"\n \"Tensor, SparseTensor or RaggedTensor as input to \"\n \"enqueue.\".format(path, type(inp)))\n\n if ragged:\n return tpu_ops.enqueue_tpu_embedding_ragged_tensor_batch(\n sample_splits=indices,\n embedding_indices=values,\n aggregation_weights=weights,\n mode_override=mode_override,\n device_ordinal=device_ordinal,\n combiners=combiners,\n table_ids=table_ids,\n max_sequence_lengths=max_sequence_lengths)\n return tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(\n sample_indices=indices,\n embedding_indices=values,\n aggregation_weights=weights,\n mode_override=mode_override,\n device_ordinal=device_ordinal,\n combiners=combiners,\n table_ids=table_ids,\n max_sequence_lengths=max_sequence_lengths)\n\n def _raise_error_for_incorrect_control_flow_context(self):\n \"\"\"Raises an error if we are not in the TPUReplicateContext.\"\"\"\n # Do not allow any XLA control flow (i.e. 
control flow in between a\n # TPUStrategy's run call and the call to this function), as we can't\n # extract the enqueue from the head when in XLA control flow.\n graph = ops.get_default_graph()\n in_tpu_ctx = False\n while graph is not None:\n ctx = graph._get_control_flow_context() # pylint: disable=protected-access\n while ctx is not None:\n if isinstance(ctx, tpu.TPUReplicateContext):\n in_tpu_ctx = True\n break\n ctx = ctx.outer_context\n if in_tpu_ctx:\n break\n graph = getattr(graph, \"outer_graph\", None)\n if graph != ops.get_default_graph() and in_tpu_ctx:\n raise RuntimeError(\n \"Current graph {} does not match graph which contains \"\n \"TPUReplicateContext {}. This is most likely due to the fact that \"\n \"enqueueing embedding data is called inside control flow or a \"\n \"nested function inside `strategy.run`. This is not supported \"\n \"because outside compilation fails to extract the enqueue ops as \"\n \"head of computation.\".format(ops.get_default_graph(), graph))\n return in_tpu_ctx\n\n def _raise_error_for_non_direct_inputs(self, features):\n \"\"\"Checks all tensors in features to see if they are a direct input.\"\"\"\n\n # expand_composites here is important: as composite tensors pass through\n # tpu.replicate, they get 'flattened' into their component tensors and then\n # repacked before being passed to the tpu function. In means that it is the\n # component tensors which are produced by an op with the\n # \"_tpu_input_identity\" attribute.\n for path, input_tensor in nest.flatten_with_joined_string_paths(\n features, expand_composites=True):\n if input_tensor.op.type == \"Placeholder\":\n continue\n try:\n is_input = input_tensor.op.get_attr(\"_tpu_input_identity\")\n except ValueError:\n is_input = False\n if not is_input:\n raise ValueError(\n \"Received input tensor {} which is the output of op {} (type {}) \"\n \"which does not have the `_tpu_input_identity` attr. Please \"\n \"ensure that the inputs to this layer are taken directly from \"\n \"the arguments of the function called by \"\n \"strategy.run. Two possible causes are: dynamic batch size \"\n \"support or you are using a keras layer and are not passing \"\n \"tensors which match the dtype of the `tf.keras.Input`s.\"\n \"If you are triggering dynamic batch size support, you can \"\n \"disable it by passing tf.distribute.RunOptions(\"\n \"experimental_enable_dynamic_batch_size=False) to the options \"\n \"argument of strategy.run().\".format(path,\n input_tensor.op.name,\n input_tensor.op.type))\n\n def _raise_error_for_inputs_not_on_cpu(self, features):\n \"\"\"Checks all tensors in features to see are placed on the CPU.\"\"\"\n\n # expand_composites here is important, we need to check the device of each\n # underlying tensor.\n for path, input_tensor in nest.flatten_with_joined_string_paths(\n features, expand_composites=True):\n spec = tf_device.DeviceSpec.from_string(input_tensor.device)\n if spec.device_type == \"TPU\":\n raise ValueError(\n \"Received input tensor {} which is on a TPU input device {}. Input \"\n \"tensors for TPU embeddings must be placed on the CPU. Please \"\n \"ensure that your dataset is prefetching tensors to the host by \"\n \"setting the 'experimental_prefetch_to_device' option of the \"\n \"dataset distribution function. 
See the documentation of the \"\n \"enqueue method for an example.\".format(\n path, input_tensor.device))\n\n def enqueue(self, features, weights=None, training=True, name=None):\n \"\"\"Enqueues id tensors for embedding lookup.\n\n This function enqueues a structure of features to be looked up in the\n embedding tables. We expect that the batch size of each of the tensors in\n features matches the per core batch size. This will automatically happen if\n your input dataset is batched to the global batch size and you use\n `tf.distribute.experimental.TPUStrategy`'s `experimental_distribute_dataset`\n or if you use `experimental_distribute_datasets_from_function` and batch\n to the per core batch size computed by the context passed to your input\n function.\n\n ```python\n strategy = tf.distribute.experimental.TPUStrategy(...)\n with strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\n distributed_dataset = (\n strategy.experimental_distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_prefetch_to_device=False))\n dataset_iterator = iter(distributed_dataset)\n\n @tf.function\n def training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(embedding_features,))\n\n training_step()\n ```\n\n NOTE: You should specify `training=True` when using\n `embedding.apply_gradients` as above and `training=False` when not using\n `embedding.apply_gradients` (e.g. for frozen embeddings or when doing\n evaluation).\n\n Args:\n features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\n will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\n or `tf.RaggedTensor` is supported per call.\n weights: If not `None`, a nested structure of `tf.Tensor`s,\n `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\n that the tensors should be of float type (and they will be downcast to\n `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\n same for the parallel entries from `features` and similarly for\n `tf.RaggedTensor`s we assume the row_splits are the same.\n training: Defaults to `True`. If `False`, enqueue the batch as inference\n batch (forward pass only). Do not call `apply_gradients` when this is\n `False` as this may lead to a deadlock.\n name: A name for the underlying op.\n\n Raises:\n ValueError: When called inside a strategy.run call and input is not\n directly taken from the args of the `strategy.run` call. Also if\n the size of any sequence in `features` does not match corresponding\n sequence in `feature_config`. Similarly for `weights`, if not `None`.\n RuntimeError: When called inside a strategy.run call and inside XLA\n control flow.\n TypeError: If the type of any sequence in `features` does not match\n corresponding sequence in `feature_config`. 
Similarly for `weights`, if\n not `None`.\n \"\"\"\n if not self._using_tpu:\n raise RuntimeError(\"enqueue is not valid when TPUEmbedding object is not \"\n \"created under a TPUStrategy.\")\n\n nest.assert_same_structure(self._feature_config, features)\n\n # TODO(bfontain): Add a check that the input batch_size matches the per core\n # batch size that this instance of the API was initialized with.\n\n flat_inputs = nest.flatten(features)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(self._feature_config, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(self._feature_config)\n\n self._raise_error_for_inputs_not_on_cpu(features)\n in_tpu_context = self._raise_error_for_incorrect_control_flow_context()\n # If we are in a tpu_context, automatically apply outside compilation.\n if in_tpu_context:\n self._raise_error_for_non_direct_inputs(features)\n\n def generate_enqueue_ops():\n \"\"\"Generate enqueue ops for outside compilation.\"\"\"\n # Note that we put array_ops.where_v2 rather than a python if so that\n # the op is explicitly create and the constant ops are both in the graph\n # even though we don't expect training to be a tensor (and thus generate\n # control flow automatically). This need to make it easier to re-write\n # the graph later if we need to fix which mode needs to be used.\n mode_override = array_ops.where_v2(training,\n constant_op.constant(\"train\"),\n constant_op.constant(\"inference\"))\n\n # Device ordinal is -1 here, a later rewrite will fix this once the op\n # is expanded by outside compilation.\n enqueue_op = self._generate_enqueue_op(\n flat_inputs, flat_weights, flat_features, device_ordinal=-1,\n mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n\n # Ensure that this op has outbound control flow, otherwise it won't be\n # executed.\n ops.get_default_graph().control_outputs.append(enqueue_op)\n\n tpu.outside_compilation(generate_enqueue_ops)\n\n else:\n mode_override = \"train\" if training else \"inference\"\n # We generate enqueue ops per device, so we need to gather the all\n # features for a single device in to a dict.\n # We rely here on the fact that the devices in the PerReplica value occur\n # in the same (standard) order as self._strategy.extended.worker_devices.\n enqueue_ops = []\n for replica_id in range(self._strategy.num_replicas_in_sync):\n replica_inputs = distribute_utils.select_replica(replica_id,\n flat_inputs)\n replica_weights = distribute_utils.select_replica(replica_id,\n flat_weights)\n tpu_device = self._strategy.extended.worker_devices[replica_id]\n # TPU devices string are like /job:worker/replica:0/task:0/device:TPU:0\n # the device ordinal is the last number\n device_ordinal = int(tpu_device.rsplit(\":\", 1)[1])\n with ops.device(device_util.get_host_for_device(tpu_device)):\n enqueue_op = self._generate_enqueue_op(\n replica_inputs, replica_weights, flat_features,\n device_ordinal=device_ordinal, mode_override=mode_override)\n\n # Apply the name tag to the op.\n if name is not None:\n _add_key_attr(enqueue_op, name)\n enqueue_ops.append(enqueue_op)\n ops.get_default_graph().control_outputs.extend(enqueue_ops)\n\n\nclass TPUEmbeddingSaveable(saveable_hook.SaveableHook):\n \"\"\"Save/Restore hook to Retrieve/Load TPUEmbedding variables.\"\"\"\n\n def __init__(self, name, load, retrieve):\n self._load = load\n self._retrieve = retrieve\n 
super(TPUEmbeddingSaveable, self).__init__(name=name)\n\n def before_save(self):\n if self._retrieve is not None:\n self._retrieve()\n\n def after_restore(self):\n if self._load is not None:\n self._load()\n\n\ndef _ragged_embedding_lookup_with_reduce(table, ragged, weights, combiner):\n \"\"\"Compute a ragged lookup followed by a reduce on axis 1.\n\n Args:\n table: The embedding table.\n ragged: A RaggedTensor of ids to look up.\n weights: A RaggedTensor of weights (or None).\n combiner: One of \"mean\", \"sum\", \"sqrtn\".\n\n Returns:\n A Tensor.\n \"\"\"\n if weights is None:\n weights = array_ops.ones_like(ragged)\n weights = array_ops.expand_dims(weights, axis=2)\n ragged_result = embedding_ops.embedding_lookup_ragged(table, ragged)\n ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)\n if combiner == \"mean\":\n ragged_result = ragged_result / math_ops.reduce_sum(weights, axis=1)\n elif combiner == \"sqrtn\":\n ragged_result = ragged_result, math_ops.sqrt(math_ops.reduce_sum(\n weights*weights, axis=1))\n return ragged_result\n\n\ndef cpu_embedding_lookup(inputs, weights, tables, feature_config):\n \"\"\"Uses CPU embedding lookup for embedding ids in features.\n\n Args:\n inputs: a nested structure of Tensors, SparseTensors or RaggedTensors.\n weights: a nested structure of Tensors, SparseTensors or RaggedTensors or\n None for no weights.\n tables: a dict of mapping TableConfig objects to Variables.\n feature_config: a nested structure of FeatureConfig objects with the same\n structure as inputs.\n\n Returns:\n A nested structure of Tensors with the same structure as inputs.\n \"\"\"\n\n nest.assert_same_structure(inputs, feature_config)\n\n flat_inputs = nest.flatten(inputs)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(inputs, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(feature_config)\n\n outputs = []\n for inp, weight, (path, feature) in zip(\n flat_inputs, flat_weights, flat_features):\n table = tables[feature.table]\n if feature.max_sequence_length > 0:\n raise ValueError(\"Sequence features unsupported at this time.\")\n\n if weight is not None:\n if isinstance(inp, ops.Tensor):\n raise ValueError(\n \"Weight specified for {}, but input is dense.\".format(path))\n elif type(weight) is not type(inp):\n raise ValueError(\n \"Weight for {} is of type {} but it does not match type of the \"\n \"input which is {}.\".format(path, type(weight), type(inp)))\n\n if isinstance(inp, ops.Tensor):\n outputs.append(embedding_ops.embedding_lookup_v2(table, inp))\n\n elif isinstance(inp, sparse_tensor.SparseTensor):\n outputs.append(embedding_ops.safe_embedding_lookup_sparse_v2(\n table, inp, sparse_weights=weight, combiner=feature.table.combiner))\n\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n outputs.append(_ragged_embedding_lookup_with_reduce(\n table, inp, weight, feature.table.combiner))\n\n else:\n raise ValueError(\"Input {} is type {}. 
Tensor, SparseTensor or \"\n \"RaggedTensor expected.\".format(path, type(inp)))\n return nest.pack_sequence_as(feature_config, outputs)\n\n\ndef get_list_of_hosts(strategy):\n \"\"\"Returns a sorted list of CPU devices for the remote jobs.\n\n Args:\n strategy: A TPUStrategy object.\n\n Returns:\n A sort list of device strings.\n \"\"\"\n list_of_hosts = []\n # Assume this is sorted by task\n for tpu_device in strategy.extended.worker_devices:\n host = device_util.get_host_for_device(tpu_device)\n if host not in list_of_hosts:\n list_of_hosts.append(host)\n assert len(list_of_hosts) == strategy.extended.num_hosts\n return list_of_hosts\n\n\ndef extract_variable_info(kwargs):\n \"\"\"Extracts the variable creation attributes from the kwargs.\n\n Args:\n kwargs: a dict of keyword arguments that were passed to a variable creator\n scope.\n\n Returns:\n A tuple of variable name, initialization function, shape, and dtype.\n \"\"\"\n if (isinstance(kwargs[\"initial_value\"], functools.partial) and (\n \"shape\" in kwargs[\"initial_value\"].keywords or\n kwargs[\"initial_value\"].args)):\n # Sometimes shape is passed positionally, sometimes it's passed as a kwarg.\n if \"shape\" in kwargs[\"initial_value\"].keywords:\n shape = kwargs[\"initial_value\"].keywords[\"shape\"]\n else:\n shape = kwargs[\"initial_value\"].args[0]\n return (kwargs[\"name\"], shape,\n kwargs[\"initial_value\"].keywords.get(\"dtype\", kwargs[\"dtype\"]),\n kwargs[\"initial_value\"].func)\n elif \"shape\" not in kwargs or kwargs[\"shape\"] is None:\n raise ValueError(\n \"Unable to extract initializer function and shape from {}. Please \"\n \"either pass a function that expects a shape and dtype as the \"\n \"initial value for your variable or functools.partial object with \"\n \"the shape and dtype kwargs set. This is needed so that we can \"\n \"initialize the shards of the ShardedVariable locally.\".format(\n kwargs[\"initial_value\"]))\n else:\n return (kwargs[\"name\"], kwargs[\"shape\"], kwargs[\"dtype\"],\n kwargs[\"initial_value\"])\n\n\ndef make_sharded_variable_creator(hosts):\n \"\"\"Makes a sharded variable creator given a list of hosts.\n\n Args:\n hosts: a list of tensorflow devices on which to shard the tensors.\n\n Returns:\n A variable creator function.\n \"\"\"\n\n def sharded_variable_creator(next_creator, *args, **kwargs):\n \"\"\"The sharded variable creator.\"\"\"\n kwargs[\"skip_mirrored_creator\"] = True\n\n num_hosts = len(hosts)\n name, shape, dtype, initial_value = extract_variable_info(kwargs)\n rows = shape[0]\n cols = shape[1]\n missing = rows % num_hosts\n # we partition as if we were using MOD sharding.\n partitions = ([rows // num_hosts + 1] * missing + [rows // num_hosts] *\n (num_hosts - missing))\n variables = []\n newkwargs = kwargs\n newkwargs[\"dtype\"] = dtype\n for i, p in enumerate(partitions):\n with ops.device(hosts[i]):\n newkwargs[\"shape\"] = (p, cols)\n newkwargs[\"name\"] = \"{}_{}\".format(name, i)\n newkwargs[\"initial_value\"] = (\n lambda: initial_value(newkwargs[\"shape\"], dtype=dtype))\n variables.append(next_creator(*args, **kwargs))\n return TPUShardedVariable(variables, name=name)\n return sharded_variable_creator\n"
] | [
[
"tensorflow.python.ops.embedding_ops.embedding_lookup_v2",
"tensorflow.python.tpu.tpu.outside_compilation",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.embedding_ops.embedding_lookup_ragged",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.tpu.ops.tpu_ops.enqueue_tpu_embedding_ragged_tensor_batch",
"tensorflow.python.distribute.distribute_utils.select_replica",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.tpu.tpu.initialize_system_for_tpu_embedding",
"tensorflow.python.util.nest.flatten_with_joined_string_paths",
"tensorflow.core.protobuf.tpu.tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration",
"tensorflow.python.ops.embedding_ops.safe_embedding_lookup_sparse_v2",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.tpu.ops.tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
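The API list for the file above is dominated by TensorFlow's internal `nest` utilities (flatten, pack_sequence_as, assert_same_structure), which the embedding code uses to walk the user's feature_config structure and to repack per-feature activations. A minimal, hedged sketch of that flatten/pack round-trip using the public `tf.nest` namespace; the nested structure below is a made-up stand-in, not taken from the file:

    import tensorflow as tf

    # Made-up nested structure standing in for a feature_config-style object.
    feature_config = {"user": ("user_table",), "item": ("item_table", "extra")}

    flat = tf.nest.flatten(feature_config)                     # leaves in a deterministic order
    rebuilt = tf.nest.pack_sequence_as(feature_config, flat)   # inverse operation
    tf.nest.assert_same_structure(feature_config, rebuilt)     # raises if shapes of nesting differ
    print(flat, rebuilt)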
willb/spark-rapids | [
"1e1c4ca9eab61383fd7187b937e6da65be402e95"
] | [
"integration_tests/src/main/python/udf_test.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nfrom pyspark.sql.pandas.utils import require_minimum_pyarrow_version, require_minimum_pandas_version\ntry:\n require_minimum_pandas_version()\nexcept Exception as e:\n pytestmark = pytest.mark.skip(reason=str(e))\n\ntry:\n require_minimum_pyarrow_version()\nexcept Exception as e:\n pytestmark = pytest.mark.skip(reason=str(e))\n\nfrom asserts import assert_gpu_and_cpu_are_equal_collect\nfrom data_gen import *\nfrom marks import incompat, approximate_float, allow_non_gpu, ignore_order\nfrom pyspark.sql import Window\nfrom pyspark.sql.types import *\nimport pyspark.sql.functions as f\nimport pandas as pd\nfrom typing import Iterator, Tuple\n\narrow_udf_conf = {'spark.sql.execution.arrow.pyspark.enabled': 'true'}\n\n####################################################################\n# NOTE: pytest does not play well with pyspark udfs, because pyspark\n# tries to import the dependencies for top level functions and\n# pytest messes around with imports. To make this work, all UDFs\n# must either be lambdas or totally defined within the test method\n# itself.\n####################################################################\n\[email protected]('data_gen', integral_gens, ids=idfn)\ndef test_pandas_math_udf(data_gen):\n def add(a, b):\n return a + b\n my_udf = f.pandas_udf(add, returnType=LongType())\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : binary_op_df(spark, data_gen).select(\n my_udf(f.col('a') - 3, f.col('b'))),\n conf=arrow_udf_conf)\n\[email protected]('data_gen', integral_gens, ids=idfn)\ndef test_iterator_math_udf(data_gen):\n def iterator_add(to_process: Iterator[Tuple[pd.Series, pd.Series]]) -> Iterator[pd.Series]:\n for a, b in to_process:\n yield a + b\n\n my_udf = f.pandas_udf(iterator_add, returnType=LongType())\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : binary_op_df(spark, data_gen).select(\n my_udf(f.col('a'), f.col('b'))),\n conf=arrow_udf_conf)\n\n@allow_non_gpu('AggregateInPandasExec', 'PythonUDF', 'Alias')\[email protected]('data_gen', integral_gens, ids=idfn)\ndef test_single_aggregate_udf(data_gen):\n @f.pandas_udf('double')\n def pandas_sum(to_process: pd.Series) -> float:\n return to_process.sum()\n\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : unary_op_df(spark, data_gen).select(\n pandas_sum(f.col('a'))),\n conf=arrow_udf_conf)\n\[email protected](\"https://github.com/NVIDIA/spark-rapids/issues/757\")\n@ignore_order\n@allow_non_gpu('AggregateInPandasExec', 'PythonUDF', 'Alias')\[email protected]('data_gen', integral_gens, ids=idfn)\ndef test_group_aggregate_udf(data_gen):\n @f.pandas_udf('long')\n def pandas_sum(to_process: pd.Series) -> int:\n return to_process.sum()\n\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : binary_op_df(spark, data_gen)\\\n .groupBy('a')\\\n .agg(pandas_sum(f.col('b'))),\n conf=arrow_udf_conf)\n\[email 
protected](\"https://github.com/NVIDIA/spark-rapids/issues/740\")\n@ignore_order\n@allow_non_gpu('WindowInPandasExec', 'PythonUDF', 'WindowExpression', 'Alias', 'WindowSpecDefinition', 'SpecifiedWindowFrame', 'UnboundedPreceding$', 'UnboundedFollowing$')\[email protected]('data_gen', integral_gens, ids=idfn)\ndef test_window_aggregate_udf(data_gen):\n @f.pandas_udf('long')\n def pandas_sum(to_process: pd.Series) -> int:\n return to_process.sum()\n\n w = Window\\\n .partitionBy('a') \\\n .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : binary_op_df(spark, data_gen).select(\n pandas_sum(f.col('b')).over(w)),\n conf=arrow_udf_conf)\n\n@ignore_order\n@allow_non_gpu('FlatMapGroupsInPandasExec', 'PythonUDF', 'Alias')\[email protected]('data_gen', [LongGen()], ids=idfn)\ndef test_group_apply_udf(data_gen):\n def pandas_add(data):\n data.sum = data.b + data.a\n return data\n\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : binary_op_df(spark, data_gen)\\\n .groupBy('a')\\\n .applyInPandas(pandas_add, schema=\"a long, b long\"),\n conf=arrow_udf_conf)\n\n\n@allow_non_gpu('MapInPandasExec', 'PythonUDF', 'Alias')\[email protected]('data_gen', [LongGen()], ids=idfn)\ndef test_map_apply_udf(data_gen):\n def pandas_filter(iterator):\n for data in iterator:\n yield data[data.b <= data.a]\n\n assert_gpu_and_cpu_are_equal_collect(\n lambda spark : binary_op_df(spark, data_gen)\\\n .mapInPandas(pandas_filter, schema=\"a long, b long\"),\n conf=arrow_udf_conf)\n\ndef create_df(spark, data_gen, left_length, right_length):\n left = binary_op_df(spark, data_gen, length=left_length)\n right = binary_op_df(spark, data_gen, length=right_length)\n return left, right\n\n@ignore_order\n@allow_non_gpu('FlatMapCoGroupsInPandasExec', 'PythonUDF', 'Alias')\[email protected]('data_gen', [ShortGen(nullable=False)], ids=idfn)\ndef test_cogroup_apply_udf(data_gen):\n def asof_join(l, r):\n return pd.merge_asof(l, r, on='a', by='b')\n\n def do_it(spark):\n left, right = create_df(spark, data_gen, 500, 500)\n return left.groupby('a').cogroup(\n right.groupby('a')).applyInPandas(\n asof_join, schema=\"a int, b int\")\n assert_gpu_and_cpu_are_equal_collect(do_it, conf=arrow_udf_conf)\n"
] | [
[
"pandas.merge_asof"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
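The only extracted API for this file is `pandas.merge_asof`, used inside `test_cogroup_apply_udf` as `pd.merge_asof(l, r, on='a', by='b')`. A small self-contained sketch of that call outside Spark; the data values are invented and only the column names mirror the test's schema. Note that `merge_asof` requires both frames to be sorted on the `on` key:

    import pandas as pd

    left = pd.DataFrame({"a": [1, 5, 10], "b": [0, 0, 0], "lv": [1.0, 2.0, 3.0]})
    right = pd.DataFrame({"a": [2, 3, 7], "b": [0, 0, 0], "rv": [10, 20, 30]})

    # Nearest-key (backward) join on 'a', restricted to rows whose 'b' matches exactly.
    joined = pd.merge_asof(left.sort_values("a"), right.sort_values("a"), on="a", by="b")
    print(joined)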
girardea/cours-data | [
"ea25eac12b04d71c72954ccbe49551879562d014"
] | [
"projets/irigo-app/db.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Modèle de données propre pour les données Irigo, ainsi que les fonctions\n permettant d'importer depuis les données brutes dans le modèle de données\n propre.\n\"\"\"\nfrom sqlalchemy import (Column, Integer, BigInteger, Float, MetaData, Table,\n ForeignKey, select, String, DateTime, func)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nimport pandas as pd\nimport datetime as dt\nimport json\n\n# Local imports\nimport utils\n\nengine = utils.create_engine(flavor='sqlite')\n\nSession = sessionmaker(bind=engine)\n\nBase = declarative_base()\n\nclass Arret(Base):\n __tablename__ = 'arret'\n id_arret = Column(BigInteger, primary_key=True, autoincrement=True)\n\n nom_arret = Column(String(32))\n mne_arret = Column(String(32))\n\n def __repr__(self):\n return \"nom_arret='{}'\".format(self.nom_arret)\n\nclass Vehicule(Base):\n __tablename__ = 'vehicule'\n id_vehicule = Column(BigInteger, primary_key=True, autoincrement=True)\n\n type_vehicule = Column(String(32))\n etat_vehicule = Column(String(32))\n\nclass Ligne(Base):\n __tablename__ = 'ligne'\n id_ligne = Column(BigInteger, primary_key=True, autoincrement=True)\n\n nom_ligne = Column(String(32))\n num_ligne = Column(String(32))\n\nclass Trajet(Base):\n __tablename__ = 'trajet'\n id_trajet = Column(BigInteger, primary_key=True, autoincrement=True)\n\n id_vehicule = Column(BigInteger, ForeignKey('vehicule.id_vehicule'))\n id_ligne = Column(BigInteger, ForeignKey('ligne.id_ligne'))\n latitude = Column(Float)\n longitude = Column(Float)\n destination = Column(String(32))\n\n etapes = relationship(\"Etape\")\n\n def __str__( self ):\n return 'id:'+str(self.id_trajet)+', id_vehicule:'+str(self.id_vehicule)+', id_ligne:'+str(self.id_ligne)\n\nclass Etape(Base):\n __tablename__ = 'etape'\n id_etape = Column(BigInteger, primary_key=True, autoincrement=True)\n\n id_trajet = Column(BigInteger, ForeignKey('trajet.id_trajet'))\n id_arret = Column(BigInteger, ForeignKey('arret.id_arret'))\n heure_arret_theorique = Column(DateTime)\n heure_arret_estimee = Column(DateTime)\n record_timestamp = Column(DateTime)\n ecart = Column(Integer)\n\n def __str__( self ):\n return 'id:'+str(self.id_etape)+', id_trajet:'+str(self.id_trajet)+', id_arret:'+str(self.id_arret)+', ecart:'+str(self.ecart)\n\n \ndef create_database():\n Base.metadata.create_all(bind=engine)\n\ndef drop_database():\n Base.metadata.drop_all(bind=engine)\n\ndef check_database():\n \"\"\"Affcihe quelques infos sur les tables en base\"\"\"\n connection = engine.connect()\n\n for tablename in engine.table_names():\n\n # création de l'objet Table\n table = Table(tablename, MetaData(), autoload=True,\n autoload_with=engine)\n # nom de la table\n print(\"\\n*** {} ***\\n\".format(tablename))\n\n # nombre de lignes dans la table\n stmt = select([func.count(table)])\n nrows = connection.execute(stmt).scalar()\n print(\"{} lignes en base.\".format(nrows))\n\n # les premières lignes\n print(\"Premières lignes :\")\n stmt = select([table]).limit(5)\n print(pd.read_sql_query(stmt, connection))\n\n connection.close()\n"
] | [
[
"pandas.read_sql_query"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
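`pandas.read_sql_query` is the single API extracted here; in `check_database` it is fed a SQLAlchemy `select(...).limit(5)` statement plus an open connection. A rough standalone sketch against a throw-away in-memory SQLite engine; the table name mirrors the file's `arret` model, but the sample rows are made up:

    import pandas as pd
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")  # in-memory database, discarded on exit

    # Seed a tiny table shaped like the 'arret' model above, then read it back.
    pd.DataFrame({"id_arret": [1, 2], "nom_arret": ["Foch", "Ralliement"]}).to_sql(
        "arret", engine, index=False)
    df = pd.read_sql_query("SELECT * FROM arret LIMIT 5", engine)
    print(df)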
norlandrhagen/cmip6-downscaling | [
"45c35c28500067bb4333b7efddd094d17f8fcd07"
] | [
"cmip6_downscaling/disagg/derived_variables.py"
] | [
"import numpy as np\nimport xarray as xr\n\nfrom ..constants import KELVIN, MB_PER_KPA\n\nsat_pressure_0c = 6.112 # [milibar]\nmin_vap = 0.005 # lower limit for vapor pressure\n\n\ndef dewpoint(e):\n \"\"\"Calculate the ambient dewpoint given the vapor pressure.\n\n Parameters\n ----------\n e : scalar or array-like\n Water vapor partial pressure [milibar]\n\n Returns\n -------\n dewpoint : scalar or array-like\n dewpoint temperature [C]\n\n See Also\n --------\n metpy.calc.dewpoint\n \"\"\"\n e_milibar = e # [milibar]\n val = np.log(e_milibar / sat_pressure_0c)\n return 243.5 * val / (17.67 - val) # dewpoint temperature [C]\n\n\ndef saturation_vapor_pressure(temperature):\n \"\"\"Calculate the saturation water vapor (partial) pressure.\n\n Parameters\n ----------\n temperature : scalar or array-like\n air temperature [K]\n\n Returns\n -------\n svp : scalar or array-like\n The saturation water vapor (partial) pressure [milibar]\n\n See Also\n --------\n metpy.calc.saturation_vapor_pressure\n \"\"\"\n # temperature [k]\n return sat_pressure_0c * np.exp(\n 17.67 * (temperature - KELVIN) / (temperature - 29.65)\n ) # [milibar]\n\n\ndef dewpoint_from_relative_humidity(temperature, rh):\n \"\"\"Calculate the ambient dewpoint given air temperature and relative humidity.\n\n Parameters\n ----------\n temperature : scalar or array-like\n air temperature [K]\n rh : scalar or array-like\n relative humidity expressed as a ratio in the range 0 < rh <= 1\n\n Returns\n -------\n dewpoint : scalar or array-like\n The dewpoint temperature [C]\n\n See Also\n --------\n metpy.calc.dewpoint_from_relative_humidity\n \"\"\"\n return dewpoint(rh * saturation_vapor_pressure(temperature))\n\n\ndef relative_humidity_from_dewpoint(temperature, dewpt):\n \"\"\"Calculate the relative humidity.\n\n Uses temperature and dewpoint in celsius to calculate relative\n humidity using the ratio of vapor pressure to saturation vapor pressures.\n\n Parameters\n ----------\n temperature : scalar or array-like\n air temperature [K]\n dewpt : scalar or array-like\n dewpoint temperature [K]\n\n Returns\n -------\n scalar or array-like\n relative humidity\n\n See Also\n --------\n metpyt.calc.relative_humidity_from_dewpoint\n \"\"\"\n e = saturation_vapor_pressure(dewpt)\n e_s = saturation_vapor_pressure(temperature)\n return e / e_s\n\n\ndef process(ds: xr.Dataset) -> xr.Dataset:\n \"\"\"Calculate missing derived variables\n\n Parameters\n ----------\n ds : xr.Dataset\n Input dataset\n\n Returns\n -------\n ds : xr.Dataset\n Output dataset, includes the follwoing variables: {'tmean', 'vap', 'rh', 'tdew', 'vpd'}\n \"\"\"\n\n if 'tmean' not in ds:\n ds['tmean'] = (ds['tmax'] + ds['tmin']) / 2 # [C]\n\n sat_vp = saturation_vapor_pressure(ds['tmean'] + KELVIN) / MB_PER_KPA\n\n if 'vap' not in ds and 'rh' in ds:\n ds['vap'] = ds['rh'] * sat_vp\n ds['vap'] = ds['vap'].clip(min=min_vap)\n ds['rh'] = ds['vap'] / sat_vp\n ds['tdew'] = dewpoint_from_relative_humidity(ds['tmean'] + KELVIN, ds['rh'])\n elif 'rh' not in ds and 'vap' in ds:\n ds['vap'] = ds['vap'].clip(min=min_vap)\n ds['rh'] = ds['vap'] / sat_vp\n ds['tdew'] = dewpoint(ds['vap'] * MB_PER_KPA)\n elif 'rh' not in ds and 'tdew' in ds:\n ds['rh'] = relative_humidity_from_dewpoint(ds['tmean'] + KELVIN, ds['tdew'] + KELVIN)\n ds['vap'] = ds['rh'] * sat_vp\n ds['vap'] = ds['vap'].clip(min=min_vap)\n ds['rh'] = ds['vap'] / sat_vp\n ds['tdew'] = dewpoint_from_relative_humidity(ds['tmean'] + KELVIN, ds['rh'])\n else:\n raise ValueError('not able to calculate vap/rh/tdew with given input 
variables')\n\n if 'vpd' not in ds:\n ds['vpd'] = sat_vp - ds['vap']\n\n if not all(v in ds for v in ['tmean', 'vap', 'rh', 'tdew', 'vpd']):\n raise ValueError('some derived variables were not calculated: %s' % ds)\n\n return ds\n"
] | [
[
"numpy.log",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
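Only `numpy.exp` and `numpy.log` are extracted for this file; they implement the Magnus-type saturation vapor pressure and its inverse (dewpoint). A condensed round-trip of the same formulas on a small array, assuming `KELVIN = 273.15` (the constant is imported from elsewhere in that package, so its value is an assumption here):

    import numpy as np

    KELVIN = 273.15               # assumed value of the imported constant
    SAT_PRESSURE_0C = 6.112       # [millibar], as in the module above

    def saturation_vapor_pressure(t_k):
        # Magnus-type formula used in the module; temperature in Kelvin.
        return SAT_PRESSURE_0C * np.exp(17.67 * (t_k - KELVIN) / (t_k - 29.65))

    def dewpoint(e_mb):
        # Invert the same formula: vapor pressure [millibar] -> dewpoint [C].
        val = np.log(e_mb / SAT_PRESSURE_0C)
        return 243.5 * val / (17.67 - val)

    t = np.array([280.0, 290.0, 300.0])                        # air temperature [K]
    print(dewpoint(0.5 * saturation_vapor_pressure(t)))        # dewpoint at 50% RH [C]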
gesiscss/IWAAN | [
"8d5b836fdf27750c65f3f98caa6e92d92c11d8c3"
] | [
"external/wikipedia.py"
] | [
"\"\"\"Summary\n\"\"\"\nfrom typing import Union\n\nimport pandas as pd\nimport numpy as np\n\nfrom .api import API, DataView\nfrom .utils import chunks\nfrom itertools import chain\nfrom urllib.parse import quote_plus\n\n\nclass WikipediaDV(DataView):\n \"\"\"Summary\n \"\"\"\n\n def get_page(self, page: Union[int, str]) -> pd.Series:\n \"\"\"Get pageview counts for an page\n\n Args:\n page (Union[int, str]): Description\n\n Returns:\n pd.Series: info of the page\n\n Deleted Parameters:\n page_id (Union[int, str]): Description\n\n Raises:\n Exception: Description\n\n \"\"\"\n\n res = self.api.get_page(page)\n\n pages = res['query']['pages']\n if len(pages) == 0:\n raise Exception('Article Not Found')\n\n elif len(pages) > 1:\n raise Exception('Several Pages Found')\n\n page_dict = next(iter(pages.values()))\n\n return pd.Series({\n 'page_id': page_dict['pageid'],\n 'title': page_dict['title'],\n 'ns': page_dict['ns'],\n })\n\n def get_editor(self, editor: Union[int, str]) -> pd.Series:\n \"\"\"Summary\n\n Args:\n editor (Union[int, str]): Description\n\n Returns:\n pd.Series: info of editor\n\n Raises:\n Exception: Description\n \"\"\"\n res = self.api.get_editor(editor)\n\n editors = res['query']['users']\n if len(editors) == 0:\n raise Exception('Editor Not Found')\n\n elif len(editors) > 1:\n raise Exception('Several Editors Found')\n\n return pd.Series(editors[0])\n\n def search_page(self, search_query: str) -> pd.Series:\n \"\"\"Summary\n\n Args:\n search_query (str): Description\n\n Returns:\n pd.Series: page title\n\n Raises:\n Exception: Description\n \"\"\"\n res = self.api.search_page(search_query)\n\n result = res[1]\n if len(result) == 0:\n raise Exception('Article Not Found')\n\n elif len(result) > 1:\n raise Exception('Several Pages Found')\n\n # return pd.Series({\n # 'title': result[0]\n # })\n\n return result[0]\n\n def get_editors(self, editors: list) -> pd.Series:\n\n res = (self.api.get_editors(chunk)['query'][\n 'users'] for chunk in chunks(editors, 50))\n\n return pd.DataFrame(x for x in chain(*res))\n \n def get_talk_content(self, pageid: Union[int, str]) -> pd.Series:\n res = self.api.get_talk_content(pageid, continue_param=None) \n talk_content = pd.DataFrame(next(iter(res[\"query\"][\"pages\"].values()))[\"revisions\"])\n while True:\n if 'continue' in res.keys():\n continue_param = 'continue=' + res['continue']['continue'] + '&rvcontinue=' + res['continue']['rvcontinue']\n res = self.api.get_talk_content(pageid, continue_param=continue_param) \n temp = pd.DataFrame(next(iter(res[\"query\"][\"pages\"].values()))[\"revisions\"])\n talk_content = talk_content.append(temp, sort=True)\n \n else:\n break\n \n\n return talk_content\n \n def get_talk_rev_diff(self, fromrev, torev) -> pd.Series:\n\n res = self.api.get_talk_rev_diff(fromrev, torev) \n talk_diff = pd.Series(next(iter(res.values())))\n return talk_diff\n \n def get_protection(self, page: str) -> pd.DataFrame:\n \n res = self.api.get_protection(page)[\"query\"][\"logevents\"]\n for i in res:\n try:\n i[\"params\"] = i[\"params\"][\"description\"]\n except KeyError:\n pass\n \n return pd.DataFrame(res)\n\n\nclass WikipediaAPI(API):\n \"\"\"Summary\n\n Attributes:\n base (TYPE): Description\n\n Deleted Attributes:\n project (TYPE): Description\n \"\"\"\n\n def __init__(self, lng: str = 'en',\n domain: str = 'wikipedia.org',\n api_username: str = None,\n api_password: str = None,\n api_key: str = None,\n protocol: str = 'https',\n attempts: int = 2):\n \"\"\"Constructor of the WikiWhoAPI\n\n Args:\n domain 
(str, optional): the domain that hosts the api\n api_username (str, optional): WikiWho API username\n api_password (str, optional): WikiWho API password\n api_key (str, optional): WikiWho API key\n protocol (str, optional): the protocol of the url\n attempts (int, optional): the number of attempts before giving up trying to connect\n\n Deleted Parameters:\n project (str, optional): e.g. en.wikipedia.org\n version (str, optional): version of the API (e.g. rest_v1)\n \"\"\"\n super().__init__(protocol=protocol,\n lng=lng,\n domain=domain,\n api_username=api_username,\n api_password=api_password,\n api_key=api_key,\n attempts=attempts)\n self.base = f'{self.base}w/api.php?'\n\n def get_page(self, page: Union[int, str]) -> dict:\n \"\"\"Get pageview counts for an page\n\n Args:\n page (Union[int, str]): Description\n\n Returns:\n dict: ageview counts for an page\n\n \"\"\"\n\n if isinstance(page, (int, np.integer)):\n url = f'{self.base}action=query&pageids={page}&format=json'\n elif isinstance(page, str):\n url = f'{self.base}action=query&titles={quote_plus(page)}&format=json'\n\n return self.request(url)\n\n def get_editor(self, editor: Union[int, str]) -> dict:\n \"\"\"Get pageview counts for an page\n\n Args:\n editor (Union[int, str]): Description\n\n Returns:\n dict: ageview counts for an page\n\n \"\"\"\n\n # if isinstance(editor, (int, np.integer)):\n # url = f'{self.base}action=query&list=users&ususerids={editor}&format=json'\n # elif isinstance(editor, str):\n # url =\n # f'{self.base}action=query&list=users&ususers={editor}&format=json'\n\n if isinstance(editor, (int, np.integer)):\n url = f'{self.base}action=query&list=users&ususerids={editor}&usprop=blockinfo|editcount|registration|gender&format=json'\n elif isinstance(editor, str):\n url = f'{self.base}action=query&list=users&ususers={quote_plus(editor)}&usprop=blockinfo|editcount|registration|gender&format=json'\n\n return self.request(url)\n\n def search_page(self, search_query: str) -> dict:\n \"\"\"Summary\n\n Args:\n search_query (str): Description\n\n Returns:\n dict: Description\n \"\"\"\n url = f'{self.base}action=opensearch&search={quote_plus(search_query)}&limit=1&namespace=0&format=json'\n\n return self.request(url)\n\n def get_editors(self, editors: list) -> dict:\n\n editors_str = \"|\".join(quote_plus(str(x)) for x in editors)\n\n if isinstance(editors[0], (int, np.integer)):\n url = f'{self.base}action=query&list=users&ususerids={editors_str}&usprop=blockinfo|editcount|registration|gender&format=json'\n elif isinstance(editors[0], str):\n url = f'{self.base}action=query&list=users&ususers={editors_str}&usprop=blockinfo|editcount|registration|gender&format=json'\n\n return self.request(url)\n \n def get_talk_content(self, pageid: Union[int, str], continue_param: str) -> dict:\n if continue_param:\n url = f'{self.base}action=query&format=json&prop=revisions&rvlimit=max&rvprop=timestamp|ids|user|comment&pageids={pageid}&{continue_param}'\n else:\n url = f'{self.base}action=query&format=json&prop=revisions&rvlimit=max&rvprop=timestamp|ids|user|comment&pageids={pageid}'\n\n return self.request(url)\n \n def get_talk_rev_diff(self, fromrev, torev) -> dict:\n url = f'{self.base}action=compare&format=json&fromrev={fromrev}&torev={torev}'\n\n return self.request(url)\n \n def get_protection(self, page: str) -> dict:\n url1 = f'{self.base}action=query&leprop=type|user|timestamp|comment|details&list=logevents&letitle={quote_plus(page)}'\n url2 = '&lelimit=max&letype=protect&format=json' \n url = url1 + url2\n \n return 
self.request(url)\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
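The extracted APIs here are just `pandas.Series` and `pandas.DataFrame`, used to wrap MediaWiki API responses. A tiny sketch of the same wrapping with hand-written response dicts; the values are invented, only the keys mirror what `get_page` and `get_editors` read:

    import pandas as pd

    # Invented payloads with the same keys the WikipediaDV methods consume.
    page_dict = {"pageid": 12345, "title": "Some article", "ns": 0}
    editors = [{"name": "EditorA", "editcount": 42}, {"name": "EditorB", "editcount": 7}]

    page = pd.Series({"page_id": page_dict["pageid"],
                      "title": page_dict["title"],
                      "ns": page_dict["ns"]})
    print(page)
    print(pd.DataFrame(editors))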
7eta/udk_labeler | [
"8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb",
"8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb",
"8cd8a86bc1a78647c0aaf81ca78e6e518fb86ceb"
] | [
"engine/retinanet/coco_eval.py",
"engine/utils/confusion_metric.py",
"engine/trainer/train.py"
] | [
"# Original author: Yann Henon\n# Adapted from https://github.com/yhenon/pytorch-retinanet/blob/master/retinanet/coco_eval.py\n# Modified by jsk1107\n\nfrom pycocotools.cocoeval import COCOeval\nimport json\nimport torch\n\n\ndef evaluate_coco(dataset, model, json_path, threshold=0.05):\n \n model.eval()\n with torch.no_grad():\n\n # start collecting results\n results = []\n image_ids = []\n\n for index in range(len(dataset)):\n data = dataset[index]\n scale = data['scale']\n\n # run network\n # nms_scores, boxes = model(data['img'].permute(2, 0, 1).cuda().float().unsqueeze(dim=0))\n scores, labels, boxes = model(data['img'].permute(2, 0, 1).cuda().float().unsqueeze(dim=0))\n # print('nms_scores', scores)\n\n # correct boxes for image scale\n boxes /= scale\n\n if boxes.shape[0] > 0:\n # scores, labels = nms_scores.max(dim=1)\n\n scores = scores.cpu()\n labels = labels.cpu()\n boxes = boxes.cpu()\n # change to (x, y, w, h) (MS COCO standard)\n boxes[:, 2] -= boxes[:, 0]\n boxes[:, 3] -= boxes[:, 1]\n\n # compute predicted labels and scores\n #for box, score, label in zip(boxes[0], scores[0], labels[0]):\n for box_id in range(boxes.shape[0]):\n score = float(scores[box_id])\n label = int(labels[box_id])\n box = boxes[box_id, :]\n\n # scores are sorted, so we can break\n if score < threshold:\n break\n\n # append detection for each positively labeled class\n image_result = {\n 'image_id' : dataset.image_ids[index],\n 'category_id' : dataset.label_to_coco_label(label),\n 'score' : float(score),\n 'bbox' : box.tolist(),\n }\n\n # append detection to results\n results.append(image_result)\n # append image to list of processed images\n image_ids.append(dataset.image_ids[index])\n\n # print progress\n print('{}/{}'.format(index, len(dataset)), end='\\r')\n\n if not len(results):\n return\n\n # write output\n print(f'json_path: {json_path}')\n json.dump(results, open(f'{json_path}/{dataset.set_name}_bbox_results.json', 'w'), indent=4)\n\n # load results in COCO evaluation tool\n coco_true = dataset.coco\n coco_pred = coco_true.loadRes(f'{json_path}/{dataset.set_name}_bbox_results.json')\n\n # run COCO evaluation\n coco_eval = COCOeval(coco_true, coco_pred, 'bbox')\n coco_eval.params.imgIds = image_ids\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n stats = coco_eval.stats\n return stats\n",
"import pandas as pd\nimport os\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom collections import defaultdict\n\n\ndef iou(pred_box, true_box):\n\n p_xmin, p_ymin, p_xmax, p_ymax = pred_box[0], pred_box[1], pred_box[2], pred_box[3]\n t_xmin, t_ymin, t_xmax, t_ymax = true_box[0], true_box[1], true_box[2], true_box[3]\n\n inter_xmin, inter_ymin = max(p_xmin, t_xmin), max(p_ymin, t_ymin)\n inter_xmax, inter_ymax = min(p_xmax, t_xmax), min(p_ymax, t_ymax)\n\n inter_area = np.maximum(inter_xmax - inter_xmin + 1, 0) * np.maximum(inter_ymax - inter_ymin + 1, 0)\n pred_area = (p_xmax - p_xmin + 1) * (p_ymax - p_ymin + 1)\n true_area = (t_xmax - t_xmin + 1) * (t_ymax - t_ymin + 1)\n union_area = true_area + pred_area - inter_area\n\n iou = inter_area / union_area\n\n return iou\n\n\ndef load_xml(anno_path):\n\n tree = ET.parse(anno_path)\n root = tree.getroot()\n target = defaultdict(list)\n\n for obj in root.findall('object'):\n name = obj.find('name').text\n xmin = int(obj.find('bndbox').find('xmin').text)\n ymin = int(obj.find('bndbox').find('ymin').text)\n xmax = int(obj.find('bndbox').find('xmax').text)\n ymax = int(obj.find('bndbox').find('ymax').text)\n\n target[name].append([xmin, ymin, xmax, ymax])\n\n return target\n\n\ndef confusion_metric(true_xml_dir, pred_xml_path, classes=['NG', 'OK']):\n _metric = pd.DataFrame(0, index=classes, columns=classes)\n xml_name = os.path.basename(pred_xml_path)\n true_xml_path = os.path.join(true_xml_dir, xml_name)\n\n p = load_xml(pred_xml_path)\n print(pred_xml_path)\n if p.get('SOOT') is None:\n if not os.path.exists(true_xml_path):\n _metric.iloc[1, 1] += 1 # 정상\n else:\n t = load_xml(true_xml_path)\n if t.get('SOOT') is None:\n _metric.iloc[1, 1] += 1 # 정상\n else:\n _metric.iloc[0, 1] += 1 # 미검(2종과오)\n else:\n if not os.path.exists(true_xml_path):\n _metric.iloc[1, 0] += 1 # 과검(1종과오)\n else:\n t = load_xml(true_xml_path)\n\n if t.get('SOOT') is None:\n _metric.iloc[1, 0] += 1 # 과검(1종과오)\n else:\n p_bboxes = p.get('SOOT')\n t_bboxes = t.get('SOOT')\n\n ious = []\n for t_bbox in t_bboxes:\n for p_bbox in p_bboxes:\n ious.append(iou(p_bbox, t_bbox))\n cnt = 0\n for i in ious:\n if i >= 0.5:\n _metric.iloc[0, 0] += 1 # 불\n break\n else:\n cnt += 1\n if cnt == len(ious):\n _metric.iloc[0, 1] += 1 # 미검(2종과오)\n\n return _metric\n\n\n\nclass ConfusionMetric:\n def __init__(self, classes=['NG', 'OK']):\n self._classes = classes\n self._metric = pd.DataFrame(0, index=self._classes, columns=self._classes)\n\n def reset(self):\n for col in self._metric.colums:\n self._metric[col].values[:] = 0\n\n def update(self, value):\n self._metric += value\n\n def result(self):\n return self._metric\n\n\nif __name__ == '__main__':\n\n import argparse\n parser = argparse.ArgumentParser(description='Simple script for visualizing result of training.')\n\n parser.add_argument('--true_xml_dir', help='Path to directory containing images')\n parser.add_argument('--pred_xml_dir', help='Path to model')\n args = parser.parse_args()\n\n pred_xml_paths = [os.path.join(args.pred_xml_dir, x.name) for x in os.scandir(args.pred_xml_dir)]\n\n c_metric = ConfusionMetric()\n for pred_xml_path in pred_xml_paths:\n value = confusion_metric(args.true_xml_dir, pred_xml_path)\n c_metric.update(value)\n print(c_metric.result())\n",
"import os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom engine.dataloader import get_dataloader\nfrom engine.retinanet import model\nfrom engine.retinanet import coco_eval\nfrom engine.log.saver import Saver\nfrom tqdm import tqdm\nfrom collections import deque\nfrom engine.log import logger, summarise\n\nassert torch.__version__.split('.')[0] == '1'\nprint('CUDA available: {}'.format(torch.cuda.is_available()))\n\n\nclass Trainer(object):\n def __init__(self, config, img_dir, coco_json):\n self.config = config\n\n # Define Saver\n self.saver = Saver(self.config)\n\n # Define Tensorboard\n if self.config.tensorboard:\n self.summary = summarise.TensorboardSummary(self.saver.directory)\n self.writer = self.summary.create_summary()\n\n # Define Logger\n self.getlogger = logger.get_logger(self.saver.directory)\n self.logger = self.getlogger\n\n # Define DataLoader\n self.train_loader, self.n_train_img,\\\n self.val_set, self.val_loader, self.n_val_img, self.n_classes = get_dataloader(self.config, img_dir, coco_json)\n\n # Define Network\n if self.config.depth == 18:\n self.retinanet = model.resnet18(num_classes=self.n_classes, pretrained=True)\n elif self.config.depth == 34:\n self.retinanet = model.resnet34(num_classes=self.n_classes, pretrained=True)\n elif self.config.depth == 50:\n self.retinanet = model.resnet50(num_classes=self.n_classes, pretrained=True)\n elif self.config.depth == 101:\n self.retinanet = model.resnet101(num_classes=self.n_classes, pretrained=True)\n elif self.config.depth == 152:\n self.retinanet = model.resnet152(num_classes=self.n_classes, pretrained=True)\n else:\n raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')\n\n # Define Optimizer\n self.optimizer = optim.Adam(self.retinanet.parameters(), lr=self.config.lr)\n\n # Define lr_schduler\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=3, verbose=True)\n\n # Define loss\n self.loss_hist = deque(maxlen=500)\n\n # Define cuda\n if torch.cuda.is_available():\n self.retinanet = torch.nn.DataParallel(self.retinanet).cuda()\n else:\n raise ValueError('=> Cuda is not available. 
Check cuda')\n\n # Define resume\n self.best_f1_score = .0\n if self.config.resume is not None:\n self.retinanet = torch.load(self.config.resume)\n self.retinanet.cuda()\n\n # check model summary\n # summary(self.retinanet, (3, 512, 512))\n\n def train(self, epoch):\n self.retinanet.train()\n self.retinanet.module.freeze_bn()\n epoch_loss = []\n\n print(f'Num training images: {self.n_train_img}')\n\n with tqdm(self.train_loader) as tbar:\n for iter_num, data in enumerate(tbar):\n self.optimizer.zero_grad()\n\n img = data['img'].cuda().float()\n annot = data['annot']\n\n cls_loss, reg_loss = self.retinanet([img, annot])\n\n cls_loss = cls_loss.mean()\n reg_loss = reg_loss.mean()\n loss = cls_loss + reg_loss\n epoch_loss.append(float(loss))\n self.loss_hist.append(float(loss))\n\n if bool(loss == 0):\n continue\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.retinanet.parameters(), 0.1)\n self.optimizer.step()\n\n if self.config.tensorboard:\n self.writer.add_scalar('Train_Loss/classification_loss',\n cls_loss,\n iter_num + epoch*(len(self.train_loader)))\n self.writer.add_scalar('Train_Loss/regression_loss',\n reg_loss,\n iter_num + epoch*(len(self.train_loader)))\n self.writer.add_scalar('Train_Loss/total_loss',\n np.mean(self.loss_hist),\n iter_num + epoch*(len(self.train_loader)))\n\n tbar.set_description(f'Epoch: {epoch} | '\n f'Cls loss: {cls_loss:1.5f} | '\n f'Reg loss: {reg_loss:1.5f} | '\n f'Running loss: {np.mean(self.loss_hist):1.5f}')\n del cls_loss, reg_loss\n self.scheduler.step(np.mean(epoch_loss))\n\n def validation(self, epoch):\n print('Evaluating dataset')\n stats = coco_eval.evaluate_coco(self.val_set, self.retinanet, self.saver.directory)\n\n if stats is None:\n return\n\n # stats: 0~11까지 12개의 값이 존재\n # 0: mAP / 1: map .5 / 2: map .75 / 3: ap small / 4: ap medium / 5: ap large/\n # 6: ar Det1 / 7: ar Det10 / 8: ar Det100 / 9: ar small / 10: ar medium / 11: ar large\n\n if self.config.tensorboard:\n self.writer.add_scalar('Precision/mAP', stats[0], epoch)\n self.writer.add_scalar('Precision/mAP@50IOU', stats[1], epoch)\n self.writer.add_scalar('Precision/mAP@75IOU', stats[2], epoch)\n self.writer.add_scalar('Precision/mAP(samll)', stats[3], epoch)\n self.writer.add_scalar('Precision/mAP(medium)', stats[4], epoch)\n self.writer.add_scalar('Precision/mAP(large)', stats[5], epoch)\n self.writer.add_scalar('Recall/AR@1', stats[6], epoch)\n self.writer.add_scalar('Recall/AR@10', stats[7], epoch)\n self.writer.add_scalar('Recall/AR@100', stats[8], epoch)\n self.writer.add_scalar('Recall/AR@100(small)', stats[9], epoch)\n self.writer.add_scalar('Recall/AR@100(medium)', stats[10], epoch)\n self.writer.add_scalar('Recall/AR@100(large)', stats[11], epoch)\n\n mAP, AR = stats[0], stats[8]\n f1_score = 2 * (mAP * AR) / (mAP + AR)\n\n if f1_score > self.best_f1_score:\n self.best_f1_score = f1_score\n self.saver.save_checkpoint(self.retinanet.module, f1_score)"
] | [
[
"torch.no_grad"
],
[
"numpy.maximum",
"pandas.DataFrame"
],
[
"torch.__version__.split",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"numpy.mean",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
benayas1/benatools | [
"fb2f6e8982d4bcbe0d52a08ebcd36f2cfcebb50d",
"fb2f6e8982d4bcbe0d52a08ebcd36f2cfcebb50d",
"fb2f6e8982d4bcbe0d52a08ebcd36f2cfcebb50d"
] | [
"src/benatools/torch/fitter.py",
"src/benatools/tf/images.py",
"src/benatools/ct/ct.py"
] | [
"import os\nimport torch\nimport numpy as np\nfrom datetime import datetime\nimport time\nimport pandas as pd\nfrom typing import Iterable, Callable, Dict, Tuple\n\n\nclass AverageMeter(object):\n \"\"\"\n Computes and stores the average and current value\n Attributes\n ----------\n val : float\n Stores the average loss of the last batch\n avg : float\n Average loss\n sum : float\n Sum of all losses\n count : int\n number of elements\n \"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n \"\"\"\n Updates current internal state\n Parameters\n ----------\n val : float\n loss on each training step\n n : int, Optional\n batch size\n \"\"\"\n if np.isnan(val) or np.isinf(val):\n return\n\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass TorchFitterBase:\n \"\"\"\n Helper class to implement a training loop in PyTorch\n \"\"\"\n\n def __init__(self,\n model: torch.nn.Module = None,\n device: str = 'cpu',\n loss: torch.nn.Module = None,\n optimizer: torch.optim = None,\n scheduler: torch.optim.lr_scheduler = None,\n validation_scheduler: bool = True,\n step_scheduler: bool = False,\n folder: str = 'models',\n verbose: bool = True,\n save_log: bool = True,\n use_amp: bool = False,\n ):\n \"\"\"\n Args:\n model (torch.nn.Module): Model to be fitted\n device (str): Device can be cuda or cpu\n loss (torch.nn.Module): DataFrame to split\n optimizer (torch.optim): Optimizer object\n scheduler (torch.optim.lr_scheduler, optional): Scheduler object. Defaults to None.\n validation_scheduler (bool, optional): Run scheduler step on the validation step. Defaults to True.\n step_scheduler (bool, optional): Run scheduler step on every training step. Defaults to False.\n folder (str, optional): Folder where to store checkpoints. Defaults to 'models'.\n verbose (bool, optional): Whether to print outputs or not. Defaults to True.\n save_log (bool, optional): Whether to write the log in log.txt or not. Defaults to True.\n \"\"\"\n if loss is not None:\n if type(loss) == type:\n self.loss_function = loss()\n else:\n self.loss_function = loss\n else:\n self.loss_function = None\n\n self.epoch = 0 # current epoch\n self.verbose = verbose\n\n self.base_dir = f'{folder}'\n\n self.save_log = save_log\n self.log_path = f'{self.base_dir}/log.txt'\n self.best_metric = 0\n\n self.model = model\n self.device = device\n self.use_amp = use_amp\n self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)\n\n # Optimizer object\n self.optimizer = optimizer\n\n # Scheduler Object\n self.scheduler = scheduler\n self.validation_scheduler = validation_scheduler # do scheduler.step after validation stage loss\n self.step_scheduler = step_scheduler # do scheduler.step after optimizer.step\n self.log(f'Fitter prepared. 
Device is {self.device}')\n\n def unpack(self, data):\n raise NotImplementedError('This class is a base class')\n\n def reduce_loss(self, loss, weights):\n # Apply sample weights if existing\n if len(loss.shape) > 0:\n # apply weights\n if weights is not None:\n loss = loss * torch.unsqueeze(weights, 1)\n\n # reduction\n loss = loss.mean()\n return loss\n\n def fit(self,\n train_loader: torch.utils.data.DataLoader,\n val_loader: torch.utils.data.DataLoader = None,\n n_epochs: int = 1,\n metrics: Iterable[Tuple[Callable[[Iterable, Iterable], float], dict]] = None,\n early_stopping: int = 0,\n early_stopping_mode: str = 'min',\n early_stopping_alpha: float = 0.0,\n early_stopping_pct: float = 0.0,\n save_checkpoint: bool = False,\n save_best_checkpoint: bool = True,\n verbose_steps: int = 0,\n callbacks: Iterable[Callable[[Dict], None]] = None):\n \"\"\"\n Fits a model\n\n Args:\n train_loader (torch.utils.data.DataLoader): Training data\n val_loader (torch.utils.data.DataLoader, optional): Validation Data. Defaults to None.\n n_epochs (int, optional): Maximum number of epochs to train. Defaults to 1.\n metrics ( function with (y_true, y_pred, **metric_kwargs) signature, optional): Metric to evaluate results on. Defaults to None.\n metric_kwargs (dict, optional): Arguments for the passed metric. Ignored if metric is None. Defaults to {}.\n early_stopping (int, optional): Early stopping epochs. Defaults to 0.\n early_stopping_mode (str, optional): Min or max criteria. Defaults to 'min'.\n early_stopping_alpha (float, optional): Value that indicates how much to improve to consider early stopping. Defaults to 0.0.\n early_stopping_pct (float, optional): Value between 0 and 1 that indicates how much to improve to consider early stopping. Defaults to 0.0.\n save_checkpoint (bool, optional): Whether to save the checkpoint when training. Defaults to False.\n save_best_checkpoint (bool, optional): Whether to save the best checkpoint when training. Defaults to True.\n verbose_steps (int, optional): Number of step to print every training summary. Defaults to 0.\n callbacks (list of callable, optional): List of callback functions to be called after an epoch\n\n Returns:\n pd.DataFrame: DataFrame containing training history\n \"\"\"\n if self.model is None or self.loss_function is None or self.optimizer is None:\n self.log(f\"ERROR: Either model, loss function or optimizer is not existing.\")\n raise ValueError(f\"ERROR: Either model, loss function or optimizer is not existing.\")\n\n if self.best_metric == 0.0:\n self.best_metric = np.inf if early_stopping_mode == 'min' else -np.inf\n\n initial_epochs = self.epoch\n\n # Use the same train loader for validation. 
A possible use case is for autoencoders\n if isinstance(val_loader, str) and val_loader == 'training':\n val_loader = train_loader\n\n training_history = []\n es_epochs = 0\n for e in range(n_epochs):\n history = {'epoch': e} # training history log for this epoch\n\n # Update log\n lr = self.optimizer.param_groups[0]['lr']\n self.log(f'\\n{datetime.utcnow().isoformat(\" \", timespec=\"seconds\")}\\n \\\n EPOCH {str(self.epoch+1)}/{str(n_epochs+initial_epochs)} - LR: {lr}')\n\n # Run one training epoch\n t = time.time()\n train_summary_loss = self.train_one_epoch(train_loader, verbose_steps=verbose_steps)\n history['train'] = train_summary_loss.avg # training loss\n history['lr'] = self.optimizer.param_groups[0]['lr']\n\n # Save checkpoint\n if save_checkpoint:\n self.save(f'{self.base_dir}/last-checkpoint.bin', False)\n\n if val_loader is not None:\n # Run epoch validation\n val_summary_loss, calculated_metrics = self.validation(val_loader,\n metric=metrics,\n verbose_steps=verbose_steps)\n history['val'] = val_summary_loss.avg # validation loss\n\n # Write log\n metric_log = ' - ' + ' - '.join([f'{fname}: {value}' for value, fname in calculated_metrics]) if calculated_metrics else ''\n self.log(f'\\r[RESULT] {(time.time() - t):.2f}s - train loss: {train_summary_loss.avg:.5f} - val loss: {val_summary_loss.avg:.5f}' + metric_log)\n\n if calculated_metrics:\n history.update({fname: value for value, fname in calculated_metrics})\n #history['val_metric'] = calculated_metrics\n\n calculated_metric = calculated_metrics[0][0] if calculated_metrics else val_summary_loss.avg\n else:\n # If no validation is provided, training loss is used as metric\n calculated_metric = train_summary_loss.avg\n\n es_pct = early_stopping_pct * self.best_metric\n\n # Check if result is improved, then save model\n if (\n ((metrics) and\n (\n ((early_stopping_mode == 'max') and (calculated_metric - max(early_stopping_alpha, es_pct) > self.best_metric)) or\n ((early_stopping_mode == 'min') and (calculated_metric + max(early_stopping_alpha, es_pct) < self.best_metric))\n )\n ) or\n ((metrics is None) and\n (calculated_metric + max(early_stopping_alpha, es_pct) < self.best_metric) # the standard case is to minimize\n )\n ):\n self.log(f'Validation metric improved from {self.best_metric} to {calculated_metric}')\n self.best_metric = calculated_metric\n self.model.eval()\n if save_best_checkpoint:\n savepath = f'{self.base_dir}/best-checkpoint.bin'\n self.save(savepath)\n es_epochs = 0 # reset early stopping count\n else:\n es_epochs += 1 # increase epoch count with no improvement, for early stopping check\n\n # Callbacks receive the history dict of this epoch\n if callbacks is not None:\n if not isinstance(callbacks, list):\n callbacks = [callbacks]\n for c in callbacks:\n c(history)\n\n # Check if Early Stopping condition is met\n if (early_stopping > 0) & (es_epochs >= early_stopping):\n self.log(f'Early Stopping: {early_stopping} epochs with no improvement')\n training_history.append(history)\n break\n\n # Scheduler step after validation\n if self.validation_scheduler and self.scheduler is not None:\n self.scheduler.step(metrics=calculated_metric)\n\n training_history.append(history)\n self.epoch += 1\n\n return pd.DataFrame(training_history).set_index('epoch')\n\n def train_one_epoch(self, train_loader, verbose_steps=0):\n \"\"\"\n Run one epoch on the train dataset\n Parameters\n ----------\n train_loader : torch.data.utils.DataLoader\n DataLoaders containing the training dataset\n verbose_steps : int, defaults to 0\n 
number of step to print every training summary\n Returns\n -------\n AverageMeter\n Object with this epochs's average loss\n \"\"\"\n self.model.train() # set train mode\n summary_loss = AverageMeter() # object to track the average loss\n t = time.time()\n batch_size = train_loader.batch_size\n\n # run epoch\n for step, data in enumerate(train_loader):\n if self.verbose & (verbose_steps > 0):\n if step % verbose_steps == 0:\n print(\n f'\\rTrain Step {step}/{len(train_loader)} | ' +\n f'summary_loss: {summary_loss.avg:.5f} | ' +\n f'time: {(time.time() - t):.2f} secs | ' +\n f'ETA: {(len(train_loader)-step)*(time.time() - t)/(step+1):.2f}', end=''\n )\n # Unpack batch of data\n x, y, w = self.unpack(data)\n\n # Run one batch\n loss = self.train_one_batch(x, y, w)\n\n summary_loss.update(loss.detach().item(), batch_size)\n\n # update optimizer using mixed precision if requested\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n # LR Scheduler step after epoch\n if self.step_scheduler and self.scheduler is not None:\n self.scheduler.step()\n\n self.log(f'\\r[TRAIN] {(time.time() - t):.2f}s - train loss: {summary_loss.avg:.5f}')\n\n return summary_loss\n\n def train_one_batch(self, x, y, w=None):\n \"\"\"\n Trains one batch of data.\n The actions to be done here are:\n - extract x and y (labels)\n - calculate output and loss\n - backpropagate\n\n Args:\n x (List or Tuple or Dict): Data\n y (torch.Tensor): Labels\n w (torch.Tensor, optional): Weights. Defaults to None.\n\n Returns:\n torch.Tensor: A tensor with the calculated loss\n \"\"\"\n self.optimizer.zero_grad()\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n # Output and loss\n if isinstance(x, tuple) or isinstance(x, list):\n output = self.model(*x)\n elif isinstance(x, dict):\n output = self.model(**x)\n else:\n output = self.model(x)\n\n loss = self.loss_function(output, y)\n\n # Reduce loss and apply sample weights if existing\n loss = self.reduce_loss(loss, w)\n \n # backpropagation\n self.scaler.scale(loss).backward()\n\n\n return loss\n\n def validation(self, val_loader, metric=None, verbose_steps=0):\n \"\"\"\n Validates a model\n Parameters\n ----------\n val_loader : torch.utils.data.DataLoader\n Validation Data\n metric : function with (y_true, y_pred, **metric_kwargs) signature\n Metric to evaluate results on\n metric_kwargs : dict\n Arguments for the passed metric. 
Ignored if metric is None\n verbose_steps : int, defaults to 0\n number of step to print every training summary\n Returns\n -------\n AverageMeter\n Object with this epochs's average loss\n float\n Calculated metric if a metric is provided, else None\n \"\"\"\n if self.model is None or self.loss_function is None or self.optimizer is None:\n self.log(f\"ERROR: Either model, loss function or optimizer is not existing.\")\n raise ValueError(f\"ERROR: Either model, loss function or optimizer is not existing.\")\n\n self.model.eval()\n summary_loss = AverageMeter()\n y_preds = []\n y_true = []\n batch_size = val_loader.batch_size\n\n t = time.time()\n for step, data in enumerate(val_loader):\n if self.verbose & (verbose_steps > 0):\n if step % verbose_steps == 0:\n print(\n f'\\rVal Step {step}/{len(val_loader)} | ' +\n f'summary_loss: {summary_loss.avg:.5f} | ' +\n f'time: {(time.time() - t):.2f} secs |' +\n f'ETA: {(len(val_loader)-step)*(time.time() - t)/(step+1):.2f}', end=''\n )\n with torch.no_grad(): # no gradient update\n x, y, w = self.unpack(data)\n\n if metric:\n y_true += y.cpu().numpy().tolist()\n\n # just forward pass\n if isinstance(x, tuple) or isinstance(x, list):\n output = self.model(*x)\n elif isinstance(x, dict):\n output = self.model(**x)\n else:\n output = self.model(x)\n\n loss = self.loss_function(output, y)\n\n # Reduce loss and apply sample weights if existing\n loss = self.reduce_loss(loss, w)\n summary_loss.update(loss.detach().item(), batch_size)\n\n if metric:\n y_preds += output.cpu().numpy().tolist()\n\n # Callback metrics\n metric_log = ' '*30\n if metric:\n calculated_metrics = []\n y_pred = np.argmax(y_preds, axis=1)\n for f, args in metric:\n value = f(y_true, y_pred, **args)\n calculated_metrics.append((value, f.__name__))\n metric_log = f'- {f.__name__} {value:.5f} '\n else:\n calculated_metrics = None\n\n self.log(f'\\r[VALIDATION] {(time.time() - t):.2f}s - val. loss: {summary_loss.avg:.5f} ' + metric_log)\n return summary_loss, calculated_metrics\n\n def predict(self, test_loader, verbose_steps=0):\n \"\"\"\n Makes predictions using the trained model\n Parameters\n ----------\n test_loader : torch.utils.data.DataLoader\n Test Data\n verbose_steps : int, defaults to 0\n number of step to print every training summary\n Returns\n -------\n np.array\n Predicted values by the model\n \"\"\"\n if self.model is None:\n self.log(f\"ERROR: Model is not existing.\")\n raise ValueError(f\"ERROR: Model is not existing.\")\n\n self.model.eval()\n y_preds = []\n t = time.time()\n\n for step, data in enumerate(test_loader):\n if self.verbose & (verbose_steps > 0) > 0:\n if step % verbose_steps == 0:\n print(\n f'\\rPrediction Step {step}/{len(test_loader)} | ' +\n f'time: {(time.time() - t):.2f} secs |' +\n f'ETA: {(len(test_loader)-step)*(time.time() - t)/(step+1):.2f}', end=''\n )\n with torch.no_grad(): # no gradient update\n x, _, _ = self.unpack(data)\n\n # Output\n if isinstance(x, tuple) or isinstance(x, list):\n output = self.model(*x)\n elif isinstance(x, dict):\n output = self.model(**x)\n else:\n output = self.model(x)\n\n y_preds += output.cpu().numpy().tolist()\n\n return np.array(y_preds)\n\n def save(self, path, verbose=True):\n \"\"\"\n Save model and other metadata\n\n Args:\n path (str): Path of the file to be saved\n verbose (bool, optional): True = print logs, False = silence. 
Defaults to True.\n \"\"\"\n\n if verbose:\n self.log(f'Checkpoint is saved to {path}')\n self.model.eval()\n\n data = {\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'best_summary_loss': self.best_metric,\n 'epoch': self.epoch,\n 'scaler': self.scaler.state_dict()\n }\n\n if self.scheduler is not None:\n data['scheduler_state_dict'] = self.scheduler.state_dict()\n\n if not os.path.exists(self.base_dir):\n os.makedirs(self.base_dir)\n\n torch.save(data, path)\n\n def load(self, path, only_model=False):\n \"\"\"\n Load model and other metadata\n\n Args:\n path (str): Path of the file to be loaded\n only_model (bool, optional): Whether to load just the model weights. Defaults to False.\n \"\"\"\n checkpoint = torch.load(path)\n\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n if only_model:\n return\n\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.scaler.load_state_dict(checkpoint[\"scaler\"])\n\n self.best_metric = checkpoint['best_summary_loss']\n self.epoch = checkpoint['epoch'] + 1\n\n if 'scheduler_state_dict' in checkpoint and self.scheduler is not None:\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n\n @staticmethod\n def load_model_weights(path, model):\n \"\"\"\n Static method that loads weights into a torch module, extracted from a checkpoint\n\n Args:\n path (str): Path containing the weights. Normally a .bin or .tar file\n model (torch.nn.Module): Module to load the weights on\n\n Returns:\n torch.nn.Module: The input model with loaded weights\n \"\"\"\n\n checkpoint = torch.load(path)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model\n\n def log(self, message):\n \"\"\"\n Log training ouput into console and file\n\n Args:\n message (str): Message to be logged\n \"\"\"\n if self.verbose:\n print(message)\n\n if self.save_log is True:\n if not os.path.exists(self.base_dir):\n os.makedirs(self.base_dir)\n with open(self.log_path, 'a+') as logger:\n logger.write(f'{message}\\n')\n\n\nclass ImageFitter(TorchFitterBase):\n def unpack(self, data):\n # extract x and y from the dataloader\n x = data['x'].to(self.device)\n x = x.float()\n\n if 'y' in data:\n y = data['y']\n y = y.to(self.device)\n y = y.float()\n else:\n y = None\n \n # weights if existing\n if 'w' in data:\n w = data['w']\n w = w.to(self.device)\n w = w.float()\n else:\n w = None\n \n return x, y, None\n\nclass AutoencoderFitter(TorchFitterBase):\n\n def unpack(self, data):\n # extract x and y from the dataloader\n x = data['x']\n\n # send them to device\n x = x.to(self.device)\n x = x.float()\n\n if 'w' in data:\n w = data['w']\n w = w.to(self.device)\n w = w.float()\n else:\n w = None\n\n return x, x, w\n\n\nclass TransformersFitter(TorchFitterBase):\n\n def unpack(self, data):\n x = {k: v.to(self.device) for k, v in data['x'].items()}\n\n y = data['y'].to(self.device)\n\n if 'w' in data:\n w = data['w'].to(self.device).float()\n else:\n w = None\n\n return x, y, w\n\n\nclass MultiTaskFitter(TorchFitterBase):\n\n def unpack(self, data):\n x = {k: v.to(self.device) for k, v in data['x'].items()}\n\n y = [e.to(self.device) for e in data['y']]\n\n if 'w' in data:\n w = data['w'].to(self.device).float()\n else:\n w = None\n\n return x, y, w\n",
"import tensorflow as tf\nimport math\nimport tensorflow.keras.backend as K\nimport numpy as np\nimport collections\n\n\ndef _check_rotation_arg(x):\n \"\"\" Returns a list of rotation args\"\"\"\n if x is None:\n return [90, 90, 90]\n\n if np.isscalar(x):\n return [x, x, x]\n\n if isinstance(x, (collections.Sequence, np.ndarray, tf.Tensor)):\n if len(x) < 3:\n raise Exception(\"Rotation parameter must have length 3\")\n return x[:3]\n\n raise Exception(\"Rotation parameter must be a scalar or a list of length 3\")\n\n\ndef get_mat3d(rotation=None, shear=2.0, x_zoom=8.0, y_zoom=8.0, z_zoom=8.0, x_shift=8.0, y_shift=8.0, z_shift=8.0):\n \"\"\"\n Creates a transformation matrix which rotates, shears, zooms and shift an 2D image.\n\n Parameters\n ----------\n rotation : float\n Degrees to rotate\n shear : float\n Degrees to shear\n height_zoom : float\n height zoom ratio\n width_zoom : float\n width zoom ratio\n height_shift : float\n height shift ratio\n width_shift : float\n width shift ratio\n\n Returns\n -------\n tf.tensor\n 4x4 transformation matrix for 3D transformations\n \"\"\"\n\n # CONVERT DEGREES TO RADIANS\n # rotation = _check_rotation_arg(rotation)\n\n def get_4x4_mat(lst):\n return tf.reshape(tf.concat([lst], axis=0), [4, 4])\n\n # ROTATION MATRIX\n one = tf.constant([1], dtype='float32')\n zero = tf.constant([0], dtype='float32')\n\n # X axis\n r = math.pi * rotation[0:1] / 180.\n cx = tf.math.cos(r)\n sx = tf.math.sin(r)\n rx = get_4x4_mat([one, zero, zero, zero,\n zero, cx, -sx, zero,\n zero, sx, cx, zero,\n zero, zero, zero, one])\n\n # Y axis\n r = math.pi * rotation[1:2] / 180.\n cy = tf.math.cos(r)\n sy = tf.math.sin(r)\n ry = get_4x4_mat([cy, zero, sy, zero,\n zero, one, zero, zero,\n -sy, zero, cy, zero,\n zero, zero, zero, one])\n\n # Z axis\n r = math.pi * rotation[2:] / 180.\n cz = tf.math.cos(r)\n sz = tf.math.sin(r)\n rz = get_4x4_mat([cz, -sz, zero, zero,\n sz, cz, zero, zero,\n zero, zero, one, zero,\n zero, zero, zero, one])\n\n rand = tf.random.uniform([], minval=0, maxval=3, dtype=tf.int32)\n if rand == 0:\n rotation_matrix = rx\n else:\n if rand == 1:\n rotation_matrix = ry\n else:\n rotation_matrix = rz\n\n # SHEAR MATRIX\n shear = math.pi * shear / 180.\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n\n # shear_matrix = get_4x4_mat([one, s2, zero,\n # zero, c2, zero,\n # zero, zero, one])\n # ZOOM MATRIX\n zoom_matrix = get_4x4_mat([one / x_zoom, zero, zero, zero,\n zero, one / y_zoom, zero, zero,\n zero, zero, one / z_zoom, zero,\n zero, zero, zero, one])\n # SHIFT MATRIX\n shift_matrix = get_4x4_mat([one, zero, zero, x_shift,\n zero, one, zero, y_shift,\n zero, zero, one, z_shift,\n zero, zero, zero, one])\n\n return K.dot(rotation_matrix,\n K.dot(zoom_matrix, shift_matrix))\n\n\ndef transform3d(obj, dimension, rotation=None, shear=2.0, x_zoom=8.0, y_zoom=8.0, z_zoom=8.0, x_shift=8.0, y_shift=8.0,\n z_shift=8.0, prob=1.0):\n \"\"\"\n Rotates, shears, zooms and shift an single object, not a batch of them.\n\n Parameters\n ----------\n image : tf.Tensor of shape [h,w,d,c]\n A single image to be transformed\n dimension : int\n Dimension in pixels of the squared image\n rotation : float or list of floats\n Degrees to rotate\n shear : float\n Degrees to shear\n x_zoom : float\n height zoom ratio\n y_zoom : float\n width zoom ratio\n z_zoom : float\n width zoom ratio\n x_shift : float\n height shift ratio\n y_shift : float\n width shift ratio\n z_shift : float\n width shift ratio\n prob : float\n probabilities to apply transformations\n\n Returns\n 
-------\n tf.Tensor\n A transformed object\n \"\"\"\n\n XDIM = dimension % 2\n\n P = tf.cast(tf.random.uniform([], 0, 1) < prob, tf.int32)\n if P == 0:\n return tf.reshape(obj, [dimension, dimension, dimension, 3]) # no action\n\n rotation = _check_rotation_arg(rotation)\n\n\n rot = rotation * tf.random.normal([3], dtype='float32')\n shr = shear * tf.random.normal([1], dtype='float32')\n x_zoom = 1.0 + tf.random.normal([1], dtype='float32') / x_zoom\n y_zoom = 1.0 + tf.random.normal([1], dtype='float32') / y_zoom\n z_zoom = 1.0 + tf.random.normal([1], dtype='float32') / z_zoom\n x_shift = x_shift * tf.random.normal([1], dtype='float32')\n y_shift = y_shift * tf.random.normal([1], dtype='float32')\n z_shift = z_shift * tf.random.normal([1], dtype='float32')\n\n # print(rot,shr,x_zoom,y_zoom,z_zoom)\n\n # GET TRANSFORMATION MATRIX\n m = get_mat3d(rot, shr, x_zoom, y_zoom, z_zoom, x_shift, y_shift, z_shift)\n\n # LIST DESTINATION PIXEL INDICES\n x = tf.repeat(tf.range(dimension // 2, -dimension // 2, -1), dimension * dimension)\n y = tf.tile(tf.repeat(tf.range(dimension // 2, -dimension // 2, -1), dimension), [dimension])\n z = tf.tile(tf.range(-dimension // 2, dimension // 2), [dimension * dimension])\n c = tf.ones([dimension * dimension * dimension], dtype='int32')\n idx = tf.stack([x, y, z, c])\n\n # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS\n idx2 = K.dot(m, tf.cast(idx, dtype='float32'))\n idx2 = K.cast(idx2, dtype='int32')\n idx2 = K.clip(idx2, -dimension // 2 + XDIM + 1, dimension // 2)\n\n # FIND ORIGIN PIXEL VALUES\n idx3 = tf.stack([dimension // 2 - idx2[0,], dimension // 2 - idx2[1,], dimension // 2 - 1 + idx2[2,]])\n d = tf.gather_nd(obj, tf.transpose(idx3))\n\n return tf.reshape(d, [dimension, dimension, dimension, 3])\n\n\ndef get_mat2d(rotation=180.0, shear=2.0, height_zoom=8.0, width_zoom=8.0, height_shift=8.0, width_shift=8.0):\n \"\"\"\n Creates a transformation matrix which rotates, shears, zooms and shift an 2D image.\n\n Parameters\n ----------\n rotation : float\n Degrees to rotate\n shear : float\n Degrees to shear\n height_zoom : float\n height zoom ratio\n width_zoom : float\n width zoom ratio\n height_shift : float\n height shift ratio\n width_shift : float\n width shift ratio\n\n Returns\n -------\n tf.Tensor\n 3x3 transformation matrix for 2D transformations\n \"\"\"\n\n # CONVERT DEGREES TO RADIANS\n rotation = math.pi * rotation / 180.\n shear = math.pi * shear / 180.\n\n def get_3x3_mat(lst):\n return tf.reshape(tf.concat([lst], axis=0), [3, 3])\n\n # ROTATION MATRIX\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n one = tf.constant([1], dtype='float32')\n zero = tf.constant([0], dtype='float32')\n\n rotation_matrix = get_3x3_mat([c1, s1, zero,\n -s1, c1, zero,\n zero, zero, one])\n # SHEAR MATRIX\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n\n shear_matrix = get_3x3_mat([one, s2, zero,\n zero, c2, zero,\n zero, zero, one])\n # ZOOM MATRIX\n zoom_matrix = get_3x3_mat([one / height_zoom, zero, zero,\n zero, one / width_zoom, zero,\n zero, zero, one])\n # SHIFT MATRIX\n shift_matrix = get_3x3_mat([one, zero, height_shift,\n zero, one, width_shift,\n zero, zero, one])\n\n return K.dot(K.dot(rotation_matrix, shear_matrix),\n K.dot(zoom_matrix, shift_matrix))\n\n\ndef transform2d(image, dimension, rotation=180.0, shear=2.0, hzoom=8.0, wzoom=8.0, hshift=8.0, wshift=8.0, prob=0.5):\n \"\"\"\n Rotates, shears, zooms and shift an single image, not a batch of them.\n\n Parameters\n ----------\n image : tf.Tensor of shape [h,w,c]\n A single image to 
be transformed\n dimension : int\n Dimension in pixels of the squared image\n rotation : float\n Degrees to rotate\n shear : float\n Degrees to shear\n hzoom : float\n height zoom ratio\n wzoom : float\n width zoom ratio\n hshift : float\n height shift ratio\n wshift : float\n width shift ratio\n prob : float\n probabilities to apply transformations\n\n Returns\n -------\n tf.Tensor\n A transformed image\n \"\"\"\n\n XDIM = dimension % 2\n\n P = tf.cast(tf.random.uniform([], 0, 1) < prob, tf.int32)\n if P == 0:\n return tf.reshape(image, [dimension, dimension, 3]) # no action\n\n rot = rotation * tf.random.normal([1], dtype='float32')\n shr = shear * tf.random.normal([1], dtype='float32')\n h_zoom = 1.0 + tf.random.normal([1], dtype='float32') / hzoom\n w_zoom = 1.0 + tf.random.normal([1], dtype='float32') / wzoom\n h_shift = hshift * tf.random.normal([1], dtype='float32')\n w_shift = wshift * tf.random.normal([1], dtype='float32')\n\n # GET TRANSFORMATION MATRIX\n m = get_mat2d(rot, shr, h_zoom, w_zoom, h_shift, w_shift)\n\n # LIST DESTINATION PIXEL INDICES\n x = tf.repeat(tf.range(dimension // 2, -dimension // 2, -1), dimension)\n y = tf.tile(tf.range(-dimension // 2, dimension // 2), [dimension])\n z = tf.ones([dimension * dimension], dtype='int32')\n idx = tf.stack([x, y, z])\n\n # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS\n idx2 = K.dot(m, tf.cast(idx, dtype='float32'))\n idx2 = K.cast(idx2, dtype='int32')\n idx2 = K.clip(idx2, -dimension // 2 + XDIM + 1, dimension // 2)\n\n # FIND ORIGIN PIXEL VALUES\n idx3 = tf.stack([dimension // 2 - idx2[0,], dimension // 2 - 1 + idx2[1,]])\n d = tf.gather_nd(image, tf.transpose(idx3))\n\n return tf.reshape(d, [dimension, dimension, 3])\n\n\ndef _reconstruct2D(a, b, xa, xb, ya, yb):\n one = a[ya:yb, :xa, :]\n two = b[ya:yb, xa:xb, :]\n three = a[ya:yb, xb:, :]\n middle = tf.concat([one, two, three], axis=1)\n return tf.concat([a[:ya, :, :], middle, a[yb:, :, :]], axis=0)\n\n\ndef _reconstruct3D(a, b, xa, xb, ya, yb, za, zb):\n one = a[ya:yb, :xa, :, :]\n two_a = a[ya:yb, xa:xb, :za, :]\n two = b[ya:yb, xa:xb, za:zb, :]\n two_b = a[ya:yb, xa:xb, zb:, :]\n three = a[ya:yb, xb:, :, :]\n two = tf.concat([two_a, two, two_b], axis=2)\n middle = tf.concat([one, two, three], axis=1)\n return tf.concat([a[:ya, :, :, :], middle, a[yb:, :, :, :]], axis=0)\n\n\ndef _points(dim, location, size):\n a = tf.math.maximum(0, location - size // 2)\n b = tf.math.minimum(dim, location + size // 2)\n return a, b\n\n\ndef dropout(image, prob=0.75, ct=8, sz=0.2, rank=2):\n \"\"\"\n Coarse dropout randomly remove squares from training images\n\n Parameters\n ----------\n image : tf.Tensor\n image of size [height,width,3] not a batch of [b,dim,dim,3]\n prob : float\n probability to perform dropout\n ct : int\n number of squares to remove\n sz : size\n size of square (in % of the image dimension)\n rank : int\n values must be 2 (image) or 3 (3d shape)\n\n Returns\n -------\n image with ct squares of side size sz*dimension removed\n \"\"\"\n\n if (rank != 2) & (rank != 3):\n raise Exception('Rank must be 2 or 3')\n\n if rank == 2:\n h, w, c = image.shape\n else:\n h, w, d, c = image.shape\n\n # DO DROPOUT WITH PROBABILITY DEFINED ABOVE\n P = tf.cast(tf.random.uniform([], 0, 1) < prob, tf.int32)\n if (P == 0) | (ct == 0) | (sz == 0):\n if rank == 2:\n image = tf.reshape(image, [h, w, 3])\n else:\n image = tf.reshape(image, [h, w, d, 3])\n return image # no action\n\n # Extract dimension\n if rank == 2:\n h, w, c = image.shape\n else:\n h, w, d, c = image.shape\n\n # 
Calculate square/box size\n sq_height = tf.cast(sz * h, tf.int32) * P\n sq_width = tf.cast(sz * w, tf.int32) * P\n\n if rank == 3:\n sq_depth = tf.cast(sz * d, tf.int32) * P\n\n # generate random black squares\n for k in range(ct):\n # Choose random location\n x = tf.cast(tf.random.uniform([], 0, w), tf.int32)\n y = tf.cast(tf.random.uniform([], 0, h), tf.int32)\n\n # Compute square / cube\n ya, yb = _points(h, y, sq_height)\n xa, xb = _points(w, x, sq_width)\n\n # Include third dimension for 3D\n if rank == 3:\n z = tf.cast(tf.random.uniform([], 0, d), tf.int32)\n za, zb = _points(h, z, sq_depth)\n\n # Dropout Image\n if rank == 2:\n image = _reconstruct2D(image, tf.zeros_like(image), xa, xb, ya, yb)\n else:\n image = _reconstruct3D(image, tf.zeros_like(image), xa, xb, ya, yb, za, zb)\n\n # Reshape hack so TPU compiler knows shape of output tensor\n if rank == 2:\n image = tf.reshape(image, [h, w, 3])\n else:\n image = tf.reshape(image, [h, w, d, 3])\n\n return image\n\n\ndef _mixup_labels(shape, label1, label2, n_classes, a):\n if len(shape) == 1:\n lab1 = tf.one_hot(label1, n_classes)\n lab2 = tf.one_hot(label2, n_classes)\n else:\n lab1 = tf.cast(label1, dtype=tf.float32)\n lab2 = tf.cast(label2, dtype=tf.float32)\n return (1 - a) * lab1 + a * lab2\n\n\ndef cutmix(batch, label, batch_size=32, prob=1.0, dimension=256, n_classes=1, n_labels=None):\n \"\"\"\n Cutmix randomly remove squares from training images\n\n Parameters\n ----------\n batch : tf.Tensor\n batch of [b,dim,dim,3] or [b,dim,dim,dim,3]\n label : tf.tensor\n batch of shape [b,] if labels are integer, or [b,n_classes] if format is one-hot\n prob : float\n probability to perform dropout\n batch_size : int\n batch size\n dimension : int\n dimension of the data\n n_classes : int\n number of classes\n rank : int\n values must be 2 (image) or 3 (3d shape)\n\n Returns\n -------\n tf.Tensor\n A batch of images with Cutmix applied\n \"\"\"\n\n rank = len(batch.shape) - 2\n\n imgs = []\n labs = []\n for j in range(batch_size):\n # DO CUTMIX WITH PROBABILITY DEFINED ABOVE\n P = tf.cast(tf.random.uniform([], 0, 1) <= prob, tf.int32)\n\n b = tf.random.uniform([], 0, 1) # this is beta dist with alpha=1.0\n WIDTH = tf.cast(dimension * tf.math.sqrt(1 - b), tf.int32) * P\n\n # Choose random location\n x = tf.cast(tf.random.uniform([], 0, dimension), tf.int32)\n y = tf.cast(tf.random.uniform([], 0, dimension), tf.int32)\n\n # Compute square / cube\n ya, yb = _points(dimension, y, WIDTH)\n xa, xb = _points(dimension, x, WIDTH)\n\n # Include third dimension for 3D\n if rank == 3:\n z = tf.cast(tf.random.uniform([], 0, dimension), tf.int32)\n za, zb = _points(dimension, z, WIDTH)\n\n # Choose Random Image to Cutmix with\n k = tf.cast(tf.random.uniform([], 0, batch_size), tf.int32)\n\n # Make Cutmix Image\n if rank == 2:\n image = _reconstruct2D(batch[j], batch[k], xa, xb, ya, yb)\n elif rank == 3:\n image = _reconstruct3D(batch[j], batch[k], xa, xb, ya, yb, za, zb)\n else:\n raise Exception(f\"Rank incorrect. 
Should be 2 or 3, but it is {rank}\") \n imgs.append(image)\n\n # Make Cutmix Label\n a = tf.cast((WIDTH ** rank) / (dimension ** rank), tf.float32)\n labs.append(_mixup_labels(label.shape, label[j], label[k], n_classes, a))\n\n # RESHAPE HACK SO TPU COMPILER KNOWS SHAPE OF OUTPUT TENSOR (maybe use Python typing instead?)\n if rank == 2:\n image2 = tf.reshape(tf.stack(imgs), (batch_size, dimension, dimension, 3))\n elif rank == 3:\n image2 = tf.reshape(tf.stack(imgs), (batch_size, dimension, dimension, dimension, 3))\n else:\n raise Exception(f\"Rank incorrect. Should be 2 or 3, but it is {rank}\") \n\n if n_labels:\n label2 = tf.reshape(tf.stack(labs), (batch_size, n_labels))\n else:\n label2 = tf.reshape(tf.stack(labs), (batch_size, n_classes))\n return image2, label2\n\n\ndef mixup(batch, label, batch_size=32, prob=1.0, dimension=256, n_classes=1, n_labels=None):\n \"\"\"\n Mixup randomly mixes data from two samples\n\n Parameters\n ----------\n batch : tf.Tensor\n batch of [b,dim,dim,3] or [b,dim,dim,dim,3]\n label : tf.tensor\n batch of shape [b,] if labels are integer, or [b,n_classes] if format is one-hot\n prob : float\n probability to perform dropout\n batch_size : int\n batch size\n dimension : int\n dimension of the data\n n_classes : int\n number of classes\n rank : int\n values must be 2 (image) or 3 (3d shape)\n\n Returns\n -------\n tf.Tensor\n A batch of images with Mixup applied\n \"\"\"\n\n rank = len(batch.shape) - 2\n #batch_size = batch.shape[0]\n\n imgs = []\n labs = []\n for j in range(batch_size):\n # DO MIXUP WITH PROBABILITY DEFINED ABOVE\n P = tf.cast(tf.random.uniform([], 0, 1) <= prob, tf.float32)\n # Choose Random\n k = tf.cast(tf.random.uniform([], 0, batch_size), tf.int32)\n a = tf.random.uniform([], 0, 1) * P # this is beta dist with alpha=1.0\n # Make mixup image\n img1 = batch[j,]\n img2 = batch[k,]\n imgs.append((1 - a) * img1 + a * img2)\n\n # Make Cutmix Label\n labs.append(_mixup_labels(label.shape, label[j], label[k], n_classes, a))\n\n # RESHAPE HACK SO TPU COMPILER KNOWS SHAPE OF OUTPUT TENSOR (maybe use Python typing instead?)\n image2 = tf.reshape(tf.stack(imgs), (batch_size, dimension, dimension, 3)) if rank == 2 else tf.reshape(tf.stack(imgs), (\n batch_size, dimension, dimension, dimension, 3))\n\n if n_labels:\n label2 = tf.reshape(tf.stack(labs), (batch_size, n_labels))\n else:\n label2 = tf.reshape(tf.stack(labs), (batch_size, n_classes))\n return image2, label2\n\n\ndef spec_augmentation(image, prob=0.66, time_drop_width=0.0625, time_stripes_num=2, freq_drop_width=0.125,\n freq_stripes_num=2, height=64, width=501):\n \"\"\"\n Add white noise to object or image\n\n Parameters\n ----------\n image : tf.Tensor\n image of size [height,width,3] not a batch of [b,dim,dim,3]\n prob : float\n probability to perform dropout\n time_drop_width : float\n\n time_stripes_num : int\n\n freq_drop_width : float\n\n freq_stripes_num : int\n\n height : int\n\n width : int\n\n\n Returns\n -------\n tf.Tensor\n input image or object with added white noise\n \"\"\"\n P = tf.cast(tf.random.uniform([], 0, 1) < prob, tf.int32)\n if (P == 0):\n return image # no action\n\n time_drop_size = tf.cast(time_drop_width * width, tf.int32)\n\n for i in range(time_stripes_num):\n begin = tf.cast(tf.random.uniform([], 0, width), tf.int32)\n end = tf.cast(tf.math.minimum(width, begin + time_drop_size), tf.int32)\n zeros = tf.zeros([height, end - begin, 3])\n image = tf.concat([image[:, :begin, :], zeros, image[:, end:, :]], axis=1)\n\n freq_drop_size = tf.cast(freq_drop_width 
* height, tf.int32)\n\n for i in range(freq_stripes_num):\n begin = tf.cast(tf.random.uniform([], 0, height), tf.int32)\n end = tf.cast(tf.math.minimum(height, begin + freq_drop_size), tf.int32)\n zeros = tf.zeros([end - begin, width, 3])\n image = tf.concat([image[:begin, :, :], zeros, image[end:, :, :]], axis=0)\n\n image = tf.reshape(image, [height, width, 3])\n\n return image\n\n\ndef add_white_noise(image, prob=0.3, std=0.2):\n \"\"\"\n Add white noise to object or image\n\n Parameters\n ----------\n image : tf.Tensor\n image of size [height,width,3] not a batch of [b,dim,dim,3]\n prob : float\n probability to perform dropout\n std : size\n Number of standard deviations to calculate noise with\n\n Returns\n -------\n tf.Tensor\n input image or object with added white noise\n \"\"\"\n P = tf.cast(tf.random.uniform([], 0, 1) < prob, tf.int32)\n if (P == 0):\n return image # no action\n\n h, w, c = image.shape\n\n noise = tf.random.normal(shape=image.shape, mean=tf.reduce_mean(image), stddev=tf.math.reduce_std(image) * std)\n image = image + noise\n image = tf.reshape(image, image.shape)\n return image\n\n\ndef add_band_noise(image, prob=0.3, std=0.2, band_height=0.125):\n \"\"\"\n Add white noise to an horizontal band in an image\n\n Parameters\n ----------\n image : tf.Tensor\n image of size [height,width,3] not a batch of [b,dim,dim,3]\n prob : float\n probability to perform dropout\n std : size\n Number of standard deviations to calculate noise with\n band_height : float\n Percentage of the height of the band\n\n Returns\n -------\n tf.Tensor\n input image or object with added noise\n \"\"\"\n P = tf.cast(tf.random.uniform([], 0, 1) < prob, tf.int32)\n if (P == 0):\n return image # no action\n\n h, w, c = image.shape\n\n band_height = tf.cast(band_height * h, tf.int32)\n\n begin = tf.cast(tf.random.uniform([], 0, h), tf.int32)\n end = tf.cast(tf.math.minimum(h, begin + band_height), tf.int32)\n\n noise = tf.random.normal(shape=[end - begin, w, 3], mean=tf.reduce_mean(image),\n stddev=tf.math.reduce_std(image) * std)\n image = tf.concat([image[:begin, :, :], image[begin:end, :, :] + noise, image[end:, :, :]], axis=0)\n image = tf.reshape(image, [h, w, c])\n return image",
"from collections.abc import Iterable\nimport pydicom\nimport scipy.ndimage\nimport numpy as np\nimport cv2\nfrom skimage import morphology\nfrom skimage import measure\nfrom sklearn.cluster import KMeans\nimport vtk\nfrom vtk.util import numpy_support\nimport os\nimport matplotlib.pyplot as plt\n\ndef load_scan(paths, library='vtk', resample_scan=True):\n \"\"\"\n Load a scan\n\n Parameters\n ----------\n library : str\n engine to be used to load scan\n resample_scan : bool\n whether to resample or not a scan\n\n Returns\n -------\n ndarray\n numpy array containing all the CT scan slices\n\n Raise\n -----\n library not supported\n \"\"\"\n if library == 'vtk':\n return load_vtk(paths, resample_scan=resample_scan)\n if library == 'pydicom':\n return load_pydicom(paths, resample_scan=resample_scan)\n raise Exception(f'Library {library} not supported')\n\n\ndef load_pydicom(paths, resample_scan=True, return_spacing=False):\n \"\"\"\n Function to read all DICOM files belonging to a scan. The functions sorts the slices in order.\n\n Parameters\n ----------\n paths : list of str\n list of paths to read. Normally you should use glob to get the files\n return_thickness : bool\n return slice thickness\n\n Returns\n -------\n List of slices sorted by Instance Number\n \"\"\"\n slices, spacing = load_slices(paths, return_spacing=True)\n hu_scan = get_pixels_hu(slices)\n\n if resample_scan:\n hu_scan = resample(hu_scan, scan_spacing=spacing)\n\n if return_spacing:\n return hu_scan, spacing\n\n return hu_scan\n\n\ndef load_slices(paths, return_spacing=False):\n \"\"\"\n Function to read all DICOM files belonging to a scan. The functions sorts the slices in order.\n Parameters\n ----------\n paths : list of str\n list of paths to read. Normally you should use glob to get the files\n return_thickness : bool\n return slice thickness\n\n Outputs\n -------\n slices :\n List of slices sorted by Instance Number\n \"\"\"\n slices = [pydicom.read_file(path) for path in paths]\n slices.sort(key=lambda x: int(x.InstanceNumber), reverse=True)\n try:\n slice_thickness = np.median([np.abs(slices[i].ImagePositionPatient[2] - slices[i + 1].ImagePositionPatient[2]) for i in range(len(slices) - 1)])\n except:\n slice_thickness = np.median([np.abs(slices[0].SliceLocation - slices[1].SliceLocation) for i in range(len(slices) - 1)])\n\n for s in slices:\n s.SliceThickness = slice_thickness\n\n if return_spacing:\n return slices, np.array([slice_thickness] + list(slices[0].PixelSpacing))\n\n return slices\n\n\ndef get_pixels_hu(scans):\n \"\"\"\n Function that converts a list of scans into a numpy array converted to HU scale\n Inputs:\n scans: List of sorted scans\n Output:\n numpy array of scans of shape H x W x D\n \"\"\"\n image = np.stack([s.pixel_array for s in scans])\n # Convert to int16 (from sometimes int16),\n # should be possible as values should always be low enough (<32k)\n image = image.astype(np.int16)\n\n # Set outside-of-scan pixels to 1\n # The intercept is usually -1024, so air is approximately 0\n image[image == -2000] = 0\n\n # Convert to Hounsfield units (HU)\n for n in range(len(scans)):\n\n intercept = scans[n].RescaleIntercept\n slope = scans[n].RescaleSlope\n\n if slope != 1:\n image[n] = slope * image[n].astype(np.float64)\n image[n] = image[n].astype(np.int16)\n\n image[n] += np.int16(intercept)\n\n return np.array(image, dtype=np.int16)\n\n\ndef resample(scan_arr, scan=None, scan_spacing=None):\n \"\"\"\n Resample a scan in array format, to adjust it to its pixel spacing and thickness\n 
Input:\n scan_arr: CT Scan in numpy array format\n scan =\n \"\"\"\n # Determine current pixel spacing\n spacing = scan_spacing if scan_spacing else np.array([scan[0].SliceThickness] + list(scan[0].PixelSpacing), dtype=np.float32)\n\n resize_factor = spacing\n new_real_shape = scan_arr.shape * resize_factor\n new_shape = np.round(new_real_shape)\n real_resize_factor = new_shape / scan_arr.shape\n new_spacing = spacing / real_resize_factor\n\n image = scipy.ndimage.interpolation.zoom(scan_arr, real_resize_factor, mode='nearest')\n\n return image\n\n\ndef resize_scan(scan, new_shape=(128,128,128)):\n \"\"\"\n Resize a 3D image using Open CV. The resizing is slice by slice\n Input:\n scan =\n new_shape =\n \"\"\"\n resized = np.stack([cv2.resize(img, (new_shape[1], new_shape[2])) for img in scan])\n resized = np.stack([cv2.resize(resized[:,:,i], (new_shape[0], new_shape[1])) for i in range(resized.shape[2])], axis=-1 )\n return resized\n\n\ndef air_removal_mask(dilation):\n labels = measure.label(dilation)\n label_vals = np.unique(labels)\n if labels[0,0] == labels[-1, -1]:\n upper_cut = (labels==labels[0,0])\n mask = np.abs(upper_cut*1 -1)\n else:\n upper_cut = (labels == labels[0,0])\n lower_cut = (labels == labels[-1,-1])\n mask = np.abs((upper_cut + lower_cut )*1 -1)\n return mask\n\n\ndef make_lungmask(img, display=False, mean=None, std=None):\n row_size = img.shape[0]\n col_size = img.shape[1]\n\n # re-scale\n mean = np.mean(img) if mean is None else mean\n std = np.std(img) if std is None else std\n img = img - mean\n img = img / std\n # Find the average pixel value near the lungs\n # to renormalize washed out images\n middle = img[int(col_size / 5):int(col_size / 5 * 4), int(row_size / 5):int(row_size / 5 * 4)]\n mean = np.mean(middle)\n max = np.max(img)\n min = np.min(img)\n # To improve threshold finding, I'm moving the\n # underflow and overflow on the pixel spectrum\n img[img == max] = mean\n img[img == min] = mean\n #\n # Using Kmeans to separate foreground (soft tissue / bone) and background (lung/air)\n #\n kmeans = KMeans(n_clusters=2).fit(np.reshape(middle, [np.prod(middle.shape), 1]))\n centers = sorted(kmeans.cluster_centers_.flatten())\n threshold = np.mean(centers)\n thresh_img = np.where(img < threshold, 1.0, 0.0) # threshold the image\n\n # First erode away the finer elements, then dilate to include some of the pixels surrounding the lung.\n # We don't want to accidentally clip the lung.\n\n eroded = morphology.erosion(thresh_img, np.ones([3, 3]))\n dilation = morphology.dilation(eroded, np.ones([8, 8]))\n\n labels = measure.label(dilation) # Different labels are displayed in different colors\n label_vals = np.unique(labels)\n regions = measure.regionprops(labels)\n good_labels = []\n for prop in regions:\n B = prop.bbox\n if B[2] - B[0] < row_size / 10 * 9 and B[3] - B[1] < col_size / 10 * 9 and B[0] > row_size / 5 and B[\n 2] < col_size / 5 * 4:\n good_labels.append(prop.label)\n mask = np.ndarray([row_size, col_size], dtype=np.int8)\n mask[:] = 0\n\n # After just the lungs are left, we do another large dilation\n # in order to fill in and out the lung mask\n #\n for N in good_labels:\n mask = mask + np.where(labels == N, 1, 0)\n mask = morphology.dilation(mask, np.ones([10, 10])) # one last dilation [10,10]\n\n mask = dilation.astype('int16') * air_removal_mask(dilation)\n\n if (display):\n fig, ax = plt.subplots(3, 2, figsize=[12, 12])\n ax[0, 0].set_title(\"Original\")\n ax[0, 0].imshow(img, cmap='gray')\n ax[0, 0].axis('off')\n\n ax[0, 
1].set_title(\"Threshold\")\n ax[0, 1].imshow(thresh_img, cmap='gray')\n ax[0, 1].axis('off')\n\n ax[1, 0].set_title(\"After Erosion and Dilation\")\n ax[1, 0].imshow(dilation, cmap='gray')\n ax[1, 0].axis('off')\n\n ax[1, 1].set_title(\"Color Labels\")\n ax[1, 1].imshow(labels)\n ax[1, 1].axis('off')\n\n ax[2, 0].set_title(\"Final Mask\")\n ax[2, 0].imshow(mask, cmap='gray')\n ax[2, 0].axis('off')\n\n ax[2, 1].set_title(\"Apply Mask on Original\")\n ax[2, 1].imshow(mask * img, cmap='gray')\n ax[2, 1].axis('off')\n\n plt.show()\n return mask * img\n\n\n########## VTK Library ########################\n\ndef load_vtk_dir(path):\n reader = vtk.vtkDICOMImageReader()\n reader.SetDirectoryName(path)\n reader.Update()\n _extent = reader.GetDataExtent()\n ConstPixelDims = [_extent[1] - _extent[0] + 1, _extent[3] - _extent[2] + 1, _extent[5] - _extent[4] + 1]\n\n # Load spacing values\n ConstPixelSpacing = reader.GetPixelSpacing()\n\n # Get the 'vtkImageData' object from the reader and get the 'vtkPointData' object from the 'vtkImageData' object\n pointData = reader.GetOutput().GetPointData()\n # Ensure that only one array exists within the 'vtkPointData' object\n assert (pointData.GetNumberOfArrays() == 1)\n # Get the `vtkArray` (or whatever derived type) which is needed for the `numpy_support.vtk_to_numpy` function\n arrayData = pointData.GetArray(0)\n # Convert the `vtkArray` to a NumPy array and reshape the NumPy array to 3D using 'ConstPixelDims' as a 'shape'\n arr = numpy_support.vtk_to_numpy(arrayData).reshape(ConstPixelDims, order='F')\n\n return arr, reader\n\ndef load_vtk_file(path):\n reader = vtk.vtkDICOMImageReader()\n if os.path.isdir(path):\n reader.SetDirectoryName(path)\n else:\n reader.SetFileName(path)\n reader.Update()\n _extent = reader.GetDataExtent()\n ConstPixelDims = [_extent[1] - _extent[0] + 1, _extent[3] - _extent[2] + 1, _extent[5] - _extent[4] + 1]\n\n arrayData = reader.GetOutput().GetPointData().GetArray(0)\n ArrayDicom = numpy_support.vtk_to_numpy(arrayData)\n ArrayDicom = ArrayDicom.reshape((reader.GetHeight(), reader.GetWidth()), order='F')\n return ArrayDicom, reader\n\n\ndef load_vtk(paths, resample_scan=True, return_spacing=False, sort_paths=False):\n if isinstance(paths, Iterable):\n if sort_paths:\n paths.sort(key=lambda x: int(x.split('/')[-1].split('.')[0]), reverse=True)\n slices = [load_vtk_file(path) for path in paths]\n scan = np.stack([s[0] for s in slices]).astype(np.int16)\n pixel_spacing = np.array([s[1].GetPixelSpacing() for s in slices])\n pixel_spacing = np.median(pixel_spacing, axis=0)\n else:\n if os.path.isdir(paths):\n scan, reader = load_vtk_dir(paths)\n pixel_spacing = reader.GetPixelSpacing()\n else:\n raise Exception(\"No valid paths format\")\n\n thickness = pixel_spacing[2]\n pixel_spacing = pixel_spacing[0]\n\n if resample_scan:\n scan = resample(scan, scan_spacing=np.array([thickness, pixel_spacing, pixel_spacing]))\n\n if return_spacing:\n return scan, np.array([thickness, pixel_spacing, pixel_spacing])\n\n return scan"
] | [
[
"torch.load",
"numpy.isnan",
"pandas.DataFrame",
"torch.cuda.amp.autocast",
"torch.unsqueeze",
"torch.cuda.amp.GradScaler",
"numpy.argmax",
"torch.no_grad",
"numpy.array",
"numpy.isinf",
"torch.save"
],
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.math.sin",
"tensorflow.keras.backend.clip",
"tensorflow.math.cos",
"tensorflow.keras.backend.dot",
"tensorflow.random.uniform",
"tensorflow.math.reduce_std",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"tensorflow.math.sqrt",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.math.maximum",
"tensorflow.ones",
"tensorflow.keras.backend.cast",
"numpy.isscalar",
"tensorflow.random.normal",
"tensorflow.math.minimum"
],
[
"numpy.abs",
"sklearn.cluster.KMeans",
"numpy.unique",
"numpy.min",
"numpy.median",
"numpy.ndarray",
"numpy.stack",
"numpy.int16",
"numpy.round",
"numpy.max",
"numpy.std",
"numpy.ones",
"numpy.mean",
"matplotlib.pyplot.subplots",
"numpy.prod",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nghorbani/body_visualizer | [
"be9cf756f8d1daed870d4c7ad1aa5cc3478a546c"
] | [
"src/body_visualizer/tools/vis_tools.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),\n# acting on behalf of its Max Planck Institute for Intelligent Systems and the\n# Max Planck Institute for Biological Cybernetics. All rights reserved.\n#\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights\n# on this computer program. You can only use this computer program if you have closed a license agreement\n# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and liable to prosecution.\n# Contact: [email protected]\n#\n#\n# If you use this code in a research publication please consider citing the following:\n#\n# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>\n#\n#\n# Code Developed by:\n# Nima Ghorbani <https://nghorbani.github.io/>\n#\n# 2018.01.02\n\nimport numpy as np\nimport cv2\nimport os\nimport trimesh\n\n\n\ncolors = {\n 'pink': [.6, .0, .4],\n 'purple': [.9, .7, .7],\n 'cyan': [.7, .75, .5],\n 'red': [1.0, 0.0, 0.0],\n\n 'green': [.0, 1., .0],\n 'yellow': [1., 1., 0],\n 'brown': [.5, .2, .1],\n 'brown-light': [0.654, 0.396, 0.164],\n 'blue': [.0, .0, 1.],\n\n 'offwhite': [.8, .9, .9],\n 'white': [1., 1., 1.],\n 'orange': [1., .2, 0],\n\n 'grey': [.7, .7, .7],\n 'grey-blue': [0.345, 0.580, 0.713],\n 'black': np.zeros(3),\n 'white': np.ones(3),\n\n 'yellowg': [0.83, 1, 0],\n}\n\ndef imagearray2file(img_array, outpath=None, fps=30):\n '''\n :param nparray: RxCxTxwidthxheightx3\n :param outpath: the directory where T images will be dumped for each time point in range T\n :param fps: fps of the gif file\n :return:\n it will return an image list with length T\n if outpath is given as a png file, an image will be saved for each t in T.\n if outpath is given as a gif file, an animated image with T frames will be created.\n '''\n\n if outpath is not None:\n outdir = os.path.dirname(outpath)\n if not os.path.exists(outdir): os.makedirs(outdir)\n\n if not isinstance(img_array, np.ndarray) or img_array.ndim < 6:\n raise ValueError('img_array should be a numpy array of shape RxCxTxwidthxheightx3')\n\n R, C, T, img_h, img_w, img_c = img_array.shape\n\n out_images = []\n for tIdx in range(T):\n row_images = []\n for rIdx in range(R):\n col_images = []\n for cIdx in range(C):\n col_images.append(img_array[rIdx, cIdx, tIdx])\n row_images.append(np.hstack(col_images))\n t_image = np.vstack(row_images)\n out_images.append(t_image)\n\n if outpath is not None:\n ext = outpath.split('.')[-1]\n if ext in ['png', 'jpeg', 'jpg']:\n for tIdx in range(T):\n if T > 1:\n cur_outpath = outpath.replace('.%s'%ext, '_%03d.%s'%(tIdx, ext))\n else:\n cur_outpath = outpath\n \n img = cv2.cvtColor(out_images[tIdx], cv2.COLOR_BGR2RGB)\n cv2.imwrite(cur_outpath, img)\n while not os.path.exists(cur_outpath): continue # wait until the snapshot is written to the disk\n elif ext == 'gif':\n import imageio\n with imageio.get_writer(outpath, mode='I', fps = fps) as writer:\n for tIdx in range(T):\n img = out_images[tIdx].astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n writer.append_data(img)\n elif ext == 'avi':\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(outpath, fourcc, fps, (img_w, img_h), True)\n for tIdx in range(T):\n img = out_images[tIdx].astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n 
video.write(img)\n\n video.release()\n cv2.destroyAllWindows()\n elif ext == 'mp4':\n #\n # from moviepy.editor import ImageSequenceClip\n # animation = ImageSequenceClip(out_images, fps=fps)\n # animation.write_videofile(outpath, verbose=False)\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video = cv2.VideoWriter(outpath, fourcc, fps, (img_w, img_h), True)\n for tIdx in range(T):\n img = out_images[tIdx].astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n video.write(img)\n\n video.release()\n try:\n cv2.destroyAllWindows()\n except:\n pass\n\n return out_images\n\ndef render_smpl_params(bm, body_parms):\n '''\n :param bm: pytorch body model with batch_size 1\n :param pose_body: Nx21x3\n :param trans: Nx3\n :param betas: Nxnum_betas\n :return: N x 400 x 400 x 3\n '''\n\n from human_body_prior.tools.omni_tools import copy2cpu as c2c\n from body_visualizer.mesh.mesh_viewer import MeshViewer\n\n imw, imh = 400, 400\n\n mv = MeshViewer(width=imw, height=imh, use_offscreen=True)\n\n faces = c2c(bm.f)\n\n v = c2c(bm(**body_parms).v)\n\n T, num_verts = v.shape[:-1]\n\n images = []\n for fIdx in range(T):\n\n mesh = trimesh.base.Trimesh(v[fIdx], faces, vertex_colors=num_verts*colors['grey'])\n mv.set_meshes([mesh], 'static')\n\n images.append(mv.render())\n\n return np.array(images).reshape(T, imw, imh, 3)\n\ndef meshes_as_png(meshes, outpath=None, view_angles=[0, 180]):\n from body_visualizer.mesh.mesh_viewer import MeshViewer\n\n imw = 800\n imh = 800\n mv = MeshViewer(imh, imw)\n mv.set_cam_trans([0, -.5, 1.75])\n images = np.zeros([len(meshes), len(view_angles), 1, imw, imh, 3])\n for mIdx, mesh in enumerate(meshes):\n for rId, angle in enumerate(view_angles):\n if angle != 0: mesh.apply_transform(trimesh.transformations.rotation_matrix(np.radians(angle), (0, 1, 0)))\n mv.set_meshes([mesh], group_name='static')\n images[mIdx, rId, 0] = cv2.cvtColor(mv.render(render_wireframe=False), cv2.COLOR_BGR2RGB)\n if angle != 0: mesh.apply_transform(trimesh.transformations.rotation_matrix(np.radians(-angle), (0, 1, 0)))\n\n if outpath is not None: imagearray2file(images, outpath)\n return images\n\ndef show_image(img_ndarray):\n '''\n Visualize rendered body images resulted from render_smpl_params in Jupyter notebook\n :param img_ndarray: Nxim_hxim_wx3\n '''\n import matplotlib.pyplot as plt\n import cv2\n fig = plt.figure(figsize=(4, 4), dpi=300)\n ax = fig.gca()\n\n img = img_ndarray.astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n ax.imshow(img)\n plt.axis('off')\n\n # fig.canvas.draw()\n # return True"
] | [
[
"numpy.hstack",
"numpy.radians",
"numpy.ones",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bmello4688/deep-reinforcement-learning | [
"2337db08e7ea6bd512afa670e8e142b47acd688e"
] | [
"p2_continuous-control/model.py"
] | [
"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n#copied from pendulum\n\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. / np.sqrt(fan_in)\n return (-lim, lim)\n\nclass Actor(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fc1_units=400, fc2_units=300):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(Actor, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, action_size)\n self.reset_parameters()\n\n def reset_parameters(self):\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n\n def forward(self, state):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))\n\n\nclass Critic(nn.Module):\n \"\"\"Critic (Value) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fcs1_units (int): Number of nodes in the first hidden layer\n fc2_units (int): Number of nodes in the second hidden layer\n \"\"\"\n super(Critic, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fcs1 = nn.Linear(state_size, fcs1_units)\n self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)\n self.fc3 = nn.Linear(fc2_units, 1)\n self.reset_parameters()\n\n def reset_parameters(self):\n self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n\n def forward(self, state, action):\n \"\"\"Build a critic (value) network that maps (state, action) pairs -> Q-values.\"\"\"\n xs = F.relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = F.relu(self.fc2(x))\n return self.fc3(x)"
] | [
[
"torch.nn.Linear",
"torch.manual_seed",
"numpy.sqrt",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qnano/photonpy | [
"9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4",
"4f149d4e6c997954ac862cc5a7a404855b2a0be9",
"9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4"
] | [
"photonpy/cpp/calib.py",
"photonpy/utils/running_mean.py",
"photonpy/cpp/spotdetect.py"
] | [
"import ctypes as ct\nfrom .lib import SMLM\nimport numpy as np\nimport numpy.ctypeslib as ctl\nfrom .context import Context\n\n#CDLL_EXPORT sCMOS_CalibrationTransform* sCMOS_Calib_Create(int w, int h, \n#const float* offset, const float* gain, const float *variance, Context* ctx);\n#CDLL_EXPORT GainOffsetTransform* GainOffsetCalib_Create(float gain, float offset, Context* ctx);\n\n\nclass sCMOS_Calib:\n def __init__(self, ctx, offset, gain, variance):\n self._sCMOS_Calib_Create = ctx.lib.sCMOS_Calib_Create\n self._sCMOS_Calib_Create.argtypes = [\n ct.c_int32, ct.c_int32,\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"),\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), \n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"),\n ct.c_void_p\n ]\n self._sCMOS_Calib_Create.restype = ct.c_void_p\n\n offset = np.ascontiguousarray(offset,dtype=np.float32)\n gain = np.ascontiguousarray(gain,dtype=np.float32)\n variance = np.ascontiguousarray(variance,dtype=np.float32)\n \n assert(len(offset.shape)==2)\n assert(np.array_equal(offset.shape,gain.shape))\n assert(np.array_equal(offset.shape,variance.shape))\n \n self.inst = self._sCMOS_Calib_Create(offset.shape[1],offset.shape[0],offset,gain,variance,ctx.inst)\n \n# Constant global Gain/offset \nclass GainOffset_Calib:\n def __init__(self, gain, offset, ctx):\n self._GainOffsetCalib_Create = ctx.lib.GainOffsetCalib_Create\n self._GainOffsetCalib_Create.argtypes=[\n ct.c_float, \n ct.c_float, \n ct.c_void_p]\n self._GainOffsetCalib_Create.restype=ct.c_void_p\n self.inst = self._GainOffsetCalib_Create(gain,offset,ctx.inst)\n\n\n# Gain/offset supplied by image\nclass GainOffsetImage_Calib:\n def __init__(self, gain, offset, ctx):\n self._GainOffsetImageCalib_Create = ctx.lib.GainOffsetImageCalib_Create\n self._GainOffsetImageCalib_Create.argtypes=[\n ct.c_int32, ct.c_int32,\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"),\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"),\n ct.c_void_p]\n self._GainOffsetImageCalib_Create.restype=ct.c_void_p\n\n gain = np.ascontiguousarray(gain,dtype=np.float32)\n offset = np.ascontiguousarray(offset,dtype=np.float32)\n assert(np.array_equal(gain.shape,offset.shape))\n self.inst = self._GainOffsetImageCalib_Create(gain.shape[1], gain.shape[0], gain,offset,ctx.inst)\n",
"# https://stackoverflow.com/questions/13728392/moving-average-or-running-mean\nimport numpy as np\n\n\ndef running_mean(y_in, x_in=None, N_out=None, sigma=1):\n \"\"\"\n Returns running mean as a Bell-curve weighted average at evenly spaced\n points. Does NOT wrap signal around, or pad with zeros.\n\n Arguments:\n y_in -- y values, the values to be smoothed and re-sampled\n x_in -- x values for array\n\n Keyword arguments:\n N_out -- NoOf elements in resampled array.\n sigma -- 'Width' of Bell-curve in units of param x .\n \"\"\"\n N_in = np.size(y_in)\n\n if x_in == None:\n x_in = np.arange(len(y_in))\n\n if N_out == None:\n N_out = N_in\n\n # Gaussian kernel\n x_out = np.linspace(np.min(x_in), np.max(x_in), N_out)\n x_in_mesh, x_out_mesh = np.meshgrid(x_in, x_out)\n gauss_kernel = np.exp(-np.square(x_in_mesh - x_out_mesh) / (2 * sigma ** 2))\n # Normalize kernel, such that the sum is one along axis 1\n normalization = np.tile(np.reshape(np.sum(gauss_kernel, axis=1), (N_out, 1)), (1, N_in))\n gauss_kernel_normalized = gauss_kernel / normalization\n # Perform running average as a linear operation\n y_out = gauss_kernel_normalized @ y_in\n\n return y_out, x_out\n\n\nif __name__ == \"__main__\":\n\n import matplotlib.pyplot as plt\n\n y, x = np.random.uniform(size=100), np.arange(100)\n y_avg, x_avg = running_mean(y, x, 100, sigma=6)\n\n plt.figure()\n plt.plot(x, y)\n plt.plot(x_avg, y_avg)\n",
"# -*- coding: utf-8 -*-\n\nimport ctypes\nfrom .lib import SMLM\nimport numpy as np\nimport numpy.ctypeslib as ctl\nimport numbers\n\nfrom .image_proc import ImageProcessor\nfrom .estim_queue import EstimQueue\n\nfrom .roi_queue import ROIQueue\n\nclass SpotDetectorNativeFactory:\n def __init__(self, inst, destructor):\n self.inst = inst\n self.destructor = destructor\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n d = self.destructor\n d(self.inst)\n\nclass SpotDetector(ctypes.Structure):\n # detectionImage = uniform1 - uniform2\n # Selected spot locations = max(detectionImage, maxFilterSize) == detectionImage\n _fields_ = [\n (\"uniformFilter1Size\", ctypes.c_int32),\n (\"uniformFilter2Size\", ctypes.c_int32),\n (\"maxFilterSize\", ctypes.c_int32),\n (\"roisize\", ctypes.c_int32), # Roisize is used to remove ROIs near the image border\n (\"minIntensity\", ctypes.c_float), # Only spots where detectionImage > intensityThreshold are selected\n (\"maxIntensity\", ctypes.c_float),\n (\"backgroundImagePtr\", ctypes.c_void_p)\n ] # Only spots where detectionImage > intensityThreshold are selected\n\n def __init__(self, psfSigma, roisize, minIntensity=7, maxIntensity=np.inf, backgroundImage=None):\n psfSigma = np.mean(psfSigma)\n self.uniformFilter1Size = int(psfSigma * 2 + 2)\n self.uniformFilter2Size = self.uniformFilter1Size * 2\n self.maxFilterSize = int(psfSigma * 5)\n self.roisize = roisize\n self.minIntensity = minIntensity\n self.maxIntensity = maxIntensity\n self.backgroundImage_ = np.ascontiguousarray(backgroundImage,dtype=np.float32)\n if backgroundImage is not None:\n self.backgroundImagePtr = self.backgroundImage_.ctypes.data\n else:\n self.backgroundImagePtr = 0\n \n def print(self):\n print(\"Spot detector config: \\n\" +\n f\"uniform filter 1: {self.uniformFilter1Size}\\n\"\n f\"uniform filter 2: {self.uniformFilter2Size}\\n\"\n f\"maxfilter: {self.maxFilterSize}\\n\"\n f\"min intensity: {self.minIntensity}\\n\"\n f\"max intensity: {self.maxIntensity}\\n\"\n )\n\n def CreateNativeFactory(self, ctx):\n m = SpotDetectionMethods(ctx)\n return SpotDetectorNativeFactory(m._SpotDetector_Configure(self), \n m._SpotDetector_DestroyFactory)\n \n\n\n\n\nclass PSFCorrelationSpotDetector:\n def __init__(self, psfstack, bgimg, minPhotons, maxFilterSizeXY, bgFilterSize=12, debugMode=False, roisize=None):\n self.bgimg = bgimg.astype(np.float32)\n \n psfstack = np.ascontiguousarray(psfstack, dtype=np.float32)\n self.psfstack = psfstack\n self.minPhotons = minPhotons\n self.maxFilterSizeXY = maxFilterSizeXY\n self.debugMode = debugMode\n self.bgFilterSize = bgFilterSize\n if roisize is not None:\n self.roisize = roisize\n else:\n self.roisize = psfstack.shape[-1]\n \n if len(psfstack.shape) != 3 or psfstack.shape[1]!=psfstack.shape[2]:\n raise ValueError('expected a square 3D PSF stack (xy dims should be equal)')\n \n \n def CreateNativeFactory(self,ctx):\n m = SpotDetectionMethods(ctx)\n roisize = self.psfstack.shape[1]\n h,w = self.bgimg.shape\n depth = len(self.psfstack)\n return SpotDetectorNativeFactory(\n m._PSFCorrelationSpotDetector_Configure(\n self.bgimg, w, h, self.psfstack, roisize, depth,\n self.maxFilterSizeXY, self.minPhotons, self.bgFilterSize, self.debugMode),\n m._SpotDetector_DestroyFactory)\n\nclass SpotDetectionMethods:\n def __init__(self, ctx):\n InstancePtrType = ctypes.c_void_p\n self.ctx = ctx\n self.lib = ctx.smlm\n lib = ctx.smlm.lib\n\n self._SpotDetector_Configure = lib.SpotDetector_Configure\n self._SpotDetector_Configure.argtypes = 
[ctypes.POINTER(SpotDetector)]\n self._SpotDetector_Configure.restype = ctypes.c_void_p\n\n self._SpotDetector_DestroyFactory = lib.SpotDetector_DestroyFactory\n self._SpotDetector_DestroyFactory.argtypes = [InstancePtrType]\n\n#\n\n self._PSFCorrelationSpotDetector_Configure = lib.PSFCorrelationSpotDetector_Configure\n self._PSFCorrelationSpotDetector_Configure.argtypes=[\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # frame\n ctypes.c_int32, # width\n ctypes.c_int32, # height\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # frame\n ctypes.c_int32, # roisize\n ctypes.c_int32, # depth\n ctypes.c_int32, # maxFilterSizeXY\n ctypes.c_float,\n ctypes.c_int32, # bgfiltersize\n ctypes.c_int32 #debugmode\n ]\n self._PSFCorrelationSpotDetector_Configure.restype = ctypes.c_void_p\n\n\n#CDLL_EXPORT int SpotDetector_ProcessFrame(const float* frame, int width, int height,\n#\tint maxSpots, float* spotScores, Int2* cornerPos, float* rois, const SpotDetector & cfg)\n\n self._SpotDetector_ProcessFrame = lib.SpotDetector_ProcessFrame\n self._SpotDetector_ProcessFrame.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # frame\n ctypes.c_int32, # width\n ctypes.c_int32, # height\n ctypes.c_int32, # roisize\n ctypes.c_int32, # maxspots\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # spotscores\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # spotz\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # cornerpos\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # rois [output]\n ctypes.c_void_p,\n ctypes.c_void_p, # Calibration object\n ]\n self._SpotDetector_ProcessFrame.restype = ctypes.c_int32\n\n #CDLL_EXPORT void ExtractROIs(const float *frames, int width, int height, int depth,\n # int roiX, int roiY, int roiZ, const Int3 * startpos, int numspots, float * rois);\n self._ExtractROIs = lib.ExtractROIs\n self._ExtractROIs.argtypes = [\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # frames\n ctypes.c_int32, # width\n ctypes.c_int32, # height\n ctypes.c_int32, # depth\n ctypes.c_int32, # roisizeX\n ctypes.c_int32, # roisizeY\n ctypes.c_int32, # roisizeZ\n ctl.ndpointer(np.int32, flags=\"aligned, c_contiguous\"), # startZYX\n ctypes.c_int32, # numspots\n ctl.ndpointer(np.float32, flags=\"aligned, c_contiguous\"), # rois\n ]\n \n#CDLL_EXPORT SpotLocalizerQueue * SpotLocalizerQueue_Create(int w,int h,, LocalizationQueue* queue, \n#\tISpotDetectorFactory* spotDetectorFactory, IDeviceImageProcessor* preprocessor, \n#\tint numDetectionThreads, Context* ctx)\n \n self._SpotExtractionQueue_Create= lib.SpotExtractionQueue_Create\n self._SpotExtractionQueue_Create.argtypes =[\n ctypes.c_int32, # w\n ctypes.c_int32, # h\n ctypes.c_void_p, # roiqueue\n ctypes.c_void_p, # spotDetectorFactory\n ctypes.c_void_p, # calib\n ctypes.c_int32, # nthreads\n ctypes.c_int32, # sumframes\n ctypes.c_void_p, #context\n ]\n self._SpotExtractionQueue_Create.restype = ctypes.c_void_p\n \n def CreateQueue(self, imgshape, roishape, spotDetectorType,\n calib=None, sumframes=1, numThreads=3, ctx=None):\n \"\"\"\n Create a spot-detection queue.\n Returns a tuple with (input_queue, output_queue).\n input_queue is an ImageProcessor, output_queue is a ROIQueue\n \"\"\"\n \n if ctx is None:\n ctx = self.ctx\n \n rq = ROIQueue([sumframes, *roishape], ctx)\n\n with spotDetectorType.CreateNativeFactory(self.ctx) as sdf:\n inst = self._SpotExtractionQueue_Create(imgshape[1],imgshape[0],\n rq.inst, \n sdf.inst, \n calib.inst if calib 
else None, \n numThreads, \n sumframes,\n ctx.inst if ctx else None)\n\n return ImageProcessor(imgshape, inst, self.ctx), rq\n\n def ExtractROIs(self, frames, roishape, cornerPosZYX):\n assert(len(frames.shape)==3)\n assert(cornerPosZYX.shape[1] == 3)\n\n numspots = len(cornerPosZYX)\n cornerPosZYX = np.ascontiguousarray(cornerPosZYX,dtype=np.int32)\n frames = np.ascontiguousarray(frames,dtype=np.float32)\n rois = np.zeros((numspots, *roishape),dtype=np.float32)\n\n self._ExtractROIs(frames, frames.shape[2], frames.shape[1], frames.shape[0],\n roishape[2],roishape[1],roishape[0],cornerPosZYX,numspots,rois)\n\n return rois\n\n\n def ProcessFrame(self, image, spotDetector, roisize, maxSpotsPerFrame, calib=None):\n assert len(image.shape)==2\n h = image.shape[0]\n w = image.shape[1]\n\n image = np.ascontiguousarray(image,dtype=np.float32)\n\n scores = np.zeros(maxSpotsPerFrame, dtype=np.float32)\n rois = np.zeros((maxSpotsPerFrame, roisize,roisize),dtype=np.float32)\n cornerYX = np.zeros((maxSpotsPerFrame, 2),dtype=np.int32)\n spotz = np.zeros(maxSpotsPerFrame, dtype=np.int32)\n\n with spotDetector.CreateNativeFactory(self.ctx) as sdf:\n numspots = self._SpotDetector_ProcessFrame(image, w,h,roisize,maxSpotsPerFrame,\n scores,spotz,cornerYX,rois,sdf.inst,\n calib.inst if calib else None)\n\n rois = rois[:numspots]\n scores = scores[:numspots]\n cornerYX = cornerYX[:numspots]\n spotz = spotz[:numspots]\n\n return rois, cornerYX, scores, spotz\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.ctypeslib.ndpointer",
"numpy.array_equal"
],
[
"numpy.square",
"numpy.min",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.size",
"numpy.random.uniform",
"numpy.meshgrid",
"numpy.sum",
"matplotlib.pyplot.figure"
],
[
"numpy.ascontiguousarray",
"numpy.ctypeslib.ndpointer",
"numpy.mean",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ethansaxenian/RosettaDecode | [
"8ea1a42a5f792280b50193ad47545d14ee371fb7",
"8ea1a42a5f792280b50193ad47545d14ee371fb7"
] | [
"lang/Python/sum-of-squares-3.py",
"lang/Python/sieve-of-eratosthenes-9.py"
] | [
"import numpy as np\na = np.array([1, 2, 3, 4, 5])\nnp.sum(a ** 2)\n",
"from numpy import array, bool_, multiply, nonzero, ones, put, resize\n#\ndef makepattern(smallprimes):\n pattern = ones(multiply.reduce(smallprimes), dtype=bool_)\n pattern[0] = 0\n for p in smallprimes:\n pattern[p::p] = 0\n return pattern\n#\ndef primes_upto3(limit, smallprimes=(2,3,5,7,11)):\n sp = array(smallprimes)\n if limit <= sp.max(): return sp[sp <= limit]\n #\n isprime = resize(makepattern(sp), limit + 1)\n isprime[:2] = 0; put(isprime, sp, 1)\n #\n for n in range(sp.max() + 2, int(limit**0.5 + 1.5), 2):\n if isprime[n]:\n isprime[n*n::n] = 0\n return nonzero(isprime)[0]\n"
] | [
[
"numpy.array",
"numpy.sum"
],
[
"numpy.put",
"numpy.array",
"numpy.multiply.reduce",
"numpy.nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yunshangyue71/mycodes | [
"54b876004c32d38d9c0363fd292d745fee8dff3c",
"54b876004c32d38d9c0363fd292d745fee8dff3c"
] | [
"projects/unet/net/unet.py",
"scripts_torch/loss/loss_example.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# 将图片的3通道提升维度\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n def __init__(self, in_channels, out_channels, mid_channels=None):\n super().__init__()\n if not mid_channels:\n mid_channels = out_channels\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(mid_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n return self.double_conv(x)\n\n# top down\nclass Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n )\n\n def forward(self, x):\n return self.maxpool_conv(x)\n\n#bottom up\nclass Up(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)\n else:\n self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)\n self.conv = DoubleConv(in_channels, out_channels)\n\n\n def forward(self, x1, x2):\n x1 = self.up(x1)\n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n\n# 将网络 输出对应的通道数目\nclass OutConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass UNet(nn.Module):\n def __init__(self, n_channels, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.bilinear = bilinear\n\n self.inc = DoubleConv(n_channels, 64)\n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)\n factor = 2 if bilinear else 1\n self.down4 = Down(512, 1024 // factor)\n self.up1 = Up(1024, 512 // factor, bilinear)\n self.up2 = Up(512, 256 // factor, bilinear)\n self.up3 = Up(256, 128 // factor, bilinear)\n self.up4 = Up(128, 64, bilinear)\n self.outc = OutConv(64, n_classes)\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n logits = self.outc(x)\n return logits",
"import torch\n\n#自己实现MSE loss\nclass My_loss(nn.Module):\n def __init__(self):\n super().__init__()\n \n def forward(self, x, y):\n return torch.mean(torch.pow((x - y), 2))\n"
] | [
[
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.pad"
],
[
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DLPerf/kglib | [
"d8f368b35fa06f8beb1c95348974d6e7e465afce"
] | [
"kglib/kgcn/learn/loss.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef loss_ops_from_difference(target_op, output_ops):\n \"\"\"\n Loss operation which directly compares the target with the output over all nodes and edges\n Args:\n target_op: The target of the model\n output_ops: A list of the outputs of the model, one for each message-passing step\n\n Returns: The loss for each message-passing step\n\n \"\"\"\n loss_ops = [\n tf.losses.softmax_cross_entropy(target_op.nodes, output_op.nodes)\n for output_op in output_ops\n ]\n return loss_ops\n\n\ndef loss_ops_preexisting_no_penalty(target_op, output_ops):\n \"\"\"\n Loss operation which doesn't penalise the output values for pre-existing nodes and edges, treating them as slack\n variables\n\n Args:\n target_op: The target of the model\n output_ops: A list of the outputs of the model, one for each message-passing step\n\n Returns: The loss for each message-passing step\n\n \"\"\"\n loss_ops = []\n node_mask_op = tf.math.reduce_any(\n tf.math.not_equal(target_op.nodes, tf.constant(np.array([1., 0., 0.]), dtype=tf.float32)), axis=1)\n target_nodes = tf.boolean_mask(target_op.nodes, node_mask_op)\n output_nodes = tf.boolean_mask(output_op.nodes, node_mask_op)\n\n for output_op in output_ops:\n\n loss_op = tf.losses.softmax_cross_entropy(target_nodes, output_nodes)\n\n loss_ops.append(loss_op)\n\n return loss_ops"
] | [
[
"tensorflow.boolean_mask",
"tensorflow.losses.softmax_cross_entropy",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
kpj/rwrap | [
"da885dd518e7cf8eb3c6fa1f03d4816f31f11492"
] | [
"tests/scripts/DESeq2/DESeq2.py"
] | [
"import pandas as pd\n\nfrom rwrap import base, stats, DESeq2\n\n\n# read data\ndf_cts = pd.read_csv(\"count_data.csv\", index_col=0)\ndf_coldata = pd.read_csv(\"col_data.csv\", index_col=0)\n\n# do DEA\ndds = DESeq2.DESeqDataSetFromMatrix(\n countData=df_cts, colData=df_coldata, design=stats.as_formula(\"~ condition\")\n)\n\ndds = DESeq2.DESeq(dds)\n\nres = DESeq2.results(dds)\n\n# save result\ndf_res = base.as_data_frame(res)\n\n(\n df_res.reset_index()\n .rename(columns={\"index\": \"gene\"})\n .to_csv(\"result.csv\", index=False)\n)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gelnesr/SVD-of-MSAs | [
"681430e4e5f9e43a96676a91c8331df989069a5e"
] | [
"webscraper.py"
] | [
"# Modified source code from https://github.com/Aksh77/Bio-Scraper/blob/master/UniProt-Scraper/scraper.py\nimport os\nimport re\nimport sys\nimport csv\nimport pandas as pd\nimport pprint\nimport time\nimport urllib\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n#display list\ndef display_list(arr):\n return \";\\n\".join(arr)\n\n#get data; handle error\ndef fetchdata(url):\n try:\n return urlopen(url)\n except:\n fetchdata(url)\n\n#get UniProt Protein IDs\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\ncode = sys.argv[3]\ndf = pd.read_csv(input_file)\nIDs = df[code]\n\n#Get molecular functions\nwith open(output_file, 'w') as csvfile1:\n #Write header row for Protein_data file\n datawriter1 = csv.writer(csvfile1, delimiter=',')\n header1 = [ \"Protein ID\", \"Identified Length\", \"Gene ID\", \"Protein Name\", \"Organism Name\",\n \"Taxonomic ID\", \"Molecular Function-GO Annot\",\"Molecular Function-Keyword\",\n \"Biological processes-GO Annot\",\"Biological processes-Keywords\",\n \"Cellular Component-Go Annot\", \"Cellular Component-Keywords\",\n \"Disease-OMIM ID\", \"Disease-Keywords\",\n \"Technical Terms-Keywords\", \"Polymorphism\" ]\n datawriter1.writerow(header1)\n\n countdown = len(IDs)\n print('Starting scraping of ' + code + ' from ' + input_file)\n start_time = time.time()\n for i in IDs:\n countdown -= 1\n try:\n pid = i.split('/')[0]\n plen = int(i.split('/')[1].split('-')[1]) - int(i.split('/')[1].split('-')[0]) + 2\n #specify the url\n url = \"https://www.uniprot.org/uniprot/\" + str(pid)\n #Query the website\n page = fetchdata(url)\n #Parse the html, store it in Beautiful Soup format\n bsf = BeautifulSoup(page, \"lxml\")\n\n #get Gene ID\n gid = \"\"\n ext_data = bsf.find('table', class_='databaseTable GENOME')\n if(not (ext_data is None)):\n data = ext_data.findAll(text=True)\n if \"GeneID\" in data:\n i = data.index(\"GeneID\")\n gid = data[i+2]\n\n #Molecular Function GO Annotation\n molecular_function_go = []\n ext_data = bsf.find('ul', class_='noNumbering molecular_function')\n if(not (ext_data is None)):\n for data in ext_data.findAll('li'):\n cells = data.find(lambda tag: tag.name == \"a\" and (tag.has_attr(\"onclick\")))\n if(not(cells is None)):\n cells = cells.find(text=True).strip()\n molecular_function_go.append(cells)\n\n #Biological Processes GO Annotation\n biological_process_go = []\n ext_data = bsf.find('ul', class_='noNumbering biological_process')\n if(not (ext_data is None)):\n for data in ext_data.findAll('li'):\n cells = data.find(lambda tag: tag.name == \"a\" and (tag.has_attr(\"onclick\")))\n if(not(cells is None)):\n cells = cells.find(text=True).strip()\n biological_process_go.append(cells)\n\n #Cellular Component GO Annotation\n cellular_component_go = []\n ext_data = bsf.find('div', id='table-go_annotation')\n if(not (ext_data is None)):\n lt = ext_data.find('ul', class_='noNumbering subcellLocations')\n for li in lt.findAll('li'):\n cell = li.find('h6')\n if(not(cell is None)):\n cell_loc = cell.find(text=True)\n cellular_component_go.append(cell_loc)\n\n #protein names and taxonomic identifiers\n organism_name = []\n tax_id = []\n org = 0\n ext_data = bsf.find('div', id=\"names_and_taxonomy\")\n if(not (ext_data is None)):\n ext_data = ext_data.find('table')\n for row in ext_data:\n data = row.findAll('td')\n head = data[0].find(text=True)\n vals = data[1].findAll('a')\n val = [v.find(text=True) for v in vals]\n v = list(filter(lambda x : x != ', ', vals))\n val = []\n tax = []\n for kw in v:\n kws = str(kw)\n if 
kws[:18]=='<a href=\"/taxonomy' and org == 0:\n val.append(re.sub('<[^<]+?>', '', kw.find(text=True)))\n organism_name = val\n org = org + 1\n elif kws[:18]=='<a href=\"/taxonomy' and org == 1:\n tax.append(re.sub('<[^<]+?>', '', kw.find(text=True)))\n tax_id = tax\n org = org + 1\n\n #protein names\n protein_name = 'Uncharacterized protein'\n ext_data = bsf.find('span', class_=\"recommended-name\")\n if(not (ext_data is None)):\n for row in ext_data:\n protein_name = row\n break\n\n\n #cellular component keywords\n cellular_component_kw = []\n ext_data = bsf.find('div', class_='section ', id=\"subcellular_location\")\n if(not (ext_data is None)):\n header = ext_data.find('table')\n if(not (header is None)):\n head_data = header.findAll(text=True)\n for h in head_data:\n data = header.next_sibling\n vals = data.findAll('a')\n val = [v.find(text=True) for v in vals]\n v = list(filter(lambda x : x != ', ', vals))\n val = []\n for kw in v:\n kws = str(kw)\n if kws[:18]=='<a href=\"/keywords':\n val.append(kw.find(text=True))\n cellular_component_kw = val\n\n #Keywords - Molecular Function and Biological processes\n molecular_function_kw = []\n biological_process_kw = []\n ext_data = bsf.find('table', class_='databaseTable')\n if(not (ext_data is None)):\n ext_data = ext_data.findAll('tr')\n for row in ext_data:\n data = row.findAll('td')\n head = data[0].find(text=True)\n vals = data[1].findAll('a')\n val = [v.find(text=True) for v in vals]\n v = list(filter(lambda x : x != ', ', vals))\n val = []\n for kw in v:\n kws = str(kw)\n if kws[:18]=='<a href=\"/keywords':\n val.append(kw.find(text=True))\n if(head==\"Molecular function\"):\n molecular_function_kw = val\n if(head==\"Biological process\"):\n biological_process_kw = val\n\n #Disease OMIM ID\n disease_omim_id = []\n ids = []\n ext_data = bsf.findAll('div', class_='diseaseAnnotation')\n if(not (ext_data is None)):\n for data in ext_data:\n val = data.findAll('a')\n for data1 in val:\n data2 = data.findAll(text=True)\n for j in data2:\n if j[:8]==\"See also\":\n ids.append(j[14:])\n ids = set(ids)\n disease_omim_id = list(ids)\n\n #Disease Keywords\n disease_kw = []\n ext_data = bsf.find('div', class_='section', id='pathology_and_biotech')\n if(not (ext_data is None)):\n heads = ext_data.findAll('h4')\n for head in heads:\n data = head.findAll(text=True)\n if 'Keywords - Disease' in data:\n j = data.index('Keywords - Disease')\n val = data[j].parent.parent\n cells = val.next_sibling\n vals = cells.findAll('a')\n val = [v.find(text=True) for v in vals]\n v = list(filter(lambda x : x != ', ', vals))\n val = []\n for kw in v:\n kws = str(kw)\n if kws[:18]=='<a href=\"/keywords':\n val.append(kw.find(text=True))\n disease_kw = val\n break\n\n #Technical Terms - Keywords\n tech_term_kw = []\n ext_data = bsf.find('div', class_='section', id='miscellaneous')\n if(not (ext_data is None)):\n heads = ext_data.findAll('h4')\n for head in heads:\n data = head.findAll(text=True)\n if 'Keywords - Technical term' in data:\n j = data.index('Keywords - Technical term')\n val = data[j].parent.parent\n cells = val.next_sibling\n vals = cells.findAll('a')\n val = [v.find(text=True) for v in vals]\n v = list(filter(lambda x : x != ', ', vals))\n val = []\n for kw in v:\n kws = str(kw)\n if kws[:18]=='<a href=\"/keywords':\n val.append(kw.find(text=True))\n tech_term_kw = val\n break\n\n #Polymorphism\n polymorphism = \"\"\n ext_data = bsf.find('div', class_='section', id='sequences')\n if(not (ext_data is None)):\n heads = ext_data.findAll('h4')\n for head in 
heads:\n data = head.findAll(text=True)\n if 'Polymorphism' in data:\n j = data.index('Polymorphism')\n val = data[j].parent.parent\n polymorphism = val.next_sibling.find(text=True)\n break\n #write data to Protein_data CSV file\n print(str(pid))\n datawriter1.writerow([pid, plen, gid, protein_name, display_list(organism_name), display_list(tax_id),\n display_list(molecular_function_go), display_list(molecular_function_kw),\n display_list(biological_process_go), display_list(biological_process_kw),\n display_list(cellular_component_go), display_list(cellular_component_kw),\n display_list(disease_omim_id), display_list(disease_kw),\n display_list(tech_term_kw), polymorphism]) \n except:\n pass\n\ndf1 = pd.read_csv(output_file)\ndf1['Protein Name'] = df1['Protein Name'].map(lambda x: x.replace('<strong>', '').replace('</strong>', ''))\nfor column in ['Molecular Function-GO Annot', 'Molecular Function-Keyword', 'Biological processes-GO Annot', 'Biological processes-Keywords', 'Cellular Component-Go Annot', 'Cellular Component-Keywords', 'Disease-OMIM ID', 'Disease-Keywords', 'Technical Terms-Keywords', 'Polymorphism']:\n df1[column] = df1[column].astype(str)\n df1[column] = df1[column].str.rsplit(';\\n')\n df1[column] = df1[column].str.join(', ')\ndf1.to_csv(output_file, index=False)\n\nprint('SCRAPING OF ' + code.upper() + ' FROM ' + input_file.upper() + ' COMPLETED')\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kimballh/ShapeShifter | [
"a9897cabec700726629466eea0159e75ba68ba91",
"a9897cabec700726629466eea0159e75ba68ba91"
] | [
"ShapeShifter/StataFile.py",
"ShapeShifter/JSONFile.py"
] | [
"import os\nimport tempfile\n\nimport pandas as pd\n\nfrom SSFile import SSFile\n\n\nclass StataFile(SSFile):\n\n def read_input_to_pandas(self, columnList=[], indexCol=\"Sample\"):\n if self.isGzipped:\n tempFile = super()._gunzip_to_temp_file()\n if len(columnList)>0:\n df=pd.read_stata(tempFile.name, columns=columnList)\n else:\n df=pd.read_stata(tempFile.name)\n os.remove(tempFile.name)\n return df\n if len(columnList) > 0:\n return pd.read_stata(self.filePath, columns=columnList)\n return pd.read_stata(self.filePath)\n\n def export_filter_results(self, inputSSFile, column_list=[], query=None, transpose=False, include_all_columns=False,\n gzip_results=False, index_col=\"Sample\"):\n df = None\n includeIndex = False\n null = 'NA'\n query, inputSSFile, df, includeIndex = super()._prep_for_export(inputSSFile, column_list, query, transpose,\n include_all_columns, df, includeIndex, index_col)\n # if not transpose:\n # df = df.set_index(indexCol) if indexCol in df.columns else df\n\n self.write_to_file(df, gzip_results)\n\n def write_to_file(self, df, gzipResults=False, includeIndex=False, null='NA', indexCol=\"Sample\", transpose=False):\n # Sometimes stata interprets columns as 'object' type which is no good (sometimes). This code may fix it?\n # However, as a result, boolean values are now converted to 1s and 0s\n type_pref = [int, float, str]\n for colname in list(df.select_dtypes(include=['object']).columns):\n for t in type_pref:\n try:\n df[colname] = df[colname].astype(t)\n print(\"Warning: True/False values may have been converted to 1/0 in output\")\n except (ValueError, TypeError) as e:\n pass\n\n df = df.set_index(indexCol) if indexCol in df.columns else df.set_index(df.columns[0])\n if gzipResults:\n #write to temp file\n tempFile = tempfile.NamedTemporaryFile(delete=False)\n df.to_stata(tempFile.name, write_index=True)\n tempFile.close()\n super()._gzip_results(tempFile.name, self.filePath)\n else:\n df.to_stata(self.filePath, write_index=True)\n",
"import pandas as pd\n\nfrom SSFile import SSFile\n\n\nclass JSONFile(SSFile):\n\n def read_input_to_pandas(self, columnList=[], indexCol=\"Sample\"):\n df = pd.read_json(self.filePath)\n df = df.reset_index()\n #todo: name the index column \"Sample\" instead of \"index\" and give a warning indicating that happened\n columns=columnList.copy()\n if len(columns) > 0:\n columns[columns.index(indexCol)] = 'index'\n df = df[columns]\n df.rename(columns={'index':'Sample'}, inplace=True)\n return df\n\n def export_filter_results(self, inputSSFile, column_list=[], query=None, transpose=False, include_all_columns=False,\n gzip_results=False, index_col=\"Sample\"):\n df = None\n includeIndex = False\n null = 'NA'\n query, inputSSFile, df, includeIndex = super()._prep_for_export(inputSSFile, column_list, query, transpose,\n include_all_columns, df, includeIndex, index_col)\n\n self.write_to_file(df, gzip_results)\n\n def write_to_file(self, df, gzipResults=False, includeIndex=False, null='NA', indexCol=\"Sample\", transpose=False):\n if not transpose:\n df = df.set_index(indexCol, drop=True) if indexCol in df.columns else df.set_index(df.columns[0], drop=True)\n if gzipResults:\n outFilePath = super()._append_gz(self.filePath)\n df.to_json(path_or_buf=outFilePath, compression='gzip')\n else:\n df.to_json(path_or_buf=self.filePath)"
] | [
[
"pandas.read_stata"
],
[
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
volcan01010/tephrange | [
"8268d2ff5e5d9d12fbd79ad2fb30acf28cbd7b1b"
] | [
"tephrange/atmos.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Functions for calculating the properties of a standard atmosphere at\na given altitude.\"\"\"\n\nimport numpy as np\n\n# Define physics parameters (in SI units):\nG = 9.80665 # Acceleration due to gravity.\nATM_GAS_CONSTANT = 287.05 # Specific gas constant for dry air.\nATM_DENSITY = 1.2250 # Atmospheric density at sea level\nATM_VISCOSITY = 1.7915e-5 # Atmospheric viscosity at sea level\n\n\ndef celcius(temperature):\n \"\"\"Converts a temperature from degrees Kelvin to degrees Celcius.\"\"\"\n return temperature - 273.15\n\n\ndef get_viscosity(altitude):\n \"\"\"Calculates the dynamic viscosity of the atmosphere at a\n given altitude (m) using the ICAO standard atmosphere.\"\"\"\n temp = get_atmos_temp_press(altitude)[0]\n\n # Dynamic viscosity calculation from NAME Physics.f90\n if temp > 273.15:\n viscosity = (1.718 + 0.0049*(temp - 273.15)) * 1e-5\n else:\n viscosity = (1.718 + 0.0049*(temp - 273.15) -\n 1.2e-5*(temp-273.15)**2) * 1e-5\n\n return viscosity\n\n\ndef get_density(altitude):\n \"\"\"Calculates the density of the atmosphere at a given altitude (m)\n using the ICAO standard atmosphere.\"\"\"\n temp, pressure = get_atmos_temp_press(altitude)\n atm_density = pressure / (ATM_GAS_CONSTANT * temp)\n return atm_density\n\n\ndef get_atmos_temp_press(altitude):\n \"\"\"Calculates temperature and pressure of the atmosphere at a given\n altitude (m) using the ICAO standard atmosphere.\"\"\"\n # Define internal constants\n temp0km = 288.15 # Temperature at 0km above mean sea level (K).\n temp11km = 216.65 # Temperature at 11km above mean sea level (K).\n temp20km = 216.65 # Temperature at 20km above mean sea level (K).\n lapse_rate_below_11km = 0.0065 # Lapse rate from 0 to 11km above mean sea level.\n lapse_rate_above_20km = -0.001 # Lapse rate at more than 20km above mean sea level.\n pressure0km = 101325 # Pressure at mean sea level (Pa).\n\n # Calculate anchor pressure levels\n pressure11km = pressure0km * \\\n (1 - lapse_rate_below_11km*11000 / temp0km) ** \\\n (G / (ATM_GAS_CONSTANT*lapse_rate_below_11km))\n pressure20km = pressure11km * np.exp(-G * 9000 /\n (ATM_GAS_CONSTANT*temp11km))\n\n # Interpolate between levels\n if altitude < 11000:\n pressure = pressure0km * \\\n (1 - lapse_rate_below_11km * altitude / temp0km) ** \\\n (G / (ATM_GAS_CONSTANT*lapse_rate_below_11km))\n temp = temp0km - lapse_rate_below_11km * altitude\n elif altitude < 20000:\n pressure = pressure11km * np.exp(-G * (altitude - 11000) /\n (ATM_GAS_CONSTANT*temp11km))\n temp = temp11km\n else:\n pressure = pressure20km * \\\n (1 - lapse_rate_above_20km * \\\n (altitude - 20000) / temp20km) ** \\\n (G / (ATM_GAS_CONSTANT*lapse_rate_above_20km)\n )\n temp = temp20km - lapse_rate_above_20km * (altitude - 20000)\n\n return temp, pressure\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
binwang-intel/ZeroMQ_test01 | [
"407b28d1d9a5b27984f60b74b65af653328fef56"
] | [
"publisher.py"
] | [
"\"\"\"A test that publishes NumPy arrays.\nUses REQ/REP (on PUB/SUB socket + 1) to synchronize\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2010 Brian Granger\n#\n# Distributed under the terms of the New BSD License. The full license is in\n# the file COPYING.BSD, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\nimport sys\nimport time\n\nimport zmq\nimport numpy\n\ndef sync(bind_to):\n # use bind socket + 1\n sync_with = ':'.join(bind_to.split(':')[:-1] +\n [str(int(bind_to.split(':')[-1]) + 1)])\n ctx = zmq.Context.instance()\n \n s = ctx.socket(zmq.REP)\n s.bind(sync_with)\n print(\"Waiting for subscriber to connect...\")\n s.recv()\n print(\" Done.\")\n s.send('GO')\n\ndef main():\n if len (sys.argv) != 4:\n print('usage: publisher <bind-to> <array-size> <array-count>')\n sys.exit (1)\n\n try:\n bind_to = sys.argv[1]\n array_size = int(sys.argv[2])\n array_count = int (sys.argv[3])\n except (ValueError, OverflowError) as e:\n print('array-size and array-count must be integers')\n sys.exit (1)\n\n ctx = zmq.Context()\n s = ctx.socket(zmq.PUB)\n s.bind(bind_to)\n\n sync(bind_to)\n\n print(\"Sending arrays...\")\n for i in range(array_count):\n a = numpy.random.rand(array_size, array_size)\n s.send_pyobj(a)\n print(\" Done.\")\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
numerology/tfx | [
"4d418c70a030e444cc8b97c6fbd52ee4c72d6eba"
] | [
"tfx/orchestration/airflow/airflow_component_test.py"
] | [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.orchestration.airflow.airflow_component.\"\"\"\n\nimport collections\nimport datetime\nimport functools\nimport os\nfrom unittest import mock\n\nfrom airflow import models\n\nimport tensorflow as tf\nfrom tfx import types\nfrom tfx.dsl.components.base import base_component\nfrom tfx.dsl.components.base import base_executor\nfrom tfx.dsl.components.base import executor_spec\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration.airflow import airflow_component\nfrom tfx.types import component_spec\n\n\nclass _ArtifactTypeA(types.Artifact):\n TYPE_NAME = 'ArtifactTypeA'\n\n\nclass _ArtifactTypeB(types.Artifact):\n TYPE_NAME = 'ArtifactTypeB'\n\n\nclass _FakeComponentSpec(types.ComponentSpec):\n PARAMETERS = {}\n INPUTS = {\n 'input': component_spec.ChannelParameter(type=_ArtifactTypeA),\n }\n OUTPUTS = {'output': component_spec.ChannelParameter(type=_ArtifactTypeB)}\n\n\nclass _FakeComponent(base_component.BaseComponent):\n\n SPEC_CLASS = types.ComponentSpec\n EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(base_executor.BaseExecutor)\n\n def __init__(self, spec: types.ComponentSpec):\n super(_FakeComponent, self).__init__(spec=spec)\n\n\nclass AirflowComponentTest(tf.test.TestCase):\n\n def setUp(self):\n super(AirflowComponentTest, self).setUp()\n self._component = _FakeComponent(\n _FakeComponentSpec(\n input=types.Channel(type=_ArtifactTypeA),\n output=types.Channel(type=_ArtifactTypeB)))\n self._pipeline_info = data_types.PipelineInfo('name', 'root')\n self._driver_args = data_types.DriverArgs(True)\n self._metadata_connection_config = metadata.sqlite_metadata_connection_config(\n os.path.join(\n os.environ.get('TEST_TMP_DIR', self.get_temp_dir()), 'metadata'))\n self._parent_dag = models.DAG(\n dag_id=self._pipeline_info.pipeline_name,\n start_date=datetime.datetime(2018, 1, 1),\n schedule_interval=None)\n\n def testAirflowAdaptor(self):\n fake_dagrun = collections.namedtuple('fake_dagrun', ['run_id'])\n mock_ti = mock.Mock()\n mock_ti.get_dagrun.return_value = fake_dagrun('run_id')\n mock_component_launcher = mock.Mock()\n mock_component_launcher_class = mock.Mock()\n mock_component_launcher_class.create.return_value = mock_component_launcher\n airflow_component._airflow_component_launcher(\n component=self._component,\n component_launcher_class=mock_component_launcher_class,\n pipeline_info=self._pipeline_info,\n driver_args=self._driver_args,\n metadata_connection_config=self._metadata_connection_config,\n beam_pipeline_args=[],\n additional_pipeline_args={},\n component_config=None,\n ti=mock_ti)\n mock_component_launcher_class.create.assert_called_once()\n arg_list = mock_component_launcher_class.create.call_args_list\n self.assertEqual(arg_list[0][1]['pipeline_info'].run_id, 'run_id')\n mock_component_launcher.launch.assert_called_once()\n\n @mock.patch.object(functools, 'partial', wraps=functools.partial)\n def 
testAirflowComponent(self, mock_functools_partial):\n mock_component_launcher_class = mock.Mock()\n airflow_component.AirflowComponent(\n parent_dag=self._parent_dag,\n component=self._component,\n component_launcher_class=mock_component_launcher_class,\n pipeline_info=self._pipeline_info,\n enable_cache=True,\n metadata_connection_config=self._metadata_connection_config,\n beam_pipeline_args=[],\n additional_pipeline_args={},\n component_config=None)\n # Airflow complained if we completely mock this function. So we \"wraps\" the\n # function. `partial` can be called multiple times from other than\n # AirflowComponent. We will check the first call only.\n mock_functools_partial.assert_called()\n args = mock_functools_partial.call_args_list[0][0]\n kwargs = mock_functools_partial.call_args_list[0][1]\n self.assertCountEqual(args,\n (airflow_component._airflow_component_launcher,))\n self.assertTrue(kwargs.pop('driver_args').enable_cache)\n self.assertEqual(\n kwargs, {\n 'component': self._component,\n 'component_launcher_class': mock_component_launcher_class,\n 'pipeline_info': self._pipeline_info,\n 'metadata_connection_config': self._metadata_connection_config,\n 'beam_pipeline_args': [],\n 'additional_pipeline_args': {},\n 'component_config': None\n })\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chrisbc/ffe-spark | [
"bda7f4b4a857f432a74a334afd7fa0ea5cc6714f"
] | [
"Fire_network_one_file_run.py"
] | [
"import datetime\nimport glob\nimport math\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import box\nimport networkx as nx\nfrom shapely.geometry import Point\nimport imageio\n\npd.options.mode.chained_assignment = None # default='warn'\n\npath = \"G:/Sync/FFE/Mesa\"\npath_output = \"G:\\Sync\\FFE\\FireNetwork\"\n\n\n# path = '/Users/alex/Google Drive/05_Sync/FFE/Mesa'\n# path_output = '/Users/alex/Google Drive/05_Sync/FFE/Mesa/output'\n\n\n# path = '/Users/alex/Google Drive/05_Sync/FFE/Mesa'\n\ndef load_data(file_name, minx, miny, maxx, maxy):\n # crop data\n bbox = box(minx, miny, maxx, maxy)\n # building point dataset\n gdf_buildings = gpd.read_file(os.path.join(path, file_name), bbox=bbox)\n # gdf_buildings.IgnProb_bl = 0.02\n # xmin,ymin,xmax,ymax = gdf_buildings.total_bounds\n return gdf_buildings\n\n\ndef wind_scenario():\n wind_data = pd.read_csv(os.path.join(path, 'GD_wind.csv'))\n i = np.random.randint(0, wind_data.shape[0])\n w = wind_data.iloc[i, 2]\n d = wind_data.iloc[i, 1]\n b = wind_data.iloc[i, 3]\n return w, d, b\n\n\ndef eudistance(v1, v2):\n return np.linalg.norm(v1 - v2)\n\n\ndef calculate_azimuth(x1, y1, x2, y2):\n azimuth = math.degrees(math.atan2((x2 - x1), (y2 - y1)))\n return 360 + azimuth\n\n\ndef plot(df, column_df):\n fig, ax = plt.subplots(1, 1)\n df.plot(column=column_df, ax=ax, legend=True)\n plt.show()\n\n\ndef build_edge_list(geodataframe, maximum_distance, polygon_file):\n # create arrays for different id combination\n n = np.arange(0, len(geodataframe))\n target = [n] * len(geodataframe)\n target = np.hstack(target)\n source = np.repeat(n, len(geodataframe))\n # put arrays in dataframe\n df = pd.DataFrame()\n df['source_id'] = source\n df['target_id'] = target\n # merge source attributes with source index\n geo_df = geodataframe.copy()\n geo_df['id'] = geo_df.index\n # create source / target gdf from gdf.columns of interest\n geo_df = geo_df[['id', 'TARGET_FID', 'X', 'Y', 'geometry', 'IgnProb_bl']]\n geo_df_TRG = geo_df.copy()\n geo_df_TRG.columns = ['target_' + str(col) for col in geo_df_TRG.columns]\n geo_df_SRC = geo_df.copy()\n geo_df_SRC.columns = ['source_' + str(col) for col in geo_df_SRC.columns]\n # merge data\n merged_data = pd.merge(df, geo_df_SRC, left_on='source_id', right_on='source_id', how='outer')\n merged_data = pd.merge(merged_data, geo_df_TRG, left_on='target_id', right_on='target_id', how='outer')\n merged_data.rename(columns={'source_id': 'source', 'target_id': 'target'}, inplace=True)\n # calculate distance for each source / target pair\n # create a df from polygon shape to get accurate distance\n # print(list(polygon_file))\n polygon = polygon_file[['TARGET_FID', 'geometry']]\n # print(list(polygon))\n source_poly = merged_data[['source_TARGET_FID']]\n target_poly = merged_data[['target_TARGET_FID']]\n # print(list(source_poly))\n src_poly = pd.merge(source_poly, polygon, left_on='source_TARGET_FID', right_on='TARGET_FID', how='left')\n trg_poly = pd.merge(target_poly, polygon, left_on='target_TARGET_FID', right_on='TARGET_FID', how='left')\n src_poly_gdf = gpd.GeoDataFrame(src_poly, geometry='geometry')\n trg_poly_gdf = gpd.GeoDataFrame(trg_poly, geometry='geometry')\n distance_series = src_poly_gdf.distance(trg_poly_gdf)\n # print(distance_series)\n\n # insert distance in merged data column\n merged_data['v1'] = merged_data.source_X - merged_data.target_X\n merged_data['v2'] = merged_data.source_Y - merged_data.target_Y\n # 
merged_data['euc_distance'] = np.hypot(merged_data.v1, merged_data.v2)\n merged_data['euc_distance'] = distance_series\n # remove when distance \"illegal\"\n valid_distance = merged_data['euc_distance'] < maximum_distance\n not_same_node = merged_data['euc_distance'] != 0\n data = merged_data[valid_distance & not_same_node]\n # calculate azimuth\n data['azimuth'] = np.degrees(np.arctan2(merged_data['v2'], merged_data['v1']))\n data['bearing'] = (data.azimuth + 360) % 360\n return data\n\n\ndef create_network(edge_list_dataframe):\n graph = nx.from_pandas_edgelist(edge_list_dataframe, edge_attr=True)\n # options = {'node_color': 'red', 'node_size': 50, 'width': 1, 'alpha': 0.4,\n # 'with_labels': False, 'font_weight': 'bold'}\n # nx.draw_kamada_kawai(graph, **options)\n # plt.show()\n return graph\n\n\n# run model\ndef set_initial_fire_to(df):\n \"\"\"Fine = 0, Fire = 1, Burned = 2\"\"\"\n df['RNG'] = np.random.uniform(0, 1, size=len(df)) # add for random suppression per building, df.shape[0])\n onFire = df['source_IgnProb_bl'] > df['RNG']\n ignitions = df[onFire]\n # source nodes ignited\n sources_on_fire = list(ignitions.source)\n sources_on_fire = list(dict.fromkeys(sources_on_fire))\n return sources_on_fire\n\n\ndef set_fire_to(df, existing_fires):\n are_set_on_fire = (df['source'].isin(existing_fires))\n spark = df[are_set_on_fire]\n # source nodes ignited\n sources_on_fire = list(spark.source)\n sources_on_fire = list(dict.fromkeys(sources_on_fire))\n return sources_on_fire\n\n\ndef fire_spreading(list_fires, list_burn, wind_speed, wind_bearing, suppression_threshold, step_value, data):\n # check the fire potential targets\n # print(\"fire list before spreading : {}, length : {}\".format(fire_list, len(fire_list)))\n are_potential_targets = (data['source'].isin(list_fires))\n are_not_already_burned = (~data['target'].isin(list_burn))\n df = data[are_potential_targets & are_not_already_burned]\n if df.empty:\n # print(\"no fires\")\n list_burn.extend(list(list_fires))\n list_burn = list(dict.fromkeys(list_burn))\n return [], list_burn # to break the step loop\n # set up additional CONDITIONS for fire spreading\n\n # neighbors selection from buffer\n df['buffer_geometry'] = gdf.geometry.buffer(gdf['d_long'] + wind_speed)\n\n are_neighbors = df['euc_distance'] < wind_speed\n # print(\"neighbors affected ? {}\".format(list(dict.fromkeys(list(are_neighbors)))))\n df = df[are_neighbors]\n # wind direction\n wind_bearing_max = wind_bearing + 45\n wind_bearing_min = wind_bearing - 45\n if wind_bearing == 360:\n wind_bearing_max = 45\n if wind_bearing <= 0: # should not be necessary\n wind_bearing_min = 0\n if wind_bearing == 999:\n wind_bearing_max = 999\n wind_bearing_min = 0\n are_under_the_wind = (df['bearing'] < wind_bearing_max) & (df['bearing'] > wind_bearing_min)\n # print(\"targets under the wind ? {}\".format(list(dict.fromkeys(list(are_under_the_wind)))))\n df = df[are_under_the_wind]\n # suppression\n df['random'] = np.random.uniform(0, 1, size=len(df))\n are_not_suppressed = df['random'] > suppression_threshold\n # print(\"fire suppressed ? 
{}\".format(list(dict.fromkeys(list(are_not_suppressed)))))\n df = df[are_not_suppressed]\n\n # spread fire based on condition\n fire_df = df\n # fire_df = df[are_neighbors & are_under_the_wind & are_not_suppressed] # issues with \"are_under_the_wind\n # print(len(fire_df.head(5)))\n # print(len(fire_df))\n list_burn.extend(list(list_fires))\n fire_df['step'] = step_value\n fire_df.to_csv(os.path.join(path_output, \"step{}_fire.csv\".format(step_value)))\n list_fires = list(dict.fromkeys(list(fire_df.target)))\n list_burn.extend(list(fire_df.target))\n list_burn = list(dict.fromkeys(list_burn))\n return list_fires, list_burn\n\n\ndef log_files_concatenate(prefix, scenario_count):\n list_df = []\n files = glob.glob(os.path.join(path_output, prefix))\n if files:\n for file in files:\n # print(file)\n df = pd.read_csv(os.path.join(path_output, file))\n list_df.append(df)\n os.remove(file)\n data = pd.concat(list_df)\n data['scenario'] = scenario_count\n data.to_csv(os.path.join(path_output, \"fire_scenario_{}.csv\".format(scenario_count)))\n else:\n print(\"no files to concatenate\")\n\n\ndef clean_up_file(prefix, path_path=path_output):\n files = glob.glob(os.path.join(path_path, prefix))\n for file in files:\n # print(file)\n os.remove(file)\n\n\ndef postprocessing(scenarios_recorded, burned_asset, edge_list, gdf_polygons):\n list_of_tuples = list(zip(scenarios_recorded, burned_asset))\n df = pd.DataFrame(list_of_tuples, columns=['scenarios', 'burned_asset_index'])\n # df['count'] = df['burned_asset_index'].value_counts().values\n df['count'] = df.groupby('burned_asset_index')['burned_asset_index'].transform('count')\n print(df.describe())\n df = df[['burned_asset_index', 'count']].drop_duplicates()\n edge = edge_list[\n ['source', 'source_TARGET_FID', 'source_X', 'source_Y', 'source_geometry']]\n df_id = pd.merge(df, edge, left_on='burned_asset_index', right_on='source', how='left')\n # print(list(df_id))\n df_count = pd.merge(gdf_polygons, df_id, left_on='TARGET_FID', right_on='source_TARGET_FID', how='outer')\n df_count = df_count.drop_duplicates()\n dataframe = pd.DataFrame(df_count.drop(columns=['geometry', 'source_geometry']))\n dataframe = dataframe.dropna()\n fig, ax = plt.subplots(1, 1)\n df_count.plot(column='count', cmap='RdYlBu_r', ax=ax, legend=True)\n ax.title.set_text(\"Burned buildings after {} scenarios\".format(max(scenarios_recorded)))\n plt.show()\n df_count = df_count.drop(columns=['source', 'source_TARGET_FID', 'source_X', 'source_Y', 'source_geometry'])\n df_count.to_csv(os.path.join(path_output, \"results.csv\"))\n # df_count.to_file(os.path.join(path_output, \"results.shp\"))\n return df_count, dataframe\n\n\n# set up & load input data\n# gdf = load_data(\"buildings_raw_pts.shp\", 1748570, 5426959, 1748841, 5427115)\ngdf_polygon = load_data(\"buildings_raw.shp\", 1748000, 5424148, 1750000, 5427600)\ngdf_polygon[\"area\"] = gdf_polygon['geometry'].area # m2\ngdf = gdf_polygon.copy()\ngdf['geometry'] = gdf['geometry'].centroid\ngdf['X'] = gdf.centroid.x\ngdf['Y'] = gdf.centroid.y\ngdf['d_short'] = gdf_polygon.exterior.distance(gdf)\ngdf['d_long'] = gdf['area'] / gdf['d_short']\n\n# create edge list and network\nedges = build_edge_list(gdf, 45, gdf_polygon)\n\n# create edges\nG = create_network(edges)\n\n\n#################################\n# set number of scenarios\nnumber_of_scenarios = 10\n# display of the input data\nprint(\"{} assets loaded\".format(len(gdf)))\nfig, ax = plt.subplots(2, 2)\n# gdf.plot(column='area', cmap='hsv', ax=ax[0, 0], 
legend=True)\ngdf_polygon.plot(column='area', cmap='hsv', ax=ax[0, 0], legend=True)\n# gdf.plot(column='TARGET_FID', cmap='hsv', ax=ax[1, 0], legend=True)\noptions = {'node_color': 'red', 'node_size': 50, 'width': 1, 'alpha': 0.4,\n 'with_labels': False, 'font_weight': 'bold'}\nnx.draw_kamada_kawai(G, **options, ax=ax[1, 1])\nax[0,0].title.set_text(\"area\")\nax[0,1].title.set_text(\"area\")\nax[1,0].title.set_text('FID')\nax[1,1].title.set_text('Network display')\nplt.tight_layout()\nplt.savefig(os.path.join(path_output, \"inputs_{}.png\".format(number_of_scenarios)))\nplt.show()\nplt.close(fig)\n################################\n\n\n# run model\nclean_up_file(\"*csv\")\nscenarios_list = []\nlog_burned = [] # no removing duplicate\n# --- SCENARIOS\nt = datetime.datetime.now()\nfor scenario in range(number_of_scenarios):\n t0 = datetime.datetime.now()\n burn_list = []\n print(\"--- SCENARIO : {}\".format(scenario))\n # print(\"initiate fire\")\n fire_list = set_initial_fire_to(edges)\n x = fire_list\n # print(\"fire list : {}, length : {}\".format(fire_list, len(fire_list)))\n # print(\"fires list in scenario loop: {}, length : {}\".format(fire_list, len(fire_list)))\n if len(fire_list) == 0:\n print(\"no fire\")\n continue\n w_direction, w_speed, w_bearing = wind_scenario()\n # print((\"critical distance : {}, wind bearing : {}\".format(w_speed, w_bearing)))\n # --------- STEPS\n for step in range(len(edges)):\n print(\"--------- STEP : {}\".format(step))\n fire_list = set_fire_to(edges, fire_list)\n y = fire_list\n # print(\"fire datasets are identical with initial fire : {}\".format(set(x) == set(y)))\n # print(\"fire list : {}, length : {}\".format(fire_list, len(fire_list)))\n # print(\"burn list : {}, length : {}\".format(burn_list, len(burn_list)))\n # print(\"spread fire\")\n fire_list, burn_list = fire_spreading(fire_list, burn_list, w_speed, w_bearing, 0, step, edges)\n if len(fire_list) == 0:\n # print(\"no fires\")\n break\n # print(\"fires list : {}, length : {}\".format(fire_list, len(fire_list)))\n # print(\"burn list : {}, length : {}\".format(burn_list, len(burn_list)))\n log_burned.extend(burn_list)\n scenarios_list.extend([scenario] * len(burn_list))\n # print(\"log all burn list : {}, length : {}\".format(log_burned, len(log_burned)))\n # print(scenarios_list)\n\n log_files_concatenate('step*', scenario)\n t1 = datetime.datetime.now()\n print(\"..... took : {}\".format(t1 - t0))\nt2 = datetime.datetime.now()\nprint(\"total time : {}\".format(t2 - t))\n\ncount_gdf, count_df = postprocessing(scenarios_list, log_burned, edges, gdf_polygon)"
] | [
[
"numpy.hstack",
"pandas.merge",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"pandas.DataFrame",
"numpy.arctan2",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
milmor/GPT | [
"e88164eb890c939ea97f83ac1e7792110939b466"
] | [
"train.py"
] | [
"'''\nAuthor: Emilio Morales ([email protected])\n Mar 2022\n'''\nimport argparse\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disable tensorflow debugging logs\nimport time\nimport tensorflow as tf\nimport tensorflow_text as text\nfrom model import GPT\nfrom utils import *\nfrom hparams import hparams\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\n\ndef load_file(filename):\n raw_text = tf.io.read_file(filename)\n return raw_text\n\n\ndef preprocess(raw_text, maxlen, vocab_file):\n tokenizer = text.BertTokenizer(vocab_file)\n tokenized_text = tokenizer.tokenize(raw_text).merge_dims(1, -1) \n trimmer = text.RoundRobinTrimmer(max_seq_length=maxlen + 1)\n trimmed_feat = trimmer.trim([tokenized_text])\n input_word_ids, _ = text.pad_model_inputs(input=trimmed_feat[0], max_seq_length=maxlen + 1)\n x = input_word_ids[:, :-1]\n y = input_word_ids[:, 1:]\n return x, y\n\n\ndef create_ds(file_pattern, batch_size, maxlen, vocab_file):\n text_paths = tf.data.Dataset.list_files(file_pattern)\n BUFFER_SIZE = tf.data.experimental.cardinality(text_paths)\n print(f'Train dataset size: {BUFFER_SIZE}')\n text_paths = text_paths.cache().shuffle(BUFFER_SIZE)\n\n dataset = text_paths.map(load_file, \n num_parallel_calls=AUTOTUNE).batch(batch_size, num_parallel_calls=AUTOTUNE)\n dataset = dataset.map(lambda filename: preprocess(filename, maxlen, vocab_file), \n num_parallel_calls=AUTOTUNE).prefetch(AUTOTUNE)\n return dataset\n\n\ndef train(args):\n print('\\n#########')\n print('GPT Train')\n print('#########\\n')\n file_pattern = args.file_pattern\n model_dir = args.model_dir\n vocab_file = args.vocab_file\n build_vocab = args.build_vocab\n epochs = args.epochs\n ckpt_interval = args.ckpt_interval\n max_ckpt_to_keep = args.max_ckpt_to_keep\n context = args.context\n\n model = GPT(vocab_size=hparams['vocab_size'], \n maxlen=hparams['maxlen'], emb_dim=hparams['emb_dim'],\n heads=hparams['heads'], mlp_dim=hparams['mlp_dim'],\n depth=hparams['depth'], rate=hparams['rate'], \n initializer=hparams['initializer'])\n\n if hparams['decay_lr']:\n lr = tf.keras.optimizers.schedules.CosineDecay(hparams['learning_rate'], \n hparams['decay_steps'])\n else:\n lr = hparams['learning_rate']\n\n optimizer = tf.keras.optimizers.Adam(lr, \n beta_1=hparams['beta_1'], \n beta_2=hparams['beta_2'])\n\n loss_function = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n if build_vocab:\n build_vocabulary(file_pattern, hparams['vocab_size'], vocab_file)\n print(f'Build {vocab_file}')\n else:\n tokenizer = text.BertTokenizer(vocab_file)\n print(f'{vocab_file} loaded')\n\n dataset = create_ds(file_pattern, hparams['batch_size'], hparams['maxlen'], vocab_file)\n tokenizer = text.BertTokenizer(vocab_file)\n\n log_dir = os.path.join(model_dir, 'log-dir')\n writer = tf.summary.create_file_writer(log_dir)\n\n checkpoint_dir = os.path.join(model_dir, 'training-checkpoints')\n ckpt = tf.train.Checkpoint(optimizer=optimizer,\n model=model,\n epoch=tf.Variable(0))\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, directory=checkpoint_dir, \n max_to_keep=max_ckpt_to_keep)\n\n if ckpt_manager.latest_checkpoint: \n ckpt.restore(ckpt_manager.latest_checkpoint)\n print(f'Checkpoint restored from {ckpt_manager.latest_checkpoint} at epoch {int(ckpt.epoch)}')\n ckpt.epoch.assign_add(1)\n start_epoch = int(ckpt.epoch)\n\n train_loss_avg = tf.keras.metrics.Mean(name='train_loss')\n\n @tf.function\n def train_step(inp, tar):\n with tf.GradientTape() as tape:\n predictions = model(inp, training=True)\n loss = loss_function(tar, 
predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n \n if hparams['clip_global_norm']:\n gradients, _ = tf.clip_by_global_norm(gradients, hparams['clip_norm'])\n \n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n train_loss_avg(loss)\n\n for epoch in range(start_epoch, epochs):\n start = time.time()\n for inp, tar in dataset:\n train_step(inp, tar)\n \n print(f'\\nTime taken for epoch {epoch} is {time.time() - start:.2f} secs')\n print(f'Loss: {train_loss_avg.result():.4f}')\n sample_text = sample(model, context, hparams['maxlen'], tokenizer)\n print(f'Sample text: \\n{sample_text}')\n \n with writer.as_default():\n tf.summary.scalar('train_loss', train_loss_avg.result(), step=epoch)\n \n train_loss_avg.reset_states()\n \n if epoch % ckpt_interval == 0:\n ckpt_manager.save(epoch)\n print(f'Checkpoint saved at epoch {epoch}\\n') \n\n ckpt.epoch.assign_add(1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file_pattern')\n parser.add_argument('--model_dir', default='model-1')\n parser.add_argument('--vocab_file', default='vocab.txt')\n parser.add_argument('--build_vocab', default=False)\n parser.add_argument('--epochs', type=int, default=10000) \n parser.add_argument('--ckpt_interval', type=int, default=5)\n parser.add_argument('--max_ckpt_to_keep', type=int, default=3) \n parser.add_argument('--context', default='Enter context here...') \n args = parser.parse_args()\n\n train(args)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.train.CheckpointManager",
"tensorflow.Variable",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.optimizers.schedules.CosineDecay",
"tensorflow.data.Dataset.list_files",
"tensorflow.keras.optimizers.Adam",
"tensorflow.GradientTape",
"tensorflow.clip_by_global_norm",
"tensorflow.io.read_file",
"tensorflow.data.experimental.cardinality",
"tensorflow.keras.metrics.Mean",
"tensorflow.summary.create_file_writer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oeway/redner | [
"3bb5541ccd72d2497ac8e5f6bc09990618b317ec"
] | [
"pyredner/camera.py"
] | [
"import torch\nimport pyredner.transform as transform\nimport redner\nimport math\nimport pyredner\nfrom typing import Tuple, Optional, List\n\nclass Camera:\n \"\"\"\n Redner supports four types of cameras\\: perspective, orthographic, fisheye, and panorama.\n The camera takes a look at transform or a cam_to_world matrix to\n transform from camera local space to world space. It also can optionally\n take an intrinsic matrix that models field of view and camera skew.\n\n Args\n ====\n position: Optional[torch.Tensor]\n the origin of the camera, 1-d tensor with size 3 and type float32\n look_at: Optional[torch.Tensor]\n the point camera is looking at, 1-d tensor with size 3 and type float32\n up: Optional[torch.Tensor]\n the up vector of the camera, 1-d tensor with size 3 and type float32\n fov: Optional[torch.Tensor]\n the field of view of the camera in angle, \n no effect if the camera is a fisheye or panorama camera, \n 1-d tensor with size 1 and type float32\n clip_near: float\n the near clipping plane of the camera, need to > 0\n resolution: Tuple[int, int]\n the size of the output image in (height, width)\n viewport: Optional[Tuple[int, int, int, int]]\n optional viewport argument for rendering only a region of an image in\n (left_top_y, left_top_x, bottom_right_y, bottom_right_x),\n bottom_right is not inclusive.\n if set to None the viewport is the whole image (i.e., (0, 0, cam.height, cam.width))\n cam_to_world: Optional[torch.Tensor]\n overrides position, look_at, up vectors\n 4x4 matrix, optional\n intrinsic_mat: Optional[torch.Tensor]\n a matrix that transforms a point in camera space before the point\n is projected to 2D screen space\n used for modelling field of view and camera skewing\n after the multiplication the point should be in\n [-1, 1/aspect_ratio] x [1, -1/aspect_ratio] in homogeneous coordinates\n the projection is then carried by the specific camera types\n perspective camera normalizes the homogeneous coordinates\n while orthogonal camera drop the Z coordinate.\n ignored by fisheye or panorama cameras\n overrides fov\n 3x3 matrix, optional\n distortion_params: Optional[torch.Tensor]\n an array describing the coefficient of a Brown–Conrady lens distortion model.\n the array is expected to be 1D with size of 8. 
the first six coefficients describes\n the parameters of the rational polynomial for radial distortion (k1~k6) and\n the last two coefficients are for the tangential distortion (p1~p2).\n see https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\n for more details.\n camera_type: render.camera_type\n the type of the camera (perspective, orthographic, fisheye, or panorama)\n fisheye: bool\n whether the camera is a fisheye camera\n (legacy parameter just to ensure compatibility).\n \"\"\"\n def __init__(self,\n position: Optional[torch.Tensor] = None,\n look_at: Optional[torch.Tensor] = None,\n up: Optional[torch.Tensor] = None,\n fov: Optional[torch.Tensor] = None,\n clip_near: float = 1e-4,\n resolution: Tuple[int, int] = (256, 256),\n viewport: Optional[Tuple[int, int, int, int]] = None,\n cam_to_world: Optional[torch.Tensor] = None,\n intrinsic_mat: Optional[torch.Tensor] = None,\n distortion_params: Optional[torch.Tensor] = None,\n camera_type = pyredner.camera_type.perspective,\n fisheye: bool = False):\n if position is not None:\n assert(position.dtype == torch.float32)\n assert(len(position.shape) == 1 and position.shape[0] == 3)\n if look_at is not None:\n assert(look_at.dtype == torch.float32)\n assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)\n if up is not None:\n assert(up.dtype == torch.float32)\n assert(len(up.shape) == 1 and up.shape[0] == 3)\n if fov is not None:\n assert(fov.dtype == torch.float32)\n assert(len(fov.shape) == 1 and fov.shape[0] == 1)\n if cam_to_world is not None:\n assert(cam_to_world.dtype == torch.float32)\n assert(len(cam_to_world.shape) == 2 and cam_to_world.shape[0] == 4 and cam_to_world.shape[1] == 4)\n if intrinsic_mat is not None:\n assert(intrinsic_mat.dtype == torch.float32)\n assert(len(intrinsic_mat.shape) == 2 and intrinsic_mat.shape[0] == 3 and intrinsic_mat.shape[1] == 3)\n assert(isinstance(clip_near, float))\n if position is None and look_at is None and up is None:\n assert(cam_to_world is not None)\n\n self.position = position\n self.look_at = look_at\n self.up = up\n self._fov = fov\n self._cam_to_world = cam_to_world\n if cam_to_world is not None:\n self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()\n else:\n self.world_to_cam = None\n if intrinsic_mat is None:\n if camera_type == redner.CameraType.perspective:\n fov_factor = 1.0 / torch.tan(transform.radians(0.5 * fov))\n o = torch.ones([1], dtype=torch.float32, device = fov_factor.device)\n diag = torch.cat([fov_factor, fov_factor, o], 0)\n self._intrinsic_mat = torch.diag(diag).contiguous()\n else:\n self._intrinsic_mat = torch.eye(3, dtype=torch.float32)\n else:\n self._intrinsic_mat = intrinsic_mat\n self.intrinsic_mat_inv = torch.inverse(self.intrinsic_mat).contiguous()\n self.distortion_params = distortion_params\n self.clip_near = clip_near\n self.resolution = resolution\n self.viewport = viewport\n self.camera_type = camera_type\n if fisheye:\n self.camera_type = pyredner.camera_type.fisheye\n\n @property\n def fov(self):\n return self._fov\n\n @fov.setter\n def fov(self, value):\n self._fov = value\n fov_factor = 1.0 / torch.tan(transform.radians(0.5 * self._fov))\n o = torch.ones([1], dtype=torch.float32, device = fov_factor.device)\n diag = torch.cat([fov_factor, fov_factor, o], 0)\n self._intrinsic_mat = torch.diag(diag).contiguous()\n self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()\n\n @property\n def intrinsic_mat(self):\n return self._intrinsic_mat\n\n @intrinsic_mat.setter\n def 
intrinsic_mat(self, value):\n if value is not None:\n self._intrinsic_mat = value\n self.intrinsic_mat_inv = torch.inverse(self._intrinsic_mat).contiguous()\n else:\n assert(self.fov is not None)\n self.fov = self._fov\n\n @property\n def cam_to_world(self):\n return self._cam_to_world\n\n @cam_to_world.setter\n def cam_to_world(self, value):\n if value is not None:\n self._cam_to_world = value\n self.world_to_cam = torch.inverse(self.cam_to_world).contiguous()\n else:\n self._cam_to_world = None\n self.world_to_cam = None\n\n def state_dict(self):\n return {\n 'position': self._position,\n 'look_at': self._look_at,\n 'up': self._up,\n 'fov': self._fov,\n 'cam_to_world': self._cam_to_world,\n 'intrinsic_mat': self._intrinsic_mat,\n 'clip_near': self.clip_near,\n 'resolution': self.resolution,\n 'camera_type': self.camera_type\n }\n\n @classmethod\n def load_state_dict(cls, state_dict):\n out = cls.__new__(Camera)\n out._position = state_dict['position']\n out._look_at = state_dict['look_at']\n out._up = state_dict['up']\n out._fov = state_dict['fov']\n out.cam_to_world = state_dict['cam_to_world']\n out.intrinsic_mat = state_dict['intrinsic_mat']\n out.clip_near = state_dict['clip_near']\n out.resolution = state_dict['resolution']\n out.camera_type = state_dict['camera_type']\n return out\n\ndef automatic_camera_placement(shapes: List,\n resolution: Tuple[int, int]):\n \"\"\"\n Given a list of objects or shapes, generates camera parameters automatically\n using the bounding boxes of the shapes. Place the camera at\n some distances from the shapes, so that it can see all of them.\n Inspired by https://github.com/mitsuba-renderer/mitsuba/blob/master/src/librender/scene.cpp#L286\n\n Parameters\n ==========\n shapes: List\n a list of redner Shape or Object\n resolution: Tuple[int, int]\n the size of the output image in (height, width)\n\n Returns\n =======\n pyredner.Camera\n a camera that can see all the objects.\n \"\"\"\n aabb_min = torch.tensor((float('inf'), float('inf'), float('inf')))\n aabb_max = -torch.tensor((float('inf'), float('inf'), float('inf')))\n for shape in shapes:\n v = shape.vertices\n v_min = torch.min(v, 0)[0].cpu()\n v_max = torch.max(v, 0)[0].cpu()\n aabb_min = torch.min(aabb_min, v_min)\n aabb_max = torch.max(aabb_max, v_max)\n assert(torch.isfinite(aabb_min).all() and torch.isfinite(aabb_max).all())\n center = (aabb_max + aabb_min) * 0.5\n extents = aabb_max - aabb_min\n max_extents_xy = torch.max(extents[0], extents[1])\n distance = max_extents_xy / (2 * math.tan(45 * 0.5 * math.pi / 180.0))\n max_extents_xyz = torch.max(extents[2], max_extents_xy) \n return Camera(position = torch.tensor((center[0], center[1], aabb_min[2] - distance)),\n look_at = center,\n up = torch.tensor((0.0, 1.0, 0.0)),\n fov = torch.tensor([45.0]),\n clip_near = 0.001 * float(distance),\n resolution = resolution)\n\ndef generate_intrinsic_mat(fx: torch.Tensor,\n fy: torch.Tensor,\n skew: torch.Tensor,\n x0: torch.Tensor,\n y0: torch.Tensor):\n \"\"\"\n | Generate the following 3x3 intrinsic matrix given the parameters.\n | fx, skew, x0\n | 0, fy, y0\n | 0, 0, 1\n\n Parameters\n ==========\n fx: torch.Tensor\n Focal length at x dimension. 1D tensor with size 1.\n fy: torch.Tensor\n Focal length at y dimension. 1D tensor with size 1.\n skew: torch.Tensor\n Axis skew parameter describing shearing transform. 1D tensor with size 1.\n x0: torch.Tensor\n Principle point offset at x dimension. 1D tensor with size 1.\n y0: torch.Tensor\n Principle point offset at y dimension. 
1D tensor with size 1.\n\n Returns\n =======\n torch.Tensor\n 3x3 intrinsic matrix\n \"\"\"\n z = torch.zeros_like(fx)\n o = torch.ones_like(fx)\n row0 = torch.cat([fx, skew, x0])\n row1 = torch.cat([ z, fy, y0])\n row2 = torch.cat([ z, z, o])\n return torch.stack([row0, row1, row2]).contiguous()\n"
] | [
[
"torch.ones",
"torch.max",
"torch.cat",
"torch.min",
"torch.zeros_like",
"torch.eye",
"torch.tensor",
"torch.inverse",
"torch.isfinite",
"torch.diag",
"torch.stack",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yanzhenxing123/illegal_fund_raising_forecast | [
"dcff8f3d73c1f1ea3548e8d25afc9fe5233e3f64"
] | [
"forecast/views.py"
] | [
"from django.shortcuts import render\nfrom rest_framework.views import APIView\n\nfrom illegal_fund_raising_forecast import settings\nfrom .serializer import TestDatasetSerializer, TrainDatasetSerializer\nfrom rest_framework.response import Response\nfrom .models import TrainDataset\nfrom utils import Res\nimport json\nimport random\nimport pandas as pd\n\n\n\nclass TestUploadView(APIView):\n def post(self, request):\n base_info = request.FILES.get(\"base_info\")\n annual_report_info = request.FILES.get(\"annual_report_info\")\n tax_info = request.FILES.get(\"tax_info\")\n change_info = request.FILES.get(\"change_info\")\n news_info = request.FILES.get(\"news_info\")\n other_info = request.FILES.get(\"other_info\")\n data = {\n \"base_info\": base_info,\n \"annual_report_info\": annual_report_info,\n \"tax_info\": tax_info,\n \"change_info\": change_info,\n \"news_info\": news_info,\n \"other_info\": other_info,\n \"user_id\": request.user.id,\n }\n serializer = TestDatasetSerializer(data=data)\n obj = serializer.create(data)\n return Response(status=200,\n data={\"code\": 200, \"msg\": \"上传成功\", \"data\": {\n \"test_id\": obj.id,\n \"dataset_lines\": random.randint(7731, 1000000),\n \"p_and_n_proportion\": {\n \"positive\": random.randint(1000, 10000),\n \"negative\": random.randint(1000, 10000),\n },\n \"area_distribution\": [\n {\n \"name\": '北京',\n \"selected\": True\n },\n {\n \"name\": '天津',\n 'selected': True\n },\n {\n 'name': '上海',\n 'selected': True\n },\n {\n \"name\": '重庆',\n 'selected': True\n },\n {\n 'name': '河北',\n 'selected': True\n },\n {\n 'name': '河南',\n 'selected': True\n },\n {\n 'name': '四川',\n 'selected': True\n }\n ]}}\n )\n\n\nclass TrainUploadView(APIView):\n def post(self, request):\n train = request.FILES.get(\"train\")\n data = {\n \"train\": train,\n \"user_id\": request.user.id,\n }\n serializer = TrainDatasetSerializer(data=data)\n obj = serializer.create(validated_data=data)\n return Response(status=200,\n data={\"code\": 200, \"msg\": \"上传成功\", \"data\": {\n \"train_id\": obj.id,\n \"dataset_lines\": random.randint(7731, 1000000),\n \"p_and_n_proportion\": {\n \"positive\": random.randint(1000, 10000),\n \"negative\": random.randint(1000, 10000),\n },\n \"area_distribution\": [\n {\n \"name\": '北京',\n \"selected\": True\n },\n {\n \"name\": '天津',\n 'selected': True\n },\n {\n 'name': '上海',\n 'selected': True\n },\n {\n \"name\": '重庆',\n 'selected': True\n },\n {\n 'name': '河北',\n 'selected': True\n },\n {\n 'name': '河南',\n 'selected': True\n },\n {\n 'name': '四川',\n 'selected': True\n }\n ],\n \"url\": obj.train.url,\n }}\n )\n\n\nclass TrainDownloadView(APIView):\n def get(self, request):\n train_id = request.query_params.get(\"train_id\")\n if not train_id:\n return Response(json.loads(Res(code=400, msg=\"train_id is None\", data=None).json()))\n try:\n obj = TrainDataset.objects.get(id=train_id)\n url = obj.train.url\n except Exception as e:\n url = None\n return Response(json.loads(Res(code=200, msg=\"success\",\n data={\n \"train_url\": url\n }\n ).json()))\n\n\nclass TrainStartView(APIView):\n def get(self):\n pass\n\n\nclass ResultView(APIView):\n def get(self, request):\n pageIndex = int(request.query_params.get(\"pageIndex\"))\n pageSize = int(request.query_params.get(\"pageSize\"))\n df = pd.read_csv(settings.MEDIA_ROOT + \"/testdata.csv\")\n if pageIndex * pageSize > len(df) or pageIndex * pageSize <= 0:\n return Response({\"code\": 400, 'msg': \"参数有误\"})\n start_index = 0 if pageIndex * pageSize <= 0 else (pageIndex - 1) * pageSize\n df_ = 
df.iloc[start_index:pageIndex * pageSize, :]\n res = list(json.loads(df_.to_json(orient='index')).values())\n\n return Response(\n {\n \"code\": 200,\n \"msg\": \"\",\n 'data': res,\n \"pageTotal\": len(df)\n }\n )\n\n\n\n\nclass ConsoleView(APIView):\n def get(self, request):\n return Response(\n {\n \"code\": 200,\n \"msg\": \"success\",\n \"data\": {\n \"running\": 0,\n \"train_lines\": 0,\n \"forecast_times\": 0\n }\n\n }\n )\n\n\nclass CompanyView(APIView):\n def get(self, request):\n return Response(\n {\n \"code\": 200,\n \"msg\": \"\",\n \"data\":\n [\n {\n \"labels\": ['合资', '独资', '国有', '私有', '集体所有制', '股份制', '有限责任制'],\n \"datasets\": {\n \"data\": [random.randint(100, 10000) for _ in range(7)]\n },\n },\n {\n \"labels\": ['地产', '银行', '互联网', '硬件', '半导体', '销售', '餐饮'],\n \"datasets\": {\n \"data\": [random.randint(100, 10000) for _ in range(7)]\n },\n\n },\n {\n \"labels\": ['采掘', '制造', '批发', '零售'],\n \"datasets\": {\n \"data\": [random.randint(100, 10000000) for _ in range(4)]\n },\n }\n ]\n }\n )\n\nclass ScoreView(APIView):\n def get(self, request):\n return Response(\n {\n \"code\": 200,\n \"msg\": \"\",\n \"data\": {\n \"f1_score\": random.random(),\n }\n }\n )\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
oshaikh13/fairseq | [
"d81142405076a4d322985936316cfff9d2460273"
] | [
"fairseq/models/nat/levenshtein_transformer.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq.iterative_refinement_generator import DecoderOut\nfrom fairseq.models import register_model, register_model_architecture\nfrom fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder\nfrom fairseq.models.transformer import Embedding, TransformerDecoderLayer\nfrom fairseq.modules.transformer_sentence_encoder import init_bert_params\n\nfrom .levenshtein_utils import (\n _apply_del_words,\n _apply_ins_masks,\n _apply_ins_words,\n _fill,\n _get_del_targets,\n _get_ins_targets,\n _skip,\n _skip_encoder_out,\n)\n\n\n@register_model(\"levenshtein_transformer\")\nclass LevenshteinTransformerModel(FairseqNATModel):\n @property\n def allow_length_beam(self):\n return False\n\n @staticmethod\n def add_args(parser):\n FairseqNATModel.add_args(parser)\n parser.add_argument(\n \"--early-exit\",\n default=\"6,6,6\",\n type=str,\n help=\"number of decoder layers before word_del, mask_ins, word_ins\",\n )\n parser.add_argument(\n \"--no-share-discriminator\",\n action=\"store_true\",\n help=\"separate parameters for discriminator\",\n )\n parser.add_argument(\n \"--no-share-maskpredictor\",\n action=\"store_true\",\n help=\"separate parameters for mask-predictor\",\n )\n parser.add_argument(\n \"--share-discriminator-maskpredictor\",\n action=\"store_true\",\n help=\"share the parameters for both mask-predictor and discriminator\",\n )\n parser.add_argument(\n \"--sampling-for-deletion\",\n action=\"store_true\",\n help=\"instead of argmax, use sampling to predict the tokens\",\n )\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens):\n decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens)\n if getattr(args, \"apply_bert_init\", False):\n decoder.apply(init_bert_params)\n return decoder\n\n def forward(\n self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs\n ):\n\n assert tgt_tokens is not None, \"forward function only supports training.\"\n\n # encoding\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)\n\n # generate training labels for insertion\n masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(\n prev_output_tokens, tgt_tokens, self.pad, self.unk\n )\n mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction\n mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)\n\n mask_ins_out, _ = self.decoder.forward_mask_ins(\n normalize=False,\n prev_output_tokens=prev_output_tokens,\n encoder_out=encoder_out,\n )\n word_ins_out, _ = self.decoder.forward_word_ins(\n normalize=False,\n prev_output_tokens=masked_tgt_tokens,\n encoder_out=encoder_out,\n )\n\n # make online prediction\n if self.decoder.sampling_for_deletion:\n word_predictions = torch.multinomial(\n F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1\n ).view(word_ins_out.size(0), -1)\n else:\n word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1]\n\n word_predictions.masked_scatter_(\n ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks]\n )\n\n # generate training labels for deletion\n word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad)\n word_del_out, _ = self.decoder.forward_word_del(\n normalize=False,\n prev_output_tokens=word_predictions,\n encoder_out=encoder_out,\n )\n word_del_masks = 
word_predictions.ne(self.pad)\n\n return {\n \"mask_ins\": {\n \"out\": mask_ins_out,\n \"tgt\": mask_ins_targets,\n \"mask\": mask_ins_masks,\n \"ls\": 0.01,\n },\n \"word_ins\": {\n \"out\": word_ins_out,\n \"tgt\": tgt_tokens,\n \"mask\": masked_tgt_masks,\n \"ls\": self.args.label_smoothing,\n \"nll_loss\": True,\n },\n \"word_del\": {\n \"out\": word_del_out,\n \"tgt\": word_del_targets,\n \"mask\": word_del_masks,\n },\n }\n\n def forward_decoder(\n self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs\n ):\n\n output_tokens = decoder_out.output_tokens\n output_scores = decoder_out.output_scores\n attn = decoder_out.attn\n history = decoder_out.history\n\n bsz = output_tokens.size(0)\n if max_ratio is None:\n max_lens = torch.zeros_like(output_tokens).fill_(255)\n else:\n if encoder_out.encoder_padding_mask is None:\n max_src_len = encoder_out.encoder_out.size(0)\n src_lens = encoder_out.encoder_out.new(bsz).fill_(max_src_len)\n else:\n src_lens = (~encoder_out.encoder_padding_mask).sum(1)\n max_lens = (src_lens * max_ratio).clamp(min=10).long()\n\n # delete words\n # do not delete tokens if it is <s> </s>\n can_del_word = output_tokens.ne(self.pad).sum(1) > 2\n if can_del_word.sum() != 0: # we cannot delete, skip\n word_del_score, word_del_attn = self.decoder.forward_word_del(\n normalize=True,\n prev_output_tokens=_skip(output_tokens, can_del_word),\n encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word),\n )\n word_del_pred = word_del_score.max(-1)[1].bool()\n\n _tokens, _scores, _attn = _apply_del_words(\n output_tokens[can_del_word],\n output_scores[can_del_word],\n word_del_attn,\n word_del_pred,\n self.pad,\n self.bos,\n self.eos,\n )\n output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)\n output_scores = _fill(output_scores, can_del_word, _scores, 0)\n attn = _fill(attn, can_del_word, _attn, 0.0)\n\n if history is not None:\n history.append(output_tokens.clone())\n\n # insert placeholders\n can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens\n if can_ins_mask.sum() != 0:\n mask_ins_score, _ = self.decoder.forward_mask_ins(\n normalize=True,\n prev_output_tokens=_skip(output_tokens, can_ins_mask),\n encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask),\n )\n if eos_penalty > 0.0:\n mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty\n mask_ins_pred = mask_ins_score.max(-1)[1]\n mask_ins_pred = torch.min(\n mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)\n )\n\n _tokens, _scores = _apply_ins_masks(\n output_tokens[can_ins_mask],\n output_scores[can_ins_mask],\n mask_ins_pred,\n self.pad,\n self.unk,\n self.eos,\n )\n output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)\n output_scores = _fill(output_scores, can_ins_mask, _scores, 0)\n\n if history is not None:\n history.append(output_tokens.clone())\n\n # insert words\n can_ins_word = output_tokens.eq(self.unk).sum(1) > 0\n if can_ins_word.sum() != 0:\n word_ins_score, word_ins_attn = self.decoder.forward_word_ins(\n normalize=True,\n prev_output_tokens=_skip(output_tokens, can_ins_word),\n encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word),\n )\n word_ins_score, word_ins_pred = word_ins_score.max(-1)\n _tokens, _scores = _apply_ins_words(\n output_tokens[can_ins_word],\n output_scores[can_ins_word],\n word_ins_pred,\n word_ins_score,\n self.unk,\n )\n\n output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)\n output_scores = _fill(output_scores, can_ins_word, 
_scores, 0)\n attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)\n\n if history is not None:\n history.append(output_tokens.clone())\n\n # delete some unnecessary paddings\n cut_off = output_tokens.ne(self.pad).sum(1).max()\n output_tokens = output_tokens[:, :cut_off]\n output_scores = output_scores[:, :cut_off]\n attn = None if attn is None else attn[:, :cut_off, :]\n\n return decoder_out._replace(\n output_tokens=output_tokens,\n output_scores=output_scores,\n attn=attn,\n history=history,\n )\n\n def initialize_output_tokens(self, encoder_out, src_tokens):\n initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2)\n initial_output_tokens[:, 0] = self.bos\n initial_output_tokens[:, 1] = self.eos\n\n initial_output_scores = initial_output_tokens.new_zeros(\n *initial_output_tokens.size()\n ).type_as(encoder_out.encoder_out)\n\n return DecoderOut(\n output_tokens=initial_output_tokens,\n output_scores=initial_output_scores,\n attn=None,\n step=0,\n max_step=0,\n history=None,\n )\n\n\nclass LevenshteinTransformerDecoder(FairseqNATDecoder):\n def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):\n super().__init__(\n args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn\n )\n self.dictionary = dictionary\n self.bos = dictionary.bos()\n self.unk = dictionary.unk()\n self.eos = dictionary.eos()\n self.sampling_for_deletion = getattr(args, \"sampling_for_deletion\", False)\n self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None)\n self.embed_word_del = Embedding(2, self.output_embed_dim, None)\n\n # del_word, ins_mask, ins_word\n self.early_exit = [int(i) for i in args.early_exit.split(\",\")]\n assert len(self.early_exit) == 3\n\n # copy layers for mask-predict/deletion\n self.layers_msk = None\n if getattr(args, \"no_share_maskpredictor\", False):\n self.layers_msk = nn.ModuleList(\n [\n TransformerDecoderLayer(args, no_encoder_attn)\n for _ in range(self.early_exit[1])\n ]\n )\n self.layers_del = None\n if getattr(args, \"no_share_discriminator\", False):\n self.layers_del = nn.ModuleList(\n [\n TransformerDecoderLayer(args, no_encoder_attn)\n for _ in range(self.early_exit[0])\n ]\n )\n\n if getattr(args, \"share_discriminator_maskpredictor\", False):\n assert getattr(\n args, \"no_share_discriminator\", False\n ), \"must set saperate discriminator\"\n self.layers_msk = self.layers_del\n\n def extract_features(\n self,\n prev_output_tokens,\n encoder_out=None,\n early_exit=None,\n layers=None,\n **unused\n ):\n \"\"\"\n Similar to *forward* but only return features.\n Inputs:\n prev_output_tokens: Tensor(B, T)\n encoder_out: a dictionary of hidden states and masks\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n the LevenshteinTransformer decoder has full-attention to all generated tokens\n \"\"\"\n # embed positions\n positions = (\n self.embed_positions(prev_output_tokens)\n if self.embed_positions is not None\n else None\n )\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n x = self.dropout_module(x)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n attn = None\n inner_states = [x]\n\n # decoder layers\n decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)\n layers = self.layers if layers is None else layers\n early_exit = len(layers) if early_exit is None else 
early_exit\n for _, layer in enumerate(layers[:early_exit]):\n x, attn, _ = layer(\n x,\n encoder_out.encoder_out if encoder_out is not None else None,\n encoder_out.encoder_padding_mask if encoder_out is not None else None,\n self_attn_mask=None,\n self_attn_padding_mask=decoder_padding_mask,\n )\n inner_states.append(x)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n return x, {\"attn\": attn, \"inner_states\": inner_states}\n\n @ensemble_decoder\n def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused):\n features, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n early_exit=self.early_exit[1],\n layers=self.layers_msk,\n **unused\n )\n features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)\n decoder_out = F.linear(features_cat, self.embed_mask_ins.weight)\n if normalize:\n return F.log_softmax(decoder_out, -1), extra[\"attn\"]\n return decoder_out, extra[\"attn\"]\n\n @ensemble_decoder\n def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused):\n features, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n early_exit=self.early_exit[2],\n layers=self.layers,\n **unused\n )\n decoder_out = self.output_layer(features)\n if normalize:\n return F.log_softmax(decoder_out, -1), extra[\"attn\"]\n return decoder_out, extra[\"attn\"]\n\n @ensemble_decoder\n def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused):\n features, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n early_exit=self.early_exit[0],\n layers=self.layers_del,\n **unused\n )\n decoder_out = F.linear(features, self.embed_word_del.weight)\n if normalize:\n return F.log_softmax(decoder_out, -1), extra[\"attn\"]\n return decoder_out, extra[\"attn\"]\n\n\n@register_model_architecture(\"levenshtein_transformer\", \"levenshtein_transformer\")\ndef levenshtein_base_architecture(args):\n args.encoder_embed_path = getattr(args, \"encoder_embed_path\", None)\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_layers = getattr(args, \"encoder_layers\", 6)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.encoder_learned_pos = getattr(args, \"encoder_learned_pos\", False)\n args.decoder_embed_path = getattr(args, \"decoder_embed_path\", None)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", args.encoder_embed_dim)\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", False)\n args.decoder_learned_pos = getattr(args, \"decoder_learned_pos\", False)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.0)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.adaptive_softmax_cutoff = getattr(args, \"adaptive_softmax_cutoff\", None)\n args.adaptive_softmax_dropout = 
getattr(args, \"adaptive_softmax_dropout\", 0)\n args.share_decoder_input_output_embed = getattr(\n args, \"share_decoder_input_output_embed\", False\n )\n args.share_all_embeddings = getattr(args, \"share_all_embeddings\", False)\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n args.apply_bert_init = getattr(args, \"apply_bert_init\", False)\n\n args.decoder_output_dim = getattr(\n args, \"decoder_output_dim\", args.decoder_embed_dim\n )\n args.sampling_for_deletion = getattr(args, \"sampling_for_deletion\", False)\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.decoder_embed_dim)\n args.early_exit = getattr(args, \"early_exit\", \"6,6,6\")\n args.no_share_discriminator = getattr(args, \"no_share_discriminator\", False)\n args.no_share_maskpredictor = getattr(args, \"no_share_maskpredictor\", False)\n args.share_discriminator_maskpredictor = getattr(\n args, \"share_discriminator_maskpredictor\", False\n )\n args.no_share_last_layer = getattr(args, \"no_share_last_layer\", False)\n\n\n@register_model_architecture(\n \"levenshtein_transformer\", \"levenshtein_transformer_wmt_en_de\"\n)\ndef levenshtein_transformer_wmt_en_de(args):\n levenshtein_base_architecture(args)\n\n\n# similar parameters used in the \"Attention Is All You Need\" paper (Vaswani et al., 2017)\n@register_model_architecture(\n \"levenshtein_transformer\", \"levenshtein_transformer_vaswani_wmt_en_de_big\"\n)\ndef levenshtein_transformer_vaswani_wmt_en_de_big(args):\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 1024)\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 4096)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 16)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 1024)\n args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 4096)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 16)\n args.dropout = getattr(args, \"dropout\", 0.3)\n levenshtein_base_architecture(args)\n\n\n# default parameters used in tensor2tensor implementation\n@register_model_architecture(\n \"levenshtein_transformer\", \"levenshtein_transformer_wmt_en_de_big\"\n)\ndef levenshtein_transformer_wmt_en_de_big_t2t(args):\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", True)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", True)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.1)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.1)\n levenshtein_transformer_vaswani_wmt_en_de_big(args)\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.zeros_like",
"torch.nn.functional.linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
soumyave/Facial-Attribute-Analysis-and-Handwritten-Digit-Recognition | [
"acd847a27c7a8f0ec6506b614297d20f21fe8d6a"
] | [
"Handwritten Digits Recognition Using Classification/convolutednn.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport math\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(xl, W):\n return tf.nn.conv2d(xl, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(xl):\n return tf.nn.max_pool(xl, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\ndef create_convoluted_multilayer_perceptron1():\n n_hidden_1 = 256 \n n_input = 784 \n n_classes = 10\n x = tf.placeholder(\"float\", [None, n_input])\n y = tf.placeholder(\"float\", [None, n_classes])\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n out_layer = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n return out_layer,x,y,keep_prob\n\ndef train_and_test_convNN(mnist,uspsImages,uspsLabels):\n learning_rate = 0.5\n training_epochs = 20000\n batch_size = 50\n pred,x,y,keep_prob = create_convoluted_multilayer_perceptron1()\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init) \n for epoch in range(training_epochs):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n c = optimizer.run(feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Accuracy for MNIST test data for convoluted Neural network trained data:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})*100)\n print(\"Accuracy for USPS test data for convoluted Neural network trained data:\", accuracy.eval({x: uspsImages, y: uspsLabels, keep_prob: 1.0})*100)"
] | [
[
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.constant",
"tensorflow.truncated_normal",
"tensorflow.Variable",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout",
"tensorflow.argmax",
"tensorflow.nn.conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
clazaro/sfepy | [
"78757a6989d6aaf85a3fb27957b9179c5e2aa2c7",
"78757a6989d6aaf85a3fb27957b9179c5e2aa2c7",
"78757a6989d6aaf85a3fb27957b9179c5e2aa2c7",
"78757a6989d6aaf85a3fb27957b9179c5e2aa2c7",
"78757a6989d6aaf85a3fb27957b9179c5e2aa2c7",
"78757a6989d6aaf85a3fb27957b9179c5e2aa2c7"
] | [
"sfepy/discrete/iga/io.py",
"examples/navier_stokes/stokes_slip_bc_penalty.py",
"examples/multi_physics/biot_npbc.py",
"sfepy/terms/terms_navier_stokes.py",
"examples/linear_elasticity/modal_analysis.py",
"examples/navier_stokes/stokes_slip_bc.py"
] | [
"\"\"\"\nIO for NURBS and Bezier extraction data.\n\"\"\"\nfrom __future__ import absolute_import\nimport numpy as nm\nimport six\nfrom six.moves import range\nfrom sfepy.base.ioutils import HDF5ContextManager, enc, dec\n\ndef write_iga_data(filename, group, knots, degrees, control_points, weights,\n cs, conn, bezier_control_points, bezier_weights, bezier_conn,\n regions, name=None):\n \"\"\"\n Write IGA-related data into a HDF5 file using pytables.\n\n filename: str or tables.File\n File to read the hdf5 mesh to.\n group: tables.group.Group, optional\n HDF5 file group to read the data from.\n If None, the root of file is used.\n\n Returns\n -------\n tuple\n Data for restoring IGA domain.\n \"\"\"\n\n with HDF5ContextManager(filename, mode = 'w',\n title='SfePy IGA data file') as fd:\n if group is None:\n group = fd.root\n\n if isinstance(degrees, int): degrees = [degrees]\n degrees = nm.asarray(degrees)\n\n nurbs = fd.create_group(group, 'nurbs', 'nurbs')\n\n fd.create_array(nurbs, 'dim', control_points.shape[1], 'dim')\n fd.create_array(nurbs, 'tdim', len(degrees), 'tdim')\n for ii, kv in enumerate(knots):\n key = 'knots_%d' % ii\n fd.create_array(nurbs, key, kv, key)\n fd.create_array(nurbs, 'degrees', degrees, 'degrees')\n fd.create_array(nurbs, 'control_points', control_points,\n 'control_points')\n fd.create_array(nurbs, 'weights', weights, 'weights')\n\n bezier = fd.create_group(group, 'bezier', 'bezier')\n\n fd.create_array(bezier, 'bezier_control_points', bezier_control_points,\n 'bezier_control_points')\n fd.create_array(bezier, 'bezier_weights', bezier_weights,\n 'bezier_weights')\n for ii, op in enumerate(cs):\n key = 'extraction_%d' % ii\n fd.create_array(bezier, key, op, key)\n fd.create_array(bezier, 'global_connectivity', conn,\n 'global_connectivity')\n fd.create_array(bezier, 'bezier_connectivity', bezier_conn,\n 'bezier_connectivity')\n\n regs = fd.create_group(group, 'regions', 'regions')\n for key, val in six.iteritems(regions):\n fd.create_array(regs, key, val, key)\n\n if name is not None:\n fd.create_array( group, 'name', nm.array( enc(name)) )\n\ndef read_iga_data(filename, group=None):\n \"\"\"\n Read IGA-related data from a HDF5 file using pytables.\n\n filename: str or tables.File\n File to read the hdf5 mesh to.\n group: tables.group.Group or None\n HDF5 file group to read the mesh from.\n If it's None, the root of file is used.\n\n Returns\n -------\n tuple\n Data for restoring IGA domain.\n \"\"\"\n\n with HDF5ContextManager(filename, 'r') as fd:\n if group is None:\n group = fd.root\n\n nurbs = group.nurbs\n\n tdim = nurbs.tdim.read()\n\n knots = []\n for ii in range(tdim):\n name = 'knots_%d' % ii\n knots.append(nurbs._f_get_child(name).read())\n knots = tuple(knots)\n\n degrees = nurbs.degrees.read()\n control_points = nurbs.control_points.read()\n weights = nurbs.weights.read()\n\n bezier = group.bezier\n\n cs = []\n for ii in range(tdim):\n name = 'extraction_%d' % ii\n cs.append(bezier._f_get_child(name).read())\n\n conn = bezier.global_connectivity.read()\n bezier_control_points = bezier.bezier_control_points.read()\n bezier_weights = bezier.bezier_weights.read()\n bezier_conn = bezier.bezier_connectivity.read()\n\n regions = {}\n for region in group.regions:\n regions[region.name] = region.read()\n\n out = (knots, degrees, control_points, weights, cs, conn,\n bezier_control_points, bezier_weights, bezier_conn, regions)\n\n if hasattr(group, 'name'):\n out = out + ( dec(group.name.read().item()), )\n\n return out\n",
"r\"\"\"\nIncompressible Stokes flow with Navier (slip) boundary conditions, flow driven\nby a moving wall and a small diffusion for stabilization.\n\nThis example demonstrates a weak application of `no-penetration` boundary\nconditions using the penalty term ``dw_non_penetration_p``.\n\nFind :math:`\\ul{u}`, :math:`p` such that:\n\n.. math::\n \\int_{\\Omega} \\nu\\ \\nabla \\ul{v} : \\nabla \\ul{u}\n - \\int_{\\Omega} p\\ \\nabla \\cdot \\ul{v}\n + \\int_{\\Gamma_1} \\beta \\ul{v} \\cdot (\\ul{u} - \\ul{u}_d)\n + \\int_{\\Gamma_2} \\beta \\ul{v} \\cdot \\ul{u}\n + \\int_{\\Gamma_1 \\cup \\Gamma_2} \\epsilon (\\ul{n} \\cdot \\ul{v})\n (\\ul{n} \\cdot \\ul{u})\n = 0\n \\;, \\quad \\forall \\ul{v} \\;,\n\n \\int_{\\Omega} \\mu \\nabla q \\cdot \\nabla p\n + \\int_{\\Omega} q\\ \\nabla \\cdot \\ul{u}\n = 0\n \\;, \\quad \\forall q \\;,\n\nwhere :math:`\\nu` is the fluid viscosity, :math:`\\beta` is the slip\ncoefficient, :math:`\\mu` is the (small) numerical diffusion coefficient,\n:math:`\\epsilon` is the penalty coefficient (sufficiently large),\n:math:`\\Gamma_1` is the top wall that moves with the given driving velocity\n:math:`\\ul{u}_d` and :math:`\\Gamma_2` are the remaining walls. The Navier\nconditions are in effect on both :math:`\\Gamma_1`, :math:`\\Gamma_2` and are\nexpressed by the corresponding integrals in the equations above.\n\nThe `no-penetration` boundary conditions are applied on :math:`\\Gamma_1`,\n:math:`\\Gamma_2`. Optionally, Dirichlet boundary conditions can be applied on\nthe inlet, see the code below.\n\nThe mesh is created by ``gen_block_mesh()`` function - try different mesh\ndimensions and resolutions below. For large meshes use the ``'ls_i'`` linear\nsolver - PETSc + petsc4py is needed in that case.\n\nSee also :ref:`navier_stokes-stokes_slip_bc`.\n\"\"\"\nfrom __future__ import absolute_import\nimport numpy as nm\n\nfrom sfepy.discrete.fem.meshio import UserMeshIO\nfrom sfepy.mesh.mesh_generators import gen_block_mesh\nfrom sfepy.homogenization.utils import define_box_regions\n\n# Mesh dimensions.\ndims = nm.array([3, 1, 0.5])\n\n# Mesh resolution: increase to improve accuracy.\nshape = [11, 15, 15]\n\ndef mesh_hook(mesh, mode):\n \"\"\"\n Generate the block mesh.\n \"\"\"\n if mode == 'read':\n mesh = gen_block_mesh(dims, shape, [0, 0, 0], name='user_block',\n verbose=False)\n return mesh\n\n elif mode == 'write':\n pass\n\nfilename_mesh = UserMeshIO(mesh_hook)\n\nregions = define_box_regions(3, 0.5 * dims)\nregions.update({\n 'Omega' : 'all',\n 'Gamma1_f' : ('copy r.Top', 'face'),\n 'Gamma2_f' : ('r.Near +v r.Bottom +v r.Far', 'face'),\n 'Gamma_f' : ('r.Gamma1_f +v r.Gamma2_f', 'face'),\n 'Inlet_f' : ('r.Left -v r.Gamma_f', 'face'),\n})\n\nfields = {\n 'velocity' : ('real', 3, 'Omega', 1),\n 'pressure' : ('real', 1, 'Omega', 1),\n}\n\ndef get_u_d(ts, coors, region=None):\n \"\"\"\n Given stator velocity.\n \"\"\"\n out = nm.zeros_like(coors)\n out[:] = [1.0, 1.0, 0.0]\n\n return out\n\nfunctions = {\n 'get_u_d' : (get_u_d,),\n}\n\nvariables = {\n 'u' : ('unknown field', 'velocity', 0),\n 'v' : ('test field', 'velocity', 'u'),\n 'u_d' : ('parameter field', 'velocity',\n {'setter' : 'get_u_d'}),\n 'p' : ('unknown field', 'pressure', 1),\n 'q' : ('test field', 'pressure', 'p'),\n}\n\n# Try setting the inlet velocity by un-commenting the 'inlet' ebcs.\nebcs = {\n ## 'inlet' : ('Inlet_f', {'u.0' : 1.0, 'u.[1, 2]' : 0.0}),\n}\n\nmaterials = {\n 'm' : ({\n 'nu' : 1e-3,\n 'beta' : 1e-2,\n 'mu' : 1e-10,\n 'np_eps' : 1e3,\n },),\n}\n\nequations = {\n 'balance' :\n 
\"\"\"dw_div_grad.5.Omega(m.nu, v, u)\n - dw_stokes.5.Omega(v, p)\n + dw_surface_dot.5.Gamma1_f(m.beta, v, u)\n + dw_surface_dot.5.Gamma2_f(m.beta, v, u)\n + dw_non_penetration_p.5.Gamma1_f(m.np_eps, v, u)\n + dw_non_penetration_p.5.Gamma2_f(m.np_eps, v, u)\n =\n + dw_surface_dot.5.Gamma1_f(m.beta, v, u_d)\"\"\",\n 'incompressibility' :\n \"\"\"dw_laplace.5.Omega(m.mu, q, p)\n + dw_stokes.5.Omega(u, q) = 0\"\"\",\n}\n\nsolvers = {\n 'ls_d' : ('ls.scipy_direct', {}),\n 'ls_i' : ('ls.petsc', {\n 'method' : 'bcgsl', # ksp_type\n 'precond' : 'bjacobi', # pc_type\n 'sub_precond' : 'ilu', # sub_pc_type\n 'eps_a' : 0.0, # abstol\n 'eps_r' : 1e-12, # rtol\n 'eps_d' : 1e10, # Divergence tolerance.\n 'i_max' : 1000, # maxits\n }),\n 'newton' : ('nls.newton', {\n 'i_max' : 1,\n 'eps_a' : 1e-10,\n }),\n}\n\noptions = {\n 'nls' : 'newton',\n 'ls' : 'ls_d',\n}\n",
"r\"\"\"\nBiot problem - deformable porous medium with the no-penetration boundary\ncondition on a boundary region.\n\nFind :math:`\\ul{u}`, :math:`p` such that:\n\n.. math::\n \\int_{\\Omega} D_{ijkl}\\ e_{ij}(\\ul{v}) e_{kl}(\\ul{u})\n - \\int_{\\Omega} p\\ \\alpha_{ij} e_{ij}(\\ul{v})\n = 0\n \\;, \\quad \\forall \\ul{v} \\;,\n\n \\int_{\\Omega} q\\ \\alpha_{ij} e_{ij}(\\ul{u})\n + \\int_{\\Omega} K_{ij} \\nabla_i q \\nabla_j p\n = 0\n \\;, \\quad \\forall q \\;,\n\n \\ul{u} \\cdot \\ul{n} = 0 \\mbox{ on } \\Gamma_{walls} \\;,\n\nwhere\n\n.. math::\n D_{ijkl} = \\mu (\\delta_{ik} \\delta_{jl}+\\delta_{il} \\delta_{jk}) +\n \\lambda \\ \\delta_{ij} \\delta_{kl}\n \\;.\n\"\"\"\nfrom __future__ import absolute_import\nimport os\nimport numpy as nm\n\nfrom sfepy.linalg import get_coors_in_tube\nfrom sfepy.mechanics.matcoefs import stiffness_from_lame\n\ndef define():\n from sfepy import data_dir\n\n filename = data_dir + '/meshes/3d/cylinder.mesh'\n output_dir = 'output'\n return define_input(filename, output_dir)\n\ndef cinc_simple(coors, mode):\n axis = nm.array([1, 0, 0], nm.float64)\n if mode == 0: # In\n centre = nm.array([0.0, 0.0, 0.0], nm.float64)\n radius = 0.019\n length = 0.00002\n elif mode == 1: # Out\n centre = nm.array([0.1, 0.0, 0.0], nm.float64)\n radius = 0.019\n length = 0.00002\n elif mode == 2: # Rigid\n centre = nm.array([0.05, 0.0, 0.0], nm.float64)\n radius = 0.015\n length = 0.03\n else:\n raise ValueError('unknown mode %s!' % mode)\n\n return get_coors_in_tube(coors,\n centre, axis, -1, radius, length)\n\ndef define_regions(filename):\n if filename.find('simple.mesh'):\n dim = 3\n regions = {\n 'Omega' : 'all',\n 'Walls' : ('vertices of surface -v (r.Outlet +f r.Inlet)', 'facet'),\n 'Inlet' : ('vertices by cinc_simple0', 'facet'),\n 'Outlet' : ('vertices by cinc_simple1', 'facet'),\n 'Rigid' : 'vertices by cinc_simple2',\n }\n\n else:\n raise ValueError('unknown mesh %s!' 
% filename)\n\n return regions, dim\n\ndef get_pars(ts, coor, mode, output_dir='.', **kwargs):\n if mode == 'qp':\n n_nod, dim = coor.shape\n sym = (dim + 1) * dim // 2\n\n out = {}\n out['D'] = nm.tile(stiffness_from_lame(dim, lam=1.7, mu=0.3),\n (coor.shape[0], 1, 1))\n\n aa = nm.zeros((sym, 1), dtype=nm.float64)\n aa[:dim] = 0.132\n aa[dim:sym] = 0.092\n out['alpha'] = nm.tile(aa, (coor.shape[0], 1, 1))\n\n perm = nm.eye(dim, dtype=nm.float64)\n out['K'] = nm.tile(perm, (coor.shape[0], 1, 1))\n\n return out\n\ndef post_process(out, pb, state, extend=False):\n from sfepy.base.base import Struct\n\n dvel = pb.evaluate('ev_diffusion_velocity.i.Omega( m.K, p )',\n mode='el_avg')\n out['dvel'] = Struct(name='output_data',\n mode='cell', data=dvel, dofs=None)\n\n stress = pb.evaluate('ev_cauchy_stress.i.Omega( m.D, u )',\n mode='el_avg')\n out['cauchy_stress'] = Struct(name='output_data',\n mode='cell', data=stress, dofs=None)\n return out\n\ndef define_input(filename, output_dir):\n\n filename_mesh = filename\n options = {\n 'output_dir' : output_dir,\n 'output_format' : 'vtk',\n 'post_process_hook' : 'post_process',\n\n 'ls' : 'ls',\n 'nls' : 'newton',\n }\n\n functions = {\n 'cinc_simple0' : (lambda coors, domain:\n cinc_simple(coors, 0),),\n 'cinc_simple1' : (lambda coors, domain:\n cinc_simple(coors, 1),),\n 'cinc_simple2' : (lambda coors, domain:\n cinc_simple(coors, 2),),\n 'get_pars' : (lambda ts, coors, mode=None, **kwargs:\n get_pars(ts, coors, mode,\n output_dir=output_dir, **kwargs),),\n }\n regions, dim = define_regions(filename_mesh)\n\n field_1 = {\n 'name' : 'displacement',\n 'dtype' : nm.float64,\n 'shape' : dim,\n 'region' : 'Omega',\n 'approx_order' : 1,\n }\n field_2 = {\n 'name' : 'pressure',\n 'dtype' : nm.float64,\n 'shape' : 1,\n 'region' : 'Omega',\n 'approx_order' : 1,\n }\n\n variables = {\n 'u' : ('unknown field', 'displacement', 0),\n 'v' : ('test field', 'displacement', 'u'),\n 'p' : ('unknown field', 'pressure', 1),\n 'q' : ('test field', 'pressure', 'p'),\n }\n\n ebcs = {\n 'inlet' : ('Inlet', {'p.0' : 1.0, 'u.all' : 0.0}),\n 'outlet' : ('Outlet', {'p.0' : -1.0}),\n }\n\n lcbcs = {\n 'rigid' : ('Outlet', {'u.all' : None}, None, 'rigid'),\n 'no_penetration' : ('Walls', {'u.all' : None}, None,\n 'no_penetration', None),\n }\n\n material_1 = {\n 'name' : 'm',\n 'function' : 'get_pars',\n }\n\n integral_1 = {\n 'name' : 'i',\n 'order' : 2,\n }\n\n equations = {\n 'eq_1' :\n \"\"\"dw_lin_elastic.i.Omega( m.D, v, u )\n - dw_biot.i.Omega( m.alpha, v, p )\n = 0\"\"\",\n 'eq_2' :\n \"\"\"dw_biot.i.Omega( m.alpha, u, q )\n + dw_diffusion.i.Omega( m.K, q, p )\n = 0\"\"\",\n }\n\n solver_0 = {\n 'name' : 'ls',\n 'kind' : 'ls.scipy_direct', # Direct solver.\n }\n\n solver_1 = {\n 'name' : 'newton',\n 'kind' : 'nls.newton',\n }\n\n return locals()\n",
"import numpy as nm\n\nfrom sfepy.linalg import dot_sequences\nfrom sfepy.terms.terms import Term, terms\n\nclass DivGradTerm(Term):\n r\"\"\"\n Diffusion term.\n\n :Definition:\n\n .. math::\n \\int_{\\Omega} \\nu\\ \\nabla \\ul{v} : \\nabla \\ul{u} \\mbox{ , }\n \\int_{\\Omega} \\nu\\ \\nabla \\ul{u} : \\nabla \\ul{w} \\\\\n \\int_{\\Omega} \\nabla \\ul{v} : \\nabla \\ul{u} \\mbox{ , }\n \\int_{\\Omega} \\nabla \\ul{u} : \\nabla \\ul{w}\n\n :Arguments 1:\n - material : :math:`\\nu` (viscosity, optional)\n - virtual : :math:`\\ul{v}`\n - state : :math:`\\ul{u}`\n\n :Arguments 2:\n - material : :math:`\\nu` (viscosity, optional)\n - parameter_1 : :math:`\\ul{u}`\n - parameter_2 : :math:`\\ul{w}`\n \"\"\"\n name = 'dw_div_grad'\n arg_types = (('opt_material', 'virtual', 'state'),\n ('opt_material', 'parameter_1', 'parameter_2'))\n arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', 'state'),\n 'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'},\n {'opt_material' : None}]\n modes = ('weak', 'eval')\n\n function = staticmethod(terms.term_ns_asm_div_grad)\n\n def d_div_grad(self, out, grad1, grad2, mat, vg, fmode):\n sh = grad1.shape\n g1 = grad1.reshape((sh[0], sh[1], sh[2] * sh[3]))\n g2 = grad2.reshape((sh[0], sh[1], sh[2] * sh[3]))\n aux = mat * dot_sequences(g1[..., None], g2, 'ATB')[..., None]\n\n if fmode == 2:\n out[:] = aux\n status = 0\n\n else:\n status = vg.integrate(out, aux, fmode)\n\n return status\n\n def get_fargs(self, mat, virtual, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(state)\n\n if mat is None:\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)\n mat = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)\n\n if mode == 'weak':\n if diff_var is None:\n grad = self.get(state, 'grad').transpose((0, 1, 3, 2))\n sh = grad.shape\n grad = grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))\n fmode = 0\n\n else:\n grad = nm.array([0], ndmin=4, dtype=nm.float64)\n fmode = 1\n\n return grad, mat, vg, fmode\n\n elif mode == 'eval':\n grad1 = self.get(virtual, 'grad')\n grad2 = self.get(state, 'grad')\n fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)\n\n return grad1, grad2, mat, vg, fmode\n\n else:\n raise ValueError('unsupported evaluation mode in %s! (%s)'\n % (self.name, mode))\n\n def get_eval_shape(self, mat, virtual, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)\n\n return (n_el, 1, 1, 1), state.dtype\n\n def set_arg_types(self):\n if self.mode == 'weak':\n self.function = terms.term_ns_asm_div_grad\n\n else:\n self.function = self.d_div_grad\n\nclass ConvectTerm(Term):\n r\"\"\"\n Nonlinear convective term.\n\n :Definition:\n\n .. math::\n \\int_{\\Omega} ((\\ul{u} \\cdot \\nabla) \\ul{u}) \\cdot \\ul{v}\n\n :Arguments:\n - virtual : :math:`\\ul{v}`\n - state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_convect'\n arg_types = ('virtual', 'state')\n arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D'}\n\n function = staticmethod(terms.term_ns_asm_convect)\n\n def get_fargs(self, virtual, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(state)\n\n grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()\n val_qp = self.get(state, 'val')\n\n fmode = diff_var is not None\n\n return grad, val_qp, vg, fmode\n\nclass LinearConvectTerm(Term):\n r\"\"\"\n Linearized convective term.\n\n :Definition:\n\n .. math::\n \\int_{\\Omega} ((\\ul{b} \\cdot \\nabla) \\ul{u}) \\cdot \\ul{v}\n\n .. 
math::\n ((\\ul{b} \\cdot \\nabla) \\ul{u})|_{qp}\n\n :Arguments:\n - virtual : :math:`\\ul{v}`\n - parameter : :math:`\\ul{b}`\n - state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_lin_convect'\n arg_types = ('virtual', 'parameter', 'state')\n arg_shapes = {'virtual' : ('D', 'state'), 'parameter' : 'D', 'state' : 'D'}\n\n function = staticmethod(terms.dw_lin_convect)\n\n def get_fargs(self, virtual, parameter, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(state)\n\n val_qp = self.get(parameter, 'val')\n\n if mode == 'weak':\n if diff_var is None:\n grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()\n fmode = 0\n\n else:\n grad = nm.array([0], ndmin=4, dtype=nm.float64)\n fmode = 1\n\n return grad, val_qp, vg, fmode\n\n elif mode == 'qp':\n grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()\n fmode = 2\n\n return grad, val_qp, vg, fmode\n\n else:\n raise ValueError('unsupported evaluation mode in %s! (%s)'\n % (self.name, mode))\n\nclass StokesTerm(Term):\n r\"\"\"\n Stokes problem coupling term. Corresponds to weak forms of gradient and\n divergence terms. Can be evaluated.\n\n :Definition:\n\n .. math::\n \\int_{\\Omega} p\\ \\nabla \\cdot \\ul{v} \\mbox{ , }\n \\int_{\\Omega} q\\ \\nabla \\cdot \\ul{u}\n \\mbox{ or }\n \\int_{\\Omega} c\\ p\\ \\nabla \\cdot \\ul{v} \\mbox{ , }\n \\int_{\\Omega} c\\ q\\ \\nabla \\cdot \\ul{u}\n\n :Arguments 1:\n - material : :math:`c` (optional)\n - virtual : :math:`\\ul{v}`\n - state : :math:`p`\n\n :Arguments 2:\n - material : :math:`c` (optional)\n - state : :math:`\\ul{u}`\n - virtual : :math:`q`\n\n :Arguments 3:\n - material : :math:`c` (optional)\n - parameter_v : :math:`\\ul{u}`\n - parameter_s : :math:`p`\n \"\"\"\n name = 'dw_stokes'\n arg_types = (('opt_material', 'virtual', 'state'),\n ('opt_material', 'state', 'virtual'),\n ('opt_material', 'parameter_v', 'parameter_s'))\n arg_shapes = [{'opt_material' : '1, 1',\n 'virtual/grad' : ('D', None), 'state/grad' : 1,\n 'virtual/div' : (1, None), 'state/div' : 'D',\n 'parameter_v' : 'D', 'parameter_s' : 1},\n {'opt_material' : None}]\n modes = ('grad', 'div', 'eval')\n\n @staticmethod\n def d_eval(out, coef, vec_qp, div, vvg):\n out_qp = coef * vec_qp * div\n\n status = vvg.integrate(out, out_qp)\n\n return status\n\n def get_fargs(self, coef, vvar, svar,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n if self.mode == 'grad':\n qp_var, qp_name = svar, 'val'\n\n else:\n qp_var, qp_name = vvar, 'div'\n\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)\n if coef is None:\n coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)\n\n if mode == 'weak':\n vvg, _ = self.get_mapping(vvar)\n svg, _ = self.get_mapping(svar)\n\n if diff_var is None:\n val_qp = self.get(qp_var, qp_name)\n fmode = 0\n\n else:\n val_qp = nm.array([0], ndmin=4, dtype=nm.float64)\n fmode = 1\n\n return coef, val_qp, svg, vvg, fmode\n\n elif mode == 'eval':\n vvg, _ = self.get_mapping(vvar)\n\n div = self.get(vvar, 'div')\n vec_qp = self.get(svar, 'val')\n\n return coef, vec_qp, div, vvg\n\n else:\n raise ValueError('unsupported evaluation mode in %s! 
(%s)'\n % (self.name, mode))\n\n def get_eval_shape(self, coef, vvar, svar,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)\n\n return (n_el, 1, 1, 1), vvar.dtype\n\n def set_arg_types(self):\n self.function = {\n 'grad' : terms.dw_grad,\n 'div' : terms.dw_div,\n 'eval' : self.d_eval,\n }[self.mode]\n\nclass GradTerm(Term):\n r\"\"\"\n Evaluate gradient of a scalar or vector field.\n\n Supports 'eval', 'el_avg' and 'qp' evaluation modes.\n\n :Definition:\n\n .. math::\n \\int_{\\Omega} \\nabla p \\mbox{ or } \\int_{\\Omega} \\nabla \\ul{w}\n\n .. math::\n \\mbox{vector for } K \\from \\Ical_h: \\int_{T_K} \\nabla p /\n \\int_{T_K} 1 \\mbox{ or } \\int_{T_K} \\nabla \\ul{w} /\n \\int_{T_K} 1\n\n .. math::\n (\\nabla p)|_{qp} \\mbox{ or } \\nabla \\ul{w}|_{qp}\n\n :Arguments:\n - parameter : :math:`p` or :math:`\\ul{w}`\n \"\"\"\n name = 'ev_grad'\n arg_types = ('parameter',)\n arg_shapes = {'parameter' : 'N'}\n\n @staticmethod\n def function(out, grad, vg, fmode):\n if fmode == 2:\n out[:] = grad\n status = 0\n\n else:\n status = vg.integrate(out, grad, fmode)\n\n return status\n\n def get_fargs(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(parameter)\n\n grad = self.get(parameter, 'grad')\n\n fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)\n\n return grad, vg, fmode\n\n def get_eval_shape(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)\n\n if mode != 'qp':\n n_qp = 1\n\n return (n_el, n_qp, dim, n_c), parameter.dtype\n\nclass DivTerm(Term):\n r\"\"\"\n Evaluate divergence of a vector field.\n\n Supports 'eval', 'el_avg' and 'qp' evaluation modes.\n\n :Definition:\n\n .. math::\n \\int_{\\Omega} \\nabla \\cdot \\ul{u}\n\n .. math::\n \\mbox{vector for } K \\from \\Ical_h:\n \\int_{T_K} \\nabla \\cdot \\ul{u} / \\int_{T_K} 1\n\n .. math::\n (\\nabla \\cdot \\ul{u})|_{qp}\n\n :Arguments:\n - parameter : :math:`\\ul{u}`\n \"\"\"\n name = 'ev_div'\n arg_types = ('parameter',)\n arg_shapes = {'parameter' : 'D'}\n\n @staticmethod\n def function(out, div, vg, fmode):\n if fmode == 2:\n out[:] = div\n status = 0\n\n else:\n status = vg.integrate(out, div, fmode)\n\n return status\n\n def get_fargs(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(parameter)\n\n div = self.get(parameter, 'div')\n\n fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)\n\n return div, vg, fmode\n\n def get_eval_shape(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)\n\n if mode != 'qp':\n n_qp = 1\n\n return (n_el, n_qp, 1, 1), parameter.dtype\n\nclass DivOperatorTerm(Term):\n r\"\"\"\n Weighted divergence term of a test function.\n\n :Definition:\n\n .. 
math::\n \\int_{\\Omega} \\nabla \\cdot \\ul{v} \\mbox { or } \\int_{\\Omega} c \\nabla\n \\cdot \\ul{v}\n\n :Arguments:\n - material : :math:`c` (optional)\n - virtual : :math:`\\ul{v}`\n \"\"\"\n name = 'dw_div'\n arg_types = ('opt_material', 'virtual')\n arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', None)},\n {'opt_material' : None}]\n\n @staticmethod\n def function(out, mat, vg):\n div_bf = vg.bfg\n\n n_el, n_qp, dim, n_ep = div_bf.shape\n div_bf = div_bf.reshape((n_el, n_qp, dim * n_ep, 1))\n div_bf = nm.ascontiguousarray(div_bf)\n\n if mat is not None:\n status = vg.integrate(out, mat * div_bf)\n else:\n status = vg.integrate(out, div_bf)\n\n return status\n\n def get_fargs(self, mat, virtual,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(virtual)\n\n return mat, vg\n\nclass GradDivStabilizationTerm(Term):\n r\"\"\"\n Grad-div stabilization term ( :math:`\\gamma` is a global stabilization\n parameter).\n\n :Definition:\n\n .. math::\n \\gamma \\int_{\\Omega} (\\nabla\\cdot\\ul{u}) \\cdot (\\nabla\\cdot\\ul{v})\n\n :Arguments:\n - material : :math:`\\gamma`\n - virtual : :math:`\\ul{v}`\n - state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_st_grad_div'\n arg_types = ('material', 'virtual', 'state')\n arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),\n 'state' : 'D'}\n\n function = staticmethod(terms.dw_st_grad_div)\n\n def get_fargs(self, gamma, virtual, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(state)\n\n if diff_var is None:\n div = self.get(state, 'div')\n fmode = 0\n\n else:\n div = nm.array([0], ndmin=4, dtype=nm.float64)\n fmode = 1\n\n return div, gamma, vg, fmode\n\nfrom sfepy.terms.terms_diffusion import LaplaceTerm\nclass PSPGPStabilizationTerm(LaplaceTerm):\n r\"\"\"\n PSPG stabilization term, pressure part ( :math:`\\tau` is a local\n stabilization parameter), alias to Laplace term dw_laplace.\n\n :Definition:\n\n .. math::\n \\sum_{K \\in \\Ical_h}\\int_{T_K} \\tau_K\\ \\nabla p \\cdot \\nabla q\n\n :Arguments:\n - material : :math:`\\tau_K`\n - virtual : :math:`q`\n - state : :math:`p`\n \"\"\"\n name = 'dw_st_pspg_p'\n\nclass PSPGCStabilizationTerm(Term):\n r\"\"\"\n PSPG stabilization term, convective part ( :math:`\\tau` is a local\n stabilization parameter).\n\n :Definition:\n\n .. math::\n \\sum_{K \\in \\Ical_h}\\int_{T_K} \\tau_K\\ ((\\ul{b} \\cdot \\nabla) \\ul{u})\n \\cdot \\nabla q\n\n :Arguments:\n - material : :math:`\\tau_K`\n - virtual : :math:`q`\n - parameter : :math:`\\ul{b}`\n - state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_st_pspg_c'\n arg_types = ('material', 'virtual', 'parameter', 'state')\n arg_shapes = {'material' : '1, 1', 'virtual' : (1, None),\n 'parameter' : 'D', 'state' : 'D'}\n\n function = staticmethod(terms.dw_st_pspg_c)\n\n def get_fargs(self, tau, virtual, parameter, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n svg, _ = self.get_mapping(virtual)\n vvg, _ = self.get_mapping(state)\n\n val_qp = self.get(parameter, 'val')\n conn = state.field.get_connectivity(self.region, self.integration)\n\n if diff_var is None:\n fmode = 0\n\n else:\n fmode = 1\n\n return val_qp, state(), tau, svg, vvg, conn, fmode\n\nclass SUPGPStabilizationTerm(Term):\n r\"\"\"\n SUPG stabilization term, pressure part ( :math:`\\delta` is a local\n stabilization parameter).\n\n :Definition:\n\n .. 
math::\n \\sum_{K \\in \\Ical_h}\\int_{T_K} \\delta_K\\ \\nabla p\\cdot ((\\ul{b} \\cdot\n \\nabla) \\ul{v})\n\n :Arguments:\n - material : :math:`\\delta_K`\n - virtual : :math:`\\ul{v}`\n - parameter : :math:`\\ul{b}`\n - state : :math:`p`\n \"\"\"\n name = 'dw_st_supg_p'\n arg_types = ('material', 'virtual', 'parameter', 'state')\n arg_shapes = {'material' : '1, 1', 'virtual' : ('D', None),\n 'parameter' : 'D', 'state' : 1}\n\n function = staticmethod(terms.dw_st_supg_p)\n\n def get_fargs(self, delta, virtual, parameter, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vvg, _ = self.get_mapping(virtual)\n svg, _ = self.get_mapping(state)\n\n val_qp = self.get(parameter, 'val')\n\n if diff_var is None:\n grad = self.get(state, 'grad')\n fmode = 0\n\n else:\n grad = nm.array([0], ndmin=4, dtype=nm.float64)\n fmode = 1\n\n return val_qp, grad, delta, vvg, svg, fmode\n\nclass SUPGCStabilizationTerm(Term):\n r\"\"\"\n SUPG stabilization term, convective part ( :math:`\\delta` is a local\n stabilization parameter).\n\n :Definition:\n\n .. math::\n \\sum_{K \\in \\Ical_h}\\int_{T_K} \\delta_K\\ ((\\ul{b} \\cdot \\nabla)\n \\ul{u})\\cdot ((\\ul{b} \\cdot \\nabla) \\ul{v})\n\n :Arguments:\n - material : :math:`\\delta_K`\n - virtual : :math:`\\ul{v}`\n - parameter : :math:`\\ul{b}`\n - state : :math:`\\ul{u}`\n \"\"\"\n name = 'dw_st_supg_c'\n arg_types = ('material', 'virtual', 'parameter', 'state')\n arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),\n 'parameter' : 'D', 'state' : 'D'}\n\n function = staticmethod(terms.dw_st_supg_c)\n\n def get_fargs(self, delta, virtual, parameter, state,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(virtual)\n\n val_qp = self.get(parameter, 'val')\n conn = virtual.field.get_connectivity(self.region, self.integration)\n\n if diff_var is None:\n fmode = 0\n\n else:\n fmode = 1\n\n return val_qp, state(), delta, vg, conn, fmode\n",
"#!/usr/bin/env python\n\"\"\"\nModal analysis of a linear elastic block in 2D or 3D.\n\nThe dimension of the problem is determined by the length of the vector\nin ``--dims`` option.\n\nOptionally, a mesh file name can be given as a positional argument. In that\ncase, the mesh generation options are ignored.\n\nThe default material properties correspond to aluminium in the following units:\n\n- length: m\n- mass: kg\n- stiffness / stress: Pa\n- density: kg / m^3\n\nExamples\n--------\n\n- Run with the default arguments, show results (color = strain)::\n\n python examples/linear_elasticity/modal_analysis.py --show\n\n- Fix bottom surface of the domain, show 9 eigen-shapes::\n\n python examples/linear_elasticity/modal_analysis.py -b cantilever -n 9 --show\n\n- Increase mesh resolution::\n\n python examples/linear_elasticity/modal_analysis.py -s 31,31 -n 9 --show\n\n- Use 3D domain::\n\n python examples/linear_elasticity/modal_analysis.py -d 1,1,1 -c 0,0,0 -s 8,8,8 --show\n\n- Change the eigenvalue problem solver to LOBPCG::\n\n python examples/linear_elasticity/modal_analysis.py --solver=\"eig.scipy_lobpcg,i_max:100,largest:False\" --show\n\n See :mod:`sfepy.solvers.eigen` for available solvers.\n\"\"\"\nfrom __future__ import absolute_import\nimport sys\nimport six\nfrom six.moves import range\nsys.path.append('.')\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\nimport numpy as nm\nimport scipy.sparse.linalg as sla\n\nfrom sfepy.base.base import assert_, output, Struct\nfrom sfepy.discrete import (FieldVariable, Material, Integral, Integrals,\n Equation, Equations, Problem)\nfrom sfepy.discrete.fem import Mesh, FEDomain, Field\nfrom sfepy.terms import Term\nfrom sfepy.discrete.conditions import Conditions, EssentialBC\nfrom sfepy.mechanics.matcoefs import stiffness_from_youngpoisson\nfrom sfepy.mesh.mesh_generators import gen_block_mesh\nfrom sfepy.solvers import Solver\n\nhelps = {\n 'dims' :\n 'dimensions of the block [default: %(default)s]',\n 'centre' :\n 'centre of the block [default: %(default)s]',\n 'shape' :\n 'numbers of vertices along each axis [default: %(default)s]',\n 'bc_kind' :\n 'kind of Dirichlet boundary conditions on the bottom and top surfaces,'\n ' one of: free, cantilever, fixed [default: %(default)s]',\n 'axis' :\n 'the axis index of the block that the bottom and top surfaces are related'\n ' to [default: %(default)s]',\n 'young' : \"the Young's modulus [default: %(default)s]\",\n 'poisson' : \"the Poisson's ratio [default: %(default)s]\",\n 'density' : \"the material density [default: %(default)s]\",\n 'order' : 'displacement field approximation order [default: %(default)s]',\n 'n_eigs' : 'the number of eigenvalues to compute [default: %(default)s]',\n 'ignore' : 'if given, the number of eigenvalues to ignore (e.g. rigid'\n ' body modes); has precedence over the default setting determined by'\n ' --bc-kind [default: %(default)s]',\n 'solver' : 'the eigenvalue problem solver to use. 
It should be given'\n ' as a comma-separated list: solver_kind,option0:value0,option1:value1,...'\n ' [default: %(default)s]',\n 'show' : 'show the results figure',\n}\n\ndef main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('--version', action='version', version='%(prog)s')\n parser.add_argument('-d', '--dims', metavar='dims',\n action='store', dest='dims',\n default='[1.0, 1.0]', help=helps['dims'])\n parser.add_argument('-c', '--centre', metavar='centre',\n action='store', dest='centre',\n default='[0.0, 0.0]', help=helps['centre'])\n parser.add_argument('-s', '--shape', metavar='shape',\n action='store', dest='shape',\n default='[11, 11]', help=helps['shape'])\n parser.add_argument('-b', '--bc-kind', metavar='kind',\n action='store', dest='bc_kind',\n choices=['free', 'cantilever', 'fixed'],\n default='free', help=helps['bc_kind'])\n parser.add_argument('-a', '--axis', metavar='0, ..., dim, or -1',\n type=int, action='store', dest='axis',\n default=-1, help=helps['axis'])\n parser.add_argument('--young', metavar='float', type=float,\n action='store', dest='young',\n default=6.80e+10, help=helps['young'])\n parser.add_argument('--poisson', metavar='float', type=float,\n action='store', dest='poisson',\n default=0.36, help=helps['poisson'])\n parser.add_argument('--density', metavar='float', type=float,\n action='store', dest='density',\n default=2700.0, help=helps['density'])\n parser.add_argument('--order', metavar='int', type=int,\n action='store', dest='order',\n default=1, help=helps['order'])\n parser.add_argument('-n', '--n-eigs', metavar='int', type=int,\n action='store', dest='n_eigs',\n default=6, help=helps['n_eigs'])\n parser.add_argument('-i', '--ignore', metavar='int', type=int,\n action='store', dest='ignore',\n default=None, help=helps['ignore'])\n parser.add_argument('--solver', metavar='solver', action='store',\n dest='solver',\n default= \\\n \"eig.scipy,method:'eigh',tol:1e-5,maxiter:1000\",\n help=helps['solver'])\n parser.add_argument('--show',\n action=\"store_true\", dest='show',\n default=False, help=helps['show'])\n parser.add_argument('filename', nargs='?', default=None)\n options = parser.parse_args()\n\n aux = options.solver.split(',')\n kwargs = {}\n for option in aux[1:]:\n key, val = option.split(':')\n kwargs[key.strip()] = eval(val)\n eig_conf = Struct(name='evp', kind=aux[0], **kwargs)\n\n output('using values:')\n output(\" Young's modulus:\", options.young)\n output(\" Poisson's ratio:\", options.poisson)\n output(' density:', options.density)\n output('displacement field approximation order:', options.order)\n output('requested %d eigenvalues' % options.n_eigs)\n output('using eigenvalue problem solver:', eig_conf.kind)\n output.level += 1\n for key, val in six.iteritems(kwargs):\n output('%s: %r' % (key, val))\n output.level -= 1\n\n assert_((0.0 < options.poisson < 0.5),\n \"Poisson's ratio must be in ]0, 0.5[!\")\n assert_((0 < options.order),\n 'displacement approximation order must be at least 1!')\n\n filename = options.filename\n if filename is not None:\n mesh = Mesh.from_file(filename)\n dim = mesh.dim\n dims = nm.diff(mesh.get_bounding_box(), axis=0)\n\n else:\n dims = nm.array(eval(options.dims), dtype=nm.float64)\n dim = len(dims)\n\n centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim]\n shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim]\n\n output('dimensions:', dims)\n output('centre: ', centre)\n output('shape: ', shape)\n\n mesh = 
gen_block_mesh(dims, shape, centre, name='mesh')\n\n output('axis: ', options.axis)\n assert_((-dim <= options.axis < dim), 'invalid axis value!')\n\n eig_solver = Solver.any_from_conf(eig_conf)\n\n # Build the problem definition.\n domain = FEDomain('domain', mesh)\n\n bbox = domain.get_mesh_bounding_box()\n min_coor, max_coor = bbox[:, options.axis]\n eps = 1e-8 * (max_coor - min_coor)\n ax = 'xyz'[:dim][options.axis]\n\n omega = domain.create_region('Omega', 'all')\n bottom = domain.create_region('Bottom',\n 'vertices in (%s < %.10f)'\n % (ax, min_coor + eps),\n 'facet')\n bottom_top = domain.create_region('BottomTop',\n 'r.Bottom +v vertices in (%s > %.10f)'\n % (ax, max_coor - eps),\n 'facet')\n\n field = Field.from_args('fu', nm.float64, 'vector', omega,\n approx_order=options.order)\n\n u = FieldVariable('u', 'unknown', field)\n v = FieldVariable('v', 'test', field, primary_var_name='u')\n\n mtx_d = stiffness_from_youngpoisson(dim, options.young, options.poisson)\n\n m = Material('m', D=mtx_d, rho=options.density)\n\n integral = Integral('i', order=2*options.order)\n\n t1 = Term.new('dw_lin_elastic(m.D, v, u)', integral, omega, m=m, v=v, u=u)\n t2 = Term.new('dw_volume_dot(m.rho, v, u)', integral, omega, m=m, v=v, u=u)\n eq1 = Equation('stiffness', t1)\n eq2 = Equation('mass', t2)\n lhs_eqs = Equations([eq1, eq2])\n\n pb = Problem('modal', equations=lhs_eqs)\n\n if options.bc_kind == 'free':\n pb.time_update()\n n_rbm = dim * (dim + 1) // 2\n\n elif options.bc_kind == 'cantilever':\n fixed = EssentialBC('Fixed', bottom, {'u.all' : 0.0})\n pb.time_update(ebcs=Conditions([fixed]))\n n_rbm = 0\n\n elif options.bc_kind == 'fixed':\n fixed = EssentialBC('Fixed', bottom_top, {'u.all' : 0.0})\n pb.time_update(ebcs=Conditions([fixed]))\n n_rbm = 0\n\n else:\n raise ValueError('unsupported BC kind! (%s)' % options.bc_kind)\n\n if options.ignore is not None:\n n_rbm = options.ignore\n\n pb.update_materials()\n\n # Assemble stiffness and mass matrices.\n mtx_k = eq1.evaluate(mode='weak', dw_mode='matrix', asm_obj=pb.mtx_a)\n mtx_m = mtx_k.copy()\n mtx_m.data[:] = 0.0\n mtx_m = eq2.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_m)\n\n try:\n eigs, svecs = eig_solver(mtx_k, mtx_m, options.n_eigs + n_rbm,\n eigenvectors=True)\n\n except sla.ArpackNoConvergence as ee:\n eigs = ee.eigenvalues\n svecs = ee.eigenvectors\n output('only %d eigenvalues converged!' 
% len(eigs))\n\n output('%d eigenvalues converged (%d ignored as rigid body modes)' %\n (len(eigs), n_rbm))\n\n eigs = eigs[n_rbm:]\n svecs = svecs[:, n_rbm:]\n\n omegas = nm.sqrt(eigs)\n freqs = omegas / (2 * nm.pi)\n\n output('number | eigenvalue | angular frequency '\n '| frequency')\n for ii, eig in enumerate(eigs):\n output('%6d | %17.12e | %17.12e | %17.12e'\n % (ii + 1, eig, omegas[ii], freqs[ii]))\n\n # Make full eigenvectors (add DOFs fixed by boundary conditions).\n variables = pb.get_variables()\n\n vecs = nm.empty((variables.di.ptr[-1], svecs.shape[1]),\n dtype=nm.float64)\n for ii in range(svecs.shape[1]):\n vecs[:, ii] = variables.make_full_vec(svecs[:, ii])\n\n # Save the eigenvectors.\n out = {}\n state = pb.create_state()\n for ii in range(eigs.shape[0]):\n state.set_full(vecs[:, ii])\n aux = state.create_output_dict()\n strain = pb.evaluate('ev_cauchy_strain.i.Omega(u)',\n integrals=Integrals([integral]),\n mode='el_avg', verbose=False)\n out['u%03d' % ii] = aux.popitem()[1]\n out['strain%03d' % ii] = Struct(mode='cell', data=strain)\n\n pb.save_state('eigenshapes.vtk', out=out)\n pb.save_regions_as_groups('regions')\n\n if len(eigs) and options.show:\n # Show the solution. If the approximation order is greater than 1, the\n # extra DOFs are simply thrown away.\n from sfepy.postprocess.viewer import Viewer\n from sfepy.postprocess.domain_specific import DomainSpecificPlot\n\n scaling = 0.05 * dims.max() / nm.abs(vecs).max()\n\n ds = {}\n for ii in range(eigs.shape[0]):\n pd = DomainSpecificPlot('plot_displacements',\n ['rel_scaling=%s' % scaling,\n 'color_kind=\"tensors\"',\n 'color_name=\"strain%03d\"' % ii])\n ds['u%03d' % ii] = pd\n\n view = Viewer('eigenshapes.vtk')\n view(domain_specific=ds, only_names=sorted(ds.keys()),\n is_scalar_bar=False, is_wireframe=True)\n\nif __name__ == '__main__':\n main()\n",
"r\"\"\"\nIncompressible Stokes flow with Navier (slip) boundary conditions, flow driven\nby a moving wall and a small diffusion for stabilization.\n\nThis example demonstrates the use of `no-penetration` boundary conditions as\nwell as `edge direction` boundary conditions together with Navier or slip\nboundary conditions.\n\nFind :math:`\\ul{u}`, :math:`p` such that:\n\n.. math::\n \\int_{\\Omega} \\nu\\ \\nabla \\ul{v} : \\nabla \\ul{u}\n - \\int_{\\Omega} p\\ \\nabla \\cdot \\ul{v}\n + \\int_{\\Gamma_1} \\beta \\ul{v} \\cdot (\\ul{u} - \\ul{u}_d)\n + \\int_{\\Gamma_2} \\beta \\ul{v} \\cdot \\ul{u}\n = 0\n \\;, \\quad \\forall \\ul{v} \\;,\n\n \\int_{\\Omega} \\mu \\nabla q \\cdot \\nabla p\n + \\int_{\\Omega} q\\ \\nabla \\cdot \\ul{u}\n = 0\n \\;, \\quad \\forall q \\;,\n\nwhere :math:`\\nu` is the fluid viscosity, :math:`\\beta` is the slip\ncoefficient, :math:`\\mu` is the (small) numerical diffusion coefficient,\n:math:`\\Gamma_1` is the top wall that moves with the given driving velocity\n:math:`\\ul{u}_d` and :math:`\\Gamma_2` are the remaining walls. The Navier\nconditions are in effect on both :math:`\\Gamma_1`, :math:`\\Gamma_2` and are\nexpressed by the corresponding integrals in the equations above.\n\nThe `no-penetration` boundary conditions are applied on :math:`\\Gamma_1`,\n:math:`\\Gamma_2`, except the vertices of the block edges, where the `edge\ndirection` boundary conditions are applied. Optionally, Dirichlet boundary\nconditions can be applied on the inlet, see the code below.\n\nThe mesh is created by ``gen_block_mesh()`` function - try different mesh\ndimensions and resolutions below. For large meshes use the ``'ls_i'`` linear\nsolver - PETSc + petsc4py is needed in that case.\n\nSee also :ref:`navier_stokes-stokes_slip_bc_penalty`.\n\"\"\"\nfrom __future__ import absolute_import\nimport numpy as nm\n\nfrom sfepy.discrete.fem.meshio import UserMeshIO\nfrom sfepy.mesh.mesh_generators import gen_block_mesh\nfrom sfepy.homogenization.utils import define_box_regions\n\n# Mesh dimensions.\ndims = nm.array([3, 1, 0.5])\n\n# Mesh resolution: increase to improve accuracy.\nshape = [11, 15, 15]\n\ndef mesh_hook(mesh, mode):\n \"\"\"\n Generate the block mesh.\n \"\"\"\n if mode == 'read':\n mesh = gen_block_mesh(dims, shape, [0, 0, 0], name='user_block',\n verbose=False)\n return mesh\n\n elif mode == 'write':\n pass\n\nfilename_mesh = UserMeshIO(mesh_hook)\n\nregions = define_box_regions(3, 0.5 * dims)\nregions.update({\n 'Omega' : 'all',\n 'Edges_v' : (\"\"\"(r.Near *v r.Bottom) +v\n (r.Bottom *v r.Far) +v\n (r.Far *v r.Top) +v\n (r.Top *v r.Near)\"\"\", 'edge'),\n 'Gamma1_f' : ('copy r.Top', 'face'),\n 'Gamma2_f' : ('r.Near +v r.Bottom +v r.Far', 'face'),\n 'Gamma_f' : ('r.Gamma1_f +v r.Gamma2_f', 'face'),\n 'Gamma_v' : ('r.Gamma_f -v r.Edges_v', 'face'),\n 'Inlet_f' : ('r.Left -v r.Gamma_f', 'face'),\n})\n\nfields = {\n 'velocity' : ('real', 3, 'Omega', 1),\n 'pressure' : ('real', 1, 'Omega', 1),\n}\n\ndef get_u_d(ts, coors, region=None):\n \"\"\"\n Given stator velocity.\n \"\"\"\n out = nm.zeros_like(coors)\n out[:] = [1.0, 1.0, 0.0]\n\n return out\n\nfunctions = {\n 'get_u_d' : (get_u_d,),\n}\n\nvariables = {\n 'u' : ('unknown field', 'velocity', 0),\n 'v' : ('test field', 'velocity', 'u'),\n 'u_d' : ('parameter field', 'velocity',\n {'setter' : 'get_u_d'}),\n 'p' : ('unknown field', 'pressure', 1),\n 'q' : ('test field', 'pressure', 'p'),\n}\n\n# Try setting the inlet velocity by un-commenting the 'inlet' ebcs.\nebcs = {\n ## 'inlet' : ('Inlet_f', {'u.0' : 1.0, 
'u.[1, 2]' : 0.0}),\n}\n\nlcbcs = {\n 'walls' : ('Gamma_v', {'u.all' : None}, None, 'no_penetration',\n 'normals_Gamma.vtk'),\n 'edges' : ('Edges_v', [(-0.5, 1.5)], {'u.all' : None}, None,\n 'edge_direction', 'edges_Edges.vtk'),\n}\n\nmaterials = {\n 'm' : ({\n 'nu' : 1e-3,\n 'beta' : 1e-2,\n 'mu' : 1e-10,\n },),\n}\n\nequations = {\n 'balance' :\n \"\"\"dw_div_grad.5.Omega(m.nu, v, u)\n - dw_stokes.5.Omega(v, p)\n + dw_surface_dot.5.Gamma1_f(m.beta, v, u)\n + dw_surface_dot.5.Gamma2_f(m.beta, v, u)\n =\n + dw_surface_dot.5.Gamma1_f(m.beta, v, u_d)\"\"\",\n 'incompressibility' :\n \"\"\"dw_laplace.5.Omega(m.mu, q, p)\n + dw_stokes.5.Omega(u, q) = 0\"\"\",\n}\n\nsolvers = {\n 'ls_d' : ('ls.scipy_direct', {}),\n 'ls_i' : ('ls.petsc', {\n 'method' : 'bcgsl', # ksp_type\n 'precond' : 'bjacobi', # pc_type\n 'sub_precond' : 'ilu', # sub_pc_type\n 'eps_a' : 0.0, # abstol\n 'eps_r' : 1e-12, # rtol\n 'eps_d' : 1e10, # Divergence tolerance.\n 'i_max' : 2500, # maxits\n }),\n 'newton' : ('nls.newton', {\n 'i_max' : 1,\n 'eps_a' : 1e-10,\n }),\n}\n\noptions = {\n 'nls' : 'newton',\n 'ls' : 'ls_d',\n}\n"
] | [
[
"numpy.asarray"
],
[
"numpy.array",
"numpy.zeros_like"
],
[
"numpy.eye",
"numpy.array",
"numpy.zeros",
"numpy.tile"
],
[
"numpy.ascontiguousarray",
"numpy.array",
"numpy.ones"
],
[
"numpy.abs",
"numpy.sqrt",
"numpy.empty"
],
[
"numpy.array",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fukien/incubator-singa | [
"ced9e9d44c200d709db5a2354076390788986b77"
] | [
"setup.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n'''Script for building wheel package for installing singa via pip.\n\nThis script must be launched at the root dir of the singa project \ninside the docker container created via tool/docker/devel/centos/cudaxx/Dockerfile.manylinux2014.\n\n # launch docker container\n $ nvidia-docker run -v <local singa dir>:/root/singa -it apache/singa:manylinux2014-cuda10.2\n # build the wheel packag; replace cp36-cp36m to compile singa for other py version\n $ /opt/python/cp36-cp36m/bin/python setup.py bdist_wheel\n $ /opt/python/cp37-cp37m/bin/python setup.py bdist_wheel\n $ /opt/python/cp38-cp38/bin/python setup.py bdist_wheel\n\nThe generted wheel file should be repaired by the auditwheel tool to make it \ncompatible with PEP513. Otherwise, the dependent libs will not be included in\nthe wheel package and the wheel file will be rejected by PYPI website during\nuploading due to file name error.\n\n # repair the wheel pakage and upload to pypi\n $ /opt/python/cp36-cp36m/bin/python setup.py audit\n\nFor the Dockerfile with CUDA and CUDNN installed, the CUDA version and \nCUDNN version are exported as environment variable: CUDA_VERSION, CUDNN_VERSION.\nYou can control the script to build CUDA enabled singa package by exporting\nSINGA_CUDA=ON; otherwise the CPU only package will be built.\n\n\nRef: \n[1] https://github.com/bytedance/byteps/blob/master/setup.py\n[2] https://setuptools.readthedocs.io/en/latest/setuptools.html\n[3] https://packaging.python.org/tutorials/packaging-projects/ \n'''\n\nfrom setuptools import find_packages, setup, Command, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.errors import CompileError, DistutilsSetupError\n\nimport os\nimport io\nimport sys\nimport subprocess\nimport shutil\nimport shlex\nfrom pathlib import Path\n\nimport numpy as np\n\nNAME = 'singa'\n'''\nPypi does not allow you to overwrite the uploaded package;\ntherefore, you have to bump the version.\nPypi does not allow [local version label](https://www.python.org/dev/peps/pep-0440/#local-version-segments) \nto appear in the version, therefore, you have to include the public \nversion label only. 
Currently, due to the pypi size limit, the package \nuploaded to pypi is cpu only (without cuda and cudnn), which can be installed via\n \n $ pip install singa\n $ pip install singa=3.0.0.dev1\n\nThe cuda and cudnn enabled package's version consists of the public \nversion label + local version label, e.g., 3.0.0.dev1+cuda10.2, which\ncan be installed via\n\n $ pip install singa=3.0.0.dev1+cuda10.2 -f <url of the repo>\n\n'''\nfrom datetime import date\n\n# stable version\nVERSION = '3.1.0.rc1'\n# get the git hash\n# git_hash = subprocess.check_output([\"git\", \"describe\"]).strip().split('-')[-1][1:]\n# comment the next line to build wheel for stable version\n# VERSION += '.dev' + date.today().strftime('%y%m%d')\n\nSINGA_PY = Path('python')\nSINGA_SRC = Path('src')\nSINGA_HDR = Path('include')\n\n\nclass AuditCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = 'Repair the package via auditwheel tool.'\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\033[1m{0}\\033[0m'.format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n self.status('Removing previous wheel files under wheelhouse')\n shutil.rmtree('wheelhouse', ignore_errors=True)\n for wheel in os.listdir('dist'):\n self.status('Repair the dist/{} via auditwheel'.format(wheel))\n os.system('auditwheel repair dist/{}'.format(wheel))\n\n # self.status('Uploading the package to PyPI via Twine…')\n # os.system('{} -m twine upload dist/*'.format(sys.executable))\n sys.exit()\n\n\ndef parse_compile_options():\n '''Read the environment variables to parse the compile options.\n\n Returns:\n a tuple of bool values as the indicators\n '''\n with_cuda = os.environ.get('SINGA_CUDA', False)\n with_nccl = os.environ.get('SINGA_NCCL', False)\n with_test = os.environ.get('SINGA_TEST', False)\n with_debug = os.environ.get('SINGA_DEBUG', False)\n\n return with_cuda, with_nccl, with_test, with_debug\n\n\ndef generate_singa_config(with_cuda, with_nccl):\n '''Generate singa_config.h file to define some macros for the cpp code.\n\n Args:\n with_cuda(bool): indicator for cudnn and cuda lib\n with_nccl(bool): indicator for nccl lib\n '''\n config = ['#define USE_CBLAS', '#define USE_GLOG', '#define USE_DNNL']\n if not with_cuda:\n config.append('#define CPU_ONLY')\n else:\n config.append('#define USE_CUDA')\n config.append('#define USE_CUDNN')\n\n if with_nccl:\n config.append('#define ENABLE_DIST')\n config.append('#define USE_DIST')\n\n # singa_config.h to be included by cpp code\n cpp_conf_path = SINGA_HDR / 'singa/singa_config.h'\n print('Writing configs to {}'.format(cpp_conf_path))\n with cpp_conf_path.open('w') as fd:\n for line in config:\n fd.write(line + '\\n')\n versions = [int(x) for x in VERSION.split('+')[0].split('.')[:3]]\n fd.write('#define SINGA_MAJOR_VERSION {}\\n'.format(versions[0]))\n fd.write('#define SINGA_MINOR_VERSION {}\\n'.format(versions[1]))\n fd.write('#define SINGA_PATCH_VERSION {}\\n'.format(versions[2]))\n fd.write('#define SINGA_VERSION \"{}\"\\n'.format(VERSION))\n\n # config.i to be included by swig files\n swig_conf_path = SINGA_SRC / 'api/config.i'\n with swig_conf_path.open('w') as fd:\n for line in config:\n fd.write(line + ' 1 \\n')\n\n fd.write('#define USE_PYTHON 1\\n')\n if not with_nccl:\n fd.write('#define USE_DIST 0\\n')\n if not with_cuda:\n fd.write('#define USE_CUDA 0\\n')\n fd.write('#define USE_CUDNN 0\\n')\n else:\n fd.write('#define CUDNN_VERSION \"{}\"\\n'.format(\n 
os.environ.get('CUDNN_VERSION')))\n versions = [int(x) for x in VERSION.split('+')[0].split('.')[:3]]\n fd.write('#define SINGA_MAJOR_VERSION {}\\n'.format(versions[0]))\n fd.write('#define SINGA_MINOR_VERSION {}\\n'.format(versions[1]))\n fd.write('#define SINGA_PATCH_VERSION {}\\n'.format(versions[2]))\n fd.write('#define SINGA_VERSION \"{}\"\\n'.format(VERSION))\n\n\ndef get_cpp_flags():\n default_flags = ['-std=c++11', '-fPIC', '-g', '-O2', '-Wall', '-pthread']\n # avx_flags = [ '-mavx'] #'-mf16c',\n if sys.platform == 'darwin':\n # Darwin most likely will have Clang, which has libc++.\n return default_flags + ['-stdlib=libc++']\n else:\n return default_flags\n\n\ndef generate_proto_files():\n print('----------------------')\n print('Generating proto files')\n print('----------------------')\n proto_src = SINGA_SRC / 'proto'\n cmd = \"/usr/bin/protoc --proto_path={} --cpp_out={} {}\".format(\n proto_src, proto_src, proto_src / 'core.proto')\n subprocess.run(cmd, shell=True, check=True)\n\n proto_hdr_dir = SINGA_HDR / 'singa/proto'\n proto_hdr_file = proto_hdr_dir / 'core.pb.h'\n if proto_hdr_dir.exists():\n if proto_hdr_file.exists():\n proto_hdr_file.unlink()\n else:\n proto_hdr_dir.mkdir()\n\n shutil.copyfile(Path(proto_src / 'core.pb.h'), proto_hdr_file)\n return proto_hdr_file, proto_src / 'core.pb.cc'\n\n\ndef path_to_str(path_list):\n return [str(x) if not isinstance(x, str) else x for x in path_list]\n\n\ndef prepare_extension_options():\n with_cuda, with_nccl, with_test, with_debug = parse_compile_options()\n\n generate_singa_config(with_cuda, with_nccl)\n generate_proto_files()\n\n link_libs = ['glog', 'protobuf', 'openblas', 'dnnl']\n\n sources = path_to_str([\n *list((SINGA_SRC / 'core').rglob('*.cc')), *list(\n (SINGA_SRC / 'model/operation').glob('*.cc')), *list(\n (SINGA_SRC / 'utils').glob('*.cc')),\n SINGA_SRC / 'proto/core.pb.cc', SINGA_SRC / 'api/singa.i'\n ])\n include_dirs = path_to_str([\n SINGA_HDR, SINGA_HDR / 'singa/proto',\n np.get_include(), '/usr/include', '/usr/include/openblas',\n '/usr/local/include'\n ])\n\n try:\n np_include = np.get_include()\n except AttributeError:\n np_include = np.get_numpy_include()\n include_dirs.append(np_include)\n\n library_dirs = [] # path_to_str(['/usr/lib64', '/usr/local/lib'])\n\n if with_cuda:\n link_libs.extend(['cudart', 'cudnn', 'curand', 'cublas', 'cnmem'])\n include_dirs.append('/usr/local/cuda/include')\n library_dirs.append('/usr/local/cuda/lib64')\n sources.append(str(SINGA_SRC / 'core/tensor/math_kernel.cu'))\n if with_nccl:\n link_libs.extend(['nccl', 'cusparse', 'mpicxx', 'mpi'])\n sources.append(str(SINGA_SRC / 'io/communicator.cc'))\n # print(link_libs, extra_libs)\n\n libraries = link_libs\n runtime_library_dirs = ['.'] + library_dirs\n extra_compile_args = {'gcc': get_cpp_flags()}\n\n if with_cuda:\n cuda9_gencode = (' -gencode arch=compute_35,code=sm_35'\n ' -gencode arch=compute_50,code=sm_50'\n ' -gencode arch=compute_60,code=sm_60'\n ' -gencode arch=compute_70,code=sm_70')\n cuda10_gencode = ' -gencode arch=compute_75,code=sm_75'\n cuda11_gencode = ' -gencode arch=compute_80,code=sm_80'\n cuda9_ptx = ' -gencode arch=compute_70,code=compute_70'\n cuda10_ptx = ' -gencode arch=compute_75,code=compute_75'\n cuda11_ptx = ' -gencode arch=compute_80,code=compute_80'\n if cuda_major >= 11:\n gencode = cuda9_gencode + cuda10_gencode + cuda11_gencode + cuda11_ptx\n elif cuda_major >= 10:\n gencode = cuda9_gencode + cuda10_gencode + cuda10_ptx\n elif cuda_major >= 9:\n gencode = cuda9_gencode + cuda9_ptx\n else:\n 
raise CompileError(\n 'CUDA version must be >=9.0, the current version is {}'.format(\n cuda_major))\n\n extra_compile_args['nvcc'] = shlex.split(gencode) + [\n '-Xcompiler', '-fPIC'\n ]\n options = {\n 'sources': sources,\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'libraries': libraries,\n 'runtime_library_dirs': runtime_library_dirs,\n 'extra_compile_args': extra_compile_args\n }\n\n return options\n\n\n# credit: https://github.com/rmcgibbo/npcuda-example/blob/master/cython/setup.py#L55\ndef customize_compiler_for_nvcc(self):\n \"\"\"Inject deep into distutils to customize how the dispatch\n to gcc/nvcc works.\n If you subclass UnixCCompiler, it's not trivial to get your subclass\n injected in, and still have the right customizations (i.e.\n distutils.sysconfig.customize_compiler) run on it. So instead of going\n the OO route, I have this. Note, it's kindof like a wierd functional\n subclassing going on.\n \"\"\"\n\n # Tell the compiler it can processes .cu\n self.src_extensions.append('.cu')\n\n # Save references to the default compiler_so and _comple methods\n default_compiler_so = self.compiler_so\n super = self._compile\n\n # Now redefine the _compile method. This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if os.path.splitext(src)[1] == '.cu':\n # use the cuda for .cu files\n self.set_executable('compiler_so', 'nvcc')\n # use only a subset of the extra_postargs, which are 1-1\n # translated from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n postargs = extra_postargs['gcc']\n\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # Reset the default compiler_so, which we might have changed for cuda\n self.compiler_so = default_compiler_so\n\n # Inject our redefined _compile method into the class\n self._compile = _compile\n\n\nclass custom_build_ext(build_ext):\n '''Customize the process for building the extension by chaning \n the options for compiling swig files and cu files.\n\n Ref: https://github.com/python/cpython/blob/master/Lib/distutils/command/build_ext.py\n '''\n\n def finalize_options(self):\n self.swig_cpp = True\n print('build temp', self.build_temp)\n print('build lib', self.build_lib)\n super(custom_build_ext, self).finalize_options()\n self.swig_opts = '-py3 -outdir {}/singa/'.format(self.build_lib).split()\n print('build temp', self.build_temp)\n print('build lib', self.build_lib)\n\n def build_extensions(self):\n options = prepare_extension_options()\n for key, val in options.items():\n singa_wrap.__dict__[key] = val\n customize_compiler_for_nvcc(self.compiler)\n build_ext.build_extensions(self)\n\n\ntry:\n with io.open('README.md', encoding='utf-8') as f:\n long_description = '\\n' + f.read()\nexcept OSError:\n long_description = ''\n\nclassifiers = [\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'License :: OSI Approved :: Apache Software License',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'\n]\nif sys.platform == 'darwin':\n classifiers.append('Operating System :: MacOS :: MacOS X')\nelif sys.platform == 'linux':\n 'Operating System :: POSIX :: Linux'\nelse:\n raise 
DistutilsSetupError('Building on Windows is not supported currently.')\n\nkeywords = 'deep learning, apache singa'\nwith_cuda, with_nccl, _, _ = parse_compile_options()\nif with_cuda:\n classifiers.append('Environment :: GPU :: NVIDIA CUDA')\n cuda_version = os.environ.get('CUDA_VERSION')\n cudnn_version = os.environ.get('CUDNN_VERSION')\n keywords += ', cuda{}, cudnn{}'.format(cuda_version, cudnn_version)\n cuda_major = int(cuda_version.split('.')[0])\n cuda_minor = int(cuda_version.split('.')[1])\n # local label '+cuda10.2'. Ref: https://www.python.org/dev/peps/pep-0440/\n VERSION = VERSION + '+cuda{}.{}'.format(cuda_major, cuda_minor)\n if with_nccl:\n classifiers.append('Topic :: System :: Distributed Computing')\n keywords += ', distributed'\nelse:\n keywords += ', cpu-only'\n\nsinga_wrap = Extension('singa._singa_wrap', [])\n\nsetup(\n name=NAME,\n version=VERSION,\n description='A General Deep Learning System',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Apache SINGA Community',\n author_email='[email protected]',\n url='http://singa.apache.org',\n python_requires='>=3',\n install_requires=[\n 'numpy >=1.16,<2.0', #1.16\n 'onnx==1.6',\n 'deprecated',\n 'unittest-xml-reporting',\n 'future',\n 'pillow',\n 'tqdm',\n ],\n include_package_data=True,\n license='Apache 2',\n classifiers=classifiers,\n keywords=keywords,\n packages=find_packages('python'),\n package_dir={'': 'python'},\n ext_modules=[singa_wrap],\n cmdclass={\n 'build_ext': custom_build_ext,\n 'audit': AuditCommand\n })\n"
] | [
[
"numpy.get_numpy_include",
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wfehrnstrom/harmonize | [
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"1985d4c73fabd5f08f54b922e73a9306e09c77a5",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3",
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"3f1fdcb7693ff152f17623ce549526ec272698b1",
"1985d4c73fabd5f08f54b922e73a9306e09c77a5",
"e5661d24b2021739e8ac4bf1d3a530eda4e155b3"
] | [
"lib/python2.7/site-packages/scipy/special/basic.py",
"lib/python2.7/site-packages/sklearn/cross_decomposition/pls_.py",
"lib/python2.7/site-packages/sklearn/datasets/covtype.py",
"lib/python2.7/site-packages/sklearn/neighbors/tests/test_approximate.py",
"lib/python2.7/site-packages/matplotlib/tests/test_path.py",
"lib/python2.7/site-packages/matplotlib/legend_handler.py",
"lib/python2.7/site-packages/sklearn/linear_model/bayes.py",
"lib/python2.7/site-packages/scipy/special/orthogonal.py",
"lib/python2.7/site-packages/matplotlib/cm.py",
"lib/python2.7/site-packages/numpy/linalg/linalg.py",
"lib/python2.7/site-packages/scipy/_lib/_numpy_compat.py",
"lib/python2.7/site-packages/numpy/distutils/conv_template.py",
"lib/python2.7/site-packages/matplotlib/tests/test_container.py",
"lib/python2.7/site-packages/sklearn/utils/tests/test_graph.py",
"lib/python2.7/site-packages/sklearn/feature_selection/univariate_selection.py",
"lib/python2.7/site-packages/numpy/fft/helper.py",
"lib/python2.7/site-packages/matplotlib/backends/qt_compat.py",
"lib/python2.7/site-packages/sklearn/linear_model/ransac.py",
"lib/python2.7/site-packages/matplotlib/tests/test_dates.py",
"lib/python2.7/site-packages/matplotlib/tests/test_texmanager.py",
"lib/python2.7/site-packages/sklearn/covariance/outlier_detection.py",
"lib/python2.7/site-packages/scipy/spatial/__init__.py",
"lib/python2.7/site-packages/scipy/signal/_savitzky_golay.py",
"lib/python2.7/site-packages/sklearn/datasets/tests/test_mldata.py",
"lib/python2.7/site-packages/sklearn/model_selection/tests/test_search.py"
] | [
"#\n# Author: Travis Oliphant, 2002\n#\n\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nimport math\nfrom scipy._lib.six import xrange\nfrom numpy import (pi, asarray, floor, isscalar, iscomplex, real,\n imag, sqrt, where, mgrid, sin, place, issubdtype,\n extract, less, inexact, nan, zeros, sinc)\nfrom . import _ufuncs as ufuncs\nfrom ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,\n psi, _zeta, hankel1, hankel2, yv, kv, _gammaln,\n ndtri, poch, binom, hyp0f1)\nfrom . import specfun\nfrom . import orthogonal\nfrom ._comb import _comb_int\n\n\n__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',\n 'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',\n 'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',\n 'erf_zeros', 'erfcinv', 'erfinv', 'euler', 'factorial',\n 'factorialk', 'factorial2', 'fresnel_zeros',\n 'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',\n 'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',\n 'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',\n 'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',\n 'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',\n 'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',\n 'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',\n 'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',\n 'sinc', 'sph_in', 'sph_inkn',\n 'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',\n 'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta']\n\n\ndef diric(x, n):\n \"\"\"Periodic sinc function, also called the Dirichlet function.\n\n The Dirichlet function is defined as::\n\n diric(x) = sin(x * n/2) / (n * sin(x / 2)),\n\n where `n` is a positive integer.\n\n Parameters\n ----------\n x : array_like\n Input data\n n : int\n Integer defining the periodicity.\n\n Returns\n -------\n diric : ndarray\n\n Examples\n --------\n >>> from scipy import special\n >>> import matplotlib.pyplot as plt\n\n >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)\n >>> plt.figure(figsize=(8, 8));\n >>> for idx, n in enumerate([2, 3, 4, 9]):\n ... plt.subplot(2, 2, idx+1)\n ... plt.plot(x, special.diric(x, n))\n ... plt.title('diric, n={}'.format(n))\n >>> plt.show()\n\n The following example demonstrates that `diric` gives the magnitudes\n (modulo the sign and scaling) of the Fourier coefficients of a\n rectangular pulse.\n\n Suppress output of values that are effectively 0:\n\n >>> np.set_printoptions(suppress=True)\n\n Create a signal `x` of length `m` with `k` ones:\n\n >>> m = 8\n >>> k = 3\n >>> x = np.zeros(m)\n >>> x[:k] = 1\n\n Use the FFT to compute the Fourier transform of `x`, and\n inspect the magnitudes of the coefficients:\n\n >>> np.abs(np.fft.fft(x))\n array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,\n 0.41421356, 1. , 2.41421356])\n\n Now find the same values (up to sign) using `diric`. We multiply\n by `k` to account for the different scaling conventions of\n `numpy.fft.fft` and `diric`:\n\n >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)\n >>> k * special.diric(theta, k)\n array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,\n -0.41421356, 1. 
, 2.41421356])\n \"\"\"\n x, n = asarray(x), asarray(n)\n n = asarray(n + (x-x))\n x = asarray(x + (n-n))\n if issubdtype(x.dtype, inexact):\n ytype = x.dtype\n else:\n ytype = float\n y = zeros(x.shape, ytype)\n\n # empirical minval for 32, 64 or 128 bit float computations\n # where sin(x/2) < minval, result is fixed at +1 or -1\n if np.finfo(ytype).eps < 1e-18:\n minval = 1e-11\n elif np.finfo(ytype).eps < 1e-15:\n minval = 1e-7\n else:\n minval = 1e-3\n\n mask1 = (n <= 0) | (n != floor(n))\n place(y, mask1, nan)\n\n x = x / 2\n denom = sin(x)\n mask2 = (1-mask1) & (abs(denom) < minval)\n xsub = extract(mask2, x)\n nsub = extract(mask2, n)\n zsub = xsub / pi\n place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))\n\n mask = (1-mask1) & (1-mask2)\n xsub = extract(mask, x)\n nsub = extract(mask, n)\n dsub = extract(mask, denom)\n place(y, mask, sin(nsub*xsub)/(nsub*dsub))\n return y\n\n\ndef gammaln(x):\n \"\"\"\n Logarithm of the absolute value of the Gamma function for real inputs.\n\n Parameters\n ----------\n x : array-like\n Values on the real line at which to compute ``gammaln``\n\n Returns\n -------\n gammaln : ndarray\n Values of ``gammaln`` at x.\n\n See Also\n --------\n gammasgn : sign of the gamma function\n loggamma : principal branch of the logarithm of the gamma function\n\n Notes\n -----\n When used in conjunction with `gammasgn`, this function is useful\n for working in logspace on the real axis without having to deal with\n complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.\n\n Note that `gammaln` currently accepts complex-valued inputs, but it is not\n the same function as for real-valued inputs, and the branch is not\n well-defined --- using `gammaln` with complex is deprecated and will be\n disallowed in future Scipy versions.\n\n For complex-valued log-gamma, use `loggamma` instead of `gammaln`.\n\n \"\"\"\n if np.iscomplexobj(x):\n warnings.warn((\"Use of gammaln for complex arguments is \"\n \"deprecated as of scipy 0.18.0. Use \"\n \"scipy.special.loggamma instead.\"),\n DeprecationWarning)\n return _gammaln(x)\n\n\ndef jnjnp_zeros(nt):\n \"\"\"Compute zeros of integer-order Bessel functions Jn and Jn'.\n\n Results are arranged in order of the magnitudes of the zeros.\n\n Parameters\n ----------\n nt : int\n Number (<=1200) of zeros to compute\n\n Returns\n -------\n zo[l-1] : ndarray\n Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.\n n[l-1] : ndarray\n Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.\n m[l-1] : ndarray\n Serial number of the zeros of Jn(x) or Jn'(x) associated\n with lth zero. Of length `nt`.\n t[l-1] : ndarray\n 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of\n length `nt`.\n\n See Also\n --------\n jn_zeros, jnp_zeros : to get separated arrays of zeros.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):\n raise ValueError(\"Number must be integer <= 1200.\")\n nt = int(nt)\n n, m, t, zo = specfun.jdzo(nt)\n return zo[1:nt+1], n[:nt], m[:nt], t[:nt]\n\n\ndef jnyn_zeros(n, nt):\n \"\"\"Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).\n\n Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of\n Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.\n\n Parameters\n ----------\n n : int\n Order of the Bessel functions\n nt : int\n Number (<=1200) of zeros to compute\n\n See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(nt) and isscalar(n)):\n raise ValueError(\"Arguments must be scalars.\")\n if (floor(n) != n) or (floor(nt) != nt):\n raise ValueError(\"Arguments must be integers.\")\n if (nt <= 0):\n raise ValueError(\"nt > 0\")\n return specfun.jyzo(abs(n), nt)\n\n\ndef jn_zeros(n, nt):\n \"\"\"Compute zeros of integer-order Bessel function Jn(x).\n\n Parameters\n ----------\n n : int\n Order of Bessel function\n nt : int\n Number of zeros to return\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n return jnyn_zeros(n, nt)[0]\n\n\ndef jnp_zeros(n, nt):\n \"\"\"Compute zeros of integer-order Bessel function derivative Jn'(x).\n\n Parameters\n ----------\n n : int\n Order of Bessel function\n nt : int\n Number of zeros to return\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n return jnyn_zeros(n, nt)[1]\n\n\ndef yn_zeros(n, nt):\n \"\"\"Compute zeros of integer-order Bessel function Yn(x).\n\n Parameters\n ----------\n n : int\n Order of Bessel function\n nt : int\n Number of zeros to return\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n return jnyn_zeros(n, nt)[2]\n\n\ndef ynp_zeros(n, nt):\n \"\"\"Compute zeros of integer-order Bessel function derivative Yn'(x).\n\n Parameters\n ----------\n n : int\n Order of Bessel function\n nt : int\n Number of zeros to return\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n return jnyn_zeros(n, nt)[3]\n\n\ndef y0_zeros(nt, complex=False):\n \"\"\"Compute nt zeros of Bessel function Y0(z), and derivative at each zero.\n\n The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.\n\n Parameters\n ----------\n nt : int\n Number of zeros to return\n complex : bool, default False\n Set to False to return only the real zeros; set to True to return only\n the complex zeros with negative real part and positive imaginary part.\n Note that the complex conjugates of the latter are also zeros of the\n function, but are not returned by this routine.\n\n Returns\n -------\n z0n : ndarray\n Location of nth zero of Y0(z)\n y0pz0n : ndarray\n Value of derivative Y0'(z0) for nth zero\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"Arguments must be scalar positive integer.\")\n kf = 0\n kc = not complex\n return specfun.cyzo(nt, kf, kc)\n\n\ndef y1_zeros(nt, complex=False):\n \"\"\"Compute nt zeros of Bessel function Y1(z), and derivative at each zero.\n\n The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.\n\n Parameters\n ----------\n nt : int\n Number of zeros to return\n complex : bool, default False\n Set to False to return only the real zeros; set to True to return only\n the complex zeros with negative real part and positive imaginary part.\n Note that the complex conjugates of the latter are also zeros of the\n function, but are not returned by this routine.\n\n Returns\n -------\n z1n : ndarray\n Location of nth zero of Y1(z)\n y1pz1n : ndarray\n Value of derivative Y1'(z1) for nth zero\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"Arguments must be scalar positive integer.\")\n kf = 1\n kc = not complex\n return specfun.cyzo(nt, kf, kc)\n\n\ndef y1p_zeros(nt, complex=False):\n \"\"\"Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.\n\n The values are given by Y1(z1) at each z1 where Y1'(z1)=0.\n\n Parameters\n ----------\n nt : int\n Number of zeros to return\n complex : bool, default False\n Set to False to return only the real zeros; set to True to return only\n the complex zeros with negative real part and positive imaginary part.\n Note that the complex conjugates of the latter are also zeros of the\n function, but are not returned by this routine.\n\n Returns\n -------\n z1pn : ndarray\n Location of nth zero of Y1'(z)\n y1z1pn : ndarray\n Value of derivative Y1(z1) for nth zero\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"Arguments must be scalar positive integer.\")\n kf = 2\n kc = not complex\n return specfun.cyzo(nt, kf, kc)\n\n\ndef _bessel_diff_formula(v, z, n, L, phase):\n # from AMS55.\n # L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1\n # L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1\n # For K, you can pull out the exp((v-k)*pi*i) into the caller\n v = asarray(v)\n p = 1.0\n s = L(v-n, z)\n for i in xrange(1, n+1):\n p = phase * (p * (n-i+1)) / i # = choose(k, i)\n s += p*L(v-n + i*2, z)\n return s / (2.**n)\n\n\nbessel_diff_formula = np.deprecate(_bessel_diff_formula,\n message=\"bessel_diff_formula is a private function, do not use it!\")\n\n\ndef jvp(v, z, n=1):\n \"\"\"Compute nth derivative of Bessel function Jv(z) with respect to `z`.\n\n Parameters\n ----------\n v : float\n Order of Bessel function\n z : complex\n Argument at which to evaluate the derivative\n n : int, default 1\n Order of derivative\n\n Notes\n -----\n The derivative is computed using the relation DLFM 10.6.7 [2]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.6.E7\n\n \"\"\"\n if not isinstance(n, int) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if n == 0:\n return jv(v, z)\n else:\n return _bessel_diff_formula(v, z, n, jv, -1)\n\n\ndef yvp(v, z, n=1):\n \"\"\"Compute nth derivative of Bessel function Yv(z) with respect to `z`.\n\n Parameters\n ----------\n v : float\n Order of Bessel function\n z : complex\n Argument at which to evaluate the derivative\n n : int, default 1\n Order of derivative\n\n Notes\n -----\n The derivative is computed using the relation DLFM 10.6.7 [2]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.6.E7\n\n \"\"\"\n if not isinstance(n, int) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if n == 0:\n return yv(v, z)\n else:\n return _bessel_diff_formula(v, z, n, yv, -1)\n\n\ndef kvp(v, z, n=1):\n \"\"\"Compute nth derivative of real-order modified Bessel function Kv(z)\n\n Kv(z) is the modified Bessel function of the second kind.\n Derivative is calculated with respect to `z`.\n\n Parameters\n ----------\n v : array_like of float\n Order of Bessel function\n z : array_like of complex\n Argument at which to evaluate the derivative\n n : int\n Order of derivative. Default is first derivative.\n\n Returns\n -------\n out : ndarray\n The results\n\n Examples\n --------\n Calculate multiple values at order 5:\n\n >>> from scipy.special import kvp\n >>> kvp(5, (1, 2, 3+5j))\n array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])\n\n Calculate for a single value at multiple orders:\n\n >>> kvp((4, 4.5, 5), 1)\n array([ -184.0309, -568.9585, -1849.0354])\n\n Notes\n -----\n The derivative is computed using the relation DLFM 10.29.5 [2]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 6.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.29.E5\n\n \"\"\"\n if not isinstance(n, int) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if n == 0:\n return kv(v, z)\n else:\n return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)\n\n\ndef ivp(v, z, n=1):\n \"\"\"Compute nth derivative of modified Bessel function Iv(z) with respect\n to `z`.\n\n Parameters\n ----------\n v : array_like of float\n Order of Bessel function\n z : array_like of complex\n Argument at which to evaluate the derivative\n n : int, default 1\n Order of derivative\n\n Notes\n -----\n The derivative is computed using the relation DLFM 10.29.5 [2]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 6.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.29.E5\n\n \"\"\"\n if not isinstance(n, int) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if n == 0:\n return iv(v, z)\n else:\n return _bessel_diff_formula(v, z, n, iv, 1)\n\n\ndef h1vp(v, z, n=1):\n \"\"\"Compute nth derivative of Hankel function H1v(z) with respect to `z`.\n\n Parameters\n ----------\n v : float\n Order of Hankel function\n z : complex\n Argument at which to evaluate the derivative\n n : int, default 1\n Order of derivative\n\n Notes\n -----\n The derivative is computed using the relation DLFM 10.6.7 [2]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.6.E7\n\n \"\"\"\n if not isinstance(n, int) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if n == 0:\n return hankel1(v, z)\n else:\n return _bessel_diff_formula(v, z, n, hankel1, -1)\n\n\ndef h2vp(v, z, n=1):\n \"\"\"Compute nth derivative of Hankel function H2v(z) with respect to `z`.\n\n Parameters\n ----------\n v : float\n Order of Hankel function\n z : complex\n Argument at which to evaluate the derivative\n n : int, default 1\n Order of derivative\n\n Notes\n -----\n The derivative is computed using the relation DLFM 10.6.7 [2]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 5.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.6.E7\n\n \"\"\"\n if not isinstance(n, int) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if n == 0:\n return hankel2(v, z)\n else:\n return _bessel_diff_formula(v, z, n, hankel2, -1)\n\n\[email protected](message=\"scipy.special.sph_jn is deprecated in scipy 0.18.0. \"\n \"Use scipy.special.spherical_jn instead. 
\"\n \"Note that the new function has a different signature.\")\ndef sph_jn(n, z):\n \"\"\"Compute spherical Bessel function jn(z) and derivative.\n\n This function computes the value and first derivative of jn(z) for all\n orders up to and including n.\n\n Parameters\n ----------\n n : int\n Maximum order of jn to compute\n z : complex\n Argument at which to evaluate\n\n Returns\n -------\n jn : ndarray\n Value of j0(z), ..., jn(z)\n jnp : ndarray\n First derivative j0'(z), ..., jn'(z)\n\n See also\n --------\n spherical_jn\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 8.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z):\n nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)\n else:\n nm, jn, jnp = specfun.sphj(n1, z)\n return jn[:(n+1)], jnp[:(n+1)]\n\n\[email protected](message=\"scipy.special.sph_yn is deprecated in scipy 0.18.0. \"\n \"Use scipy.special.spherical_yn instead. \"\n \"Note that the new function has a different signature.\")\ndef sph_yn(n, z):\n \"\"\"Compute spherical Bessel function yn(z) and derivative.\n\n This function computes the value and first derivative of yn(z) for all\n orders up to and including n.\n\n Parameters\n ----------\n n : int\n Maximum order of yn to compute\n z : complex\n Argument at which to evaluate\n\n Returns\n -------\n yn : ndarray\n Value of y0(z), ..., yn(z)\n ynp : ndarray\n First derivative y0'(z), ..., yn'(z)\n\n See also\n --------\n spherical_yn\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 8.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z) or less(z, 0):\n nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)\n else:\n nm, yn, ynp = specfun.sphy(n1, z)\n return yn[:(n+1)], ynp[:(n+1)]\n\n\[email protected](message=\"scipy.special.sph_jnyn is deprecated in scipy 0.18.0. \"\n \"Use scipy.special.spherical_jn and \"\n \"scipy.special.spherical_yn instead. \"\n \"Note that the new function has a different signature.\")\ndef sph_jnyn(n, z):\n \"\"\"Compute spherical Bessel functions jn(z) and yn(z) and derivatives.\n\n This function computes the value and first derivative of jn(z) and yn(z)\n for all orders up to and including n.\n\n Parameters\n ----------\n n : int\n Maximum order of jn and yn to compute\n z : complex\n Argument at which to evaluate\n\n Returns\n -------\n jn : ndarray\n Value of j0(z), ..., jn(z)\n jnp : ndarray\n First derivative j0'(z), ..., jn'(z)\n yn : ndarray\n Value of y0(z), ..., yn(z)\n ynp : ndarray\n First derivative y0'(z), ..., yn'(z)\n\n See also\n --------\n spherical_jn\n spherical_yn\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 8.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z) or less(z, 0):\n nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)\n else:\n nm, yn, ynp = specfun.sphy(n1, z)\n nm, jn, jnp = specfun.sphj(n1, z)\n return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]\n\n\[email protected](message=\"scipy.special.sph_in is deprecated in scipy 0.18.0. \"\n \"Use scipy.special.spherical_in instead. \"\n \"Note that the new function has a different signature.\")\ndef sph_in(n, z):\n \"\"\"Compute spherical Bessel function in(z) and derivative.\n\n This function computes the value and first derivative of in(z) for all\n orders up to and including n.\n\n Parameters\n ----------\n n : int\n Maximum order of in to compute\n z : complex\n Argument at which to evaluate\n\n Returns\n -------\n in : ndarray\n Value of i0(z), ..., in(z)\n inp : ndarray\n First derivative i0'(z), ..., in'(z)\n\n See also\n --------\n spherical_in\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 8.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z):\n nm, In, Inp, kn, knp = specfun.csphik(n1, z)\n else:\n nm, In, Inp = specfun.sphi(n1, z)\n return In[:(n+1)], Inp[:(n+1)]\n\n\[email protected](message=\"scipy.special.sph_kn is deprecated in scipy 0.18.0. \"\n \"Use scipy.special.spherical_kn instead. \"\n \"Note that the new function has a different signature.\")\ndef sph_kn(n, z):\n \"\"\"Compute spherical Bessel function kn(z) and derivative.\n\n This function computes the value and first derivative of kn(z) for all\n orders up to and including n.\n\n Parameters\n ----------\n n : int\n Maximum order of kn to compute\n z : complex\n Argument at which to evaluate\n\n Returns\n -------\n kn : ndarray\n Value of k0(z), ..., kn(z)\n knp : ndarray\n First derivative k0'(z), ..., kn'(z)\n\n See also\n --------\n spherical_kn\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 8.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z) or less(z, 0):\n nm, In, Inp, kn, knp = specfun.csphik(n1, z)\n else:\n nm, kn, knp = specfun.sphk(n1, z)\n return kn[:(n+1)], knp[:(n+1)]\n\n\[email protected](message=\"scipy.special.sph_inkn is deprecated in scipy 0.18.0. \"\n \"Use scipy.special.spherical_in and \"\n \"scipy.special.spherical_kn instead. 
\"\n \"Note that the new function has a different signature.\")\ndef sph_inkn(n, z):\n \"\"\"Compute spherical Bessel functions in(z), kn(z), and derivatives.\n\n This function computes the value and first derivative of in(z) and kn(z)\n for all orders up to and including n.\n\n Parameters\n ----------\n n : int\n Maximum order of in and kn to compute\n z : complex\n Argument at which to evaluate\n\n Returns\n -------\n in : ndarray\n Value of i0(z), ..., in(z)\n inp : ndarray\n First derivative i0'(z), ..., in'(z)\n kn : ndarray\n Value of k0(z), ..., kn(z)\n knp : ndarray\n First derivative k0'(z), ..., kn'(z)\n\n See also\n --------\n spherical_in\n spherical_kn\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 8.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z) or less(z, 0):\n nm, In, Inp, kn, knp = specfun.csphik(n1, z)\n else:\n nm, In, Inp = specfun.sphi(n1, z)\n nm, kn, knp = specfun.sphk(n1, z)\n return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]\n\n\ndef riccati_jn(n, x):\n r\"\"\"Compute Ricatti-Bessel function of the first kind and its derivative.\n\n The Ricatti-Bessel function of the first kind is defined as :math:`x\n j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first\n kind of order :math:`n`.\n\n This function computes the value and first derivative of the\n Ricatti-Bessel function for all orders up to and including `n`.\n\n Parameters\n ----------\n n : int\n Maximum order of function to compute\n x : float\n Argument at which to evaluate\n\n Returns\n -------\n jn : ndarray\n Value of j0(x), ..., jn(x)\n jnp : ndarray\n First derivative j0'(x), ..., jn'(x)\n\n Notes\n -----\n The computation is carried out via backward recurrence, using the\n relation DLMF 10.51.1 [2]_.\n\n Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n Jin [1]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n .. 
[2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.51.E1\n\n \"\"\"\n if not (isscalar(n) and isscalar(x)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n == 0):\n n1 = 1\n else:\n n1 = n\n nm, jn, jnp = specfun.rctj(n1, x)\n return jn[:(n+1)], jnp[:(n+1)]\n\n\ndef riccati_yn(n, x):\n \"\"\"Compute Ricatti-Bessel function of the second kind and its derivative.\n\n The Ricatti-Bessel function of the second kind is defined as :math:`x\n y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second\n kind of order :math:`n`.\n\n This function computes the value and first derivative of the function for\n all orders up to and including `n`.\n\n Parameters\n ----------\n n : int\n Maximum order of function to compute\n x : float\n Argument at which to evaluate\n\n Returns\n -------\n yn : ndarray\n Value of y0(x), ..., yn(x)\n ynp : ndarray\n First derivative y0'(x), ..., yn'(x)\n\n Notes\n -----\n The computation is carried out via ascending recurrence, using the\n relation DLMF 10.51.1 [2]_.\n\n Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n Jin [1]_.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions.\n http://dlmf.nist.gov/10.51.E1\n\n \"\"\"\n if not (isscalar(n) and isscalar(x)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n == 0):\n n1 = 1\n else:\n n1 = n\n nm, jn, jnp = specfun.rcty(n1, x)\n return jn[:(n+1)], jnp[:(n+1)]\n\n\ndef erfinv(y):\n \"\"\"Inverse function for erf.\n \"\"\"\n return ndtri((y+1)/2.0)/sqrt(2)\n\n\ndef erfcinv(y):\n \"\"\"Inverse function for erfc.\n \"\"\"\n return -ndtri(0.5*y)/sqrt(2)\n\n\ndef erf_zeros(nt):\n \"\"\"Compute nt complex zeros of error function erf(z).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):\n raise ValueError(\"Argument must be positive scalar integer.\")\n return specfun.cerzo(nt)\n\n\ndef fresnelc_zeros(nt):\n \"\"\"Compute nt complex zeros of cosine Fresnel integral C(z).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):\n raise ValueError(\"Argument must be positive scalar integer.\")\n return specfun.fcszo(1, nt)\n\n\ndef fresnels_zeros(nt):\n \"\"\"Compute nt complex zeros of sine Fresnel integral S(z).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):\n raise ValueError(\"Argument must be positive scalar integer.\")\n return specfun.fcszo(2, nt)\n\n\ndef fresnel_zeros(nt):\n \"\"\"Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):\n raise ValueError(\"Argument must be positive scalar integer.\")\n return specfun.fcszo(2, nt), specfun.fcszo(1, nt)\n\n\ndef assoc_laguerre(x, n, k=0.0):\n \"\"\"Compute the generalized (associated) Laguerre polynomial of degree n and order k.\n\n The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,\n with weighting function ``exp(-x) * x**k`` with ``k > -1``.\n\n Notes\n -----\n `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with\n reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.\n\n \"\"\"\n return orthogonal.eval_genlaguerre(n, k, x)\n\ndigamma = psi\n\n\ndef polygamma(n, x):\n \"\"\"Polygamma function n.\n\n This is the nth derivative of the digamma (psi) function.\n\n Parameters\n ----------\n n : array_like of int\n The order of the derivative of `psi`.\n x : array_like\n Where to evaluate the polygamma function.\n\n Returns\n -------\n polygamma : ndarray\n The result.\n\n Examples\n --------\n >>> from scipy import special\n >>> x = [2, 3, 25.5]\n >>> special.polygamma(1, x)\n array([ 0.64493407, 0.39493407, 0.03999467])\n >>> special.polygamma(0, x) == special.psi(x)\n array([ True, True, True], dtype=bool)\n\n \"\"\"\n n, x = asarray(n), asarray(x)\n fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)\n return where(n == 0, psi(x), fac2)\n\n\ndef mathieu_even_coef(m, q):\n r\"\"\"Fourier coefficients for even Mathieu and modified Mathieu functions.\n\n The Fourier series of the even solutions of the Mathieu differential\n equation are of the form\n\n .. math:: \\mathrm{ce}_{2n}(z, q) = \\sum_{k=0}^{\\infty} A_{(2n)}^{(2k)} \\cos 2kz\n\n .. math:: \\mathrm{ce}_{2n+1}(z, q) = \\sum_{k=0}^{\\infty} A_{(2n+1)}^{(2k+1)} \\cos (2k+1)z\n\n This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even\n input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input\n m=2n+1.\n\n Parameters\n ----------\n m : int\n Order of Mathieu functions. Must be non-negative.\n q : float (>=0)\n Parameter of Mathieu functions. Must be non-negative.\n\n Returns\n -------\n Ak : ndarray\n Even or odd Fourier coefficients, corresponding to even or odd m.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions\n http://dlmf.nist.gov/28.4#i\n\n \"\"\"\n if not (isscalar(m) and isscalar(q)):\n raise ValueError(\"m and q must be scalars.\")\n if (q < 0):\n raise ValueError(\"q >=0\")\n if (m != floor(m)) or (m < 0):\n raise ValueError(\"m must be an integer >=0.\")\n\n if (q <= 1):\n qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q\n else:\n qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q\n km = int(qm + 0.5*m)\n if km > 251:\n print(\"Warning, too many predicted coefficients.\")\n kd = 1\n m = int(floor(m))\n if m % 2:\n kd = 2\n\n a = mathieu_a(m, q)\n fc = specfun.fcoef(kd, m, q, a)\n return fc[:km]\n\n\ndef mathieu_odd_coef(m, q):\n r\"\"\"Fourier coefficients for even Mathieu and modified Mathieu functions.\n\n The Fourier series of the odd solutions of the Mathieu differential\n equation are of the form\n\n .. math:: \\mathrm{se}_{2n+1}(z, q) = \\sum_{k=0}^{\\infty} B_{(2n+1)}^{(2k+1)} \\sin (2k+1)z\n\n .. 
math:: \\mathrm{se}_{2n+2}(z, q) = \\sum_{k=0}^{\\infty} B_{(2n+2)}^{(2k+2)} \\sin (2k+2)z\n\n This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even\n input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd\n input m=2n+1.\n\n Parameters\n ----------\n m : int\n Order of Mathieu functions. Must be non-negative.\n q : float (>=0)\n Parameter of Mathieu functions. Must be non-negative.\n\n Returns\n -------\n Bk : ndarray\n Even or odd Fourier coefficients, corresponding to even or odd m.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(m) and isscalar(q)):\n raise ValueError(\"m and q must be scalars.\")\n if (q < 0):\n raise ValueError(\"q >=0\")\n if (m != floor(m)) or (m <= 0):\n raise ValueError(\"m must be an integer > 0\")\n\n if (q <= 1):\n qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q\n else:\n qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q\n km = int(qm + 0.5*m)\n if km > 251:\n print(\"Warning, too many predicted coefficients.\")\n kd = 4\n m = int(floor(m))\n if m % 2:\n kd = 3\n\n b = mathieu_b(m, q)\n fc = specfun.fcoef(kd, m, q, b)\n return fc[:km]\n\n\ndef lpmn(m, n, z):\n \"\"\"Sequence of associated Legendre functions of the first kind.\n\n Computes the associated Legendre function of the first kind of order m and\n degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.\n Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and\n ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.\n\n This function takes a real argument ``z``. For complex arguments ``z``\n use clpmn instead.\n\n Parameters\n ----------\n m : int\n ``|m| <= n``; the order of the Legendre function.\n n : int\n where ``n >= 0``; the degree of the Legendre function. Often\n called ``l`` (lower case L) in descriptions of the associated\n Legendre function\n z : float\n Input value.\n\n Returns\n -------\n Pmn_z : (m+1, n+1) array\n Values for all orders 0..m and degrees 0..n\n Pmn_d_z : (m+1, n+1) array\n Derivatives for all orders 0..m and degrees 0..n\n\n See Also\n --------\n clpmn: associated Legendre functions of the first kind for complex z\n\n Notes\n -----\n In the interval (-1, 1), Ferrer's function of the first kind is\n returned. The phase convention used for the intervals (1, inf)\n and (-inf, -1) is such that the result is always real.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] NIST Digital Library of Mathematical Functions\n http://dlmf.nist.gov/14.3\n\n \"\"\"\n if not isscalar(m) or (abs(m) > n):\n raise ValueError(\"m must be <= n.\")\n if not isscalar(n) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if not isscalar(z):\n raise ValueError(\"z must be scalar.\")\n if iscomplex(z):\n raise ValueError(\"Argument must be real. 
Use clpmn instead.\")\n if (m < 0):\n mp = -m\n mf, nf = mgrid[0:mp+1, 0:n+1]\n with ufuncs.errstate(all='ignore'):\n if abs(z) < 1:\n # Ferrer function; DLMF 14.9.3\n fixarr = where(mf > nf, 0.0,\n (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))\n else:\n # Match to clpmn; DLMF 14.9.13\n fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))\n else:\n mp = m\n p, pd = specfun.lpmn(mp, n, z)\n if (m < 0):\n p = p * fixarr\n pd = pd * fixarr\n return p, pd\n\n\ndef clpmn(m, n, z, type=3):\n \"\"\"Associated Legendre function of the first kind for complex arguments.\n\n Computes the associated Legendre function of the first kind of order m and\n degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.\n Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and\n ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.\n\n Parameters\n ----------\n m : int\n ``|m| <= n``; the order of the Legendre function.\n n : int\n where ``n >= 0``; the degree of the Legendre function. Often\n called ``l`` (lower case L) in descriptions of the associated\n Legendre function\n z : float or complex\n Input value.\n type : int, optional\n takes values 2 or 3\n 2: cut on the real axis ``|x| > 1``\n 3: cut on the real axis ``-1 < x < 1`` (default)\n\n Returns\n -------\n Pmn_z : (m+1, n+1) array\n Values for all orders ``0..m`` and degrees ``0..n``\n Pmn_d_z : (m+1, n+1) array\n Derivatives for all orders ``0..m`` and degrees ``0..n``\n\n See Also\n --------\n lpmn: associated Legendre functions of the first kind for real z\n\n Notes\n -----\n By default, i.e. for ``type=3``, phase conventions are chosen according\n to [1]_ such that the function is analytic. The cut lies on the interval\n (-1, 1). Approaching the cut from above or below in general yields a phase\n factor with respect to Ferrer's function of the first kind\n (cf. `lpmn`).\n\n For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values\n on the interval (-1, 1) in the complex plane yields Ferrer's function\n of the first kind.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n .. 
[2] NIST Digital Library of Mathematical Functions\n http://dlmf.nist.gov/14.21\n\n \"\"\"\n if not isscalar(m) or (abs(m) > n):\n raise ValueError(\"m must be <= n.\")\n if not isscalar(n) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if not isscalar(z):\n raise ValueError(\"z must be scalar.\")\n if not(type == 2 or type == 3):\n raise ValueError(\"type must be either 2 or 3.\")\n if (m < 0):\n mp = -m\n mf, nf = mgrid[0:mp+1, 0:n+1]\n with ufuncs.errstate(all='ignore'):\n if type == 2:\n fixarr = where(mf > nf, 0.0,\n (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))\n else:\n fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))\n else:\n mp = m\n p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)\n if (m < 0):\n p = p * fixarr\n pd = pd * fixarr\n return p, pd\n\n\ndef lqmn(m, n, z):\n \"\"\"Sequence of associated Legendre functions of the second kind.\n\n Computes the associated Legendre function of the second kind of order m and\n degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.\n Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and\n ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.\n\n Parameters\n ----------\n m : int\n ``|m| <= n``; the order of the Legendre function.\n n : int\n where ``n >= 0``; the degree of the Legendre function. Often\n called ``l`` (lower case L) in descriptions of the associated\n Legendre function\n z : complex\n Input value.\n\n Returns\n -------\n Qmn_z : (m+1, n+1) array\n Values for all orders 0..m and degrees 0..n\n Qmn_d_z : (m+1, n+1) array\n Derivatives for all orders 0..m and degrees 0..n\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(m) or (m < 0):\n raise ValueError(\"m must be a non-negative integer.\")\n if not isscalar(n) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if not isscalar(z):\n raise ValueError(\"z must be scalar.\")\n m = int(m)\n n = int(n)\n\n # Ensure neither m nor n == 0\n mm = max(1, m)\n nn = max(1, n)\n\n if iscomplex(z):\n q, qd = specfun.clqmn(mm, nn, z)\n else:\n q, qd = specfun.lqmn(mm, nn, z)\n return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]\n\n\ndef bernoulli(n):\n \"\"\"Bernoulli numbers B0..Bn (inclusive).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(n) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n n = int(n)\n if (n < 2):\n n1 = 2\n else:\n n1 = n\n return specfun.bernob(int(n1))[:(n+1)]\n\n\ndef euler(n):\n \"\"\"Euler numbers E0..En (inclusive).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(n) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n n = int(n)\n if (n < 2):\n n1 = 2\n else:\n n1 = n\n return specfun.eulerb(n1)[:(n+1)]\n\n\ndef lpn(n, z):\n \"\"\"Legendre function of the first kind.\n\n Compute sequence of Legendre functions of the first kind (polynomials),\n Pn(z) and derivatives for all degrees from 0 to n (inclusive).\n\n See also special.legendre for polynomial class.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z):\n pn, pd = specfun.clpn(n1, z)\n else:\n pn, pd = specfun.lpn(n1, z)\n return pn[:(n+1)], pd[:(n+1)]\n\n\ndef lqn(n, z):\n \"\"\"Legendre function of the second kind.\n\n Compute sequence of Legendre functions of the second kind, Qn(z) and\n derivatives for all degrees from 0 to n (inclusive).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (n != floor(n)) or (n < 0):\n raise ValueError(\"n must be a non-negative integer.\")\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n if iscomplex(z):\n qn, qd = specfun.clqn(n1, z)\n else:\n qn, qd = specfun.lqnb(n1, z)\n return qn[:(n+1)], qd[:(n+1)]\n\n\ndef ai_zeros(nt):\n \"\"\"\n Compute `nt` zeros and values of the Airy function Ai and its derivative.\n\n Computes the first `nt` zeros, `a`, of the Airy function Ai(x);\n first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);\n the corresponding values Ai(a');\n and the corresponding values Ai'(a).\n\n Parameters\n ----------\n nt : int\n Number of zeros to compute\n\n Returns\n -------\n a : ndarray\n First `nt` zeros of Ai(x)\n ap : ndarray\n First `nt` zeros of Ai'(x)\n ai : ndarray\n Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)\n aip : ndarray\n Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n kf = 1\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be a positive integer scalar.\")\n return specfun.airyzo(nt, kf)\n\n\ndef bi_zeros(nt):\n \"\"\"\n Compute `nt` zeros and values of the Airy function Bi and its derivative.\n\n Computes the first `nt` zeros, b, of the Airy function Bi(x);\n first `nt` zeros, b', of the derivative of the Airy function Bi'(x);\n the corresponding values Bi(b');\n and the corresponding values Bi'(b).\n\n Parameters\n ----------\n nt : int\n Number of zeros to compute\n\n Returns\n -------\n b : ndarray\n First `nt` zeros of Bi(x)\n bp : ndarray\n First `nt` zeros of Bi'(x)\n bi : ndarray\n Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)\n bip : ndarray\n Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n kf = 2\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be a positive integer scalar.\")\n return specfun.airyzo(nt, kf)\n\n\ndef lmbda(v, x):\n r\"\"\"Jahnke-Emden Lambda function, Lambdav(x).\n\n This function is defined as [2]_,\n\n .. 
math:: \\Lambda_v(x) = \\Gamma(v+1) \\frac{J_v(x)}{(x/2)^v},\n\n where :math:`\\Gamma` is the gamma function and :math:`J_v` is the\n Bessel function of the first kind.\n\n Parameters\n ----------\n v : float\n Order of the Lambda function\n x : float\n Value at which to evaluate the function and derivatives\n\n Returns\n -------\n vl : ndarray\n Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.\n dl : ndarray\n Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n .. [2] Jahnke, E. and Emde, F. \"Tables of Functions with Formulae and\n Curves\" (4th ed.), Dover, 1945\n \"\"\"\n if not (isscalar(v) and isscalar(x)):\n raise ValueError(\"arguments must be scalars.\")\n if (v < 0):\n raise ValueError(\"argument must be > 0.\")\n n = int(v)\n v0 = v - n\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n v1 = n1 + v0\n if (v != floor(v)):\n vm, vl, dl = specfun.lamv(v1, x)\n else:\n vm, vl, dl = specfun.lamn(v1, x)\n return vl[:(n+1)], dl[:(n+1)]\n\n\ndef pbdv_seq(v, x):\n \"\"\"Parabolic cylinder functions Dv(x) and derivatives.\n\n Parameters\n ----------\n v : float\n Order of the parabolic cylinder function\n x : float\n Value at which to evaluate the function and derivatives\n\n Returns\n -------\n dv : ndarray\n Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.\n dp : ndarray\n Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 13.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(v) and isscalar(x)):\n raise ValueError(\"arguments must be scalars.\")\n n = int(v)\n v0 = v-n\n if (n < 1):\n n1 = 1\n else:\n n1 = n\n v1 = n1 + v0\n dv, dp, pdf, pdd = specfun.pbdv(v1, x)\n return dv[:n1+1], dp[:n1+1]\n\n\ndef pbvv_seq(v, x):\n \"\"\"Parabolic cylinder functions Vv(x) and derivatives.\n\n Parameters\n ----------\n v : float\n Order of the parabolic cylinder function\n x : float\n Value at which to evaluate the function and derivatives\n\n Returns\n -------\n dv : ndarray\n Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.\n dp : ndarray\n Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 13.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(v) and isscalar(x)):\n raise ValueError(\"arguments must be scalars.\")\n n = int(v)\n v0 = v-n\n if (n <= 1):\n n1 = 1\n else:\n n1 = n\n v1 = n1 + v0\n dv, dp, pdf, pdd = specfun.pbvv(v1, x)\n return dv[:n1+1], dp[:n1+1]\n\n\ndef pbdn_seq(n, z):\n \"\"\"Parabolic cylinder functions Dn(z) and derivatives.\n\n Parameters\n ----------\n n : int\n Order of the parabolic cylinder function\n z : complex\n Value at which to evaluate the function and derivatives\n\n Returns\n -------\n dv : ndarray\n Values of D_i(z), for i=0, ..., i=n.\n dp : ndarray\n Derivatives D_i'(z), for i=0, ..., i=n.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996, chapter 13.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(n) and isscalar(z)):\n raise ValueError(\"arguments must be scalars.\")\n if (floor(n) != n):\n raise ValueError(\"n must be an integer.\")\n if (abs(n) <= 1):\n n1 = 1\n else:\n n1 = n\n cpb, cpd = specfun.cpbdn(n1, z)\n return cpb[:n1+1], cpd[:n1+1]\n\n\ndef ber_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function ber(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 1)\n\n\ndef bei_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function bei(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 2)\n\n\ndef ker_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function ker(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 3)\n\n\ndef kei_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function kei(x).\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 4)\n\n\ndef berp_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function ber'(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 5)\n\n\ndef beip_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function bei'(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 6)\n\n\ndef kerp_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function ker'(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 7)\n\n\ndef keip_zeros(nt):\n \"\"\"Compute nt zeros of the Kelvin function kei'(x).\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return specfun.klvnzo(nt, 8)\n\n\ndef kelvin_zeros(nt):\n \"\"\"Compute nt zeros of all Kelvin functions.\n\n Returned in a length-8 tuple of arrays of length nt. The tuple contains\n the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):\n raise ValueError(\"nt must be positive integer scalar.\")\n return (specfun.klvnzo(nt, 1),\n specfun.klvnzo(nt, 2),\n specfun.klvnzo(nt, 3),\n specfun.klvnzo(nt, 4),\n specfun.klvnzo(nt, 5),\n specfun.klvnzo(nt, 6),\n specfun.klvnzo(nt, 7),\n specfun.klvnzo(nt, 8))\n\n\ndef pro_cv_seq(m, n, c):\n \"\"\"Characteristic values for prolate spheroidal wave functions.\n\n Compute a sequence of characteristic values for the prolate\n spheroidal wave functions for mode m and n'=m..n and spheroidal\n parameter c.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(m) and isscalar(n) and isscalar(c)):\n raise ValueError(\"Arguments must be scalars.\")\n if (n != floor(n)) or (m != floor(m)):\n raise ValueError(\"Modes must be integers.\")\n if (n-m > 199):\n raise ValueError(\"Difference between n and m is too large.\")\n maxL = n-m+1\n return specfun.segv(m, n, c, 1)[1][:maxL]\n\n\ndef obl_cv_seq(m, n, c):\n \"\"\"Characteristic values for oblate spheroidal wave functions.\n\n Compute a sequence of characteristic values for the oblate\n spheroidal wave functions for mode m and n'=m..n and spheroidal\n parameter c.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n http://jin.ece.illinois.edu/specfunc.html\n\n \"\"\"\n if not (isscalar(m) and isscalar(n) and isscalar(c)):\n raise ValueError(\"Arguments must be scalars.\")\n if (n != floor(n)) or (m != floor(m)):\n raise ValueError(\"Modes must be integers.\")\n if (n-m > 199):\n raise ValueError(\"Difference between n and m is too large.\")\n maxL = n-m+1\n return specfun.segv(m, n, c, -1)[1][:maxL]\n\n\ndef ellipk(m):\n \"\"\"Complete elliptic integral of the first kind.\n\n This function is defined as\n\n .. math:: K(m) = \\\\int_0^{\\\\pi/2} [1 - m \\\\sin(t)^2]^{-1/2} dt\n\n Parameters\n ----------\n m : array_like\n The parameter of the elliptic integral.\n\n Returns\n -------\n K : array_like\n Value of the elliptic integral.\n\n Notes\n -----\n For more precision around point m = 1, use `ellipkm1`, which this\n function calls.\n\n See Also\n --------\n ellipkm1 : Complete elliptic integral of the first kind around m = 1\n ellipkinc : Incomplete elliptic integral of the first kind\n ellipe : Complete elliptic integral of the second kind\n ellipeinc : Incomplete elliptic integral of the second kind\n\n\n \"\"\"\n return ellipkm1(1 - asarray(m))\n\n\ndef agm(a, b):\n \"\"\"Arithmetic, Geometric Mean.\n\n Start with a_0=a and b_0=b and iteratively compute\n\n a_{n+1} = (a_n+b_n)/2\n b_{n+1} = sqrt(a_n*b_n)\n\n until a_n=b_n. 
The result is agm(a, b)\n\n agm(a, b)=agm(b, a)\n agm(a, a) = a\n min(a, b) < agm(a, b) < max(a, b)\n \"\"\"\n s = a + b + 0.0\n return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)\n\n\ndef comb(N, k, exact=False, repetition=False):\n \"\"\"The number of combinations of N things taken k at a time.\n\n This is often expressed as \"N choose k\".\n\n Parameters\n ----------\n N : int, ndarray\n Number of things.\n k : int, ndarray\n Number of elements taken.\n exact : bool, optional\n If `exact` is False, then floating point precision is used, otherwise\n exact long integer is computed.\n repetition : bool, optional\n If `repetition` is True, then the number of combinations with\n repetition is computed.\n\n Returns\n -------\n val : int, ndarray\n The total number of combinations.\n\n See Also\n --------\n binom : Binomial coefficient ufunc\n\n Notes\n -----\n - Array arguments accepted only for exact=False case.\n - If k > N, N < 0, or k < 0, then a 0 is returned.\n\n Examples\n --------\n >>> from scipy.special import comb\n >>> k = np.array([3, 4])\n >>> n = np.array([10, 10])\n >>> comb(n, k, exact=False)\n array([ 120., 210.])\n >>> comb(10, 3, exact=True)\n 120L\n >>> comb(10, 3, exact=True, repetition=True)\n 220L\n\n \"\"\"\n if repetition:\n return comb(N + k - 1, k, exact)\n if exact:\n return _comb_int(N, k)\n else:\n k, N = asarray(k), asarray(N)\n cond = (k <= N) & (N >= 0) & (k >= 0)\n vals = binom(N, k)\n if isinstance(vals, np.ndarray):\n vals[~cond] = 0\n elif not cond:\n vals = np.float64(0)\n return vals\n\n\ndef perm(N, k, exact=False):\n \"\"\"Permutations of N things taken k at a time, i.e., k-permutations of N.\n\n It's also known as \"partial permutations\".\n\n Parameters\n ----------\n N : int, ndarray\n Number of things.\n k : int, ndarray\n Number of elements taken.\n exact : bool, optional\n If `exact` is False, then floating point precision is used, otherwise\n exact long integer is computed.\n\n Returns\n -------\n val : int, ndarray\n The number of k-permutations of N.\n\n Notes\n -----\n - Array arguments accepted only for exact=False case.\n - If k > N, N < 0, or k < 0, then a 0 is returned.\n\n Examples\n --------\n >>> from scipy.special import perm\n >>> k = np.array([3, 4])\n >>> n = np.array([10, 10])\n >>> perm(n, k)\n array([ 720., 5040.])\n >>> perm(10, 3, exact=True)\n 720\n\n \"\"\"\n if exact:\n if (k > N) or (N < 0) or (k < 0):\n return 0\n val = 1\n for i in xrange(N - k + 1, N + 1):\n val *= i\n return val\n else:\n k, N = asarray(k), asarray(N)\n cond = (k <= N) & (N >= 0) & (k >= 0)\n vals = poch(N - k + 1, k)\n if isinstance(vals, np.ndarray):\n vals[~cond] = 0\n elif not cond:\n vals = np.float64(0)\n return vals\n\n\n# http://stackoverflow.com/a/16327037/125507\ndef _range_prod(lo, hi):\n \"\"\"\n Product of a range of numbers.\n\n Returns the product of\n lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi\n = hi! / (lo-1)!\n\n Breaks into smaller products first for speed:\n _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))\n \"\"\"\n if lo + 1 < hi:\n mid = (hi + lo) // 2\n return _range_prod(lo, mid) * _range_prod(mid + 1, hi)\n if lo == hi:\n return lo\n return lo * hi\n\n\ndef factorial(n, exact=False):\n \"\"\"\n The factorial of a number or array of numbers.\n\n The factorial of non-negative integer `n` is the product of all\n positive integers less than or equal to `n`::\n\n n! = n * (n - 1) * (n - 2) * ... * 1\n\n Parameters\n ----------\n n : int or array_like of ints\n Input values. 
If ``n < 0``, the return value is 0.\n exact : bool, optional\n If True, calculate the answer exactly using long integer arithmetic.\n If False, result is approximated in floating point rapidly using the\n `gamma` function.\n Default is False.\n\n Returns\n -------\n nf : float or int or ndarray\n Factorial of `n`, as integer or float depending on `exact`.\n\n Notes\n -----\n For arrays with ``exact=True``, the factorial is computed only once, for\n the largest input, with each other result computed in the process.\n The output dtype is increased to ``int64`` or ``object`` if necessary.\n\n With ``exact=False`` the factorial is approximated using the gamma\n function:\n\n .. math:: n! = \\\\Gamma(n+1)\n\n Examples\n --------\n >>> from scipy.special import factorial\n >>> arr = np.array([3, 4, 5])\n >>> factorial(arr, exact=False)\n array([ 6., 24., 120.])\n >>> factorial(arr, exact=True)\n array([ 6, 24, 120])\n >>> factorial(5, exact=True)\n 120L\n\n \"\"\"\n if exact:\n if np.ndim(n) == 0:\n return 0 if n < 0 else math.factorial(n)\n else:\n n = asarray(n)\n un = np.unique(n).astype(object)\n\n # Convert to object array of long ints if np.int can't handle size\n if un[-1] > 20:\n dt = object\n elif un[-1] > 12:\n dt = np.int64\n else:\n dt = np.int\n\n out = np.empty_like(n, dtype=dt)\n\n # Handle invalid/trivial values\n un = un[un > 1]\n out[n < 2] = 1\n out[n < 0] = 0\n\n # Calculate products of each range of numbers\n if un.size:\n val = math.factorial(un[0])\n out[n == un[0]] = val\n for i in xrange(len(un) - 1):\n prev = un[i] + 1\n current = un[i + 1]\n val *= _range_prod(prev, current)\n out[n == current] = val\n return out\n else:\n n = asarray(n)\n vals = gamma(n + 1)\n return where(n >= 0, vals, 0)\n\n\ndef factorial2(n, exact=False):\n \"\"\"Double factorial.\n\n This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5\n * 3 * 1``. It can be approximated numerically as::\n\n n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi) n odd\n = 2**(n/2) * (n/2)! n even\n\n Parameters\n ----------\n n : int or array_like\n Calculate ``n!!``. Arrays are only supported with `exact` set\n to False. If ``n < 0``, the return value is 0.\n exact : bool, optional\n The result can be approximated rapidly using the gamma-formula\n above (default). If `exact` is set to True, calculate the\n answer exactly using integer arithmetic.\n\n Returns\n -------\n nff : float or int\n Double factorial of `n`, as an int or a float depending on\n `exact`.\n\n Examples\n --------\n >>> from scipy.special import factorial2\n >>> factorial2(7, exact=False)\n array(105.00000000000001)\n >>> factorial2(7, exact=True)\n 105L\n\n \"\"\"\n if exact:\n if n < -1:\n return 0\n if n <= 0:\n return 1\n val = 1\n for k in xrange(n, 0, -2):\n val *= k\n return val\n else:\n n = asarray(n)\n vals = zeros(n.shape, 'd')\n cond1 = (n % 2) & (n >= -1)\n cond2 = (1-(n % 2)) & (n >= -1)\n oddn = extract(cond1, n)\n evenn = extract(cond2, n)\n nd2o = oddn / 2.0\n nd2e = evenn / 2.0\n place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))\n place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))\n return vals\n\n\ndef factorialk(n, k, exact=True):\n \"\"\"Multifactorial of n of order k, n(!!...!).\n\n This is the multifactorial of n skipping k values. For example,\n\n factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1\n\n In particular, for any integer ``n``, we have\n\n factorialk(n, 1) = factorial(n)\n\n factorialk(n, 2) = factorial2(n)\n\n Parameters\n ----------\n n : int\n Calculate multifactorial. 
If `n` < 0, the return value is 0.\n k : int\n Order of multifactorial.\n exact : bool, optional\n If exact is set to True, calculate the answer exactly using\n integer arithmetic.\n\n Returns\n -------\n val : int\n Multifactorial of `n`.\n\n Raises\n ------\n NotImplementedError\n Raises when exact is False\n\n Examples\n --------\n >>> from scipy.special import factorialk\n >>> factorialk(5, 1, exact=True)\n 120L\n >>> factorialk(5, 3, exact=True)\n 10L\n\n \"\"\"\n if exact:\n if n < 1-k:\n return 0\n if n <= 0:\n return 1\n val = 1\n for j in xrange(n, 0, -k):\n val = val*j\n return val\n else:\n raise NotImplementedError\n\n\ndef zeta(x, q=None, out=None):\n r\"\"\"\n Riemann zeta function.\n\n The two-argument version is the Hurwitz zeta function:\n\n .. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} \\frac{1}{(k + q)^x},\n\n Riemann zeta function corresponds to ``q = 1``.\n\n See also\n --------\n zetac\n\n \"\"\"\n if q is None:\n q = 1\n return _zeta(x, q, out)\n\n",
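The derivative wrappers in the row above (jvp, yvp, ivp, kvp, h1vp, h2vp) all reduce to the single difference formula in _bessel_diff_formula (DLMF 10.6.7 / 10.29.5). A minimal usage sketch, assuming SciPy is installed, that cross-checks jvp against a central finite difference and confirms that jn_zeros really returns zeros of Jn; the step size and tolerances are arbitrary illustration choices, not part of the library:

# Sanity-check scipy.special.jvp (DLMF 10.6.7) with a central finite
# difference, and verify jn_zeros by evaluating J0 at the returned points.
import numpy as np
from scipy.special import jv, jvp, jn_zeros

v, z, h = 2.5, 1.7, 1e-6
finite_diff = (jv(v, z + h) - jv(v, z - h)) / (2 * h)
print(np.isclose(jvp(v, z, n=1), finite_diff, atol=1e-8))    # True

zeros = jn_zeros(0, 3)                  # first three positive zeros of J0
print(np.allclose(jv(0, zeros), 0.0, atol=1e-10))            # True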
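Likewise, comb, perm and factorial each expose an exact integer path beside the fast gamma-based floating-point path documented in their docstrings; a short illustrative comparison (SciPy assumed installed):

# exact=True uses arbitrary-precision integer arithmetic; exact=False goes
# through gamma(n + 1) and may lose low-order digits once n! is very large.
from scipy.special import comb, perm, factorial

print(comb(10, 3, exact=True))     # 120 (exact integer)
print(comb(10, 3, exact=False))    # floating-point value close to 120
print(perm(10, 3, exact=True))     # 720
print(factorial(21, exact=True))   # 51090942171709440000, exact
print(factorial(21, exact=False))  # gamma-based float approximation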
"\"\"\"\nThe :mod:`sklearn.pls` module implements Partial Least Squares (PLS).\n\"\"\"\n\n# Author: Edouard Duchesnay <[email protected]>\n# License: BSD 3 clause\nfrom distutils.version import LooseVersion\nfrom sklearn.utils.extmath import svd_flip\n\nfrom ..base import BaseEstimator, RegressorMixin, TransformerMixin\nfrom ..utils import check_array, check_consistent_length\nfrom ..externals import six\n\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom scipy import linalg\nfrom ..utils import arpack\nfrom ..utils.validation import check_is_fitted, FLOAT_DTYPES\n\n__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']\n\nimport scipy\npinv2_args = {}\nif LooseVersion(scipy.__version__) >= LooseVersion('0.12'):\n # check_finite=False is an optimization available only in scipy >=0.12\n pinv2_args = {'check_finite': False}\n\n\ndef _nipals_twoblocks_inner_loop(X, Y, mode=\"A\", max_iter=500, tol=1e-06,\n norm_y_weights=False):\n \"\"\"Inner loop of the iterative NIPALS algorithm.\n\n Provides an alternative to the svd(X'Y); returns the first left and right\n singular vectors of X'Y. See PLS for the meaning of the parameters. It is\n similar to the Power method for determining the eigenvectors and\n eigenvalues of a X'Y.\n \"\"\"\n y_score = Y[:, [0]]\n x_weights_old = 0\n ite = 1\n X_pinv = Y_pinv = None\n eps = np.finfo(X.dtype).eps\n # Inner loop of the Wold algo.\n while True:\n # 1.1 Update u: the X weights\n if mode == \"B\":\n if X_pinv is None:\n # We use slower pinv2 (same as np.linalg.pinv) for stability\n # reasons\n X_pinv = linalg.pinv2(X, **pinv2_args)\n x_weights = np.dot(X_pinv, y_score)\n else: # mode A\n # Mode A regress each X column on y_score\n x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)\n # If y_score only has zeros x_weights will only have zeros. 
In\n # this case add an epsilon to converge to a more acceptable\n # solution\n if np.dot(x_weights.T, x_weights) < eps:\n x_weights += eps\n # 1.2 Normalize u\n x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps\n # 1.3 Update x_score: the X latent scores\n x_score = np.dot(X, x_weights)\n # 2.1 Update y_weights\n if mode == \"B\":\n if Y_pinv is None:\n Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)\n y_weights = np.dot(Y_pinv, x_score)\n else:\n # Mode A regress each Y column on x_score\n y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)\n # 2.2 Normalize y_weights\n if norm_y_weights:\n y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps\n # 2.3 Update y_score: the Y latent scores\n y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)\n # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG\n x_weights_diff = x_weights - x_weights_old\n if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:\n break\n if ite == max_iter:\n warnings.warn('Maximum number of iterations reached')\n break\n x_weights_old = x_weights\n ite += 1\n return x_weights, y_weights, ite\n\n\ndef _svd_cross_product(X, Y):\n C = np.dot(X.T, Y)\n U, s, Vh = linalg.svd(C, full_matrices=False)\n u = U[:, [0]]\n v = Vh.T[:, [0]]\n return u, v\n\n\ndef _center_scale_xy(X, Y, scale=True):\n \"\"\" Center X, Y and scale if the scale parameter==True\n\n Returns\n -------\n X, Y, x_mean, y_mean, x_std, y_std\n \"\"\"\n # center\n x_mean = X.mean(axis=0)\n X -= x_mean\n y_mean = Y.mean(axis=0)\n Y -= y_mean\n # scale\n if scale:\n x_std = X.std(axis=0, ddof=1)\n x_std[x_std == 0.0] = 1.0\n X /= x_std\n y_std = Y.std(axis=0, ddof=1)\n y_std[y_std == 0.0] = 1.0\n Y /= y_std\n else:\n x_std = np.ones(X.shape[1])\n y_std = np.ones(Y.shape[1])\n return X, Y, x_mean, y_mean, x_std, y_std\n\n\nclass _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,\n RegressorMixin):\n \"\"\"Partial Least Squares (PLS)\n\n This class implements the generic PLS algorithm, constructors' parameters\n allow to obtain a specific implementation such as:\n\n - PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation\n and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.\n With univariate response it implements PLS1.\n\n - PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and\n normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and\n [Wegelin et al. 2000]. This parametrization implements the original Wold\n algorithm.\n\n We use the terminology defined by [Wegelin et al. 2000].\n This implementation uses the PLS Wold 2 blocks algorithm based on two\n nested loops:\n (i) The outer loop iterate over components.\n (ii) The inner loop estimates the weights vectors. This can be done\n with two algo. (a) the inner loop of the original NIPALS algo. or (b) a\n SVD on residuals cross-covariance matrices.\n\n n_components : int, number of components to keep. (default 2).\n\n scale : boolean, scale data? (default True)\n\n deflation_mode : str, \"canonical\" or \"regression\". See notes.\n\n mode : \"A\" classical PLS and \"B\" CCA. See notes.\n\n norm_y_weights: boolean, normalize Y weights to one? (default False)\n\n algorithm : string, \"nipals\" or \"svd\"\n The algorithm used to estimate the weights. It will be called\n n_components times, i.e. 
once for each iteration of the outer loop.\n\n max_iter : an integer, the maximum number of iterations (default 500)\n of the NIPALS inner loop (used only if algorithm=\"nipals\")\n\n tol : non-negative real, default 1e-06\n The tolerance used in the iterative algorithm.\n\n copy : boolean, default True\n Whether the deflation should be done on a copy. Let the default\n value to True unless you don't care about side effects.\n\n Attributes\n ----------\n x_weights_ : array, [p, n_components]\n X block weights vectors.\n\n y_weights_ : array, [q, n_components]\n Y block weights vectors.\n\n x_loadings_ : array, [p, n_components]\n X block loadings vectors.\n\n y_loadings_ : array, [q, n_components]\n Y block loadings vectors.\n\n x_scores_ : array, [n_samples, n_components]\n X scores.\n\n y_scores_ : array, [n_samples, n_components]\n Y scores.\n\n x_rotations_ : array, [p, n_components]\n X block to latents rotations.\n\n y_rotations_ : array, [q, n_components]\n Y block to latents rotations.\n\n coef_: array, [p, q]\n The coefficients of the linear model: ``Y = X coef_ + Err``\n\n n_iter_ : array-like\n Number of iterations of the NIPALS inner loop for each\n component. Not useful if the algorithm given is \"svd\".\n\n References\n ----------\n\n Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with\n emphasis on the two-block case. Technical Report 371, Department of\n Statistics, University of Washington, Seattle, 2000.\n\n In French but still a reference:\n Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:\n Editions Technic.\n\n See also\n --------\n PLSCanonical\n PLSRegression\n CCA\n PLS_SVD\n \"\"\"\n\n @abstractmethod\n def __init__(self, n_components=2, scale=True, deflation_mode=\"regression\",\n mode=\"A\", algorithm=\"nipals\", norm_y_weights=False,\n max_iter=500, tol=1e-06, copy=True):\n self.n_components = n_components\n self.deflation_mode = deflation_mode\n self.mode = mode\n self.norm_y_weights = norm_y_weights\n self.scale = scale\n self.algorithm = algorithm\n self.max_iter = max_iter\n self.tol = tol\n self.copy = copy\n\n def fit(self, X, Y):\n \"\"\"Fit model to data.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples in the number of samples and\n n_features is the number of predictors.\n\n Y : array-like of response, shape = [n_samples, n_targets]\n Target vectors, where n_samples in the number of samples and\n n_targets is the number of response variables.\n \"\"\"\n\n # copy since this will contains the residuals (deflated) matrices\n check_consistent_length(X, Y)\n X = check_array(X, dtype=np.float64, copy=self.copy)\n Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n n = X.shape[0]\n p = X.shape[1]\n q = Y.shape[1]\n\n if self.n_components < 1 or self.n_components > p:\n raise ValueError('Invalid number of components: %d' %\n self.n_components)\n if self.algorithm not in (\"svd\", \"nipals\"):\n raise ValueError(\"Got algorithm %s when only 'svd' \"\n \"and 'nipals' are known\" % self.algorithm)\n if self.algorithm == \"svd\" and self.mode == \"B\":\n raise ValueError('Incompatible configuration: mode B is not '\n 'implemented with svd algorithm')\n if self.deflation_mode not in [\"canonical\", \"regression\"]:\n raise ValueError('The deflation mode is unknown')\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # 
Residuals (deflated) matrices\n Xk = X\n Yk = Y\n # Results matrices\n self.x_scores_ = np.zeros((n, self.n_components))\n self.y_scores_ = np.zeros((n, self.n_components))\n self.x_weights_ = np.zeros((p, self.n_components))\n self.y_weights_ = np.zeros((q, self.n_components))\n self.x_loadings_ = np.zeros((p, self.n_components))\n self.y_loadings_ = np.zeros((q, self.n_components))\n self.n_iter_ = []\n\n # NIPALS algo: outer loop, over components\n for k in range(self.n_components):\n if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):\n # Yk constant\n warnings.warn('Y residual constant at iteration %s' % k)\n break\n # 1) weights estimation (inner loop)\n # -----------------------------------\n if self.algorithm == \"nipals\":\n x_weights, y_weights, n_iter_ = \\\n _nipals_twoblocks_inner_loop(\n X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,\n tol=self.tol, norm_y_weights=self.norm_y_weights)\n self.n_iter_.append(n_iter_)\n elif self.algorithm == \"svd\":\n x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)\n # Forces sign stability of x_weights and y_weights\n # Sign undeterminacy issue from svd if algorithm == \"svd\"\n # and from platform dependent computation if algorithm == 'nipals'\n x_weights, y_weights = svd_flip(x_weights, y_weights.T)\n y_weights = y_weights.T\n # compute scores\n x_scores = np.dot(Xk, x_weights)\n if self.norm_y_weights:\n y_ss = 1\n else:\n y_ss = np.dot(y_weights.T, y_weights)\n y_scores = np.dot(Yk, y_weights) / y_ss\n # test for null variance\n if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:\n warnings.warn('X scores are null at iteration %s' % k)\n break\n # 2) Deflation (in place)\n # ----------------------\n # Possible memory footprint reduction may done here: in order to\n # avoid the allocation of a data chunk for the rank-one\n # approximations matrix which is then subtracted to Xk, we suggest\n # to perform a column-wise deflation.\n #\n # - regress Xk's on x_score\n x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)\n # - subtract rank-one approximations to obtain remainder matrix\n Xk -= np.dot(x_scores, x_loadings.T)\n if self.deflation_mode == \"canonical\":\n # - regress Yk's on y_score, then subtract rank-one approx.\n y_loadings = (np.dot(Yk.T, y_scores)\n / np.dot(y_scores.T, y_scores))\n Yk -= np.dot(y_scores, y_loadings.T)\n if self.deflation_mode == \"regression\":\n # - regress Yk's on x_score, then subtract rank-one approx.\n y_loadings = (np.dot(Yk.T, x_scores)\n / np.dot(x_scores.T, x_scores))\n Yk -= np.dot(x_scores, y_loadings.T)\n # 3) Store weights, scores and loadings # Notation:\n self.x_scores_[:, k] = x_scores.ravel() # T\n self.y_scores_[:, k] = y_scores.ravel() # U\n self.x_weights_[:, k] = x_weights.ravel() # W\n self.y_weights_[:, k] = y_weights.ravel() # C\n self.x_loadings_[:, k] = x_loadings.ravel() # P\n self.y_loadings_[:, k] = y_loadings.ravel() # Q\n # Such that: X = TP' + Err and Y = UQ' + Err\n\n # 4) rotations from input space to transformed space (scores)\n # T = X W(P'W)^-1 = XW* (W* : p x k matrix)\n # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)\n self.x_rotations_ = np.dot(\n self.x_weights_,\n linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),\n **pinv2_args))\n if Y.shape[1] > 1:\n self.y_rotations_ = np.dot(\n self.y_weights_,\n linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),\n **pinv2_args))\n else:\n self.y_rotations_ = np.ones(1)\n\n if True or self.deflation_mode == \"regression\":\n # FIXME what's with the if?\n # Estimate regression 
coefficient\n # Regress Y on T\n # Y = TQ' + Err,\n # Then express in function of X\n # Y = X W(P'W)^-1Q' + Err = XB + Err\n # => B = W*Q' (p x q)\n self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)\n self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *\n self.y_std_)\n return self\n\n def transform(self, X, Y=None, copy=True):\n \"\"\"Apply the dimension reduction learned on the train data.\n\n Parameters\n ----------\n X : array-like of predictors, shape = [n_samples, p]\n Training vectors, where n_samples in the number of samples and\n p is the number of predictors.\n\n Y : array-like of response, shape = [n_samples, q], optional\n Training vectors, where n_samples in the number of samples and\n q is the number of response variables.\n\n copy : boolean, default True\n Whether to copy X and Y, or perform in-place normalization.\n\n Returns\n -------\n x_scores if Y is not given, (x_scores, y_scores) otherwise.\n \"\"\"\n check_is_fitted(self, 'x_mean_')\n X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)\n # Normalize\n X -= self.x_mean_\n X /= self.x_std_\n # Apply rotation\n x_scores = np.dot(X, self.x_rotations_)\n if Y is not None:\n Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n Y -= self.y_mean_\n Y /= self.y_std_\n y_scores = np.dot(Y, self.y_rotations_)\n return x_scores, y_scores\n\n return x_scores\n\n def predict(self, X, copy=True):\n \"\"\"Apply the dimension reduction learned on the train data.\n\n Parameters\n ----------\n X : array-like of predictors, shape = [n_samples, p]\n Training vectors, where n_samples in the number of samples and\n p is the number of predictors.\n\n copy : boolean, default True\n Whether to copy X and Y, or perform in-place normalization.\n\n Notes\n -----\n This call requires the estimation of a p x q matrix, which may\n be an issue in high dimensional space.\n \"\"\"\n check_is_fitted(self, 'x_mean_')\n X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)\n # Normalize\n X -= self.x_mean_\n X /= self.x_std_\n Ypred = np.dot(X, self.coef_)\n return Ypred + self.y_mean_\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Learn and apply the dimension reduction on the train data.\n\n Parameters\n ----------\n X : array-like of predictors, shape = [n_samples, p]\n Training vectors, where n_samples in the number of samples and\n p is the number of predictors.\n\n Y : array-like of response, shape = [n_samples, q], optional\n Training vectors, where n_samples in the number of samples and\n q is the number of response variables.\n\n copy : boolean, default True\n Whether to copy X and Y, or perform in-place normalization.\n\n Returns\n -------\n x_scores if Y is not given, (x_scores, y_scores) otherwise.\n \"\"\"\n return self.fit(X, y, **fit_params).transform(X, y)\n\n\nclass PLSRegression(_PLS):\n \"\"\"PLS regression\n\n PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1\n in case of one dimensional response.\n This class inherits from _PLS with mode=\"A\", deflation_mode=\"regression\",\n norm_y_weights=False and algorithm=\"nipals\".\n\n Read more in the :ref:`User Guide <cross_decomposition>`.\n\n Parameters\n ----------\n n_components : int, (default 2)\n Number of components to keep.\n\n scale : boolean, (default True)\n whether to scale the data\n\n max_iter : an integer, (default 500)\n the maximum number of iterations of the NIPALS inner loop (used\n only if algorithm=\"nipals\")\n\n tol : non-negative real\n Tolerance used in the 
iterative algorithm default 1e-06.\n\n copy : boolean, default True\n Whether the deflation should be done on a copy. Let the default\n value to True unless you don't care about side effect\n\n Attributes\n ----------\n x_weights_ : array, [p, n_components]\n X block weights vectors.\n\n y_weights_ : array, [q, n_components]\n Y block weights vectors.\n\n x_loadings_ : array, [p, n_components]\n X block loadings vectors.\n\n y_loadings_ : array, [q, n_components]\n Y block loadings vectors.\n\n x_scores_ : array, [n_samples, n_components]\n X scores.\n\n y_scores_ : array, [n_samples, n_components]\n Y scores.\n\n x_rotations_ : array, [p, n_components]\n X block to latents rotations.\n\n y_rotations_ : array, [q, n_components]\n Y block to latents rotations.\n\n coef_: array, [p, q]\n The coefficients of the linear model: ``Y = X coef_ + Err``\n\n n_iter_ : array-like\n Number of iterations of the NIPALS inner loop for each\n component.\n\n Notes\n -----\n Matrices::\n\n T: x_scores_\n U: y_scores_\n W: x_weights_\n C: y_weights_\n P: x_loadings_\n Q: y_loadings__\n\n Are computed such that::\n\n X = T P.T + Err and Y = U Q.T + Err\n T[:, k] = Xk W[:, k] for k in range(n_components)\n U[:, k] = Yk C[:, k] for k in range(n_components)\n x_rotations_ = W (P.T W)^(-1)\n y_rotations_ = C (Q.T C)^(-1)\n\n where Xk and Yk are residual matrices at iteration k.\n\n `Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`\n\n For each component k, find weights u, v that optimizes:\n ``max corr(Xk u, Yk v) * std(Xk u) std(Yk u)``, such that ``|u| = 1``\n\n Note that it maximizes both the correlations between the scores and the\n intra-block variances.\n\n The residual matrix of X (Xk+1) block is obtained by the deflation on\n the current X score: x_score.\n\n The residual matrix of Y (Yk+1) block is obtained by deflation on the\n current X score. This performs the PLS regression known as PLS2. This\n mode is prediction oriented.\n\n This implementation provides the same results that 3 PLS packages\n provided in the R language (R-project):\n\n - \"mixOmics\" with function pls(X, Y, mode = \"regression\")\n - \"plspm \" with function plsreg2(X, Y)\n - \"pls\" with function oscorespls.fit(X, Y)\n\n Examples\n --------\n >>> from sklearn.cross_decomposition import PLSRegression\n >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]\n >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]\n >>> pls2 = PLSRegression(n_components=2)\n >>> pls2.fit(X, Y)\n ... # doctest: +NORMALIZE_WHITESPACE\n PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,\n tol=1e-06)\n >>> Y_pred = pls2.predict(X)\n\n References\n ----------\n\n Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with\n emphasis on the two-block case. Technical Report 371, Department of\n Statistics, University of Washington, Seattle, 2000.\n\n In french but still a reference:\n Tenenhaus, M. (1998). La regression PLS: theorie et pratique. 
Paris:\n Editions Technic.\n \"\"\"\n\n def __init__(self, n_components=2, scale=True,\n max_iter=500, tol=1e-06, copy=True):\n super(PLSRegression, self).__init__(\n n_components=n_components, scale=scale,\n deflation_mode=\"regression\", mode=\"A\",\n norm_y_weights=False, max_iter=max_iter, tol=tol,\n copy=copy)\n\n\nclass PLSCanonical(_PLS):\n \"\"\" PLSCanonical implements the 2 blocks canonical PLS of the original Wold\n algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].\n\n This class inherits from PLS with mode=\"A\" and deflation_mode=\"canonical\",\n norm_y_weights=True and algorithm=\"nipals\", but svd should provide similar\n results up to numerical errors.\n\n Read more in the :ref:`User Guide <cross_decomposition>`.\n\n Parameters\n ----------\n scale : boolean, scale data? (default True)\n\n algorithm : string, \"nipals\" or \"svd\"\n The algorithm used to estimate the weights. It will be called\n n_components times, i.e. once for each iteration of the outer loop.\n\n max_iter : an integer, (default 500)\n the maximum number of iterations of the NIPALS inner loop (used\n only if algorithm=\"nipals\")\n\n tol : non-negative real, default 1e-06\n the tolerance used in the iterative algorithm\n\n copy : boolean, default True\n Whether the deflation should be done on a copy. Let the default\n value to True unless you don't care about side effect\n\n n_components : int, number of components to keep. (default 2).\n\n Attributes\n ----------\n x_weights_ : array, shape = [p, n_components]\n X block weights vectors.\n\n y_weights_ : array, shape = [q, n_components]\n Y block weights vectors.\n\n x_loadings_ : array, shape = [p, n_components]\n X block loadings vectors.\n\n y_loadings_ : array, shape = [q, n_components]\n Y block loadings vectors.\n\n x_scores_ : array, shape = [n_samples, n_components]\n X scores.\n\n y_scores_ : array, shape = [n_samples, n_components]\n Y scores.\n\n x_rotations_ : array, shape = [p, n_components]\n X block to latents rotations.\n\n y_rotations_ : array, shape = [q, n_components]\n Y block to latents rotations.\n\n n_iter_ : array-like\n Number of iterations of the NIPALS inner loop for each\n component. Not useful if the algorithm provided is \"svd\".\n\n Notes\n -----\n Matrices::\n\n T: x_scores_\n U: y_scores_\n W: x_weights_\n C: y_weights_\n P: x_loadings_\n Q: y_loadings__\n\n Are computed such that::\n\n X = T P.T + Err and Y = U Q.T + Err\n T[:, k] = Xk W[:, k] for k in range(n_components)\n U[:, k] = Yk C[:, k] for k in range(n_components)\n x_rotations_ = W (P.T W)^(-1)\n y_rotations_ = C (Q.T C)^(-1)\n\n where Xk and Yk are residual matrices at iteration k.\n\n `Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`\n\n For each component k, find weights u, v that optimize::\n\n max corr(Xk u, Yk v) * std(Xk u) std(Yk u), such that ``|u| = |v| = 1``\n\n Note that it maximizes both the correlations between the scores and the\n intra-block variances.\n\n The residual matrix of X (Xk+1) block is obtained by the deflation on the\n current X score: x_score.\n\n The residual matrix of Y (Yk+1) block is obtained by deflation on the\n current Y score. This performs a canonical symmetric version of the PLS\n regression. But slightly different than the CCA. 
This is mostly used\n for modeling.\n\n This implementation provides the same results that the \"plspm\" package\n provided in the R language (R-project), using the function plsca(X, Y).\n Results are equal or collinear with the function\n ``pls(..., mode = \"canonical\")`` of the \"mixOmics\" package. The difference\n relies in the fact that mixOmics implementation does not exactly implement\n the Wold algorithm since it does not normalize y_weights to one.\n\n Examples\n --------\n >>> from sklearn.cross_decomposition import PLSCanonical\n >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]\n >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]\n >>> plsca = PLSCanonical(n_components=2)\n >>> plsca.fit(X, Y)\n ... # doctest: +NORMALIZE_WHITESPACE\n PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,\n scale=True, tol=1e-06)\n >>> X_c, Y_c = plsca.transform(X, Y)\n\n References\n ----------\n\n Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with\n emphasis on the two-block case. Technical Report 371, Department of\n Statistics, University of Washington, Seattle, 2000.\n\n Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:\n Editions Technic.\n\n See also\n --------\n CCA\n PLSSVD\n \"\"\"\n\n def __init__(self, n_components=2, scale=True, algorithm=\"nipals\",\n max_iter=500, tol=1e-06, copy=True):\n super(PLSCanonical, self).__init__(\n n_components=n_components, scale=scale,\n deflation_mode=\"canonical\", mode=\"A\",\n norm_y_weights=True, algorithm=algorithm,\n max_iter=max_iter, tol=tol, copy=copy)\n\n\nclass PLSSVD(BaseEstimator, TransformerMixin):\n \"\"\"Partial Least Square SVD\n\n Simply perform a svd on the crosscovariance matrix: X'Y\n There are no iterative deflation here.\n\n Read more in the :ref:`User Guide <cross_decomposition>`.\n\n Parameters\n ----------\n n_components : int, default 2\n Number of components to keep.\n\n scale : boolean, default True\n Whether to scale X and Y.\n\n copy : boolean, default True\n Whether to copy X and Y, or perform in-place computations.\n\n Attributes\n ----------\n x_weights_ : array, [p, n_components]\n X block weights vectors.\n\n y_weights_ : array, [q, n_components]\n Y block weights vectors.\n\n x_scores_ : array, [n_samples, n_components]\n X scores.\n\n y_scores_ : array, [n_samples, n_components]\n Y scores.\n\n See also\n --------\n PLSCanonical\n CCA\n \"\"\"\n\n def __init__(self, n_components=2, scale=True, copy=True):\n self.n_components = n_components\n self.scale = scale\n self.copy = copy\n\n def fit(self, X, Y):\n # copy since this will contains the centered data\n check_consistent_length(X, Y)\n X = check_array(X, dtype=np.float64, copy=self.copy)\n Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n if self.n_components > max(Y.shape[1], X.shape[1]):\n raise ValueError(\"Invalid number of components n_components=%d\"\n \" with X of shape %s and Y of shape %s.\"\n % (self.n_components, str(X.shape), str(Y.shape)))\n\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # svd(X'Y)\n C = np.dot(X.T, Y)\n\n # The arpack svds solver only works if the number of extracted\n # components is smaller than rank(X) - 1. Hence, if we want to extract\n # all the components (C.shape[1]), we have to use another one. 
Else,\n # let's use arpacks to compute only the interesting components.\n if self.n_components >= np.min(C.shape):\n U, s, V = linalg.svd(C, full_matrices=False)\n else:\n U, s, V = arpack.svds(C, k=self.n_components)\n # Deterministic output\n U, V = svd_flip(U, V)\n V = V.T\n self.x_scores_ = np.dot(X, U)\n self.y_scores_ = np.dot(Y, V)\n self.x_weights_ = U\n self.y_weights_ = V\n return self\n\n def transform(self, X, Y=None):\n \"\"\"Apply the dimension reduction learned on the train data.\"\"\"\n check_is_fitted(self, 'x_mean_')\n X = check_array(X, dtype=np.float64)\n Xr = (X - self.x_mean_) / self.x_std_\n x_scores = np.dot(Xr, self.x_weights_)\n if Y is not None:\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n Yr = (Y - self.y_mean_) / self.y_std_\n y_scores = np.dot(Yr, self.y_weights_)\n return x_scores, y_scores\n return x_scores\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Learn and apply the dimension reduction on the train data.\n\n Parameters\n ----------\n X : array-like of predictors, shape = [n_samples, p]\n Training vectors, where n_samples in the number of samples and\n p is the number of predictors.\n\n Y : array-like of response, shape = [n_samples, q], optional\n Training vectors, where n_samples in the number of samples and\n q is the number of response variables.\n\n Returns\n -------\n x_scores if Y is not given, (x_scores, y_scores) otherwise.\n \"\"\"\n return self.fit(X, y, **fit_params).transform(X, y)\n",
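The file above ends with the three public PLS estimators. The following usage sketch is not part of the original module; it reuses the toy X/Y arrays from the docstring examples and assumes the classes are importable from sklearn.cross_decomposition, simply to show the shared fit/transform/predict surface.

import numpy as np
from sklearn.cross_decomposition import PLSRegression, PLSCanonical, PLSSVD

# Toy blocks borrowed from the docstring examples above.
X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])

# PLS2 regression: Y is predicted from X through the (p x q) coef_ matrix.
pls2 = PLSRegression(n_components=2).fit(X, Y)
Y_pred = pls2.predict(X)      # shape (4, 2)
T = pls2.transform(X)         # X scores, shape (4, 2)

# Canonical PLS: symmetric deflation, scores returned for both blocks.
X_c, Y_c = PLSCanonical(n_components=2).fit(X, Y).transform(X, Y)

# PLSSVD: a single SVD of X'Y, no deflation loop.
X_s, Y_s = PLSSVD(n_components=2).fit(X, Y).transform(X, Y)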
"\"\"\"Forest covertype dataset.\n\nA classic dataset for classification benchmarks, featuring categorical and\nreal-valued features.\n\nThe dataset page is available from UCI Machine Learning Repository\n\n http://archive.ics.uci.edu/ml/datasets/Covertype\n\nCourtesy of Jock A. Blackard and Colorado State University.\n\"\"\"\n\n# Author: Lars Buitinck\n# Peter Prettenhofer <[email protected]>\n# License: BSD 3 clause\n\nfrom gzip import GzipFile\nfrom io import BytesIO\nimport logging\nfrom os.path import exists, join\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\n\nimport numpy as np\n\nfrom .base import get_data_home\nfrom .base import Bunch\nfrom .base import _pkl_filepath\nfrom ..utils.fixes import makedirs\nfrom ..externals import joblib\nfrom ..utils import check_random_state\n\n\nURL = ('http://archive.ics.uci.edu/ml/'\n 'machine-learning-databases/covtype/covtype.data.gz')\n\n\nlogger = logging.getLogger()\n\n\ndef fetch_covtype(data_home=None, download_if_missing=True,\n random_state=None, shuffle=False):\n \"\"\"Load the covertype dataset, downloading it if necessary.\n\n Read more in the :ref:`User Guide <datasets>`.\n\n Parameters\n ----------\n data_home : string, optional\n Specify another download and cache folder for the datasets. By default\n all scikit learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing : boolean, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n random_state : int, RandomState instance or None, optional (default=None)\n Random state for shuffling the dataset.\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n shuffle : bool, default=False\n Whether to shuffle dataset.\n\n Returns\n -------\n dataset : dict-like object with the following attributes:\n\n dataset.data : numpy array of shape (581012, 54)\n Each row corresponds to the 54 features in the dataset.\n\n dataset.target : numpy array of shape (581012,)\n Each value corresponds to one of the 7 forest covertypes with values\n ranging between 1 to 7.\n\n dataset.DESCR : string\n Description of the forest covertype dataset.\n\n \"\"\"\n\n data_home = get_data_home(data_home=data_home)\n covtype_dir = join(data_home, \"covertype\")\n samples_path = _pkl_filepath(covtype_dir, \"samples\")\n targets_path = _pkl_filepath(covtype_dir, \"targets\")\n available = exists(samples_path)\n\n if download_if_missing and not available:\n makedirs(covtype_dir, exist_ok=True)\n logger.warning(\"Downloading %s\" % URL)\n f = BytesIO(urlopen(URL).read())\n Xy = np.genfromtxt(GzipFile(fileobj=f), delimiter=',')\n\n X = Xy[:, :-1]\n y = Xy[:, -1].astype(np.int32)\n\n joblib.dump(X, samples_path, compress=9)\n joblib.dump(y, targets_path, compress=9)\n\n try:\n X, y\n except NameError:\n X = joblib.load(samples_path)\n y = joblib.load(targets_path)\n\n if shuffle:\n ind = np.arange(X.shape[0])\n rng = check_random_state(random_state)\n rng.shuffle(ind)\n X = X[ind]\n y = y[ind]\n\n return Bunch(data=X, target=y, DESCR=__doc__)\n",
"\"\"\"\nTesting for the approximate neighbor search using\nLocality Sensitive Hashing Forest module\n(sklearn.neighbors.LSHForest).\n\"\"\"\n\n# Author: Maheshakya Wijewardena, Joel Nothman\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_array_less\nfrom sklearn.utils.testing import assert_greater\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_not_equal\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.utils.testing import ignore_warnings\n\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.neighbors import LSHForest\nfrom sklearn.neighbors import NearestNeighbors\n\n\ndef test_neighbors_accuracy_with_n_candidates():\n # Checks whether accuracy increases as `n_candidates` increases.\n n_candidates_values = np.array([.1, 50, 500])\n n_samples = 100\n n_features = 10\n n_iter = 10\n n_points = 5\n rng = np.random.RandomState(42)\n accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)\n X = rng.rand(n_samples, n_features)\n\n for i, n_candidates in enumerate(n_candidates_values):\n lshf = LSHForest(n_candidates=n_candidates)\n ignore_warnings(lshf.fit)(X)\n for j in range(n_iter):\n query = X[rng.randint(0, n_samples)].reshape(1, -1)\n\n neighbors = lshf.kneighbors(query, n_neighbors=n_points,\n return_distance=False)\n distances = pairwise_distances(query, X, metric='cosine')\n ranks = np.argsort(distances)[0, :n_points]\n\n intersection = np.intersect1d(ranks, neighbors).shape[0]\n ratio = intersection / float(n_points)\n accuracies[i] = accuracies[i] + ratio\n\n accuracies[i] = accuracies[i] / float(n_iter)\n # Sorted accuracies should be equal to original accuracies\n assert_true(np.all(np.diff(accuracies) >= 0),\n msg=\"Accuracies are not non-decreasing.\")\n # Highest accuracy should be strictly greater than the lowest\n assert_true(np.ptp(accuracies) > 0,\n msg=\"Highest accuracy is not strictly greater than lowest.\")\n\n\ndef test_neighbors_accuracy_with_n_estimators():\n # Checks whether accuracy increases as `n_estimators` increases.\n n_estimators = np.array([1, 10, 100])\n n_samples = 100\n n_features = 10\n n_iter = 10\n n_points = 5\n rng = np.random.RandomState(42)\n accuracies = np.zeros(n_estimators.shape[0], dtype=float)\n X = rng.rand(n_samples, n_features)\n\n for i, t in enumerate(n_estimators):\n lshf = LSHForest(n_candidates=500, n_estimators=t)\n ignore_warnings(lshf.fit)(X)\n for j in range(n_iter):\n query = X[rng.randint(0, n_samples)].reshape(1, -1)\n neighbors = lshf.kneighbors(query, n_neighbors=n_points,\n return_distance=False)\n distances = pairwise_distances(query, X, metric='cosine')\n ranks = np.argsort(distances)[0, :n_points]\n\n intersection = np.intersect1d(ranks, neighbors).shape[0]\n ratio = intersection / float(n_points)\n accuracies[i] = accuracies[i] + ratio\n\n accuracies[i] = accuracies[i] / float(n_iter)\n # Sorted accuracies should be equal to original accuracies\n assert_true(np.all(np.diff(accuracies) >= 0),\n msg=\"Accuracies are not non-decreasing.\")\n # Highest accuracy should be strictly greater than the lowest\n assert_true(np.ptp(accuracies) > 0,\n msg=\"Highest accuracy is not strictly greater than lowest.\")\n\n\n@ignore_warnings\ndef 
test_kneighbors():\n # Checks whether desired number of neighbors are returned.\n # It is guaranteed to return the requested number of neighbors\n # if `min_hash_match` is set to 0. Returned distances should be\n # in ascending order.\n n_samples = 12\n n_features = 2\n n_iter = 10\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest(min_hash_match=0)\n # Test unfitted estimator\n assert_raises(ValueError, lshf.kneighbors, X[0])\n\n ignore_warnings(lshf.fit)(X)\n\n for i in range(n_iter):\n n_neighbors = rng.randint(0, n_samples)\n query = X[rng.randint(0, n_samples)].reshape(1, -1)\n neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,\n return_distance=False)\n # Desired number of neighbors should be returned.\n assert_equal(neighbors.shape[1], n_neighbors)\n\n # Multiple points\n n_queries = 5\n queries = X[rng.randint(0, n_samples, n_queries)]\n distances, neighbors = lshf.kneighbors(queries,\n n_neighbors=1,\n return_distance=True)\n assert_equal(neighbors.shape[0], n_queries)\n assert_equal(distances.shape[0], n_queries)\n # Test only neighbors\n neighbors = lshf.kneighbors(queries, n_neighbors=1,\n return_distance=False)\n assert_equal(neighbors.shape[0], n_queries)\n # Test random point(not in the data set)\n query = rng.randn(n_features).reshape(1, -1)\n lshf.kneighbors(query, n_neighbors=1,\n return_distance=False)\n # Test n_neighbors at initialization\n neighbors = lshf.kneighbors(query, return_distance=False)\n assert_equal(neighbors.shape[1], 5)\n # Test `neighbors` has an integer dtype\n assert_true(neighbors.dtype.kind == 'i',\n msg=\"neighbors are not in integer dtype.\")\n\n\ndef test_radius_neighbors():\n # Checks whether Returned distances are less than `radius`\n # At least one point should be returned when the `radius` is set\n # to mean distance from the considering point to other points in\n # the database.\n # Moreover, this test compares the radius neighbors of LSHForest\n # with the `sklearn.neighbors.NearestNeighbors`.\n n_samples = 12\n n_features = 2\n n_iter = 10\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest()\n # Test unfitted estimator\n assert_raises(ValueError, lshf.radius_neighbors, X[0])\n\n ignore_warnings(lshf.fit)(X)\n\n for i in range(n_iter):\n # Select a random point in the dataset as the query\n query = X[rng.randint(0, n_samples)].reshape(1, -1)\n\n # At least one neighbor should be returned when the radius is the\n # mean distance from the query to the points of the dataset.\n mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))\n neighbors = lshf.radius_neighbors(query, radius=mean_dist,\n return_distance=False)\n\n assert_equal(neighbors.shape, (1,))\n assert_equal(neighbors.dtype, object)\n assert_greater(neighbors[0].shape[0], 0)\n # All distances to points in the results of the radius query should\n # be less than mean_dist\n distances, neighbors = lshf.radius_neighbors(query,\n radius=mean_dist,\n return_distance=True)\n assert_array_less(distances[0], mean_dist)\n\n # Multiple points\n n_queries = 5\n queries = X[rng.randint(0, n_samples, n_queries)]\n distances, neighbors = lshf.radius_neighbors(queries,\n return_distance=True)\n\n # dists and inds should not be 1D arrays or arrays of variable lengths\n # hence the use of the object dtype.\n assert_equal(distances.shape, (n_queries,))\n assert_equal(distances.dtype, object)\n assert_equal(neighbors.shape, (n_queries,))\n assert_equal(neighbors.dtype, object)\n\n # Compare with 
exact neighbor search\n query = X[rng.randint(0, n_samples)].reshape(1, -1)\n mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))\n nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)\n\n distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)\n distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)\n\n # Radius-based queries do not sort the result points and the order\n # depends on the method, the random_state and the dataset order. Therefore\n # we need to sort the results ourselves before performing any comparison.\n sorted_dists_exact = np.sort(distances_exact[0])\n sorted_dists_approx = np.sort(distances_approx[0])\n\n # Distances to exact neighbors are less than or equal to approximate\n # counterparts as the approximate radius query might have missed some\n # closer neighbors.\n assert_true(np.all(np.less_equal(sorted_dists_exact,\n sorted_dists_approx)))\n\n\n@ignore_warnings\ndef test_radius_neighbors_boundary_handling():\n X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]\n n_points = len(X)\n\n # Build an exact nearest neighbors model as reference model to ensure\n # consistency between exact and approximate methods\n nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)\n\n # Build a LSHForest model with hyperparameter values that always guarantee\n # exact results on this toy dataset.\n lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)\n\n # define a query aligned with the first axis\n query = [[1., 0.]]\n\n # Compute the exact cosine distances of the query to the four points of\n # the dataset\n dists = pairwise_distances(query, X, metric='cosine').ravel()\n\n # The first point is almost aligned with the query (very small angle),\n # the cosine distance should therefore be almost null:\n assert_almost_equal(dists[0], 0, decimal=5)\n\n # The second point form an angle of 45 degrees to the query vector\n assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))\n\n # The third point is orthogonal from the query vector hence at a distance\n # exactly one:\n assert_almost_equal(dists[2], 1)\n\n # The last point is almost colinear but with opposite sign to the query\n # therefore it has a cosine 'distance' very close to the maximum possible\n # value of 2.\n assert_almost_equal(dists[3], 2, decimal=5)\n\n # If we query with a radius of one, all the samples except the last sample\n # should be included in the results. 
This means that the third sample\n # is lying on the boundary of the radius query:\n exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)\n approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)\n\n assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])\n assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])\n assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])\n assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])\n\n # If we perform the same query with a slightly lower radius, the third\n # point of the dataset that lay on the boundary of the previous query\n # is now rejected:\n eps = np.finfo(np.float64).eps\n exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)\n approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)\n\n assert_array_equal(np.sort(exact_idx[0]), [0, 1])\n assert_array_equal(np.sort(approx_idx[0]), [0, 1])\n assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])\n assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])\n\n\ndef test_distances():\n # Checks whether returned neighbors are from closest to farthest.\n n_samples = 12\n n_features = 2\n n_iter = 10\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest()\n ignore_warnings(lshf.fit)(X)\n\n for i in range(n_iter):\n n_neighbors = rng.randint(0, n_samples)\n query = X[rng.randint(0, n_samples)].reshape(1, -1)\n distances, neighbors = lshf.kneighbors(query,\n n_neighbors=n_neighbors,\n return_distance=True)\n\n # Returned neighbors should be from closest to farthest, that is\n # increasing distance values.\n assert_true(np.all(np.diff(distances[0]) >= 0))\n\n # Note: the radius_neighbors method does not guarantee the order of\n # the results.\n\n\ndef test_fit():\n # Checks whether `fit` method sets all attribute values correctly.\n n_samples = 12\n n_features = 2\n n_estimators = 5\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest(n_estimators=n_estimators)\n ignore_warnings(lshf.fit)(X)\n\n # _input_array = X\n assert_array_equal(X, lshf._fit_X)\n # A hash function g(p) for each tree\n assert_equal(n_estimators, len(lshf.hash_functions_))\n # Hash length = 32\n assert_equal(32, lshf.hash_functions_[0].components_.shape[0])\n # Number of trees_ in the forest\n assert_equal(n_estimators, len(lshf.trees_))\n # Each tree has entries for every data point\n assert_equal(n_samples, len(lshf.trees_[0]))\n # Original indices after sorting the hashes\n assert_equal(n_estimators, len(lshf.original_indices_))\n # Each set of original indices in a tree has entries for every data point\n assert_equal(n_samples, len(lshf.original_indices_[0]))\n\n\ndef test_partial_fit():\n # Checks whether inserting array is consistent with fitted data.\n # `partial_fit` method should set all attribute values correctly.\n n_samples = 12\n n_samples_partial_fit = 3\n n_features = 2\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n X_partial_fit = rng.rand(n_samples_partial_fit, n_features)\n\n lshf = LSHForest()\n\n # Test unfitted estimator\n ignore_warnings(lshf.partial_fit)(X)\n assert_array_equal(X, lshf._fit_X)\n\n ignore_warnings(lshf.fit)(X)\n\n # Insert wrong dimension\n assert_raises(ValueError, lshf.partial_fit,\n np.random.randn(n_samples_partial_fit, n_features - 1))\n\n ignore_warnings(lshf.partial_fit)(X_partial_fit)\n\n # size of _input_array = samples + 1 after insertion\n assert_equal(lshf._fit_X.shape[0],\n n_samples + 
n_samples_partial_fit)\n # size of original_indices_[1] = samples + 1\n assert_equal(len(lshf.original_indices_[0]),\n n_samples + n_samples_partial_fit)\n # size of trees_[1] = samples + 1\n assert_equal(len(lshf.trees_[1]),\n n_samples + n_samples_partial_fit)\n\n\ndef test_hash_functions():\n # Checks randomness of hash functions.\n # Variance and mean of each hash function (projection vector)\n # should be different from flattened array of hash functions.\n # If hash functions are not randomly built (seeded with\n # same value), variances and means of all functions are equal.\n n_samples = 12\n n_features = 2\n n_estimators = 5\n rng = np.random.RandomState(42)\n X = rng.rand(n_samples, n_features)\n\n lshf = LSHForest(n_estimators=n_estimators,\n random_state=rng.randint(0, np.iinfo(np.int32).max))\n ignore_warnings(lshf.fit)(X)\n\n hash_functions = []\n for i in range(n_estimators):\n hash_functions.append(lshf.hash_functions_[i].components_)\n\n for i in range(n_estimators):\n assert_not_equal(np.var(hash_functions),\n np.var(lshf.hash_functions_[i].components_))\n\n for i in range(n_estimators):\n assert_not_equal(np.mean(hash_functions),\n np.mean(lshf.hash_functions_[i].components_))\n\n\ndef test_candidates():\n # Checks whether candidates are sufficient.\n # This should handle the cases when number of candidates is 0.\n # User should be warned when number of candidates is less than\n # requested number of neighbors.\n X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],\n [6, 10, 2]], dtype=np.float32)\n X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)\n\n # For zero candidates\n lshf = LSHForest(min_hash_match=32)\n ignore_warnings(lshf.fit)(X_train)\n\n message = (\"Number of candidates is not sufficient to retrieve\"\n \" %i neighbors with\"\n \" min_hash_match = %i. Candidates are filled up\"\n \" uniformly from unselected\"\n \" indices.\" % (3, 32))\n assert_warns_message(UserWarning, message, lshf.kneighbors,\n X_test, n_neighbors=3)\n distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)\n assert_equal(distances.shape[1], 3)\n\n # For candidates less than n_neighbors\n lshf = LSHForest(min_hash_match=31)\n ignore_warnings(lshf.fit)(X_train)\n\n message = (\"Number of candidates is not sufficient to retrieve\"\n \" %i neighbors with\"\n \" min_hash_match = %i. 
Candidates are filled up\"\n \" uniformly from unselected\"\n \" indices.\" % (5, 31))\n assert_warns_message(UserWarning, message, lshf.kneighbors,\n X_test, n_neighbors=5)\n distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)\n assert_equal(distances.shape[1], 5)\n\n\ndef test_graphs():\n # Smoke tests for graph methods.\n n_samples_sizes = [5, 10, 20]\n n_features = 3\n rng = np.random.RandomState(42)\n\n for n_samples in n_samples_sizes:\n X = rng.rand(n_samples, n_features)\n lshf = LSHForest(min_hash_match=0)\n ignore_warnings(lshf.fit)(X)\n\n kneighbors_graph = lshf.kneighbors_graph(X)\n radius_neighbors_graph = lshf.radius_neighbors_graph(X)\n\n assert_equal(kneighbors_graph.shape[0], n_samples)\n assert_equal(kneighbors_graph.shape[1], n_samples)\n assert_equal(radius_neighbors_graph.shape[0], n_samples)\n assert_equal(radius_neighbors_graph.shape[1], n_samples)\n\n\ndef test_sparse_input():\n # note: Fixed random state in sp.rand is not supported in older scipy.\n # The test should succeed regardless.\n X1 = sp.rand(50, 100)\n X2 = sp.rand(10, 100)\n forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)\n forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)\n\n d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)\n d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)\n\n assert_almost_equal(d_sparse, d_dense)\n assert_almost_equal(i_sparse, i_dense)\n\n d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,\n return_distance=True)\n d_dense, i_dense = forest_dense.radius_neighbors(X2.A,\n return_distance=True)\n assert_equal(d_sparse.shape, d_dense.shape)\n for a, b in zip(d_sparse, d_dense):\n assert_almost_equal(a, b)\n for a, b in zip(i_sparse, i_dense):\n assert_almost_equal(a, b)\n",
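The recurring pattern in the tests above is a comparison between LSHForest's approximate cosine neighbours and an exact brute-force reference. Below is a condensed sketch of that pattern, runnable against the scikit-learn version these tests target (LSHForest was deprecated in later releases); it is illustrative only and not part of the test file.

import numpy as np
from sklearn.neighbors import LSHForest, NearestNeighbors

rng = np.random.RandomState(42)
X = rng.rand(100, 10)
query = X[:1]

# Approximate neighbours from the hashed forest.
lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=42).fit(X)
approx_dist, approx_idx = lshf.kneighbors(query, n_neighbors=5)

# Exact brute-force cosine neighbours as the reference.
exact = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
exact_dist, exact_idx = exact.kneighbors(query, n_neighbors=5)

# Sorted exact distances can never exceed their approximate counterparts.
print(np.all(exact_dist <= approx_dist + 1e-12))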
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport copy\n\nimport six\n\nimport numpy as np\n\nfrom numpy.testing import assert_array_equal\n\nfrom matplotlib.path import Path\nfrom matplotlib.patches import Polygon\nfrom nose.tools import assert_raises, assert_equal\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom matplotlib import transforms\n\n\ndef test_readonly_path():\n path = Path.unit_circle()\n\n def modify_vertices():\n path.vertices = path.vertices * 2.0\n\n assert_raises(AttributeError, modify_vertices)\n\n\ndef test_point_in_path():\n # Test #1787\n verts2 = [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]\n\n path = Path(verts2, closed=True)\n points = [(0.5, 0.5), (1.5, 0.5)]\n ret = path.contains_points(points)\n assert ret.dtype == 'bool'\n assert np.all(ret == [True, False])\n\n\ndef test_contains_points_negative_radius():\n path = Path.unit_circle()\n\n points = [(0.0, 0.0), (1.25, 0.0), (0.9, 0.9)]\n expected = [True, False, False]\n result = path.contains_points(points, radius=-0.5)\n\n assert np.all(result == expected)\n\n\n@image_comparison(baseline_images=['path_clipping'],\n extensions=['svg'], remove_text=True)\ndef test_path_clipping():\n fig = plt.figure(figsize=(6.0, 6.2))\n\n for i, xy in enumerate([\n [(200, 200), (200, 350), (400, 350), (400, 200)],\n [(200, 200), (200, 350), (400, 350), (400, 100)],\n [(200, 100), (200, 350), (400, 350), (400, 100)],\n [(200, 100), (200, 415), (400, 350), (400, 100)],\n [(200, 100), (200, 415), (400, 415), (400, 100)],\n [(200, 415), (400, 415), (400, 100), (200, 100)],\n [(400, 415), (400, 100), (200, 100), (200, 415)]]):\n ax = fig.add_subplot(4, 2, i+1)\n bbox = [0, 140, 640, 260]\n ax.set_xlim(bbox[0], bbox[0] + bbox[2])\n ax.set_ylim(bbox[1], bbox[1] + bbox[3])\n ax.add_patch(Polygon(\n xy, facecolor='none', edgecolor='red', closed=True))\n\n\ndef test_point_in_path_nan():\n box = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n p = Path(box)\n test = np.array([[np.nan, 0.5]])\n contains = p.contains_points(test)\n assert len(contains) == 1\n assert not contains[0]\n\n\n@image_comparison(baseline_images=['semi_log_with_zero'], extensions=['png'])\ndef test_log_transform_with_zero():\n x = np.arange(-10, 10)\n y = (1.0 - 1.0/(x**2+1))**20\n\n fig, ax = plt.subplots()\n ax.semilogy(x, y, \"-o\", lw=15)\n ax.grid(True)\n\n\ndef test_make_compound_path_empty():\n # We should be able to make a compound path with no arguments.\n # This makes it easier to write generic path based code.\n r = Path.make_compound_path()\n assert_equal(r.vertices.shape, (0, 2))\n\n\n@image_comparison(baseline_images=['xkcd'], remove_text=True)\ndef test_xkcd():\n np.random.seed(0)\n\n x = np.linspace(0, 2.0 * np.pi, 100.0)\n y = np.sin(x)\n\n with plt.xkcd():\n fig, ax = plt.subplots()\n ax.plot(x, y)\n\n\n@image_comparison(baseline_images=['marker_paths'], extensions=['pdf'],\n remove_text=True)\ndef test_marker_paths_pdf():\n N = 7\n\n plt.errorbar(np.arange(N),\n np.ones(N) + 4,\n np.ones(N))\n plt.xlim(-1, N)\n plt.ylim(-1, 7)\n\n\n@image_comparison(baseline_images=['nan_path'], style='default',\n remove_text=True, extensions=['pdf', 'svg', 'eps', 'png'])\ndef test_nan_isolated_points():\n\n y0 = [0, np.nan, 2, np.nan, 4, 5, 6]\n y1 = [np.nan, 7, np.nan, 9, 10, np.nan, 12]\n\n fig, ax = plt.subplots()\n\n ax.plot(y0, '-o')\n ax.plot(y1, '-o')\n\n\ndef test_path_no_doubled_point_in_to_polygon():\n hand = np.array(\n [[1.64516129, 1.16145833],\n 
[1.64516129, 1.59375],\n [1.35080645, 1.921875],\n [1.375, 2.18229167],\n [1.68548387, 1.9375],\n [1.60887097, 2.55208333],\n [1.68548387, 2.69791667],\n [1.76209677, 2.56770833],\n [1.83064516, 1.97395833],\n [1.89516129, 2.75],\n [1.9516129, 2.84895833],\n [2.01209677, 2.76041667],\n [1.99193548, 1.99479167],\n [2.11290323, 2.63020833],\n [2.2016129, 2.734375],\n [2.25403226, 2.60416667],\n [2.14919355, 1.953125],\n [2.30645161, 2.36979167],\n [2.39112903, 2.36979167],\n [2.41532258, 2.1875],\n [2.1733871, 1.703125],\n [2.07782258, 1.16666667]])\n\n (r0, c0, r1, c1) = (1.0, 1.5, 2.1, 2.5)\n\n poly = Path(np.vstack((hand[:, 1], hand[:, 0])).T, closed=True)\n clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])\n poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]\n\n assert np.all(poly_clipped[-2] != poly_clipped[-1])\n assert np.all(poly_clipped[-1] == poly_clipped[0])\n\n\ndef test_path_to_polygons():\n data = [[10, 10], [20, 20]]\n p = Path(data)\n\n assert_array_equal(p.to_polygons(width=40, height=40), [])\n assert_array_equal(p.to_polygons(width=40, height=40, closed_only=False),\n [data])\n assert_array_equal(p.to_polygons(), [])\n assert_array_equal(p.to_polygons(closed_only=False), [data])\n\n data = [[10, 10], [20, 20], [30, 30]]\n closed_data = [[10, 10], [20, 20], [30, 30], [10, 10]]\n p = Path(data)\n\n assert_array_equal(p.to_polygons(width=40, height=40), [closed_data])\n assert_array_equal(p.to_polygons(width=40, height=40, closed_only=False),\n [data])\n assert_array_equal(p.to_polygons(), [closed_data])\n assert_array_equal(p.to_polygons(closed_only=False), [data])\n\n\ndef test_path_deepcopy():\n # Should not raise any error\n verts = [[0, 0], [1, 1]]\n codes = [Path.MOVETO, Path.LINETO]\n path1 = Path(verts)\n path2 = Path(verts, codes)\n copy.deepcopy(path1)\n copy.deepcopy(path2)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=['-s', '--with-doctest'], exit=False)\n",
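The Path behaviours exercised in the test file above reduce to a small public API: containment queries, bounding-box clipping, and polygon export. A minimal sketch of that API (not part of the original tests), assuming only matplotlib:

from matplotlib.path import Path
from matplotlib import transforms

# Containment of points in a closed unit square.
square = Path([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)], closed=True)
print(square.contains_points([(0.5, 0.5), (1.5, 0.5)]))   # [ True False]

# Single-point query against the unit circle.
circle = Path.unit_circle()
print(circle.contains_point((1.25, 0.0)))                  # False

# Clip to a bounding box and export the result as closed polygons.
clipped = circle.clip_to_bbox(transforms.Bbox([[0.0, 0.0], [1.0, 1.0]]))
print(len(clipped.to_polygons()))                          # at least one polygon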
"\"\"\"\nThis module defines default legend handlers.\n\nIt is strongly encouraged to have read the :ref:`legend guide\n<plotting-guide-legend>` before this documentation.\n\nLegend handlers are expected to be a callable object with a following\nsignature. ::\n\n legend_handler(legend, orig_handle, fontsize, handlebox)\n\nWhere *legend* is the legend itself, *orig_handle* is the original\nplot, *fontsize* is the fontsize in pixles, and *handlebox* is a\nOffsetBox instance. Within the call, you should create relevant\nartists (using relevant properties from the *legend* and/or\n*orig_handle*) and add them into the handlebox. The artists needs to\nbe scaled according to the fontsize (note that the size is in pixel,\ni.e., this is dpi-scaled value).\n\nThis module includes definition of several legend handler classes\nderived from the base class (HandlerBase) with the following method.\n\n def legend_artist(self, legend, orig_handle, fontsize, handlebox):\n\n\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import zip\n\nimport numpy as np\n\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle\nimport matplotlib.collections as mcoll\nimport matplotlib.colors as mcolors\n\n\ndef update_from_first_child(tgt, src):\n tgt.update_from(src.get_children()[0])\n\n\nclass HandlerBase(object):\n \"\"\"\n A Base class for default legend handlers.\n\n The derived classes are meant to override *create_artists* method, which\n has a following signature.::\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n trans):\n\n The overridden method needs to create artists of the given\n transform that fits in the given dimension (xdescent, ydescent,\n width, height) that are scaled by fontsize if necessary.\n\n \"\"\"\n def __init__(self, xpad=0., ypad=0., update_func=None):\n self._xpad, self._ypad = xpad, ypad\n self._update_prop_func = update_func\n\n def _update_prop(self, legend_handle, orig_handle):\n if self._update_prop_func is None:\n self._default_update_prop(legend_handle, orig_handle)\n else:\n self._update_prop_func(legend_handle, orig_handle)\n\n def _default_update_prop(self, legend_handle, orig_handle):\n legend_handle.update_from(orig_handle)\n\n def update_prop(self, legend_handle, orig_handle, legend):\n\n self._update_prop(legend_handle, orig_handle)\n\n legend._set_artist_props(legend_handle)\n legend_handle.set_clip_box(None)\n legend_handle.set_clip_path(None)\n\n def adjust_drawing_area(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n ):\n xdescent = xdescent - self._xpad * fontsize\n ydescent = ydescent - self._ypad * fontsize\n width = width - self._xpad * fontsize\n height = height - self._ypad * fontsize\n return xdescent, ydescent, width, height\n\n def legend_artist(self, legend, orig_handle,\n fontsize, handlebox):\n \"\"\"\n Return the artist that this HandlerBase generates for the given\n original artist/handle.\n\n Parameters\n ----------\n legend : :class:`matplotlib.legend.Legend` instance\n The legend for which these legend artists are being created.\n orig_handle : :class:`matplotlib.artist.Artist` or similar\n The object for which these legend artists are being created.\n fontsize : float or int\n The fontsize in pixels. 
The artists being created should\n be scaled according to the given fontsize.\n handlebox : :class:`matplotlib.offsetbox.OffsetBox` instance\n The box which has been created to hold this legend entry's\n artists. Artists created in the `legend_artist` method must\n be added to this handlebox inside this method.\n\n \"\"\"\n xdescent, ydescent, width, height = self.adjust_drawing_area(\n legend, orig_handle,\n handlebox.xdescent, handlebox.ydescent,\n handlebox.width, handlebox.height,\n fontsize)\n artists = self.create_artists(legend, orig_handle,\n xdescent, ydescent, width, height,\n fontsize, handlebox.get_transform())\n\n # create_artists will return a list of artists.\n for a in artists:\n handlebox.add_artist(a)\n\n # we only return the first artist\n return artists[0]\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n trans):\n raise NotImplementedError('Derived must override')\n\n\nclass HandlerNpoints(HandlerBase):\n def __init__(self, marker_pad=0.3, numpoints=None, **kw):\n HandlerBase.__init__(self, **kw)\n\n self._numpoints = numpoints\n self._marker_pad = marker_pad\n\n def get_numpoints(self, legend):\n if self._numpoints is None:\n return legend.numpoints\n else:\n return self._numpoints\n\n def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):\n numpoints = self.get_numpoints(legend)\n\n if numpoints > 1:\n # we put some pad here to compensate the size of the\n # marker\n xdata = np.linspace(-xdescent + self._marker_pad * fontsize,\n width - self._marker_pad * fontsize,\n numpoints)\n xdata_marker = xdata\n elif numpoints == 1:\n xdata = np.linspace(-xdescent, width, 2)\n xdata_marker = [0.5 * width - 0.5 * xdescent]\n\n return xdata, xdata_marker\n\n\nclass HandlerNpointsYoffsets(HandlerNpoints):\n def __init__(self, numpoints=None, yoffsets=None, **kw):\n HandlerNpoints.__init__(self, numpoints=numpoints, **kw)\n self._yoffsets = yoffsets\n\n def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):\n if self._yoffsets is None:\n ydata = height * legend._scatteryoffsets\n else:\n ydata = height * np.asarray(self._yoffsets)\n\n return ydata\n\n\nclass HandlerLine2D(HandlerNpoints):\n \"\"\"\n Handler for Line2D instances.\n \"\"\"\n def __init__(self, marker_pad=0.3, numpoints=None, **kw):\n HandlerNpoints.__init__(self, marker_pad=marker_pad, numpoints=numpoints, **kw)\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n trans):\n\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,\n width, height, fontsize)\n\n ydata = ((height - ydescent) / 2.) 
* np.ones(xdata.shape, float)\n legline = Line2D(xdata, ydata)\n\n self.update_prop(legline, orig_handle, legend)\n legline.set_drawstyle('default')\n legline.set_marker(\"\")\n\n legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])\n self.update_prop(legline_marker, orig_handle, legend)\n legline_marker.set_linestyle('None')\n if legend.markerscale != 1:\n newsz = legline_marker.get_markersize() * legend.markerscale\n legline_marker.set_markersize(newsz)\n # we don't want to add this to the return list because\n # the texts and handles are assumed to be in one-to-one\n # correspondence.\n legline._legmarker = legline_marker\n\n legline.set_transform(trans)\n legline_marker.set_transform(trans)\n\n return [legline, legline_marker]\n\n\nclass HandlerPatch(HandlerBase):\n \"\"\"\n Handler for Patch instances.\n \"\"\"\n def __init__(self, patch_func=None, **kw):\n \"\"\"\n The HandlerPatch class optionally takes a function ``patch_func``\n who's responsibility is to create the legend key artist. The\n ``patch_func`` should have the signature::\n\n def patch_func(legend=legend, orig_handle=orig_handle,\n xdescent=xdescent, ydescent=ydescent,\n width=width, height=height, fontsize=fontsize)\n\n Subsequently the created artist will have its ``update_prop`` method\n called and the appropriate transform will be applied.\n\n \"\"\"\n HandlerBase.__init__(self, **kw)\n self._patch_func = patch_func\n\n def _create_patch(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize):\n if self._patch_func is None:\n p = Rectangle(xy=(-xdescent, -ydescent),\n width=width, height=height)\n else:\n p = self._patch_func(legend=legend, orig_handle=orig_handle,\n xdescent=xdescent, ydescent=ydescent,\n width=width, height=height, fontsize=fontsize)\n return p\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize, trans):\n p = self._create_patch(legend, orig_handle,\n xdescent, ydescent, width, height, fontsize)\n self.update_prop(p, orig_handle, legend)\n p.set_transform(trans)\n return [p]\n\n\nclass HandlerLineCollection(HandlerLine2D):\n \"\"\"\n Handler for LineCollection instances.\n \"\"\"\n def get_numpoints(self, legend):\n if self._numpoints is None:\n return legend.scatterpoints\n else:\n return self._numpoints\n\n def _default_update_prop(self, legend_handle, orig_handle):\n lw = orig_handle.get_linewidths()[0]\n dashes = orig_handle._us_linestyles[0]\n color = orig_handle.get_colors()[0]\n legend_handle.set_color(color)\n legend_handle.set_linestyle(dashes)\n legend_handle.set_linewidth(lw)\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize, trans):\n\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,\n width, height, fontsize)\n ydata = ((height - ydescent) / 2.) 
* np.ones(xdata.shape, float)\n legline = Line2D(xdata, ydata)\n\n self.update_prop(legline, orig_handle, legend)\n legline.set_transform(trans)\n\n return [legline]\n\n\nclass HandlerRegularPolyCollection(HandlerNpointsYoffsets):\n \"\"\"\n Handler for RegularPolyCollections.\n \"\"\"\n def __init__(self, yoffsets=None, sizes=None, **kw):\n HandlerNpointsYoffsets.__init__(self, yoffsets=yoffsets, **kw)\n\n self._sizes = sizes\n\n def get_numpoints(self, legend):\n if self._numpoints is None:\n return legend.scatterpoints\n else:\n return self._numpoints\n\n def get_sizes(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize):\n if self._sizes is None:\n handle_sizes = orig_handle.get_sizes()\n if not len(handle_sizes):\n handle_sizes = [1]\n size_max = max(handle_sizes) * legend.markerscale ** 2\n size_min = min(handle_sizes) * legend.markerscale ** 2\n\n numpoints = self.get_numpoints(legend)\n if numpoints < 4:\n sizes = [.5 * (size_max + size_min), size_max,\n size_min][:numpoints]\n else:\n rng = (size_max - size_min)\n sizes = rng * np.linspace(0, 1, numpoints) + size_min\n else:\n sizes = self._sizes\n\n return sizes\n\n def update_prop(self, legend_handle, orig_handle, legend):\n\n self._update_prop(legend_handle, orig_handle)\n\n legend_handle.set_figure(legend.figure)\n #legend._set_artist_props(legend_handle)\n legend_handle.set_clip_box(None)\n legend_handle.set_clip_path(None)\n\n def create_collection(self, orig_handle, sizes, offsets, transOffset):\n p = type(orig_handle)(orig_handle.get_numsides(),\n rotation=orig_handle.get_rotation(),\n sizes=sizes,\n offsets=offsets,\n transOffset=transOffset,\n )\n return p\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n trans):\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,\n width, height, fontsize)\n\n ydata = self.get_ydata(legend, xdescent, ydescent,\n width, height, fontsize)\n\n sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,\n width, height, fontsize)\n\n p = self.create_collection(orig_handle, sizes,\n offsets=list(zip(xdata_marker, ydata)),\n transOffset=trans)\n\n self.update_prop(p, orig_handle, legend)\n p._transOffset = trans\n return [p]\n\n\nclass HandlerPathCollection(HandlerRegularPolyCollection):\n \"\"\"\n Handler for PathCollections, which are used by scatter\n \"\"\"\n def create_collection(self, orig_handle, sizes, offsets, transOffset):\n p = type(orig_handle)([orig_handle.get_paths()[0]],\n sizes=sizes,\n offsets=offsets,\n transOffset=transOffset,\n )\n return p\n\n\nclass HandlerCircleCollection(HandlerRegularPolyCollection):\n \"\"\"\n Handler for CircleCollections\n \"\"\"\n def create_collection(self, orig_handle, sizes, offsets, transOffset):\n p = type(orig_handle)(sizes,\n offsets=offsets,\n transOffset=transOffset,\n )\n return p\n\n\nclass HandlerErrorbar(HandlerLine2D):\n \"\"\"\n Handler for Errorbars\n \"\"\"\n def __init__(self, xerr_size=0.5, yerr_size=None,\n marker_pad=0.3, numpoints=None, **kw):\n\n self._xerr_size = xerr_size\n self._yerr_size = yerr_size\n\n HandlerLine2D.__init__(self, marker_pad=marker_pad, numpoints=numpoints,\n **kw)\n\n def get_err_size(self, legend, xdescent, ydescent, width, height, fontsize):\n xerr_size = self._xerr_size * fontsize\n\n if self._yerr_size is None:\n yerr_size = xerr_size\n else:\n yerr_size = self._yerr_size * fontsize\n\n return xerr_size, yerr_size\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n 
trans):\n\n plotlines, caplines, barlinecols = orig_handle\n\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,\n width, height, fontsize)\n\n ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)\n legline = Line2D(xdata, ydata)\n\n\n xdata_marker = np.asarray(xdata_marker)\n ydata_marker = np.asarray(ydata[:len(xdata_marker)])\n\n xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,\n width, height, fontsize)\n\n legline_marker = Line2D(xdata_marker, ydata_marker)\n\n # when plotlines are None (only errorbars are drawn), we just\n # make legline invisible.\n if plotlines is None:\n legline.set_visible(False)\n legline_marker.set_visible(False)\n else:\n self.update_prop(legline, plotlines, legend)\n\n legline.set_drawstyle('default')\n legline.set_marker('None')\n\n self.update_prop(legline_marker, plotlines, legend)\n legline_marker.set_linestyle('None')\n\n if legend.markerscale != 1:\n newsz = legline_marker.get_markersize() * legend.markerscale\n legline_marker.set_markersize(newsz)\n\n handle_barlinecols = []\n handle_caplines = []\n\n if orig_handle.has_xerr:\n verts = [ ((x - xerr_size, y), (x + xerr_size, y))\n for x, y in zip(xdata_marker, ydata_marker)]\n coll = mcoll.LineCollection(verts)\n self.update_prop(coll, barlinecols[0], legend)\n handle_barlinecols.append(coll)\n\n if caplines:\n capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)\n capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)\n self.update_prop(capline_left, caplines[0], legend)\n self.update_prop(capline_right, caplines[0], legend)\n capline_left.set_marker(\"|\")\n capline_right.set_marker(\"|\")\n\n handle_caplines.append(capline_left)\n handle_caplines.append(capline_right)\n\n if orig_handle.has_yerr:\n verts = [ ((x, y - yerr_size), (x, y + yerr_size))\n for x, y in zip(xdata_marker, ydata_marker)]\n coll = mcoll.LineCollection(verts)\n self.update_prop(coll, barlinecols[0], legend)\n handle_barlinecols.append(coll)\n\n if caplines:\n capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)\n capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)\n self.update_prop(capline_left, caplines[0], legend)\n self.update_prop(capline_right, caplines[0], legend)\n capline_left.set_marker(\"_\")\n capline_right.set_marker(\"_\")\n\n handle_caplines.append(capline_left)\n handle_caplines.append(capline_right)\n\n artists = []\n artists.extend(handle_barlinecols)\n artists.extend(handle_caplines)\n artists.append(legline)\n artists.append(legline_marker)\n\n for artist in artists:\n artist.set_transform(trans)\n\n return artists\n\nclass HandlerStem(HandlerNpointsYoffsets):\n \"\"\"\n Handler for Errorbars\n \"\"\"\n def __init__(self, marker_pad=0.3, numpoints=None,\n bottom=None, yoffsets=None, **kw):\n\n HandlerNpointsYoffsets.__init__(self, marker_pad=marker_pad,\n numpoints=numpoints,\n yoffsets=yoffsets,\n **kw)\n self._bottom = bottom\n\n def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):\n if self._yoffsets is None:\n ydata = height * (0.5 * legend._scatteryoffsets + 0.5)\n else:\n ydata = height * np.asarray(self._yoffsets)\n\n return ydata\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n trans):\n\n markerline, stemlines, baseline = orig_handle\n\n xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,\n width, height, fontsize)\n\n ydata = self.get_ydata(legend, xdescent, ydescent,\n width, height, fontsize)\n\n if self._bottom is None:\n bottom = 
0.\n else:\n bottom = self._bottom\n\n leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])\n self.update_prop(leg_markerline, markerline, legend)\n\n leg_stemlines = []\n for thisx, thisy in zip(xdata_marker, ydata):\n l = Line2D([thisx, thisx], [bottom, thisy])\n leg_stemlines.append(l)\n\n for lm, m in zip(leg_stemlines, stemlines):\n self.update_prop(lm, m, legend)\n\n leg_baseline = Line2D([np.amin(xdata), np.amax(xdata)],\n [bottom, bottom])\n\n self.update_prop(leg_baseline, baseline, legend)\n\n artists = [leg_markerline]\n artists.extend(leg_stemlines)\n artists.append(leg_baseline)\n\n for artist in artists:\n artist.set_transform(trans)\n\n return artists\n\n\nclass HandlerTuple(HandlerBase):\n \"\"\"\n Handler for Tuple\n \"\"\"\n def __init__(self, **kwargs):\n HandlerBase.__init__(self, **kwargs)\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize,\n trans):\n\n handler_map = legend.get_legend_handler_map()\n a_list = []\n for handle1 in orig_handle:\n handler = legend.get_legend_handler(handler_map, handle1)\n _a_list = handler.create_artists(legend, handle1,\n xdescent, ydescent, width, height,\n fontsize,\n trans)\n a_list.extend(_a_list)\n\n return a_list\n\n\nclass HandlerPolyCollection(HandlerBase):\n \"\"\"\n Handler for PolyCollection used in fill_between and stackplot.\n \"\"\"\n def _update_prop(self, legend_handle, orig_handle):\n def first_color(colors):\n if colors is None:\n return None\n colors = mcolors.to_rgba_array(colors)\n if len(colors):\n return colors[0]\n else:\n return \"none\"\n def get_first(prop_array):\n if len(prop_array):\n return prop_array[0]\n else:\n return None\n edgecolor = getattr(orig_handle, '_original_edgecolor',\n orig_handle.get_edgecolor())\n legend_handle.set_edgecolor(first_color(edgecolor))\n facecolor = getattr(orig_handle, '_original_facecolor',\n orig_handle.get_facecolor())\n legend_handle.set_facecolor(first_color(facecolor))\n legend_handle.set_fill(orig_handle.get_fill())\n legend_handle.set_hatch(orig_handle.get_hatch())\n legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))\n legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))\n legend_handle.set_transform(get_first(orig_handle.get_transforms()))\n legend_handle.set_figure(orig_handle.get_figure())\n legend_handle.set_alpha(orig_handle.get_alpha())\n\n def create_artists(self, legend, orig_handle,\n xdescent, ydescent, width, height, fontsize, trans):\n p = Rectangle(xy=(-xdescent, -ydescent),\n width=width, height=height)\n self.update_prop(p, orig_handle, legend)\n p.set_transform(trans)\n return [p]\n",
"\"\"\"\nVarious bayesian regression\n\"\"\"\nfrom __future__ import print_function\n\n# Authors: V. Michel, F. Pedregosa, A. Gramfort\n# License: BSD 3 clause\n\nfrom math import log\nimport numpy as np\nfrom scipy import linalg\n\nfrom .base import LinearModel\nfrom ..base import RegressorMixin\nfrom ..utils.extmath import fast_logdet, pinvh\nfrom ..utils import check_X_y\n\n\n###############################################################################\n# BayesianRidge regression\n\nclass BayesianRidge(LinearModel, RegressorMixin):\n \"\"\"Bayesian ridge regression\n\n Fit a Bayesian ridge model and optimize the regularization parameters\n lambda (precision of the weights) and alpha (precision of the noise).\n\n Read more in the :ref:`User Guide <bayesian_regression>`.\n\n Parameters\n ----------\n n_iter : int, optional\n Maximum number of iterations. Default is 300.\n\n tol : float, optional\n Stop the algorithm if w has converged. Default is 1.e-3.\n\n alpha_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the alpha parameter. Default is 1.e-6\n\n alpha_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the alpha parameter.\n Default is 1.e-6.\n\n lambda_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the lambda parameter. Default is 1.e-6.\n\n lambda_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the lambda parameter.\n Default is 1.e-6\n\n compute_score : boolean, optional\n If True, compute the objective function at each step of the model.\n Default is False\n\n fit_intercept : boolean, optional\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n Default is True.\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n verbose : boolean, optional, default False\n Verbose mode when fitting the model.\n\n\n Attributes\n ----------\n coef_ : array, shape = (n_features)\n Coefficients of the regression model (mean of distribution)\n\n alpha_ : float\n estimated precision of the noise.\n\n lambda_ : float\n estimated precision of the weights.\n\n scores_ : float\n if computed, value of the objective function (to be maximized)\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.BayesianRidge()\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n ... 
# doctest: +NORMALIZE_WHITESPACE\n BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,\n copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,\n n_iter=300, normalize=False, tol=0.001, verbose=False)\n >>> clf.predict([[1, 1]])\n array([ 1.])\n\n Notes\n -----\n See examples/linear_model/plot_bayesian_ridge.py for an example.\n \"\"\"\n\n def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,\n lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,\n fit_intercept=True, normalize=False, copy_X=True,\n verbose=False):\n self.n_iter = n_iter\n self.tol = tol\n self.alpha_1 = alpha_1\n self.alpha_2 = alpha_2\n self.lambda_1 = lambda_1\n self.lambda_2 = lambda_2\n self.compute_score = compute_score\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the model\n\n Parameters\n ----------\n X : numpy array of shape [n_samples,n_features]\n Training data\n y : numpy array of shape [n_samples]\n Target values\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X)\n n_samples, n_features = X.shape\n\n # Initialization of the values of the parameters\n alpha_ = 1. / np.var(y)\n lambda_ = 1.\n\n verbose = self.verbose\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n\n self.scores_ = list()\n coef_old_ = None\n\n XT_y = np.dot(X.T, y)\n U, S, Vh = linalg.svd(X, full_matrices=False)\n eigen_vals_ = S ** 2\n\n # Convergence loop of the bayesian ridge regression\n for iter_ in range(self.n_iter):\n\n # Compute mu and sigma\n # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)\n # coef_ = sigma_^-1 * XT * y\n if n_samples > n_features:\n coef_ = np.dot(Vh.T,\n Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])\n coef_ = np.dot(coef_, XT_y)\n if self.compute_score:\n logdet_sigma_ = - np.sum(\n np.log(lambda_ + alpha_ * eigen_vals_))\n else:\n coef_ = np.dot(X.T, np.dot(\n U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))\n coef_ = np.dot(coef_, y)\n if self.compute_score:\n logdet_sigma_ = lambda_ * np.ones(n_features)\n logdet_sigma_[:n_samples] += alpha_ * eigen_vals_\n logdet_sigma_ = - np.sum(np.log(logdet_sigma_))\n\n # Update alpha and lambda\n rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n gamma_ = (np.sum((alpha_ * eigen_vals_) /\n (lambda_ + alpha_ * eigen_vals_)))\n lambda_ = ((gamma_ + 2 * lambda_1) /\n (np.sum(coef_ ** 2) + 2 * lambda_2))\n alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /\n (rmse_ + 2 * alpha_2))\n\n # Compute the objective function\n if self.compute_score:\n s = lambda_1 * log(lambda_) - lambda_2 * lambda_\n s += alpha_1 * log(alpha_) - alpha_2 * alpha_\n s += 0.5 * (n_features * log(lambda_) +\n n_samples * log(alpha_) -\n alpha_ * rmse_ -\n (lambda_ * np.sum(coef_ ** 2)) -\n logdet_sigma_ -\n n_samples * log(2 * np.pi))\n self.scores_.append(s)\n\n # Check for convergence\n if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:\n if verbose:\n print(\"Convergence after \", str(iter_), \" iterations\")\n break\n coef_old_ = np.copy(coef_)\n\n self.alpha_ = alpha_\n self.lambda_ = lambda_\n self.coef_ = coef_\n\n self._set_intercept(X_offset, y_offset, X_scale)\n return self\n\n\n###############################################################################\n# ARD (Automatic 
Relevance Determination) regression\n\n\nclass ARDRegression(LinearModel, RegressorMixin):\n \"\"\"Bayesian ARD regression.\n\n Fit the weights of a regression model, using an ARD prior. The weights of\n the regression model are assumed to be in Gaussian distributions.\n Also estimate the parameters lambda (precisions of the distributions of the\n weights) and alpha (precision of the distribution of the noise).\n The estimation is done by an iterative procedures (Evidence Maximization)\n\n Read more in the :ref:`User Guide <bayesian_regression>`.\n\n Parameters\n ----------\n n_iter : int, optional\n Maximum number of iterations. Default is 300\n\n tol : float, optional\n Stop the algorithm if w has converged. Default is 1.e-3.\n\n alpha_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the alpha parameter. Default is 1.e-6.\n\n alpha_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the alpha parameter. Default is 1.e-6.\n\n lambda_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the lambda parameter. Default is 1.e-6.\n\n lambda_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the lambda parameter. Default is 1.e-6.\n\n compute_score : boolean, optional\n If True, compute the objective function at each step of the model.\n Default is False.\n\n threshold_lambda : float, optional\n threshold for removing (pruning) weights with high precision from\n the computation. Default is 1.e+4.\n\n fit_intercept : boolean, optional\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n Default is True.\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n copy_X : boolean, optional, default True.\n If True, X will be copied; else, it may be overwritten.\n\n verbose : boolean, optional, default False\n Verbose mode when fitting the model.\n\n Attributes\n ----------\n coef_ : array, shape = (n_features)\n Coefficients of the regression model (mean of distribution)\n\n alpha_ : float\n estimated precision of the noise.\n\n lambda_ : array, shape = (n_features)\n estimated precisions of the weights.\n\n sigma_ : array, shape = (n_features, n_features)\n estimated variance-covariance matrix of the weights\n\n scores_ : float\n if computed, value of the objective function (to be maximized)\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.ARDRegression()\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n ... 
# doctest: +NORMALIZE_WHITESPACE\n ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,\n copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,\n n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,\n verbose=False)\n >>> clf.predict([[1, 1]])\n array([ 1.])\n\n Notes\n --------\n See examples/linear_model/plot_ard.py for an example.\n \"\"\"\n\n def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,\n lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,\n threshold_lambda=1.e+4, fit_intercept=True, normalize=False,\n copy_X=True, verbose=False):\n self.n_iter = n_iter\n self.tol = tol\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.alpha_1 = alpha_1\n self.alpha_2 = alpha_2\n self.lambda_1 = lambda_1\n self.lambda_2 = lambda_2\n self.compute_score = compute_score\n self.threshold_lambda = threshold_lambda\n self.copy_X = copy_X\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the ARDRegression model according to the given training data\n and parameters.\n\n Iterative procedure to maximize the evidence\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n y : array, shape = [n_samples]\n Target values (integers)\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)\n\n n_samples, n_features = X.shape\n coef_ = np.zeros(n_features)\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X)\n\n # Launch the convergence loop\n keep_lambda = np.ones(n_features, dtype=bool)\n\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n verbose = self.verbose\n\n # Initialization of the values of the parameters\n alpha_ = 1. / np.var(y)\n lambda_ = np.ones(n_features)\n\n self.scores_ = list()\n coef_old_ = None\n\n # Iterative procedure of ARDRegression\n for iter_ in range(self.n_iter):\n # Compute mu and sigma (using Woodbury matrix identity)\n sigma_ = pinvh(np.eye(n_samples) / alpha_ +\n np.dot(X[:, keep_lambda] *\n np.reshape(1. / lambda_[keep_lambda], [1, -1]),\n X[:, keep_lambda].T))\n sigma_ = np.dot(sigma_, X[:, keep_lambda] *\n np.reshape(1. / lambda_[keep_lambda], [1, -1]))\n sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *\n X[:, keep_lambda].T, sigma_)\n sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]\n coef_[keep_lambda] = alpha_ * np.dot(\n sigma_, np.dot(X[:, keep_lambda].T, y))\n\n # Update alpha and lambda\n rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)\n lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /\n ((coef_[keep_lambda]) ** 2 +\n 2. * lambda_2))\n alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /\n (rmse_ + 2. 
* alpha_2))\n\n # Prune the weights with a precision over a threshold\n keep_lambda = lambda_ < self.threshold_lambda\n coef_[~keep_lambda] = 0\n\n # Compute the objective function\n if self.compute_score:\n s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()\n s += alpha_1 * log(alpha_) - alpha_2 * alpha_\n s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +\n np.sum(np.log(lambda_)))\n s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())\n self.scores_.append(s)\n\n # Check for convergence\n if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:\n if verbose:\n print(\"Converged after %s iterations\" % iter_)\n break\n coef_old_ = np.copy(coef_)\n\n self.coef_ = coef_\n self.alpha_ = alpha_\n self.sigma_ = sigma_\n self.lambda_ = lambda_\n self._set_intercept(X_offset, y_offset, X_scale)\n return self\n",
"\"\"\"\nA collection of functions to find the weights and abscissas for\nGaussian Quadrature.\n\nThese calculations are done by finding the eigenvalues of a\ntridiagonal matrix whose entries are dependent on the coefficients\nin the recursion formula for the orthogonal polynomials with the\ncorresponding weighting function over the interval.\n\nMany recursion relations for orthogonal polynomials are given:\n\n.. math::\n\n a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x)\n\nThe recursion relation of interest is\n\n.. math::\n\n P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)\n\nwhere :math:`P` has a different normalization than :math:`f`.\n\nThe coefficients can be found as:\n\n.. math::\n\n A_n = -a2n / a3n\n \\\\qquad\n B_n = ( a4n / a3n \\\\sqrt{h_n-1 / h_n})^2\n\nwhere\n\n.. math::\n\n h_n = \\\\int_a^b w(x) f_n(x)^2\n\nassume:\n\n.. math::\n\n P_0 (x) = 1\n \\\\qquad\n P_{-1} (x) == 0\n\nFor the mathematical background, see [golub.welsch-1969-mathcomp]_ and\n[abramowitz.stegun-1965]_.\n\nReferences\n----------\n.. [golub.welsch-1969-mathcomp]\n Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss\n Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.\n\n.. [abramowitz.stegun-1965]\n Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of\n Mathematical Functions: with Formulas, Graphs, and Mathematical\n Tables*. Gaithersburg, MD: National Bureau of Standards.\n http://www.math.sfu.ca/~cbm/aands/\n\n.. [townsend.trogdon.olver-2014]\n Townsend, A. and Trogdon, T. and Olver, S. (2014)\n *Fast computation of Gauss quadrature nodes and\n weights on the whole real line*. :arXiv:`1410.5286`.\n\n.. [townsend.trogdon.olver-2015]\n Townsend, A. and Trogdon, T. and Olver, S. (2015)\n *Fast computation of Gauss quadrature nodes and\n weights on the whole real line*.\n IMA Journal of Numerical Analysis\n :doi:`10.1093/imanum/drv002`.\n\"\"\"\n#\n# Author: Travis Oliphant 2000\n# Updated Sep. 2003 (fixed bugs --- tested to be accurate)\n\nfrom __future__ import division, print_function, absolute_import\n\n# Scipy imports.\nimport numpy as np\nfrom numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,\n hstack, arccos, arange)\nfrom scipy import linalg\nfrom scipy.special import airy\n\n# Local imports.\nfrom . import _ufuncs as cephes\n_gam = cephes.gamma\nfrom . 
import specfun\n\n_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',\n 'jacobi', 'laguerre', 'genlaguerre', 'hermite',\n 'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',\n 'sh_chebyu', 'sh_jacobi']\n\n# Correspondence between new and old names of root functions\n_rootfuns_map = {'roots_legendre': 'p_roots',\n 'roots_chebyt': 't_roots',\n 'roots_chebyu': 'u_roots',\n 'roots_chebyc': 'c_roots',\n 'roots_chebys': 's_roots',\n 'roots_jacobi': 'j_roots',\n 'roots_laguerre': 'l_roots',\n 'roots_genlaguerre': 'la_roots',\n 'roots_hermite': 'h_roots',\n 'roots_hermitenorm': 'he_roots',\n 'roots_gegenbauer': 'cg_roots',\n 'roots_sh_legendre': 'ps_roots',\n 'roots_sh_chebyt': 'ts_roots',\n 'roots_sh_chebyu': 'us_roots',\n 'roots_sh_jacobi': 'js_roots'}\n\n_evalfuns = ['eval_legendre', 'eval_chebyt', 'eval_chebyu',\n 'eval_chebyc', 'eval_chebys', 'eval_jacobi',\n 'eval_laguerre', 'eval_genlaguerre', 'eval_hermite',\n 'eval_hermitenorm', 'eval_gegenbauer',\n 'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu',\n 'eval_sh_jacobi']\n\n__all__ = _polyfuns + list(_rootfuns_map.keys()) + _evalfuns + ['poch', 'binom']\n\n\nclass orthopoly1d(np.poly1d):\n\n def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,\n limits=None, monic=False, eval_func=None):\n np.poly1d.__init__(self, roots, r=1)\n equiv_weights = [weights[k] / wfunc(roots[k]) for\n k in range(len(roots))]\n self.__dict__['weights'] = np.array(list(zip(roots,\n weights, equiv_weights)))\n self.__dict__['weight_func'] = wfunc\n self.__dict__['limits'] = limits\n mu = sqrt(hn)\n if monic:\n evf = eval_func\n if evf:\n eval_func = lambda x: evf(x) / kn\n mu = mu / abs(kn)\n kn = 1.0\n self.__dict__['normcoef'] = mu\n self.__dict__['coeffs'] *= float(kn)\n\n # Note: eval_func will be discarded on arithmetic\n self.__dict__['_eval_func'] = eval_func\n\n def __call__(self, v):\n if self._eval_func and not isinstance(v, np.poly1d):\n return self._eval_func(v)\n else:\n return np.poly1d.__call__(self, v)\n\n def _scale(self, p):\n if p == 1.0:\n return\n self.__dict__['coeffs'] *= p\n evf = self.__dict__['_eval_func']\n if evf:\n self.__dict__['_eval_func'] = lambda x: evf(x) * p\n self.__dict__['normcoef'] *= p\n\n\ndef _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):\n \"\"\"[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)\n\n Returns the roots (x) of an nth order orthogonal polynomial,\n and weights (w) to use in appropriate Gaussian quadrature with that\n orthogonal polynomial.\n\n The polynomials have the recurrence relation\n P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)\n\n an_func(n) should return A_n\n sqrt_bn_func(n) should return sqrt(B_n)\n mu ( = h_0 ) is the integral of the weight over the orthogonal\n interval\n \"\"\"\n k = np.arange(n, dtype='d')\n c = np.zeros((2, n))\n c[0,1:] = bn_func(k[1:])\n c[1,:] = an_func(k)\n x = linalg.eigvals_banded(c, overwrite_a_band=True)\n\n # improve roots by one application of Newton's method\n y = f(n, x)\n dy = df(n, x)\n x -= y/dy\n\n fm = f(n-1, x)\n fm /= np.abs(fm).max()\n dy /= np.abs(dy).max()\n w = 1.0 / (fm * dy)\n\n if symmetrize:\n w = (w + w[::-1]) / 2\n x = (x - x[::-1]) / 2\n\n w *= mu0 / w.sum()\n\n if mu:\n return x, w, mu0\n else:\n return x, w\n\n# Jacobi Polynomials 1 P^(alpha,beta)_n(x)\n\n\ndef roots_jacobi(n, alpha, beta, mu=False):\n r\"\"\"Gauss-Jacobi quadrature.\n\n Computes the sample points and weights for Gauss-Jacobi quadrature. 
The\n sample points are the roots of the n-th degree Jacobi polynomial,\n :math:`P^{\\alpha, \\beta}_n(x)`. These sample points and weights\n correctly integrate polynomials of degree :math:`2n - 1` or less over the\n interval :math:`[-1, 1]` with weight function\n :math:`f(x) = (1 - x)^{\\alpha} (1 + x)^{\\beta}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n alpha : float\n alpha must be > -1\n beta : float\n beta must be > 0\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError(\"n must be a positive integer.\")\n if alpha <= -1 or beta <= -1:\n raise ValueError(\"alpha and beta must be greater than -1.\")\n\n if alpha == 0.0 and beta == 0.0:\n return roots_legendre(m, mu)\n if alpha == beta:\n return roots_gegenbauer(m, alpha+0.5, mu)\n\n mu0 = 2.0**(alpha+beta+1)*cephes.beta(alpha+1, beta+1)\n a = alpha\n b = beta\n if a + b == 0.0:\n an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0)\n else:\n an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b),\n (b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2)))\n\n bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \\\n * np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1)))\n\n f = lambda n, x: cephes.eval_jacobi(n, a, b, x)\n df = lambda n, x: 0.5 * (n + a + b + 1) \\\n * cephes.eval_jacobi(n-1, a+1, b+1, x)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)\n\n\ndef jacobi(n, alpha, beta, monic=False):\n r\"\"\"Jacobi polynomial.\n\n Defined to be the solution of\n\n .. math::\n (1 - x^2)\\frac{d^2}{dx^2}P_n^{(\\alpha, \\beta)}\n + (\\beta - \\alpha - (\\alpha + \\beta + 2)x)\n \\frac{d}{dx}P_n^{(\\alpha, \\beta)}\n + n(n + \\alpha + \\beta + 1)P_n^{(\\alpha, \\beta)} = 0\n\n for :math:`\\alpha, \\beta > -1`; :math:`P_n^{(\\alpha, \\beta)}` is a\n polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n alpha : float\n Parameter, must be greater than -1.\n beta : float\n Parameter, must be greater than -1.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n P : orthopoly1d\n Jacobi polynomial.\n\n Notes\n -----\n For fixed :math:`\\alpha, \\beta`, the polynomials\n :math:`P_n^{(\\alpha, \\beta)}` are orthogonal over :math:`[-1, 1]`\n with weight function :math:`(1 - x)^\\alpha(1 + x)^\\beta`.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta\n if n == 0:\n return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,\n eval_func=np.ones_like)\n x, w, mu = roots_jacobi(n, alpha, beta, mu=True)\n ab1 = alpha + beta + 1.0\n hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)\n hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)\n kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)\n # here kn = coefficient on x^n term\n p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,\n lambda x: eval_jacobi(n, alpha, beta, x))\n return p\n\n# Jacobi Polynomials shifted G_n(p,q,x)\n\n\ndef roots_sh_jacobi(n, p1, q1, mu=False):\n \"\"\"Gauss-Jacobi (shifted) quadrature.\n\n Computes the sample points and weights for Gauss-Jacobi (shifted)\n quadrature. 
The sample points are the roots of the n-th degree shifted\n Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample points and weights\n correctly integrate polynomials of degree :math:`2n - 1` or less over the\n interval :math:`[0, 1]` with weight function\n :math:`f(x) = (1 - x)^{p-q} x^{q-1}`\n\n Parameters\n ----------\n n : int\n quadrature order\n p1 : float\n (p1 - q1) must be > -1\n q1 : float\n q1 must be > 0\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n if (p1-q1) <= -1 or q1 <= 0:\n raise ValueError(\"(p - q) must be greater than -1, and q must be greater than 0.\")\n x, w, m = roots_jacobi(n, p1-q1, q1-1, True)\n x = (x + 1) / 2\n scale = 2.0**p1\n w /= scale\n m /= scale\n if mu:\n return x, w, m\n else:\n return x, w\n\ndef sh_jacobi(n, p, q, monic=False):\n r\"\"\"Shifted Jacobi polynomial.\n\n Defined by\n\n .. math::\n\n G_n^{(p, q)}(x) \n = \\binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1),\n\n where :math:`P_n^{(\\cdot, \\cdot)}` is the nth Jacobi polynomial.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n p : float\n Parameter, must have :math:`p > q - 1`.\n q : float\n Parameter, must be greater than 0.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n G : orthopoly1d\n Shifted Jacobi polynomial.\n\n Notes\n -----\n For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are\n orthogonal over :math:`[0, 1]` with weight function :math:`(1 -\n x)^{p - q}x^{q - 1}`.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.)\n if n == 0:\n return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,\n eval_func=np.ones_like)\n n1 = n\n x, w, mu0 = roots_sh_jacobi(n1, p, q, mu=True)\n hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)\n hn /= (2 * n + p) * (_gam(2 * n + p)**2)\n # kn = 1.0 in standard form so monic is redundant. Kept for compatibility.\n kn = 1.0\n pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,\n eval_func=lambda x: eval_sh_jacobi(n, p, q, x))\n return pp\n\n# Generalized Laguerre L^(alpha)_n(x)\n\n\ndef roots_genlaguerre(n, alpha, mu=False):\n r\"\"\"Gauss-generalized Laguerre quadrature.\n\n Computes the sample points and weights for Gauss-generalized Laguerre\n quadrature. The sample points are the roots of the n-th degree generalized\n Laguerre polynomial, :math:`L^{\\alpha}_n(x)`. 
These sample points and\n weights correctly integrate polynomials of degree :math:`2n - 1` or less\n over the interval :math:`[0, \\infty]` with weight function\n :math:`f(x) = x^{\\alpha} e^{-x}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n alpha : float\n alpha must be > -1\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError(\"n must be a positive integer.\")\n if alpha < -1:\n raise ValueError(\"alpha must be greater than -1.\")\n\n mu0 = cephes.gamma(alpha + 1)\n\n if m == 1:\n x = np.array([alpha+1.0], 'd')\n w = np.array([mu0], 'd')\n if mu:\n return x, w, mu0\n else:\n return x, w\n\n an_func = lambda k: 2 * k + alpha + 1\n bn_func = lambda k: -np.sqrt(k * (k + alpha))\n f = lambda n, x: cephes.eval_genlaguerre(n, alpha, x)\n df = lambda n, x: (n*cephes.eval_genlaguerre(n, alpha, x)\n - (n + alpha)*cephes.eval_genlaguerre(n-1, alpha, x))/x\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)\n\n\ndef genlaguerre(n, alpha, monic=False):\n r\"\"\"Generalized (associated) Laguerre polynomial.\n\n Defined to be the solution of\n\n .. math::\n x\\frac{d^2}{dx^2}L_n^{(\\alpha)} \n + (\\alpha + 1 - x)\\frac{d}{dx}L_n^{(\\alpha)}\n + nL_n^{(\\alpha)} = 0,\n\n where :math:`\\alpha > -1`; :math:`L_n^{(\\alpha)}` is a polynomial\n of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n alpha : float\n Parameter, must be greater than -1.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n L : orthopoly1d\n Generalized Laguerre polynomial.\n\n Notes\n -----\n For fixed :math:`\\alpha`, the polynomials :math:`L_n^{(\\alpha)}`\n are orthogonal over :math:`[0, \\infty)` with weight function\n :math:`e^{-x}x^\\alpha`.\n\n The Laguerre polynomials are the special case where :math:`\\alpha\n = 0`.\n\n See Also\n --------\n laguerre : Laguerre polynomial.\n\n \"\"\"\n if alpha <= -1:\n raise ValueError(\"alpha must be > -1\")\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_genlaguerre(n1, alpha, mu=True)\n wfunc = lambda x: exp(-x) * x**alpha\n if n == 0:\n x, w = [], []\n hn = _gam(n + alpha + 1) / _gam(n + 1)\n kn = (-1)**n / _gam(n + 1)\n p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,\n lambda x: eval_genlaguerre(n, alpha, x))\n return p\n\n# Laguerre L_n(x)\n\n\ndef roots_laguerre(n, mu=False):\n r\"\"\"Gauss-Laguerre quadrature.\n\n Computes the sample points and weights for Gauss-Laguerre quadrature.\n The sample points are the roots of the n-th degree Laguerre polynomial,\n :math:`L_n(x)`. 
These sample points and weights correctly integrate\n polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[0, \\infty]` with weight function :math:`f(x) = e^{-x}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n numpy.polynomial.laguerre.laggauss\n \"\"\"\n return roots_genlaguerre(n, 0.0, mu=mu)\n\n\ndef laguerre(n, monic=False):\n r\"\"\"Laguerre polynomial.\n\n Defined to be the solution of\n\n .. math::\n x\\frac{d^2}{dx^2}L_n + (1 - x)\\frac{d}{dx}L_n + nL_n = 0;\n\n :math:`L_n` is a polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n L : orthopoly1d\n Laguerre Polynomial.\n\n Notes\n -----\n The polynomials :math:`L_n` are orthogonal over :math:`[0,\n \\infty)` with weight function :math:`e^{-x}`.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_laguerre(n1, mu=True)\n if n == 0:\n x, w = [], []\n hn = 1.0\n kn = (-1)**n / _gam(n + 1)\n p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,\n lambda x: eval_laguerre(n, x))\n return p\n\n# Hermite 1 H_n(x)\n\n\ndef roots_hermite(n, mu=False):\n r\"\"\"Gauss-Hermite (physicst's) quadrature.\n\n Computes the sample points and weights for Gauss-Hermite quadrature.\n The sample points are the roots of the n-th degree Hermite polynomial,\n :math:`H_n(x)`. These sample points and weights correctly integrate\n polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-\\infty, \\infty]` with weight function :math:`f(x) = e^{-x^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n Notes\n -----\n For small n up to 150 a modified version of the Golub-Welsch\n algorithm is used. Nodes are computed from the eigenvalue\n problem and improved by one step of a Newton iteration.\n The weights are computed from the well-known analytical formula.\n\n For n larger than 150 an optimal asymptotic algorithm is applied\n which computes nodes and weights in a numerically stable manner.\n The algorithm has linear runtime making computation for very\n large n (several thousand or more) feasible.\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n numpy.polynomial.hermite.hermgauss\n roots_hermitenorm\n\n References\n ----------\n .. [townsend.trogdon.olver-2014]\n Townsend, A. and Trogdon, T. and Olver, S. (2014)\n *Fast computation of Gauss quadrature nodes and\n weights on the whole real line*. :arXiv:`1410.5286`.\n\n .. [townsend.trogdon.olver-2015]\n Townsend, A. and Trogdon, T. and Olver, S. 
(2015)\n *Fast computation of Gauss quadrature nodes and\n weights on the whole real line*.\n IMA Journal of Numerical Analysis\n :doi:`10.1093/imanum/drv002`.\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError(\"n must be a positive integer.\")\n\n mu0 = np.sqrt(np.pi)\n if n <= 150:\n an_func = lambda k: 0.0*k\n bn_func = lambda k: np.sqrt(k/2.0)\n f = cephes.eval_hermite\n df = lambda n, x: 2.0 * n * cephes.eval_hermite(n-1, x)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)\n else:\n nodes, weights = _roots_hermite_asy(m)\n if mu:\n return nodes, weights, mu0\n else:\n return nodes, weights\n\n\ndef _compute_tauk(n, k, maxit=5):\n \"\"\"Helper function for Tricomi initial guesses\n\n For details, see formula 3.1 in lemma 3.1 in the\n original paper.\n\n Parameters\n ----------\n n : int\n Quadrature order\n k : ndarray of type int\n Index of roots :math:`\\tau_k` to compute\n maxit : int\n Number of Newton maxit performed, the default\n value of 5 is sufficient.\n\n Returns\n -------\n tauk : ndarray\n Roots of equation 3.1\n\n See Also\n --------\n initial_nodes_a\n roots_hermite_asy\n \"\"\"\n a = n % 2 - 0.5\n c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0)\n f = lambda x: x - sin(x) - c\n df = lambda x: 1.0 - cos(x)\n xi = 0.5*pi\n for i in range(maxit):\n xi = xi - f(xi)/df(xi)\n return xi\n\n\ndef _initial_nodes_a(n, k):\n r\"\"\"Tricomi initial guesses\n\n Computes an initial approximation to the square of the `k`-th\n (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`\n of order :math:`n`. The formula is the one from lemma 3.1 in the\n original paper. The guesses are accurate except in the region\n near :math:`\\sqrt{2n + 1}`.\n\n Parameters\n ----------\n n : int\n Quadrature order\n k : ndarray of type int\n Index of roots to compute\n\n Returns\n -------\n xksq : ndarray\n Square of the approximate roots\n\n See Also\n --------\n initial_nodes\n roots_hermite_asy\n \"\"\"\n tauk = _compute_tauk(n, k)\n sigk = cos(0.5*tauk)**2\n a = n % 2 - 0.5\n nu = 4.0*floor(n/2.0) + 2.0*a + 2.0\n # Initial approximation of Hermite roots (square)\n xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25)\n return xksq\n\n\ndef _initial_nodes_b(n, k):\n r\"\"\"Gatteschi initial guesses\n\n Computes an initial approximation to the square of the `k`-th\n (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`\n of order :math:`n`. The formula is the one from lemma 3.2 in the\n original paper. 
The guesses are accurate in the region just\n below :math:`\\sqrt{2n + 1}`.\n\n Parameters\n ----------\n n : int\n Quadrature order\n k : ndarray of type int\n Index of roots to compute\n\n Returns\n -------\n xksq : ndarray\n Square of the approximate root\n\n See Also\n --------\n initial_nodes\n roots_hermite_asy\n \"\"\"\n a = n % 2 - 0.5\n nu = 4.0*floor(n/2.0) + 2.0*a + 2.0\n # Airy roots by approximation\n ak = specfun.airyzo(k.max(), 1)[0][::-1]\n # Initial approximation of Hermite roots (square)\n xksq = (nu +\n 2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +\n 1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +\n (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +\n (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -\n (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))\n return xksq\n\n\ndef _initial_nodes(n):\n \"\"\"Initial guesses for the Hermite roots\n\n Computes an initial approximation to the non-negative\n roots :math:`x_k` of the Hermite polynomial :math:`H_n`\n of order :math:`n`. The Tricomi and Gatteschi initial\n guesses are used in the region where they are accurate.\n\n Parameters\n ----------\n n : int\n Quadrature order\n\n Returns\n -------\n xk : ndarray\n Approximate roots\n\n See Also\n --------\n roots_hermite_asy\n \"\"\"\n # Turnover point\n # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules\n fit = 0.49082003*n - 4.37859653\n turnover = around(fit).astype(int)\n # Compute all approximations\n ia = arange(1, int(floor(n*0.5)+1))\n ib = ia[::-1]\n xasq = _initial_nodes_a(n, ia[:turnover+1])\n xbsq = _initial_nodes_b(n, ib[turnover+1:])\n # Combine\n iv = sqrt(hstack([xasq, xbsq]))\n # Central node is always zero\n if n % 2 == 1:\n iv = hstack([0.0, iv])\n return iv\n\n\ndef _pbcf(n, theta):\n r\"\"\"Asymptotic series expansion of parabolic cylinder function\n\n The implementation is based on sections 3.2 and 3.3 from the\n original paper. Compared to the published version this code\n adds one more term to the asymptotic series. The detailed\n formulas can be found at [parabolic-asymptotics]_. The evaluation\n is done in a transformed variable :math:`\\theta := \\arccos(t)`\n where :math:`t := x / \\mu` and :math:`\\mu := \\sqrt{2n + 1}`.\n\n Parameters\n ----------\n n : int\n Quadrature order\n theta : ndarray\n Transformed position variable\n\n Returns\n -------\n U : ndarray\n Value of the parabolic cylinder function :math:`U(a, \\theta)`.\n Ud : ndarray\n Value of the derivative :math:`U^{\\prime}(a, \\theta)` of\n the parabolic cylinder function.\n\n See Also\n --------\n roots_hermite_asy\n\n References\n ----------\n .. 
[parabolic-asymptotics]\n http://dlmf.nist.gov/12.10#vii\n \"\"\"\n st = sin(theta)\n ct = cos(theta)\n # http://dlmf.nist.gov/12.10#vii\n mu = 2.0*n + 1.0\n # http://dlmf.nist.gov/12.10#E23\n eta = 0.5*theta - 0.5*st*ct\n # http://dlmf.nist.gov/12.10#E39\n zeta = -(3.0*eta/2.0) ** (2.0/3.0)\n # http://dlmf.nist.gov/12.10#E40\n phi = (-zeta / st**2) ** (0.25)\n # Coefficients\n # http://dlmf.nist.gov/12.10#E43\n a0 = 1.0\n a1 = 0.10416666666666666667\n a2 = 0.08355034722222222222\n a3 = 0.12822657455632716049\n a4 = 0.29184902646414046425\n a5 = 0.88162726744375765242\n b0 = 1.0\n b1 = -0.14583333333333333333\n b2 = -0.09874131944444444444\n b3 = -0.14331205391589506173\n b4 = -0.31722720267841354810\n b5 = -0.94242914795712024914\n # Polynomials\n # http://dlmf.nist.gov/12.10#E9\n # http://dlmf.nist.gov/12.10#E10\n ctp = ct ** arange(16).reshape((-1,1))\n u0 = 1.0\n u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0\n u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0\n u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0\n u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0\n u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:]\n - 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0\n v0 = 1.0\n v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0\n v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0\n v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0\n v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0\n v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:]\n + 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0\n # Airy Evaluation (Bi and Bip unused)\n Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta)\n # Prefactor for U\n P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi\n # Terms for U\n # http://dlmf.nist.gov/12.10#E42\n phip = phi ** arange(6, 31, 6).reshape((-1,1))\n A0 = b0*u0\n A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3\n A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6\n B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2\n B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5\n B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8\n # U\n # http://dlmf.nist.gov/12.10#E35\n U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) +\n Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0))\n # Prefactor for derivative of U\n Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi\n # Terms for derivative of U\n # http://dlmf.nist.gov/12.10#E46\n C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta\n C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4\n C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7\n D0 = a0*v0\n D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3\n D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6\n # Derivative of U\n # http://dlmf.nist.gov/12.10#E36\n Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) +\n Aip * (D0 + D1/mu**2.0 + D2/mu**4.0))\n return U, Ud\n\n\ndef _newton(n, 
x_initial, maxit=5):\n \"\"\"Newton iteration for polishing the asymptotic approximation\n to the zeros of the Hermite polynomials.\n\n Parameters\n ----------\n n : int\n Quadrature order\n x_initial : ndarray\n Initial guesses for the roots\n maxit : int\n Maximal number of Newton iterations.\n The default 5 is sufficient, usually\n only one or two steps are needed.\n\n Returns\n -------\n nodes : ndarray\n Quadrature nodes\n weights : ndarray\n Quadrature weights\n\n See Also\n --------\n roots_hermite_asy\n \"\"\"\n # Variable transformation\n mu = sqrt(2.0*n + 1.0)\n t = x_initial / mu\n theta = arccos(t)\n # Newton iteration\n for i in range(maxit):\n u, ud = _pbcf(n, theta)\n dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)\n theta = theta + dtheta\n if max(abs(dtheta)) < 1e-14:\n break\n # Undo variable transformation\n x = mu * cos(theta)\n # Central node is always zero\n if n % 2 == 1:\n x[0] = 0.0\n # Compute weights\n w = exp(-x**2) / (2.0*ud**2)\n return x, w\n\n\ndef _roots_hermite_asy(n):\n r\"\"\"Gauss-Hermite (physicst's) quadrature for large n.\n\n Computes the sample points and weights for Gauss-Hermite quadrature.\n The sample points are the roots of the n-th degree Hermite polynomial,\n :math:`H_n(x)`. These sample points and weights correctly integrate\n polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-\\infty, \\infty]` with weight function :math:`f(x) = e^{-x^2}`.\n\n This method relies on asymptotic expansions which work best for n > 150.\n The algorithm has linear runtime making computation for very large n\n feasible.\n\n Parameters\n ----------\n n : int\n quadrature order\n\n Returns\n -------\n nodes : ndarray\n Quadrature nodes\n weights : ndarray\n Quadrature weights\n\n See Also\n --------\n roots_hermite\n\n References\n ----------\n .. [townsend.trogdon.olver-2014]\n Townsend, A. and Trogdon, T. and Olver, S. (2014)\n *Fast computation of Gauss quadrature nodes and\n weights on the whole real line*. :arXiv:`1410.5286`.\n\n .. [townsend.trogdon.olver-2015]\n Townsend, A. and Trogdon, T. and Olver, S. (2015)\n *Fast computation of Gauss quadrature nodes and\n weights on the whole real line*.\n IMA Journal of Numerical Analysis\n :doi:`10.1093/imanum/drv002`.\n \"\"\"\n iv = _initial_nodes(n)\n nodes, weights = _newton(n, iv)\n # Combine with negative parts\n if n % 2 == 0:\n nodes = hstack([-nodes[::-1], nodes])\n weights = hstack([weights[::-1], weights])\n else:\n nodes = hstack([-nodes[-1:0:-1], nodes])\n weights = hstack([weights[-1:0:-1], weights])\n # Scale weights\n weights *= sqrt(pi) / sum(weights)\n return nodes, weights\n\n\ndef hermite(n, monic=False):\n r\"\"\"Physicist's Hermite polynomial.\n\n Defined by\n\n .. math::\n\n H_n(x) = (-1)^ne^{x^2}\\frac{d^n}{dx^n}e^{-x^2};\n\n :math:`H_n` is a polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. 
Default is\n `False`.\n\n Returns\n -------\n H : orthopoly1d\n Hermite polynomial.\n\n Notes\n -----\n The polynomials :math:`H_n` are orthogonal over :math:`(-\\infty,\n \\infty)` with weight function :math:`e^{-x^2}`.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_hermite(n1, mu=True)\n wfunc = lambda x: exp(-x * x)\n if n == 0:\n x, w = [], []\n hn = 2**n * _gam(n + 1) * sqrt(pi)\n kn = 2**n\n p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,\n lambda x: eval_hermite(n, x))\n return p\n\n# Hermite 2 He_n(x)\n\n\ndef roots_hermitenorm(n, mu=False):\n r\"\"\"Gauss-Hermite (statistician's) quadrature.\n\n Computes the sample points and weights for Gauss-Hermite quadrature.\n The sample points are the roots of the n-th degree Hermite polynomial,\n :math:`He_n(x)`. These sample points and weights correctly integrate\n polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-\\infty, \\infty]` with weight function :math:`f(x) = e^{-x^2/2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n Notes\n -----\n For small n up to 150 a modified version of the Golub-Welsch\n algorithm is used. Nodes are computed from the eigenvalue\n problem and improved by one step of a Newton iteration.\n The weights are computed from the well-known analytical formula.\n\n For n larger than 150 an optimal asymptotic algorithm is used\n which computes nodes and weights in a numerical stable manner.\n The algorithm has linear runtime making computation for very\n large n (several thousand or more) feasible.\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n numpy.polynomial.hermite_e.hermegauss\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError(\"n must be a positive integer.\")\n\n mu0 = np.sqrt(2.0*np.pi)\n if n <= 150:\n an_func = lambda k: 0.0*k\n bn_func = lambda k: np.sqrt(k)\n f = cephes.eval_hermitenorm\n df = lambda n, x: n * cephes.eval_hermitenorm(n-1, x)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)\n else:\n nodes, weights = _roots_hermite_asy(m)\n # Transform\n nodes *= sqrt(2)\n weights *= sqrt(2)\n if mu:\n return nodes, weights, mu0\n else:\n return nodes, weights\n\n\ndef hermitenorm(n, monic=False):\n r\"\"\"Normalized (probabilist's) Hermite polynomial.\n\n Defined by\n\n .. math::\n\n He_n(x) = (-1)^ne^{x^2/2}\\frac{d^n}{dx^n}e^{-x^2/2};\n\n :math:`He_n` is a polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. 
Default is\n `False`.\n\n Returns\n -------\n He : orthopoly1d\n Hermite polynomial.\n\n Notes\n -----\n\n The polynomials :math:`He_n` are orthogonal over :math:`(-\\infty,\n \\infty)` with weight function :math:`e^{-x^2/2}`.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_hermitenorm(n1, mu=True)\n wfunc = lambda x: exp(-x * x / 2.0)\n if n == 0:\n x, w = [], []\n hn = sqrt(2 * pi) * _gam(n + 1)\n kn = 1.0\n p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic,\n eval_func=lambda x: eval_hermitenorm(n, x))\n return p\n\n# The remainder of the polynomials can be derived from the ones above.\n\n# Ultraspherical (Gegenbauer) C^(alpha)_n(x)\n\n\ndef roots_gegenbauer(n, alpha, mu=False):\n r\"\"\"Gauss-Gegenbauer quadrature.\n\n Computes the sample points and weights for Gauss-Gegenbauer quadrature.\n The sample points are the roots of the n-th degree Gegenbauer polynomial,\n :math:`C^{\\alpha}_n(x)`. These sample points and weights correctly\n integrate polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-1, 1]` with weight function\n :math:`f(x) = (1 - x^2)^{\\alpha - 1/2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n alpha : float\n alpha must be > -0.5\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError(\"n must be a positive integer.\")\n if alpha < -0.5:\n raise ValueError(\"alpha must be greater than -0.5.\")\n elif alpha == 0.0:\n # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x)\n # strictly, we should just error out here, since the roots are not\n # really defined, but we used to return something useful, so let's\n # keep doing so.\n return roots_chebyt(n, mu)\n\n mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1)\n an_func = lambda k: 0.0 * k\n bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1)\n / (4 * (k + alpha) * (k + alpha - 1)))\n f = lambda n, x: cephes.eval_gegenbauer(n, alpha, x)\n df = lambda n, x: (-n*x*cephes.eval_gegenbauer(n, alpha, x)\n + (n + 2*alpha - 1)*cephes.eval_gegenbauer(n-1, alpha, x))/(1-x**2)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)\n\n\ndef gegenbauer(n, alpha, monic=False):\n r\"\"\"Gegenbauer (ultraspherical) polynomial.\n\n Defined to be the solution of\n\n .. math::\n (1 - x^2)\\frac{d^2}{dx^2}C_n^{(\\alpha)}\n - (2\\alpha + 1)x\\frac{d}{dx}C_n^{(\\alpha)}\n + n(n + 2\\alpha)C_n^{(\\alpha)} = 0\n\n for :math:`\\alpha > -1/2`; :math:`C_n^{(\\alpha)}` is a polynomial\n of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. 
Default is\n `False`.\n\n Returns\n -------\n C : orthopoly1d\n Gegenbauer polynomial.\n\n Notes\n -----\n The polynomials :math:`C_n^{(\\alpha)}` are orthogonal over\n :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\\alpha -\n 1/2)}`.\n\n \"\"\"\n base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)\n if monic:\n return base\n # Abrahmowitz and Stegan 22.5.20\n factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) /\n _gam(2*alpha) / _gam(alpha + 0.5 + n))\n base._scale(factor)\n base.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x)\n return base\n\n# Chebyshev of the first kind: T_n(x) =\n# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)\n# Computed anew.\n\n\ndef roots_chebyt(n, mu=False):\n r\"\"\"Gauss-Chebyshev (first kind) quadrature.\n\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\n The sample points are the roots of the n-th degree Chebyshev polynomial of\n the first kind, :math:`T_n(x)`. These sample points and weights correctly\n integrate polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-1, 1]` with weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n numpy.polynomial.chebyshev.chebgauss\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError('n must be a positive integer.')\n x = np.cos(np.arange(2 * m - 1, 0, -2) * pi / (2 * m))\n w = np.empty_like(x)\n w.fill(pi/m)\n if mu:\n return x, w, pi\n else:\n return x, w\n\n\ndef chebyt(n, monic=False):\n r\"\"\"Chebyshev polynomial of the first kind.\n\n Defined to be the solution of\n\n .. math::\n (1 - x^2)\\frac{d^2}{dx^2}T_n - x\\frac{d}{dx}T_n + n^2T_n = 0;\n\n :math:`T_n` is a polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n T : orthopoly1d\n Chebyshev polynomial of the first kind.\n\n Notes\n -----\n The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]`\n with weight function :math:`(1 - x^2)^{-1/2}`.\n\n See Also\n --------\n chebyu : Chebyshev polynomial of the second kind.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n wfunc = lambda x: 1.0 / sqrt(1 - x * x)\n if n == 0:\n return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic,\n lambda x: eval_chebyt(n, x))\n n1 = n\n x, w, mu = roots_chebyt(n1, mu=True)\n hn = pi / 2\n kn = 2**(n - 1)\n p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,\n lambda x: eval_chebyt(n, x))\n return p\n\n# Chebyshev of the second kind\n# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)\n\n\ndef roots_chebyu(n, mu=False):\n r\"\"\"Gauss-Chebyshev (second kind) quadrature.\n\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\n The sample points are the roots of the n-th degree Chebyshev polynomial of\n the second kind, :math:`U_n(x)`. 
These sample points and weights correctly\n integrate polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-1, 1]` with weight function :math:`f(x) = \\sqrt{1 - x^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError('n must be a positive integer.')\n t = np.arange(m, 0, -1) * pi / (m + 1)\n x = np.cos(t)\n w = pi * np.sin(t)**2 / (m + 1)\n if mu:\n return x, w, pi / 2\n else:\n return x, w\n\n\ndef chebyu(n, monic=False):\n r\"\"\"Chebyshev polynomial of the second kind.\n\n Defined to be the solution of\n\n .. math::\n (1 - x^2)\\frac{d^2}{dx^2}U_n - 3x\\frac{d}{dx}U_n\n + n(n + 2)U_n = 0;\n\n :math:`U_n` is a polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n U : orthopoly1d\n Chebyshev polynomial of the second kind.\n\n Notes\n -----\n The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]`\n with weight function :math:`(1 - x^2)^{1/2}`.\n\n See Also\n --------\n chebyt : Chebyshev polynomial of the first kind.\n\n \"\"\"\n base = jacobi(n, 0.5, 0.5, monic=monic)\n if monic:\n return base\n factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5)\n base._scale(factor)\n return base\n\n# Chebyshev of the first kind C_n(x)\n\n\ndef roots_chebyc(n, mu=False):\n r\"\"\"Gauss-Chebyshev (first kind) quadrature.\n\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\n The sample points are the roots of the n-th degree Chebyshev polynomial of\n the first kind, :math:`C_n(x)`. These sample points and weights correctly\n integrate polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-2, 2]` with weight function :math:`f(x) = 1/\\sqrt{1 - (x/2)^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n x, w, m = roots_chebyt(n, True)\n x *= 2\n w *= 2\n m *= 2\n if mu:\n return x, w, m\n else:\n return x, w\n\n\ndef chebyc(n, monic=False):\n r\"\"\"Chebyshev polynomial of the first kind on :math:`[-2, 2]`.\n\n Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the\n nth Chebychev polynomial of the first kind.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n C : orthopoly1d\n Chebyshev polynomial of the first kind on :math:`[-2, 2]`.\n\n Notes\n -----\n The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]`\n with weight function :math:`1/\\sqrt{1 - (x/2)^2}`.\n\n See Also\n --------\n chebyt : Chebyshev polynomial of the first kind.\n\n References\n ----------\n .. [1] Abramowitz and Stegun, \"Handbook of Mathematical Functions\"\n Section 22. 
National Bureau of Standards, 1972.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_chebyc(n1, mu=True)\n if n == 0:\n x, w = [], []\n hn = 4 * pi * ((n == 0) + 1)\n kn = 1.0\n p = orthopoly1d(x, w, hn, kn,\n wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),\n limits=(-2, 2), monic=monic)\n if not monic:\n p._scale(2.0 / p(2))\n p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x)\n return p\n\n# Chebyshev of the second kind S_n(x)\n\n\ndef roots_chebys(n, mu=False):\n r\"\"\"Gauss-Chebyshev (second kind) quadrature.\n\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\n The sample points are the roots of the n-th degree Chebyshev polynomial of\n the second kind, :math:`S_n(x)`. These sample points and weights correctly\n integrate polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-2, 2]` with weight function :math:`f(x) = \\sqrt{1 - (x/2)^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n x, w, m = roots_chebyu(n, True)\n x *= 2\n w *= 2\n m *= 2\n if mu:\n return x, w, m\n else:\n return x, w\n\n\ndef chebys(n, monic=False):\n r\"\"\"Chebyshev polynomial of the second kind on :math:`[-2, 2]`.\n\n Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the\n nth Chebychev polynomial of the second kind.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n S : orthopoly1d\n Chebyshev polynomial of the second kind on :math:`[-2, 2]`.\n\n Notes\n -----\n The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]`\n with weight function :math:`\\sqrt{1 - (x/2)}^2`.\n\n See Also\n --------\n chebyu : Chebyshev polynomial of the second kind\n\n References\n ----------\n .. [1] Abramowitz and Stegun, \"Handbook of Mathematical Functions\"\n Section 22. National Bureau of Standards, 1972.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_chebys(n1, mu=True)\n if n == 0:\n x, w = [], []\n hn = pi\n kn = 1.0\n p = orthopoly1d(x, w, hn, kn,\n wfunc=lambda x: sqrt(1 - x * x / 4.0),\n limits=(-2, 2), monic=monic)\n if not monic:\n factor = (n + 1.0) / p(2)\n p._scale(factor)\n p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x)\n return p\n\n# Shifted Chebyshev of the first kind T^*_n(x)\n\n\ndef roots_sh_chebyt(n, mu=False):\n r\"\"\"Gauss-Chebyshev (first kind, shifted) quadrature.\n\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\n The sample points are the roots of the n-th degree shifted Chebyshev\n polynomial of the first kind, :math:`T_n(x)`. 
These sample points and\n weights correctly integrate polynomials of degree :math:`2n - 1` or less\n over the interval :math:`[0, 1]` with weight function\n :math:`f(x) = 1/\\sqrt{x - x^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n xw = roots_chebyt(n, mu)\n return ((xw[0] + 1) / 2,) + xw[1:]\n\n\ndef sh_chebyt(n, monic=False):\n r\"\"\"Shifted Chebyshev polynomial of the first kind.\n\n Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth\n Chebyshev polynomial of the first kind.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n T : orthopoly1d\n Shifted Chebyshev polynomial of the first kind.\n\n Notes\n -----\n The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]`\n with weight function :math:`(x - x^2)^{-1/2}`.\n\n \"\"\"\n base = sh_jacobi(n, 0.0, 0.5, monic=monic)\n if monic:\n return base\n if n > 0:\n factor = 4**n / 2.0\n else:\n factor = 1.0\n base._scale(factor)\n return base\n\n\n# Shifted Chebyshev of the second kind U^*_n(x)\ndef roots_sh_chebyu(n, mu=False):\n r\"\"\"Gauss-Chebyshev (second kind, shifted) quadrature.\n\n Computes the sample points and weights for Gauss-Chebyshev quadrature.\n The sample points are the roots of the n-th degree shifted Chebyshev\n polynomial of the second kind, :math:`U_n(x)`. These sample points and\n weights correctly integrate polynomials of degree :math:`2n - 1` or less\n over the interval :math:`[0, 1]` with weight function\n :math:`f(x) = \\sqrt{x - x^2}`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n x, w, m = roots_chebyu(n, True)\n x = (x + 1) / 2\n m_us = cephes.beta(1.5, 1.5)\n w *= m_us / m\n if mu:\n return x, w, m_us\n else:\n return x, w\n\n\ndef sh_chebyu(n, monic=False):\n r\"\"\"Shifted Chebyshev polynomial of the second kind.\n\n Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth\n Chebyshev polynomial of the second kind.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n U : orthopoly1d\n Shifted Chebyshev polynomial of the second kind.\n\n Notes\n -----\n The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]`\n with weight function :math:`(x - x^2)^{1/2}`.\n\n \"\"\"\n base = sh_jacobi(n, 2.0, 1.5, monic=monic)\n if monic:\n return base\n factor = 4**n\n base._scale(factor)\n return base\n\n# Legendre\n\n\ndef roots_legendre(n, mu=False):\n r\"\"\"Gauss-Legendre quadrature.\n\n Computes the sample points and weights for Gauss-Legendre quadrature.\n The sample points are the roots of the n-th degree Legendre polynomial\n :math:`P_n(x)`. 
These sample points and weights correctly integrate\n polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[-1, 1]` with weight function :math:`f(x) = 1.0`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n numpy.polynomial.legendre.leggauss\n \"\"\"\n m = int(n)\n if n < 1 or n != m:\n raise ValueError(\"n must be a positive integer.\")\n\n mu0 = 2.0\n an_func = lambda k: 0.0 * k\n bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1))\n f = cephes.eval_legendre\n df = lambda n, x: (-n*x*cephes.eval_legendre(n, x)\n + n*cephes.eval_legendre(n-1, x))/(1-x**2)\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)\n\n\ndef legendre(n, monic=False):\n r\"\"\"Legendre polynomial.\n\n Defined to be the solution of\n\n .. math::\n \\frac{d}{dx}\\left[(1 - x^2)\\frac{d}{dx}P_n(x)\\right]\n + n(n + 1)P_n(x) = 0;\n\n :math:`P_n(x)` is a polynomial of degree :math:`n`.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. Default is\n `False`.\n\n Returns\n -------\n P : orthopoly1d\n Legendre polynomial.\n\n Notes\n -----\n The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]`\n with weight function 1.\n\n Examples\n --------\n Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):\n\n >>> from scipy.special import legendre\n >>> legendre(3)\n poly1d([ 2.5, 0. , -1.5, 0. ])\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n if n == 0:\n n1 = n + 1\n else:\n n1 = n\n x, w, mu0 = roots_legendre(n1, mu=True)\n if n == 0:\n x, w = [], []\n hn = 2.0 / (2 * n + 1)\n kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n\n p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),\n monic=monic, eval_func=lambda x: eval_legendre(n, x))\n return p\n\n# Shifted Legendre P^*_n(x)\n\n\ndef roots_sh_legendre(n, mu=False):\n r\"\"\"Gauss-Legendre (shifted) quadrature.\n\n Computes the sample points and weights for Gauss-Legendre quadrature.\n The sample points are the roots of the n-th degree shifted Legendre\n polynomial :math:`P^*_n(x)`. These sample points and weights correctly\n integrate polynomials of degree :math:`2n - 1` or less over the interval\n :math:`[0, 1]` with weight function :math:`f(x) = 1.0`.\n\n Parameters\n ----------\n n : int\n quadrature order\n mu : bool, optional\n If True, return the sum of the weights, optional.\n\n Returns\n -------\n x : ndarray\n Sample points\n w : ndarray\n Weights\n mu : float\n Sum of the weights\n\n See Also\n --------\n scipy.integrate.quadrature\n scipy.integrate.fixed_quad\n \"\"\"\n x, w = roots_legendre(n)\n x = (x + 1) / 2\n w /= 2\n if mu:\n return x, w, 1.0\n else:\n return x, w\n\ndef sh_legendre(n, monic=False):\n r\"\"\"Shifted Legendre polynomial.\n\n Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth\n Legendre polynomial.\n\n Parameters\n ----------\n n : int\n Degree of the polynomial.\n monic : bool, optional\n If `True`, scale the leading coefficient to be 1. 
Default is\n `False`.\n\n Returns\n -------\n P : orthopoly1d\n Shifted Legendre polynomial.\n\n Notes\n -----\n The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]`\n with weight function 1.\n\n \"\"\"\n if n < 0:\n raise ValueError(\"n must be nonnegative.\")\n\n wfunc = lambda x: 0.0 * x + 1.0\n if n == 0:\n return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic,\n lambda x: eval_sh_legendre(n, x))\n x, w, mu0 = roots_sh_legendre(n, mu=True)\n hn = 1.0 / (2 * n + 1.0)\n kn = _gam(2 * n + 1) / _gam(n + 1)**2\n p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic,\n eval_func=lambda x: eval_sh_legendre(n, x))\n return p\n\n\n# -----------------------------------------------------------------------------\n# Code for backwards compatibility\n# -----------------------------------------------------------------------------\n\n# Import functions in case someone is still calling the orthogonal\n# module directly. (They shouldn't be; it's not in the public API).\npoch = cephes.poch\n\nfrom ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer,\n eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc,\n eval_sh_chebyt, eval_sh_chebyu, eval_legendre,\n eval_sh_legendre, eval_genlaguerre, eval_laguerre,\n eval_hermite, eval_hermitenorm)\n\n# Make the old root function names an alias for the new ones\n_modattrs = globals()\nfor newfun, oldfun in _rootfuns_map.items():\n _modattrs[oldfun] = _modattrs[newfun]\n __all__.append(oldfun)\n",
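The entry above bundles scipy.special's classical orthogonal-polynomial constructors and their Gaussian-quadrature companions (roots_legendre, roots_chebyt, legendre, chebyt, and the rest). As a quick illustration of how those routines are meant to be called, the sketch below integrates a degree-9 polynomial with a 5-point Gauss-Legendre rule, which the docstrings above state is handled exactly. This is a minimal sketch, assuming only that numpy and scipy are importable; the function name, signature, and the mu keyword all come from the entry itself.

# Minimal usage sketch for the quadrature routines documented above.
# roots_legendre is scipy.special public API, as the entry's docstrings show.
import numpy as np
from scipy.special import roots_legendre

n = 5
x, w, mu = roots_legendre(n, mu=True)    # points, weights, and their sum (2.0 on [-1, 1])

# An n-point Gauss-Legendre rule integrates polynomials of degree <= 2n - 1
# exactly; here 2n - 1 = 9.
f = lambda t: 3.0 * t**9 - 2.0 * t**4 + t
approx = np.sum(w * f(x))
exact = -4.0 / 5.0                       # only the even-degree term contributes on [-1, 1]

print(approx, exact)                     # agree to floating-point precision

The other roots_* functions in the entry follow the same calling convention, differing only in weight function and interval; most delegate to the shared _gen_roots_and_weights helper, while the Chebyshev rules use their closed-form nodes directly.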
"\"\"\"\nThis module provides a large set of colormaps, functions for\nregistering new colormaps and for getting a colormap by name,\nand a mixin class for adding color mapping functionality.\n\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os\nimport numpy as np\nfrom numpy import ma\nimport matplotlib as mpl\nimport matplotlib.colors as colors\nimport matplotlib.cbook as cbook\nfrom matplotlib._cm import datad, _deprecation_datad\nfrom matplotlib._cm import cubehelix\nfrom matplotlib._cm_listed import cmaps as cmaps_listed\n\ncmap_d = _deprecation_datad()\n\n# reverse all the colormaps.\n# reversed colormaps have '_r' appended to the name.\n\n\ndef _reverser(f):\n def freversed(x):\n return f(1 - x)\n return freversed\n\n\ndef revcmap(data):\n \"\"\"Can only handle specification *data* in dictionary format.\"\"\"\n data_r = {}\n for key, val in six.iteritems(data):\n if six.callable(val):\n valnew = _reverser(val)\n # This doesn't work: lambda x: val(1-x)\n # The same \"val\" (the first one) is used\n # each time, so the colors are identical\n # and the result is shades of gray.\n else:\n # Flip x and exchange the y values facing x = 0 and x = 1.\n valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]\n data_r[key] = valnew\n return data_r\n\n\ndef _reverse_cmap_spec(spec):\n \"\"\"Reverses cmap specification *spec*, can handle both dict and tuple\n type specs.\"\"\"\n\n if 'listed' in spec:\n return {'listed': spec['listed'][::-1]}\n\n if 'red' in spec:\n return revcmap(spec)\n else:\n revspec = list(reversed(spec))\n if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))\n revspec = [(1.0 - a, b) for a, b in revspec]\n return revspec\n\n\ndef _generate_cmap(name, lutsize):\n \"\"\"Generates the requested cmap from its *name*. The lut size is\n *lutsize*.\"\"\"\n\n # Use superclass method to avoid deprecation warnings during initial load.\n spec = dict.__getitem__(datad, name)\n\n # Generate the colormap object.\n if 'red' in spec:\n return colors.LinearSegmentedColormap(name, spec, lutsize)\n elif 'listed' in spec:\n return colors.ListedColormap(spec['listed'], name)\n else:\n return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)\n\nLUTSIZE = mpl.rcParams['image.lut']\n\n# Generate the reversed specifications ...\nfor cmapname in list(six.iterkeys(datad)):\n # Use superclass method to avoid deprecation warnings during initial load.\n spec = dict.__getitem__(datad, cmapname)\n spec_reversed = _reverse_cmap_spec(spec)\n datad[cmapname + '_r'] = spec_reversed\n\n# Precache the cmaps with ``lutsize = LUTSIZE`` ...\n\n# Use datad.keys() to also add the reversed ones added in the section\n# above:\nfor cmapname in six.iterkeys(datad):\n cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)\n\ncmap_d.update(cmaps_listed)\n\nlocals().update(cmap_d)\n\n\n# Continue with definitions ...\n\n\ndef register_cmap(name=None, cmap=None, data=None, lut=None):\n \"\"\"\n Add a colormap to the set recognized by :func:`get_cmap`.\n\n It can be used in two ways::\n\n register_cmap(name='swirly', cmap=swirly_cmap)\n\n register_cmap(name='choppy', data=choppydata, lut=128)\n\n In the first case, *cmap* must be a :class:`matplotlib.colors.Colormap`\n instance. 
The *name* is optional; if absent, the name will\n be the :attr:`~matplotlib.colors.Colormap.name` attribute of the *cmap*.\n\n In the second case, the three arguments are passed to\n the :class:`~matplotlib.colors.LinearSegmentedColormap` initializer,\n and the resulting colormap is registered.\n\n \"\"\"\n if name is None:\n try:\n name = cmap.name\n except AttributeError:\n raise ValueError(\"Arguments must include a name or a Colormap\")\n\n if not cbook.is_string_like(name):\n raise ValueError(\"Colormap name must be a string\")\n\n if isinstance(cmap, colors.Colormap):\n cmap_d[name] = cmap\n return\n\n # For the remainder, let exceptions propagate.\n if lut is None:\n lut = mpl.rcParams['image.lut']\n cmap = colors.LinearSegmentedColormap(name, data, lut)\n cmap_d[name] = cmap\n\n\ndef get_cmap(name=None, lut=None):\n \"\"\"\n Get a colormap instance, defaulting to rc values if *name* is None.\n\n Colormaps added with :func:`register_cmap` take precedence over\n built-in colormaps.\n\n If *name* is a :class:`matplotlib.colors.Colormap` instance, it will be\n returned.\n\n If *lut* is not None it must be an integer giving the number of\n entries desired in the lookup table, and *name* must be a standard\n mpl colormap name.\n \"\"\"\n if name is None:\n name = mpl.rcParams['image.cmap']\n\n if isinstance(name, colors.Colormap):\n return name\n\n if name in cmap_d:\n if lut is None:\n return cmap_d[name]\n else:\n return cmap_d[name]._resample(lut)\n else:\n raise ValueError(\n \"Colormap %s is not recognized. Possible values are: %s\"\n % (name, ', '.join(sorted(cmap_d.keys()))))\n\n\nclass ScalarMappable(object):\n \"\"\"\n This is a mixin class to support scalar data to RGBA mapping.\n The ScalarMappable makes use of data normalization before returning\n RGBA colors from the given colormap.\n\n \"\"\"\n def __init__(self, norm=None, cmap=None):\n r\"\"\"\n\n Parameters\n ----------\n norm : :class:`matplotlib.colors.Normalize` instance\n The normalizing object which scales data, typically into the\n interval ``[0, 1]``.\n If *None*, *norm* defaults to a *colors.Normalize* object which\n initializes its scaling based on the first data processed.\n cmap : str or :class:`~matplotlib.colors.Colormap` instance\n The colormap used to map normalized data values to RGBA colors.\n \"\"\"\n\n self.callbacksSM = cbook.CallbackRegistry()\n\n if cmap is None:\n cmap = get_cmap()\n if norm is None:\n norm = colors.Normalize()\n\n self._A = None\n #: The Normalization instance of this ScalarMappable.\n self.norm = norm\n #: The Colormap instance of this ScalarMappable.\n self.cmap = get_cmap(cmap)\n #: The last colorbar associated with this ScalarMappable. May be None.\n self.colorbar = None\n self.update_dict = {'array': False}\n\n def to_rgba(self, x, alpha=None, bytes=False, norm=True):\n \"\"\"\n Return a normalized rgba array corresponding to *x*.\n\n In the normal case, *x* is a 1-D or 2-D sequence of scalars, and\n the corresponding ndarray of rgba values will be returned,\n based on the norm and colormap set for this ScalarMappable.\n\n There is one special case, for handling images that are already\n rgb or rgba, such as might have been read from an image file.\n If *x* is an ndarray with 3 dimensions,\n and the last dimension is either 3 or 4, then it will be\n treated as an rgb or rgba array, and no mapping will be done.\n If the last dimension is 3, the *alpha* kwarg (defaulting to 1)\n will be used to fill in the transparency. 
If the last dimension\n is 4, the *alpha* kwarg is ignored; it does not\n replace the pre-existing alpha. A ValueError will be raised\n if the third dimension is other than 3 or 4.\n\n In either case, if *bytes* is *False* (default), the rgba\n array will be floats in the 0-1 range; if it is *True*,\n the returned rgba array will be uint8 in the 0 to 255 range.\n\n If norm is False, no normalization of the input data is\n performed, and it is assumed to already be in the range (0-1).\n\n Note: this method assumes the input is well-behaved; it does\n not check for anomalies such as *x* being a masked rgba\n array, or being an integer type other than uint8, or being\n a floating point rgba array with values outside the 0-1 range.\n \"\"\"\n # First check for special case, image input:\n try:\n if x.ndim == 3:\n if x.shape[2] == 3:\n if alpha is None:\n alpha = 1\n if x.dtype == np.uint8:\n alpha = np.uint8(alpha * 255)\n m, n = x.shape[:2]\n xx = np.empty(shape=(m, n, 4), dtype=x.dtype)\n xx[:, :, :3] = x\n xx[:, :, 3] = alpha\n elif x.shape[2] == 4:\n xx = x\n else:\n raise ValueError(\"third dimension must be 3 or 4\")\n if bytes and xx.dtype != np.uint8:\n xx = (xx * 255).astype(np.uint8)\n if not bytes and xx.dtype == np.uint8:\n xx = xx.astype(float) / 255\n return xx\n except AttributeError:\n # e.g., x is not an ndarray; so try mapping it\n pass\n\n # This is the normal case, mapping a scalar array:\n x = ma.asarray(x)\n if norm:\n x = self.norm(x)\n rgba = self.cmap(x, alpha=alpha, bytes=bytes)\n return rgba\n\n def set_array(self, A):\n 'Set the image array from numpy array *A*'\n self._A = A\n self.update_dict['array'] = True\n\n def get_array(self):\n 'Return the array'\n return self._A\n\n def get_cmap(self):\n 'return the colormap'\n return self.cmap\n\n def get_clim(self):\n 'return the min, max of the color limits for image scaling'\n return self.norm.vmin, self.norm.vmax\n\n def set_clim(self, vmin=None, vmax=None):\n \"\"\"\n set the norm limits for image scaling; if *vmin* is a length2\n sequence, interpret it as ``(vmin, vmax)`` which is used to\n support setp\n\n ACCEPTS: a length 2 sequence of floats\n \"\"\"\n if (vmin is not None and vmax is None and\n cbook.iterable(vmin) and len(vmin) == 2):\n vmin, vmax = vmin\n\n if vmin is not None:\n self.norm.vmin = vmin\n if vmax is not None:\n self.norm.vmax = vmax\n self.changed()\n\n def set_cmap(self, cmap):\n \"\"\"\n set the colormap for luminance data\n\n ACCEPTS: a colormap or registered colormap name\n \"\"\"\n cmap = get_cmap(cmap)\n self.cmap = cmap\n self.changed()\n\n def set_norm(self, norm):\n 'set the normalization instance'\n if norm is None:\n norm = colors.Normalize()\n self.norm = norm\n self.changed()\n\n def autoscale(self):\n \"\"\"\n Autoscale the scalar limits on the norm instance using the\n current array\n \"\"\"\n if self._A is None:\n raise TypeError('You must first set_array for mappable')\n self.norm.autoscale(self._A)\n self.changed()\n\n def autoscale_None(self):\n \"\"\"\n Autoscale the scalar limits on the norm instance using the\n current array, changing only limits that are None\n \"\"\"\n if self._A is None:\n raise TypeError('You must first set_array for mappable')\n self.norm.autoscale_None(self._A)\n self.changed()\n\n def add_checker(self, checker):\n \"\"\"\n Add an entry to a dictionary of boolean flags\n that are set to True when the mappable is changed.\n \"\"\"\n self.update_dict[checker] = False\n\n def check_update(self, checker):\n \"\"\"\n If mappable has changed since the last 
check,\n return True; else return False\n \"\"\"\n if self.update_dict[checker]:\n self.update_dict[checker] = False\n return True\n return False\n\n def changed(self):\n \"\"\"\n Call this whenever the mappable is changed to notify all the\n callbackSM listeners to the 'changed' signal\n \"\"\"\n self.callbacksSM.process('changed', self)\n\n for key in self.update_dict:\n self.update_dict[key] = True\n",
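The matplotlib.cm entry above centres on two pieces: the get_cmap registry lookup and the ScalarMappable mixin that pairs a Normalize instance with a colormap. The sketch below shows both paths producing the same RGBA values for a small 1-D array. It is a minimal sketch, assuming matplotlib and numpy are installed; it uses the module-level get_cmap exactly as documented in the entry (newer matplotlib releases steer users toward the matplotlib.colormaps registry instead, so treat this as illustrative of the version shown).

# Minimal sketch of the colormap lookup and ScalarMappable machinery above.
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors as mcolors

data = np.linspace(0.0, 10.0, 5)

# Explicit pipeline: scale the data into [0, 1], then look up RGBA colors.
norm = mcolors.Normalize(vmin=data.min(), vmax=data.max())
cmap = cm.get_cmap('viridis')
rgba_manual = cmap(norm(data))

# ScalarMappable bundles the same norm and colormap behind to_rgba().
mappable = cm.ScalarMappable(norm=norm, cmap='viridis')
rgba = mappable.to_rgba(data)

assert np.allclose(rgba, rgba_manual)
print(rgba.shape)                        # (5, 4): one RGBA row per input value

ScalarMappable is the base that imshow, scatter, and colorbar build on, which is why set_clim and set_cmap on those artists ultimately delegate to the norm and cmap attributes seen in the entry above.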
"\"\"\"Lite version of scipy.linalg.\n\nNotes\n-----\nThis module is a lite version of the linalg.py module in SciPy which\ncontains high-level Python interface to the LAPACK library. The lite\nversion only accesses the following LAPACK functions: dgesv, zgesv,\ndgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,\nzgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n\n__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',\n 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',\n 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',\n 'LinAlgError', 'multi_dot']\n\nimport warnings\n\nfrom numpy.core import (\n array, asarray, zeros, empty, empty_like, transpose, intc, single, double,\n csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,\n add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,\n finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,\n broadcast, atleast_2d, intp, asanyarray, isscalar, object_\n )\nfrom numpy.lib import triu, asfarray\nfrom numpy.linalg import lapack_lite, _umath_linalg\nfrom numpy.matrixlib.defmatrix import matrix_power\nfrom numpy.compat import asbytes\n\n# For Python2/3 compatibility\n_N = asbytes('N')\n_V = asbytes('V')\n_A = asbytes('A')\n_S = asbytes('S')\n_L = asbytes('L')\n\nfortran_int = intc\n\n# Error object\nclass LinAlgError(Exception):\n \"\"\"\n Generic Python-exception-derived object raised by linalg functions.\n\n General purpose exception class, derived from Python's exception.Exception\n class, programmatically raised in linalg functions when a Linear\n Algebra-related condition would prevent further correct execution of the\n function.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> LA.inv(np.zeros((2,2)))\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"...linalg.py\", line 350,\n in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))\n File \"...linalg.py\", line 249,\n in solve\n raise LinAlgError('Singular matrix')\n numpy.linalg.LinAlgError: Singular matrix\n\n \"\"\"\n pass\n\n# Dealing with errors in _umath_linalg\n\n_linalg_error_extobj = None\n\ndef _determine_error_states():\n global _linalg_error_extobj\n errobj = geterrobj()\n bufsize = errobj[0]\n\n with errstate(invalid='call', over='ignore',\n divide='ignore', under='ignore'):\n invalid_call_errmask = geterrobj()[1]\n\n _linalg_error_extobj = [bufsize, invalid_call_errmask, None]\n\n_determine_error_states()\n\ndef _raise_linalgerror_singular(err, flag):\n raise LinAlgError(\"Singular matrix\")\n\ndef _raise_linalgerror_nonposdef(err, flag):\n raise LinAlgError(\"Matrix is not positive definite\")\n\ndef _raise_linalgerror_eigenvalues_nonconvergence(err, flag):\n raise LinAlgError(\"Eigenvalues did not converge\")\n\ndef _raise_linalgerror_svd_nonconvergence(err, flag):\n raise LinAlgError(\"SVD did not converge\")\n\ndef get_linalg_error_extobj(callback):\n extobj = list(_linalg_error_extobj)\n extobj[2] = callback\n return extobj\n\ndef _makearray(a):\n new = asarray(a)\n wrap = getattr(a, \"__array_prepare__\", new.__array_wrap__)\n return new, wrap\n\ndef isComplexType(t):\n return issubclass(t, complexfloating)\n\n_real_types_map = {single : single,\n double : double,\n csingle : single,\n cdouble : double}\n\n_complex_types_map = {single : csingle,\n double : cdouble,\n 
csingle : csingle,\n cdouble : cdouble}\n\ndef _realType(t, default=double):\n return _real_types_map.get(t, default)\n\ndef _complexType(t, default=cdouble):\n return _complex_types_map.get(t, default)\n\ndef _linalgRealType(t):\n \"\"\"Cast the type t to either double or cdouble.\"\"\"\n return double\n\n_complex_types_map = {single : csingle,\n double : cdouble,\n csingle : csingle,\n cdouble : cdouble}\n\ndef _commonType(*arrays):\n # in lite version, use higher precision (always double or cdouble)\n result_type = single\n is_complex = False\n for a in arrays:\n if issubclass(a.dtype.type, inexact):\n if isComplexType(a.dtype.type):\n is_complex = True\n rt = _realType(a.dtype.type, default=None)\n if rt is None:\n # unsupported inexact scalar\n raise TypeError(\"array type %s is unsupported in linalg\" %\n (a.dtype.name,))\n else:\n rt = double\n if rt is double:\n result_type = double\n if is_complex:\n t = cdouble\n result_type = _complex_types_map[result_type]\n else:\n t = double\n return t, result_type\n\n\n# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).\n\n_fastCT = fastCopyAndTranspose\n\ndef _to_native_byte_order(*arrays):\n ret = []\n for arr in arrays:\n if arr.dtype.byteorder not in ('=', '|'):\n ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))\n else:\n ret.append(arr)\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\ndef _fastCopyAndTranspose(type, *arrays):\n cast_arrays = ()\n for a in arrays:\n if a.dtype.type is type:\n cast_arrays = cast_arrays + (_fastCT(a),)\n else:\n cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)\n if len(cast_arrays) == 1:\n return cast_arrays[0]\n else:\n return cast_arrays\n\ndef _assertRank2(*arrays):\n for a in arrays:\n if len(a.shape) != 2:\n raise LinAlgError('%d-dimensional array given. Array must be '\n 'two-dimensional' % len(a.shape))\n\ndef _assertRankAtLeast2(*arrays):\n for a in arrays:\n if len(a.shape) < 2:\n raise LinAlgError('%d-dimensional array given. Array must be '\n 'at least two-dimensional' % len(a.shape))\n\ndef _assertSquareness(*arrays):\n for a in arrays:\n if max(a.shape) != min(a.shape):\n raise LinAlgError('Array must be square')\n\ndef _assertNdSquareness(*arrays):\n for a in arrays:\n if max(a.shape[-2:]) != min(a.shape[-2:]):\n raise LinAlgError('Last 2 dimensions of the array must be square')\n\ndef _assertFinite(*arrays):\n for a in arrays:\n if not (isfinite(a).all()):\n raise LinAlgError(\"Array must not contain infs or NaNs\")\n\ndef _assertNoEmpty2d(*arrays):\n for a in arrays:\n if a.size == 0 and product(a.shape[-2:]) == 0:\n raise LinAlgError(\"Arrays cannot be empty\")\n\n\n# Linear equations\n\ndef tensorsolve(a, b, axes=None):\n \"\"\"\n Solve the tensor equation ``a x = b`` for x.\n\n It is assumed that all indices of `x` are summed over in the product,\n together with the rightmost indices of `a`, as is done in, for example,\n ``tensordot(a, x, axes=len(b.shape))``.\n\n Parameters\n ----------\n a : array_like\n Coefficient tensor, of shape ``b.shape + Q``. 
`Q`, a tuple, equals\n the shape of that sub-tensor of `a` consisting of the appropriate\n number of its rightmost indices, and must be such that\n ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be\n 'square').\n b : array_like\n Right-hand tensor, which can be of any shape.\n axes : tuple of ints, optional\n Axes in `a` to reorder to the right, before inversion.\n If None (default), no reordering is done.\n\n Returns\n -------\n x : ndarray, shape Q\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not 'square' (in the above sense).\n\n See Also\n --------\n numpy.tensordot, tensorinv, numpy.einsum\n\n Examples\n --------\n >>> a = np.eye(2*3*4)\n >>> a.shape = (2*3, 4, 2, 3, 4)\n >>> b = np.random.randn(2*3, 4)\n >>> x = np.linalg.tensorsolve(a, b)\n >>> x.shape\n (2, 3, 4)\n >>> np.allclose(np.tensordot(a, x, axes=3), b)\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n b = asarray(b)\n an = a.ndim\n\n if axes is not None:\n allaxes = list(range(0, an))\n for k in axes:\n allaxes.remove(k)\n allaxes.insert(an, k)\n a = a.transpose(allaxes)\n\n oldshape = a.shape[-(an-b.ndim):]\n prod = 1\n for k in oldshape:\n prod *= k\n\n a = a.reshape(-1, prod)\n b = b.ravel()\n res = wrap(solve(a, b))\n res.shape = oldshape\n return res\n\ndef solve(a, b):\n \"\"\"\n Solve a linear matrix equation, or system of linear scalar equations.\n\n Computes the \"exact\" solution, `x`, of the well-determined, i.e., full\n rank, linear matrix equation `ax = b`.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Coefficient matrix.\n b : {(..., M,), (..., M, K)}, array_like\n Ordinate or \"dependent variable\" values.\n\n Returns\n -------\n x : {(..., M,), (..., M, K)} ndarray\n Solution to the system a x = b. Returned shape is identical to `b`.\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not square.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The solutions are computed using LAPACK routine _gesv\n\n `a` must be square and of full-rank, i.e., all rows (or, equivalently,\n columns) must be linearly independent; if either is not true, use\n `lstsq` for the least-squares best \"solution\" of the\n system/equation.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pg. 
22.\n\n Examples\n --------\n Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:\n\n >>> a = np.array([[3,1], [1,2]])\n >>> b = np.array([9,8])\n >>> x = np.linalg.solve(a, b)\n >>> x\n array([ 2., 3.])\n\n Check that the solution is correct:\n\n >>> np.allclose(np.dot(a, x), b)\n True\n\n \"\"\"\n a, _ = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n b, wrap = _makearray(b)\n t, result_t = _commonType(a, b)\n\n # We use the b = (..., M,) logic, only if the number of extra dimensions\n # match exactly\n if b.ndim == a.ndim - 1:\n if a.shape[-1] == 0 and b.shape[-1] == 0:\n # Legal, but the ufunc cannot handle the 0-sized inner dims\n # let the ufunc handle all wrong cases.\n a = a.reshape(a.shape[:-1])\n bc = broadcast(a, b)\n return wrap(empty(bc.shape, dtype=result_t))\n\n gufunc = _umath_linalg.solve1\n else:\n if b.size == 0:\n if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:\n a = a[:,:1].reshape(a.shape[:-1] + (1,))\n bc = broadcast(a, b)\n return wrap(empty(bc.shape, dtype=result_t))\n\n gufunc = _umath_linalg.solve\n\n signature = 'DD->D' if isComplexType(t) else 'dd->d'\n extobj = get_linalg_error_extobj(_raise_linalgerror_singular)\n r = gufunc(a, b, signature=signature, extobj=extobj)\n\n return wrap(r.astype(result_t, copy=False))\n\n\ndef tensorinv(a, ind=2):\n \"\"\"\n Compute the 'inverse' of an N-dimensional array.\n\n The result is an inverse for `a` relative to the tensordot operation\n ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,\n ``tensordot(tensorinv(a), a, ind)`` is the \"identity\" tensor for the\n tensordot operation.\n\n Parameters\n ----------\n a : array_like\n Tensor to 'invert'. Its shape must be 'square', i. e.,\n ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.\n ind : int, optional\n Number of first indices that are involved in the inverse sum.\n Must be a positive integer, default is 2.\n\n Returns\n -------\n b : ndarray\n `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.\n\n Raises\n ------\n LinAlgError\n If `a` is singular or not 'square' (in the above sense).\n\n See Also\n --------\n numpy.tensordot, tensorsolve\n\n Examples\n --------\n >>> a = np.eye(4*6)\n >>> a.shape = (4, 6, 8, 3)\n >>> ainv = np.linalg.tensorinv(a, ind=2)\n >>> ainv.shape\n (8, 3, 4, 6)\n >>> b = np.random.randn(4, 6)\n >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))\n True\n\n >>> a = np.eye(4*6)\n >>> a.shape = (24, 8, 3)\n >>> ainv = np.linalg.tensorinv(a, ind=1)\n >>> ainv.shape\n (8, 3, 24)\n >>> b = np.random.randn(24)\n >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))\n True\n\n \"\"\"\n a = asarray(a)\n oldshape = a.shape\n prod = 1\n if ind > 0:\n invshape = oldshape[ind:] + oldshape[:ind]\n for k in oldshape[ind:]:\n prod *= k\n else:\n raise ValueError(\"Invalid ind argument.\")\n a = a.reshape(prod, -1)\n ia = inv(a)\n return ia.reshape(*invshape)\n\n\n# Matrix inversion\n\ndef inv(a):\n \"\"\"\n Compute the (multiplicative) inverse of a matrix.\n\n Given a square matrix `a`, return the matrix `ainv` satisfying\n ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Matrix to be inverted.\n\n Returns\n -------\n ainv : (..., M, M) ndarray or matrix\n (Multiplicative) inverse of the matrix `a`.\n\n Raises\n ------\n LinAlgError\n If `a` is not square or inversion fails.\n\n Notes\n -----\n\n .. 
versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n Examples\n --------\n >>> from numpy.linalg import inv\n >>> a = np.array([[1., 2.], [3., 4.]])\n >>> ainv = inv(a)\n >>> np.allclose(np.dot(a, ainv), np.eye(2))\n True\n >>> np.allclose(np.dot(ainv, a), np.eye(2))\n True\n\n If a is a matrix object, then the return value is a matrix as well:\n\n >>> ainv = inv(np.matrix(a))\n >>> ainv\n matrix([[-2. , 1. ],\n [ 1.5, -0.5]])\n\n Inverses of several matrices can be computed at once:\n\n >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])\n >>> inv(a)\n array([[[-2. , 1. ],\n [ 1.5, -0.5]],\n [[-5. , 2. ],\n [ 3. , -1. ]]])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n\n if a.shape[-1] == 0:\n # The inner array is 0x0, the ufunc cannot handle this case\n return wrap(empty_like(a, dtype=result_t))\n\n signature = 'D->D' if isComplexType(t) else 'd->d'\n extobj = get_linalg_error_extobj(_raise_linalgerror_singular)\n ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)\n return wrap(ainv.astype(result_t, copy=False))\n\n\n# Cholesky decomposition\n\ndef cholesky(a):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n\n Returns\n -------\n L : (..., M, M) array_like\n Upper or lower-triangular Cholesky factor of `a`. Returns a\n matrix object if `a` is a matrix object.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. 
math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> A = np.array([[1,-2j],[2j,5]])\n >>> A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = np.linalg.cholesky(A)\n >>> L\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> np.dot(L, L.T.conj()) # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> np.linalg.cholesky(A) # an ndarray object is returned\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> # But a matrix object is returned if A is a matrix object\n >>> LA.cholesky(np.matrix(A))\n matrix([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)\n gufunc = _umath_linalg.cholesky_lo\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n signature = 'D->D' if isComplexType(t) else 'd->d'\n r = gufunc(a, signature=signature, extobj=extobj)\n return wrap(r.astype(result_t, copy=False))\n\n# QR decompostion\n\ndef qr(a, mode='reduced'):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is\n upper-triangular.\n\n Parameters\n ----------\n a : array_like, shape (M, N)\n Matrix to be factored.\n mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional\n If K = min(M, N), then\n\n 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)\n 'complete' : returns q, r with dimensions (M, M), (M, N)\n 'r' : returns r only with dimensions (K, N)\n 'raw' : returns h, tau with dimensions (N, M), (K,)\n 'full' : alias of 'reduced', deprecated\n 'economic' : returns h from 'raw', deprecated.\n\n The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,\n see the notes for more information. The default is 'reduced' and to\n maintain backward compatibility with earlier versions of numpy both\n it and the old default 'full' can be omitted. Note that array h\n returned in 'raw' mode is transposed for calling Fortran. The\n 'economic' mode is deprecated. The modes 'full' and 'economic' may\n be passed using only the first letter for backwards compatibility,\n but all others must be spelled out. See the Notes for more\n explanation.\n\n\n Returns\n -------\n q : ndarray of float or complex, optional\n A matrix with orthonormal columns. When mode = 'complete' the\n result is an orthogonal/unitary matrix depending on whether or not\n a is real/complex. The determinant may be either +/- 1 in that\n case.\n r : ndarray of float or complex, optional\n The upper-triangular matrix.\n (h, tau) : ndarrays of np.double or np.cdouble, optional\n The array h contains the Householder reflectors that generate q\n along with r. The tau array contains scaling factors for the\n reflectors. In the deprecated 'economic' mode only h is returned.\n\n Raises\n ------\n LinAlgError\n If factoring fails.\n\n Notes\n -----\n This is an interface to the LAPACK routines dgeqrf, zgeqrf,\n dorgqr, and zungqr.\n\n For more information on the qr factorization, see for example:\n http://en.wikipedia.org/wiki/QR_factorization\n\n Subclasses of `ndarray` are preserved except for the 'raw' mode. So if\n `a` is of type `matrix`, all the return values will be matrices too.\n\n New 'reduced', 'complete', and 'raw' options for mode were added in\n NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In\n addition the options 'full' and 'economic' were deprecated. 
Because\n 'full' was the previous default and 'reduced' is the new default,\n backward compatibility can be maintained by letting `mode` default.\n The 'raw' option was added so that LAPACK routines that can multiply\n arrays by q using the Householder reflectors can be used. Note that in\n this case the returned arrays are of type np.double or np.cdouble and\n the h array is transposed to be FORTRAN compatible. No routines using\n the 'raw' return are currently exposed by numpy, but some are available\n in lapack_lite and just await the necessary work.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6)\n >>> q, r = np.linalg.qr(a)\n >>> np.allclose(a, np.dot(q, r)) # a does equal qr\n True\n >>> r2 = np.linalg.qr(a, mode='r')\n >>> r3 = np.linalg.qr(a, mode='economic')\n >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'\n True\n >>> # But only triu parts are guaranteed equal when mode='economic'\n >>> np.allclose(r, np.triu(r3[:6,:6], k=0))\n True\n\n Example illustrating a common use of `qr`: solving of least squares\n problems\n\n What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for\n the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points\n and you'll see that it should be y0 = 0, m = 1.) The answer is provided\n by solving the over-determined matrix equation ``Ax = b``, where::\n\n A = array([[0, 1], [1, 1], [1, 1], [2, 1]])\n x = array([[y0], [m]])\n b = array([[1], [0], [2], [1]])\n\n If A = qr such that q is orthonormal (which is always possible via\n Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,\n however, we simply use `lstsq`.)\n\n >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])\n >>> A\n array([[0, 1],\n [1, 1],\n [1, 1],\n [2, 1]])\n >>> b = np.array([1, 0, 2, 1])\n >>> q, r = LA.qr(A)\n >>> p = np.dot(q.T, b)\n >>> np.dot(LA.inv(r), p)\n array([ 1.1e-16, 1.0e+00])\n\n \"\"\"\n if mode not in ('reduced', 'complete', 'r', 'raw'):\n if mode in ('f', 'full'):\n # 2013-04-01, 1.8\n msg = \"\".join((\n \"The 'full' option is deprecated in favor of 'reduced'.\\n\",\n \"For backward compatibility let mode default.\"))\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n mode = 'reduced'\n elif mode in ('e', 'economic'):\n # 2013-04-01, 1.8\n msg = \"The 'economic' option is deprecated.\"\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n mode = 'economic'\n else:\n raise ValueError(\"Unrecognized mode '%s'\" % mode)\n\n a, wrap = _makearray(a)\n _assertRank2(a)\n _assertNoEmpty2d(a)\n m, n = a.shape\n t, result_t = _commonType(a)\n a = _fastCopyAndTranspose(t, a)\n a = _to_native_byte_order(a)\n mn = min(m, n)\n tau = zeros((mn,), t)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgeqrf\n routine_name = 'zgeqrf'\n else:\n lapack_routine = lapack_lite.dgeqrf\n routine_name = 'dgeqrf'\n\n # calculate optimal size of work data 'work'\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, a, m, tau, work, -1, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n # do qr decomposition\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, a, m, tau, work, lwork, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n # handle modes that don't return q\n if mode == 'r':\n r = _fastCopyAndTranspose(result_t, a[:, :mn])\n return wrap(triu(r))\n\n if mode == 'raw':\n return a, tau\n\n if mode == 'economic':\n if t != result_t :\n a = a.astype(result_t, 
copy=False)\n return wrap(a.T)\n\n # generate q from a\n if mode == 'complete' and m > n:\n mc = m\n q = empty((m, m), t)\n else:\n mc = mn\n q = empty((n, m), t)\n q[:n] = a\n\n if isComplexType(t):\n lapack_routine = lapack_lite.zungqr\n routine_name = 'zungqr'\n else:\n lapack_routine = lapack_lite.dorgqr\n routine_name = 'dorgqr'\n\n # determine optimal lwork\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n # compute q\n lwork = int(abs(work[0]))\n work = zeros((lwork,), t)\n results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)\n if results['info'] != 0:\n raise LinAlgError('%s returns %d' % (routine_name, results['info']))\n\n q = _fastCopyAndTranspose(result_t, q[:mc])\n r = _fastCopyAndTranspose(result_t, a[:, :mc])\n\n return wrap(q), wrap(triu(r))\n\n\n# Eigenvalues\n\n\ndef eigvals(a):\n \"\"\"\n Compute the eigenvalues of a general matrix.\n\n Main difference between `eigvals` and `eig`: the eigenvectors aren't\n returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n A complex- or real-valued matrix whose eigenvalues will be computed.\n\n Returns\n -------\n w : (..., M,) ndarray\n The eigenvalues, each repeated according to its multiplicity.\n They are not necessarily ordered, nor are they necessarily\n real for real matrices.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eig : eigenvalues and right eigenvectors of general arrays\n eigvalsh : eigenvalues of symmetric or Hermitian arrays.\n eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n This is implemented using the _geev LAPACK routines which compute\n the eigenvalues and eigenvectors of general square arrays.\n\n Examples\n --------\n Illustration, using the fact that the eigenvalues of a diagonal matrix\n are its diagonal elements, that multiplying a matrix on the left\n by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose\n of `Q`), preserves the eigenvalues of the \"middle\" matrix. 
In other words,\n if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as\n ``A``:\n\n >>> from numpy import linalg as LA\n >>> x = np.random.random()\n >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])\n >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])\n (1.0, 1.0, 0.0)\n\n Now multiply a diagonal matrix by Q on one side and by Q.T on the other:\n\n >>> D = np.diag((-1,1))\n >>> LA.eigvals(D)\n array([-1., 1.])\n >>> A = np.dot(Q, D)\n >>> A = np.dot(A, Q.T)\n >>> LA.eigvals(A)\n array([ 1., -1.])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertNoEmpty2d(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n _assertFinite(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n signature = 'D->D' if isComplexType(t) else 'd->D'\n w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)\n\n if not isComplexType(t):\n if all(w.imag == 0):\n w = w.real\n result_t = _realType(result_t)\n else:\n result_t = _complexType(result_t)\n\n return w.astype(result_t, copy=False)\n\ndef eigvalsh(a, UPLO='L'):\n \"\"\"\n Compute the eigenvalues of a Hermitian or real symmetric matrix.\n\n Main difference from eigh: the eigenvectors are not computed.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n A complex- or real-valued matrix whose eigenvalues are to be\n computed.\n UPLO : {'L', 'U'}, optional\n Specifies whether the calculation is done with the lower triangular\n part of `a` ('L', default) or the upper triangular part ('U').\n Irrespective of this value only the real parts of the diagonal will\n be considered in the computation to preserve the notion of a Hermitian\n matrix. It therefore follows that the imaginary part of the diagonal\n will always be treated as zero.\n\n Returns\n -------\n w : (..., M,) ndarray\n The eigenvalues in ascending order, each repeated according to\n its multiplicity.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.\n eigvals : eigenvalues of general real or complex arrays.\n eig : eigenvalues and right eigenvectors of general real or complex\n arrays.\n\n Notes\n -----\n\n .. 
versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The eigenvalues are computed using LAPACK routines _syevd, _heevd\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, -2j], [2j, 5]])\n >>> LA.eigvalsh(a)\n array([ 0.17157288, 5.82842712])\n \n >>> # demonstrate the treatment of the imaginary part of the diagonal\n >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) \n >>> a\n array([[ 5.+2.j, 9.-2.j],\n [ 0.+2.j, 2.-1.j]])\n >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()\n >>> # with:\n >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])\n >>> b\n array([[ 5.+0.j, 0.-2.j],\n [ 0.+2.j, 2.+0.j]])\n >>> wa = LA.eigvalsh(a)\n >>> wb = LA.eigvals(b)\n >>> wa; wb\n array([ 1., 6.])\n array([ 6.+0.j, 1.+0.j])\n\n \"\"\"\n UPLO = UPLO.upper()\n if UPLO not in ('L', 'U'):\n raise ValueError(\"UPLO argument must be 'L' or 'U'\")\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n if UPLO == 'L':\n gufunc = _umath_linalg.eigvalsh_lo\n else:\n gufunc = _umath_linalg.eigvalsh_up\n\n a, wrap = _makearray(a)\n _assertNoEmpty2d(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n signature = 'D->d' if isComplexType(t) else 'd->d'\n w = gufunc(a, signature=signature, extobj=extobj)\n return w.astype(_realType(result_t), copy=False)\n\ndef _convertarray(a):\n t, result_t = _commonType(a)\n a = _fastCT(a.astype(t))\n return a, t, result_t\n\n\n# Eigenvectors\n\n\ndef eig(a):\n \"\"\"\n Compute the eigenvalues and right eigenvectors of a square array.\n\n Parameters\n ----------\n a : (..., M, M) array\n Matrices for which the eigenvalues and right eigenvectors will\n be computed\n\n Returns\n -------\n w : (..., M) array\n The eigenvalues, each repeated according to its multiplicity.\n The eigenvalues are not necessarily ordered. The resulting\n array will be of complex type, unless the imaginary part is\n zero in which case it will be cast to a real type. When `a`\n is real the resulting eigenvalues will be real (0 imaginary\n part) or occur in conjugate pairs\n\n v : (..., M, M) array\n The normalized (unit \"length\") eigenvectors, such that the\n column ``v[:,i]`` is the eigenvector corresponding to the\n eigenvalue ``w[i]``.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigvals : eigenvalues of a non-symmetric array.\n\n eigh : eigenvalues and eigenvectors of a symmetric or Hermitian\n (conjugate symmetric) array.\n\n eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)\n array.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n This is implemented using the _geev LAPACK routines which compute\n the eigenvalues and eigenvectors of general square arrays.\n\n The number `w` is an eigenvalue of `a` if there exists a vector\n `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and\n `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``\n for :math:`i \\\\in \\\\{0,...,M-1\\\\}`.\n\n The array `v` of eigenvectors may not be of maximum rank, that is, some\n of the columns may be linearly dependent, although round-off error may\n obscure that fact. If the eigenvalues are all different, then theoretically\n the eigenvectors are linearly independent. 
Likewise, the (complex-valued)\n matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,\n if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate\n transpose of `a`.\n\n Finally, it is emphasized that `v` consists of the *right* (as in\n right-hand side) eigenvectors of `a`. A vector `y` satisfying\n ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*\n eigenvector of `a`, and, in general, the left and right eigenvectors\n of a matrix are not necessarily the (perhaps conjugate) transposes\n of each other.\n\n References\n ----------\n G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,\n Academic Press, Inc., 1980, Various pp.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n\n (Almost) trivial example with real e-values and e-vectors.\n\n >>> w, v = LA.eig(np.diag((1, 2, 3)))\n >>> w; v\n array([ 1., 2., 3.])\n array([[ 1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]])\n\n Real matrix possessing complex e-values and e-vectors; note that the\n e-values are complex conjugates of each other.\n\n >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))\n >>> w; v\n array([ 1. + 1.j, 1. - 1.j])\n array([[ 0.70710678+0.j , 0.70710678+0.j ],\n [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])\n\n Complex-valued matrix with real e-values (but complex-valued e-vectors);\n note that a.conj().T = a, i.e., a is Hermitian.\n\n >>> a = np.array([[1, 1j], [-1j, 1]])\n >>> w, v = LA.eig(a)\n >>> w; v\n array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}\n array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],\n [ 0.70710678+0.j , 0.00000000+0.70710678j]])\n\n Be careful about round-off error!\n\n >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])\n >>> # Theor. e-values are 1 +/- 1e-9\n >>> w, v = LA.eig(a)\n >>> w; v\n array([ 1., 1.])\n array([[ 1., 0.],\n [ 0., 1.]])\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n _assertFinite(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n signature = 'D->DD' if isComplexType(t) else 'd->DD'\n w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)\n\n if not isComplexType(t) and all(w.imag == 0.0):\n w = w.real\n vt = vt.real\n result_t = _realType(result_t)\n else:\n result_t = _complexType(result_t)\n\n vt = vt.astype(result_t, copy=False)\n return w.astype(result_t, copy=False), wrap(vt)\n\n\ndef eigh(a, UPLO='L'):\n \"\"\"\n Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.\n\n Returns two objects, a 1-D array containing the eigenvalues of `a`, and\n a 2-D square array or matrix (depending on the input type) of the\n corresponding eigenvectors (in columns).\n\n Parameters\n ----------\n a : (..., M, M) array\n Hermitian/Symmetric matrices whose eigenvalues and\n eigenvectors are to be computed.\n UPLO : {'L', 'U'}, optional\n Specifies whether the calculation is done with the lower triangular\n part of `a` ('L', default) or the upper triangular part ('U').\n Irrespective of this value only the real parts of the diagonal will\n be considered in the computation to preserve the notion of a Hermitian\n matrix. 
It therefore follows that the imaginary part of the diagonal\n will always be treated as zero.\n\n Returns\n -------\n w : (..., M) ndarray\n The eigenvalues in ascending order, each repeated according to\n its multiplicity.\n v : {(..., M, M) ndarray, (..., M, M) matrix}\n The column ``v[:, i]`` is the normalized eigenvector corresponding\n to the eigenvalue ``w[i]``. Will return a matrix object if `a` is\n a matrix object.\n\n Raises\n ------\n LinAlgError\n If the eigenvalue computation does not converge.\n\n See Also\n --------\n eigvalsh : eigenvalues of symmetric or Hermitian arrays.\n eig : eigenvalues and right eigenvectors for non-symmetric arrays.\n eigvals : eigenvalues of non-symmetric arrays.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,\n _heevd\n\n The eigenvalues of real symmetric or complex Hermitian matrices are\n always real. [1]_ The array `v` of (column) eigenvectors is unitary\n and `a`, `w`, and `v` satisfy the equations\n ``dot(a, v[:, i]) = w[i] * v[:, i]``.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pg. 222.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, -2j], [2j, 5]])\n >>> a\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> w, v = LA.eigh(a)\n >>> w; v\n array([ 0.17157288, 5.82842712])\n array([[-0.92387953+0.j , -0.38268343+0.j ],\n [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])\n\n >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair\n array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])\n >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair\n array([ 0.+0.j, 0.+0.j])\n\n >>> A = np.matrix(a) # what happens if input is a matrix object\n >>> A\n matrix([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> w, v = LA.eigh(A)\n >>> w; v\n array([ 0.17157288, 5.82842712])\n matrix([[-0.92387953+0.j , -0.38268343+0.j ],\n [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])\n\n >>> # demonstrate the treatment of the imaginary part of the diagonal\n >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) \n >>> a\n array([[ 5.+2.j, 9.-2.j],\n [ 0.+2.j, 2.-1.j]])\n >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:\n >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])\n >>> b\n array([[ 5.+0.j, 0.-2.j],\n [ 0.+2.j, 2.+0.j]])\n >>> wa, va = LA.eigh(a)\n >>> wb, vb = LA.eig(b)\n >>> wa; wb\n array([ 1., 6.])\n array([ 6.+0.j, 1.+0.j])\n >>> va; vb\n array([[-0.44721360-0.j , -0.89442719+0.j ],\n [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])\n array([[ 0.89442719+0.j , 0.00000000-0.4472136j],\n [ 0.00000000-0.4472136j, 0.89442719+0.j ]])\n \"\"\"\n UPLO = UPLO.upper()\n if UPLO not in ('L', 'U'):\n raise ValueError(\"UPLO argument must be 'L' or 'U'\")\n\n a, wrap = _makearray(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(\n _raise_linalgerror_eigenvalues_nonconvergence)\n if UPLO == 'L':\n gufunc = _umath_linalg.eigh_lo\n else:\n gufunc = _umath_linalg.eigh_up\n\n signature = 'D->dD' if isComplexType(t) else 'd->dd'\n w, vt = gufunc(a, signature=signature, extobj=extobj)\n w = w.astype(_realType(result_t), copy=False)\n vt = vt.astype(result_t, copy=False)\n return w, wrap(vt)\n\n\n# Singular value decomposition\n\ndef svd(a, full_matrices=1, 
compute_uv=1):\n \"\"\"\n Singular Value Decomposition.\n\n Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`\n are unitary and `s` is a 1-d array of `a`'s singular values.\n\n Parameters\n ----------\n a : (..., M, N) array_like\n A real or complex matrix of shape (`M`, `N`) .\n full_matrices : bool, optional\n If True (default), `u` and `v` have the shapes (`M`, `M`) and\n (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)\n and (`K`, `N`), respectively, where `K` = min(`M`, `N`).\n compute_uv : bool, optional\n Whether or not to compute `u` and `v` in addition to `s`. True\n by default.\n\n Returns\n -------\n u : { (..., M, M), (..., M, K) } array\n Unitary matrices. The actual shape depends on the value of\n ``full_matrices``. Only returned when ``compute_uv`` is True.\n s : (..., K) array\n The singular values for every matrix, sorted in descending order.\n v : { (..., N, N), (..., K, N) } array\n Unitary matrices. The actual shape depends on the value of\n ``full_matrices``. Only returned when ``compute_uv`` is True.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The decomposition is performed using LAPACK routine _gesdd\n\n The SVD is commonly written as ``a = U S V.H``. The `v` returned\n by this function is ``V.H`` and ``u = U``.\n\n If ``U`` is a unitary matrix, it means that it\n satisfies ``U.H = inv(U)``.\n\n The rows of `v` are the eigenvectors of ``a.H a``. The columns\n of `u` are the eigenvectors of ``a a.H``. For row ``i`` in\n `v` and column ``i`` in `u`, the corresponding eigenvalue is\n ``s[i]**2``.\n\n If `a` is a `matrix` object (as opposed to an `ndarray`), then so\n are all the return values.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)\n\n Reconstruction based on full SVD:\n\n >>> U, s, V = np.linalg.svd(a, full_matrices=True)\n >>> U.shape, V.shape, s.shape\n ((9, 9), (6, 6), (6,))\n >>> S = np.zeros((9, 6), dtype=complex)\n >>> S[:6, :6] = np.diag(s)\n >>> np.allclose(a, np.dot(U, np.dot(S, V)))\n True\n\n Reconstruction based on reduced SVD:\n\n >>> U, s, V = np.linalg.svd(a, full_matrices=False)\n >>> U.shape, V.shape, s.shape\n ((9, 6), (6, 6), (6,))\n >>> S = np.diag(s)\n >>> np.allclose(a, np.dot(U, np.dot(S, V)))\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertNoEmpty2d(a)\n _assertRankAtLeast2(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)\n\n m = a.shape[-2]\n n = a.shape[-1]\n if compute_uv:\n if full_matrices:\n if m < n:\n gufunc = _umath_linalg.svd_m_f\n else:\n gufunc = _umath_linalg.svd_n_f\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m_s\n else:\n gufunc = _umath_linalg.svd_n_s\n\n signature = 'D->DdD' if isComplexType(t) else 'd->ddd'\n u, s, vt = gufunc(a, signature=signature, extobj=extobj)\n u = u.astype(result_t, copy=False)\n s = s.astype(_realType(result_t), copy=False)\n vt = vt.astype(result_t, copy=False)\n return wrap(u), s, wrap(vt)\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m\n else:\n gufunc = _umath_linalg.svd_n\n\n signature = 'D->d' if isComplexType(t) else 'd->d'\n s = gufunc(a, signature=signature, extobj=extobj)\n s = s.astype(_realType(result_t), copy=False)\n return s\n\ndef cond(x, p=None):\n \"\"\"\n Compute the condition number of a matrix.\n\n This function is capable of returning the condition number using\n one of seven 
different norms, depending on the value of `p` (see\n Parameters below).\n\n Parameters\n ----------\n x : (..., M, N) array_like\n The matrix whose condition number is sought.\n p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional\n Order of the norm:\n\n ===== ============================\n p norm for matrices\n ===== ============================\n None 2-norm, computed directly using the ``SVD``\n 'fro' Frobenius norm\n inf max(sum(abs(x), axis=1))\n -inf min(sum(abs(x), axis=1))\n 1 max(sum(abs(x), axis=0))\n -1 min(sum(abs(x), axis=0))\n 2 2-norm (largest sing. value)\n -2 smallest singular value\n ===== ============================\n\n inf means the numpy.inf object, and the Frobenius norm is\n the root-of-sum-of-squares norm.\n\n Returns\n -------\n c : {float, inf}\n The condition number of the matrix. May be infinite.\n\n See Also\n --------\n numpy.linalg.norm\n\n Notes\n -----\n The condition number of `x` is defined as the norm of `x` times the\n norm of the inverse of `x` [1]_; the norm can be the usual L2-norm\n (root-of-sum-of-squares) or one of a number of other matrix norms.\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,\n Academic Press, Inc., 1980, pg. 285.\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])\n >>> a\n array([[ 1, 0, -1],\n [ 0, 1, 0],\n [ 1, 0, 1]])\n >>> LA.cond(a)\n 1.4142135623730951\n >>> LA.cond(a, 'fro')\n 3.1622776601683795\n >>> LA.cond(a, np.inf)\n 2.0\n >>> LA.cond(a, -np.inf)\n 1.0\n >>> LA.cond(a, 1)\n 2.0\n >>> LA.cond(a, -1)\n 1.0\n >>> LA.cond(a, 2)\n 1.4142135623730951\n >>> LA.cond(a, -2)\n 0.70710678118654746\n >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))\n 0.70710678118654746\n\n \"\"\"\n x = asarray(x) # in case we have a matrix\n if p is None:\n s = svd(x, compute_uv=False)\n return s[..., 0]/s[..., -1]\n else:\n return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"\n Return matrix rank of array using SVD method\n\n Rank of the array is the number of SVD singular values of the array that are\n greater than `tol`.\n\n Parameters\n ----------\n M : {(M,), (M, N)} array_like\n array of <=2 dimensions\n tol : {None, float}, optional\n threshold below which SVD values are considered zero. If `tol` is\n None, and ``S`` is an array with singular values for `M`, and\n ``eps`` is the epsilon value for datatype of ``S``, then `tol` is\n set to ``S.max() * max(M.shape) * eps``.\n\n Notes\n -----\n The default threshold to detect rank deficiency is a test on the magnitude\n of the singular values of `M`. By default, we identify singular values less\n than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with\n the symbols defined above). This is the algorithm MATLAB uses [1]. It also\n appears in *Numerical recipes* in the discussion of SVD solutions for linear\n least squares [2].\n\n This default threshold is designed to detect rank deficiency accounting for\n the numerical errors of the SVD computation. Imagine that there is a column\n in `M` that is an exact (in floating point) linear combination of other\n columns in `M`. 
Computing the SVD on `M` will not produce a singular value\n exactly equal to 0 in general: any difference of the smallest SVD value from\n 0 will be caused by numerical imprecision in the calculation of the SVD.\n Our threshold for small SVD values takes this numerical imprecision into\n account, and the default threshold will detect such numerical rank\n deficiency. The threshold may declare a matrix `M` rank deficient even if\n the linear combination of some columns of `M` is not exactly equal to\n another column of `M` but only numerically very close to another column of\n `M`.\n\n We chose our default threshold because it is in wide use. Other thresholds\n are possible. For example, elsewhere in the 2007 edition of *Numerical\n recipes* there is an alternative threshold of ``S.max() *\n np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe\n this threshold as being based on \"expected roundoff error\" (p 71).\n\n The thresholds above deal with floating point roundoff error in the\n calculation of the SVD. However, you may have more information about the\n sources of error in `M` that would make you consider other tolerance values\n to detect *effective* rank deficiency. The most useful measure of the\n tolerance depends on the operations you intend to use on your matrix. For\n example, if your data come from uncertain measurements with uncertainties\n greater than floating point epsilon, choosing a tolerance near that\n uncertainty may be preferable. The tolerance may be absolute if the\n uncertainties are absolute rather than relative.\n\n References\n ----------\n .. [1] MATLAB reference documention, \"Rank\"\n http://www.mathworks.com/help/techdoc/ref/rank.html\n .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,\n \"Numerical Recipes (3rd edition)\", Cambridge University Press, 2007,\n page 795.\n\n Examples\n --------\n >>> from numpy.linalg import matrix_rank\n >>> matrix_rank(np.eye(4)) # Full rank matrix\n 4\n >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix\n >>> matrix_rank(I)\n 3\n >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0\n 1\n >>> matrix_rank(np.zeros((4,)))\n 0\n \"\"\"\n M = asarray(M)\n if M.ndim > 2:\n raise TypeError('array should have 2 or fewer dimensions')\n if M.ndim < 2:\n return int(not all(M==0))\n S = svd(M, compute_uv=False)\n if tol is None:\n tol = S.max() * max(M.shape) * finfo(S.dtype).eps\n return sum(S > tol)\n\n\n# Generalized inverse\n\ndef pinv(a, rcond=1e-15 ):\n \"\"\"\n Compute the (Moore-Penrose) pseudo-inverse of a matrix.\n\n Calculate the generalized inverse of a matrix using its\n singular-value decomposition (SVD) and including all\n *large* singular values.\n\n Parameters\n ----------\n a : (M, N) array_like\n Matrix to be pseudo-inverted.\n rcond : float\n Cutoff for small singular values.\n Singular values smaller (in modulus) than\n `rcond` * largest_singular_value (again, in modulus)\n are set to zero.\n\n Returns\n -------\n B : (N, M) ndarray\n The pseudo-inverse of `a`. 
If `a` is a `matrix` instance, then so\n is `B`.\n\n Raises\n ------\n LinAlgError\n If the SVD computation does not converge.\n\n Notes\n -----\n The pseudo-inverse of a matrix A, denoted :math:`A^+`, is\n defined as: \"the matrix that 'solves' [the least-squares problem]\n :math:`Ax = b`,\" i.e., if :math:`\\\\bar{x}` is said solution, then\n :math:`A^+` is that matrix such that :math:`\\\\bar{x} = A^+b`.\n\n It can be shown that if :math:`Q_1 \\\\Sigma Q_2^T = A` is the singular\n value decomposition of A, then\n :math:`A^+ = Q_2 \\\\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are\n orthogonal matrices, :math:`\\\\Sigma` is a diagonal matrix consisting\n of A's so-called singular values, (followed, typically, by\n zeros), and then :math:`\\\\Sigma^+` is simply the diagonal matrix\n consisting of the reciprocals of A's singular values\n (again, followed by zeros). [1]_\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pp. 139-142.\n\n Examples\n --------\n The following example checks that ``a * a+ * a == a`` and\n ``a+ * a * a+ == a+``:\n\n >>> a = np.random.randn(9, 6)\n >>> B = np.linalg.pinv(a)\n >>> np.allclose(a, np.dot(a, np.dot(B, a)))\n True\n >>> np.allclose(B, np.dot(B, np.dot(a, B)))\n True\n\n \"\"\"\n a, wrap = _makearray(a)\n _assertNoEmpty2d(a)\n a = a.conjugate()\n u, s, vt = svd(a, 0)\n m = u.shape[0]\n n = vt.shape[1]\n cutoff = rcond*maximum.reduce(s)\n for i in range(min(n, m)):\n if s[i] > cutoff:\n s[i] = 1./s[i]\n else:\n s[i] = 0.\n res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))\n return wrap(res)\n\n# Determinant\n\ndef slogdet(a):\n \"\"\"\n Compute the sign and (natural) logarithm of the determinant of an array.\n\n If an array has a very small or very large determinant, then a call to\n `det` may overflow or underflow. This routine is more robust against such\n issues, because it computes the logarithm of the determinant rather than\n the determinant itself.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Input array, has to be a square 2-D array.\n\n Returns\n -------\n sign : (...) array_like\n A number representing the sign of the determinant. For a real matrix,\n this is 1, 0, or -1. For a complex matrix, this is a complex number\n with absolute value 1 (i.e., it is on the unit circle), or else 0.\n logdet : (...) array_like\n The natural log of the absolute value of the determinant.\n\n If the determinant is zero, then `sign` will be 0 and `logdet` will be\n -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.\n\n See Also\n --------\n det\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n .. 
versionadded:: 1.6.0\n\n The determinant is computed via LU factorization using the LAPACK\n routine z/dgetrf.\n\n\n Examples\n --------\n The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:\n\n >>> a = np.array([[1, 2], [3, 4]])\n >>> (sign, logdet) = np.linalg.slogdet(a)\n >>> (sign, logdet)\n (-1, 0.69314718055994529)\n >>> sign * np.exp(logdet)\n -2.0\n\n Computing log-determinants for a stack of matrices:\n\n >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])\n >>> a.shape\n (3, 2, 2)\n >>> sign, logdet = np.linalg.slogdet(a)\n >>> (sign, logdet)\n (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))\n >>> sign * np.exp(logdet)\n array([-2., -3., -8.])\n\n This routine succeeds where ordinary `det` does not:\n\n >>> np.linalg.det(np.eye(500) * 0.1)\n 0.0\n >>> np.linalg.slogdet(np.eye(500) * 0.1)\n (1, -1151.2925464970228)\n\n \"\"\"\n a = asarray(a)\n _assertNoEmpty2d(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n real_t = _realType(result_t)\n signature = 'D->Dd' if isComplexType(t) else 'd->dd'\n sign, logdet = _umath_linalg.slogdet(a, signature=signature)\n if isscalar(sign):\n sign = sign.astype(result_t)\n else:\n sign = sign.astype(result_t, copy=False)\n if isscalar(logdet):\n logdet = logdet.astype(real_t)\n else:\n logdet = logdet.astype(real_t, copy=False)\n return sign, logdet\n\ndef det(a):\n \"\"\"\n Compute the determinant of an array.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Input array to compute determinants for.\n\n Returns\n -------\n det : (...) array_like\n Determinant of `a`.\n\n See Also\n --------\n slogdet : Another way to representing the determinant, more suitable\n for large matrices where underflow/overflow may occur.\n\n Notes\n -----\n\n .. versionadded:: 1.8.0\n\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The determinant is computed via LU factorization using the LAPACK\n routine z/dgetrf.\n\n Examples\n --------\n The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:\n\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.linalg.det(a)\n -2.0\n\n Computing determinants for a stack of matrices:\n\n >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])\n >>> a.shape\n (3, 2, 2)\n >>> np.linalg.det(a)\n array([-2., -3., -8.])\n\n \"\"\"\n a = asarray(a)\n _assertNoEmpty2d(a)\n _assertRankAtLeast2(a)\n _assertNdSquareness(a)\n t, result_t = _commonType(a)\n signature = 'D->D' if isComplexType(t) else 'd->d'\n r = _umath_linalg.det(a, signature=signature)\n if isscalar(r):\n r = r.astype(result_t)\n else:\n r = r.astype(result_t, copy=False)\n return r\n\n# Linear Least Squares\n\ndef lstsq(a, b, rcond=-1):\n \"\"\"\n Return the least-squares solution to a linear matrix equation.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : (M, N) array_like\n \"Coefficient\" matrix.\n b : {(M,), (M, K)} array_like\n Ordinate or \"dependent variable\" values. 
If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n rcond : float, optional\n Cut-off ratio for small singular values of `a`.\n For the purposes of rank determination, singular values are treated\n as zero if they are smaller than `rcond` times the largest singular\n value of `a`.\n\n Returns\n -------\n x : {(N,), (N, K)} ndarray\n Least-squares solution. If `b` is two-dimensional,\n the solutions are in the `K` columns of `x`.\n residuals : {(), (1,), (K,)} ndarray\n Sums of residuals; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If the rank of `a` is < N or M <= N, this is an empty array.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : int\n Rank of matrix `a`.\n s : (min(M, N),) ndarray\n Singular values of `a`.\n\n Raises\n ------\n LinAlgError\n If computation does not converge.\n\n Notes\n -----\n If `b` is a matrix, then all array results are returned as matrices.\n\n Examples\n --------\n Fit a line, ``y = mx + c``, through some noisy data-points:\n\n >>> x = np.array([0, 1, 2, 3])\n >>> y = np.array([-1, 0.2, 0.9, 2.1])\n\n By examining the coefficients, we see that the line should have a\n gradient of roughly 1 and cut the y-axis at, more or less, -1.\n\n We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``\n and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:\n\n >>> A = np.vstack([x, np.ones(len(x))]).T\n >>> A\n array([[ 0., 1.],\n [ 1., 1.],\n [ 2., 1.],\n [ 3., 1.]])\n\n >>> m, c = np.linalg.lstsq(A, y)[0]\n >>> print(m, c)\n 1.0 -0.95\n\n Plot the data along with the fitted line:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o', label='Original data', markersize=10)\n >>> plt.plot(x, m*x + c, 'r', label='Fitted line')\n >>> plt.legend()\n >>> plt.show()\n\n \"\"\"\n import math\n a, _ = _makearray(a)\n b, wrap = _makearray(b)\n is_1d = len(b.shape) == 1\n if is_1d:\n b = b[:, newaxis]\n _assertRank2(a, b)\n m = a.shape[0]\n n = a.shape[1]\n n_rhs = b.shape[1]\n ldb = max(n, m)\n if m != b.shape[0]:\n raise LinAlgError('Incompatible dimensions')\n t, result_t = _commonType(a, b)\n result_real_t = _realType(result_t)\n real_t = _linalgRealType(t)\n bstar = zeros((ldb, n_rhs), t)\n bstar[:b.shape[0], :n_rhs] = b.copy()\n a, bstar = _fastCopyAndTranspose(t, a, bstar)\n a, bstar = _to_native_byte_order(a, bstar)\n s = zeros((min(m, n),), real_t)\n nlvl = max( 0, int( math.log( float(min(m, n))/2. 
) ) + 1 )\n iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)\n if isComplexType(t):\n lapack_routine = lapack_lite.zgelsd\n lwork = 1\n rwork = zeros((lwork,), real_t)\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, -1, rwork, iwork, 0)\n lwork = int(abs(work[0]))\n rwork = zeros((lwork,), real_t)\n a_real = zeros((m, n), real_t)\n bstar_real = zeros((ldb, n_rhs,), real_t)\n results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,\n bstar_real, ldb, s, rcond,\n 0, rwork, -1, iwork, 0)\n lrwork = int(rwork[0])\n work = zeros((lwork,), t)\n rwork = zeros((lrwork,), real_t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, lwork, rwork, iwork, 0)\n else:\n lapack_routine = lapack_lite.dgelsd\n lwork = 1\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, -1, iwork, 0)\n lwork = int(work[0])\n work = zeros((lwork,), t)\n results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,\n 0, work, lwork, iwork, 0)\n if results['info'] > 0:\n raise LinAlgError('SVD did not converge in Linear Least Squares')\n resids = array([], result_real_t)\n if is_1d:\n x = array(ravel(bstar)[:n], dtype=result_t, copy=True)\n if results['rank'] == n and m > n:\n if isComplexType(t):\n resids = array([sum(abs(ravel(bstar)[n:])**2)],\n dtype=result_real_t)\n else:\n resids = array([sum((ravel(bstar)[n:])**2)],\n dtype=result_real_t)\n else:\n x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)\n if results['rank'] == n and m > n:\n if isComplexType(t):\n resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(\n result_real_t, copy=False)\n else:\n resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(\n result_real_t, copy=False)\n\n st = s[:min(n, m)].astype(result_real_t, copy=True)\n return wrap(x), wrap(resids), results['rank'], st\n\n\ndef _multi_svd_norm(x, row_axis, col_axis, op):\n \"\"\"Compute a function of the singular values of the 2-D matrices in `x`.\n\n This is a private utility function used by numpy.linalg.norm().\n\n Parameters\n ----------\n x : ndarray\n row_axis, col_axis : int\n The axes of `x` that hold the 2-D matrices.\n op : callable\n This should be either numpy.amin or numpy.amax or numpy.sum.\n\n Returns\n -------\n result : float or ndarray\n If `x` is 2-D, the return values is a float.\n Otherwise, it is an array with ``x.ndim - 2`` dimensions.\n The return values are either the minimum or maximum or sum of the\n singular values of the matrices, depending on whether `op`\n is `numpy.amin` or `numpy.amax` or `numpy.sum`.\n\n \"\"\"\n if row_axis > col_axis:\n row_axis -= 1\n y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)\n result = op(svd(y, compute_uv=0), axis=-1)\n return result\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"\n Matrix or vector norm.\n\n This function is able to return one of eight different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like\n Input array. If `axis` is None, `x` must be 1-D or 2-D.\n ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object.\n axis : {int, 2-tuple of ints, None}, optional\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. 
If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned.\n keepdims : bool, optional\n If this is set to True, the axes which are normed over are left in the\n result as dimensions with size one. With this option the result will\n broadcast correctly against the original `x`.\n\n .. versionadded:: 1.10.0\n\n Returns\n -------\n n : float or ndarray\n Norm of the matrix or vector(s).\n\n Notes\n -----\n For values of ``ord <= 0``, the result is, strictly speaking, not a\n mathematical 'norm', but it may still be useful for various numerical\n purposes.\n\n The following norms can be calculated:\n\n ===== ============================ ==========================\n ord norm for matrices norm for vectors\n ===== ============================ ==========================\n None Frobenius norm 2-norm\n 'fro' Frobenius norm --\n 'nuc' nuclear norm --\n inf max(sum(abs(x), axis=1)) max(abs(x))\n -inf min(sum(abs(x), axis=1)) min(abs(x))\n 0 -- sum(x != 0)\n 1 max(sum(abs(x), axis=0)) as below\n -1 min(sum(abs(x), axis=0)) as below\n 2 2-norm (largest sing. value) as below\n -2 smallest singular value as below\n other -- sum(abs(x)**ord)**(1./ord)\n ===== ============================ ==========================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n The nuclear norm is the sum of the singular values.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, -1, 0, 1, 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> LA.norm(a)\n 7.745966692414834\n >>> LA.norm(b)\n 7.745966692414834\n >>> LA.norm(b, 'fro')\n 7.745966692414834\n >>> LA.norm(a, np.inf)\n 4.0\n >>> LA.norm(b, np.inf)\n 9.0\n >>> LA.norm(a, -np.inf)\n 0.0\n >>> LA.norm(b, -np.inf)\n 2.0\n\n >>> LA.norm(a, 1)\n 20.0\n >>> LA.norm(b, 1)\n 7.0\n >>> LA.norm(a, -1)\n -4.6566128774142013e-010\n >>> LA.norm(b, -1)\n 6.0\n >>> LA.norm(a, 2)\n 7.745966692414834\n >>> LA.norm(b, 2)\n 7.3484692283495345\n\n >>> LA.norm(a, -2)\n nan\n >>> LA.norm(b, -2)\n 1.8570331885190563e-016\n >>> LA.norm(a, 3)\n 5.8480354764257312\n >>> LA.norm(a, -3)\n nan\n\n Using the `axis` argument to compute vector norms:\n\n >>> c = np.array([[ 1, 2, 3],\n ... [-1, 1, 4]])\n >>> LA.norm(c, axis=0)\n array([ 1.41421356, 2.23606798, 5. 
])\n >>> LA.norm(c, axis=1)\n array([ 3.74165739, 4.24264069])\n >>> LA.norm(c, ord=1, axis=1)\n array([ 6., 6.])\n\n Using the `axis` argument to compute matrix norms:\n\n >>> m = np.arange(8).reshape(2,2,2)\n >>> LA.norm(m, axis=(1,2))\n array([ 3.74165739, 11.22497216])\n >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])\n (3.7416573867739413, 11.224972160321824)\n\n \"\"\"\n x = asarray(x)\n\n if not issubclass(x.dtype.type, (inexact, object_)):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if ((ord is None) or\n (ord in ('f', 'fro') and ndim == 2) or\n (ord == 2 and ndim == 1)):\n\n x = x.ravel(order='K')\n if isComplexType(x.dtype.type):\n sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)\n else:\n sqnorm = dot(x, x)\n ret = sqrt(sqnorm)\n if keepdims:\n ret = ret.reshape(ndim*[1])\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except:\n raise TypeError(\"'axis' must be None, an integer or a tuple of integers\")\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)\n elif ord == 1:\n # special case for speedup\n return add.reduce(abs(x), axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n s = (x.conj() * x).real\n return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))\n else:\n try:\n ord + 1\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n if x.dtype.type is longdouble:\n # Convert to a float type, so integer arrays give\n # float results. 
Don't apply asfarray to longdouble arrays,\n # because it will downcast to float64.\n absx = abs(x)\n else:\n absx = x if isComplexType(x.dtype.type) else asfarray(x)\n if absx.dtype is x.dtype:\n absx = abs(absx)\n else:\n # if the type changed, we can safely overwrite absx\n abs(absx, out=absx)\n absx **= ord\n return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 2:\n ret = _multi_svd_norm(x, row_axis, col_axis, amax)\n elif ord == -2:\n ret = _multi_svd_norm(x, row_axis, col_axis, amin)\n elif ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)\n elif ord == Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)\n elif ord == -Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))\n elif ord == 'nuc':\n ret = _multi_svd_norm(x, row_axis, col_axis, sum)\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n\n# multi_dot\n\ndef multi_dot(arrays):\n \"\"\"\n Compute the dot product of two or more arrays in a single function call,\n while automatically selecting the fastest evaluation order.\n\n `multi_dot` chains `numpy.dot` and uses optimal parenthesization\n of the matrices [1]_ [2]_. Depending on the shapes of the matrices,\n this can speed up the multiplication a lot.\n\n If the first argument is 1-D it is treated as a row vector.\n If the last argument is 1-D it is treated as a column vector.\n The other arguments must be 2-D.\n\n Think of `multi_dot` as::\n\n def multi_dot(arrays): return functools.reduce(np.dot, arrays)\n\n\n Parameters\n ----------\n arrays : sequence of array_like\n If the first argument is 1-D it is treated as row vector.\n If the last argument is 1-D it is treated as column vector.\n The other arguments must be 2-D.\n\n Returns\n -------\n output : ndarray\n Returns the dot product of the supplied arrays.\n\n See Also\n --------\n dot : dot multiplication with two arguments.\n\n References\n ----------\n\n .. [1] Cormen, \"Introduction to Algorithms\", Chapter 15.2, p. 370-378\n .. 
[2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication\n\n Examples\n --------\n `multi_dot` allows you to write::\n\n >>> from numpy.linalg import multi_dot\n >>> # Prepare some data\n >>> A = np.random.random(10000, 100)\n >>> B = np.random.random(100, 1000)\n >>> C = np.random.random(1000, 5)\n >>> D = np.random.random(5, 333)\n >>> # the actual dot multiplication\n >>> multi_dot([A, B, C, D])\n\n instead of::\n\n >>> np.dot(np.dot(np.dot(A, B), C), D)\n >>> # or\n >>> A.dot(B).dot(C).dot(D)\n\n\n Example: multiplication costs of different parenthesizations\n ------------------------------------------------------------\n\n The cost for a matrix multiplication can be calculated with the\n following function::\n\n def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]\n\n Let's assume we have three matrices\n :math:`A_{10x100}, B_{100x5}, C_{5x50}$`.\n\n The costs for the two different parenthesizations are as follows::\n\n cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500\n cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000\n\n \"\"\"\n n = len(arrays)\n # optimization only makes sense for len(arrays) > 2\n if n < 2:\n raise ValueError(\"Expecting at least two arrays.\")\n elif n == 2:\n return dot(arrays[0], arrays[1])\n\n arrays = [asanyarray(a) for a in arrays]\n\n # save original ndim to reshape the result array into the proper form later\n ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim\n # Explicitly convert vectors to 2D arrays to keep the logic of the internal\n # _multi_dot_* functions as simple as possible.\n if arrays[0].ndim == 1:\n arrays[0] = atleast_2d(arrays[0])\n if arrays[-1].ndim == 1:\n arrays[-1] = atleast_2d(arrays[-1]).T\n _assertRank2(*arrays)\n\n # _multi_dot_three is much faster than _multi_dot_matrix_chain_order\n if n == 3:\n result = _multi_dot_three(arrays[0], arrays[1], arrays[2])\n else:\n order = _multi_dot_matrix_chain_order(arrays)\n result = _multi_dot(arrays, order, 0, n - 1)\n\n # return proper shape\n if ndim_first == 1 and ndim_last == 1:\n return result[0, 0] # scalar\n elif ndim_first == 1 or ndim_last == 1:\n return result.ravel() # 1-D\n else:\n return result\n\n\ndef _multi_dot_three(A, B, C):\n \"\"\"\n Find the best order for three arrays and do the multiplication.\n\n For three arguments `_multi_dot_three` is approximately 15 times faster\n than `_multi_dot_matrix_chain_order`\n\n \"\"\"\n a0, a1b0 = A.shape\n b1c0, c1 = C.shape\n # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1\n cost1 = a0 * b1c0 * (a1b0 + c1)\n # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1\n cost2 = a1b0 * c1 * (a0 + b1c0)\n\n if cost1 < cost2:\n return dot(dot(A, B), C)\n else:\n return dot(A, dot(B, C))\n\n\ndef _multi_dot_matrix_chain_order(arrays, return_costs=False):\n \"\"\"\n Return a np.array that encodes the optimal order of mutiplications.\n\n The optimal order array is then used by `_multi_dot()` to do the\n multiplication.\n\n Also return the cost matrix if `return_costs` is `True`\n\n The implementation CLOSELY follows Cormen, \"Introduction to Algorithms\",\n Chapter 15.2, p. 370-378. 
Note that Cormen uses 1-based indices.\n\n cost[i, j] = min([\n cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)\n for k in range(i, j)])\n\n \"\"\"\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = zeros((n, n), dtype=double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = empty((n, n), dtype=intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = Inf\n for k in range(i, j):\n q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n\n return (s, m) if return_costs else s\n\n\ndef _multi_dot(arrays, order, i, j):\n \"\"\"Actually do the multiplication with the given order.\"\"\"\n if i == j:\n return arrays[i]\n else:\n return dot(_multi_dot(arrays, order, i, order[i, j]),\n _multi_dot(arrays, order, order[i, j] + 1, j))\n",
"\"\"\"Functions copypasted from newer versions of numpy.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport sys\n\nimport numpy as np\nfrom numpy.testing.nosetester import import_nose\n\nfrom scipy._lib._version import NumpyVersion\n\n\nif NumpyVersion(np.__version__) > '1.7.0.dev':\n _assert_warns = np.testing.assert_warns\nelse:\n def _assert_warns(warning_class, func, *args, **kw):\n r\"\"\"\n Fail unless the given callable throws the specified warning.\n\n This definition is copypasted from numpy 1.9.0.dev.\n The version in earlier numpy returns None.\n\n Parameters\n ----------\n warning_class : class\n The class defining the warning that `func` is expected to throw.\n func : callable\n The callable to test.\n *args : Arguments\n Arguments passed to `func`.\n **kwargs : Kwargs\n Keyword arguments passed to `func`.\n\n Returns\n -------\n The value returned by `func`.\n\n \"\"\"\n with warnings.catch_warnings(record=True) as l:\n warnings.simplefilter('always')\n result = func(*args, **kw)\n if not len(l) > 0:\n raise AssertionError(\"No warning raised when calling %s\"\n % func.__name__)\n if not l[0].category is warning_class:\n raise AssertionError(\"First warning for %s is not a \"\n \"%s( is %s)\" % (func.__name__, warning_class, l[0]))\n return result\n\n\ndef assert_raises_regex(exception_class, expected_regexp,\n callable_obj=None, *args, **kwargs):\n \"\"\"\n Fail unless an exception of class exception_class and with message that\n matches expected_regexp is thrown by callable when invoked with arguments\n args and keyword arguments kwargs.\n Name of this function adheres to Python 3.2+ reference, but should work in\n all versions down to 2.6.\n Notes\n -----\n .. versionadded:: 1.8.0\n \"\"\"\n __tracebackhide__ = True # Hide traceback for py.test\n nose = import_nose()\n\n if sys.version_info.major >= 3:\n funcname = nose.tools.assert_raises_regex\n else:\n # Only present in Python 2.7, missing from unittest in 2.6\n funcname = nose.tools.assert_raises_regexp\n\n return funcname(exception_class, expected_regexp, callable_obj,\n *args, **kwargs)\n\n\nif NumpyVersion(np.__version__) >= '1.10.0':\n from numpy import broadcast_to\nelse:\n # Definition of `broadcast_to` from numpy 1.10.0.\n\n def _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n def _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer(\n (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],\n op_flags=['readonly'], itershape=shape, order='C').itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n def broadcast_to(array, shape, subok=False):\n 
return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\nif NumpyVersion(np.__version__) >= '1.9.0':\n from numpy import unique\nelse:\n # the return_counts keyword was added in 1.9.0\n def unique(ar, return_index=False, return_inverse=False, return_counts=False):\n \"\"\"\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements: the indices of the input array\n that give the unique values, the indices of the unique array that\n reconstruct the input array, and the number of times each unique value\n comes up in the input array.\n\n Parameters\n ----------\n ar : array_like\n Input array. This will be flattened if it is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` that result in the unique\n array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array that can be used\n to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique value comes up\n in `ar`.\n\n .. versionadded:: 1.9.0\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n (flattened) original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the (flattened) original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n .. versionadded:: 1.9.0\n\n Notes\n -----\n Taken over from numpy 1.12.0-dev (c8408bf9c). Omitted examples,\n see numpy documentation for those.\n\n \"\"\"\n ar = np.asanyarray(ar).flatten()\n\n optional_indices = return_index or return_inverse\n optional_returns = optional_indices or return_counts\n\n if ar.size == 0:\n if not optional_returns:\n ret = ar\n else:\n ret = (ar,)\n if return_index:\n ret += (np.empty(0, np.bool),)\n if return_inverse:\n ret += (np.empty(0, np.bool),)\n if return_counts:\n ret += (np.empty(0, np.intp),)\n return ret\n\n if optional_indices:\n perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')\n aux = ar[perm]\n else:\n ar.sort()\n aux = ar\n flag = np.concatenate(([True], aux[1:] != aux[:-1]))\n\n if not optional_returns:\n ret = aux[flag]\n else:\n ret = (aux[flag],)\n if return_index:\n ret += (perm[flag],)\n if return_inverse:\n iflag = np.cumsum(flag) - 1\n inv_idx = np.empty(ar.shape, dtype=np.intp)\n inv_idx[perm] = iflag\n ret += (inv_idx,)\n if return_counts:\n idx = np.concatenate(np.nonzero(flag) + ([ar.size],))\n ret += (np.diff(idx),)\n return ret\n\n\nif NumpyVersion(np.__version__) > '1.12.0.dev':\n polyvalfromroots = np.polynomial.polynomial.polyvalfromroots\nelse:\n def polyvalfromroots(x, r, tensor=True):\n \"\"\"\n Evaluate a polynomial specified by its roots at points x.\n\n This function is copypasted from numpy 1.12.0.dev.\n\n If `r` is of length `N`, this function returns the value\n\n .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n)\n\n The parameter `x` is converted to an array only if it is a tuple or a\n list, otherwise it is treated as a scalar. 
In either case, either `x`\n or its elements must support multiplication and addition both with\n themselves and with the elements of `r`.\n\n If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If\n `r` is multidimensional, then the shape of the result depends on the\n value of `tensor`. If `tensor is ``True`` the shape will be r.shape[1:]\n + x.shape; that is, each polynomial is evaluated at every value of `x`.\n If `tensor` is ``False``, the shape will be r.shape[1:]; that is, each\n polynomial is evaluated only for the corresponding broadcast value of\n `x`. Note that scalars have shape (,).\n\n Parameters\n ----------\n x : array_like, compatible object\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\n it is left unchanged and treated as a scalar. In either case, `x`\n or its elements must support addition and multiplication with with\n themselves and with the elements of `r`.\n r : array_like\n Array of roots. If `r` is multidimensional the first index is the\n root index, while the remaining indices enumerate multiple\n polynomials. For instance, in the two dimensional case the roots of\n each polynomial may be thought of as stored in the columns of `r`.\n tensor : boolean, optional\n If True, the shape of the roots array is extended with ones on the\n right, one for each dimension of `x`. Scalars have dimension 0 for\n this action. The result is that every column of coefficients in `r`\n is evaluated for every element of `x`. If False, `x` is broadcast\n over the columns of `r` for the evaluation. This keyword is useful\n when `r` is multidimensional. The default value is True.\n\n Returns\n -------\n values : ndarray, compatible object\n The shape of the returned array is described above.\n\n See Also\n --------\n polyroots, polyfromroots, polyval\n\n Examples\n --------\n >>> from numpy.polynomial.polynomial import polyvalfromroots\n >>> polyvalfromroots(1, [1,2,3])\n 0.0\n >>> a = np.arange(4).reshape(2,2)\n >>> a\n array([[0, 1],\n [2, 3]])\n >>> polyvalfromroots(a, [-1, 0, 1])\n array([[ -0., 0.],\n [ 6., 24.]])\n >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients\n >>> r # each column of r defines one polynomial\n array([[-2, -1],\n [ 0, 1]])\n >>> b = [-2, 1]\n >>> polyvalfromroots(b, r, tensor=True)\n array([[-0., 3.],\n [ 3., 0.]])\n >>> polyvalfromroots(b, r, tensor=False)\n array([-0., 0.])\n \"\"\"\n r = np.array(r, ndmin=1, copy=0)\n if r.dtype.char in '?bBhHiIlLqQpP':\n r = r.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray):\n if tensor:\n r = r.reshape(r.shape + (1,)*x.ndim)\n elif x.ndim >= r.ndim:\n raise ValueError(\"x.ndim must be < r.ndim when tensor == \"\n \"False\")\n return np.prod(x - r, axis=0)\n",
"#!/usr/bin/python\n\"\"\"\ntakes templated file .xxx.src and produces .xxx file where .xxx is\n.i or .c or .h, using the following template rules\n\n/**begin repeat -- on a line by itself marks the start of a repeated code\n segment\n/**end repeat**/ -- on a line by itself marks it's end\n\nAfter the /**begin repeat and before the */, all the named templates are placed\nthese should all have the same number of replacements\n\nRepeat blocks can be nested, with each nested block labeled with its depth,\ni.e.\n/**begin repeat1\n *....\n */\n/**end repeat1**/\n\nWhen using nested loops, you can optionally exclude particular\ncombinations of the variables using (inside the comment portion of the inner loop):\n\n :exclude: var1=value1, var2=value2, ...\n\nThis will exclude the pattern where var1 is value1 and var2 is value2 when\nthe result is being generated.\n\n\nIn the main body each replace will use one entry from the list of named replacements\n\n Note that all #..# forms in a block must have the same number of\n comma-separated entries.\n\nExample:\n\n An input file containing\n\n /**begin repeat\n * #a = 1,2,3#\n * #b = 1,2,3#\n */\n\n /**begin repeat1\n * #c = ted, jim#\n */\n @a@, @b@, @c@\n /**end repeat1**/\n\n /**end repeat**/\n\n produces\n\n line 1 \"template.c.src\"\n\n /*\n *********************************************************************\n ** This file was autogenerated from a template DO NOT EDIT!!**\n ** Changes should be made to the original source (.src) file **\n *********************************************************************\n */\n\n #line 9\n 1, 1, ted\n\n #line 9\n 1, 1, jim\n\n #line 9\n 2, 2, ted\n\n #line 9\n 2, 2, jim\n\n #line 9\n 3, 3, ted\n\n #line 9\n 3, 3, jim\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n\n__all__ = ['process_str', 'process_file']\n\nimport os\nimport sys\nimport re\n\nfrom numpy.distutils.compat import get_exception\n\n# names for replacement that are already global.\nglobal_names = {}\n\n# header placed at the front of head processed file\nheader =\\\n\"\"\"\n/*\n *****************************************************************************\n ** This file was autogenerated from a template DO NOT EDIT!!!! **\n ** Changes should be made to the original source (.src) file **\n *****************************************************************************\n */\n\n\"\"\"\n# Parse string for repeat loops\ndef parse_structure(astr, level):\n \"\"\"\n The returned line number is from the beginning of the string, starting\n at zero. 
Returns an empty list if no loops found.\n\n \"\"\"\n if level == 0 :\n loopbeg = \"/**begin repeat\"\n loopend = \"/**end repeat**/\"\n else :\n loopbeg = \"/**begin repeat%d\" % level\n loopend = \"/**end repeat%d**/\" % level\n\n ind = 0\n line = 0\n spanlist = []\n while True:\n start = astr.find(loopbeg, ind)\n if start == -1:\n break\n start2 = astr.find(\"*/\", start)\n start2 = astr.find(\"\\n\", start2)\n fini1 = astr.find(loopend, start2)\n fini2 = astr.find(\"\\n\", fini1)\n line += astr.count(\"\\n\", ind, start2+1)\n spanlist.append((start, start2+1, fini1, fini2+1, line))\n line += astr.count(\"\\n\", start2+1, fini2)\n ind = fini2\n spanlist.sort()\n return spanlist\n\n\ndef paren_repl(obj):\n torep = obj.group(1)\n numrep = obj.group(2)\n return ','.join([torep]*int(numrep))\n\nparenrep = re.compile(r\"[(]([^)]*)[)]\\*(\\d+)\")\nplainrep = re.compile(r\"([^*]+)\\*(\\d+)\")\ndef parse_values(astr):\n # replaces all occurrences of '(a,b,c)*4' in astr\n # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate\n # empty values, i.e., ()*4 yields ',,,'. The result is\n # split at ',' and a list of values returned.\n astr = parenrep.sub(paren_repl, astr)\n # replaces occurrences of xxx*3 with xxx, xxx, xxx\n astr = ','.join([plainrep.sub(paren_repl, x.strip())\n for x in astr.split(',')])\n return astr.split(',')\n\n\nstripast = re.compile(r\"\\n\\s*\\*?\")\nnamed_re = re.compile(r\"#\\s*(\\w*)\\s*=([^#]*)#\")\nexclude_vars_re = re.compile(r\"(\\w*)=(\\w*)\")\nexclude_re = re.compile(\":exclude:\")\ndef parse_loop_header(loophead) :\n \"\"\"Find all named replacements in the header\n\n Returns a list of dictionaries, one for each loop iteration,\n where each key is a name to be substituted and the corresponding\n value is the replacement string.\n\n Also return a list of exclusions. The exclusions are dictionaries\n of key value pairs. 
There can be more than one exclusion.\n [{'var1':'value1', 'var2', 'value2'[,...]}, ...]\n\n \"\"\"\n # Strip out '\\n' and leading '*', if any, in continuation lines.\n # This should not effect code previous to this change as\n # continuation lines were not allowed.\n loophead = stripast.sub(\"\", loophead)\n # parse out the names and lists of values\n names = []\n reps = named_re.findall(loophead)\n nsub = None\n for rep in reps:\n name = rep[0]\n vals = parse_values(rep[1])\n size = len(vals)\n if nsub is None :\n nsub = size\n elif nsub != size :\n msg = \"Mismatch in number of values:\\n%s = %s\" % (name, vals)\n raise ValueError(msg)\n names.append((name, vals))\n\n\n # Find any exclude variables\n excludes = []\n\n for obj in exclude_re.finditer(loophead):\n span = obj.span()\n # find next newline\n endline = loophead.find('\\n', span[1])\n substr = loophead[span[1]:endline]\n ex_names = exclude_vars_re.findall(substr)\n excludes.append(dict(ex_names))\n\n # generate list of dictionaries, one for each template iteration\n dlist = []\n if nsub is None :\n raise ValueError(\"No substitution variables found\")\n for i in range(nsub) :\n tmp = {}\n for name, vals in names :\n tmp[name] = vals[i]\n dlist.append(tmp)\n return dlist\n\nreplace_re = re.compile(r\"@([\\w]+)@\")\ndef parse_string(astr, env, level, line) :\n lineno = \"#line %d\\n\" % line\n\n # local function for string replacement, uses env\n def replace(match):\n name = match.group(1)\n try :\n val = env[name]\n except KeyError:\n msg = 'line %d: no definition of key \"%s\"'%(line, name)\n raise ValueError(msg)\n return val\n\n code = [lineno]\n struct = parse_structure(astr, level)\n if struct :\n # recurse over inner loops\n oldend = 0\n newlevel = level + 1\n for sub in struct:\n pref = astr[oldend:sub[0]]\n head = astr[sub[0]:sub[1]]\n text = astr[sub[1]:sub[2]]\n oldend = sub[3]\n newline = line + sub[4]\n code.append(replace_re.sub(replace, pref))\n try :\n envlist = parse_loop_header(head)\n except ValueError:\n e = get_exception()\n msg = \"line %d: %s\" % (newline, e)\n raise ValueError(msg)\n for newenv in envlist :\n newenv.update(env)\n newcode = parse_string(text, newenv, newlevel, newline)\n code.extend(newcode)\n suff = astr[oldend:]\n code.append(replace_re.sub(replace, suff))\n else :\n # replace keys\n code.append(replace_re.sub(replace, astr))\n code.append('\\n')\n return ''.join(code)\n\ndef process_str(astr):\n code = [header]\n code.extend(parse_string(astr, global_names, 0, 1))\n return ''.join(code)\n\n\ninclude_src_re = re.compile(r\"(\\n|\\A)#include\\s*['\\\"]\"\n r\"(?P<name>[\\w\\d./\\\\]+[.]src)['\\\"]\", re.I)\n\ndef resolve_includes(source):\n d = os.path.dirname(source)\n fid = open(source)\n lines = []\n for line in fid:\n m = include_src_re.match(line)\n if m:\n fn = m.group('name')\n if not os.path.isabs(fn):\n fn = os.path.join(d, fn)\n if os.path.isfile(fn):\n print('Including file', fn)\n lines.extend(resolve_includes(fn))\n else:\n lines.append(line)\n else:\n lines.append(line)\n fid.close()\n return lines\n\ndef process_file(source):\n lines = resolve_includes(source)\n sourcefile = os.path.normcase(source).replace(\"\\\\\", \"\\\\\\\\\")\n try:\n code = process_str(''.join(lines))\n except ValueError:\n e = get_exception()\n raise ValueError('In \"%s\" loop at %s' % (sourcefile, e))\n return '#line 1 \"%s\"\\n%s' % (sourcefile, code)\n\n\ndef unique_key(adict):\n # this obtains a unique key given a dictionary\n # currently it works by appending together n of the letters of the\n # 
current keys and increasing n until a unique key is found\n # -- not particularly quick\n allkeys = list(adict.keys())\n done = False\n n = 1\n while not done:\n newkey = \"\".join([x[:n] for x in allkeys])\n if newkey in allkeys:\n n += 1\n else:\n done = True\n return newkey\n\n\nif __name__ == \"__main__\":\n\n try:\n file = sys.argv[1]\n except IndexError:\n fid = sys.stdin\n outfile = sys.stdout\n else:\n fid = open(file, 'r')\n (base, ext) = os.path.splitext(file)\n newname = base\n outfile = open(newname, 'w')\n\n allstr = fid.read()\n try:\n writestr = process_str(allstr)\n except ValueError:\n e = get_exception()\n raise ValueError(\"In %s loop at %s\" % (file, e))\n outfile.write(writestr)\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.testing.decorators import cleanup\n\n\n@cleanup\ndef test_stem_remove():\n ax = plt.gca()\n st = ax.stem([1, 2], [1, 2])\n st.remove()\n",
"# Author: Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom sklearn.utils.graph import graph_laplacian\n\n\ndef test_graph_laplacian():\n for mat in (np.arange(10) * np.arange(10)[:, np.newaxis],\n np.ones((7, 7)),\n np.eye(19),\n np.vander(np.arange(4)) + np.vander(np.arange(4)).T,):\n sp_mat = sparse.csr_matrix(mat)\n for normed in (True, False):\n laplacian = graph_laplacian(mat, normed=normed)\n n_nodes = mat.shape[0]\n if not normed:\n np.testing.assert_array_almost_equal(laplacian.sum(axis=0),\n np.zeros(n_nodes))\n np.testing.assert_array_almost_equal(laplacian.T, laplacian)\n np.testing.assert_array_almost_equal(\n laplacian, graph_laplacian(sp_mat, normed=normed).toarray())\n",
"\"\"\"Univariate features selection.\"\"\"\n\n# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.\n# L. Buitinck, A. Joly\n# License: BSD 3 clause\n\n\nimport numpy as np\nimport warnings\n\nfrom scipy import special, stats\nfrom scipy.sparse import issparse\n\nfrom ..base import BaseEstimator\nfrom ..preprocessing import LabelBinarizer\nfrom ..utils import (as_float_array, check_array, check_X_y, safe_sqr,\n safe_mask)\nfrom ..utils.extmath import norm, safe_sparse_dot, row_norms\nfrom ..utils.validation import check_is_fitted\nfrom .base import SelectorMixin\n\n\ndef _clean_nans(scores):\n \"\"\"\n Fixes Issue #1240: NaNs can't be properly compared, so change them to the\n smallest value of scores's dtype. -inf seems to be unreliable.\n \"\"\"\n # XXX where should this function be called? fit? scoring functions\n # themselves?\n scores = as_float_array(scores, copy=True)\n scores[np.isnan(scores)] = np.finfo(scores.dtype).min\n return scores\n\n\n######################################################################\n# Scoring functions\n\n\n# The following function is a rewriting of scipy.stats.f_oneway\n# Contrary to the scipy.stats.f_oneway implementation it does not\n# copy the data while keeping the inputs unchanged.\ndef f_oneway(*args):\n \"\"\"Performs a 1-way ANOVA.\n\n The one-way ANOVA tests the null hypothesis that 2 or more groups have\n the same population mean. The test is applied to samples from two or\n more groups, possibly with differing sizes.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n sample1, sample2, ... : array_like, sparse matrices\n The sample measurements should be given as arguments.\n\n Returns\n -------\n F-value : float\n The computed F-value of the test.\n p-value : float\n The associated p-value from the F-distribution.\n\n Notes\n -----\n The ANOVA test has important assumptions that must be satisfied in order\n for the associated p-value to be valid.\n\n 1. The samples are independent\n 2. Each sample is from a normally distributed population\n 3. The population standard deviations of the groups are all equal. This\n property is known as homoscedasticity.\n\n If these assumptions are not true for a given set of data, it may still be\n possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although\n with some loss of power.\n\n The algorithm is from Heiman[2], pp.394-7.\n\n See ``scipy.stats.f_oneway`` that should give the same results while\n being less efficient.\n\n References\n ----------\n\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 14.\n http://faculty.vassar.edu/lowry/ch14pt1.html\n\n .. [2] Heiman, G.W. Research Methods in Statistics. 
2002.\n\n \"\"\"\n n_classes = len(args)\n args = [as_float_array(a) for a in args]\n n_samples_per_class = np.array([a.shape[0] for a in args])\n n_samples = np.sum(n_samples_per_class)\n ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)\n sums_args = [np.asarray(a.sum(axis=0)) for a in args]\n square_of_sums_alldata = sum(sums_args) ** 2\n square_of_sums_args = [s ** 2 for s in sums_args]\n sstot = ss_alldata - square_of_sums_alldata / float(n_samples)\n ssbn = 0.\n for k, _ in enumerate(args):\n ssbn += square_of_sums_args[k] / n_samples_per_class[k]\n ssbn -= square_of_sums_alldata / float(n_samples)\n sswn = sstot - ssbn\n dfbn = n_classes - 1\n dfwn = n_samples - n_classes\n msb = ssbn / float(dfbn)\n msw = sswn / float(dfwn)\n constant_features_idx = np.where(msw == 0.)[0]\n if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):\n warnings.warn(\"Features %s are constant.\" % constant_features_idx,\n UserWarning)\n f = msb / msw\n # flatten matrix to vector in sparse case\n f = np.asarray(f).ravel()\n prob = special.fdtrc(dfbn, dfwn, f)\n return f, prob\n\n\ndef f_classif(X, y):\n \"\"\"Compute the ANOVA F-value for the provided sample.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} shape = [n_samples, n_features]\n The set of regressors that will be tested sequentially.\n\n y : array of shape(n_samples)\n The data matrix.\n\n Returns\n -------\n F : array, shape = [n_features,]\n The set of F values.\n\n pval : array, shape = [n_features,]\n The set of p-values.\n\n See also\n --------\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n \"\"\"\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])\n args = [X[safe_mask(X, y == k)] for k in np.unique(y)]\n return f_oneway(*args)\n\n\ndef _chisquare(f_obs, f_exp):\n \"\"\"Fast replacement for scipy.stats.chisquare.\n\n Version from https://github.com/scipy/scipy/pull/2525 with additional\n optimizations.\n \"\"\"\n f_obs = np.asarray(f_obs, dtype=np.float64)\n\n k = len(f_obs)\n # Reuse f_obs for chi-squared statistics\n chisq = f_obs\n chisq -= f_exp\n chisq **= 2\n with np.errstate(invalid=\"ignore\"):\n chisq /= f_exp\n chisq = chisq.sum(axis=0)\n return chisq, special.chdtrc(k - 1, chisq)\n\n\ndef chi2(X, y):\n \"\"\"Compute chi-squared stats between each non-negative feature and class.\n\n This score can be used to select the n_features features with the\n highest values for the test chi-squared statistic from X, which must\n contain only non-negative features such as booleans or frequencies\n (e.g., term counts in document classification), relative to the classes.\n\n Recall that the chi-square test measures dependence between stochastic\n variables, so using this function \"weeds out\" the features that are the\n most likely to be independent of class and therefore irrelevant for\n classification.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)\n Sample vectors.\n\n y : array-like, shape = (n_samples,)\n Target vector (class labels).\n\n Returns\n -------\n chi2 : array, shape = (n_features,)\n chi2 statistics of each feature.\n pval : array, shape = (n_features,)\n p-values of each feature.\n\n Notes\n -----\n Complexity of this algorithm is O(n_classes * n_features).\n\n See also\n --------\n f_classif: ANOVA 
F-value between label/feature for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n \"\"\"\n\n # XXX: we might want to do some of the following in logspace instead for\n # numerical stability.\n X = check_array(X, accept_sparse='csr')\n if np.any((X.data if issparse(X) else X) < 0):\n raise ValueError(\"Input X must be non-negative.\")\n\n Y = LabelBinarizer().fit_transform(y)\n if Y.shape[1] == 1:\n Y = np.append(1 - Y, Y, axis=1)\n\n observed = safe_sparse_dot(Y.T, X) # n_classes * n_features\n\n feature_count = X.sum(axis=0).reshape(1, -1)\n class_prob = Y.mean(axis=0).reshape(1, -1)\n expected = np.dot(class_prob.T, feature_count)\n\n return _chisquare(observed, expected)\n\n\ndef f_regression(X, y, center=True):\n \"\"\"Univariate linear regression tests.\n\n Quick linear model for testing the effect of a single regressor,\n sequentially for many regressors.\n\n This is done in 2 steps:\n\n 1. The cross correlation between each regressor and the target is computed,\n that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *\n std(y)).\n 2. It is converted to an F score then to a p-value.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} shape = (n_samples, n_features)\n The set of regressors that will be tested sequentially.\n\n y : array of shape(n_samples).\n The data matrix\n\n center : True, bool,\n If true, X and y will be centered.\n\n Returns\n -------\n F : array, shape=(n_features,)\n F values of features.\n\n pval : array, shape=(n_features,)\n p-values of F-scores.\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n \"\"\"\n if issparse(X) and center:\n raise ValueError(\"center=True only allowed for dense data\")\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)\n if center:\n y = y - np.mean(y)\n X = X.copy('F') # faster in fortran\n X -= X.mean(axis=0)\n\n # compute the correlation\n corr = safe_sparse_dot(y, X)\n corr /= row_norms(X.T)\n corr /= norm(y)\n\n # convert to p-value\n degrees_of_freedom = y.size - (2 if center else 1)\n F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom\n pv = stats.f.sf(F, 1, degrees_of_freedom)\n return F, pv\n\n\n######################################################################\n# Base classes\n\nclass _BaseFilter(BaseEstimator, SelectorMixin):\n \"\"\"Initialize the univariate feature selection.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues) or a single array with scores.\n \"\"\"\n\n def __init__(self, score_func):\n self.score_func = score_func\n\n def fit(self, X, y):\n \"\"\"Run score function on (X, y) and get the appropriate features.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n The training input samples.\n\n y : array-like, shape = [n_samples]\n The target values (class labels in classification, real numbers in\n regression).\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)\n\n if not callable(self.score_func):\n raise TypeError(\"The score function should be a callable, %s (%s) \"\n \"was passed.\"\n % (self.score_func, type(self.score_func)))\n\n self._check_params(X, y)\n score_func_ret = self.score_func(X, y)\n if isinstance(score_func_ret, 
(list, tuple)):\n self.scores_, self.pvalues_ = score_func_ret\n self.pvalues_ = np.asarray(self.pvalues_)\n else:\n self.scores_ = score_func_ret\n self.pvalues_ = None\n\n self.scores_ = np.asarray(self.scores_)\n\n return self\n\n def _check_params(self, X, y):\n pass\n\n\n######################################################################\n# Specific filters\n######################################################################\nclass SelectPercentile(_BaseFilter):\n \"\"\"Select features according to a percentile of the highest scores.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues) or a single array with scores.\n Default is f_classif (see below \"See also\"). The default function only\n works with classification tasks.\n\n percentile : int, optional, default=10\n Percent of features to keep.\n\n Attributes\n ----------\n scores_ : array-like, shape=(n_features,)\n Scores of features.\n\n pvalues_ : array-like, shape=(n_features,)\n p-values of feature scores, None if `score_func` returned only scores.\n\n Notes\n -----\n Ties between features with equal scores will be broken in an unspecified\n way.\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif: Mutual information for a discrete target.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n mutual_info_regression: Mutual information for a continuous target.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n GenericUnivariateSelect: Univariate feature selector with configurable mode.\n \"\"\"\n\n def __init__(self, score_func=f_classif, percentile=10):\n super(SelectPercentile, self).__init__(score_func)\n self.percentile = percentile\n\n def _check_params(self, X, y):\n if not 0 <= self.percentile <= 100:\n raise ValueError(\"percentile should be >=0, <=100; got %r\"\n % self.percentile)\n\n def _get_support_mask(self):\n check_is_fitted(self, 'scores_')\n\n # Cater for NaNs\n if self.percentile == 100:\n return np.ones(len(self.scores_), dtype=np.bool)\n elif self.percentile == 0:\n return np.zeros(len(self.scores_), dtype=np.bool)\n\n scores = _clean_nans(self.scores_)\n treshold = stats.scoreatpercentile(scores,\n 100 - self.percentile)\n mask = scores > treshold\n ties = np.where(scores == treshold)[0]\n if len(ties):\n max_feats = int(len(scores) * self.percentile / 100)\n kept_ties = ties[:max_feats - mask.sum()]\n mask[kept_ties] = True\n return mask\n\n\nclass SelectKBest(_BaseFilter):\n \"\"\"Select features according to the k highest scores.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues) or a single array with scores.\n Default is f_classif (see below \"See also\"). 
The default function only\n works with classification tasks.\n\n k : int or \"all\", optional, default=10\n Number of top features to select.\n The \"all\" option bypasses selection, for use in a parameter search.\n\n Attributes\n ----------\n scores_ : array-like, shape=(n_features,)\n Scores of features.\n\n pvalues_ : array-like, shape=(n_features,)\n p-values of feature scores, None if `score_func` returned only scores.\n\n Notes\n -----\n Ties between features with equal scores will be broken in an unspecified\n way.\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif: Mutual information for a discrete target.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n mutual_info_regression: Mutual information for a continious target.\n SelectPercentile: Select features based on percentile of the highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n GenericUnivariateSelect: Univariate feature selector with configurable mode.\n \"\"\"\n\n def __init__(self, score_func=f_classif, k=10):\n super(SelectKBest, self).__init__(score_func)\n self.k = k\n\n def _check_params(self, X, y):\n if not (self.k == \"all\" or 0 <= self.k <= X.shape[1]):\n raise ValueError(\"k should be >=0, <= n_features; got %r.\"\n \"Use k='all' to return all features.\"\n % self.k)\n\n def _get_support_mask(self):\n check_is_fitted(self, 'scores_')\n\n if self.k == 'all':\n return np.ones(self.scores_.shape, dtype=bool)\n elif self.k == 0:\n return np.zeros(self.scores_.shape, dtype=bool)\n else:\n scores = _clean_nans(self.scores_)\n mask = np.zeros(scores.shape, dtype=bool)\n\n # Request a stable sort. Mergesort takes more memory (~40MB per\n # megafeature on x86-64).\n mask[np.argsort(scores, kind=\"mergesort\")[-self.k:]] = 1\n return mask\n\n\nclass SelectFpr(_BaseFilter):\n \"\"\"Filter: Select the pvalues below alpha based on a FPR test.\n\n FPR test stands for False Positive Rate test. It controls the total\n amount of false detections.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues).\n Default is f_classif (see below \"See also\"). 
The default function only\n works with classification tasks.\n\n alpha : float, optional\n The highest p-value for features to be kept.\n\n Attributes\n ----------\n scores_ : array-like, shape=(n_features,)\n Scores of features.\n\n pvalues_ : array-like, shape=(n_features,)\n p-values of feature scores.\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n mutual_info_classif:\n f_regression: F-value between label/feature for regression tasks.\n mutual_info_regression: Mutual information between features and the target.\n SelectPercentile: Select features based on percentile of the highest scores.\n SelectKBest: Select features based on the k highest scores.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n GenericUnivariateSelect: Univariate feature selector with configurable mode.\n \"\"\"\n\n def __init__(self, score_func=f_classif, alpha=5e-2):\n super(SelectFpr, self).__init__(score_func)\n self.alpha = alpha\n\n def _get_support_mask(self):\n check_is_fitted(self, 'scores_')\n\n return self.pvalues_ < self.alpha\n\n\nclass SelectFdr(_BaseFilter):\n \"\"\"Filter: Select the p-values for an estimated false discovery rate\n\n This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound\n on the expected false discovery rate.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues).\n Default is f_classif (see below \"See also\"). The default function only\n works with classification tasks.\n\n alpha : float, optional\n The highest uncorrected p-value for features to keep.\n\n\n Attributes\n ----------\n scores_ : array-like, shape=(n_features,)\n Scores of features.\n\n pvalues_ : array-like, shape=(n_features,)\n p-values of feature scores.\n\n References\n ----------\n https://en.wikipedia.org/wiki/False_discovery_rate\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif: Mutual information for a discrete target.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n mutual_info_regression: Mutual information for a contnuous target.\n SelectPercentile: Select features based on percentile of the highest scores.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFwe: Select features based on family-wise error rate.\n GenericUnivariateSelect: Univariate feature selector with configurable mode.\n \"\"\"\n\n def __init__(self, score_func=f_classif, alpha=5e-2):\n super(SelectFdr, self).__init__(score_func)\n self.alpha = alpha\n\n def _get_support_mask(self):\n check_is_fitted(self, 'scores_')\n\n n_features = len(self.pvalues_)\n sv = np.sort(self.pvalues_)\n selected = sv[sv <= float(self.alpha) / n_features *\n np.arange(1, n_features + 1)]\n if selected.size == 0:\n return np.zeros_like(self.pvalues_, dtype=bool)\n return self.pvalues_ <= selected.max()\n\n\nclass SelectFwe(_BaseFilter):\n \"\"\"Filter: Select the p-values corresponding to Family-wise error rate\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : 
callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues).\n Default is f_classif (see below \"See also\"). The default function only\n works with classification tasks.\n\n alpha : float, optional\n The highest uncorrected p-value for features to keep.\n\n Attributes\n ----------\n scores_ : array-like, shape=(n_features,)\n Scores of features.\n\n pvalues_ : array-like, shape=(n_features,)\n p-values of feature scores.\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n SelectPercentile: Select features based on percentile of the highest scores.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n GenericUnivariateSelect: Univariate feature selector with configurable mode.\n \"\"\"\n\n def __init__(self, score_func=f_classif, alpha=5e-2):\n super(SelectFwe, self).__init__(score_func)\n self.alpha = alpha\n\n def _get_support_mask(self):\n check_is_fitted(self, 'scores_')\n\n return (self.pvalues_ < self.alpha / len(self.pvalues_))\n\n\n######################################################################\n# Generic filter\n######################################################################\n\n# TODO this class should fit on either p-values or scores,\n# depending on the mode.\nclass GenericUnivariateSelect(_BaseFilter):\n \"\"\"Univariate feature selector with configurable strategy.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n score_func : callable\n Function taking two arrays X and y, and returning a pair of arrays\n (scores, pvalues). 
For modes 'percentile' or 'kbest' it can return\n a single array scores.\n\n mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}\n Feature selection mode.\n\n param : float or int depending on the feature selection mode\n Parameter of the corresponding mode.\n\n Attributes\n ----------\n scores_ : array-like, shape=(n_features,)\n Scores of features.\n\n pvalues_ : array-like, shape=(n_features,)\n p-values of feature scores, None if `score_func` returned scores only.\n\n See also\n --------\n f_classif: ANOVA F-value between label/feature for classification tasks.\n mutual_info_classif: Mutual information for a discrete target.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n f_regression: F-value between label/feature for regression tasks.\n mutual_info_regression: Mutual information for a continuous target.\n SelectPercentile: Select features based on percentile of the highest scores.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n \"\"\"\n\n _selection_modes = {'percentile': SelectPercentile,\n 'k_best': SelectKBest,\n 'fpr': SelectFpr,\n 'fdr': SelectFdr,\n 'fwe': SelectFwe}\n\n def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):\n super(GenericUnivariateSelect, self).__init__(score_func)\n self.mode = mode\n self.param = param\n\n def _make_selector(self):\n selector = self._selection_modes[self.mode](score_func=self.score_func)\n\n # Now perform some acrobatics to set the right named parameter in\n # the selector\n possible_params = selector._get_param_names()\n possible_params.remove('score_func')\n selector.set_params(**{possible_params[0]: self.param})\n\n return selector\n\n def _check_params(self, X, y):\n if self.mode not in self._selection_modes:\n raise ValueError(\"The mode passed should be one of %s, %r,\"\n \" (type %s) was passed.\"\n % (self._selection_modes.keys(), self.mode,\n type(self.mode)))\n\n self._make_selector()._check_params(X, y)\n\n def _get_support_mask(self):\n check_is_fitted(self, 'scores_')\n\n selector = self._make_selector()\n selector.pvalues_ = self.pvalues_\n selector.scores_ = self.scores_\n return selector._get_support_mask()\n",
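All of the selectors defined above follow the same pattern: `score_func` produces per-feature scores (and optionally p-values), and `_get_support_mask` turns them into a boolean feature mask. A short usage sketch, assuming scikit-learn is installed (the iris data is only a convenient non-negative example for `chi2`):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, SelectPercentile, chi2, f_classif

X, y = load_iris(return_X_y=True)

# Keep the 2 features with the highest chi-squared statistic
# (chi2 requires non-negative features, which holds for the iris measurements).
k_best = SelectKBest(score_func=chi2, k=2).fit(X, y)
print("chi2 scores:     ", np.round(k_best.scores_, 2))
print("selected columns:", k_best.get_support(indices=True))

# Equivalent percentile-based selection with the ANOVA F-value.
percentile = SelectPercentile(score_func=f_classif, percentile=50).fit(X, y)
X_reduced = percentile.transform(X)
print("reduced shape:   ", X_reduced.shape)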
"\"\"\"\nDiscrete Fourier Transforms - helper.py\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport collections\nimport threading\n\nfrom numpy.compat import integer_types\nfrom numpy.core import (\n asarray, concatenate, arange, take, integer, empty\n )\n\n# Created by Pearu Peterson, September 2002\n\n__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']\n\ninteger_types = integer_types + (integer,)\n\n\ndef fftshift(x, axes=None):\n \"\"\"\n Shift the zero-frequency component to the center of the spectrum.\n\n This function swaps half-spaces for all axes listed (defaults to all).\n Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axes : int or shape tuple, optional\n Axes over which to shift. Default is None, which shifts all axes.\n\n Returns\n -------\n y : ndarray\n The shifted array.\n\n See Also\n --------\n ifftshift : The inverse of `fftshift`.\n\n Examples\n --------\n >>> freqs = np.fft.fftfreq(10, 0.1)\n >>> freqs\n array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])\n >>> np.fft.fftshift(freqs)\n array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])\n\n Shift the zero-frequency component only along the second axis:\n\n >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n >>> freqs\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n >>> np.fft.fftshift(freqs, axes=(1,))\n array([[ 2., 0., 1.],\n [-4., 3., 4.],\n [-1., -3., -2.]])\n\n \"\"\"\n tmp = asarray(x)\n ndim = len(tmp.shape)\n if axes is None:\n axes = list(range(ndim))\n elif isinstance(axes, integer_types):\n axes = (axes,)\n y = tmp\n for k in axes:\n n = tmp.shape[k]\n p2 = (n+1)//2\n mylist = concatenate((arange(p2, n), arange(p2)))\n y = take(y, mylist, k)\n return y\n\n\ndef ifftshift(x, axes=None):\n \"\"\"\n The inverse of `fftshift`. Although identical for even-length `x`, the\n functions differ by one sample for odd-length `x`.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axes : int or shape tuple, optional\n Axes over which to calculate. Defaults to None, which shifts all axes.\n\n Returns\n -------\n y : ndarray\n The shifted array.\n\n See Also\n --------\n fftshift : Shift zero-frequency component to the center of the spectrum.\n\n Examples\n --------\n >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n >>> freqs\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n >>> np.fft.ifftshift(np.fft.fftshift(freqs))\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n\n \"\"\"\n tmp = asarray(x)\n ndim = len(tmp.shape)\n if axes is None:\n axes = list(range(ndim))\n elif isinstance(axes, integer_types):\n axes = (axes,)\n y = tmp\n for k in axes:\n n = tmp.shape[k]\n p2 = n-(n+1)//2\n mylist = concatenate((arange(p2, n), arange(p2)))\n y = take(y, mylist, k)\n return y\n\n\ndef fftfreq(n, d=1.0):\n \"\"\"\n Return the Discrete Fourier Transform sample frequencies.\n\n The returned float array `f` contains the frequency bin centers in cycles\n per unit of the sample spacing (with zero at the start). For instance, if\n the sample spacing is in seconds, then the frequency unit is cycles/second.\n\n Given a window length `n` and a sample spacing `d`::\n\n f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even\n f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing (inverse of the sampling rate). 
Defaults to 1.\n\n Returns\n -------\n f : ndarray\n Array of length `n` containing the sample frequencies.\n\n Examples\n --------\n >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)\n >>> fourier = np.fft.fft(signal)\n >>> n = signal.size\n >>> timestep = 0.1\n >>> freq = np.fft.fftfreq(n, d=timestep)\n >>> freq\n array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])\n\n \"\"\"\n if not isinstance(n, integer_types):\n raise ValueError(\"n should be an integer\")\n val = 1.0 / (n * d)\n results = empty(n, int)\n N = (n-1)//2 + 1\n p1 = arange(0, N, dtype=int)\n results[:N] = p1\n p2 = arange(-(n//2), 0, dtype=int)\n results[N:] = p2\n return results * val\n #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)\n\n\ndef rfftfreq(n, d=1.0):\n \"\"\"\n Return the Discrete Fourier Transform sample frequencies\n (for usage with rfft, irfft).\n\n The returned float array `f` contains the frequency bin centers in cycles\n per unit of the sample spacing (with zero at the start). For instance, if\n the sample spacing is in seconds, then the frequency unit is cycles/second.\n\n Given a window length `n` and a sample spacing `d`::\n\n f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even\n f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd\n\n Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)\n the Nyquist frequency component is considered to be positive.\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing (inverse of the sampling rate). Defaults to 1.\n\n Returns\n -------\n f : ndarray\n Array of length ``n//2 + 1`` containing the sample frequencies.\n\n Examples\n --------\n >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)\n >>> fourier = np.fft.rfft(signal)\n >>> n = signal.size\n >>> sample_rate = 100\n >>> freq = np.fft.fftfreq(n, d=1./sample_rate)\n >>> freq\n array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.])\n >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)\n >>> freq\n array([ 0., 10., 20., 30., 40., 50.])\n\n \"\"\"\n if not isinstance(n, integer_types):\n raise ValueError(\"n should be an integer\")\n val = 1.0/(n*d)\n N = n//2 + 1\n results = arange(0, N, dtype=int)\n return results * val\n\n\nclass _FFTCache(object):\n \"\"\"\n Cache for the FFT twiddle factors as an LRU (least recently used) cache.\n\n Parameters\n ----------\n max_size_in_mb : int\n Maximum memory usage of the cache before items are being evicted.\n max_item_count : int\n Maximum item count of the cache before items are being evicted.\n\n Notes\n -----\n Items will be evicted if either limit has been reached upon getting and\n setting. The maximum memory usages is not strictly the given\n ``max_size_in_mb`` but rather\n ``max(max_size_in_mb, 1.5 * size_of_largest_item)``. 
Thus the cache will\n never be completely cleared - at least one item will remain and a single\n large item can cause the cache to retain several smaller items even if the\n given maximum cache size has been exceeded.\n \"\"\"\n def __init__(self, max_size_in_mb, max_item_count):\n self._max_size_in_bytes = max_size_in_mb * 1024 ** 2\n self._max_item_count = max_item_count\n self._dict = collections.OrderedDict()\n self._lock = threading.Lock()\n\n def put_twiddle_factors(self, n, factors):\n \"\"\"\n Store twiddle factors for an FFT of length n in the cache.\n\n Putting multiple twiddle factors for a certain n will store it multiple\n times.\n\n Parameters\n ----------\n n : int\n Data length for the FFT.\n factors : ndarray\n The actual twiddle values.\n \"\"\"\n with self._lock:\n # Pop + later add to move it to the end for LRU behavior.\n # Internally everything is stored in a dictionary whose values are\n # lists.\n try:\n value = self._dict.pop(n)\n except KeyError:\n value = []\n value.append(factors)\n self._dict[n] = value\n self._prune_cache()\n\n def pop_twiddle_factors(self, n):\n \"\"\"\n Pop twiddle factors for an FFT of length n from the cache.\n\n Will return None if the requested twiddle factors are not available in\n the cache.\n\n Parameters\n ----------\n n : int\n Data length for the FFT.\n\n Returns\n -------\n out : ndarray or None\n The retrieved twiddle factors if available, else None.\n \"\"\"\n with self._lock:\n if n not in self._dict or not self._dict[n]:\n return None\n # Pop + later add to move it to the end for LRU behavior.\n all_values = self._dict.pop(n)\n value = all_values.pop()\n # Only put pack if there are still some arrays left in the list.\n if all_values:\n self._dict[n] = all_values\n return value\n\n def _prune_cache(self):\n # Always keep at least one item.\n while len(self._dict) > 1 and (\n len(self._dict) > self._max_item_count or self._check_size()):\n self._dict.popitem(last=False)\n\n def _check_size(self):\n item_sizes = [sum(_j.nbytes for _j in _i)\n for _i in self._dict.values() if _i]\n if not item_sizes:\n return False\n max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))\n return sum(item_sizes) > max_size\n",
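`fftfreq`/`rfftfreq` return the frequency-bin centres in cycles per unit of the sample spacing, and `fftshift`/`ifftshift` reorder a spectrum so the zero-frequency bin sits in the middle. A brief sketch of the public `numpy.fft` API documented above (sample values are arbitrary):

import numpy as np

# 8 samples taken every 0.1 s -> sampling rate 10 Hz, frequency resolution 1.25 Hz.
signal = np.array([-2., 8., 6., 4., 1., 0., 3., 5.])
n, d = signal.size, 0.1

freqs = np.fft.fftfreq(n, d=d)        # [0, 1.25, 2.5, 3.75, -5, -3.75, -2.5, -1.25]
spectrum = np.fft.fft(signal)

# Centre the zero-frequency bin (useful for plotting), then undo the shift.
centred = np.fft.fftshift(freqs)
assert np.allclose(np.fft.ifftshift(centred), freqs)

# rfftfreq matches the half-spectrum returned by rfft (Nyquist counted as positive).
print(np.fft.rfftfreq(n, d=d))        # [0, 1.25, 2.5, 3.75, 5.]
print(np.abs(np.fft.rfft(signal)))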
"\"\"\" A Qt API selector that can be used to switch between PyQt and PySide.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os\nimport sys\nfrom matplotlib import rcParams, verbose\n\n# Available APIs.\nQT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1\nQT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API\nQT_API_PYSIDE = 'PySide' # only supports Version 2 API\nQT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim\n\nETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),\n pyqt5=(QT_API_PYQT5, 5))\n# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)\n# If the ETS QT_API environment variable is set, use it, but only\n# if the varible if of the same major QT version. Note that\n# ETS requires the version 2 of PyQt4, which is not the platform\n# default for Python 2.x.\n\nQT_API_ENV = os.environ.get('QT_API')\n\nif rcParams['backend'] == 'Qt5Agg':\n QT_RC_MAJOR_VERSION = 5\nelif rcParams['backend'] == 'Qt4Agg':\n QT_RC_MAJOR_VERSION = 4\nelse:\n # A different backend was specified, but we still got here because a Qt\n # related file was imported. This is allowed, so lets try and guess\n # what we should be using.\n if \"PyQt4\" in sys.modules or \"PySide\" in sys.modules:\n # PyQt4 or PySide is actually used.\n QT_RC_MAJOR_VERSION = 4\n else:\n # This is a fallback: PyQt5\n QT_RC_MAJOR_VERSION = 5\n\nQT_API = None\n\n# check if any binding is already imported, if so silently ignore the\n# rcparams/ENV settings and use what ever is already imported.\nif 'PySide' in sys.modules:\n # user has imported PySide before importing mpl\n QT_API = QT_API_PYSIDE\n\nif 'PyQt4' in sys.modules:\n # user has imported PyQt4 before importing mpl\n # this case also handles the PyQt4v2 case as once sip is imported\n # the API versions can not be changed so do not try\n QT_API = QT_API_PYQT\n\nif 'PyQt5' in sys.modules:\n # the user has imported PyQt5 before importing mpl\n QT_API = QT_API_PYQT5\n\nif (QT_API_ENV is not None) and QT_API is None:\n try:\n QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]\n except KeyError:\n raise RuntimeError(\n ('Unrecognized environment variable %r, valid values are:'\n ' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))\n if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:\n # Only if backend and env qt major version are\n # compatible use the env variable.\n QT_API = ETS[QT_API_ENV][0]\n\n_fallback_to_qt4 = False\nif QT_API is None:\n # No ETS environment or incompatible so use rcParams.\n if rcParams['backend'] == 'Qt5Agg':\n QT_API = rcParams['backend.qt5']\n elif rcParams['backend'] == 'Qt4Agg':\n QT_API = rcParams['backend.qt4']\n else:\n # A non-Qt backend was specified, no version of the Qt\n # bindings is imported, but we still got here because a Qt\n # related file was imported. 
This is allowed, fall back to Qt5\n # using which ever binding the rparams ask for.\n _fallback_to_qt4 = True\n QT_API = rcParams['backend.qt5']\n\n# We will define an appropriate wrapper for the differing versions\n# of file dialog.\n_getSaveFileName = None\n\n# Flag to check if sip could be imported\n_sip_imported = False\n\n# Now perform the imports.\nif QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):\n try:\n import sip\n _sip_imported = True\n except ImportError:\n # Try using PySide\n QT_API = QT_API_PYSIDE\n cond = (\"Could not import sip; falling back on PySide\\n\"\n \"in place of PyQt4 or PyQt5.\\n\")\n verbose.report(cond, 'helpful')\n\nif _sip_imported:\n if QT_API == QT_API_PYQTv2:\n if QT_API_ENV == 'pyqt':\n cond = (\"Found 'QT_API=pyqt' environment variable. \"\n \"Setting PyQt4 API accordingly.\\n\")\n else:\n cond = \"PyQt API v2 specified.\"\n try:\n sip.setapi('QString', 2)\n except:\n res = 'QString API v2 specification failed. Defaulting to v1.'\n verbose.report(cond + res, 'helpful')\n # condition has now been reported, no need to repeat it:\n cond = \"\"\n try:\n sip.setapi('QVariant', 2)\n except:\n res = 'QVariant API v2 specification failed. Defaulting to v1.'\n verbose.report(cond + res, 'helpful')\n if QT_API == QT_API_PYQT5:\n try:\n from PyQt5 import QtCore, QtGui, QtWidgets\n _getSaveFileName = QtWidgets.QFileDialog.getSaveFileName\n except ImportError:\n if _fallback_to_qt4:\n # fell through, tried PyQt5, failed fall back to PyQt4\n QT_API = rcParams['backend.qt4']\n QT_RC_MAJOR_VERSION = 4\n else:\n raise\n\n # needs to be if so we can re-test the value of QT_API which may\n # have been changed in the above if block\n if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API\n from PyQt4 import QtCore, QtGui\n\n try:\n if sip.getapi(\"QString\") > 1:\n # Use new getSaveFileNameAndFilter()\n _getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter\n else:\n\n # Use old getSaveFileName()\n def _getSaveFileName(*args, **kwargs):\n return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),\n None)\n\n except (AttributeError, KeyError):\n\n # call to getapi() can fail in older versions of sip\n def _getSaveFileName(*args, **kwargs):\n return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None\n try:\n # Alias PyQt-specific functions for PySide compatibility.\n QtCore.Signal = QtCore.pyqtSignal\n try:\n QtCore.Slot = QtCore.pyqtSlot\n except AttributeError:\n # Not a perfect match but works in simple cases\n QtCore.Slot = QtCore.pyqtSignature\n\n QtCore.Property = QtCore.pyqtProperty\n __version__ = QtCore.PYQT_VERSION_STR\n except NameError:\n # QtCore did not get imported, fall back to pyside\n QT_API = QT_API_PYSIDE\n\nif QT_API == QT_API_PYSIDE: # try importing pyside\n try:\n from PySide import QtCore, QtGui, __version__, __version_info__\n except ImportError:\n raise ImportError(\n \"Matplotlib qt-based backends require an external PyQt4, PyQt5,\\n\"\n \"or PySide package to be installed, but it was not found.\")\n\n if __version_info__ < (1, 0, 3):\n raise ImportError(\n \"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3\")\n\n _getSaveFileName = QtGui.QFileDialog.getSaveFileName\n\n\n# Apply shim to Qt4 APIs to make them look like Qt5\nif QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):\n '''Import all used QtGui objects into QtWidgets\n\n Here I've opted to simple copy QtGui into QtWidgets as that\n achieves the same result as copying over the objects, and will\n continue to work if other objects are used.\n\n '''\n 
QtWidgets = QtGui\n\n\ndef is_pyqt5():\n return QT_API == QT_API_PYQT5\n",
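The module above chooses a Qt binding by precedence: a binding that is already imported wins, then the `QT_API` environment variable (only if its major version matches the configured backend), then the rcParams default, after which Qt4 names are shimmed to look like Qt5. A standalone sketch of that precedence order, for illustration only — `pick_binding` is a hypothetical helper, not matplotlib's API, and the version-compatibility check is omitted:

import os
import sys

def pick_binding(rc_default="PyQt5"):
    # 1. Respect a binding that has already been imported by the user.
    for candidate in ("PySide", "PyQt4", "PyQt5"):
        if candidate in sys.modules:
            return candidate
    # 2. Otherwise consult the QT_API environment variable, if recognised.
    env = os.environ.get("QT_API", "").lower()
    mapping = {"pyside": "PySide", "pyqt": "PyQt4", "pyqt5": "PyQt5"}
    if env in mapping:
        return mapping[env]
    # 3. Finally fall back to the configured default (rcParams in the real module).
    return rc_default

print(pick_binding())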
"# coding: utf-8\n\n# Author: Johannes Schönberger\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport warnings\n\nfrom ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone\nfrom ..utils import check_random_state, check_array, check_consistent_length\nfrom ..utils.random import sample_without_replacement\nfrom ..utils.validation import check_is_fitted\nfrom .base import LinearRegression\nfrom ..utils.validation import has_fit_parameter\n\n_EPSILON = np.spacing(1)\n\n\ndef _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):\n \"\"\"Determine number trials such that at least one outlier-free subset is\n sampled for the given inlier/outlier ratio.\n\n Parameters\n ----------\n n_inliers : int\n Number of inliers in the data.\n\n n_samples : int\n Total number of samples in the data.\n\n min_samples : int\n Minimum number of samples chosen randomly from original data.\n\n probability : float\n Probability (confidence) that one outlier-free sample is generated.\n\n Returns\n -------\n trials : int\n Number of trials.\n\n \"\"\"\n inlier_ratio = n_inliers / float(n_samples)\n nom = max(_EPSILON, 1 - probability)\n denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)\n if nom == 1:\n return 0\n if denom == 1:\n return float('inf')\n return abs(float(np.ceil(np.log(nom) / np.log(denom))))\n\n\nclass RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):\n \"\"\"RANSAC (RANdom SAmple Consensus) algorithm.\n\n RANSAC is an iterative algorithm for the robust estimation of parameters\n from a subset of inliers from the complete data set. More information can\n be found in the general documentation of linear models.\n\n A detailed description of the algorithm can be found in the documentation\n of the ``linear_model`` sub-package.\n\n Read more in the :ref:`User Guide <ransac_regression>`.\n\n Parameters\n ----------\n base_estimator : object, optional\n Base estimator object which implements the following methods:\n\n * `fit(X, y)`: Fit model to given training data and target values.\n * `score(X, y)`: Returns the mean accuracy on the given test data,\n which is used for the stop criterion defined by `stop_score`.\n Additionally, the score is used to decide which of two equally\n large consensus sets is chosen as the better one.\n\n If `base_estimator` is None, then\n ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for\n target values of dtype float.\n\n Note that the current implementation only supports regression\n estimators.\n\n min_samples : int (>= 1) or float ([0, 1]), optional\n Minimum number of samples chosen randomly from original data. Treated\n as an absolute number of samples for `min_samples >= 1`, treated as a\n relative number `ceil(min_samples * X.shape[0]`) for\n `min_samples < 1`. This is typically chosen as the minimal number of\n samples necessary to estimate the given `base_estimator`. By default a\n ``sklearn.linear_model.LinearRegression()`` estimator is assumed and\n `min_samples` is chosen as ``X.shape[1] + 1``.\n\n residual_threshold : float, optional\n Maximum residual for a data sample to be classified as an inlier.\n By default the threshold is chosen as the MAD (median absolute\n deviation) of the target values `y`.\n\n is_data_valid : callable, optional\n This function is called with the randomly selected data before the\n model is fitted to it: `is_data_valid(X, y)`. 
If its return value is\n False the current randomly chosen sub-sample is skipped.\n\n is_model_valid : callable, optional\n This function is called with the estimated model and the randomly\n selected data: `is_model_valid(model, X, y)`. If its return value is\n False the current randomly chosen sub-sample is skipped.\n Rejecting samples with this function is computationally costlier than\n with `is_data_valid`. `is_model_valid` should therefore only be used if\n the estimated model is needed for making the rejection decision.\n\n max_trials : int, optional\n Maximum number of iterations for random sample selection.\n\n stop_n_inliers : int, optional\n Stop iteration if at least this number of inliers are found.\n\n stop_score : float, optional\n Stop iteration if score is greater equal than this threshold.\n\n stop_probability : float in range [0, 1], optional\n RANSAC iteration stops if at least one outlier-free set of the training\n data is sampled in RANSAC. This requires to generate at least N\n samples (iterations)::\n\n N >= log(1 - probability) / log(1 - e**m)\n\n where the probability (confidence) is typically set to high value such\n as 0.99 (the default) and e is the current fraction of inliers w.r.t.\n the total number of samples.\n\n residual_metric : callable, optional\n Metric to reduce the dimensionality of the residuals to 1 for\n multi-dimensional target values ``y.shape[1] > 1``. By default the sum\n of absolute differences is used::\n\n lambda dy: np.sum(np.abs(dy), axis=1)\n\n NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20\n Use ``loss`` instead.\n\n loss : string, callable, optional, default \"absolute_loss\"\n String inputs, \"absolute_loss\" and \"squared_loss\" are supported which\n find the absolute loss and squared loss per sample\n respectively.\n\n If ``loss`` is a callable, then it should be a function that takes\n two arrays as inputs, the true and predicted value and returns a 1-D\n array with the ``i``th value of the array corresponding to the loss\n on `X[i]`.\n\n If the loss on a sample is greater than the ``residual_threshold``, then\n this sample is classified as an outlier.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n Attributes\n ----------\n estimator_ : object\n Best fitted model (copy of the `base_estimator` object).\n\n n_trials_ : int\n Number of random selection trials until one of the stop criteria is\n met. It is always ``<= max_trials``.\n\n inlier_mask_ : bool array of shape [n_samples]\n Boolean mask of inliers classified as ``True``.\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/RANSAC\n .. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf\n .. 
[3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf\n \"\"\"\n\n def __init__(self, base_estimator=None, min_samples=None,\n residual_threshold=None, is_data_valid=None,\n is_model_valid=None, max_trials=100,\n stop_n_inliers=np.inf, stop_score=np.inf,\n stop_probability=0.99, residual_metric=None,\n loss='absolute_loss', random_state=None):\n\n self.base_estimator = base_estimator\n self.min_samples = min_samples\n self.residual_threshold = residual_threshold\n self.is_data_valid = is_data_valid\n self.is_model_valid = is_model_valid\n self.max_trials = max_trials\n self.stop_n_inliers = stop_n_inliers\n self.stop_score = stop_score\n self.stop_probability = stop_probability\n self.residual_metric = residual_metric\n self.random_state = random_state\n self.loss = loss\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit estimator using RANSAC algorithm.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape [n_samples, n_features]\n Training data.\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values.\n\n sample_weight : array-like, shape = [n_samples]\n Individual weights for each sample\n raises error if sample_weight is passed and base_estimator\n fit method does not support it.\n\n Raises\n ------\n ValueError\n If no valid consensus set could be found. This occurs if\n `is_data_valid` and `is_model_valid` return False for all\n `max_trials` randomly chosen sub-samples.\n\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n y = check_array(y, ensure_2d=False)\n check_consistent_length(X, y)\n\n if self.base_estimator is not None:\n base_estimator = clone(self.base_estimator)\n else:\n base_estimator = LinearRegression()\n\n if self.min_samples is None:\n # assume linear model by default\n min_samples = X.shape[1] + 1\n elif 0 < self.min_samples < 1:\n min_samples = np.ceil(self.min_samples * X.shape[0])\n elif self.min_samples >= 1:\n if self.min_samples % 1 != 0:\n raise ValueError(\"Absolute number of samples must be an \"\n \"integer value.\")\n min_samples = self.min_samples\n else:\n raise ValueError(\"Value for `min_samples` must be scalar and \"\n \"positive.\")\n if min_samples > X.shape[0]:\n raise ValueError(\"`min_samples` may not be larger than number \"\n \"of samples ``X.shape[0]``.\")\n\n if self.stop_probability < 0 or self.stop_probability > 1:\n raise ValueError(\"`stop_probability` must be in range [0, 1].\")\n\n if self.residual_threshold is None:\n # MAD (median absolute deviation)\n residual_threshold = np.median(np.abs(y - np.median(y)))\n else:\n residual_threshold = self.residual_threshold\n\n if self.residual_metric is not None:\n warnings.warn(\n \"'residual_metric' was deprecated in version 0.18 and \"\n \"will be removed in version 0.20. Use 'loss' instead.\",\n DeprecationWarning)\n\n if self.loss == \"absolute_loss\":\n if y.ndim == 1:\n loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)\n else:\n loss_function = lambda \\\n y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)\n\n elif self.loss == \"squared_loss\":\n if y.ndim == 1:\n loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2\n else:\n loss_function = lambda \\\n y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)\n\n elif callable(self.loss):\n loss_function = self.loss\n\n else:\n raise ValueError(\n \"loss should be 'absolute_loss', 'squared_loss' or a callable.\"\n \"Got %s. 
\" % self.loss)\n\n\n random_state = check_random_state(self.random_state)\n\n try: # Not all estimator accept a random_state\n base_estimator.set_params(random_state=random_state)\n except ValueError:\n pass\n\n estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,\n \"sample_weight\")\n estimator_name = type(base_estimator).__name__\n if (sample_weight is not None and not\n estimator_fit_has_sample_weight):\n raise ValueError(\"%s does not support sample_weight. Samples\"\n \" weights are only used for the calibration\"\n \" itself.\" % estimator_name)\n if sample_weight is not None:\n sample_weight = np.asarray(sample_weight)\n\n n_inliers_best = 0\n score_best = np.inf\n inlier_mask_best = None\n X_inlier_best = None\n y_inlier_best = None\n\n # number of data samples\n n_samples = X.shape[0]\n sample_idxs = np.arange(n_samples)\n\n n_samples, _ = X.shape\n\n for self.n_trials_ in range(1, self.max_trials + 1):\n\n # choose random sample set\n subset_idxs = sample_without_replacement(n_samples, min_samples,\n random_state=random_state)\n X_subset = X[subset_idxs]\n y_subset = y[subset_idxs]\n\n # check if random sample set is valid\n if (self.is_data_valid is not None\n and not self.is_data_valid(X_subset, y_subset)):\n continue\n\n # fit model for current random sample set\n if sample_weight is None:\n base_estimator.fit(X_subset, y_subset)\n else:\n base_estimator.fit(X_subset, y_subset,\n sample_weight=sample_weight[subset_idxs])\n\n # check if estimated model is valid\n if (self.is_model_valid is not None and not\n self.is_model_valid(base_estimator, X_subset, y_subset)):\n continue\n\n # residuals of all data for current random sample model\n y_pred = base_estimator.predict(X)\n\n # XXX: Deprecation: Remove this if block in 0.20\n if self.residual_metric is not None:\n diff = y_pred - y\n if diff.ndim == 1:\n diff = diff.reshape(-1, 1)\n residuals_subset = self.residual_metric(diff)\n else:\n residuals_subset = loss_function(y, y_pred)\n\n # classify data into inliers and outliers\n inlier_mask_subset = residuals_subset < residual_threshold\n n_inliers_subset = np.sum(inlier_mask_subset)\n\n # less inliers -> skip current random sample\n if n_inliers_subset < n_inliers_best:\n continue\n if n_inliers_subset == 0:\n raise ValueError(\"No inliers found, possible cause is \"\n \"setting residual_threshold ({0}) too low.\".format(\n self.residual_threshold))\n\n # extract inlier data set\n inlier_idxs_subset = sample_idxs[inlier_mask_subset]\n X_inlier_subset = X[inlier_idxs_subset]\n y_inlier_subset = y[inlier_idxs_subset]\n\n # score of inlier data set\n score_subset = base_estimator.score(X_inlier_subset,\n y_inlier_subset)\n\n # same number of inliers but worse score -> skip current random\n # sample\n if (n_inliers_subset == n_inliers_best\n and score_subset < score_best):\n continue\n\n # save current random sample as best sample\n n_inliers_best = n_inliers_subset\n score_best = score_subset\n inlier_mask_best = inlier_mask_subset\n X_inlier_best = X_inlier_subset\n y_inlier_best = y_inlier_subset\n\n # break if sufficient number of inliers or score is reached\n if (n_inliers_best >= self.stop_n_inliers\n or score_best >= self.stop_score\n or self.n_trials_\n >= _dynamic_max_trials(n_inliers_best, n_samples,\n min_samples,\n self.stop_probability)):\n break\n\n # if none of the iterations met the required criteria\n if inlier_mask_best is None:\n raise ValueError(\n \"RANSAC could not find valid consensus set, because\"\n \" either the `residual_threshold` 
rejected all the samples or\"\n \" `is_data_valid` and `is_model_valid` returned False for all\"\n \" `max_trials` randomly \"\"chosen sub-samples. Consider \"\n \"relaxing the \"\"constraints.\")\n\n # estimate final model using all inliers\n base_estimator.fit(X_inlier_best, y_inlier_best)\n\n self.estimator_ = base_estimator\n self.inlier_mask_ = inlier_mask_best\n return self\n\n def predict(self, X):\n \"\"\"Predict using the estimated model.\n\n This is a wrapper for `estimator_.predict(X)`.\n\n Parameters\n ----------\n X : numpy array of shape [n_samples, n_features]\n\n Returns\n -------\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Returns predicted values.\n \"\"\"\n check_is_fitted(self, 'estimator_')\n\n return self.estimator_.predict(X)\n\n def score(self, X, y):\n \"\"\"Returns the score of the prediction.\n\n This is a wrapper for `estimator_.score(X, y)`.\n\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples, n_features]\n Training data.\n\n y : array, shape = [n_samples] or [n_samples, n_targets]\n Target values.\n\n Returns\n -------\n z : float\n Score of the prediction.\n \"\"\"\n check_is_fitted(self, 'estimator_')\n\n return self.estimator_.score(X, y)\n",
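In practice `RANSACRegressor` is used like any other scikit-learn regressor: fit on data containing outliers, then read the consensus estimator and inlier mask off the fitted object. A small example on synthetic data (the numbers are arbitrary, chosen only to make the outliers obvious):

import numpy as np
from sklearn.linear_model import RANSACRegressor, LinearRegression

rng = np.random.RandomState(0)

# Inliers on the line y = 3x + 1 with mild noise, plus gross outliers.
X = np.sort(rng.uniform(0, 10, size=(100, 1)), axis=0)
y = 3.0 * X.ravel() + 1.0 + rng.normal(scale=0.5, size=100)
y[::10] += rng.uniform(20, 40, size=10)      # corrupt every 10th sample

ransac = RANSACRegressor(base_estimator=LinearRegression(),
                         residual_threshold=2.0,
                         random_state=0).fit(X, y)

print("estimated slope / intercept:",
      ransac.estimator_.coef_, ransac.estimator_.intercept_)
print("inlier ratio:", ransac.inlier_mask_.mean())
print("trials used :", ransac.n_trials_)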
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import map\n\nimport datetime\nimport warnings\nimport tempfile\n\nimport dateutil\nimport pytz\n\ntry:\n # mock in python 3.3+\n from unittest import mock\nexcept ImportError:\n import mock\nfrom nose.tools import assert_raises, assert_equal\nfrom nose.plugins.skip import SkipTest\n\nfrom matplotlib.testing.decorators import image_comparison, cleanup\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\n\n@image_comparison(baseline_images=['date_empty'], extensions=['png'])\ndef test_date_empty():\n # make sure mpl does the right thing when told to plot dates even\n # if no date data has been presented, cf\n # http://sourceforge.net/tracker/?func=detail&aid=2850075&group_id=80706&atid=560720\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.xaxis_date()\n\n\n@image_comparison(baseline_images=['date_axhspan'], extensions=['png'])\ndef test_date_axhspan():\n # test ax hspan with date inputs\n t0 = datetime.datetime(2009, 1, 20)\n tf = datetime.datetime(2009, 1, 21)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.axhspan(t0, tf, facecolor=\"blue\", alpha=0.25)\n ax.set_ylim(t0 - datetime.timedelta(days=5),\n tf + datetime.timedelta(days=5))\n fig.subplots_adjust(left=0.25)\n\n\n@image_comparison(baseline_images=['date_axvspan'], extensions=['png'])\ndef test_date_axvspan():\n # test ax hspan with date inputs\n t0 = datetime.datetime(2000, 1, 20)\n tf = datetime.datetime(2010, 1, 21)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.axvspan(t0, tf, facecolor=\"blue\", alpha=0.25)\n ax.set_xlim(t0 - datetime.timedelta(days=720),\n tf + datetime.timedelta(days=720))\n fig.autofmt_xdate()\n\n\n@image_comparison(baseline_images=['date_axhline'],\n extensions=['png'])\ndef test_date_axhline():\n # test ax hline with date inputs\n t0 = datetime.datetime(2009, 1, 20)\n tf = datetime.datetime(2009, 1, 31)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.axhline(t0, color=\"blue\", lw=3)\n ax.set_ylim(t0 - datetime.timedelta(days=5),\n tf + datetime.timedelta(days=5))\n fig.subplots_adjust(left=0.25)\n\n\n@image_comparison(baseline_images=['date_axvline'],\n extensions=['png'])\ndef test_date_axvline():\n # test ax hline with date inputs\n t0 = datetime.datetime(2000, 1, 20)\n tf = datetime.datetime(2000, 1, 21)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.axvline(t0, color=\"red\", lw=3)\n ax.set_xlim(t0 - datetime.timedelta(days=5),\n tf + datetime.timedelta(days=5))\n fig.autofmt_xdate()\n\n\n@cleanup\ndef test_too_many_date_ticks():\n # Attempt to test SF 2715172, see\n # https://sourceforge.net/tracker/?func=detail&aid=2715172&group_id=80706&atid=560720\n # setting equal datetimes triggers and expander call in\n # transforms.nonsingular which results in too many ticks in the\n # DayLocator. 
This should trigger a Locator.MAXTICKS RuntimeError\n warnings.filterwarnings(\n 'ignore',\n 'Attempting to set identical left==right results\\\\nin singular '\n 'transformations; automatically expanding.\\\\nleft=\\d*\\.\\d*, '\n 'right=\\d*\\.\\d*',\n UserWarning, module='matplotlib.axes')\n t0 = datetime.datetime(2000, 1, 20)\n tf = datetime.datetime(2000, 1, 20)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlim((t0, tf), auto=True)\n ax.plot([], [])\n ax.xaxis.set_major_locator(mdates.DayLocator())\n assert_raises(RuntimeError, fig.savefig, 'junk.png')\n\n\n@image_comparison(baseline_images=['RRuleLocator_bounds'], extensions=['png'])\ndef test_RRuleLocator():\n import matplotlib.testing.jpl_units as units\n units.register()\n\n # This will cause the RRuleLocator to go out of bounds when it tries\n # to add padding to the limits, so we make sure it caps at the correct\n # boundary values.\n t0 = datetime.datetime(1000, 1, 1)\n tf = datetime.datetime(6000, 1, 1)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n ax.set_autoscale_on(True)\n ax.plot([t0, tf], [0.0, 1.0], marker='o')\n\n rrule = mdates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)\n locator = mdates.RRuleLocator(rrule)\n ax.xaxis.set_major_locator(locator)\n ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))\n\n ax.autoscale_view()\n fig.autofmt_xdate()\n\n\n@image_comparison(baseline_images=['DateFormatter_fractionalSeconds'],\n extensions=['png'])\ndef test_DateFormatter():\n import matplotlib.testing.jpl_units as units\n units.register()\n\n # Lets make sure that DateFormatter will allow us to have tick marks\n # at intervals of fractional seconds.\n\n t0 = datetime.datetime(2001, 1, 1, 0, 0, 0)\n tf = datetime.datetime(2001, 1, 1, 0, 0, 1)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n ax.set_autoscale_on(True)\n ax.plot([t0, tf], [0.0, 1.0], marker='o')\n\n # rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )\n # locator = mpldates.RRuleLocator( rrule )\n # ax.xaxis.set_major_locator( locator )\n # ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )\n\n ax.autoscale_view()\n fig.autofmt_xdate()\n\n\ndef test_date_formatter_strftime():\n \"\"\"\n Tests that DateFormatter matches datetime.strftime,\n check microseconds for years before 1900 for bug #3179\n as well as a few related issues for years before 1900.\n \"\"\"\n def test_strftime_fields(dt):\n \"\"\"For datetime object dt, check DateFormatter fields\"\"\"\n # Note: the last couple of %%s are to check multiple %s are handled\n # properly; %% should get replaced by %.\n formatter = mdates.DateFormatter(\"%w %d %m %y %Y %H %I %M %S %%%f %%x\")\n # Compute date fields without using datetime.strftime,\n # since datetime.strftime does not work before year 1900\n formatted_date_str = (\n \"{weekday} {day:02d} {month:02d} {year:02d} {full_year:04d} \"\n \"{hour24:02d} {hour12:02d} {minute:02d} {second:02d} \"\n \"%{microsecond:06d} %x\"\n .format(\n # weeknum=dt.isocalendar()[1], # %U/%W {weeknum:02d}\n # %w Sunday=0, weekday() Monday=0\n weekday=str((dt.weekday() + 1) % 7),\n day=dt.day,\n month=dt.month,\n year=dt.year % 100,\n full_year=dt.year,\n hour24=dt.hour,\n hour12=((dt.hour-1) % 12) + 1,\n minute=dt.minute,\n second=dt.second,\n microsecond=dt.microsecond))\n assert_equal(formatter.strftime(dt), formatted_date_str)\n\n try:\n # Test strftime(\"%x\") with the current locale.\n import locale # Might not exist on some platforms, such as Windows\n locale_formatter = 
mdates.DateFormatter(\"%x\")\n locale_d_fmt = locale.nl_langinfo(locale.D_FMT)\n expanded_formatter = mdates.DateFormatter(locale_d_fmt)\n assert_equal(locale_formatter.strftime(dt),\n expanded_formatter.strftime(dt))\n except (ImportError, AttributeError):\n pass\n\n for year in range(1, 3000, 71):\n # Iterate through random set of years\n test_strftime_fields(datetime.datetime(year, 1, 1))\n test_strftime_fields(datetime.datetime(year, 2, 3, 4, 5, 6, 12345))\n\n\ndef test_date_formatter_callable():\n scale = -11\n locator = mock.Mock(_get_unit=mock.Mock(return_value=scale))\n callable_formatting_function = (lambda dates, _:\n [dt.strftime('%d-%m//%Y') for dt in dates])\n\n formatter = mdates.AutoDateFormatter(locator)\n formatter.scaled[-10] = callable_formatting_function\n assert_equal(formatter([datetime.datetime(2014, 12, 25)]),\n ['25-12//2014'])\n\n\ndef test_drange():\n \"\"\"\n This test should check if drange works as expected, and if all the\n rounding errors are fixed\n \"\"\"\n start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)\n end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)\n delta = datetime.timedelta(hours=1)\n # We expect 24 values in drange(start, end, delta), because drange returns\n # dates from an half open interval [start, end)\n assert_equal(24, len(mdates.drange(start, end, delta)))\n\n # if end is a little bit later, we expect the range to contain one element\n # more\n end = end + datetime.timedelta(microseconds=1)\n assert_equal(25, len(mdates.drange(start, end, delta)))\n\n # reset end\n end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)\n\n # and tst drange with \"complicated\" floats:\n # 4 hours = 1/6 day, this is an \"dangerous\" float\n delta = datetime.timedelta(hours=4)\n daterange = mdates.drange(start, end, delta)\n assert_equal(6, len(daterange))\n assert_equal(mdates.num2date(daterange[-1]), end - delta)\n\n\n@cleanup\ndef test_empty_date_with_year_formatter():\n # exposes sf bug 2861426:\n # https://sourceforge.net/tracker/?func=detail&aid=2861426&group_id=80706&atid=560720\n\n # update: I am no longer believe this is a bug, as I commented on\n # the tracker. 
The question is now: what to do with this test\n\n import matplotlib.dates as dates\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n yearFmt = dates.DateFormatter('%Y')\n ax.xaxis.set_major_formatter(yearFmt)\n\n with tempfile.TemporaryFile() as fh:\n assert_raises(ValueError, fig.savefig, fh)\n\n\ndef test_auto_date_locator():\n def _create_auto_date_locator(date1, date2):\n locator = mdates.AutoDateLocator()\n locator.create_dummy_axis()\n locator.set_view_interval(mdates.date2num(date1),\n mdates.date2num(date2))\n return locator\n\n d1 = datetime.datetime(1990, 1, 1)\n results = ([datetime.timedelta(weeks=52 * 200),\n ['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',\n '2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',\n '2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',\n '2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',\n '2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']\n ],\n [datetime.timedelta(weeks=52),\n ['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',\n '1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',\n '1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',\n '1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',\n '1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',\n '1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']\n ],\n [datetime.timedelta(days=141),\n ['1990-01-05 00:00:00+00:00', '1990-01-26 00:00:00+00:00',\n '1990-02-16 00:00:00+00:00', '1990-03-09 00:00:00+00:00',\n '1990-03-30 00:00:00+00:00', '1990-04-20 00:00:00+00:00',\n '1990-05-11 00:00:00+00:00']\n ],\n [datetime.timedelta(days=40),\n ['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',\n '1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',\n '1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']\n ],\n [datetime.timedelta(hours=40),\n ['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',\n '1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',\n '1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',\n '1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',\n '1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',\n '1990-01-02 16:00:00+00:00']\n ],\n [datetime.timedelta(minutes=20),\n ['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',\n '1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',\n '1990-01-01 00:20:00+00:00']\n\n ],\n [datetime.timedelta(seconds=40),\n ['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',\n '1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',\n '1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',\n '1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',\n '1990-01-01 00:00:40+00:00']\n ],\n [datetime.timedelta(microseconds=1500),\n ['1989-12-31 23:59:59.999507+00:00',\n '1990-01-01 00:00:00+00:00',\n '1990-01-01 00:00:00.000502+00:00',\n '1990-01-01 00:00:00.001005+00:00',\n '1990-01-01 00:00:00.001508+00:00']\n ],\n )\n\n for t_delta, expected in results:\n d2 = d1 + t_delta\n locator = _create_auto_date_locator(d1, d2)\n assert_equal(list(map(str, mdates.num2date(locator()))),\n expected)\n\n\n@image_comparison(baseline_images=['date_inverted_limit'],\n extensions=['png'])\ndef test_date_inverted_limit():\n # test ax hline with date inputs\n t0 = datetime.datetime(2009, 1, 20)\n tf = datetime.datetime(2009, 1, 31)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.axhline(t0, color=\"blue\", lw=3)\n ax.set_ylim(t0 - datetime.timedelta(days=5),\n tf + datetime.timedelta(days=5))\n ax.invert_yaxis()\n 
fig.subplots_adjust(left=0.25)\n\n\ndef _test_date2num_dst(date_range, tz_convert):\n # Timezones\n BRUSSELS = pytz.timezone('Europe/Brussels')\n UTC = pytz.UTC\n\n # Create a list of timezone-aware datetime objects in UTC\n # Interval is 0b0.0000011 days, to prevent float rounding issues\n dtstart = datetime.datetime(2014, 3, 30, 0, 0, tzinfo=UTC)\n interval = datetime.timedelta(minutes=33, seconds=45)\n interval_days = 0.0234375 # 2025 / 86400 seconds\n N = 8\n\n dt_utc = date_range(start=dtstart, freq=interval, periods=N)\n dt_bxl = tz_convert(dt_utc, BRUSSELS)\n\n expected_ordinalf = [735322.0 + (i * interval_days) for i in range(N)]\n actual_ordinalf = list(mdates.date2num(dt_bxl))\n\n assert_equal(actual_ordinalf, expected_ordinalf)\n\n\ndef test_date2num_dst():\n # Test for github issue #3896, but in date2num around DST transitions\n # with a timezone-aware pandas date_range object.\n\n class dt_tzaware(datetime.datetime):\n \"\"\"\n This bug specifically occurs because of the normalization behavior of\n pandas Timestamp objects, so in order to replicate it, we need a\n datetime-like object that applies timezone normalization after\n subtraction.\n \"\"\"\n def __sub__(self, other):\n r = super(dt_tzaware, self).__sub__(other)\n tzinfo = getattr(r, 'tzinfo', None)\n\n if tzinfo is not None:\n localizer = getattr(tzinfo, 'normalize', None)\n if localizer is not None:\n r = tzinfo.normalize(r)\n\n if isinstance(r, datetime.datetime):\n r = self.mk_tzaware(r)\n\n return r\n\n def __add__(self, other):\n return self.mk_tzaware(super(dt_tzaware, self).__add__(other))\n\n def astimezone(self, tzinfo):\n dt = super(dt_tzaware, self).astimezone(tzinfo)\n return self.mk_tzaware(dt)\n\n @classmethod\n def mk_tzaware(cls, datetime_obj):\n kwargs = {}\n attrs = ('year',\n 'month',\n 'day',\n 'hour',\n 'minute',\n 'second',\n 'microsecond',\n 'tzinfo')\n\n for attr in attrs:\n val = getattr(datetime_obj, attr, None)\n if val is not None:\n kwargs[attr] = val\n\n return cls(**kwargs)\n\n # Define a date_range function similar to pandas.date_range\n def date_range(start, freq, periods):\n dtstart = dt_tzaware.mk_tzaware(start)\n\n return [dtstart + (i * freq) for i in range(periods)]\n\n # Define a tz_convert function that converts a list to a new time zone.\n def tz_convert(dt_list, tzinfo):\n return [d.astimezone(tzinfo) for d in dt_list]\n\n _test_date2num_dst(date_range, tz_convert)\n\n\ndef test_date2num_dst_pandas():\n # Test for github issue #3896, but in date2num around DST transitions\n # with a timezone-aware pandas date_range object.\n try:\n import pandas as pd\n except ImportError:\n raise SkipTest('pandas not installed')\n\n def tz_convert(*args):\n return pd.DatetimeIndex.tz_convert(*args).astype(object)\n\n _test_date2num_dst(pd.date_range, tz_convert)\n\n\ndef test_DayLocator():\n assert_raises(ValueError, mdates.DayLocator, interval=-1)\n assert_raises(ValueError, mdates.DayLocator, interval=-1.5)\n assert_raises(ValueError, mdates.DayLocator, interval=0)\n assert_raises(ValueError, mdates.DayLocator, interval=1.3)\n mdates.DayLocator(interval=1.0)\n\n\ndef test_tz_utc():\n dt = datetime.datetime(1970, 1, 1, tzinfo=mdates.UTC)\n dt.tzname()\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=['-s', '--with-doctest'], exit=False)\n",
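The test module above exercises matplotlib's date machinery (DateFormatter, AutoDateLocator, RRuleLocator, drange, date2num). A minimal usage sketch, assuming a synthetic daily series and a headless Agg backend, of how those pieces are normally wired onto an axis:

```python
# Sketch only: tick and label a time axis with matplotlib.dates.
# The synthetic series and the output file name are assumptions.
import datetime

import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np

start = datetime.datetime(2000, 1, 1)
end = datetime.datetime(2000, 3, 1)
days = mdates.drange(start, end, datetime.timedelta(days=1))  # float date numbers

fig, ax = plt.subplots()
ax.plot_date(days, np.arange(len(days)), "-")

locator = mdates.AutoDateLocator()  # picks a sensible tick interval
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
fig.autofmt_xdate()  # rotate tick labels to avoid overlap
fig.savefig("dates_sketch.png")
```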
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.texmanager import TexManager\n\n\ndef test_fontconfig_preamble():\n \"\"\"\n Test that the preamble is included in _fontconfig\n \"\"\"\n plt.rcParams['text.usetex'] = True\n\n tm1 = TexManager()\n font_config1 = tm1.get_font_config()\n\n plt.rcParams['text.latex.preamble'] = ['\\\\usepackage{txfonts}']\n tm2 = TexManager()\n font_config2 = tm2.get_font_config()\n\n assert font_config1 != font_config2\n",
"\"\"\"\nClass for outlier detection.\n\nThis class provides a framework for outlier detection. It consists in\nseveral methods that can be added to a covariance estimator in order to\nassess the outlying-ness of the observations of a data set.\nSuch a \"outlier detector\" object is proposed constructed from a robust\ncovariance estimator (the Minimum Covariance Determinant).\n\n\"\"\"\n# Author: Virgile Fritsch <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport scipy as sp\nfrom . import MinCovDet\nfrom ..base import ClassifierMixin\nfrom ..utils.validation import check_is_fitted\n\n\nclass OutlierDetectionMixin(object):\n \"\"\"Set of methods for outliers detection with covariance estimators.\n\n Parameters\n ----------\n contamination : float, 0. < contamination < 0.5\n The amount of contamination of the data set, i.e. the proportion\n of outliers in the data set.\n\n Notes\n -----\n Outlier detection from covariance estimation may break or not\n perform well in high-dimensional settings. In particular, one will\n always take care to work with ``n_samples > n_features ** 2``.\n\n \"\"\"\n def __init__(self, contamination=0.1):\n self.contamination = contamination\n\n def decision_function(self, X, raw_values=False):\n \"\"\"Compute the decision function of the given observations.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n raw_values : bool\n Whether or not to consider raw Mahalanobis distances as the\n decision function. Must be False (default) for compatibility\n with the others outlier detection tools.\n\n Returns\n -------\n decision : array-like, shape (n_samples, )\n The values of the decision function for each observations.\n It is equal to the Mahalanobis distances if `raw_values`\n is True. 
By default (``raw_values=True``), it is equal\n to the cubic root of the shifted Mahalanobis distances.\n In that case, the threshold for being an outlier is 0, which\n ensures a compatibility with other outlier detection tools\n such as the One-Class SVM.\n\n \"\"\"\n check_is_fitted(self, 'threshold_')\n mahal_dist = self.mahalanobis(X)\n if raw_values:\n decision = mahal_dist\n else:\n check_is_fitted(self, 'threshold_')\n transformed_mahal_dist = mahal_dist ** 0.33\n decision = self.threshold_ ** 0.33 - transformed_mahal_dist\n\n return decision\n\n def predict(self, X):\n \"\"\"Outlyingness of observations in X according to the fitted model.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n\n Returns\n -------\n is_outliers : array, shape = (n_samples, ), dtype = bool\n For each observations, tells whether or not it should be considered\n as an outlier according to the fitted model.\n\n threshold : float,\n The values of the less outlying point's decision function.\n\n \"\"\"\n check_is_fitted(self, 'threshold_')\n is_inlier = -np.ones(X.shape[0], dtype=int)\n if self.contamination is not None:\n values = self.decision_function(X, raw_values=True)\n is_inlier[values <= self.threshold_] = 1\n else:\n raise NotImplementedError(\"You must provide a contamination rate.\")\n\n return is_inlier\n\n\nclass EllipticEnvelope(ClassifierMixin, OutlierDetectionMixin, MinCovDet):\n \"\"\"An object for detecting outliers in a Gaussian distributed dataset.\n\n Read more in the :ref:`User Guide <outlier_detection>`.\n\n Parameters\n ----------\n store_precision : bool\n Specify if the estimated precision is stored.\n\n assume_centered : Boolean\n If True, the support of robust location and covariance estimates\n is computed, and a covariance estimate is recomputed from it,\n without centering the data.\n Useful to work with data whose mean is significantly equal to\n zero but is not exactly zero.\n If False, the robust location and covariance are directly computed\n with the FastMCD algorithm without additional treatment.\n\n support_fraction : float, 0 < support_fraction < 1\n The proportion of points to be included in the support of the raw\n MCD estimate. Default is ``None``, which implies that the minimum\n value of support_fraction will be used within the algorithm:\n `[n_sample + n_features + 1] / 2`.\n\n contamination : float, 0. < contamination < 0.5\n The amount of contamination of the data set, i.e. the proportion\n of outliers in the data set.\n\n Attributes\n ----------\n location_ : array-like, shape (n_features,)\n Estimated robust location\n\n covariance_ : array-like, shape (n_features, n_features)\n Estimated robust covariance matrix\n\n precision_ : array-like, shape (n_features, n_features)\n Estimated pseudo inverse matrix.\n (stored only if store_precision is True)\n\n support_ : array-like, shape (n_samples,)\n A mask of the observations that have been used to compute the\n robust estimates of location and shape.\n\n See Also\n --------\n EmpiricalCovariance, MinCovDet\n\n Notes\n -----\n Outlier detection from covariance estimation may break or not\n perform well in high-dimensional settings. In particular, one will\n always take care to work with ``n_samples > n_features ** 2``.\n\n References\n ----------\n .. [1] Rousseeuw, P.J., Van Driessen, K. 
\"A fast algorithm for the minimum\n covariance determinant estimator\" Technometrics 41(3), 212 (1999)\n\n \"\"\"\n def __init__(self, store_precision=True, assume_centered=False,\n support_fraction=None, contamination=0.1,\n random_state=None):\n MinCovDet.__init__(self, store_precision=store_precision,\n assume_centered=assume_centered,\n support_fraction=support_fraction,\n random_state=random_state)\n OutlierDetectionMixin.__init__(self, contamination=contamination)\n\n def fit(self, X, y=None):\n MinCovDet.fit(self, X)\n self.threshold_ = sp.stats.scoreatpercentile(\n self.dist_, 100. * (1. - self.contamination))\n return self\n",
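A minimal usage sketch for the EllipticEnvelope estimator defined above, assuming a toy 2-D Gaussian blob with a handful of injected outliers and a 10% contamination setting:

```python
# Sketch only: fit EllipticEnvelope and flag outliers in roughly Gaussian data.
# The toy data and the contamination value are assumptions for illustration.
import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X_inliers = rng.normal(loc=0.0, scale=1.0, size=(200, 2))
X_outliers = rng.uniform(low=-6.0, high=6.0, size=(20, 2))
X = np.vstack([X_inliers, X_outliers])

detector = EllipticEnvelope(contamination=0.1).fit(X)
labels = detector.predict(X)            # 1 for inliers, -1 for outliers
scores = detector.decision_function(X)  # negative values fall outside the envelope

print("points flagged as outliers:", int((labels == -1).sum()))
```

As the Notes in the source caution, this kind of covariance-based detector is only reliable when `n_samples` comfortably exceeds `n_features ** 2`, which the toy data above satisfies.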
"\"\"\"\n=============================================================\nSpatial algorithms and data structures (:mod:`scipy.spatial`)\n=============================================================\n\n.. currentmodule:: scipy.spatial\n\nNearest-neighbor Queries\n========================\n.. autosummary::\n :toctree: generated/\n\n KDTree -- class for efficient nearest-neighbor queries\n cKDTree -- class for efficient nearest-neighbor queries (faster impl.)\n distance -- module containing many different distance measures\n Rectangle\n\nDelaunay Triangulation, Convex Hulls and Voronoi Diagrams\n=========================================================\n\n.. autosummary::\n :toctree: generated/\n\n Delaunay -- compute Delaunay triangulation of input points\n ConvexHull -- compute a convex hull for input points\n Voronoi -- compute a Voronoi diagram hull from input points\n SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere\n HalfspaceIntersection -- compute the intersection points of input halfspaces\n\nPlotting Helpers\n================\n\n.. autosummary::\n :toctree: generated/\n\n delaunay_plot_2d -- plot 2-D triangulation\n convex_hull_plot_2d -- plot 2-D convex hull\n voronoi_plot_2d -- plot 2-D voronoi diagram\n\n.. seealso:: :ref:`Tutorial <qhulltutorial>`\n\n\nSimplex representation\n======================\nThe simplices (triangles, tetrahedra, ...) appearing in the Delaunay\ntesselation (N-dim simplices), convex hull facets, and Voronoi ridges\n(N-1 dim simplices) are represented in the following scheme::\n\n tess = Delaunay(points)\n hull = ConvexHull(points)\n voro = Voronoi(points)\n\n # coordinates of the j-th vertex of the i-th simplex\n tess.points[tess.simplices[i, j], :] # tesselation element\n hull.points[hull.simplices[i, j], :] # convex hull facet\n voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells\n\nFor Delaunay triangulations and convex hulls, the neighborhood\nstructure of the simplices satisfies the condition:\n\n ``tess.neighbors[i,j]`` is the neighboring simplex of the i-th\n simplex, opposite to the j-vertex. It is -1 in case of no\n neighbor.\n\nConvex hull facets also define a hyperplane equation::\n\n (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0\n\nSimilar hyperplane equations for the Delaunay triangulation correspond\nto the convex hull facets on the corresponding N+1 dimensional\nparaboloid.\n\nThe Delaunay triangulation objects offer a method for locating the\nsimplex containing a given point, and barycentric coordinate\ncomputations.\n\nFunctions\n---------\n\n.. autosummary::\n :toctree: generated/\n\n tsearch\n distance_matrix\n minkowski_distance\n minkowski_distance_p\n procrustes\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom .kdtree import *\nfrom .ckdtree import *\nfrom .qhull import *\nfrom ._spherical_voronoi import SphericalVoronoi\nfrom ._plotutils import *\nfrom ._procrustes import procrustes\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n__all__ += ['distance']\n\nfrom . import distance\nfrom numpy.testing import Tester\ntest = Tester().test\n",
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy.linalg import lstsq\nfrom math import factorial\nfrom scipy.ndimage import convolve1d\nfrom ._arraytools import axis_slice\n\n\ndef savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,\n use=\"conv\"):\n \"\"\"Compute the coefficients for a 1-d Savitzky-Golay FIR filter.\n\n Parameters\n ----------\n window_length : int\n The length of the filter window (i.e. the number of coefficients).\n `window_length` must be an odd positive integer.\n polyorder : int\n The order of the polynomial used to fit the samples.\n `polyorder` must be less than `window_length`.\n deriv : int, optional\n The order of the derivative to compute. This must be a\n nonnegative integer. The default is 0, which means to filter\n the data without differentiating.\n delta : float, optional\n The spacing of the samples to which the filter will be applied.\n This is only used if deriv > 0.\n pos : int or None, optional\n If pos is not None, it specifies evaluation position within the\n window. The default is the middle of the window.\n use : str, optional\n Either 'conv' or 'dot'. This argument chooses the order of the\n coefficients. The default is 'conv', which means that the\n coefficients are ordered to be used in a convolution. With\n use='dot', the order is reversed, so the filter is applied by\n dotting the coefficients with the data set.\n\n Returns\n -------\n coeffs : 1-d ndarray\n The filter coefficients.\n\n References\n ----------\n A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by\n Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),\n pp 1627-1639.\n\n See Also\n --------\n savgol_filter\n\n Notes\n -----\n\n .. versionadded:: 0.14.0\n\n Examples\n --------\n >>> from scipy.signal import savgol_coeffs\n >>> savgol_coeffs(5, 2)\n array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429])\n >>> savgol_coeffs(5, 2, deriv=1)\n array([ 2.00000000e-01, 1.00000000e-01, 2.00607895e-16,\n -1.00000000e-01, -2.00000000e-01])\n\n Note that use='dot' simply reverses the coefficients.\n\n >>> savgol_coeffs(5, 2, pos=3)\n array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714])\n >>> savgol_coeffs(5, 2, pos=3, use='dot')\n array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286])\n\n `x` contains data from the parabola x = t**2, sampled at\n t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the\n derivative at the last position. 
When dotted with `x` the result should\n be 6.\n\n >>> x = np.array([1, 0, 1, 4, 9])\n >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot')\n >>> c.dot(x)\n 6.0000000000000018\n \"\"\"\n\n # An alternative method for finding the coefficients when deriv=0 is\n # t = np.arange(window_length)\n # unit = (t == pos).astype(int)\n # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t)\n # The method implemented here is faster.\n\n # To recreate the table of sample coefficients shown in the chapter on\n # the Savitzy-Golay filter in the Numerical Recipes book, use\n # window_length = nL + nR + 1\n # pos = nL + 1\n # c = savgol_coeffs(window_length, M, pos=pos, use='dot')\n\n if polyorder >= window_length:\n raise ValueError(\"polyorder must be less than window_length.\")\n\n halflen, rem = divmod(window_length, 2)\n\n if rem == 0:\n raise ValueError(\"window_length must be odd.\")\n\n if pos is None:\n pos = halflen\n\n if not (0 <= pos < window_length):\n raise ValueError(\"pos must be nonnegative and less than \"\n \"window_length.\")\n\n if use not in ['conv', 'dot']:\n raise ValueError(\"`use` must be 'conv' or 'dot'\")\n\n # Form the design matrix A. The columns of A are powers of the integers\n # from -pos to window_length - pos - 1. The powers (i.e. rows) range\n # from 0 to polyorder. (That is, A is a vandermonde matrix, but not\n # necessarily square.)\n x = np.arange(-pos, window_length - pos, dtype=float)\n if use == \"conv\":\n # Reverse so that result can be used in a convolution.\n x = x[::-1]\n\n order = np.arange(polyorder + 1).reshape(-1, 1)\n A = x ** order\n\n # y determines which order derivative is returned.\n y = np.zeros(polyorder + 1)\n # The coefficient assigned to y[deriv] scales the result to take into\n # account the order of the derivative and the sample spacing.\n y[deriv] = factorial(deriv) / (delta ** deriv)\n\n # Find the least-squares solution of A*c = y\n coeffs, _, _, _ = lstsq(A, y)\n\n return coeffs\n\n\ndef _polyder(p, m):\n \"\"\"Differentiate polynomials represented with coefficients.\n\n p must be a 1D or 2D array. In the 2D case, each column gives\n the coefficients of a polynomial; the first row holds the coefficients\n associated with the highest power. m must be a nonnegative integer.\n (numpy.polyder doesn't handle the 2D case.)\n \"\"\"\n\n if m == 0:\n result = p\n else:\n n = len(p)\n if n <= m:\n result = np.zeros_like(p[:1, ...])\n else:\n dp = p[:-m].copy()\n for k in range(m):\n rng = np.arange(n - k - 1, m - k - 1, -1)\n dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))\n result = dp\n return result\n\n\ndef _fit_edge(x, window_start, window_stop, interp_start, interp_stop,\n axis, polyorder, deriv, delta, y):\n \"\"\"\n Given an n-d array `x` and the specification of a slice of `x` from\n `window_start` to `window_stop` along `axis`, create an interpolating\n polynomial of each 1-d slice, and evaluate that polynomial in the slice\n from `interp_start` to `interp_stop`. Put the result into the\n corresponding slice of `y`.\n \"\"\"\n\n # Get the edge into a (window_length, -1) array.\n x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)\n if axis == 0 or axis == -x.ndim:\n xx_edge = x_edge\n swapped = False\n else:\n xx_edge = x_edge.swapaxes(axis, 0)\n swapped = True\n xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)\n\n # Fit the edges. 
poly_coeffs has shape (polyorder + 1, -1),\n # where '-1' is the same as in xx_edge.\n poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),\n xx_edge, polyorder)\n\n if deriv > 0:\n poly_coeffs = _polyder(poly_coeffs, deriv)\n\n # Compute the interpolated values for the edge.\n i = np.arange(interp_start - window_start, interp_stop - window_start)\n values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)\n\n # Now put the values into the appropriate slice of y.\n # First reshape values to match y.\n shp = list(y.shape)\n shp[0], shp[axis] = shp[axis], shp[0]\n values = values.reshape(interp_stop - interp_start, *shp[1:])\n if swapped:\n values = values.swapaxes(0, axis)\n # Get a view of the data to be replaced by values.\n y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)\n y_edge[...] = values\n\n\ndef _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):\n \"\"\"\n Use polynomial interpolation of x at the low and high ends of the axis\n to fill in the halflen values in y.\n\n This function just calls _fit_edge twice, once for each end of the axis.\n \"\"\"\n halflen = window_length // 2\n _fit_edge(x, 0, window_length, 0, halflen, axis,\n polyorder, deriv, delta, y)\n n = x.shape[axis]\n _fit_edge(x, n - window_length, n, n - halflen, n, axis,\n polyorder, deriv, delta, y)\n\n\ndef savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,\n axis=-1, mode='interp', cval=0.0):\n \"\"\" Apply a Savitzky-Golay filter to an array.\n\n This is a 1-d filter. If `x` has dimension greater than 1, `axis`\n determines the axis along which the filter is applied.\n\n Parameters\n ----------\n x : array_like\n The data to be filtered. If `x` is not a single or double precision\n floating point array, it will be converted to type `numpy.float64`\n before filtering.\n window_length : int\n The length of the filter window (i.e. the number of coefficients).\n `window_length` must be a positive odd integer.\n polyorder : int\n The order of the polynomial used to fit the samples.\n `polyorder` must be less than `window_length`.\n deriv : int, optional\n The order of the derivative to compute. This must be a\n nonnegative integer. The default is 0, which means to filter\n the data without differentiating.\n delta : float, optional\n The spacing of the samples to which the filter will be applied.\n This is only used if deriv > 0. Default is 1.0.\n axis : int, optional\n The axis of the array `x` along which the filter is to be applied.\n Default is -1.\n mode : str, optional\n Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This\n determines the type of extension to use for the padded signal to\n which the filter is applied. When `mode` is 'constant', the padding\n value is given by `cval`. See the Notes for more details on 'mirror',\n 'constant', 'wrap', and 'nearest'.\n When the 'interp' mode is selected (the default), no extension\n is used. Instead, a degree `polyorder` polynomial is fit to the\n last `window_length` values of the edges, and this polynomial is\n used to evaluate the last `window_length // 2` output values.\n cval : scalar, optional\n Value to fill past the edges of the input if `mode` is 'constant'.\n Default is 0.0.\n\n Returns\n -------\n y : ndarray, same shape as `x`\n The filtered data.\n\n See Also\n --------\n savgol_coeffs\n\n Notes\n -----\n Details on the `mode` options:\n\n 'mirror':\n Repeats the values at the edges in reverse order. 
The value\n closest to the edge is not included.\n 'nearest':\n The extension contains the nearest input value.\n 'constant':\n The extension contains the value given by the `cval` argument.\n 'wrap':\n The extension contains the values from the other end of the array.\n\n For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and\n `window_length` is 7, the following shows the extended data for\n the various `mode` options (assuming `cval` is 0)::\n\n mode | Ext | Input | Ext\n -----------+---------+------------------------+---------\n 'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5\n 'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8\n 'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0\n 'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3\n\n .. versionadded:: 0.14.0\n\n Examples\n --------\n >>> from scipy.signal import savgol_filter\n >>> np.set_printoptions(precision=2) # For compact display.\n >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])\n\n Filter with a window length of 5 and a degree 2 polynomial. Use\n the defaults for all other parameters.\n\n >>> savgol_filter(x, 5, 2)\n array([ 1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ])\n\n Note that the last five values in x are samples of a parabola, so\n when mode='interp' (the default) is used with polyorder=2, the last\n three values are unchanged. Compare that to, for example,\n `mode='nearest'`:\n\n >>> savgol_filter(x, 5, 2, mode='nearest')\n array([ 1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97])\n\n \"\"\"\n if mode not in [\"mirror\", \"constant\", \"nearest\", \"interp\", \"wrap\"]:\n raise ValueError(\"mode must be 'mirror', 'constant', 'nearest' \"\n \"'wrap' or 'interp'.\")\n\n x = np.asarray(x)\n # Ensure that x is either single or double precision floating point.\n if x.dtype != np.float64 and x.dtype != np.float32:\n x = x.astype(np.float64)\n\n coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)\n\n if mode == \"interp\":\n # Do not pad. Instead, for the elements within `window_length // 2`\n # of the ends of the sequence, use the polynomial that is fitted to\n # the last `window_length` elements.\n y = convolve1d(x, coeffs, axis=axis, mode=\"constant\")\n _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)\n else:\n # Any mode other than 'interp' is passed on to ndimage.convolve1d.\n y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)\n\n return y\n",
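A small usage sketch for savgol_coeffs and savgol_filter as defined above, smoothing an assumed noisy sine wave and estimating its first derivative:

```python
# Sketch only: Savitzky-Golay smoothing and differentiation.
# The sample signal and the window/polyorder choices are assumptions.
import numpy as np
from scipy.signal import savgol_coeffs, savgol_filter

t = np.linspace(0, 2 * np.pi, 200)
dt = t[1] - t[0]
rng = np.random.RandomState(0)
noisy = np.sin(t) + rng.normal(scale=0.1, size=t.shape)

smoothed = savgol_filter(noisy, window_length=31, polyorder=3)
# deriv=1 together with delta=dt gives an estimate of the derivative d/dt.
derivative = savgol_filter(noisy, window_length=31, polyorder=3, deriv=1, delta=dt)

coeffs = savgol_coeffs(31, 3)  # FIR coefficients of the smoothing filter
print(smoothed.shape, derivative.shape, round(float(coeffs.sum()), 6))  # coeffs sum to ~1
```

With the default `mode='interp'`, the edge samples are handled by the polynomial edge fits implemented in `_fit_edges_polyfit` above rather than by padding, so the output has the same length as the input.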
"\"\"\"Test functionality of mldata fetching utilities.\"\"\"\n\nimport os\nimport shutil\nimport tempfile\nimport scipy as sp\n\nfrom sklearn import datasets\nfrom sklearn.datasets import mldata_filename, fetch_mldata\n\nfrom sklearn.utils.testing import assert_in\nfrom sklearn.utils.testing import assert_not_in\nfrom sklearn.utils.testing import mock_mldata_urlopen\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import with_setup\nfrom sklearn.utils.testing import assert_array_equal\n\n\ntmpdir = None\n\n\ndef setup_tmpdata():\n # create temporary dir\n global tmpdir\n tmpdir = tempfile.mkdtemp()\n os.makedirs(os.path.join(tmpdir, 'mldata'))\n\n\ndef teardown_tmpdata():\n # remove temporary dir\n if tmpdir is not None:\n shutil.rmtree(tmpdir)\n\n\ndef test_mldata_filename():\n cases = [('datasets-UCI iris', 'datasets-uci-iris'),\n ('news20.binary', 'news20binary'),\n ('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),\n ('Nile Water Level', 'nile-water-level'),\n ('MNIST (original)', 'mnist-original')]\n for name, desired in cases:\n assert_equal(mldata_filename(name), desired)\n\n\n@with_setup(setup_tmpdata, teardown_tmpdata)\ndef test_download():\n \"\"\"Test that fetch_mldata is able to download and cache a data set.\"\"\"\n\n _urlopen_ref = datasets.mldata.urlopen\n datasets.mldata.urlopen = mock_mldata_urlopen({\n 'mock': {\n 'label': sp.ones((150,)),\n 'data': sp.ones((150, 4)),\n },\n })\n try:\n mock = fetch_mldata('mock', data_home=tmpdir)\n for n in [\"COL_NAMES\", \"DESCR\", \"target\", \"data\"]:\n assert_in(n, mock)\n\n assert_equal(mock.target.shape, (150,))\n assert_equal(mock.data.shape, (150, 4))\n\n assert_raises(datasets.mldata.HTTPError,\n fetch_mldata, 'not_existing_name')\n finally:\n datasets.mldata.urlopen = _urlopen_ref\n\n\n@with_setup(setup_tmpdata, teardown_tmpdata)\ndef test_fetch_one_column():\n _urlopen_ref = datasets.mldata.urlopen\n try:\n dataname = 'onecol'\n # create fake data set in cache\n x = sp.arange(6).reshape(2, 3)\n datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})\n\n dset = fetch_mldata(dataname, data_home=tmpdir)\n for n in [\"COL_NAMES\", \"DESCR\", \"data\"]:\n assert_in(n, dset)\n assert_not_in(\"target\", dset)\n\n assert_equal(dset.data.shape, (2, 3))\n assert_array_equal(dset.data, x)\n\n # transposing the data array\n dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)\n assert_equal(dset.data.shape, (3, 2))\n finally:\n datasets.mldata.urlopen = _urlopen_ref\n\n\n@with_setup(setup_tmpdata, teardown_tmpdata)\ndef test_fetch_multiple_column():\n _urlopen_ref = datasets.mldata.urlopen\n try:\n # create fake data set in cache\n x = sp.arange(6).reshape(2, 3)\n y = sp.array([1, -1])\n z = sp.arange(12).reshape(4, 3)\n\n # by default\n dataname = 'threecol-default'\n datasets.mldata.urlopen = mock_mldata_urlopen({\n dataname: (\n {\n 'label': y,\n 'data': x,\n 'z': z,\n },\n ['z', 'data', 'label'],\n ),\n })\n\n dset = fetch_mldata(dataname, data_home=tmpdir)\n for n in [\"COL_NAMES\", \"DESCR\", \"target\", \"data\", \"z\"]:\n assert_in(n, dset)\n assert_not_in(\"x\", dset)\n assert_not_in(\"y\", dset)\n\n assert_array_equal(dset.data, x)\n assert_array_equal(dset.target, y)\n assert_array_equal(dset.z, z.T)\n\n # by order\n dataname = 'threecol-order'\n datasets.mldata.urlopen = mock_mldata_urlopen({\n dataname: ({'y': y, 'x': x, 'z': z},\n ['y', 'x', 'z']), })\n\n dset = fetch_mldata(dataname, data_home=tmpdir)\n for n 
in [\"COL_NAMES\", \"DESCR\", \"target\", \"data\", \"z\"]:\n assert_in(n, dset)\n assert_not_in(\"x\", dset)\n assert_not_in(\"y\", dset)\n\n assert_array_equal(dset.data, x)\n assert_array_equal(dset.target, y)\n assert_array_equal(dset.z, z.T)\n\n # by number\n dataname = 'threecol-number'\n datasets.mldata.urlopen = mock_mldata_urlopen({\n dataname: ({'y': y, 'x': x, 'z': z},\n ['z', 'x', 'y']),\n })\n\n dset = fetch_mldata(dataname, target_name=2, data_name=0,\n data_home=tmpdir)\n for n in [\"COL_NAMES\", \"DESCR\", \"target\", \"data\", \"x\"]:\n assert_in(n, dset)\n assert_not_in(\"y\", dset)\n assert_not_in(\"z\", dset)\n\n assert_array_equal(dset.data, z)\n assert_array_equal(dset.target, y)\n\n # by name\n dset = fetch_mldata(dataname, target_name='y', data_name='z',\n data_home=tmpdir)\n for n in [\"COL_NAMES\", \"DESCR\", \"target\", \"data\", \"x\"]:\n assert_in(n, dset)\n assert_not_in(\"y\", dset)\n assert_not_in(\"z\", dset)\n\n finally:\n datasets.mldata.urlopen = _urlopen_ref\n",
"\"\"\"Test the search module\"\"\"\n\nfrom collections import Iterable, Sized\nfrom sklearn.externals.six.moves import cStringIO as StringIO\nfrom sklearn.externals.six.moves import xrange\nfrom sklearn.externals.joblib._compat import PY3_OR_LATER\nfrom itertools import chain, product\nimport pickle\nimport sys\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.fixes import in1d\nfrom sklearn.utils.fixes import sp_version\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_not_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_warns\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import assert_false, assert_true\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.utils.mocking import CheckingClassifier, MockDataFrame\n\nfrom scipy.stats import bernoulli, expon, uniform\n\nfrom sklearn.externals.six.moves import zip\nfrom sklearn.base import BaseEstimator\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_blobs\nfrom sklearn.datasets import make_multilabel_classification\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import LeaveOneGroupOut\nfrom sklearn.model_selection import LeavePGroupsOut\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import GroupShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.model_selection import ParameterSampler\n\nfrom sklearn.model_selection._validation import FitFailedWarning\n\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import SGDClassifier\n\nfrom sklearn.model_selection.tests.common import OneTimeSplitter\n\n\n# Neither of the following two estimators inherit from BaseEstimator,\n# to test hyperparameter search on user-defined classifiers.\nclass MockClassifier(object):\n \"\"\"Dummy classifier to test the parameter search algorithms\"\"\"\n def __init__(self, foo_param=0):\n self.foo_param = foo_param\n\n def fit(self, X, Y):\n assert_true(len(X) == len(Y))\n return self\n\n def predict(self, T):\n return T.shape[0]\n\n predict_proba = predict\n predict_log_proba = predict\n decision_function = predict\n transform = predict\n inverse_transform = predict\n\n def score(self, X=None, Y=None):\n if self.foo_param > 1:\n score = 1.\n else:\n score = 0.\n return score\n\n def get_params(self, deep=False):\n return {'foo_param': self.foo_param}\n\n def set_params(self, **params):\n self.foo_param = params['foo_param']\n return self\n\n\nclass LinearSVCNoScore(LinearSVC):\n \"\"\"An LinearSVC classifier that has no score method.\"\"\"\n @property\n def score(self):\n 
raise AttributeError\n\nX = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\ny = np.array([1, 1, 2, 2])\n\n\ndef assert_grid_iter_equals_getitem(grid):\n assert_equal(list(grid), [grid[i] for i in range(len(grid))])\n\n\ndef test_parameter_grid():\n # Test basic properties of ParameterGrid.\n params1 = {\"foo\": [1, 2, 3]}\n grid1 = ParameterGrid(params1)\n assert_true(isinstance(grid1, Iterable))\n assert_true(isinstance(grid1, Sized))\n assert_equal(len(grid1), 3)\n assert_grid_iter_equals_getitem(grid1)\n\n params2 = {\"foo\": [4, 2],\n \"bar\": [\"ham\", \"spam\", \"eggs\"]}\n grid2 = ParameterGrid(params2)\n assert_equal(len(grid2), 6)\n\n # loop to assert we can iterate over the grid multiple times\n for i in xrange(2):\n # tuple + chain transforms {\"a\": 1, \"b\": 2} to (\"a\", 1, \"b\", 2)\n points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)\n assert_equal(points,\n set((\"bar\", x, \"foo\", y)\n for x, y in product(params2[\"bar\"], params2[\"foo\"])))\n assert_grid_iter_equals_getitem(grid2)\n\n # Special case: empty grid (useful to get default estimator settings)\n empty = ParameterGrid({})\n assert_equal(len(empty), 1)\n assert_equal(list(empty), [{}])\n assert_grid_iter_equals_getitem(empty)\n assert_raises(IndexError, lambda: empty[1])\n\n has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])\n assert_equal(len(has_empty), 4)\n assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])\n assert_grid_iter_equals_getitem(has_empty)\n\n\ndef test_grid_search():\n # Test that the best estimator contains the right value for foo_param\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)\n # make sure it selects the smallest parameter in case of ties\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n grid_search.fit(X, y)\n sys.stdout = old_stdout\n assert_equal(grid_search.best_estimator_.foo_param, 2)\n\n assert_array_equal(grid_search.cv_results_[\"param_foo_param\"].data,\n [1, 2, 3])\n\n # Smoke test the score etc:\n grid_search.score(X, y)\n grid_search.predict_proba(X)\n grid_search.decision_function(X)\n grid_search.transform(X)\n\n # Test exception handling on scoring\n grid_search.scoring = 'sklearn'\n assert_raises(ValueError, grid_search.fit, X, y)\n\n\n@ignore_warnings\ndef test_grid_search_no_score():\n # Test grid-search on classifier that has no score function.\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n clf_no_score = LinearSVCNoScore(random_state=0)\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')\n grid_search.fit(X, y)\n\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},\n scoring='accuracy')\n # smoketest grid search\n grid_search_no_score.fit(X, y)\n\n # check that best params are equal\n assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)\n # check that we can call score and that it gives the correct result\n assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))\n\n # giving no scoring function raises an error\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})\n assert_raise_message(TypeError, \"no scoring\", grid_search_no_score.fit,\n [[1]])\n\n\ndef test_grid_search_score_method():\n X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,\n random_state=0)\n clf = LinearSVC(random_state=0)\n grid = {'C': [.1]}\n\n search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)\n search_accuracy = GridSearchCV(clf, grid, 
scoring='accuracy').fit(X, y)\n search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,\n scoring='roc_auc').fit(X, y)\n search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)\n\n # Check warning only occurs in situation where behavior changed:\n # estimator requires score method to compete with scoring parameter\n score_no_scoring = search_no_scoring.score(X, y)\n score_accuracy = search_accuracy.score(X, y)\n score_no_score_auc = search_no_score_method_auc.score(X, y)\n score_auc = search_auc.score(X, y)\n\n # ensure the test is sane\n assert_true(score_auc < 1.0)\n assert_true(score_accuracy < 1.0)\n assert_not_equal(score_auc, score_accuracy)\n\n assert_almost_equal(score_accuracy, score_no_scoring)\n assert_almost_equal(score_auc, score_no_score_auc)\n\n\ndef test_grid_search_groups():\n # Check if ValueError (when groups is None) propagates to GridSearchCV\n # And also check if groups is correctly passed to the cv object\n rng = np.random.RandomState(0)\n\n X, y = make_classification(n_samples=15, n_classes=2, random_state=0)\n groups = rng.randint(0, 3, 15)\n\n clf = LinearSVC(random_state=0)\n grid = {'C': [1]}\n\n group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),\n GroupShuffleSplit()]\n for cv in group_cvs:\n gs = GridSearchCV(clf, grid, cv=cv)\n assert_raise_message(ValueError,\n \"The groups parameter should not be None\",\n gs.fit, X, y)\n gs.fit(X, y, groups=groups)\n\n non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]\n for cv in non_group_cvs:\n gs = GridSearchCV(clf, grid, cv=cv)\n # Should not raise an error\n gs.fit(X, y)\n\n\ndef test_trivial_cv_results_attr():\n # Test search over a \"grid\" with only one point.\n # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1]})\n grid_search.fit(X, y)\n assert_true(hasattr(grid_search, \"cv_results_\"))\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)\n random_search.fit(X, y)\n assert_true(hasattr(grid_search, \"cv_results_\"))\n\n\ndef test_no_refit():\n # Test that GSCV can be used for model selection alone without refitting\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)\n grid_search.fit(X, y)\n assert_true(not hasattr(grid_search, \"best_estimator_\") and\n hasattr(grid_search, \"best_index_\") and\n hasattr(grid_search, \"best_params_\"))\n\n # Make sure the predict/transform etc fns raise meaningfull error msg\n for fn_name in ('predict', 'predict_proba', 'predict_log_proba',\n 'transform', 'inverse_transform'):\n assert_raise_message(NotFittedError,\n ('refit=False. 
%s is available only after '\n 'refitting on the best parameters' % fn_name),\n getattr(grid_search, fn_name), X)\n\n\ndef test_grid_search_error():\n # Test that grid search will capture errors on data with different length\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_[:180], y_)\n\n\ndef test_grid_search_one_grid_point():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n param_dict = {\"C\": [1.0], \"kernel\": [\"rbf\"], \"gamma\": [0.1]}\n\n clf = SVC()\n cv = GridSearchCV(clf, param_dict)\n cv.fit(X_, y_)\n\n clf = SVC(C=1.0, kernel=\"rbf\", gamma=0.1)\n clf.fit(X_, y_)\n\n assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)\n\n\ndef test_grid_search_when_param_grid_includes_range():\n # Test that the best estimator contains the right value for foo_param\n clf = MockClassifier()\n grid_search = None\n if PY3_OR_LATER:\n grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})\n else:\n grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})\n grid_search.fit(X, y)\n assert_equal(grid_search.best_estimator_.foo_param, 2)\n\n\ndef test_grid_search_bad_param_grid():\n param_dict = {\"C\": 1.0}\n clf = SVC()\n assert_raise_message(\n ValueError,\n \"Parameter values for parameter (C) need to be a sequence\"\n \"(but not a string) or np.ndarray.\",\n GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": []}\n clf = SVC()\n assert_raise_message(\n ValueError,\n \"Parameter values for parameter (C) need to be a non-empty sequence.\",\n GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": \"1,2,3\"}\n clf = SVC()\n assert_raise_message(\n ValueError,\n \"Parameter values for parameter (C) need to be a sequence\"\n \"(but not a string) or np.ndarray.\",\n GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": np.ones(6).reshape(3, 2)}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n\ndef test_grid_search_sparse():\n # Test that grid search works with both dense and sparse matrices\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180].tocoo(), y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert_true(np.mean(y_pred == y_pred2) >= .9)\n assert_equal(C, C2)\n\n\ndef test_grid_search_sparse_scoring():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert_array_equal(y_pred, y_pred2)\n assert_equal(C, C2)\n # Smoke test the score\n # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),\n # cv.score(X_[:180], y[:180]))\n\n # test loss where greater is worse\n def f1_loss(y_true_, y_pred_):\n return -f1_score(y_true_, y_pred_)\n F1Loss = make_scorer(f1_loss, greater_is_better=False)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)\n cv.fit(X_[:180], 
y_[:180])\n y_pred3 = cv.predict(X_[180:])\n C3 = cv.best_estimator_.C\n\n assert_equal(C, C3)\n assert_array_equal(y_pred, y_pred3)\n\n\ndef test_grid_search_precomputed_kernel():\n # Test that grid search works when the input features are given in the\n # form of a precomputed kernel matrix\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n # compute the training kernel matrix corresponding to the linear kernel\n K_train = np.dot(X_[:180], X_[:180].T)\n y_train = y_[:180]\n\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(K_train, y_train)\n\n assert_true(cv.best_score_ >= 0)\n\n # compute the test kernel matrix\n K_test = np.dot(X_[180:], X_[:180].T)\n y_test = y_[180:]\n\n y_pred = cv.predict(K_test)\n\n assert_true(np.mean(y_pred == y_test) >= 0)\n\n # test error is raised when the precomputed kernel is not array-like\n # or sparse\n assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_nonsquare():\n # Test that grid search returns an error with a non-square precomputed\n # training kernel matrix\n K_train = np.zeros((10, 20))\n y_train = np.ones((10, ))\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, K_train, y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_kernel_function():\n # Test that grid search returns an error when using a kernel_function\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n kernel_function = lambda x1, x2: np.dot(x1, x2.T)\n clf = SVC(kernel=kernel_function)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_, y_)\n\n\nclass BrokenClassifier(BaseEstimator):\n \"\"\"Broken classifier that cannot be fit twice\"\"\"\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y):\n assert_true(not hasattr(self, 'has_been_fit_'))\n self.has_been_fit_ = True\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\n@ignore_warnings\ndef test_refit():\n # Regression test for bug in refitting\n # Simulates re-fitting a broken estimator; this used to break with\n # sparse SVMs.\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],\n scoring=\"precision\", refit=True)\n clf.fit(X, y)\n\n\ndef test_gridsearch_nd():\n # Pass X as list in GridSearchCV\n X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)\n y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)\n check_X = lambda x: x.shape[1:] == (5, 3, 2)\n check_y = lambda x: x.shape[1:] == (7, 11)\n clf = CheckingClassifier(check_X=check_X, check_y=check_y)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_4d, y_3d).score(X, y)\n assert_true(hasattr(grid_search, \"cv_results_\"))\n\n\ndef test_X_as_list():\n # Pass X as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))\n cv = KFold(n_splits=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X.tolist(), y).score(X, y)\n assert_true(hasattr(grid_search, \"cv_results_\"))\n\n\ndef test_y_as_list():\n # Pass y as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))\n cv = KFold(n_splits=3)\n grid_search = GridSearchCV(clf, 
{'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X, y.tolist()).score(X, y)\n assert_true(hasattr(grid_search, \"cv_results_\"))\n\n\n@ignore_warnings\ndef test_pandas_input():\n # check cross_val_score doesn't destroy pandas dataframe\n types = [(MockDataFrame, MockDataFrame)]\n try:\n from pandas import Series, DataFrame\n types.append((DataFrame, Series))\n except ImportError:\n pass\n\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n for InputFeatureType, TargetType in types:\n # X dataframe, y series\n X_df, y_ser = InputFeatureType(X), TargetType(y)\n check_df = lambda x: isinstance(x, InputFeatureType)\n check_series = lambda x: isinstance(x, TargetType)\n clf = CheckingClassifier(check_X=check_df, check_y=check_series)\n\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_df, y_ser).score(X_df, y_ser)\n grid_search.predict(X_df)\n assert_true(hasattr(grid_search, \"cv_results_\"))\n\n\ndef test_unsupervised_grid_search():\n # test grid-search with unsupervised estimator\n X, y = make_blobs(random_state=0)\n km = KMeans(random_state=0)\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),\n scoring='adjusted_rand_score')\n grid_search.fit(X, y)\n # ARI can find the right number :)\n assert_equal(grid_search.best_params_[\"n_clusters\"], 3)\n\n # Now without a score, and without y\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))\n grid_search.fit(X)\n assert_equal(grid_search.best_params_[\"n_clusters\"], 4)\n\n\ndef test_gridsearch_no_predict():\n # test grid-search with an estimator without predict.\n # slight duplication of a test from KDE\n def custom_scoring(estimator, X):\n return 42 if estimator.bandwidth == .1 else 0\n X, _ = make_blobs(cluster_std=.1, random_state=1,\n centers=[[0, 1], [1, 0], [0, 0]])\n search = GridSearchCV(KernelDensity(),\n param_grid=dict(bandwidth=[.01, .1, 1]),\n scoring=custom_scoring)\n search.fit(X)\n assert_equal(search.best_params_['bandwidth'], .1)\n assert_equal(search.best_score_, 42)\n\n\ndef test_param_sampler():\n # test basic properties of param sampler\n param_distributions = {\"kernel\": [\"rbf\", \"linear\"],\n \"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n samples = [x for x in sampler]\n assert_equal(len(samples), 10)\n for sample in samples:\n assert_true(sample[\"kernel\"] in [\"rbf\", \"linear\"])\n assert_true(0 <= sample[\"C\"] <= 1)\n\n # test that repeated calls yield identical parameters\n param_distributions = {\"C\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=3, random_state=0)\n assert_equal([x for x in sampler], [x for x in sampler])\n\n if sp_version >= (0, 16):\n param_distributions = {\"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n assert_equal([x for x in sampler], [x for x in sampler])\n\n\ndef check_cv_results_array_types(cv_results, param_keys, score_keys):\n # Check if the search `cv_results`'s array are of correct types\n assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)\n for param in param_keys))\n assert_true(all(cv_results[key].dtype == object for key in param_keys))\n assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)\n for key in score_keys))\n assert_true(all(cv_results[key].dtype == np.float64\n for key in score_keys if not key.startswith('rank')))\n 
assert_true(cv_results['rank_test_score'].dtype == np.int32)\n\n\ndef check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):\n # Test the search.cv_results_ contains all the required results\n assert_array_equal(sorted(cv_results.keys()),\n sorted(param_keys + score_keys + ('params',)))\n assert_true(all(cv_results[key].shape == (n_cand,)\n for key in param_keys + score_keys))\n\n\ndef check_cv_results_grid_scores_consistency(search):\n # TODO Remove in 0.20\n cv_results = search.cv_results_\n res_scores = np.vstack(list([cv_results[\"split%d_test_score\" % i]\n for i in range(search.n_splits_)])).T\n res_means = cv_results[\"mean_test_score\"]\n res_params = cv_results[\"params\"]\n n_cand = len(res_params)\n grid_scores = assert_warns(DeprecationWarning, getattr,\n search, 'grid_scores_')\n assert_equal(len(grid_scores), n_cand)\n # Check consistency of the structure of grid_scores\n for i in range(n_cand):\n assert_equal(grid_scores[i].parameters, res_params[i])\n assert_array_equal(grid_scores[i].cv_validation_scores,\n res_scores[i, :])\n assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])\n\n\ndef test_grid_search_cv_results():\n X, y = make_classification(n_samples=50, n_features=4,\n random_state=42)\n\n n_splits = 3\n n_grid_points = 6\n params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),\n dict(kernel=['poly', ], degree=[1, 2])]\n grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,\n param_grid=params)\n grid_search.fit(X, y)\n grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,\n param_grid=params)\n grid_search_iid.fit(X, y)\n\n param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')\n score_keys = ('mean_test_score', 'mean_train_score',\n 'rank_test_score',\n 'split0_test_score', 'split1_test_score',\n 'split2_test_score',\n 'split0_train_score', 'split1_train_score',\n 'split2_train_score',\n 'std_test_score', 'std_train_score',\n 'mean_fit_time', 'std_fit_time',\n 'mean_score_time', 'std_score_time')\n n_candidates = n_grid_points\n\n for search, iid in zip((grid_search, grid_search_iid), (False, True)):\n assert_equal(iid, search.iid)\n cv_results = search.cv_results_\n # Check if score and timing are reasonable\n assert_true(all(cv_results['rank_test_score'] >= 1))\n assert_true(all(cv_results[k] >= 0) for k in score_keys\n if k is not 'rank_test_score')\n assert_true(all(cv_results[k] <= 1) for k in score_keys\n if 'time' not in k and\n k is not 'rank_test_score')\n # Check cv_results structure\n check_cv_results_array_types(cv_results, param_keys, score_keys)\n check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)\n # Check masking\n cv_results = grid_search.cv_results_\n n_candidates = len(grid_search.cv_results_['params'])\n assert_true(all((cv_results['param_C'].mask[i] and\n cv_results['param_gamma'].mask[i] and\n not cv_results['param_degree'].mask[i])\n for i in range(n_candidates)\n if cv_results['param_kernel'][i] == 'linear'))\n assert_true(all((not cv_results['param_C'].mask[i] and\n not cv_results['param_gamma'].mask[i] and\n cv_results['param_degree'].mask[i])\n for i in range(n_candidates)\n if cv_results['param_kernel'][i] == 'rbf'))\n check_cv_results_grid_scores_consistency(search)\n\n\ndef test_random_search_cv_results():\n # Make a dataset with a lot of noise to get various kind of prediction\n # errors across CV folds and parameter settings\n X, y = make_classification(n_samples=200, n_features=100, n_informative=3,\n random_state=0)\n\n # scipy.stats dists now 
supports `seed` but we still support scipy 0.12\n # which doesn't support the seed. Hence the assertions in the test for\n # random_search alone should not depend on randomization.\n n_splits = 3\n n_search_iter = 30\n params = dict(C=expon(scale=10), gamma=expon(scale=0.1))\n random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,\n cv=n_splits, iid=False,\n param_distributions=params)\n random_search.fit(X, y)\n random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,\n cv=n_splits, iid=True,\n param_distributions=params)\n random_search_iid.fit(X, y)\n\n param_keys = ('param_C', 'param_gamma')\n score_keys = ('mean_test_score', 'mean_train_score',\n 'rank_test_score',\n 'split0_test_score', 'split1_test_score',\n 'split2_test_score',\n 'split0_train_score', 'split1_train_score',\n 'split2_train_score',\n 'std_test_score', 'std_train_score',\n 'mean_fit_time', 'std_fit_time',\n 'mean_score_time', 'std_score_time')\n n_cand = n_search_iter\n\n for search, iid in zip((random_search, random_search_iid), (False, True)):\n assert_equal(iid, search.iid)\n cv_results = search.cv_results_\n # Check results structure\n check_cv_results_array_types(cv_results, param_keys, score_keys)\n check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)\n # For random_search, all the param array vals should be unmasked\n assert_false(any(cv_results['param_C'].mask) or\n any(cv_results['param_gamma'].mask))\n check_cv_results_grid_scores_consistency(search)\n\n\ndef test_search_iid_param():\n # Test the IID parameter\n # noise-free simple 2d-data\n X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,\n cluster_std=0.1, shuffle=False, n_samples=80)\n # split dataset into two folds that are not iid\n # first one contains data of all 4 blobs, second only from two.\n mask = np.ones(X.shape[0], dtype=np.bool)\n mask[np.where(y == 1)[0][::2]] = 0\n mask[np.where(y == 2)[0][::2]] = 0\n # this leads to perfect classification on one fold and a score of 1/3 on\n # the other\n # create \"cv\" for splits\n cv = [[mask, ~mask], [~mask, mask]]\n # once with iid=True (default)\n grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)\n random_search = RandomizedSearchCV(SVC(), n_iter=2,\n param_distributions={'C': [1, 10]},\n cv=cv)\n for search in (grid_search, random_search):\n search.fit(X, y)\n assert_true(search.iid)\n\n test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'\n % s_i][0]\n for s_i in range(search.n_splits_)))\n train_cv_scores = np.array(list(search.cv_results_['split%d_train_'\n 'score' % s_i][0]\n for s_i in range(search.n_splits_)))\n test_mean = search.cv_results_['mean_test_score'][0]\n test_std = search.cv_results_['std_test_score'][0]\n\n train_cv_scores = np.array(list(search.cv_results_['split%d_train_'\n 'score' % s_i][0]\n for s_i in range(search.n_splits_)))\n train_mean = search.cv_results_['mean_train_score'][0]\n train_std = search.cv_results_['std_train_score'][0]\n\n # Test the first candidate\n assert_equal(search.cv_results_['param_C'][0], 1)\n assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])\n assert_array_almost_equal(train_cv_scores, [1, 1])\n\n # for first split, 1/4 of dataset is in test, for second 3/4.\n # take weighted average and weighted std\n expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.\n expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +\n 3. / 4 * (expected_test_mean - 1. / 3.) 
**\n 2)\n assert_almost_equal(test_mean, expected_test_mean)\n assert_almost_equal(test_std, expected_test_std)\n\n # For the train scores, we do not take a weighted mean irrespective of\n # i.i.d. or not\n assert_almost_equal(train_mean, 1)\n assert_almost_equal(train_std, 0)\n\n # once with iid=False\n grid_search = GridSearchCV(SVC(),\n param_grid={'C': [1, 10]},\n cv=cv, iid=False)\n random_search = RandomizedSearchCV(SVC(), n_iter=2,\n param_distributions={'C': [1, 10]},\n cv=cv, iid=False)\n\n for search in (grid_search, random_search):\n search.fit(X, y)\n assert_false(search.iid)\n\n test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'\n % s][0]\n for s in range(search.n_splits_)))\n test_mean = search.cv_results_['mean_test_score'][0]\n test_std = search.cv_results_['std_test_score'][0]\n\n train_cv_scores = np.array(list(search.cv_results_['split%d_train_'\n 'score' % s][0]\n for s in range(search.n_splits_)))\n train_mean = search.cv_results_['mean_train_score'][0]\n train_std = search.cv_results_['std_train_score'][0]\n\n assert_equal(search.cv_results_['param_C'][0], 1)\n # scores are the same as above\n assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])\n # Unweighted mean/std is used\n assert_almost_equal(test_mean, np.mean(test_cv_scores))\n assert_almost_equal(test_std, np.std(test_cv_scores))\n\n # For the train scores, we do not take a weighted mean irrespective of\n # i.i.d. or not\n assert_almost_equal(train_mean, 1)\n assert_almost_equal(train_std, 0)\n\n\ndef test_search_cv_results_rank_tie_breaking():\n X, y = make_blobs(n_samples=50, random_state=42)\n\n # The two C values are close enough to give similar models\n # which would result in a tie of their mean cv-scores\n param_grid = {'C': [1, 1.001, 0.001]}\n\n grid_search = GridSearchCV(SVC(), param_grid=param_grid)\n random_search = RandomizedSearchCV(SVC(), n_iter=3,\n param_distributions=param_grid)\n\n for search in (grid_search, random_search):\n search.fit(X, y)\n cv_results = search.cv_results_\n # Check tie breaking strategy -\n # Check that there is a tie in the mean scores between\n # candidates 1 and 2 alone\n assert_almost_equal(cv_results['mean_test_score'][0],\n cv_results['mean_test_score'][1])\n assert_almost_equal(cv_results['mean_train_score'][0],\n cv_results['mean_train_score'][1])\n try:\n assert_almost_equal(cv_results['mean_test_score'][1],\n cv_results['mean_test_score'][2])\n except AssertionError:\n pass\n try:\n assert_almost_equal(cv_results['mean_train_score'][1],\n cv_results['mean_train_score'][2])\n except AssertionError:\n pass\n # 'min' rank should be assigned to the tied candidates\n assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])\n\n\ndef test_search_cv_results_none_param():\n X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]\n estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())\n est_parameters = {\"random_state\": [0, None]}\n cv = KFold(random_state=0)\n\n for est in estimators:\n grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)\n assert_array_equal(grid_search.cv_results_['param_random_state'],\n [0, None])\n\n\n@ignore_warnings()\ndef test_search_cv_timing():\n svc = LinearSVC(random_state=0)\n\n X = [[1, ], [2, ], [3, ], [4, ]]\n y = [0, 1, 1, 0]\n\n gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)\n rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)\n\n for search in (gs, rs):\n search.fit(X, y)\n for key in ['mean_fit_time', 'std_fit_time']:\n # NOTE The precision 
of time.time in windows is not high\n # enough for the fit/score times to be non-zero for trivial X and y\n assert_true(np.all(search.cv_results_[key] >= 0))\n assert_true(np.all(search.cv_results_[key] < 1))\n\n for key in ['mean_score_time', 'std_score_time']:\n assert_true(search.cv_results_[key][1] >= 0)\n assert_true(search.cv_results_[key][0] == 0.0)\n assert_true(np.all(search.cv_results_[key] < 1))\n\n\ndef test_grid_search_correct_score_results():\n # test that correct scores are used\n n_splits = 3\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n for score in ['f1', 'roc_auc']:\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)\n cv_results = grid_search.fit(X, y).cv_results_\n\n # Test scorer names\n result_keys = list(cv_results.keys())\n expected_keys = ((\"mean_test_score\", \"rank_test_score\") +\n tuple(\"split%d_test_score\" % cv_i\n for cv_i in range(n_splits)))\n assert_true(all(in1d(expected_keys, result_keys)))\n\n cv = StratifiedKFold(n_splits=n_splits)\n n_splits = grid_search.n_splits_\n for candidate_i, C in enumerate(Cs):\n clf.set_params(C=C)\n cv_scores = np.array(\n list(grid_search.cv_results_['split%d_test_score'\n % s][candidate_i]\n for s in range(n_splits)))\n for i, (train, test) in enumerate(cv.split(X, y)):\n clf.fit(X[train], y[train])\n if score == \"f1\":\n correct_score = f1_score(y[test], clf.predict(X[test]))\n elif score == \"roc_auc\":\n dec = clf.decision_function(X[test])\n correct_score = roc_auc_score(y[test], dec)\n assert_almost_equal(correct_score, cv_scores[i])\n\n\ndef test_pickle():\n # Test that a fit search can be pickled\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)\n grid_search.fit(X, y)\n grid_search_pickled = pickle.loads(pickle.dumps(grid_search))\n assert_array_almost_equal(grid_search.predict(X),\n grid_search_pickled.predict(X))\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},\n refit=True, n_iter=3)\n random_search.fit(X, y)\n random_search_pickled = pickle.loads(pickle.dumps(random_search))\n assert_array_almost_equal(random_search.predict(X),\n random_search_pickled.predict(X))\n\n\ndef test_grid_search_with_multioutput_data():\n # Test search with multi-output estimator\n\n X, y = make_multilabel_classification(return_indicator=True,\n random_state=0)\n\n est_parameters = {\"max_depth\": [1, 2, 3, 4]}\n cv = KFold(random_state=0)\n\n estimators = [DecisionTreeRegressor(random_state=0),\n DecisionTreeClassifier(random_state=0)]\n\n # Test with grid search cv\n for est in estimators:\n grid_search = GridSearchCV(est, est_parameters, cv=cv)\n grid_search.fit(X, y)\n res_params = grid_search.cv_results_['params']\n for cand_i in range(len(res_params)):\n est.set_params(**res_params[cand_i])\n\n for i, (train, test) in enumerate(cv.split(X, y)):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(\n correct_score,\n grid_search.cv_results_['split%d_test_score' % i][cand_i])\n\n # Test with a randomized search\n for est in estimators:\n random_search = RandomizedSearchCV(est, est_parameters,\n cv=cv, n_iter=3)\n random_search.fit(X, y)\n res_params = random_search.cv_results_['params']\n for cand_i in range(len(res_params)):\n est.set_params(**res_params[cand_i])\n\n for i, (train, test) in enumerate(cv.split(X, y)):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(\n correct_score,\n 
random_search.cv_results_['split%d_test_score'\n % i][cand_i])\n\n\ndef test_predict_proba_disabled():\n # Test predict_proba when disabled on estimator.\n X = np.arange(20).reshape(5, -1)\n y = [0, 0, 1, 1, 1]\n clf = SVC(probability=False)\n gs = GridSearchCV(clf, {}, cv=2).fit(X, y)\n assert_false(hasattr(gs, \"predict_proba\"))\n\n\ndef test_grid_search_allows_nans():\n # Test GridSearchCV with Imputer\n X = np.arange(20, dtype=np.float64).reshape(5, -1)\n X[2, :] = np.nan\n y = [0, 0, 1, 1, 1]\n p = Pipeline([\n ('imputer', Imputer(strategy='mean', missing_values='NaN')),\n ('classifier', MockClassifier()),\n ])\n GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)\n\n\nclass FailingClassifier(BaseEstimator):\n \"\"\"Classifier that raises a ValueError on fit()\"\"\"\n\n FAILING_PARAMETER = 2\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y=None):\n if self.parameter == FailingClassifier.FAILING_PARAMETER:\n raise ValueError(\"Failing classifier failed as required\")\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\ndef test_grid_search_failing_classifier():\n # GridSearchCV with on_error != 'raise'\n # Ensures that a warning is raised and score reset where appropriate.\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we only want to check that errors caused by fits\n # to individual folds will be caught and warnings raised instead. If\n # refit was done, then an exception would be raised on refit and not\n # caught by grid_search (expected behavior), and this would cause an\n # error in this test.\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=0.0)\n assert_warns(FitFailedWarning, gs.fit, X, y)\n n_candidates = len(gs.cv_results_['params'])\n\n # Ensure that grid scores were set to zero as required for those fits\n # that are expected to fail.\n def get_cand_scores(i):\n return np.array(list(gs.cv_results_['split%d_test_score' % s][i]\n for s in range(gs.n_splits_)))\n\n assert all((np.all(get_cand_scores(cand_i) == 0.0)\n for cand_i in range(n_candidates)\n if gs.cv_results_['param_parameter'][cand_i] ==\n FailingClassifier.FAILING_PARAMETER))\n\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=float('nan'))\n assert_warns(FitFailedWarning, gs.fit, X, y)\n n_candidates = len(gs.cv_results_['params'])\n assert all(np.all(np.isnan(get_cand_scores(cand_i)))\n for cand_i in range(n_candidates)\n if gs.cv_results_['param_parameter'][cand_i] ==\n FailingClassifier.FAILING_PARAMETER)\n\n\ndef test_grid_search_failing_classifier_raise():\n # GridSearchCV with on_error == 'raise' raises the error\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we want to test the behaviour of the grid search part\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score='raise')\n\n # FailingClassifier issues a ValueError so this is what we look for.\n assert_raises(ValueError, gs.fit, X, y)\n\n\ndef test_parameters_sampler_replacement():\n # raise error if n_iter too large\n params = {'first': [0, 1], 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params, n_iter=7)\n assert_raises(ValueError, list, sampler)\n # degenerates to GridSearchCV if n_iter the same as grid_size\n sampler = ParameterSampler(params, n_iter=6)\n 
samples = list(sampler)\n assert_equal(len(samples), 6)\n for values in ParameterGrid(params):\n assert_true(values in samples)\n\n # test sampling without replacement in a large grid\n params = {'a': range(10), 'b': range(10), 'c': range(10)}\n sampler = ParameterSampler(params, n_iter=99, random_state=42)\n samples = list(sampler)\n assert_equal(len(samples), 99)\n hashable_samples = [\"a%db%dc%d\" % (p['a'], p['b'], p['c'])\n for p in samples]\n assert_equal(len(set(hashable_samples)), 99)\n\n # doesn't go into infinite loops\n params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params_distribution, n_iter=7)\n samples = list(sampler)\n assert_equal(len(samples), 7)\n\n\ndef test_stochastic_gradient_loss_param():\n # Make sure the predict_proba works when loss is specified\n # as one of the parameters in the param_grid.\n param_grid = {\n 'loss': ['log'],\n }\n X = np.arange(24).reshape(6, -1)\n y = [0, 0, 0, 1, 1, 1]\n clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),\n param_grid=param_grid)\n\n # When the estimator is not fitted, `predict_proba` is not available as the\n # loss is 'hinge'.\n assert_false(hasattr(clf, \"predict_proba\"))\n clf.fit(X, y)\n clf.predict_proba(X)\n clf.predict_log_proba(X)\n\n # Make sure `predict_proba` is not available when setting loss=['hinge']\n # in param_grid\n param_grid = {\n 'loss': ['hinge'],\n }\n clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),\n param_grid=param_grid)\n assert_false(hasattr(clf, \"predict_proba\"))\n clf.fit(X, y)\n assert_false(hasattr(clf, \"predict_proba\"))\n\n\ndef test_search_train_scores_set_to_false():\n X = np.arange(6).reshape(6, -1)\n y = [0, 0, 0, 1, 1, 1]\n clf = LinearSVC(random_state=0)\n\n gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},\n return_train_score=False)\n gs.fit(X, y)\n\n\ndef test_grid_search_cv_splits_consistency():\n # Check if a one time iterable is accepted as a cv parameter.\n n_samples = 100\n n_splits = 5\n X, y = make_classification(n_samples=n_samples, random_state=0)\n\n gs = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.2, 0.3]},\n cv=OneTimeSplitter(n_splits=n_splits,\n n_samples=n_samples))\n gs.fit(X, y)\n\n gs2 = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.2, 0.3]},\n cv=KFold(n_splits=n_splits))\n gs2.fit(X, y)\n\n def _pop_time_keys(cv_results):\n for key in ('mean_fit_time', 'std_fit_time',\n 'mean_score_time', 'std_score_time'):\n cv_results.pop(key)\n return cv_results\n\n # OneTimeSplitter is a non-re-entrant cv where split can be called only\n # once if ``cv.split`` is called once per param setting in GridSearchCV.fit\n # the 2nd and 3rd parameter will not be evaluated as no train/test indices\n # will be generated for the 2nd and subsequent cv.split calls.\n # This is a check to make sure cv.split is not called once per param\n # setting.\n np.testing.assert_equal(_pop_time_keys(gs.cv_results_),\n _pop_time_keys(gs2.cv_results_))\n\n # Check consistency of folds across the parameters\n gs = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.1, 0.2, 0.2]},\n cv=KFold(n_splits=n_splits, shuffle=True))\n gs.fit(X, y)\n\n # As the first two param settings (C=0.1) and the next two param\n # settings (C=0.2) are same, the test and train scores must also be\n # same as long as the same train/test indices are generated for all\n # the cv splits, for both param setting\n for score_type in ('train', 'test'):\n per_param_scores = {}\n for param_i in 
range(4):\n per_param_scores[param_i] = list(\n gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]\n for s in range(5))\n\n assert_array_almost_equal(per_param_scores[0],\n per_param_scores[1])\n assert_array_almost_equal(per_param_scores[2],\n per_param_scores[3])\n"
] | [
[
"numpy.deprecate",
"numpy.imag",
"numpy.sqrt",
"numpy.asarray",
"numpy.issubdtype",
"numpy.round",
"numpy.iscomplexobj",
"numpy.where",
"numpy.place",
"numpy.unique",
"numpy.less",
"numpy.empty_like",
"numpy.sin",
"numpy.finfo",
"numpy.real",
"numpy.zeros",
"numpy.ndim",
"scipy._lib.six.xrange",
"numpy.floor",
"numpy.iscomplex",
"numpy.extract",
"numpy.float64",
"numpy.isscalar"
],
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.min",
"numpy.ones",
"numpy.finfo",
"scipy.linalg.pinv2",
"sklearn.utils.extmath.svd_flip",
"numpy.zeros"
],
[
"numpy.arange"
],
[
"sklearn.utils.testing.assert_almost_equal",
"sklearn.neighbors.LSHForest",
"sklearn.utils.testing.assert_raises",
"sklearn.utils.testing.assert_array_less",
"sklearn.utils.testing.assert_true",
"numpy.random.randn",
"numpy.mean",
"numpy.iinfo",
"numpy.var",
"sklearn.utils.testing.ignore_warnings",
"sklearn.utils.testing.assert_greater",
"numpy.finfo",
"sklearn.utils.testing.assert_warns_message",
"numpy.intersect1d",
"numpy.diff",
"numpy.less_equal",
"sklearn.neighbors.NearestNeighbors",
"numpy.zeros",
"sklearn.utils.testing.assert_array_equal",
"numpy.argsort",
"numpy.array",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.random.RandomState",
"sklearn.utils.testing.assert_equal",
"scipy.sparse.rand",
"numpy.ptp",
"numpy.sort",
"numpy.cos"
],
[
"matplotlib.transforms.Bbox",
"matplotlib.pyplot.xkcd",
"numpy.linspace",
"numpy.all",
"matplotlib.patches.Polygon",
"numpy.arange",
"numpy.sin",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.path.Path.unit_circle",
"matplotlib.pyplot.figure",
"matplotlib.path.Path.make_compound_path",
"matplotlib.pyplot.ylim",
"matplotlib.path.Path",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.ones",
"matplotlib.pyplot.xlim",
"numpy.vstack"
],
[
"numpy.amax",
"numpy.linspace",
"numpy.asarray",
"matplotlib.collections.LineCollection",
"matplotlib.patches.Rectangle",
"matplotlib.lines.Line2D",
"numpy.amin",
"numpy.ones",
"matplotlib.colors.to_rgba_array"
],
[
"numpy.diag",
"numpy.dot",
"numpy.log",
"scipy.linalg.svd",
"numpy.abs",
"numpy.reshape",
"numpy.eye",
"numpy.ones",
"numpy.copy",
"numpy.var",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.sqrt",
"scipy.linalg.eigvals_banded",
"numpy.around",
"numpy.int",
"numpy.exp",
"numpy.where",
"numpy.poly1d.__call__",
"numpy.hstack",
"numpy.arange",
"numpy.empty_like",
"numpy.sin",
"scipy.special.airy",
"numpy.zeros",
"numpy.arccos",
"numpy.floor",
"numpy.array",
"numpy.abs",
"numpy.cos",
"numpy.poly1d.__init__"
],
[
"matplotlib.cbook.iterable",
"numpy.ma.asarray",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.uint8",
"matplotlib.cbook.CallbackRegistry",
"matplotlib.colors.Normalize",
"numpy.empty",
"matplotlib.colors.ListedColormap",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib._cm._deprecation_datad",
"matplotlib.cbook.is_string_like"
],
[
"numpy.linalg._umath_linalg.eig",
"numpy.linalg.lapack_lite.dgelsd",
"numpy.core.zeros",
"numpy.core.ravel",
"numpy.core.broadcast",
"numpy.core.asanyarray",
"numpy.core.geterrobj",
"numpy.core.add.reduce",
"numpy.core.all",
"numpy.core.product",
"numpy.lib.asfarray",
"numpy.linalg._umath_linalg.inv",
"numpy.linalg._umath_linalg.slogdet",
"numpy.core.maximum.reduce",
"numpy.core.sum",
"numpy.core.array",
"numpy.lib.triu",
"numpy.core.empty",
"numpy.core.dot",
"numpy.core.finfo",
"numpy.core.isfinite",
"numpy.linalg._umath_linalg.det",
"numpy.core.abs",
"numpy.linalg._umath_linalg.eigvals",
"numpy.core.atleast_2d",
"numpy.core.transpose",
"numpy.core.isscalar",
"numpy.core.asarray",
"numpy.compat.asbytes",
"numpy.core.rollaxis",
"numpy.core.empty_like",
"numpy.core.sqrt",
"numpy.core.errstate"
],
[
"scipy._lib._version.NumpyVersion",
"numpy.testing.nosetester.import_nose",
"numpy.nditer",
"numpy.nonzero",
"numpy.asarray",
"numpy.cumsum",
"numpy.concatenate",
"numpy.asanyarray",
"numpy.diff",
"numpy.prod",
"numpy.iterable",
"numpy.array",
"numpy.empty"
],
[
"numpy.distutils.compat.get_exception"
],
[
"matplotlib.pyplot.gca"
],
[
"sklearn.utils.graph.graph_laplacian",
"numpy.arange",
"numpy.eye",
"scipy.sparse.csr_matrix",
"numpy.ones",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.dot",
"numpy.asarray",
"scipy.special.chdtrc",
"numpy.mean",
"numpy.zeros_like",
"numpy.where",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.arange",
"numpy.finfo",
"scipy.special.fdtrc",
"numpy.zeros",
"numpy.nonzero",
"numpy.isnan",
"numpy.append",
"scipy.stats.scoreatpercentile",
"numpy.errstate",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.sort",
"numpy.ones",
"scipy.stats.f.sf"
],
[
"numpy.core.arange",
"numpy.core.take",
"numpy.core.empty",
"numpy.core.asarray"
],
[
"matplotlib.verbose.report"
],
[
"numpy.log",
"numpy.spacing",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"numpy.median",
"numpy.ceil",
"numpy.sum"
],
[
"matplotlib.dates.DateFormatter",
"matplotlib.dates.RRuleLocator",
"matplotlib.dates.rrulewrapper",
"matplotlib.dates.drange",
"pandas.DatetimeIndex.tz_convert",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.dates.AutoDateFormatter",
"matplotlib.testing.jpl_units.register",
"matplotlib.pyplot.subplot",
"matplotlib.dates.date2num",
"matplotlib.dates.AutoDateLocator",
"matplotlib.dates.DayLocator",
"matplotlib.dates.num2date",
"matplotlib.pyplot.figure"
],
[
"matplotlib.texmanager.TexManager"
],
[
"scipy.stats.scoreatpercentile",
"numpy.ones"
],
[
"numpy.testing.Tester"
],
[
"numpy.asarray",
"numpy.arange",
"scipy.linalg.lstsq",
"numpy.zeros_like",
"numpy.zeros",
"scipy.ndimage.convolve1d"
],
[
"sklearn.utils.testing.assert_equal",
"sklearn.utils.testing.assert_array_equal",
"sklearn.utils.testing.assert_raises",
"sklearn.datasets.mldata_filename",
"sklearn.utils.testing.assert_not_in",
"sklearn.datasets.fetch_mldata",
"scipy.ones",
"scipy.arange",
"sklearn.utils.testing.with_setup",
"scipy.array",
"sklearn.utils.testing.assert_in",
"sklearn.utils.testing.mock_mldata_urlopen"
],
[
"numpy.dot",
"sklearn.metrics.roc_auc_score",
"sklearn.utils.testing.assert_array_almost_equal",
"sklearn.datasets.make_classification",
"numpy.sqrt",
"sklearn.utils.testing.assert_almost_equal",
"sklearn.cluster.KMeans",
"sklearn.model_selection.LeavePGroupsOut",
"sklearn.utils.testing.assert_raises",
"sklearn.externals.six.moves.cStringIO",
"sklearn.model_selection.KFold",
"numpy.all",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.utils.testing.assert_true",
"numpy.mean",
"sklearn.svm.LinearSVC",
"sklearn.utils.testing.assert_warns",
"sklearn.utils.testing.ignore_warnings",
"sklearn.metrics.f1_score",
"numpy.where",
"sklearn.datasets.make_blobs",
"sklearn.linear_model.SGDClassifier",
"sklearn.externals.six.moves.zip",
"numpy.arange",
"sklearn.externals.six.moves.xrange",
"sklearn.model_selection.StratifiedKFold",
"sklearn.preprocessing.Imputer",
"numpy.std",
"sklearn.model_selection.LeaveOneGroupOut",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.zeros",
"sklearn.utils.testing.assert_raise_message",
"sklearn.model_selection.GroupShuffleSplit",
"sklearn.utils.testing.assert_false",
"scipy.sparse.csr_matrix",
"sklearn.model_selection.tests.common.OneTimeSplitter",
"sklearn.model_selection.ParameterGrid",
"sklearn.datasets.make_multilabel_classification",
"sklearn.metrics.make_scorer",
"sklearn.svm.SVC",
"sklearn.utils.mocking.CheckingClassifier",
"sklearn.utils.testing.assert_array_equal",
"sklearn.neighbors.KernelDensity",
"scipy.stats.uniform",
"numpy.random.RandomState",
"numpy.array",
"sklearn.model_selection.GroupKFold",
"sklearn.utils.testing.assert_equal",
"sklearn.model_selection.GridSearchCV",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.model_selection.ParameterSampler",
"scipy.stats.expon",
"numpy.ones",
"sklearn.utils.testing.assert_not_equal",
"sklearn.utils.fixes.in1d",
"scipy.stats.bernoulli"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.13",
"1.16",
"1.9",
"1.7",
"1.15",
"1.14",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.6",
"1.10",
"1.12",
"1.11",
"1.13",
"1.16",
"1.9",
"1.18",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.13",
"1.16",
"1.9",
"1.7",
"1.15",
"1.14",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.5",
"1.2",
"0.20",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VIDA-NYU/prida | [
"cb2af13704506abc73d10f5c346ea21f70dd6e65",
"cb2af13704506abc73d10f5c346ea21f70dd6e65"
] | [
"data-generation/generate-stats-from-training-data.py",
"improvement-prediction/helping_feature_selectors/plotting_scripts/plot_crash_variations.py"
] | [
"from hdfs import InsecureClient\nfrom io import StringIO\nimport json\nimport numpy as np\nfrom operator import add\nimport os\nimport pandas as pd\nfrom pyspark import SparkConf, SparkContext, StorageLevel\nimport sys\n\n\ndef list_dir(file_path, hdfs_client=None, use_hdfs=False):\n \"\"\"Lists all the files inside the directory specified by file_path.\n \"\"\"\n\n if use_hdfs:\n return hdfs_client.list(file_path)\n return os.listdir(file_path)\n\n\ndef generate_stats_from_record(record, load_dataframes):\n \"\"\"Computes some statistics related to the training data record.\n \"\"\"\n\n global n_records\n global before_mae_lte_after\n global before_mae_gt_after\n global before_mse_lte_after\n global before_mse_gt_after\n global before_mdae_lte_after\n global before_mdae_gt_after\n global before_r2_lte_after\n global before_r2_gt_after\n global query_size_lte_candidate_size\n global query_size_gt_candidate_size\n\n query = record['query_dataset']\n query_data_obj = record['query_data']\n target = record['target']\n candidate = record['candidate_dataset']\n candidate_data_obj = record['candidate_data']\n imputation_strategy = record['imputation_strategy']\n mae_before = record['mean_absolute_error'][0]\n mae_after = record['mean_absolute_error'][1]\n mse_before = record['mean_squared_error'][0]\n mse_after = record['mean_squared_error'][1]\n mdae_before = record['median_absolute_error'][0]\n mdae_after = record['median_absolute_error'][1]\n r2_before = record['r2_score'][0]\n r2_after = record['r2_score'][1]\n\n # incrementing number of records\n n_records += 1\n\n # learning scores\n if mae_before <= mae_after:\n before_mae_lte_after += 1\n else:\n before_mae_gt_after += 1\n if mse_before <= mse_after:\n before_mse_lte_after += 1\n else:\n before_mse_gt_after += 1\n if mdae_before <= mdae_after:\n before_mdae_lte_after += 1\n else:\n before_mdae_gt_after += 1\n if r2_before <= r2_after:\n before_r2_lte_after += 1\n else:\n before_r2_gt_after += 1\n\n # dataframes\n if load_dataframes:\n\n query_data = pd.read_csv(StringIO(query_data_obj))\n candidate_data = pd.read_csv(StringIO(candidate_data_obj))\n\n # dataframe sizes\n if query_data.shape[0] <= candidate_data.shape[0]:\n query_size_lte_candidate_size += 1\n else:\n query_size_gt_candidate_size += 1\n\n # keys\n query_data_keys = set(query_data['key-for-ranking'])\n candidate_data_keys = set(candidate_data['key-for-ranking'])\n\n # relative intersection size\n intersection_size = len(query_data_keys & candidate_data_keys)\n query_intersection_size = intersection_size / len(query_data_keys)\n candidate_intersection_size = intersection_size / len(candidate_data_keys)\n\n return (imputation_strategy, query_intersection_size, candidate_intersection_size,\n query_data.shape[0], query_data.shape[1], candidate_data.shape[0], candidate_data.shape[1],\n len(query_data_obj.encode('utf-8')), len(candidate_data_obj.encode('utf-8')))\n\n return (imputation_strategy, None, None, None, None, None, None, None, None)\n\n\ndef add_data_to_json(json_obj, query_data, candidate_data):\n \"\"\"Adds query and candidate datasets to json object.\n \"\"\"\n\n json_obj['query_data'] = query_data\n json_obj['candidate_data'] = candidate_data\n return json_obj\n \n\nif __name__ == '__main__':\n\n # Spark context\n conf = SparkConf().setAppName(\"Data Generation Stats\")\n sc = SparkContext(conf=conf)\n\n # accumulators and global variables\n query_size_lte_candidate_size = sc.accumulator(0)\n query_size_gt_candidate_size = sc.accumulator(0)\n 
query_intersection_sizes = list()\n candidate_intersection_sizes = list()\n query_candidate_size = list()\n query_n_rows = list()\n query_n_columns = list()\n candidate_n_rows = list()\n candidate_n_columns = list()\n query_size_bytes = list()\n candidate_size_bytes = list()\n\n # parameters\n params = json.load(open(\".params.json\"))\n output_dir = params['new_datasets_directory']\n cluster_execution = params['cluster']\n hdfs_address = params['hdfs_address']\n hdfs_user = params['hdfs_user']\n\n # HDFS Client\n hdfs_client = None\n if cluster_execution:\n hdfs_client = InsecureClient(hdfs_address, user=hdfs_user)\n\n # dataset mappings\n id_to_dataset_filename_training = os.path.join(output_dir, 'id-to-dataset-training')\n id_to_dataset_filename_test = os.path.join(output_dir, 'id-to-dataset-test')\n if not cluster_execution:\n id_to_dataset_filename_training = 'file://' + id_to_dataset_filename_training\n id_to_dataset_filename_test = 'file://' + id_to_dataset_filename_test\n\n id_to_dataset = dict()\n id_to_dataset['training'] = sc.pickleFile(\n id_to_dataset_filename_training\n ).persist(StorageLevel.MEMORY_AND_DISK)\n id_to_dataset['test'] = sc.pickleFile(\n id_to_dataset_filename_test\n ).persist(StorageLevel.MEMORY_AND_DISK)\n\n # searching for training data\n algorithms = dict()\n for key in ['training', 'test']:\n load_dataframes = True\n for file_ in list_dir(output_dir, hdfs_client, cluster_execution):\n if '%s-data-'%key not in file_:\n continue\n algorithm_name = ' '.join(file_.replace('%s-data-'%key, '').split('-'))\n if algorithm_name not in algorithms:\n algorithms[algorithm_name] = dict(\n n_records=0,\n before_mae_lte_after=0,\n before_mae_gt_after=0,\n before_mse_lte_after=0,\n before_mse_gt_after=0,\n before_mdae_lte_after=0,\n before_mdae_gt_after=0,\n before_r2_lte_after=0,\n before_r2_gt_after=0,\n imputation_strategies=dict()\n )\n filename = os.path.join(output_dir, file_ + '/*')\n if not cluster_execution:\n filename = 'file://' + filename\n\n # accumulators\n n_records = sc.accumulator(0)\n before_mae_lte_after = sc.accumulator(0)\n before_mae_gt_after = sc.accumulator(0)\n before_mse_lte_after = sc.accumulator(0)\n before_mse_gt_after = sc.accumulator(0)\n before_mdae_lte_after = sc.accumulator(0)\n before_mdae_gt_after = sc.accumulator(0)\n before_r2_lte_after = sc.accumulator(0)\n before_r2_gt_after = sc.accumulator(0)\n\n stats = sc.emptyRDD()\n if load_dataframes:\n stats = sc.textFile(filename).map(\n lambda x: json.loads(x)\n ).map(\n # first, let's use query dataset id as key\n # (query dataset id, (candidate dataset id, dict))\n lambda x: (x['query_dataset'], (x['candidate_dataset'], x))\n ).join(\n # we get the query datasets\n id_to_dataset[key]\n ).map(\n # (candidate dataset id, (query dataset, dict))\n lambda x: (x[1][0][0], (x[1][1], x[1][0][1]))\n ).join(\n # we get the candidate datasets\n id_to_dataset[key]\n ).repartition(372).map(\n lambda x: add_data_to_json(x[1][0][1], x[1][0][0], x[1][1])\n ).map(\n lambda x: generate_stats_from_record(x, load_dataframes)\n ).persist(StorageLevel.MEMORY_AND_DISK)\n else:\n stats = sc.textFile(filename).repartition(372).map(\n lambda x: add_data_to_json(json.loads(x), None, None)\n ).map(\n lambda x: generate_stats_from_record(x, load_dataframes)\n ).persist(StorageLevel.MEMORY_AND_DISK)\n\n imputation_strategies = stats.map(\n lambda x: (x[0], 1)\n ).reduceByKey(add).collect()\n\n intersection_sizes = stats.filter(\n lambda x: x[1] != None and x[2] != None\n ).map(\n lambda x: (x[1], x[2])\n 
).collect()\n\n n_rows_columns = stats.filter(\n lambda x: x[1] != None and x[2] != None\n ).map(\n lambda x: (x[3], x[4], x[5], x[6])\n ).collect()\n\n size_bytes = stats.filter(\n lambda x: x[1] != None and x[2] != None\n ).map(\n lambda x: (x[7], x[8])\n ).collect()\n\n if len(intersection_sizes) > 0:\n query_intersection_sizes += [x for (x, y) in intersection_sizes]\n candidate_intersection_sizes += [y for (x, y) in intersection_sizes]\n\n if len(n_rows_columns) > 0:\n query_n_rows += [x for (x, y, w, z) in n_rows_columns]\n query_n_columns += [y for (x, y, w, z) in n_rows_columns]\n candidate_n_rows += [w for (x, y, w, z) in n_rows_columns]\n candidate_n_columns += [z for (x, y, w, z) in n_rows_columns]\n query_candidate_size += [y + z - 1 for (x, y, w, z) in n_rows_columns]\n\n if len(size_bytes) > 0:\n query_size_bytes += [x for (x, y) in size_bytes]\n candidate_size_bytes += [y for (x, y) in size_bytes]\n\n algorithms[algorithm_name]['n_records'] += n_records.value\n algorithms[algorithm_name]['before_mae_lte_after'] += before_mae_lte_after.value\n algorithms[algorithm_name]['before_mae_gt_after'] += before_mae_gt_after.value\n algorithms[algorithm_name]['before_mse_lte_after'] += before_mse_lte_after.value\n algorithms[algorithm_name]['before_mse_gt_after'] += before_mse_gt_after.value\n algorithms[algorithm_name]['before_mdae_lte_after'] += before_mdae_lte_after.value\n algorithms[algorithm_name]['before_mdae_gt_after'] += before_mdae_gt_after.value\n algorithms[algorithm_name]['before_r2_lte_after'] += before_r2_lte_after.value\n algorithms[algorithm_name]['before_r2_gt_after'] += before_r2_gt_after.value\n for (k, v) in imputation_strategies:\n if k not in algorithms[algorithm_name]['imputation_strategies']:\n algorithms[algorithm_name]['imputation_strategies'][k] = 0\n algorithms[algorithm_name]['imputation_strategies'][k] += v\n\n load_dataframes = False\n\n print('')\n for algorithm in algorithms:\n print('Statistics for %s:' % algorithm)\n print(' -- Number of records: %d' % algorithms[algorithm]['n_records'])\n print(' -- MAE before gt MAE after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_mae_gt_after'],\n (100 * algorithms[algorithm]['before_mae_gt_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- MAE before lte MAE after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_mae_lte_after'],\n (100 * algorithms[algorithm]['before_mae_lte_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- MSE before gt MSE after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_mse_gt_after'],\n (100 * algorithms[algorithm]['before_mse_gt_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- MSE before lte MSE after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_mse_lte_after'],\n (100 * algorithms[algorithm]['before_mse_lte_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- MDAE before gt MDAE after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_mdae_gt_after'],\n (100 * algorithms[algorithm]['before_mdae_gt_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- MDAE before lte MDAE after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_mdae_lte_after'],\n (100 * algorithms[algorithm]['before_mdae_lte_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- R^2 before gt R^2 after: %d (%.2f%%)' % (\n algorithms[algorithm]['before_r2_gt_after'],\n (100 * algorithms[algorithm]['before_r2_gt_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- R^2 before lte R^2 after: %d (%.2f%%)' % (\n 
algorithms[algorithm]['before_r2_lte_after'],\n (100 * algorithms[algorithm]['before_r2_lte_after']) / algorithms[algorithm]['n_records']\n ))\n print(' -- Missing value imputation strategies:')\n for (strategy, count) in sorted(algorithms[algorithm]['imputation_strategies'].items(), key=lambda x: x[1], reverse=True):\n print(' . %s\\t%d' % (strategy, count))\n print('')\n\n hist_query_intersection_size = np.histogram(query_intersection_sizes, bins=10)\n hist_candidate_intersection_size = np.histogram(candidate_intersection_sizes, bins=10)\n\n hist_query_n_rows = np.histogram(query_n_rows, bins=10)\n hist_query_n_columns = np.histogram(query_n_columns, bins=10)\n hist_candidate_n_rows = np.histogram(candidate_n_rows, bins=10)\n hist_candidate_n_columns = np.histogram(candidate_n_columns, bins=10)\n hist_query_candidate_size = np.histogram(query_candidate_size, bins=10)\n\n hist_query_size_bytes = np.histogram(query_size_bytes, bins=10)\n hist_candidate_size_bytes = np.histogram(candidate_size_bytes, bins=10)\n\n print('General statistics:')\n print(' -- Size query lte size candidate: %d (%.2f%%)' % (\n query_size_lte_candidate_size.value,\n (100 * query_size_lte_candidate_size.value) / (query_size_lte_candidate_size.value + query_size_gt_candidate_size.value)\n ))\n print(' -- Size query gt size candidate: %d (%.2f%%)' % (\n query_size_gt_candidate_size.value,\n (100 * query_size_gt_candidate_size.value) / (query_size_lte_candidate_size.value + query_size_gt_candidate_size.value)\n ))\n print(' -- Query intersection size: ')\n for i in range(1, len(hist_query_intersection_size[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_query_intersection_size[1][i-1],\n hist_query_intersection_size[1][i],\n hist_query_intersection_size[0][i-1])\n )\n print(' -- Candidate intersection size: ')\n for i in range(1, len(hist_candidate_intersection_size[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_candidate_intersection_size[1][i-1],\n hist_candidate_intersection_size[1][i],\n hist_candidate_intersection_size[0][i-1])\n )\n print(' -- Query number of records: ')\n for i in range(1, len(hist_query_n_rows[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_query_n_rows[1][i-1],\n hist_query_n_rows[1][i],\n hist_query_n_rows[0][i-1])\n )\n print(' -- Query number of columns: ')\n for i in range(1, len(hist_query_n_columns[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_query_n_columns[1][i-1],\n hist_query_n_columns[1][i],\n hist_query_n_columns[0][i-1])\n )\n print(' -- Candidate number of records: ')\n for i in range(1, len(hist_candidate_n_rows[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_candidate_n_rows[1][i-1],\n hist_candidate_n_rows[1][i],\n hist_candidate_n_rows[0][i-1])\n )\n print(' -- Candidate number of columns: ')\n for i in range(1, len(hist_candidate_n_columns[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_candidate_n_columns[1][i-1],\n hist_candidate_n_columns[1][i],\n hist_candidate_n_columns[0][i-1])\n )\n print(' -- Join size (number of columns): ')\n for i in range(1, len(hist_query_candidate_size[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_query_candidate_size[1][i-1],\n hist_query_candidate_size[1][i],\n hist_query_candidate_size[0][i-1])\n )\n print(' -- Query size (bytes): ')\n for i in range(1, len(hist_query_size_bytes[1])):\n print(' [%.4f, %4f]\\t%d' % (\n hist_query_size_bytes[1][i-1],\n hist_query_size_bytes[1][i],\n hist_query_size_bytes[0][i-1])\n )\n print(' -- Candidate size (bytes): ')\n for i in range(1, len(hist_candidate_size_bytes[1])):\n print(' [%.4f, %4f]\\t%d' % (\n 
hist_candidate_size_bytes[1][i-1],\n hist_candidate_size_bytes[1][i],\n hist_candidate_size_bytes[0][i-1])\n )\n print('')\n\n print('Configuration:')\n print(' -- new_datasets_directory: %s' % params['new_datasets_directory'])\n print(' -- cluster: %s' % str(params['cluster']))\n print(' -- hdfs_address: %s' % params['hdfs_address'])\n print(' -- hdfs_user: %s' % params['hdfs_user'])\n print('')\n",
"import matplotlib.pyplot as plt\nimport numpy as np\n\nx_values = [20, 40, 60, 80, 90, 95]\nida_crash_r2_scores = [0.9691170515958021, 0.9649274101684133, 0.965736237091571, 0.9649797566410734, 0.9726417645516913, 0.9732515795925913]\ncontainment_crash_r2_scores = [0.5969617407747152, 0.5345370430890348, 0.46304474725286715, 0.3183301211414049, 0.35304095587276263, 0.05413232925765188]\n\nax1 = plt.subplot(211)\nplt.plot(x_values, ida_crash_r2_scores, marker='o', linestyle='--', color='blue', label='PRIDA')\nplt.plot(x_values, containment_crash_r2_scores, marker='*', color='red', label='Containment')\nax1.set_ylabel(r'$R^2$ scores')\nax1.set_xticks(x_values)\nplt.setp(ax1.get_xticklabels(), visible=False)\n# plt.setp(ax1.get_xticklabels(), fontsize=6)\n\n# share x only\nida_crash_times = np.array([25990.21053314209, 19730.383281707764, 18832.772006988525, 18392.82850265503, 17987.223072052002, 18055.146045684814])\ncontainment_crash_times = np.array([34347.195625305176, 32804.622650146484, 24829.435348510742, 20469.29121017456, 18388.445377349854, 17733.24966430664])\n\nax2 = plt.subplot(212, sharex=ax1)\nplt.plot(x_values, ida_crash_times/6000, marker='o', linestyle='--', color='blue', label='PRIDA')\nplt.plot(x_values, containment_crash_times/6000, marker='*', color='red', label='Containment')\nax2.set_xticks(x_values)\nax2.set_ylabel(r'Time ($min$)')\nax2.set_xlabel(r'Pruning Percentages')\nax1.legend()\nax2.legend()\nax1.grid(True)\nax2.grid(True)\nax1.set_title('Efficiency and effectiveness for different pruning percentages\\nVehicle collision use case')\nplt.savefig('crash_variations_stepwise_linreg.png')\nplt.close()\n# Crash -- Linear Regressor + Stepwise\n# r2scores = [0.9691170515958021, 0.9649274101684133, 0.965736237091571, 0.9649797566410734, 0.9726417645516913, 0.9732515795925913]\n# candidates = [58, 46, 30, 15, 7, 3]\n# times = [25990.21053314209, 19730.383281707764, 18832.772006988525, 18392.82850265503, 17987.223072052002, 18055.146045684814]\n# ** containment\n# r2scores = [0.5969617407747152, 0.5345370430890348, 0.46304474725286715, 0.3183301211414049, 0.35304095587276263, 0.05413232925765188]\n# times = [34347.195625305176, 32804.622650146484, 24829.435348510742, 20469.29121017456, 18388.445377349854, 17733.24966430664]\n# ** no pruning\n# time = 40054.03757095337\n# r2score = 0.9617897237026073\n\nida_crash_r2_scores = [0.9696469610825932, 0.9696469610825932, 0.9730639531052399, 0.9737635879260208, 0.9722640001534714, 0.9662954949436724]\ncontainment_crash_r2_scores = [0.40601316335388216, 0.44593563916595524, 0.48136595592668197, 0.15991760559135482, 0.22773310205157933, -0.33269406071225527]\n\nax1 = plt.subplot(211)\nplt.plot(x_values, ida_crash_r2_scores, marker='o', linestyle='--', color='blue', label='PRIDA')\nplt.plot(x_values, containment_crash_r2_scores, marker='*', color='red', label='Containment')\nax1.set_ylabel(r'$R^2$ scores')\nax1.set_xticks(x_values)\nplt.setp(ax1.get_xticklabels(), visible=False)\n# plt.setp(ax1.get_xticklabels(), fontsize=6)\n\n# share x only\nida_crash_times = np.array([170535.61153411865, 162653.5412979126, 226241.32177352905, 137746.0164451599, 86237.03544616699, 78501.02422714233])\ncontainment_crash_times = np.array([202683.35819244385, 183310.866355896, 141263.65184783936, 145183.8493347168, 113092.65375137329, 64686.195850372314])\n\nax2 = plt.subplot(212, sharex=ax1)\nplt.plot(x_values, ida_crash_times/6000, marker='o', linestyle='--', color='blue', label='PRIDA')\nplt.plot(x_values, containment_crash_times/6000, marker='*', 
color='red', label='Containment')\nax2.set_xticks(x_values)\nax2.set_ylabel(r'Time ($min$)')\nax2.set_xlabel(r'Pruning Percentages')\nax1.legend()\nax2.legend()\nax1.grid(True)\nax2.grid(True)\nax1.set_title('Efficiency and effectiveness for different pruning percentages\\nVehicle collision use case')\nplt.savefig('crash_variations_rifs_linreg.png')\n\n\n# Crash -- Linear Regressor + RIFS\n# r2scores = [0.9696469610825932, 0.9696469610825932, 0.9730639531052399, 0.9737635879260208, 0.9722640001534714, 0.9662954949436724]\n# candidates = [58, 46, 30, 15, 7, 3]\n# times = [170535.61153411865, 162653.5412979126, 226241.32177352905, 137746.0164451599, 86237.03544616699, 78501.02422714233]\n# ** containment\n# r2scores = [0.40601316335388216, 0.44593563916595524, 0.48136595592668197, 0.15991760559135482, 0.22773310205157933, -0.33269406071225527]\n# times = [202683.35819244385, 183310.866355896, 141263.65184783936, 145183.8493347168, 113092.65375137329, 64686.195850372314]\n# ** no pruning\n# time = 314160.1514816284\n# r2score = 0.9741459267782678\n"
] | [
[
"numpy.histogram"
],
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
freedomtan/caffe2 | [
"28523ff1ff33f18eaf8b04cc4e0f308826e1861a",
"8f41717c46d214aaf62b53e5b3b9b308b5b8db91"
] | [
"caffe2/python/operator_test/mod_op_test.py",
"caffe2/python/operator_test/reduce_ops_test.py"
] | [
"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy\n\nfrom caffe2.python import core\nfrom hypothesis import given\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\n\n\[email protected]\ndef _data(draw):\n return draw(\n hu.tensor(dtype=np.int64,\n elements=st.integers(\n min_value=np.iinfo(np.int64).min, max_value=np.iinfo(np.int64).max\n )\n )\n )\n\n\nclass TestMod(hu.HypothesisTestCase):\n @given(\n data=_data(),\n divisor=st.integers(\n min_value=np.iinfo(np.int64).min, max_value=np.iinfo(np.int64).max\n ),\n inplace=st.booleans(),\n sign_follow_divisor=st.booleans(),\n **hu.gcs_cpu_only\n )\n def test_mod(\n self, data, divisor, inplace, sign_follow_divisor, gc, dc\n ):\n if divisor == 0:\n # invalid test case\n return None\n\n def ref(data):\n if sign_follow_divisor:\n output = data % divisor\n else:\n output = numpy.fmod(data, divisor)\n return [output]\n\n op = core.CreateOperator(\n 'Mod',\n ['data'],\n ['data' if inplace else 'output'],\n divisor=divisor,\n sign_follow_divisor=sign_follow_divisor\n )\n\n self.assertReferenceChecks(gc, op, [data], ref)\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n",
"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core, workspace\nfrom hypothesis import given\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\n\n\nclass TestReduceFrontReductions(hu.HypothesisTestCase):\n\n def grad_variant_input_test(self, grad_op_name, X, ref, num_reduce_dim):\n workspace.ResetWorkspace()\n\n Y = np.array(ref(X)[0]).astype(np.float32)\n dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)\n shape = np.array(X.shape).astype(np.int64)\n\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"dY\", dY)\n workspace.FeedBlob(\"shape\", shape)\n\n grad_op = core.CreateOperator(\n grad_op_name,\n [\"dY\", \"X\"],\n [\"dX\"],\n num_reduce_dim=num_reduce_dim\n )\n\n grad_op1 = core.CreateOperator(\n grad_op_name,\n [\"dY\", \"shape\"],\n [\"dX1\"],\n num_reduce_dim=num_reduce_dim\n )\n\n workspace.RunOperatorOnce(grad_op)\n workspace.RunOperatorOnce(grad_op1)\n\n dX = workspace.FetchBlob(\"dX\")\n dX1 = workspace.FetchBlob(\"dX1\")\n np.testing.assert_array_equal(dX, dX1)\n\n def reduce_op_test(self, op_name, op_ref, in_data, num_reduce_dims, device):\n op = core.CreateOperator(\n op_name,\n [\"inputs\"],\n [\"outputs\"],\n num_reduce_dim=num_reduce_dims\n )\n\n self.assertReferenceChecks(\n device_option=device,\n op=op,\n inputs=[in_data],\n reference=op_ref\n )\n\n self.assertGradientChecks(\n device, op, [in_data], 0, [0], stepsize=1e-2, threshold=1e-2)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_front_sum(self, num_reduce_dim, gc, dc):\n X = np.random.rand(7, 4, 3, 5).astype(np.float32)\n\n def ref_sum(X):\n return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]\n\n self.reduce_op_test(\"ReduceFrontSum\", ref_sum, X, num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceFrontSumGradient\", X, ref_sum, num_reduce_dim)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_front_mean(self, num_reduce_dim, gc, dc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_mean(X):\n return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]\n\n self.reduce_op_test(\"ReduceFrontMean\", ref_mean, X, num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceFrontMeanGradient\", X, ref_mean, num_reduce_dim)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_front_max(self, num_reduce_dim, gc, dc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_frontmax(X):\n return [np.max(X, axis=(tuple(range(num_reduce_dim))))]\n\n op = core.CreateOperator(\n \"ReduceFrontMax\",\n [\"inputs\"],\n [\"outputs\"],\n num_reduce_dim=num_reduce_dim\n )\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X],\n 
reference=ref_frontmax,\n )\n\n # Skip gradient check because it is too unreliable with max.\n # Just check CPU and CUDA have same results\n Y = np.array(ref_frontmax(X)[0]).astype(np.float32)\n dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)\n grad_op = core.CreateOperator(\n \"ReduceFrontMaxGradient\",\n [\"dY\", \"X\", \"Y\"],\n [\"dX\"],\n num_reduce_dim=num_reduce_dim\n )\n self.assertDeviceChecks(dc, grad_op, [dY, X, Y], [0])\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_back_max(self, num_reduce_dim, gc, dc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_backmax(X):\n return [np.max(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]\n\n op = core.CreateOperator(\n \"ReduceBackMax\",\n [\"inputs\"],\n [\"outputs\"],\n num_reduce_dim=num_reduce_dim\n )\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[X],\n reference=ref_backmax\n )\n\n # Skip gradient check because it is too unreliable with max\n # Just check CPU and CUDA have same results\n Y = np.array(ref_backmax(X)[0]).astype(np.float32)\n dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)\n grad_op = core.CreateOperator(\n \"ReduceBackMaxGradient\",\n [\"dY\", \"X\", \"Y\"],\n [\"dX\"],\n num_reduce_dim=num_reduce_dim\n )\n self.assertDeviceChecks(dc, grad_op, [dY, X, Y], [0])\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_back_sum(self, num_reduce_dim, dc, gc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_sum(X):\n return [np.sum(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]\n\n self.reduce_op_test(\"ReduceBackSum\", ref_sum, X, num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceBackSumGradient\", X, ref_sum, num_reduce_dim)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_back_mean(self, num_reduce_dim, dc, gc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_mean(X):\n return [np.mean(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]\n\n self.reduce_op_test(\"ReduceBackMean\", ref_mean, X, num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceBackMeanGradient\", X, ref_mean, num_reduce_dim)\n"
] | [
[
"numpy.fmod",
"numpy.iinfo"
],
[
"numpy.testing.assert_array_equal",
"numpy.max",
"numpy.mean",
"numpy.random.rand",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BelitK/bigdataodev | [
"ed952c55fce28c5de2a0890ea25b260b5248017c"
] | [
"American_prophet.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom fbprophet import Prophet\nimport pandas as pd\nfrom pandas import DataFrame\nfrom matplotlib import pyplot\n#univariate\n\n\n# In[2]:\n\n\nturkey = pd.read_excel(\"datasetler\\ecomretailfixed.xls\",header=0)\n\n\n# In[3]:\n\n\nturkey.columns = ['ds', 'y']\nturkey\nturkey['ds']= pd.to_datetime(turkey['ds'])\n\n\n# In[4]:\n\n\nmodel = Prophet()\n\n\n# In[5]:\n\n\nmodel.fit(turkey)\n\n\n# In[6]:\n\n\n# define the period for which we want a prediction\nfuture = list()\nwhile True:\n date_in = input(\"enter year or enter (q)uit\" )\n if date_in ==\"q\":\n break\n for i in range(1,12,3):\n date = \"{0}-{1}\".format(date_in,i)\n future.append([date])\n print(future)\nprint(future)\nfuture = DataFrame(future)\nfuture.columns = ['ds']\nfuture['ds']= pd.to_datetime(future['ds'])\n# use the model to make a forecast\nforecast = model.predict(future)\n# summarize the forecast\nprint(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']])\n# plot forecast\n\nmodel.plot(forecast)\npyplot.scatter(forecast['ds'],forecast['yhat'])\npyplot.show()\n\n\n# In[7]:\n\n\n# create test dataset, in quarters length\ntrain = turkey.drop(turkey.index[:-18])\nprint(train)\nmodel.plot_components(forecast)\n\n\n# In[8]:\n\n\nlen(forecast['yhat'].values)\n\n\n# In[9]:\n\n\nfrom sklearn.metrics import mean_absolute_error, mean_squared_log_error, balanced_accuracy_score\n# calculate MAE between expected and predicted values for december\ny_true = turkey['y'][-len(future):].values\ny_pred = forecast['yhat'].values\nmae = mean_absolute_error(y_true, y_pred)\nloss = mean_squared_log_error(y_true,y_pred)\nprint(\"loss score\",loss)\nprint('MAE: %.3f' % mae)\n\n\n# In[10]:\n\n\n# plot expected vs actual\npyplot.plot(y_true, label='Actual')\npyplot.plot(y_pred, label='Predicted')\npyplot.legend()\npyplot.show()\n\n\n# In[11]:\n\n\nfrom fbprophet.plot import plot_plotly, plot_components_plotly\n\nplot_plotly(model, forecast)\n\n\n# In[12]:\n\n\nplot_components_plotly(model, forecast)\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_excel",
"pandas.to_datetime",
"matplotlib.pyplot.scatter",
"sklearn.metrics.mean_squared_log_error",
"sklearn.metrics.mean_absolute_error",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
janelia-cosem/scripts | [
"f4bedbfc4ff81ec1b83282908ba6702baf98c734"
] | [
"tests/test_xarray.py"
] | [
"import pytest\nfrom fibsem_tools.io.zarr import zarr_n5_coordinate_inference\nfrom xarray import DataArray\nimport numpy as np\n\n\ndef pixelResolutionAttr(scales, units, **kwargs):\n return {\"pixelResolution\": {\"dimensions\": scales[::-1], \"unit\": units[0]}}\n\n\ndef resolutionAttr(scales, **kwargs):\n return {\"resolution\": scales[::-1]}\n\n\ndef cosemAttr(scales, units, axes, translates):\n return {\n \"transform\": {\n \"axes\": axes,\n \"scale\": scales,\n \"translate\": translates,\n \"units\": units,\n }\n }\n\n\[email protected](\n \"attr_factory\", [pixelResolutionAttr, resolutionAttr, cosemAttr]\n)\ndef test_coordinate_inference(attr_factory):\n shape = (100, 200, 300)\n axes = [\"z\", \"y\", \"x\"]\n scales = [1.0, 2.0, 3.0]\n units = [\"nm\", \"nm\", \"nm\"]\n translates = [0.0, 0.0, 0.0]\n\n attr = attr_factory(scales=scales, units=units, axes=axes, translates=translates)\n\n result = [\n DataArray(\n translates[idx] + np.arange(shape[idx]) * scales[idx],\n dims=ax,\n attrs={\"units\": units[idx]},\n )\n for idx, ax in enumerate(axes)\n ]\n coords, new_attrs = zarr_n5_coordinate_inference(shape, attr)\n for idx, r in enumerate(result):\n assert DataArray.equals(r, coords[idx])\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sgbaird/CrabNet | [
"9b3966cb7238dd688b84eb3fae9f2c6ae3a4ae47"
] | [
"examples/paper_figures/Paper_FIG_2.py"
] | [
"import numpy as np\nimport pandas as pd\nimport os\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.colors import Normalize\nimport matplotlib.patheffects as path_effects\nimport seaborn as sns\n\nfrom crabnet.kingcrab import CrabNet\nfrom crabnet.model import Model\nfrom crabnet.utils.get_compute_device import get_compute_device\n\nimport torch\n\nfrom crabnet.utils.utils import CONSTANTS\n\ncompute_device = get_compute_device()\ncons = CONSTANTS()\n\n\n# %%\nmat_prop = \"aflow__Egap\"\ntorchnet_params = {\"d_model\": 512, \"N\": 3, \"heads\": 4}\nsymbol_idx_dict = {val: key for key, val in cons.idx_symbol_dict.items()}\n\nelem = \"Si\"\n# elem = 23\nif type(elem) == int:\n elem_sym = cons.idx_symbol_dict[elem]\n elem_Z = elem\nelif type(elem) == str:\n elem_Z = symbol_idx_dict[elem]\n elem_sym = elem\n\ntrain_data1 = rf\"data\\benchmark_data\\{mat_prop}\\train.csv\"\ndatas = [train_data1]\nin_fracs = [\"Average Attention (AFLOW Egap)\"]\n\n\nclass SaveOutput:\n def __init__(self):\n self.outputs = []\n\n def __call__(self, module, module_in, module_out):\n module_out = [out.detach().cpu() for out in module_out]\n self.outputs.append(module_out)\n\n def clear(self):\n self.outputs = []\n\n\nsave_output = SaveOutput()\n\nfor data, in_frac in zip(datas, in_fracs):\n\n # Create a model\n model = Model(\n CrabNet(**torchnet_params, compute_device=compute_device).to(compute_device)\n )\n model.load_network(f\"{mat_prop}.pth\")\n hook_handles = []\n\n # Insert forward hooks into model\n for layer in model.model.modules():\n if isinstance(layer, torch.nn.modules.activation.MultiheadAttention):\n # print('isinstance')\n handle = layer.register_forward_hook(save_output)\n hook_handles.append(handle)\n\n model.load_data(data) # data is reloaded to model.data_loader\n\n save_output.clear()\n output = model.predict(model.data_loader)\n\n elem_pred = [\n (i, out, output[1][i]) for i, out in enumerate(output[2]) if elem_sym in out\n ]\n df_elem = pd.DataFrame(elem_pred, columns=[\"idx\", \"formula\", \"prediction\"])\n\n mod_out = save_output.outputs\n\n n_mats = len(mod_out) # number of output matrices from hook\n bsz = model.data_loader.batch_size # batch size from data loader\n B = len(model.data_loader) # total number of batches from data loader\n H = model.model.heads # number of heads\n N = model.model.N # number of layers\n n_data = len(model.data_loader.dataset)\n n_elements = model.n_elements\n\n assert n_mats == N * B, \"something is wrong with the matrices\"\n\n attn_data = torch.zeros(size=(n_data, N, H, n_elements, n_elements))\n for layer in range(N):\n sliceN = [\n save_output.outputs[i][1].unsqueeze(1) for i in range(layer, n_mats, N)\n ]\n sliceN = torch.cat(sliceN, dim=0)\n attn_data[:, layer : layer + 1, :, :, :] = sliceN\n\n save_output.clear() # free up CPU RAM after getting attn info\n attn_data = attn_data.detach().cpu().numpy()\n data_loader = model.data_loader\n\n def get_datum(data_loader, idx=0):\n datum = data_loader.dataset[idx]\n return datum\n\n def get_x(data_loader, idx=0):\n x = get_datum(data_loader, idx=idx)[0]\n return x\n\n def get_atomic_numbers(data_loader, idx=0):\n nums = get_x(data_loader, idx=idx).chunk(2)[0].detach().cpu().numpy()\n nums = nums.astype(int)\n return nums\n\n def get_atomic_fracs(data_loader, idx=0):\n nums = get_x(data_loader, idx=idx).chunk(2)[1].detach().cpu().numpy()\n return nums\n\n def get_target(data_loader, idx=0):\n target = get_datum(data_loader, idx=idx)[1].detach().cpu().numpy()\n return 
target\n\n def get_form(data_loader, idx=0):\n form = get_datum(data_loader, idx=idx)[2]\n return form\n\n def get_attention(attn_mat, idx=0, layer=0, head=0):\n \"\"\"\n Get one slice of the attention map.\n\n Parameters\n ----------\n attn_mat : Tensor\n attn_mat is numpy array in the shape of [S, N, H, d, d], where\n S is the total number of data samples,\n N is the layer number in the attention mechanism,\n H is the head number in the attention mechanism, and\n d is the attention dimension in each head.\n idx : int, optional\n Index of the input material. The default is 0.\n layer : int, optional\n Layer number in the attention mechanism. The default is 0.\n head : int, optional\n Head number in the attention mechanism. The default is 0.\n\n Returns\n -------\n attn : Tensor\n\n \"\"\"\n attn_mat = attn_mat\n assert len(attn_mat.shape) == 5, \"input attn_map is of the wrong shape\"\n if head == \"average\":\n attn = attn_mat[idx, layer, :, :, :]\n attn = np.mean(attn, axis=0)\n elif isinstance(head, int):\n attn = attn_mat[idx, layer, head, :, :]\n return attn\n\n attn_mat = attn_data.copy()\n\n data_loader = model.data_loader\n\n idx = 1\n layer = 0\n\n other_dict = {i: [] for i in range(1, 119)}\n\n option = [0, 1, 2, 3, \"average\"]\n option_texts = [\"a)\", \"b)\", \"c)\", \"d)\", \"average\"]\n\n idx_plot = 0\n head_option = option[idx_plot]\n option_text = option_texts[idx_plot]\n\n for idx in range(len(data_loader.dataset)):\n map_data = get_attention(attn_mat, idx=idx, layer=layer, head=head_option)\n atom_fracs = get_atomic_fracs(data_loader, idx=idx)\n form = get_form(data_loader, idx=idx)\n atomic_numbers = get_atomic_numbers(data_loader, idx=idx).ravel().tolist()\n idx_symbol_dict = cons.idx_symbol_dict\n atoms = [idx_symbol_dict[num] for num in atomic_numbers]\n atom_presence = np.array(atom_fracs > 0)\n mask = atom_presence * atom_presence.T\n map_data = map_data * mask\n if elem_Z in atomic_numbers:\n row = atomic_numbers.index(elem_Z)\n for atomic_number in atomic_numbers:\n if atomic_number == 0:\n continue\n col = atomic_numbers.index(atomic_number)\n # get the raw attention value\n other_dict[atomic_number].append(map_data[row, col])\n\n # fmt: off\n all_symbols = ['None', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',\n 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc',\n 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga',\n 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb',\n 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb',\n 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm',\n 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',\n 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl',\n 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th', 'Pa',\n 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md',\n 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg',\n 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts', 'Og']\n # fmt: on\n\n property_tracker = {\n all_symbols[key]: np.array(val).mean()\n for key, val in other_dict.items()\n if len(val) != 0\n }\n\n def plot(mat_prop, property_tracker):\n ptable = pd.read_csv(\"data/element_properties/ptable.csv\")\n ptable.index = ptable[\"symbol\"].values\n elem_tracker = ptable[\"count\"]\n n_row = ptable[\"row\"].max()\n n_column = ptable[\"column\"].max()\n\n elem_tracker = elem_tracker + pd.Series(property_tracker)\n\n # log_scale = True\n log_scale = False\n\n fig, ax = plt.subplots(figsize=(n_column, n_row))\n rows = ptable[\"row\"]\n columns = 
ptable[\"column\"]\n symbols = ptable[\"symbol\"]\n rw = 0.9 # rectangle width (rw)\n rh = rw # rectangle height (rh)\n for row, column, symbol in zip(rows, columns, symbols):\n row = ptable[\"row\"].max() - row\n cmap = sns.cm.rocket_r\n count_min = elem_tracker.min()\n count_max = elem_tracker.max()\n count_min = 0\n count_max = 1\n norm = Normalize(vmin=count_min, vmax=count_max)\n count = elem_tracker[symbol]\n if log_scale:\n norm = Normalize(vmin=np.log(1), vmax=np.log(count_max))\n if count != 0:\n count = np.log(count)\n color = cmap(norm(count))\n if np.isnan(count):\n color = \"silver\"\n if row < 3:\n row += 0.5\n # element box\n rect = patches.Rectangle(\n (column, row),\n rw,\n rh,\n linewidth=1.5,\n edgecolor=\"gray\",\n facecolor=color,\n alpha=1,\n )\n # plot element text\n text = plt.text(\n column + rw / 2,\n row + rw / 2,\n symbol,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=22,\n fontweight=\"semibold\",\n color=\"white\",\n )\n\n text.set_path_effects(\n [\n path_effects.Stroke(linewidth=3, foreground=\"#030303\"),\n path_effects.Normal(),\n ]\n )\n\n ax.add_patch(rect)\n\n granularity = 20\n for i in range(granularity):\n value = (1 - i / (granularity - 1)) * count_min + (\n i / (granularity - 1)\n ) * count_max\n if log_scale:\n if value != 0:\n value = np.log(value)\n color = cmap(norm(value))\n length = 9\n x_offset = 3.5\n y_offset = 7.8\n x_loc = i / (granularity) * length + x_offset\n width = length / granularity\n height = 0.35\n rect = patches.Rectangle(\n (x_loc, y_offset),\n width,\n height,\n linewidth=1.5,\n edgecolor=\"gray\",\n facecolor=color,\n alpha=1,\n )\n\n if i in [0, 4, 9, 14, 19]:\n text = f\"{value:0.2f}\"\n if log_scale:\n text = f\"{np.exp(value):0.1e}\".replace(\"+\", \"\")\n plt.text(\n x_loc + width / 2,\n y_offset - 0.4,\n text,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontweight=\"semibold\",\n fontsize=20,\n color=\"k\",\n )\n\n ax.add_patch(rect)\n\n legend_title = f\"{elem_sym}, {in_frac}\"\n plt.text(\n x_offset + length / 2,\n y_offset + 0.7,\n f\"log({legend_title})\" if log_scale else legend_title,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontweight=\"semibold\",\n fontsize=20,\n color=\"k\",\n )\n # add annotation for subfigure numbering\n plt.text(\n 0.55,\n n_row + 0.1,\n option_text,\n fontweight=\"semibold\",\n fontsize=38,\n color=\"k\",\n )\n ax.set_ylim(-0.15, n_row + 0.1)\n ax.set_xlim(0.85, n_column + 1.1)\n\n # fig.patch.set_visible(False)\n ax.axis(\"off\")\n\n plt.draw()\n save_dir = \"figures/\"\n if save_dir is not None:\n fig_name = f\"{save_dir}/Figure2_{mat_prop}_ptable_{head_option}.png\"\n os.makedirs(save_dir, exist_ok=True)\n plt.savefig(fig_name, bbox_inches=\"tight\", dpi=300)\n\n plt.pause(0.001)\n plt.close()\n\n plot(mat_prop, property_tracker)\n"
] | [
[
"matplotlib.patheffects.Normal",
"pandas.Series",
"torch.cat",
"torch.zeros",
"pandas.DataFrame",
"numpy.mean",
"numpy.exp",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.text",
"numpy.log",
"numpy.isnan",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.draw",
"matplotlib.patheffects.Stroke",
"matplotlib.pyplot.pause"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pythonmite/Daily-Coding-Problem | [
"8f758cb8cf0c6a7524c8874116ca9ed08545c773"
] | [
"problem_2_hard.py"
] | [
"\"\"\"\n Company Name : Uber\n \n Problem Statement : \n Given an array of integers, return a new array such that each element at index i of the new array is the product of all \n the numbers in the original array except the one at i.\n\n For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. \n If our input was [3, 2, 1], the expected output would be [2, 3, 6]\n\n\"\"\"\nimport numpy as np\n\n\ndef solution(numbers:list):\n result = list(map(lambda x: np.prod(numbers[:x] + numbers[x+1:]), range(len(numbers))))\n return result\n\n\nif __name__ == '__main__':\n print(solution(numbers=[1, 2, 3, 4, 5]))\n # [120, 60, 40, 30, 24]\n print(solution(numbers=[3,2,1]))\n # [2,3,6]\n \n"
] | [
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Project-X-UBC/smoke-detection-net | [
"93efa7651b772ae89eb8a2214183f2a1f1748e85"
] | [
"scripts/video_splitter.py"
] | [
"import cv2\nfrom os import listdir\nimport json\nimport sys\nimport numpy as np\nfrom concurrent.futures import ProcessPoolExecutor\n\n\n# This script splits the videos in the raw_data folder into frames. Since it is a multi-hour long process, it saves the names of the videos it has already split into a json file called done_videos.json\n# If it is called again and done_videos.json already exists, it will skip the videos that have already been split\n\n\nVIDEO_FOLDER = '../../datasets/alert_wildfire/raw_data'\n#FRAMES_FOLDER = '../../datasets/frames_test'\n#DONE_VIDEOS_PATH = '../../datasets/done_videos.json'\n#VIDEO_FOLDER = '../data/full/raw_data'\nFRAMES_FOLDER = '../data/full/frames'\nDONE_VIDEOS_PATH = '../data/done_videos.json'\nFRAMES_PER_VIDEO = 200\n\n\ndef is_image_grayscale(img):\n # True if image is black and white, False if not\n if len(img.shape) < 3: return True\n if img.shape[2] == 1: return True\n width, height, _ = img.shape\n img = img[width//3 : width//3*2, height//3 : height//3*2, :]\n b,g,r = img[:,:,0], img[:,:,1], img[:,:,2]\n similar = np.vectorize(lambda x, y: abs(int(x)-int(y)) < 10)\n similar_bg, similar_br = similar(b, g), similar(b, r)\n percent_bw = (np.sum(similar_bg) + np.sum(similar_br)) / (similar_bg.size + similar_br.size)\n if percent_bw >= .7: return True\n return False\n\n\ndef is_image_bad(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n hue = np.mean(hsv[:,:,0]*2)\n is_gray = is_image_grayscale(img)\n is_dark = np.mean(hsv[:,:,2]/255*100) < 39\n is_orange = (hue >= 0 and hue <= 40)\n is_blurry = cv2.Laplacian(img, cv2.CV_64F).var() < 400\n return is_gray or is_dark or is_orange or is_blurry\n\n\ndef split_video(videoname):\n # Saves FRAMES_PER_VIDEO frames of a video as jpg's. If it gets a black and white frame, it will keep looking until it finds a color frame\n vidcap = cv2.VideoCapture(VIDEO_FOLDER + '/' + videoname)\n num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n frame_gap = num_frames // FRAMES_PER_VIDEO if num_frames > FRAMES_PER_VIDEO else 1\n success = True\n frame, saved_frames_count = 0, 0\n print(videoname)\n while saved_frames_count < FRAMES_PER_VIDEO:\n success, img = vidcap.read()\n if frame % frame_gap == 0:\n while success and is_image_bad(img):\n # I make 10 frame jumps here because checking for b&w, hue, etc. is slow\n for _ in range(10):\n success, img = vidcap.read()\n frame += 1\n if not success:\n break\n jpg_name = videoname + '_frame_' + str(frame) + '.jpg'\n img = cv2.resize(img, (224, 224))\n cv2.imwrite(FRAMES_FOLDER + '/' + jpg_name, img)\n saved_frames_count += 1\n frame += 1\n vidcap.release()\n done_videos = get_done_videos()\n done_videos.add(videoname)\n save_done_videos(done_videos)\n print(f'Got {saved_frames_count} frames')\n\n\ndef get_done_videos():\n # Get list of videos that have already been split into frames\n try:\n with open(DONE_VIDEOS_PATH, 'rb') as f:\n done_videos = set(json.load(f))\n except IOError:\n done_videos = set()\n return done_videos\n\n\ndef save_done_videos(done_videos):\n with open(DONE_VIDEOS_PATH, 'w') as f:\n json.dump(list(done_videos), f)\n print(len(done_videos))\n\n\nif __name__ == '__main__':\n videonames = listdir(VIDEO_FOLDER)\n with ProcessPoolExecutor() as executor:\n executor.map(split_video, videonames)"
] | [
[
"numpy.mean",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aivian/robots | [
"6827886916e36432ce1d806f0a78edef6c9270d9",
"6827886916e36432ce1d806f0a78edef6c9270d9",
"6827886916e36432ce1d806f0a78edef6c9270d9",
"6827886916e36432ce1d806f0a78edef6c9270d9"
] | [
"pybots/src/filters/nonlinearities.py",
"pybots/src/geodesy/geoid.py",
"pybots/src/robot_control/states.py",
"pybots/src/filters/window.py"
] | [
"\"\"\"Implement some nonlinearies\n\"\"\"\nimport numpy\n\nimport scipy.interpolate\n\nclass DelayLine(object):\n \"\"\"Delay a signal\n \"\"\"\n def __init__(self, delay=None):\n \"\"\"Constructor\n\n Arguments:\n delay: time to delay the signal by, defaults to no delay (s)\n\n Returns:\n class instance\n \"\"\"\n self._filter_state = None\n self._delay = delay\n\n self._signal_queue = []\n self._time_queue = []\n\n def filter_value(self, value, time):\n \"\"\"Filter a new value\n\n Returns None if there are not any samples old enough. If the timestamp\n is less than any time currently in the queue, the filter will reset\n\n Arguments:\n value: the new value to filter\n time: the stamp for that time\n\n Returns:\n filtered: the new state of the filter\n \"\"\"\n if self._delay is None:\n self._filter_state = value\n return value\n\n self._signal_queue.append(value)\n self._time_queue.append(time)\n\n if time < numpy.amax(self._time_queue):\n self.reset()\n return None\n\n filtered = None\n while (\n (time - self._time_queue[0]) > self._delay and\n len(self._time_queue) > 0):\n filtered = self._signal_queue.pop(0)\n self._time_queue.pop(0)\n\n self._filter_state = filtered\n return filtered\n\n @property\n def value(self):\n \"\"\"Return the current value of the filter\n\n Assumes time starts with the first element in the filter. Returns None\n if there are not any samples old enough\n\n Arguments:\n no arguments\n\n Returns:\n value: current filter value\n \"\"\"\n return self._filter_state\n\n def value_at_time(self, time):\n \"\"\"Return the value of the filter for a specified time\n\n Returns None if there are not any samples old enough\n\n Arguments:\n time: the time to query\n\n Returns:\n value: filter value\n \"\"\"\n filtered = None\n idx = len(self._time_queue) - 1\n while (\n (time - self._time_queue[idx]) > self._delay and\n idx > 0):\n filtered = self._signal_queue[idx]\n idx -= 1\n\n return filtered\n\n def set_delay(self, delay):\n \"\"\"Set the delay time value\n\n Arguments:\n delay: the time to delay the signal for\n\n Returns:\n delay: delay duration (s)\n \"\"\"\n self._delay = delay\n\n def reset(self):\n \"\"\"Reset the delay queue\n\n Arguments:\n no arguments\n\n Returns:\n no returns\n \"\"\"\n self._time_queue = []\n self._signal_queue = []\n\nclass Deadband(object):\n \"\"\"Implement a deadband\n \"\"\"\n def __init__(self, center, width=None, width_fun=None):\n \"\"\"Constructor\n\n Arguments:\n center: center of the deadband\n width: width, can be a scalar for symmetric deadband or a vector\n for variable upper / lower bounds. If center is a N vector then\n this should be Nx2 matrix of bounds. Optional (can specify fun)\n width_fun: width function, evaluates a point and returns True if\n the point is within the deadband. 
Optional (can specify width\n instead)\n\n Returns:\n no returns\n \"\"\"\n self._center = center\n\n assert width is not None or width_fun is not None,\\\n 'Must specify width'\n\n if isinstance(center, numpy.ndarray) and width is not None:\n assert center.shape[0] == width.shape[0],\\\n 'for N element center, width must be Nx2'\n assert width.ndim == 2,\\\n 'for N element center, width must be Nx2'\n assert width.shape[1] == 2,\\\n 'for N element center, width must be Nx2'\n\n if width is not None:\n self._width_vector = width\n self._width = self._square_fun\n else:\n self._width = width_fun\n\n def _square_fun(self, point):\n \"\"\"Evaluate a point to see if it falls within a bracketed range\n\n This evalutes a square ball around a point and returns true if the point\n lies within it\n\n Arguments:\n point: location to be tested\n\n Returns:\n in_interior: true if the point lies within the interior of the ball\n \"\"\"\n point -= self._center\n\n if isinstance(self._center, numpy.ndarray):\n for x, bound in zip(self._width_vector, point):\n if x < numpy.amin(bound) or x > numpy.amax(bound):\n return False\n return True\n\n if (\n point < numpy.amin(self._width_vector) or\n point > numpy.amax(self._width_vector)):\n return False\n\n return True\n\n def filter_value(self, value):\n \"\"\"filter a signal through the deadband\n\n Arguments:\n value: the value we want to filter\n\n Returns:\n filtered: that value filtered through a deadband\n \"\"\"\n if self._width(value):\n return self._center\n\n return value\n",
"#!/usr/bin/python\n\n# This file is mostly a straight translation of\n# GeographicLib/src/Geoid.cpp from C++ to Python\n# by Kim Vandry <[email protected]>\n#\n# /**\n# * \\file Geoid.cpp\n# * \\brief Implementation for GeographicLib::Geoid class\n# *\n# * Copyright (c) Charles Karney (2009) <[email protected]>\n# * and licensed under the LGPL. For more information, see\n# * http://geographiclib.sourceforge.net/\n# **********************************************************************/\n#\n# Geoid height grade not supported\n\nimport os\nimport mmap\nimport struct\n\nimport numpy\n\nclass GeoidBadDataFile(Exception):\n pass\n\nclass GeoidHeight(object):\n \"\"\"Calculate the height of the WGS84 geoid above the\n ellipsoid at any given latitude and longitude\n\n :param name: name to PGM file containing model info\n download from http://geographiclib.sourceforge.net/1.18/geoid.html\n \"\"\"\n c0 = 240\n c3 = (\n ( 9, -18, -88, 0, 96, 90, 0, 0, -60, -20),\n ( -9, 18, 8, 0, -96, 30, 0, 0, 60, -20),\n ( 9, -88, -18, 90, 96, 0, -20, -60, 0, 0),\n (186, -42, -42, -150, -96, -150, 60, 60, 60, 60),\n ( 54, 162, -78, 30, -24, -90, -60, 60, -60, 60),\n ( -9, -32, 18, 30, 24, 0, 20, -60, 0, 0),\n ( -9, 8, 18, 30, -96, 0, -20, 60, 0, 0),\n ( 54, -78, 162, -90, -24, 30, 60, -60, 60, -60),\n (-54, 78, 78, 90, 144, 90, -60, -60, -60, -60),\n ( 9, -8, -18, -30, -24, 0, 20, 60, 0, 0),\n ( -9, 18, -32, 0, 24, 30, 0, 0, -60, 20),\n ( 9, -18, -8, 0, -24, -30, 0, 0, 60, 20),\n )\n\n c0n = 372\n c3n = (\n ( 0, 0, -131, 0, 138, 144, 0, 0, -102, -31),\n ( 0, 0, 7, 0, -138, 42, 0, 0, 102, -31),\n ( 62, 0, -31, 0, 0, -62, 0, 0, 0, 31),\n (124, 0, -62, 0, 0, -124, 0, 0, 0, 62),\n (124, 0, -62, 0, 0, -124, 0, 0, 0, 62),\n ( 62, 0, -31, 0, 0, -62, 0, 0, 0, 31),\n ( 0, 0, 45, 0, -183, -9, 0, 93, 18, 0),\n ( 0, 0, 216, 0, 33, 87, 0, -93, 12, -93),\n ( 0, 0, 156, 0, 153, 99, 0, -93, -12, -93),\n ( 0, 0, -45, 0, -3, 9, 0, 93, -18, 0),\n ( 0, 0, -55, 0, 48, 42, 0, 0, -84, 31),\n ( 0, 0, -7, 0, -48, -42, 0, 0, 84, 31),\n )\n\n c0s = 372\n c3s = (\n ( 18, -36, -122, 0, 120, 135, 0, 0, -84, -31),\n (-18, 36, -2, 0, -120, 51, 0, 0, 84, -31),\n ( 36, -165, -27, 93, 147, -9, 0, -93, 18, 0),\n (210, 45, -111, -93, -57, -192, 0, 93, 12, 93),\n (162, 141, -75, -93, -129, -180, 0, 93, -12, 93),\n (-36, -21, 27, 93, 39, 9, 0, -93, -18, 0),\n ( 0, 0, 62, 0, 0, 31, 0, 0, 0, -31),\n ( 0, 0, 124, 0, 0, 62, 0, 0, 0, -62),\n ( 0, 0, 124, 0, 0, 62, 0, 0, 0, -62),\n ( 0, 0, 62, 0, 0, 31, 0, 0, 0, -31),\n (-18, 36, -64, 0, 66, 51, 0, 0, -102, 31),\n ( 18, -36, 2, 0, -66, -51, 0, 0, 102, 31),\n )\n\n def __init__(self, name=\"egm2008-1.pgm\"):\n self.offset = None\n self.scale = None\n\n with open(name, \"rb\") as f:\n line = f.readline()\n if line != b\"P5\\012\" and line != b\"P5\\015\\012\":\n raise GeoidBadDataFile(\"No PGM header\")\n headerlen = len(line)\n while True:\n line = f.readline()\n if len(line) == 0:\n raise GeoidBadDataFile(\"EOF before end of file header\")\n headerlen += len(line)\n if line.startswith(b'# Offset '):\n try:\n self.offset = int(line[9:])\n except ValueError as e:\n raise GeoidBadDataFile(\"Error reading offset\", e)\n elif line.startswith(b'# Scale '):\n try:\n self.scale = float(line[8:])\n except ValueError as e:\n raise GeoidBadDataFile(\"Error reading scale\", e)\n elif not line.startswith(b'#'):\n try:\n self.width, self.height = list(map(int, line.split()))\n except ValueError as e:\n raise GeoidBadDataFile(\"Bad PGM width&height line\", e)\n break\n line = f.readline()\n headerlen += len(line)\n levels 
= int(line)\n if levels != 65535:\n raise GeoidBadDataFile(\"PGM file must have 65535 gray levels\")\n if self.offset is None:\n raise GeoidBadDataFile(\"PGM file does not contain offset\")\n if self.scale is None:\n raise GeoidBadDataFile(\"PGM file does not contain scale\")\n\n if self.width < 2 or self.height < 2:\n raise GeoidBadDataFile(\"Raster size too small\")\n\n fd = f.fileno()\n fullsize = os.fstat(fd).st_size\n\n if fullsize - headerlen != self.width * self.height * 2:\n raise GeoidBadDataFile(\"File has the wrong length\")\n\n self.headerlen = headerlen\n self.raw = mmap.mmap(fd, fullsize, mmap.MAP_SHARED, mmap.PROT_READ)\n\n self.rlonres = self.width / 360.0\n self.rlatres = (self.height - 1) / 180.0\n self.ix = None\n self.iy = None\n\n def _rawval(self, ix, iy):\n if iy < 0:\n iy = -iy;\n ix += self.width/2;\n elif iy >= self.height:\n iy = 2 * (self.height - 1) - iy;\n ix += self.width/2;\n if ix < 0:\n ix += self.width;\n elif ix >= self.width:\n ix -= self.width\n\n return struct.unpack_from('>H', self.raw,\n (iy * self.width + ix) * 2 + self.headerlen\n )[0]\n\n def get(self, lat, lon, cubic=True):\n lat = numpy.rad2deg(lat)\n lon = numpy.rad2deg(lon)\n if lon < 0:\n lon += 360\n fy = (90 - lat) * self.rlatres\n fx = lon * self.rlonres\n iy = int(fy)\n ix = int(fx)\n fx -= ix\n fy -= iy\n if iy == self.height - 1:\n iy -= 1\n\n if ix != self.ix or iy != self.iy:\n self.ix = ix\n self.iy = iy\n if not cubic:\n self.v00 = self._rawval(ix, iy)\n self.v01 = self._rawval(ix+1, iy)\n self.v10 = self._rawval(ix, iy+1)\n self.v11 = self._rawval(ix+1, iy+1)\n else:\n v = (\n self._rawval(ix , iy - 1),\n self._rawval(ix + 1, iy - 1),\n self._rawval(ix - 1, iy ),\n self._rawval(ix , iy ),\n self._rawval(ix + 1, iy ),\n self._rawval(ix + 2, iy ),\n self._rawval(ix - 1, iy + 1),\n self._rawval(ix , iy + 1),\n self._rawval(ix + 1, iy + 1),\n self._rawval(ix + 2, iy + 1),\n self._rawval(ix , iy + 2),\n self._rawval(ix + 1, iy + 2)\n )\n if iy == 0:\n c3x = GeoidHeight.c3n\n c0x = GeoidHeight.c0n\n elif iy == self.height - 2:\n c3x = GeoidHeight.c3s\n c0x = GeoidHeight.c0s\n else:\n c3x = GeoidHeight.c3\n c0x = GeoidHeight.c0\n self.t = [\n sum([ v[j] * c3x[j][i] for j in range(12) ]) / float(c0x)\n for i in range(10)\n ]\n if not cubic:\n a = (1 - fx) * self.v00 + fx * self.v01\n b = (1 - fx) * self.v10 + fx * self.v11\n h = (1 - fy) * a + fy * b\n else:\n h = (\n self.t[0] +\n fx * (self.t[1] + fx * (self.t[3] + fx * self.t[6])) +\n fy * (\n self.t[2] + fx * (self.t[4] + fx * self.t[7]) +\n fy * (self.t[5] + fx * self.t[8] + fy * self.t[9])\n )\n )\n return self.offset + self.scale * h\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 27 10:19:39 2015\n\n@author: Nate\n\"\"\"\n\n# import numpy module\nimport numpy as np\n\ndef limit_angle(angle_list):\n \"\"\"Function is used to sensibly limit an angle or a list of angles to the\n range [-pi, pi]\n\n Args:\n angle_list: either a single angle or a list of angles\n\n Returns:\n a single angle or a list of angles (of the same length as the input)\n but limited to the range [-pi, pi]\n \"\"\"\n if type(angle_list) is list:\n output = []\n for angle in angle_list:\n if np.isfinite(angle):\n output.append(np.clip(angle, -1.0*np.pi, 1.0*np.pi))\n else:\n output.append(0.0)\n return output\n else:\n angle = angle_list\n if np.isfinite(angle):\n return np.clip(angle, -1.0*np.pi, 1.0*np.pi)\n else:\n return 0.0\n\ndef wrap_angle(angle_list):\n \"\"\"Function is used to wrap an angle or a list of angles to the range\n [-pi, pi]\n\n Args:\n angle_list: either a single angle or a list of angles\n\n Returns:\n a single angle or a list of angles (of the same length as the input)\n but wrapped to the range [-pi, pi]\n \"\"\"\n if type(angle_list) is list:\n output = []\n for angle in angle_list:\n if np.isfinite(angle):\n output.append((angle + np.pi)%(2.0 * np.pi) - np.pi)\n else:\n output.append(0.0)\n return output\n else:\n angle = angle_list\n if np.isfinite(angle):\n return (angle + np.pi)%(2.0 * np.pi) - np.pi\n else:\n return 0.0\n\nclass ControlBase(object):\n \"\"\"Abstract base class for controllers and control states.\"\"\"\n def __init__(self, initial_value=0.0, is_angle=False):\n # most recent time the state was updated (sec)\n self._time = 0.0\n # previous time the state was updated (sec)\n self._timek_1 = 0.0\n # current value of the object\n self._value = float(initial_value)\n # set the angle flag\n self._is_angle = is_angle\n # Set limits\n self._limit = [0.0]*2\n # set limits on the magnitude of the state\n self.set_limit(-1e6, 1e6)\n # hold a reset (or initial) value for the state\n self._initial_value = float(initial_value)\n\n def set_limit(self, lower, upper):\n \"\"\"Method sets the lower and upper bounds that the state is allowed to\n acheive. Typically this is a bit of nonsense except for states where\n for example a negative value is nonsense (eg. airspeed). This is also\n useful in limiting an error state to reasonable bounds.\n\n Args:\n lower: float lower bound of the state\n upper: floar upper bound of the state\n \"\"\"\n self._limit = [lower, upper]\n if self._is_angle:\n self._limit = limit_angle(self._limit)\n\n def reset(self):\n \"\"\"Method called to reset the control base, specifically the times.\"\"\"\n # reset object value field\n self._value = np.clip(self._initial_value,\n self._limit[0], self._limit[1])\n # reset times\n self._time = 0.0\n self._timek_1 = 0.0\n\n @property\n def dt(self):\n \"\"\"Return the time difference between the most recent state\n settings.\"\"\"\n return self._time - self._timek_1\n\n @property\n def value(self):\n \"\"\"Return the current value of the object. This can and should be\n redefined in most inherited classes.\"\"\"\n return self._value\n\n @value.setter\n def value(self, input_value):\n \"\"\"Sets the value but without a timestamp. 
This method for setting the\n value is not recommended.\"\"\"\n if np.isfinite(input_value):\n # hold the current input value\n self._value = np.clip(input_value, self._limit[0], self._limit[1])\n\nclass ControlState(ControlBase):\n \"\"\"Class to hold and deal with the various properties associated with a\n system state used in a control framework. This state holds the current\n value, some history, functionality to determine the derivative (and\n optionally to filter the derivative), and the ability to compare it to a\n set value to determine an error.\"\"\"\n # TODO: using derivative, project state forward in time get_value(time_now), get_dt(time_now)\n def __init__(self, initial_value=0.0, is_angle=False, derivative_filt=None,\n derivative=None, bias=None):\n \"\"\"Initialize the state class.\n\n Args:\n is_angle: boolean indicating whether the state is an angle and thus\n should be dealt with differently\n initial_value: float initial value of the state, defaults to 0.0\n derivative_filt: optional error derivative filter (must contain a\n function called filter_value(value) that returns a float)\n derivative: handle of another ControlState object that is this\n state's derivative (eg, phi = ControlState(True, p)\n bias: either a float or an handle to another ControlState\n representing an subtractive bias applied to this state\n \"\"\"\n # initialize the base class\n super(ControlState, self).__init__(initial_value, is_angle)\n # current value of the state\n #self._value = float(initial_value)\n # previous value of the state\n self._valuek_1 = float(initial_value)\n # hold a bias for the value\n self._bias = 0.0\n # handle of a ControlState representing this state's subtractive bias\n self._bias_obj = bias\n # value of the current derivative\n self._derivative = 0.0\n # handle of a ControlState representing this state's derivative\n self._derivative_obj = derivative\n # optional derivative filter to be used when computing derivative internally\n self._derivative_filt = derivative_filt\n # flag indicating whether we're using a numerical derivative\n self._is_num_deriv = True\n # hold a reset (or initial) value for the state\n #self._initial_value = float(initial_value)\n\n def set_value(self, input_value, time=0.0):\n \"\"\"Method sets the current value of the state and updates previous\n values. 
If a time is provided and a derivative is not otherwise\n provided, a numerical derivative is taken here as well.\"\"\"\n if np.isfinite(input_value):\n # move the current time to the previous time spot\n self._timek_1 = self._time\n #save the new time\n self._time = time\n # use the setter method to set the current _value\n self.value = input_value\n # if we're using a numerical derivative, do this now\n if self._is_num_deriv and self.dt > 0.0:\n # TODO: if we wrapped the angle, special handling needed!\n if self._derivative_filt is None:\n # we have no derivative filter, take raw\n self._derivative = (self._value - self._valuek_1)/self.dt\n else:\n # filter the derivative\n pre_filt = (self._value - self._valuek_1)/self.dt\n self._derivative = self._derivative_filt.filter_value(pre_filt)\n\n def get_error(self, comparison=0.0):\n \"\"\"Returns the error between the current state value and a\n comparison.\"\"\"\n if np.isfinite(comparison):\n # limit comparison\n# if self._is_angle:\n# comparison = limit_angle(comparison)\n# comparison = np.clip(comparison, self._limit[0], self._limit[1])\n # wrap and return error\n if self._is_angle:\n comparison = wrap_angle(comparison)\n return wrap_angle(comparison - self.value)\n else:\n return comparison - self.value\n else:\n return 0.0\n\n def reset(self):\n \"\"\"Method resets the internal state of the state. Whoa man.\"\"\"\n # reset base\n super(ControlState, self).reset()\n # reset state tracker\n #self._value = np.clip(self._initial_value,\n # self._limit[0], self._limit[1])\n self._valuek_1 = np.clip(self._initial_value,\n self._limit[0], self._limit[1])\n # reset derivative\n self._derivative = 0.0\n # and derivative filter if available\n if self._derivative_filt is not None:\n self._derivative_filt.reset()\n # reset bias\n self._bias = 0.0\n\n @property\n def value(self):\n \"\"\"Return the current value of the state. Computed as the sum of the\n current value and an additive bias applied to the value.\"\"\"\n return self._value - self.bias\n\n @value.setter\n def value(self, input_value):\n \"\"\"Sets the value but without a timestamp. This method cannot compute a\n numerical derivative.\"\"\"\n if np.isfinite(input_value):\n # move the current value to the previous value spot\n self._valuek_1 = self._value\n # check angle status\n if self._is_angle:\n input_value = wrap_angle(input_value)\n # hold the current input value\n self._value = np.clip(input_value, self._limit[0], self._limit[1])\n\n @property\n def initial_value(self):\n \"\"\"Return the initial value of the state\"\"\"\n return self._initial_value\n\n @initial_value.setter\n def initial_value(self, input_value):\n \"\"\"Sets the initial_value as well as the value and previous values\"\"\"\n if np.isfinite(input_value):\n # hold the current input value\n self._initial_value = float(input_value)\n # current value of the state\n self._value = float(input_value)\n # previous value of the state\n self._valuek_1 = float(input_value)\n\n @property\n def derivative(self):\n \"\"\"Returns the current state derivative.\"\"\"\n if self._derivative_obj is None:\n # if the derivative is stored internally, report it\n return self._derivative\n else:\n # if the derivative comes from another state object, get it\n return self._derivative_obj.value\n\n @derivative.setter\n def derivative(self, input_value):\n \"\"\"Method sets the state derivative (as opposed to using the numerical\n derivative). 
Either a float value or an object may be provided\"\"\"\n if isinstance(input_value, ControlState):\n # we've been handed a handle to this state's derivative\n self._derivative_obj = input_value\n else:\n # set the numerical derivative flag false\n self._is_num_deriv = False\n # set the current derivative value\n if self._derivative_filt is None:\n # we have no derivative filter, take raw\n self._derivative = float(input_value)\n else:\n # filter the derivative\n self._derivative = self._derivative_filt.filter_value(input_value)\n\n @property\n def bias(self):\n \"\"\"Returns the current value of the state bias.\"\"\"\n if self._bias_obj is None:\n # we don't have a bias object, return the internally stored value\n return self._bias\n else:\n return self._bias_obj.value\n\n @bias.setter\n def bias(self, input_value):\n \"\"\"Sets the current value of the state bias.\"\"\"\n if isinstance(input_value, ControlState):\n # we've been handed a state object, use this going forward\n self._bias_obj = input_value\n else:\n self._bias = float(input_value)\n # we've been handed a value, send this to our state object\n #self._bias_obj.set_value(input_value)\n\nclass ErrorState(ControlState):\n \"\"\"ErrorState class is used to keep track of error-specific things on top\n of the functionality already contained in the ControlState class.\"\"\"\n def __init__(self, is_angle=False, derivative_filt=None):\n \"\"\"Initialize a memeber of the ErrorState class.\n\n Args:\n is_angle: boolean indicating whether this is an angular error\n derivative_filt: optional error derivative filter (must contain a\n function called filter_value(value) that returns a float)\n \"\"\"\n super(ErrorState, self).__init__(0.0, is_angle, derivative_filt)\n # hold onto the integrated error\n self._integral = 0.0\n # TODO: state rate limits\n # set limits on the magnitude of the integral\n self.set_integral_limit(-1e6, 1e6)\n\n def set_value(self, input_value, time=0.0):\n \"\"\"Method sets the current value of the state and updates previous\n values. If a time is provided and a derivative is not otherwise\n provided, a numerical derivative is taken here as well.\"\"\"\n # call the parent's implementation of this function\n super(ErrorState, self).set_value(input_value, time)\n # additionally, update the integral\n self._integral += (self._value + self._valuek_1)*self.dt/2.0\n # limit the size of the integral\n self._integral = np.clip(self._integral, self._integral_limit[0],\n self._integral_limit[1])\n\n def set_integral_limit(self, lower, upper):\n \"\"\"Method sets the lower and upper bounds that the state is allowed to\n acheive. Typically this is a bit of nonsense except for states where\n for example a negative value is nonsense (eg. airspeed). This is also\n useful in limiting an error state to reasonable bounds.\n\n Args:\n lower: float lower bound of the state\n upper: floar upper bound of the state\n \"\"\"\n self._integral_limit = [lower, upper]\n # TODO: I don't know that this is valid here..\n if self._is_angle:\n self._integral_limit = limit_angle(self._integral_limit)\n\n def reset(self):\n \"\"\"Method resets the internal state of the state. Whoa man.\"\"\"\n # call the parent's implementation of this function\n super(ErrorState, self).reset()\n # and clear out the integrated error\n self._integral = 0.0\n\n @property\n def integral(self):\n \"\"\"Return the integrated error.\"\"\"\n return self._integral\n\n @integral.setter\n def integral(self, input_value):\n \"\"\"Method is used to set the integral value. 
This is rarely used.\"\"\"\n self._integral = np.clip(input_value, self._integral_limit[0],\n self._integral_limit[1])\n\n",
"import copy\nimport pdb\n\nimport numpy\n\nimport geodesy.conversions\n\nclass Average(object):\n def __init__(self, n=None, n_max=numpy.inf, data=[], weights=[], dfun=None):\n \"\"\"Constructor\n\n Arguments:\n n: the number of samples to average over. Optional but must be\n specified when taking the average if not set\n n_max: the maximum number of points to keep. Defaults to inf so this\n will be a memory leak if this is not set!!!\n data: initial data, optional\n weights: initial weights, optional\n dfun: optional function to apply to data before averaging. default\n simply averages data\n\n Returns:\n class instance\n \"\"\"\n self._n = n\n self._n_max = n_max\n self._data = copy.deepcopy(data)\n self._weights = copy.deepcopy(weights)\n if dfun is None:\n dfun = lambda x: x\n self._dfun = dfun\n\n def add(self, x, w=1.0):\n \"\"\"add a new value\n\n Arguments:\n x: new value (can be list or numpy array of values)\n w: weight. optional, if specified must match dimension and type of x\n\n Returns:\n no returns\n \"\"\"\n self._data.append(x)\n self._weights.append(w)\n if len(self._data) > self._n_max:\n self._data.pop(0)\n if len(self._weights) > self._n_max:\n self._weights.pop(0)\n\n def set_n(self, n):\n \"\"\"Set the window length\n\n Arguments:\n n: number of samples to include in window\n\n Returns:\n no returns\n \"\"\"\n self._n = n\n\n def set_n_max(self, n_max):\n \"\"\"Set the number of samples saved\n\n Arguments:\n n_max: number of samples to save\n\n Returns:\n no returns\n \"\"\"\n self._n_max = n_max\n\n def mean(self, n=None):\n \"\"\"Get the value of this window filter\n\n Arguments:\n n: use this value for the filter length. optional, can use internal\n value for n\n\n Returns:\n v: value of the filter data over the previous n samples\n \"\"\"\n if n is None:\n assert self._n is not None, 'n must be set if not specified'\n n = self._n\n\n data = self._dfun(numpy.array(self._data[-n:]))\n weights = numpy.array(self._weights[-n:])\n weights /= numpy.sum(weights)\n\n return numpy.sum(data.T * weights, axis=-1).T\n\n def var(self, n=None):\n \"\"\"Get the variance of this window filter\n\n Arguments:\n n: use this value for the filter length. optional, can use internal\n value for n\n\n Returns:\n var: value of the filter data over the previous n samples\n \"\"\"\n if n is None:\n assert self._n is not None, 'n must be set if not specified'\n n = self._n\n\n mean = self.mean(n)\n data = self._dfun(numpy.array(self._data[-n:]))\n weights = numpy.array(self._weights[-n:])\n weights /= numpy.sum(weights)\n return (\n numpy.sum(numpy.power(data - mean, 2.0).T * weights, axis=-1).T /\n (1.0 - numpy.sum(numpy.power(weights, 2.0)))\n )\n\n def window(self, n=None):\n if n is None:\n assert self._n is not None, 'n must be set if not specified'\n n = self._n\n\n return self._dfun(numpy.array(self._data[-n:]))\n\nclass TimeAverage(object):\n def __init__(\n self, t=None, t_max=numpy.inf, data=[], t0=[], weights=[], dfun=None):\n \"\"\"Constructor\n\n Arguments:\n t: the length of time average over. Optional but must be\n specified when taking the average if not set\n t_max: the time to keep. Defaults to inf so this\n will be a memory leak if this is not set!!!\n data: initial data, optional\n t0: initial time, optional\n weights: initial weights, optional\n dfun: optional function to apply to data before averaging. 
default\n simply averages data\n\n Returns:\n class instance\n \"\"\"\n self._t_avg = t\n self._t_max = t_max\n self._data = copy.deepcopy(data)\n self._t = copy.deepcopy(t0)\n self._weights = copy.deepcopy(weights)\n\n if dfun is None:\n dfun = lambda x: x\n self._dfun = dfun\n\n def add(self, x, t, w=1.0):\n \"\"\"add a new value\n\n Arguments:\n x: new value (can be number, list, or numpy array of values)\n t: new data (can be number, list, or numpy array of value)\n w: weight. optional, if specified must match dimension and type of x\n\n Returns:\n no returns\n \"\"\"\n self._data.append(x)\n self._t.append(t)\n self._weights.append(w)\n while (self._t[-1] - self._t[0]) > self._t_max:\n self._data.pop(0)\n self._t.pop(0)\n self._weights.pop(0)\n\n def set_t(self, t):\n \"\"\"Set the window length\n\n Arguments:\n t: window length in seconds\n\n Returns:\n no returns\n \"\"\"\n self._t_avg = t\n\n def set_t_max(self, t_max):\n \"\"\"Set the length of time to save\n\n Arguments:\n t_max: max time to save (seconds)\n\n Returns:\n no returns\n \"\"\"\n self._t_max = t_max\n while (self._t[-1] - self._t[0]) > self._t_max:\n self._data.pop(0)\n self._t.pop(0)\n self._weights.pop(0)\n\n def mean(self, t_avg=None, t_ref=None):\n \"\"\"Get the value of this window filter\n\n Arguments:\n t_avg: use this value for the window length. optional, can use internal\n value for t\n t_ref: use this for the reference time when computing the age of\n each sample. Optional, defaults to the last sample time\n\n Returns:\n v: value of the filter data over the previous n samples\n \"\"\"\n tslice = self.slice(t_avg, t_ref)\n\n weights = numpy.array(self._weights)[tslice]\n weights /= numpy.sum(weights)\n\n mean = numpy.sum(\n self._dfun(numpy.array(self._data)[tslice]).T * weights,\n axis=-1).T\n return mean\n\n def var(self, t=None, t_ref=None):\n \"\"\"Get the variance of this window filter\n\n Arguments:\n t: use this value for the window length. optional, can use internal\n value for t\n t_ref: use this for the reference time when computing the age of\n each sample. Optional, defaults to the last sample time\n\n Returns:\n var: value of the filter data over the previous n samples\n \"\"\"\n tslice = self.slice(t_avg, t_ref)\n\n weights = numpy.array(self._weights)[tslice]\n weights /= numpy.sum(weights)\n data = self.dfun(numpy.array(self._data)[tslice])\n\n mean = self.mean(t, t_ref)\n return (\n numpy.sum(numpy.power(data - mean, 2.0) * weights, axis=-1) /\n (1.0 - numpy.sum(numpy.power(weights, 2.0)))\n )\n\n def slice(self, t_avg=None, t_ref=None):\n \"\"\"Get the slice of data to work with\n\n Arguments:\n t: use this value for the window length. optional, can use internal\n value for t\n t_ref: use this for the reference time when computing the age of\n each sample. Optional, defaults to the last sample time\n\n Returns:\n tslice: array slice index for saved data\n \"\"\"\n if t_avg is None:\n assert self._t is not None, 't must be set if not specified'\n t_avg = self._t_avg\n\n if t_ref is None:\n t_ref = self._t[-1]\n dt = t_ref - numpy.array(self._t)\n tslice = dt < t_avg\n return tslice\n\n def window(self, t_avg=None, t_ref=None):\n \"\"\"Get the window of data\n\n Arguments:\n t: use this value for the window length. optional, can use internal\n value for t\n t_ref: use this for the reference time when computing the age of\n each sample. 
Optional, defaults to the last sample time\n\n Returns:\n tslice: array slice index for saved data\n \"\"\"\n tslice = self.slice(t_avg, t_ref)\n return self._dfun(numpy.array(self._data)[tslice]).T\n\nclass GeodesicAverage(Average):\n \"\"\"A queue which has lat/long/alt as the first three elements of its data\n\n The LLA points will be converted to NED relative to the last point before\n averaging and then converted back, avoiding nonlinearity issues when\n averaging LLA\n \"\"\"\n def __init__(self, n=None, n_max=numpy.inf, data=[], weights=[]):\n \"\"\"Constructor\n\n Arguments:\n n: the number of samples to average over. Optional but must be\n specified when taking the average if not set\n n_max: the maximum number of points to keep. Defaults to inf so this\n will be a memory leak if this is not set!!!\n data: initial data, optional\n weights: initial weights, optional\n\n Returns:\n class instance\n \"\"\"\n super(GeodesicAverage, self).__init__(\n n, n_max, data, weights, self._lla_to_ned)\n\n def _lla_to_ned(self, x):\n \"\"\"Convert the first three elements of the data to ned for averaging\n \"\"\"\n x[:,:3] = geodesy.conversions.lla_to_ned(\n x[:,:3], numpy.array(self._data[-1][:3], ndmin=2))\n return x\n\n def _ned_to_lla(self, x):\n x = numpy.array(x, ndmin=2)\n x[:,:3] = geodesy.conversions.ned_to_lla(\n x[:,:3], numpy.array(self._data[-1][:3], ndmin=2))\n return numpy.squeeze(x)\n\n def mean(self, n=None):\n \"\"\"Get the value of this window filter\n\n Arguments:\n n: use this value for the filter length. optional, can use internal\n value for n\n\n Returns:\n v: value of the filter data over the previous n samples\n \"\"\"\n mean = super(GeodesicAverage, self).mean(n)\n return self._ned_to_lla(mean)\n\nclass GeodesicTimeAverage(TimeAverage):\n \"\"\"A queue which has lat/long/alt as the first three elements of its data\n\n The LLA points will be converted to NED relative to the last point before\n averaging and then converted back, avoiding nonlinearity issues when\n averaging LLA\n \"\"\"\n def __init__(\n self, t=None, t_max=numpy.inf, data=[], t0=[], weights=[]):\n \"\"\"Constructor\n\n Arguments:\n t: the length of time average over. Optional but must be\n specified when taking the average if not set\n t_max: the time to keep. Defaults to inf so this\n will be a memory leak if this is not set!!!\n data: initial data, optional\n t0: initial time, optional\n weights: initial weights, optional\n\n Returns:\n class instance\n \"\"\"\n super(GeodesicTimeAverage, self).__init__(\n t, t_max, data, t0, weights, self._lla_to_ned)\n\n def _lla_to_ned(self, x):\n \"\"\"Convert the first three elements of the data to ned for averaging\n \"\"\"\n x[:,:3] = geodesy.conversions.lla_to_ned(\n x[:,:3], numpy.array(self._data[-1][:3], ndmin=2))\n return x\n\n def _ned_to_lla(self, x):\n x = numpy.array(x, ndmin=2)\n x[:,:3] = geodesy.conversions.ned_to_lla(\n x[:,:3], numpy.array(self._data[-1][:3], ndmin=2))\n return numpy.squeeze(x)\n\n def mean(self, t_avg=None, t_ref=None):\n \"\"\"Get the value of this window filter\n\n Arguments:\n t: use this value for the window length. optional, can use internal\n value for t\n t_ref: use this for the reference time when computing the age of\n each sample. Optional, defaults to the last sample time\n\n Returns:\n v: value of the filter data over the previous n samples\n \"\"\"\n mean = super(GeodesicTimeAverage, self).mean(t_avg, t_ref)\n return self._ned_to_lla(mean)\n\n"
] | [
[
"numpy.amin",
"numpy.amax"
],
[
"numpy.rad2deg"
],
[
"numpy.isfinite",
"numpy.clip"
],
[
"numpy.squeeze",
"numpy.array",
"numpy.sum",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TomekTrzeciak/improver | [
"74b7bc0d194c30ea7af426d153e5047ccb67f60c",
"74b7bc0d194c30ea7af426d153e5047ccb67f60c"
] | [
"lib/improver/generate_ancillaries/generate_ancillary.py",
"lib/improver/tests/utilities/test_rescale.py"
] | [
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2019 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Module containing ancillary generation utilities for Improver\"\"\"\n\nimport iris\nimport numpy as np\nfrom cf_units import Unit\n\n\ndef _make_mask_cube(\n mask_data, coords, topographic_bounds, topographic_units,\n sea_points_included=False):\n \"\"\"\n Makes cube from numpy masked array generated from orography fields.\n\n Args:\n mask_data (numpy.ma.core.MaskedArray):\n The numpy array to make a cube from.\n coords (dict):\n Dictionary of coordinate on the model ancillary file.\n topographic_bounds(list):\n List containing the lower and upper thresholds defining the mask\n topographic_units (str):\n Name of the units of the topographic zone coordinate of the output\n cube.\n sea_points_included (bool):\n Default is False. 
Value for the output cube attribute\n 'topographic_zones_include_seapoints', signifying whether sea\n points have been included when the ancillary is generated.\n\n Returns:\n mask_cube (iris.cube.Cube):\n Cube containing the mask_data array, with appropriate coordinate\n and attribute information.\n \"\"\"\n mask_cube = iris.cube.Cube(mask_data, long_name='Topography mask')\n if any([item is None for item in topographic_bounds]):\n msg = (\"The topographic bounds variable should have both an \"\n \"upper and lower limit: \"\n \"Your topographic_bounds are {}\")\n raise TypeError(msg.format(topographic_bounds))\n elif len(topographic_bounds) != 2:\n msg = (\"The topographic bounds variable should have only an \"\n \"upper and lower limit: \"\n \"Your topographic_bounds variable has length {}\")\n raise TypeError(msg.format(len(topographic_bounds)))\n else:\n coord_name = 'topographic_zone'\n central_point = np.mean(topographic_bounds)\n threshold_coord = iris.coords.AuxCoord(central_point,\n bounds=topographic_bounds,\n long_name=coord_name,\n units=Unit(topographic_units))\n mask_cube.add_aux_coord(threshold_coord)\n # We can't save attributes with boolean values so convert to string.\n mask_cube.attributes.update(\n {'topographic_zones_include_seapoints': str(sea_points_included)})\n for coord in coords:\n if coord.name() in ['projection_y_coordinate', 'latitude']:\n mask_cube.add_dim_coord(coord, 0)\n elif coord.name() in ['projection_x_coordinate', 'longitude']:\n mask_cube.add_dim_coord(coord, 1)\n else:\n mask_cube.add_aux_coord(coord)\n mask_cube = iris.util.new_axis(mask_cube, scalar_coord=coord_name)\n return mask_cube\n\n\nclass CorrectLandSeaMask(object):\n \"\"\"\n Round landsea mask to binary values\n\n Corrects interpolated land sea masks to boolean values of\n False [sea] and True [land].\n \"\"\"\n def __init__(self):\n pass\n\n def __repr__(self):\n \"\"\"Represent the configured plugin instance as a string\"\"\"\n result = ('<CorrectLandSeaMask>')\n return result\n\n @staticmethod\n def process(standard_landmask):\n \"\"\"Read in the interpolated landmask and round values < 0.5 to False\n and values >=0.5 to True.\n\n Args:\n standard_landmask (iris.cube.Cube):\n input landmask on standard grid.\n\n Returns:\n standard_landmask (iris.cube.Cube):\n output landmask of boolean values.\n \"\"\"\n mask_sea = np.ma.masked_less(standard_landmask.data, 0.5).mask\n standard_landmask.data[mask_sea] = False\n mask_land = np.ma.masked_greater(standard_landmask.data, 0.).mask\n standard_landmask.data[mask_land] = True\n return standard_landmask\n\n\nclass GenerateOrographyBandAncils(object):\n \"\"\"\n Generate topographic band ancillaries for the standard grids.\n\n Reads orography files, then generates binary mask\n of land points within the orography band specified.\n \"\"\"\n def __init__(self):\n pass\n\n def __repr__(self):\n \"\"\"Represent the configured plugin instance as a string.\"\"\"\n result = ('<GenerateOrographyBandAncils>')\n return result\n\n @staticmethod\n def sea_mask(landmask, orog_band, sea_fill_value=None):\n \"\"\"\n Function to mask sea points and substitute the default numpy\n fill value behind this mask_cube.\n\n Args:\n landmask (numpy.ndarray):\n The landmask generated by gen_landmask.\n orog_band (numpy.ndarray):\n The binary array to which the landmask will be applied.\n sea_fill_value (float):\n A fill value to set sea points to and leave the output\n unmasked, rather than the default behaviour of returning a\n masked array with a default fill 
value.\n\n Returns:\n mask_data (numpy.ndarray):\n An array where the sea points have been masked out and filled\n with a default fill value, or just filled with the given\n sea_fill_value and not masked.\n \"\"\"\n points_to_mask = np.logical_not(landmask)\n\n if sea_fill_value is None:\n mask_data = np.ma.masked_where(points_to_mask, orog_band)\n sea_fill_value = np.ma.default_fill_value(mask_data.data)\n mask_data.data[points_to_mask] = sea_fill_value\n else:\n mask_data = orog_band\n mask_data[points_to_mask] = sea_fill_value\n return mask_data\n\n def gen_orography_masks(\n self, standard_orography, standard_landmask, thresholds,\n units='m'):\n \"\"\"\n Function to generate topographical band masks.\n\n For each threshold defined in 'thresholds', a cube with 0 over sea\n points and 1 for land points within the topography band will be\n generated.\n The lower threshold is exclusive to the band whilst the upper\n threshold is inclusive i.e:\n lower_threshold < band <= upper_threshold\n\n\n For example, for threshold pair [1,3] with orography::\n\n [[0 0 2] and sea mask: [[0 0 1]\n [2 2 3] [1 1 1]\n [0 1 4]] [0 1 1]]\n\n the resultant array will be::\n\n [[0 0 1]\n [0 1 1]\n [0 0 0]]\n\n Args:\n standard_orography (iris.cube.Cube):\n The standard orography.\n standard_landmask (iris.cube.Cube):\n The landmask generated by gen_landmask.\n thresholds(list):\n Upper and/or lower thresholds of the current topographical\n band.\n units (str):\n Units to be fed to CF_units to create a unit for the cube.\n The unit must be convertable to meters. If no unit is given\n this will default to meters.\n\n Returns:\n mask_cube (iris.cube.Cube):\n Cube containing topographical band mask.\n\n Raises:\n KeyError: if the key does not match any in THRESHOLD_DICT.\n \"\"\"\n thresholds = np.array(thresholds, dtype=np.float32)\n thresholds = Unit(units).convert(\n thresholds, standard_orography.units)\n coords = standard_orography.coords()\n\n lower_threshold, upper_threshold = thresholds\n\n orog_band = np.ma.masked_where(\n np.ma.logical_and(\n (standard_orography.data > lower_threshold),\n (standard_orography.data <= upper_threshold)),\n standard_orography.data).mask.astype(int)\n\n # If we didn't find any points to mask, set all points to zero i.e\n # masked.\n if not isinstance(orog_band, np.ndarray):\n orog_band = np.zeros(standard_orography.data.shape).astype(int)\n\n if standard_landmask is not None:\n mask_data = self.sea_mask(standard_landmask.data, orog_band,\n sea_fill_value=0)\n mask_cube = _make_mask_cube(\n mask_data, coords, topographic_bounds=thresholds,\n topographic_units=standard_orography.units)\n else:\n mask_cube = _make_mask_cube(\n orog_band, coords, topographic_bounds=thresholds,\n topographic_units=standard_orography.units,\n sea_points_included=True)\n\n mask_cube.units = Unit('1')\n return mask_cube\n\n def process(self, orography, thresholds_dict, landmask=None):\n \"\"\"Loops over the supplied orographic bands, adding a cube\n for each band to the mask cubelist.\n\n Args:\n orography (iris.cube.Cube):\n orography on standard grid.\n thresholds_dict (dict):\n Definition of orography bands required. Has key-value pairs of\n \"bounds\": list of list of pairs of bounds for each band and\n \"units\":\"string containing units of bounds\", for example::\n\n {'bounds':[[0,100], [100,200]], 'units': \"m\"}\n\n landmask (iris.cube.Cube):\n land mask on standard grid. 
If provided sea points are set to\n zero in every band.\n\n Returns:\n cubelist (iris.cube.CubeList):\n list of orographic band mask cubes.\n \"\"\"\n cubelist = iris.cube.CubeList()\n if len(thresholds_dict) == 0:\n msg = 'No threshold(s) found for topographic bands.'\n raise ValueError(msg)\n\n for limits in thresholds_dict['bounds']:\n oro_band = self.gen_orography_masks(\n orography, landmask,\n limits, thresholds_dict['units'])\n cubelist.append(oro_band)\n return cubelist\n",
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# (C) British Crown Copyright 2017-2019 Met Office.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for the rescale function from rescale.py.\"\"\"\n\nimport unittest\nfrom datetime import datetime\n\nimport numpy as np\nfrom iris.tests import IrisTest\n\nfrom improver.tests.set_up_test_cubes import set_up_variable_cube\nfrom improver.utilities.rescale import rescale, apply_double_scaling\n\n\nclass Test_rescale(IrisTest):\n\n \"\"\"Test the utilities.rescale rescale function.\"\"\"\n\n def setUp(self):\n \"\"\"Create a cube of ones with a single zero point.\"\"\"\n self.cube = set_up_variable_cube(\n np.ones((1, 16, 16), dtype=np.float32))\n self.cube.data[0, 7, 7] = 0.\n\n def test_basic(self):\n \"\"\"Test that the method returns the expected array type\"\"\"\n result = rescale(self.cube.data)\n self.assertIsInstance(result, np.ndarray)\n\n def test_zero_range_input(self):\n \"\"\"Test that the method returns the expected error\"\"\"\n msg = \"Cannot rescale a zero input range\"\n with self.assertRaisesRegex(ValueError, msg):\n rescale(self.cube.data, data_range=[0, 0])\n\n def test_zero_range_output(self):\n \"\"\"Test that the method returns the expected error\"\"\"\n msg = \"Cannot rescale a zero output range\"\n with self.assertRaisesRegex(ValueError, msg):\n rescale(self.cube.data, scale_range=[4, 4])\n\n def test_rescaling_inrange(self):\n \"\"\"Test that the method returns the expected values when in range\"\"\"\n expected = self.cube.data.copy()\n expected[...] = 110.\n expected[0, 7, 7] = 100.\n result = rescale(self.cube.data, data_range=(0., 1.),\n scale_range=(100., 110.))\n self.assertArrayAlmostEqual(result, expected)\n\n def test_rescaling_outrange(self):\n \"\"\"Test that the method gives the expected values when out of range\"\"\"\n expected = self.cube.data.copy()\n expected[...] 
= 108.\n expected[0, 7, 7] = 98.\n result = rescale(self.cube.data, data_range=(0.2, 1.2),\n scale_range=(100., 110.))\n self.assertArrayAlmostEqual(result, expected)\n\n def test_clip(self):\n \"\"\"Test that the method clips values when out of range\"\"\"\n expected = self.cube.data.copy()\n expected[...] = 108.\n expected[0, 7, 7] = 100.\n result = rescale(self.cube.data, data_range=(0.2, 1.2),\n scale_range=(100., 110.), clip=True)\n self.assertArrayAlmostEqual(result, expected)\n\n\nclass Test_apply_double_scaling(IrisTest):\n\n \"\"\"Test the apply_double_scaling method.\"\"\"\n\n def setUp(self):\n \"\"\"Create cubes with a single zero prob(precip) point.\n The cubes look like this:\n precipitation_amount / (kg m^-2)\n Dimension coordinates:\n projection_y_coordinate: 4;\n projection_x_coordinate: 4;\n Scalar coordinates:\n time: 2015-11-23 03:00:00\n forecast_reference_time: 2015-11-23 03:00:00\n forecast_period (on time coord): 0.0 hours\n Data:\n self.cube_a:\n All points contain float(1.)\n self.cube_b:\n All points contain float(1.)\n \"\"\"\n self.cube_a = set_up_variable_cube(\n np.ones((4, 4), dtype=np.float32),\n time=datetime(2015, 11, 23, 3, 0),\n frt=datetime(2015, 11, 23, 3, 0))\n\n self.cube_b = set_up_variable_cube(\n np.ones((4, 4), dtype=np.float32),\n time=datetime(2015, 11, 23, 3, 0),\n frt=datetime(2015, 11, 23, 3, 0))\n\n self.thr_a = (0.1, 0.5, 0.8)\n self.thr_b = (0.0, 0.5, 0.9)\n\n def test_basic(self):\n \"\"\"Test that the method returns the expected cube type\"\"\"\n result = apply_double_scaling(self.cube_a,\n self.cube_b,\n self.thr_a,\n self.thr_b)\n self.assertIsInstance(result, np.ndarray)\n\n def test_input(self):\n \"\"\"Test that the method does not modify the input cubes.\"\"\"\n cube_a = self.cube_a.copy()\n cube_b = self.cube_b.copy()\n apply_double_scaling(self.cube_a,\n self.cube_b,\n self.thr_a,\n self.thr_b)\n self.assertArrayAlmostEqual(cube_a.data, self.cube_a.data)\n self.assertArrayAlmostEqual(cube_b.data, self.cube_b.data)\n\n def test_values_default(self):\n \"\"\"Test that the method returns the expected data values with default\n minimum function\"\"\"\n # Create an array of correct shape and fill with expected value\n expected = np.full_like(self.cube_a.data, 0.9)\n # Row zero should be changed to all-zeroes\n expected[0, :] = [0., 0., 0., 0.]\n # Row one should be like cube_a but with most values reduced to 0.5\n expected[1, :] = [0.0, 0.4, 0.5, 0.5]\n # Row two should be like cube_a but with late values limited to 0.9\n expected[2, :] = [0.0, 0.4, 0.8, 0.9]\n self.cube_a.data[0, :] = [0., 0., 0., 0.]\n self.cube_a.data[1, :] = [0.5, 0.5, 0.5, 0.5]\n self.cube_a.data[2, :] = [1., 1., 1., 1.]\n self.cube_b.data[0, :] = np.arange(0., 1.6, 0.4)\n self.cube_b.data[1, :] = np.arange(0., 1.6, 0.4)\n self.cube_b.data[2, :] = np.arange(0., 1.6, 0.4)\n result = apply_double_scaling(self.cube_a,\n self.cube_b,\n self.thr_a,\n self.thr_b)\n self.assertArrayAlmostEqual(result, expected)\n\n def test_values_max(self):\n \"\"\"Test that the method returns the expected data values with max\n function\"\"\"\n expected = self.cube_a.data.copy()\n # Row zero should be unchanged from ltng_cube\n expected[0, :] = np.arange(0., 1.6, 0.4)\n # Row one should be like cube_a but with early values raised to 0.5\n expected[1, :] = [0.5, 0.5, 0.8, 1.2]\n # Row two should be like cube_a but with most values raised to 0.9\n expected[2, :] = [0.9, 0.9, 0.9, 1.2]\n self.cube_a.data[0, :] = [0., 0., 0., 0.]\n self.cube_a.data[1, :] = [0.5, 0.5, 0.5, 0.5]\n 
self.cube_a.data[2, :] = [1., 1., 1., 1.]\n self.cube_b.data[0, :] = np.arange(0., 1.6, 0.4)\n self.cube_b.data[1, :] = np.arange(0., 1.6, 0.4)\n self.cube_b.data[2, :] = np.arange(0., 1.6, 0.4)\n result = apply_double_scaling(self.cube_a,\n self.cube_b,\n self.thr_a,\n self.thr_b,\n combine_function=np.maximum)\n self.assertArrayAlmostEqual(result, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.logical_not",
"numpy.ma.masked_less",
"numpy.ma.masked_where",
"numpy.ma.default_fill_value",
"numpy.ma.masked_greater",
"numpy.mean",
"numpy.ma.logical_and",
"numpy.array",
"numpy.zeros"
],
[
"numpy.full_like",
"numpy.arange",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ysa2106/geco_data | [
"4f0319c01a5cc066b13511ed3f34df87ace822c2",
"4f0319c01a5cc066b13511ed3f34df87ace822c2"
] | [
"geco_diagnostic_plot_pages.py",
"geco_irig_decode.py"
] | [
"#!/usr/bin/env python\n# (c) Stefan Countryman 2017\n\nDESC=\"\"\"Plot a list of timing diagnostic channels, which will be read from\nstdin as a newline-delimited channel list, for a time window around a given\nGPS time. This script can also generate a summary webpage for easy viewing of\nplot results. Use this script when there is a timing error to make plots on a\nbunch of timing channels to quickly find the source of the problem.\"\"\"\nDT = 30\nMAX_SIMULTANEOUS_CHANS = 5\n# channels to use if there are none specified via CLI\nDEFAULT_CHANNELS=[\n \"L1:SYS-TIMING_C_GPS_A_ERROR_FLAG\",\n \"L1:SYS-TIMING_C_GPS_A_ERROR_FLAG\",\n \"L1:SYS-TIMING_C_GPS_A_LATITUDE\",\n \"L1:SYS-TIMING_C_GPS_A_LONGITUDE\",\n \"L1:SYS-TIMING_C_GPS_A_ALTITUDE\",\n \"L1:SYS-TIMING_C_GPS_A_SURVEYPROGRESS\",\n \"L1:SYS-TIMING_C_GPS_A_HOLDOVERDURATION\",\n \"L1:SYS-TIMING_C_GPS_A_DACVOLTAGE\",\n \"L1:SYS-TIMING_C_GPS_A_TEMPERATURE\",\n \"L1:SYS-TIMING_C_GPS_A_RECEIVERMODE\",\n \"L1:SYS-TIMING_C_GPS_A_DECODINGSTATUS\",\n \"L1:SYS-TIMING_C_GPS_A_TIMEVALID\",\n \"L1:SYS-TIMING_C_GPS_A_UTCOFFSET\",\n \"L1:SYS-TIMING_C_GPS_A_TIMESOURCE\",\n \"L1:SYS-TIMING_C_GPS_A_PPSSOURCE\",\n \"L1:SYS-TIMING_C_GPS_A_ALMANACINCOMPLETE\",\n \"L1:SYS-TIMING_C_GPS_A_NOTTRACKINGSATTELITES\",\n \"L1:SYS-TIMING_C_GPS_A_SURVEYINPROGRESS\",\n \"L1:SYS-TIMING_C_GPS_A_GPS\",\n \"L1:SYS-TIMING_C_GPS_A_YEAR\",\n \"L1:SYS-TIMING_C_GPS_A_MONTH\",\n \"L1:SYS-TIMING_C_GPS_A_DAY\",\n \"L1:SYS-TIMING_C_GPS_A_HOUR\",\n \"L1:SYS-TIMING_C_GPS_A_MINUTE\",\n \"L1:SYS-TIMING_C_GPS_A_SECOND\",\n \"L1:SYS-TIMING_C_GPS_A_LEAP\",\n \"L1:SYS-TIMING_C_GPS_A_LEAPSECONDPENDING\",\n \"L1:SYS-TIMING_C_GPS_A_WEEK\",\n \"L1:SYS-TIMING_C_GPS_A_TOW\",\n \"L1:SYS-TIMING_C_GPS_A_PPSOFFSET\",\n \"L1:SYS-TIMING_C_GPS_A_DISCIPLININGMODE\",\n \"L1:SYS-TIMING_C_GPS_A_DISCIPLININGACTIVITY\",\n \"L1:SYS-TIMING_C_GPS_A_DACATRAIL\",\n \"L1:SYS-TIMING_C_GPS_A_DACNEARRAIL\",\n \"L1:SYS-TIMING_C_GPS_A_NOSTOREDPOSITION\",\n \"L1:SYS-TIMING_C_GPS_A_POSITIONQUESTIONABLE\",\n \"L1:SYS-TIMING_C_GPS_A_NOPPS\",\n \"L1:SYS-TIMING_C_GPS_A_ANTENNAOPEN\",\n \"L1:SYS-TIMING_C_GPS_A_ANTENNASHORTED\",\n \"L1:SYS-TIMING_C_GPS_A_ERROR_FLAG\",\n \"L1:SYS-TIMING_C_GPS_A_ERROR_CODE\",\n\n \"L1:SYS-TIMING_X_GPS_A_ERROR_FLAG\",\n \"L1:SYS-TIMING_X_GPS_A_LATITUDE\",\n \"L1:SYS-TIMING_X_GPS_A_LONGITUDE\",\n \"L1:SYS-TIMING_X_GPS_A_ALTITUDE\",\n \"L1:SYS-TIMING_X_GPS_A_SPEED3D\",\n \"L1:SYS-TIMING_X_GPS_A_SPEED2D\",\n \"L1:SYS-TIMING_X_GPS_A_HEADING\",\n \"L1:SYS-TIMING_X_GPS_A_DOP\",\n \"L1:SYS-TIMING_X_GPS_A_VISSATELLITES\",\n \"L1:SYS-TIMING_X_GPS_A_TRACKSATELLITES\",\n \"L1:SYS-TIMING_X_GPS_A_TIMEVALID\",\n \"L1:SYS-TIMING_X_GPS_A_RECEIVERMODE\",\n \"L1:SYS-TIMING_X_GPS_A_UTCOFFSET\",\n \"L1:SYS-TIMING_X_GPS_A_TIMESOURCE\",\n \"L1:SYS-TIMING_X_GPS_A_ALMANACINCOMPLETE\",\n \"L1:SYS-TIMING_X_GPS_A_GPS\",\n \"L1:SYS-TIMING_X_GPS_A_YEAR\",\n \"L1:SYS-TIMING_X_GPS_A_MONTH\",\n \"L1:SYS-TIMING_X_GPS_A_DAY\",\n \"L1:SYS-TIMING_X_GPS_A_HOUR\",\n \"L1:SYS-TIMING_X_GPS_A_MINUTE\",\n \"L1:SYS-TIMING_X_GPS_A_SECOND\",\n \"L1:SYS-TIMING_X_GPS_A_LEAP\",\n \"L1:SYS-TIMING_X_GPS_A_WEEK\",\n \"L1:SYS-TIMING_X_GPS_A_TOW\",\n \"L1:SYS-TIMING_X_GPS_A_NOTTRACKINGSATTELITES\",\n \"L1:SYS-TIMING_X_GPS_A_SURVEYINPROGRESS\",\n \"L1:SYS-TIMING_X_GPS_A_ANTENNAOPEN\",\n \"L1:SYS-TIMING_X_GPS_A_ANTENNASHORTED\",\n \"L1:SYS-TIMING_X_GPS_A_NARROWBAND\",\n \"L1:SYS-TIMING_X_GPS_A_FASTACQUISITION\",\n \"L1:SYS-TIMING_X_GPS_A_FILTERRESET\",\n \"L1:SYS-TIMING_X_GPS_A_POSITIONLOCK\",\n \"L1:SYS-TIMING_X_GPS_A_DIFFERENTIALFIX\",\n\n 
\"L1:SYS-TIMING_Y_GPS_A_ERROR_FLAG\",\n \"L1:SYS-TIMING_Y_GPS_A_LATITUDE\",\n \"L1:SYS-TIMING_Y_GPS_A_LONGITUDE\",\n \"L1:SYS-TIMING_Y_GPS_A_ALTITUDE\",\n \"L1:SYS-TIMING_Y_GPS_A_SPEED3D\",\n \"L1:SYS-TIMING_Y_GPS_A_SPEED2D\",\n \"L1:SYS-TIMING_Y_GPS_A_HEADING\",\n \"L1:SYS-TIMING_Y_GPS_A_DOP\",\n \"L1:SYS-TIMING_Y_GPS_A_VISSATELLITES\",\n \"L1:SYS-TIMING_Y_GPS_A_TRACKSATELLITES\",\n \"L1:SYS-TIMING_Y_GPS_A_TIMEVALID\",\n \"L1:SYS-TIMING_Y_GPS_A_RECEIVERMODE\",\n \"L1:SYS-TIMING_Y_GPS_A_UTCOFFSET\",\n \"L1:SYS-TIMING_Y_GPS_A_TIMESOURCE\",\n \"L1:SYS-TIMING_Y_GPS_A_ALMANACINCOMPLETE\",\n \"L1:SYS-TIMING_Y_GPS_A_GPS\",\n \"L1:SYS-TIMING_Y_GPS_A_YEAR\",\n \"L1:SYS-TIMING_Y_GPS_A_MONTH\",\n \"L1:SYS-TIMING_Y_GPS_A_DAY\",\n \"L1:SYS-TIMING_Y_GPS_A_HOUR\",\n \"L1:SYS-TIMING_Y_GPS_A_MINUTE\",\n \"L1:SYS-TIMING_Y_GPS_A_SECOND\",\n \"L1:SYS-TIMING_Y_GPS_A_LEAP\",\n \"L1:SYS-TIMING_Y_GPS_A_WEEK\",\n \"L1:SYS-TIMING_Y_GPS_A_TOW\",\n \"L1:SYS-TIMING_Y_GPS_A_NOTTRACKINGSATTELITES\",\n \"L1:SYS-TIMING_Y_GPS_A_SURVEYINPROGRESS\",\n \"L1:SYS-TIMING_Y_GPS_A_ANTENNAOPEN\",\n \"L1:SYS-TIMING_Y_GPS_A_ANTENNASHORTED\",\n \"L1:SYS-TIMING_Y_GPS_A_NARROWBAND\",\n \"L1:SYS-TIMING_Y_GPS_A_FASTACQUISITION\",\n \"L1:SYS-TIMING_Y_GPS_A_FILTERRESET\",\n \"L1:SYS-TIMING_Y_GPS_A_POSITIONLOCK\",\n \"L1:SYS-TIMING_Y_GPS_A_DIFFERENTIALFIX\",\n\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_0_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_1_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_2_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_3_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_4_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_5_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_6_DIFF\",\n \"L1:SYS-TIMING_C_PPS_B_SIGNAL_7_DIFF\",\n\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_0_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_1_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_2_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_3_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_4_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_5_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_6_DIFF\",\n \"L1:SYS-TIMING_X_PPS_A_SIGNAL_7_DIFF\",\n\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_0_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_1_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_2_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_3_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_4_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_5_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_6_DIFF\",\n \"L1:SYS-TIMING_Y_PPS_A_SIGNAL_7_DIFF\",\n\n \"H1:SYS-TIMING_C_GPS_A_ERROR_FLAG\",\n \"H1:SYS-TIMING_C_GPS_A_ERROR_FLAG\",\n \"H1:SYS-TIMING_C_GPS_A_LATITUDE\",\n \"H1:SYS-TIMING_C_GPS_A_LONGITUDE\",\n \"H1:SYS-TIMING_C_GPS_A_ALTITUDE\",\n \"H1:SYS-TIMING_C_GPS_A_SURVEYPROGRESS\",\n \"H1:SYS-TIMING_C_GPS_A_HOLDOVERDURATION\",\n \"H1:SYS-TIMING_C_GPS_A_DACVOLTAGE\",\n \"H1:SYS-TIMING_C_GPS_A_TEMPERATURE\",\n \"H1:SYS-TIMING_C_GPS_A_RECEIVERMODE\",\n \"H1:SYS-TIMING_C_GPS_A_DECODINGSTATUS\",\n \"H1:SYS-TIMING_C_GPS_A_TIMEVALID\",\n \"H1:SYS-TIMING_C_GPS_A_UTCOFFSET\",\n \"H1:SYS-TIMING_C_GPS_A_TIMESOURCE\",\n \"H1:SYS-TIMING_C_GPS_A_PPSSOURCE\",\n \"H1:SYS-TIMING_C_GPS_A_ALMANACINCOMPLETE\",\n \"H1:SYS-TIMING_C_GPS_A_NOTTRACKINGSATTELITES\",\n \"H1:SYS-TIMING_C_GPS_A_SURVEYINPROGRESS\",\n \"H1:SYS-TIMING_C_GPS_A_GPS\",\n \"H1:SYS-TIMING_C_GPS_A_YEAR\",\n \"H1:SYS-TIMING_C_GPS_A_MONTH\",\n \"H1:SYS-TIMING_C_GPS_A_DAY\",\n \"H1:SYS-TIMING_C_GPS_A_HOUR\",\n \"H1:SYS-TIMING_C_GPS_A_MINUTE\",\n \"H1:SYS-TIMING_C_GPS_A_SECOND\",\n \"H1:SYS-TIMING_C_GPS_A_LEAP\",\n \"H1:SYS-TIMING_C_GPS_A_LEAPSECONDPENDING\",\n \"H1:SYS-TIMING_C_GPS_A_WEEK\",\n \"H1:SYS-TIMING_C_GPS_A_TOW\",\n \"H1:SYS-TIMING_C_GPS_A_PPSOFFSET\",\n 
\"H1:SYS-TIMING_C_GPS_A_DISCIPLININGMODE\",\n \"H1:SYS-TIMING_C_GPS_A_DISCIPLININGACTIVITY\",\n \"H1:SYS-TIMING_C_GPS_A_DACATRAIL\",\n \"H1:SYS-TIMING_C_GPS_A_DACNEARRAIL\",\n \"H1:SYS-TIMING_C_GPS_A_NOSTOREDPOSITION\",\n \"H1:SYS-TIMING_C_GPS_A_POSITIONQUESTIONABLE\",\n \"H1:SYS-TIMING_C_GPS_A_NOPPS\",\n \"H1:SYS-TIMING_C_GPS_A_ANTENNAOPEN\",\n \"H1:SYS-TIMING_C_GPS_A_ANTENNASHORTED\",\n \"H1:SYS-TIMING_C_GPS_A_ERROR_FLAG\",\n \"H1:SYS-TIMING_C_GPS_A_ERROR_CODE\",\n\n \"H1:SYS-TIMING_X_GPS_A_ERROR_FLAG\",\n \"H1:SYS-TIMING_X_GPS_A_LATITUDE\",\n \"H1:SYS-TIMING_X_GPS_A_LONGITUDE\",\n \"H1:SYS-TIMING_X_GPS_A_ALTITUDE\",\n \"H1:SYS-TIMING_X_GPS_A_SPEED3D\",\n \"H1:SYS-TIMING_X_GPS_A_SPEED2D\",\n \"H1:SYS-TIMING_X_GPS_A_HEADING\",\n \"H1:SYS-TIMING_X_GPS_A_DOP\",\n \"H1:SYS-TIMING_X_GPS_A_VISSATELLITES\",\n \"H1:SYS-TIMING_X_GPS_A_TRACKSATELLITES\",\n \"H1:SYS-TIMING_X_GPS_A_TIMEVALID\",\n \"H1:SYS-TIMING_X_GPS_A_RECEIVERMODE\",\n \"H1:SYS-TIMING_X_GPS_A_UTCOFFSET\",\n \"H1:SYS-TIMING_X_GPS_A_TIMESOURCE\",\n \"H1:SYS-TIMING_X_GPS_A_ALMANACINCOMPLETE\",\n \"H1:SYS-TIMING_X_GPS_A_GPS\",\n \"H1:SYS-TIMING_X_GPS_A_YEAR\",\n \"H1:SYS-TIMING_X_GPS_A_MONTH\",\n \"H1:SYS-TIMING_X_GPS_A_DAY\",\n \"H1:SYS-TIMING_X_GPS_A_HOUR\",\n \"H1:SYS-TIMING_X_GPS_A_MINUTE\",\n \"H1:SYS-TIMING_X_GPS_A_SECOND\",\n \"H1:SYS-TIMING_X_GPS_A_LEAP\",\n \"H1:SYS-TIMING_X_GPS_A_WEEK\",\n \"H1:SYS-TIMING_X_GPS_A_TOW\",\n \"H1:SYS-TIMING_X_GPS_A_NOTTRACKINGSATTELITES\",\n \"H1:SYS-TIMING_X_GPS_A_SURVEYINPROGRESS\",\n \"H1:SYS-TIMING_X_GPS_A_ANTENNAOPEN\",\n \"H1:SYS-TIMING_X_GPS_A_ANTENNASHORTED\",\n \"H1:SYS-TIMING_X_GPS_A_NARROWBAND\",\n \"H1:SYS-TIMING_X_GPS_A_FASTACQUISITION\",\n \"H1:SYS-TIMING_X_GPS_A_FILTERRESET\",\n \"H1:SYS-TIMING_X_GPS_A_POSITIONLOCK\",\n \"H1:SYS-TIMING_X_GPS_A_DIFFERENTIALFIX\",\n\n \"H1:SYS-TIMING_Y_GPS_A_ERROR_FLAG\",\n \"H1:SYS-TIMING_Y_GPS_A_LATITUDE\",\n \"H1:SYS-TIMING_Y_GPS_A_LONGITUDE\",\n \"H1:SYS-TIMING_Y_GPS_A_ALTITUDE\",\n \"H1:SYS-TIMING_Y_GPS_A_SPEED3D\",\n \"H1:SYS-TIMING_Y_GPS_A_SPEED2D\",\n \"H1:SYS-TIMING_Y_GPS_A_HEADING\",\n \"H1:SYS-TIMING_Y_GPS_A_DOP\",\n \"H1:SYS-TIMING_Y_GPS_A_VISSATELLITES\",\n \"H1:SYS-TIMING_Y_GPS_A_TRACKSATELLITES\",\n \"H1:SYS-TIMING_Y_GPS_A_TIMEVALID\",\n \"H1:SYS-TIMING_Y_GPS_A_RECEIVERMODE\",\n \"H1:SYS-TIMING_Y_GPS_A_UTCOFFSET\",\n \"H1:SYS-TIMING_Y_GPS_A_TIMESOURCE\",\n \"H1:SYS-TIMING_Y_GPS_A_ALMANACINCOMPLETE\",\n \"H1:SYS-TIMING_Y_GPS_A_GPS\",\n \"H1:SYS-TIMING_Y_GPS_A_YEAR\",\n \"H1:SYS-TIMING_Y_GPS_A_MONTH\",\n \"H1:SYS-TIMING_Y_GPS_A_DAY\",\n \"H1:SYS-TIMING_Y_GPS_A_HOUR\",\n \"H1:SYS-TIMING_Y_GPS_A_MINUTE\",\n \"H1:SYS-TIMING_Y_GPS_A_SECOND\",\n \"H1:SYS-TIMING_Y_GPS_A_LEAP\",\n \"H1:SYS-TIMING_Y_GPS_A_WEEK\",\n \"H1:SYS-TIMING_Y_GPS_A_TOW\",\n \"H1:SYS-TIMING_Y_GPS_A_NOTTRACKINGSATTELITES\",\n \"H1:SYS-TIMING_Y_GPS_A_SURVEYINPROGRESS\",\n \"H1:SYS-TIMING_Y_GPS_A_ANTENNAOPEN\",\n \"H1:SYS-TIMING_Y_GPS_A_ANTENNASHORTED\",\n \"H1:SYS-TIMING_Y_GPS_A_NARROWBAND\",\n \"H1:SYS-TIMING_Y_GPS_A_FASTACQUISITION\",\n \"H1:SYS-TIMING_Y_GPS_A_FILTERRESET\",\n \"H1:SYS-TIMING_Y_GPS_A_POSITIONLOCK\",\n \"H1:SYS-TIMING_Y_GPS_A_DIFFERENTIALFIX\",\n\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_0_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_1_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_2_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_3_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_4_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_5_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_6_DIFF\",\n \"H1:SYS-TIMING_C_PPS_B_SIGNAL_7_DIFF\",\n\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_0_DIFF\",\n 
\"H1:SYS-TIMING_X_PPS_A_SIGNAL_1_DIFF\",\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_2_DIFF\",\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_3_DIFF\",\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_4_DIFF\",\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_5_DIFF\",\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_6_DIFF\",\n \"H1:SYS-TIMING_X_PPS_A_SIGNAL_7_DIFF\",\n\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_0_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_1_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_2_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_3_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_4_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_5_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_6_DIFF\",\n \"H1:SYS-TIMING_Y_PPS_A_SIGNAL_7_DIFF\"\n]\nCSS=\"\"\"\nimg, p {{\n max-width: 600px;\n}}\ndiv {{\n display: inline\n}}\n\"\"\"\n\n\n# THE REST OF THE IMPORTS ARE AFTER THIS IF STATEMENT.\n# Quits immediately on --help or -h flags to skip slow imports when you just\n# want to read the help documentation.\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description=DESC)\n parser.add_argument(\n \"-t\",\n \"--gpstimes\",\n type=int,\n nargs='*',\n help=\"\"\"\n The GPS times about which the plots should be centered.\n \"\"\"\n )\n parser.add_argument(\n \"-o\",\n \"--outdir\",\n default=\".\",\n help=\"\"\"\n Where should the files be saved? Defaults to current directory\n \"\"\"\n )\n parser.add_argument(\n \"-l\",\n \"--channellist\",\n help=\"\"\"\n Path to a text file containing the list of channels to plot. If\n not provided, this script will try to read the channel list in\n from STDIN. If nothing is available from stdin, a default,\n comprehensive channel list will be used. The channel list\n should be a newline delimited list of valid EPICS channel names,\n though badly-formed or invalid channel names will be skipped.\n \"\"\"\n )\n parser.add_argument(\n \"-d\",\n \"--deltat\",\n type=int,\n default=DT,\n help=\"\"\"\n The size of the time window to be plotted. The generated plots\n will show +/- deltat seconds of data around the central GPS time.\n Defaults to: {}\n \"\"\".format(DT)\n )\n parser.add_argument(\n \"-w\",\n \"--skipwebpage\",\n action='store_true',\n help=\"\"\"\n If this flag is provided, no preview webpage will be generated.\n By default, the webpage is generated.\n \"\"\"\n )\n parser.add_argument(\n \"-p\",\n \"--skipplots\",\n action='store_true',\n help=\"\"\"\n If this flag is provided, no plots will be generated (useful if\n you already made plots and just want to regenerate the webpage).\n By default, the plots are generated.\n \"\"\"\n )\n parser.add_argument(\n \"-s\",\n \"--maxsimultaneous\",\n type=int,\n default=MAX_SIMULTANEOUS_CHANS,\n help=\"\"\"\n The maximum number of simultaneous channels for which to fetch\n data at a given time. It is generally faster to fetch a batch of\n channels at a time, but getting too many channels at once might\n slow things down. 
Defaults to: {}\n \"\"\".format(MAX_SIMULTANEOUS_CHANS)\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action='store_true',\n help=\"\"\"\n Print verbose output for status monitoring while plotting.\n Defaults to False.\n \"\"\"\n )\n parser.add_argument(\n \"--print-default-channels\",\n action='store_true',\n help=\"\"\"\n Print the default list of channels as a newline-delimited list\n and exit.\n \"\"\"\n )\n args = parser.parse_args()\n if args.print_default_channels:\n print('\\n'.join(DEFAULT_CHANNELS))\n exit(0)\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport gwpy.timeseries\nimport sys\nimport os\n\ndef channel_fname(gps, chan):\n return '{}_{}.png'.format(gps, chan.replace(':', '..'))\n\ndef image_link_html(gpstimes, chan):\n \"\"\"return an HTML string for an image element for this plot\"\"\"\n headerfmt = \"<th>{}</th>\"\n imgfmt = \"\"\"\n <td>\n <img src=\"./{}\">\n </td>\n \"\"\"\n header = '\\n'.join([headerfmt.format(gps) for gps in gpstimes])\n images = '\\n'.join(\n [imgfmt.format(channel_fname(gps, chan)) for gps in gpstimes]\n )\n channel_entry_fmt = \"\"\"\n <div>\n <p>{}</p>\n <br>\n <table>\n <tr>{}</tr>\n <tr>{}</tr>\n </table>\n </div>\n \"\"\"\n return channel_entry_fmt.format(chan, header, images)\n\ndef read_channels(filedescriptor):\n return list(set(filedescriptor.read().split('\\n')) - {''})\n\ndef get_channel_list():\n \"\"\"Get the channel list from the file descriptor specified via the CLI.\n If no channel list is provided, try to read from sys.stdin. Otherwise,\n use the default comprehensive channel list.\"\"\"\n if args.channellist is None:\n # if stdin is a tty, then nothing is being piped in\n if sys.stdin.isatty():\n return DEFAULT_CHANNELS\n else:\n return read_channels(sys.stdin)\n else:\n with open(args.channellist, 'r') as f:\n return read_channels(f)\n\ndef make_preview_webpage(outdir, chans, gpstimes):\n \"\"\"Make a webpage called index.html displaying all generated plots and save\n it to the specified output directory.\"\"\"\n gpstimes_str = ', '.join(str(gpstimes))\n HTML_TOP = (\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"UTF-8\">\n <title>Plots around {}</title>\n <style type=\"text/css\">\n \"\"\" + CSS.replace('\\n', '\\n' + 16*' ') + \"\"\"\n </style>\n </head>\n <body>\n \"\"\").replace('\\n ', '\\n').format(gpstimes_str) # replace will remove 1st indent\n HTML_BOTTOM = \"\"\"\n </body>\n </html>\n \"\"\".replace('\\n ', '\\n') # replace will remove 1st indent\n filepath = os.path.join(outdir, 'index.html')\n with open(filepath, 'w') as f:\n f.write(HTML_TOP)\n f.write('\\n')\n for chan in chans:\n f.write(image_link_html(gpstimes, chan))\n f.write(HTML_BOTTOM)\n\ndef make_channel_plots(outdir, chans, gps, dt=DT, verbose=False,\n max_simultaneous_chans=MAX_SIMULTANEOUS_CHANS):\n for i in range(0, len(chans), max_simultaneous_chans):\n if verbose:\n print('plotting {} thru {} of {}'.format(i,\n i+max_simultaneous_chans,\n len(chans)))\n chans_sublist = chans[i:i+max_simultaneous_chans]\n try:\n bufs = gwpy.timeseries.TimeSeriesDict.fetch(chans_sublist, gps-dt,\n gps+dt,\n verbose=verbose)\n except RuntimeError:\n if verbose:\n print('Could not fetch all at once:\\n{}'.format(chans_sublist))\n bufs = {}\n for chan in chans_sublist:\n try:\n buf = gwpy.timeseries.TimeSeries.fetch(chan, gps-dt, gps+dt,\n verbose=verbose)\n bufs[chan] = buf\n except RuntimeError:\n if verbose: print('Bad channel: {}'.format(chan))\n for chan in bufs:\n buf = bufs[chan]\n filepath = os.path.join(outdir, channel_fname(gps, 
chan))\n plot = buf.plot()\n plot.set_title(buf.channel.name.replace('_', '\\_'))\n plot.savefig(filepath)\n\nif __name__ == '__main__':\n chans = get_channel_list()\n if args.verbose: print('channels to plot: {}'.format(chans))\n\n if not args.skipwebpage:\n make_preview_webpage(args.outdir, chans, args.gpstimes)\n\n if not args.skipplots:\n for gps in args.gpstimes:\n make_channel_plots(args.outdir, chans, gps, args.deltat,\n args.verbose, args.maxsimultaneous)\n",
"#!/usr/bin/env python\n# (c) Stefan Countryman, 2016-2017\n\nimport sys\nfrom datetime import datetime, timedelta\nfrom textwrap import fill\nimport numpy as np\n# import scipy.ndimage.filters as scf\n\nif __name__ == \"__main__\" and len(sys.argv) > 1:\n print(\n \"Usage: {} <input_file.txt\\n\\n\".format(sys.argv[0]) +\n fill(\n \"\"\"Read a raw IRIG-B signal from STDIN and print out decoded\n timestamps. Data must be a newline-delimited list of floating\n point values representing the value of the IRIG-B signal at each\n point in time. Sample rate must be 16,384 (2^14) Hz. The input\n file must contain an integer number of seconds worth of data, i.e.\n it must have (Sample Rate) x (N) values, where N is the number of\n seconds that must be decoded, and the data must start at the\n beginning of a second.\n \"\"\"\n )\n )\n exit(1)\n\n# ------------------------------------------------------------------------------\n# CONSTANTS\n# ------------------------------------------------------------------------------\n\n# max and min of histogram, and number of bins\nSAMPLE_RATE = 16384 # ADC sample rate\n\n# --------------------------------------------------------------------------\n# IRIG-B RELATED CONSTANTS\n# --------------------------------------------------------------------------\nBITS_PER_SECOND = 100 # bits per second in IRIG-B spec\nCONVOLUTION_SIGMA = 1e-4\nHIGH_SIGNAL_THRESHOLD = 3500\n\n# find the high/low test points for each type of bit (0, 1, or control);\n# measured as indices from start of each bit\n# TODO measure in fractions of a second and convert with SAMPLE_RATE\nTEST_POINTS = np.array([20, 60, 110, 150])\n\n# find the start of each bit\nBIT_STARTS = np.round(np.arange(0, 1, 1./BITS_PER_SECOND) * SAMPLE_RATE)\nALL_TEST_POINT_INDICES = (BIT_STARTS[:, np.newaxis] + TEST_POINTS).astype(int)\n\n# representations of each type of bit (0, 1, or control)\nREP_0 = [1, 0, 0, 0]\nREP_1 = [1, 1, 0, 0]\nREP_C = [1, 1, 1, 0]\nCURRENT_CENTURY = 20\n\n# how many seconds does each bit represent?\nSECONDS = np.zeros(BITS_PER_SECOND, dtype=int)\nSECONDS[1] = 1\nSECONDS[2] = 2\nSECONDS[3] = 4\nSECONDS[4] = 8\nSECONDS[6] = 10\nSECONDS[7] = 20\nSECONDS[8] = 40\n\n# how many minutes does each bit represent?\nMINUTES = np.zeros(BITS_PER_SECOND, dtype=int)\nMINUTES[10] = 1\nMINUTES[11] = 2\nMINUTES[12] = 4\nMINUTES[13] = 8\nMINUTES[15] = 10\nMINUTES[16] = 20\nMINUTES[17] = 40\n\n# how many hours does each bit represent?\nHOURS = np.zeros(BITS_PER_SECOND, dtype=int)\nHOURS[20] = 1\nHOURS[21] = 2\nHOURS[22] = 4\nHOURS[23] = 8\nHOURS[25] = 10\nHOURS[26] = 20\n\n# how many days does each bit represent?\nDAYS = np.zeros(BITS_PER_SECOND, dtype=int)\nDAYS[30] = 1\nDAYS[31] = 2\nDAYS[32] = 4\nDAYS[33] = 8\nDAYS[35] = 10\nDAYS[36] = 20\nDAYS[37] = 40\nDAYS[38] = 80\nDAYS[40] = 100\nDAYS[41] = 200\n\n# how many years does each bit represent?\nYEARS = np.zeros(BITS_PER_SECOND, dtype=int)\nYEARS[50] = 1\nYEARS[51] = 2\nYEARS[52] = 4\nYEARS[53] = 8\nYEARS[55] = 10\nYEARS[56] = 20\nYEARS[57] = 40\nYEARS[58] = 80\n\ncontrol_bits = np.zeros(BITS_PER_SECOND, dtype=bool)\ncontrol_bits[range(9,100,10)] = True\ncontrol_bits[0] = True\n\n#-------------------------------------------------------------------------------\n# UTILITY FUNCTIONS\n#-------------------------------------------------------------------------------\n\ndef decode_timeseries(timeseries):\n \"\"\"Return the full decoded information as a dictionary along with a decoded\n datetime object for more convenient manipulation.\"\"\"\n # filter the timeseries to 
remove ringing at corners\n # filt = scf.gaussian_filter1d(timeseries, CONVOLUTION_SIGMA * SAMPLE_RATE)\n\n # check all test points\n bits_high = (timeseries[ALL_TEST_POINT_INDICES] >= HIGH_SIGNAL_THRESHOLD).astype(int)\n\n # represent control bits with the number 2\n bits = np.zeros(BITS_PER_SECOND, dtype=int)\n for i in range(bits_high.shape[0]):\n if np.all(bits_high[i] == REP_0): bits[i] = 0\n elif np.all(bits_high[i] == REP_1): bits[i] = 1\n elif np.all(bits_high[i] == REP_C): bits[i] = 2\n else: raise ValueError(\"Bad bit: \" + str(i))\n\n # are the control bits in the correct spots?\n if not np.all((bits == 2) == control_bits):\n raise ValueError(\"Control bits are not present where expected: \\n\"\n + str((bits == 2) == control_bits))\n\n # find total seconds, minutes, hours, days, and years\n decoded = {\n 'second': bits.dot(SECONDS),\n 'minute': bits.dot(MINUTES),\n 'hour': bits.dot(HOURS),\n 'day': bits.dot(DAYS),\n 'year': bits.dot(YEARS) + 100*CURRENT_CENTURY,\n }\n\n # parse a datetime from this\n jan1 = datetime(decoded['year'], 1, 1, decoded['hour'], decoded['minute'],\n decoded['second'])\n decoded['datetime'] = jan1 + timedelta(decoded['day'] - 1)\n\n return decoded\n\ndef get_date_from_timeseries(timeseries):\n \"\"\"Decode the input waveform, which is assumed to be a 16384hz digitized\n IRIG-B signal using DCLS (DC Level Shift).\"\"\"\n return decode_timeseries(timeseries)['datetime']\n\ndef print_formatted_date(converted_date):\n # finally, print the date\n print(converted_date.strftime('%a %b %d %X %Y'))\n\n# and will cause a ValueError.\ndef read_1_second_from_stdin():\n # read in data from stdin; don't read more than a second worth of data\n timeseries = np.zeros(SAMPLE_RATE)\n line = ''\n i = 0\n while i < SAMPLE_RATE:\n line = sys.stdin.readline()\n if not line:\n if i == 0:\n raise EOFError('Hit EOF at end of a second.')\n else:\n raise ValueError('Hit EOF ' + str(i) + ' lines into second; '\n 'provide integer number of seconds of data.')\n timeseries[i] = float(line)\n i += 1\n return timeseries\n\ndef main():\n while True:\n try:\n timeseries = read_1_second_from_stdin()\n print_formatted_date(get_date_from_timeseries(timeseries))\n except EOFError:\n return\n\n# run this if we are running from command line\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.use"
],
[
"numpy.all",
"numpy.arange",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
1335654481ren/openpilot | [
"68485aa4e40d28a5d411e7494817f7b749ddc500"
] | [
"selfdrive/controls/lib/adaptivecruise.py"
] | [
"import math\nimport numpy as np\nfrom common.numpy_fast import clip, interp\nimport selfdrive.messaging as messaging\n\n# lookup tables VS speed to determine min and max accels in cruise\n_A_CRUISE_MIN_V = [-1.0, -.8, -.67, -.5, -.30]\n_A_CRUISE_MIN_BP = [ 0., 5., 10., 20., 40.]\n\n# need fast accel at very low speed for stop and go\n_A_CRUISE_MAX_V = [1., 1., .8, .5, .30]\n_A_CRUISE_MAX_BP = [0., 5., 10., 20., 40.]\n\ndef calc_cruise_accel_limits(v_ego):\n a_cruise_min = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)\n a_cruise_max = interp(v_ego, _A_CRUISE_MAX_BP, _A_CRUISE_MAX_V)\n return np.vstack([a_cruise_min, a_cruise_max])\n\n_A_TOTAL_MAX_V = [1.5, 1.9, 3.2]\n_A_TOTAL_MAX_BP = [0., 20., 40.]\n\ndef limit_accel_in_turns(v_ego, angle_steers, a_target, a_pcm, CP):\n #*** this function returns a limited long acceleration allowed, depending on the existing lateral acceleration\n # this should avoid accelerating when losing the target in turns\n deg_to_rad = np.pi / 180. # from can reading to rad\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * angle_steers * deg_to_rad / (CP.steerRatio * CP.wheelBase)\n a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))\n\n a_target[1] = min(a_target[1], a_x_allowed)\n a_pcm = min(a_pcm, a_x_allowed)\n return a_target, a_pcm\n\ndef process_a_lead(a_lead):\n # soft threshold of 0.5m/s^2 applied to a_lead to reject noise, also not considered positive a_lead\n a_lead_threshold = 0.5\n a_lead = min(a_lead + a_lead_threshold, 0)\n return a_lead\n\ndef calc_desired_distance(v_lead):\n #*** compute desired distance ***\n t_gap = 1.7 # good to be far away\n d_offset = 4 # distance when at zero speed\n return d_offset + v_lead * t_gap\n\n\n#linear slope\n_L_SLOPE_V = [0.40, 0.10]\n_L_SLOPE_BP = [0., 40]\n\n# parabola slope\n_P_SLOPE_V = [1.0, 0.25]\n_P_SLOPE_BP = [0., 40]\n\ndef calc_desired_speed(d_lead, d_des, v_lead, a_lead):\n #*** compute desired speed ***\n # the desired speed curve is divided in 4 portions: \n # 1-constant\n # 2-linear to regain distance\n # 3-linear to shorten distance\n # 4-parabolic (constant decel)\n\n max_runaway_speed = -2. # no slower than 2m/s over the lead\n\n # interpolate the lookups to find the slopes for a give lead speed\n l_slope = interp(v_lead, _L_SLOPE_BP, _L_SLOPE_V)\n p_slope = interp(v_lead, _P_SLOPE_BP, _P_SLOPE_V)\n\n # this is where parabola and linear curves are tangents \n x_linear_to_parabola = p_slope / l_slope**2\n\n # parabola offset to have the parabola being tangent to the linear curve\n x_parabola_offset = p_slope / (2 * l_slope**2)\n\n if d_lead < d_des:\n # calculate v_rel_des on the line that connects 0m at max_runaway_speed to d_des\n v_rel_des_1 = (- max_runaway_speed) / d_des * (d_lead - d_des)\n # calculate v_rel_des on one third of the linear slope\n v_rel_des_2 = (d_lead - d_des) * l_slope / 3.\n # take the min of the 2 above\n v_rel_des = min(v_rel_des_1, v_rel_des_2)\n v_rel_des = max(v_rel_des, max_runaway_speed)\n elif d_lead < d_des + x_linear_to_parabola:\n v_rel_des = (d_lead - d_des) * l_slope\n v_rel_des = max(v_rel_des, max_runaway_speed)\n else:\n v_rel_des = math.sqrt(2 * (d_lead - d_des - x_parabola_offset) * p_slope)\n\n # compute desired speed\n v_target = v_rel_des + v_lead\n\n # compute v_coast: above this speed we want to coast\n t_lookahead = 1. 
# how far in time we consider a_lead to anticipate the coast region\n v_coast_shift = max(a_lead * t_lookahead, - v_lead) # don't consider projections that would make v_lead<0\n v_coast = (v_lead + v_target)/2 + v_coast_shift # no accel allowed above this line\n v_coast = min(v_coast, v_target)\n\n return v_target, v_coast\n\ndef calc_critical_decel(d_lead, v_rel, d_offset, v_offset):\n # this function computes the required decel to avoid crashing, given safety offsets\n a_critical = - max(0., v_rel + v_offset)**2/max(2*(d_lead - d_offset), 0.5)\n return a_critical\n\n\n# maximum acceleration adjustment\n_A_CORR_BY_SPEED_V = [0.4, 0.4, 0]\n# speeds\n_A_CORR_BY_SPEED_BP = [0., 5., 20.]\n\ndef calc_positive_accel_limit(d_lead, d_des, v_ego, v_rel, v_ref, v_rel_ref, v_coast, v_target, a_lead_contr, a_max):\n a_coast_min = -1.0 # never coast faster then -1m/s^2\n # coasting behavior above v_coast. Forcing a_max to be negative will force the pid_speed to decrease,\n # regardless v_target\n if v_ref > min(v_coast, v_target):\n # for smooth coast we can be agrressive and target a point where car would actually crash\n v_offset_coast = 0.\n d_offset_coast = d_des/2. - 4.\n\n # acceleration value to smoothly coast until we hit v_target\n if d_lead > d_offset_coast + 0.1:\n a_coast = calc_critical_decel(d_lead, v_rel_ref, d_offset_coast, v_offset_coast)\n # if lead is decelerating, then offset the coast decel\n a_coast += a_lead_contr\n a_max = max(a_coast, a_coast_min)\n else:\n a_max = a_coast_min\n else:\n # same as cruise accel, but add a small correction based on lead acceleration at low speeds\n # when lead car accelerates faster, we can do the same, and vice versa\n\n a_max = a_max + interp(v_ego, _A_CORR_BY_SPEED_BP, _A_CORR_BY_SPEED_V) \\\n * clip(-v_rel / 4., -.5, 1)\n return a_max\n\n# arbitrary limits to avoid too high accel being computed\n_A_SAT = [-10., 5.]\n\n# do not consider a_lead at 0m/s, fully consider it at 10m/s\n_A_LEAD_LOW_SPEED_V = [0., 1.]\n\n# speed break points\n_A_LEAD_LOW_SPEED_BP = [0., 10.]\n\n# add a small offset to the desired decel, just for safety margin\n_DECEL_OFFSET_V = [-0.3, -0.5, -0.5, -0.4, -0.3]\n\n# speed bp: different offset based on the likelyhood that lead decels abruptly\n_DECEL_OFFSET_BP = [0., 4., 15., 30, 40.]\n\n\ndef calc_acc_accel_limits(d_lead, d_des, v_ego, v_pid, v_lead, v_rel, a_lead,\n v_target, v_coast, a_target, a_pcm):\n #*** compute max accel ***\n # v_rel is now your velocity in lead car frame\n v_rel = -v_rel # this simplifiess things when thinking in d_rel-v_rel diagram\n\n v_rel_pid = v_pid - v_lead\n\n # this is how much lead accel we consider in assigning the desired decel\n a_lead_contr = a_lead * interp(v_lead, _A_LEAD_LOW_SPEED_BP,\n _A_LEAD_LOW_SPEED_V) * 0.8\n\n # first call of calc_positive_accel_limit is used to shape v_pid\n a_target[1] = calc_positive_accel_limit(d_lead, d_des, v_ego, v_rel, v_pid,\n v_rel_pid, v_coast, v_target,\n a_lead_contr, a_target[1])\n # second call of calc_positive_accel_limit is used to limit the pcm throttle\n # control (only useful when we don't control throttle directly)\n a_pcm = calc_positive_accel_limit(d_lead, d_des, v_ego, v_rel, v_ego,\n v_rel, v_coast, v_target,\n a_lead_contr, a_pcm)\n\n #*** compute max decel ***\n v_offset = 1. # assume the car is 1m/s slower\n d_offset = 1. 
# assume the distance is 1m lower\n if v_target - v_ego > 0.5:\n pass # acc target speed is above vehicle speed, so we can use the cruise limits\n elif d_lead > d_offset + 0.01: # add small value to avoid by zero divisions\n # compute needed accel to get to 1m distance with -1m/s rel speed\n decel_offset = interp(v_lead, _DECEL_OFFSET_BP, _DECEL_OFFSET_V)\n\n critical_decel = calc_critical_decel(d_lead, v_rel, d_offset, v_offset)\n a_target[0] = min(decel_offset + critical_decel + a_lead_contr,\n a_target[0])\n else:\n a_target[0] = _A_SAT[0]\n # a_min can't be higher than a_max\n a_target[0] = min(a_target[0], a_target[1])\n # final check on limits\n a_target = np.clip(a_target, _A_SAT[0], _A_SAT[1])\n a_target = a_target.tolist()\n return a_target, a_pcm\n\ndef calc_jerk_factor(d_lead, v_rel):\n # we don't have an explicit jerk limit, so this function calculates a factor\n # that is used by the PID controller to scale the gains. Not the cleanest solution \n # but we need this for the demo.\n # TODO: Calculate Kp and Ki directly in this function.\n\n # the higher is the decel required to avoid a crash, the higher is the PI factor scaling\n d_offset = 0.5\n v_offset = 2.\n a_offset = 1.\n jerk_factor_max = 1.0 # can't increase Kp and Ki more than double.\n if d_lead < d_offset + 0.1: # add small value to avoid by zero divisions\n jerk_factor = jerk_factor_max\n else:\n a_critical = - calc_critical_decel(d_lead, -v_rel, d_offset, v_offset)\n # increase Kp and Ki by 20% for every 1m/s2 of decel required above 1m/s2\n jerk_factor = max(a_critical - a_offset, 0.)/5.\n jerk_factor = min(jerk_factor, jerk_factor_max)\n return jerk_factor\n\n\ndef calc_ttc(d_rel, v_rel, a_rel, v_lead):\n # this function returns the time to collision (ttc), assuming that a_rel will stay constant\n # TODO: Review these assumptions.\n # change sign to rel quantities as it's going to be easier for calculations\n v_rel = -v_rel\n a_rel = -a_rel\n\n # assuming that closing gap a_rel comes from lead vehicle decel, then limit a_rel so that v_lead will get to zero in no sooner than t_decel\n # this helps overweighting a_rel when v_lead is close to zero.\n t_decel = 2.\n a_rel = min(a_rel, v_lead/t_decel)\n\n delta = v_rel**2 + 2 * d_rel * a_rel\n # assign an arbitrary high ttc value if there is no solution to ttc\n if delta < 0.1:\n ttc = 5.\n elif math.sqrt(delta) + v_rel < 0.1:\n ttc = 5.\n else:\n ttc = 2 * d_rel / (math.sqrt(delta) + v_rel)\n return ttc\n\n\nMAX_SPEED_POSSIBLE = 55.\n\ndef compute_speed_with_leads(v_ego, angle_steers, v_pid, l1, l2, CP):\n # drive limits\n # TODO: Make lims function of speed (more aggressive at low speed).\n a_lim = [-3., 1.5]\n\n #*** set target speed pretty high, as lead hasn't been considered yet\n v_target_lead = MAX_SPEED_POSSIBLE\n\n #*** set accel limits as cruise accel/decel limits ***\n a_target = calc_cruise_accel_limits(v_ego)\n # Always 1 for now.\n a_pcm = 1\n\n #*** limit max accel in sharp turns\n a_target, a_pcm = limit_accel_in_turns(v_ego, angle_steers, a_target, a_pcm, CP)\n jerk_factor = 0.\n\n if l1 is not None and l1.status:\n #*** process noisy a_lead signal from radar processing ***\n a_lead_p = process_a_lead(l1.aLeadK)\n\n #*** compute desired distance ***\n d_des = calc_desired_distance(l1.vLead)\n\n #*** compute desired speed ***\n v_target_lead, v_coast = calc_desired_speed(l1.dRel, d_des, l1.vLead, a_lead_p)\n\n if l2 is not None and l2.status:\n #*** process noisy a_lead signal from radar processing ***\n a_lead_p2 = process_a_lead(l2.aLeadK)\n\n #*** 
compute desired distance ***\n d_des2 = calc_desired_distance(l2.vLead)\n\n #*** compute desired speed ***\n v_target_lead2, v_coast2 = calc_desired_speed(l2.dRel, d_des2, l2.vLead, a_lead_p2)\n\n # listen to lead that makes you go slower\n if v_target_lead2 < v_target_lead:\n l1 = l2\n d_des, a_lead_p, v_target_lead, v_coast = d_des2, a_lead_p2, v_target_lead2, v_coast2\n\n # l1 is the main lead now\n\n #*** compute accel limits ***\n a_target1, a_pcm1 = calc_acc_accel_limits(l1.dRel, d_des, v_ego, v_pid, l1.vLead,\n l1.vRel, a_lead_p, v_target_lead, v_coast, a_target, a_pcm)\n\n # we can now limit a_target to a_lim\n a_target = np.clip(a_target1, a_lim[0], a_lim[1])\n a_pcm = np.clip(a_pcm1, a_lim[0], a_lim[1]).tolist()\n\n #*** compute max factor ***\n jerk_factor = calc_jerk_factor(l1.dRel, l1.vRel)\n\n # force coasting decel if driver hasn't been controlling car in a while\n return v_target_lead, a_target, a_pcm, jerk_factor\n\n\nclass AdaptiveCruise(object):\n def __init__(self, live20):\n self.live20 = live20\n self.last_cal = 0.\n self.l1, self.l2 = None, None\n self.logMonoTime = 0\n self.dead = True\n def update(self, cur_time, v_ego, angle_steers, v_pid, CP):\n l20 = messaging.recv_sock(self.live20)\n if l20 is not None:\n self.l1 = l20.live20.leadOne\n self.l2 = l20.live20.leadTwo\n self.logMonoTime = l20.logMonoTime\n\n # TODO: no longer has anything to do with calibration\n self.last_cal = cur_time\n self.dead = False\n elif cur_time - self.last_cal > 0.5:\n self.dead = True\n\n self.v_target_lead, self.a_target, self.a_pcm, self.jerk_factor = \\\n compute_speed_with_leads(v_ego, angle_steers, v_pid, self.l1, self.l2, CP)\n self.has_lead = self.v_target_lead != MAX_SPEED_POSSIBLE\n"
] | [
[
"numpy.vstack",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NunoEdgarGFlowHub/edward | [
"298fb539261c71e34d5e7aa5a37ed8a029df0820",
"298fb539261c71e34d5e7aa5a37ed8a029df0820",
"298fb539261c71e34d5e7aa5a37ed8a029df0820"
] | [
"examples/factor_analysis.py",
"tests/test-inferences/test_map.py",
"examples/pp_dirichlet_process.py"
] | [
"#!/usr/bin/env python\n\"\"\"Logistic factor analysis on MNIST. Using Monte Carlo EM, with HMC\nfor the E-step and MAP for the M-step. We fit to just one data\npoint in MNIST.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport os\nimport tensorflow as tf\n\nfrom edward.models import Bernoulli, Empirical, Normal\nfrom scipy.misc import imsave\nfrom tensorflow.contrib import slim\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef generative_network(z):\n \"\"\"Generative network to parameterize generative model. It takes\n latent variables as input and outputs the likelihood parameters.\n\n logits = neural_network(z)\n \"\"\"\n net = slim.fully_connected(z, 28 * 28, activation_fn=None)\n net = slim.flatten(net)\n return net\n\n\ned.set_seed(42)\n\nN = 1 # number of data points\nd = 10 # latent dimension\nDATA_DIR = \"data/mnist\"\nIMG_DIR = \"img\"\n\nif not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\nif not os.path.exists(IMG_DIR):\n os.makedirs(IMG_DIR)\n\n# DATA\nmnist = input_data.read_data_sets(DATA_DIR, one_hot=True)\nx_train, _ = mnist.train.next_batch(N)\n\n# MODEL\nz = Normal(loc=tf.zeros([N, d]), scale=tf.ones([N, d]))\nlogits = generative_network(z)\nx = Bernoulli(logits=logits)\n\n# INFERENCE\nn_iter_per_epoch = 100\nn_epoch = 1000\n\nT = n_iter_per_epoch * n_epoch\nqz = Empirical(params=tf.Variable(tf.random_normal([T, N, d])))\n\ninference_e = ed.HMC({z: qz}, data={x: x_train})\ninference_e.initialize(n_print=n_iter_per_epoch)\n\ninference_m = ed.MAP(data={x: x_train, z: qz.params[inference_e.t]})\noptimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)\ninference_m.initialize(optimizer=optimizer)\n\ntf.global_variables_initializer().run()\n\nfor _ in range(n_epoch):\n avg_loss = 0.0\n for _ in range(n_iter_per_epoch):\n info_dict_e = inference_e.update()\n info_dict_m = inference_m.update()\n avg_loss += info_dict_m['loss']\n\n inference_e.print_progress(info_dict_e)\n\n # Print a lower bound to the average marginal likelihood for an\n # image.\n avg_loss = avg_loss / n_iter_per_epoch\n avg_loss = avg_loss / N\n print(\"\\nlog p(x) >= {:0.3f}\".format(avg_loss))\n\n # Prior predictive check.\n imgs = x.eval()\n for m in range(N):\n imsave(os.path.join(IMG_DIR, '%d.png') % m, imgs[m].reshape(28, 28))\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport numpy as np\nimport tensorflow as tf\n\nfrom edward.models import Normal, PointMass\n\n\nclass test_map_class(tf.test.TestCase):\n\n def test_normalnormal_run(self):\n with self.test_session() as sess:\n x_data = np.array([0.0] * 50, dtype=np.float32)\n\n mu = Normal(loc=0.0, scale=1.0)\n x = Normal(loc=tf.ones(50) * mu, scale=1.0)\n\n qmu = PointMass(params=tf.Variable(1.0))\n\n # analytic solution: N(loc=0.0, scale=\\sqrt{1/51}=0.140)\n inference = ed.MAP({mu: qmu}, data={x: x_data})\n inference.run(n_iter=1000)\n\n self.assertAllClose(qmu.mean().eval(), 0)\n\nif __name__ == '__main__':\n ed.set_seed(42)\n tf.test.main()\n",
"#!/usr/bin/env python\n\"\"\"Dirichlet process.\n\nWe implement sample generation from a Dirichlet process (with no base\ndistribution) via its stick breaking construction. It is a streamlined\nimplementation of the ``DirichletProcess`` random variable in Edward.\n\nReferences\n----------\nhttps://probmods.org/chapters/12-non-parametric-models.html#infinite-discrete-distributions-the-dirichlet-processes\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom edward.models import Bernoulli, Beta, DirichletProcess, Exponential, Normal\n\nplt.style.use('ggplot')\n\n\ndef dirichlet_process(alpha):\n \"\"\"Demo of stochastic while loop for stick breaking construction.\"\"\"\n def cond(k, beta_k):\n # End while loop (return False) when flip is heads.\n flip = Bernoulli(beta_k)\n return tf.cast(1 - flip, tf.bool)\n\n def body(k, beta_k):\n beta_k = Beta(1.0, alpha)\n return k + 1, beta_k\n\n k = tf.constant(0)\n beta_k = Beta(1.0, alpha)\n stick_num, stick_beta = tf.while_loop(cond, body, loop_vars=[k, beta_k])\n return stick_num\n\n\ndp = dirichlet_process(10.0)\n\n# The number of sticks broken is dynamic, changing across evaluations.\nsess = tf.Session()\nprint(sess.run(dp))\nprint(sess.run(dp))\n\n# Demo of the DirichletProcess random variable in Edward.\nbase = Normal(0.0, 1.0)\n\n# Highly concentrated DP.\nalpha = 1.0\ndp = DirichletProcess(alpha, base)\nx = dp.sample(1000)\nsamples = sess.run(x)\nplt.hist(samples, bins=100, range=(-3.0, 3.0))\nplt.title(\"DP({0}, N(0, 1))\".format(alpha))\nplt.show()\n\n# More spread out DP.\nalpha = 50.0\ndp = DirichletProcess(alpha, base)\nx = dp.sample(1000)\nsamples = sess.run(x)\nplt.hist(samples, bins=100, range=(-3.0, 3.0))\nplt.title(\"DP({0}, N(0, 1))\".format(alpha))\nplt.show()\n\n# States persist across calls to sample() in a DP.\nalpha = 1.0\ndp = DirichletProcess(alpha, base)\nx = dp.sample(50)\ny = dp.sample(75)\nsamples_x, samples_y = sess.run([x, y])\nplt.subplot(211)\nplt.hist(samples_x, bins=100, range=(-3.0, 3.0))\nplt.title(\"DP({0}, N(0, 1)) across two calls to sample()\".format(alpha))\nplt.subplot(212)\nplt.hist(samples_y, bins=100, range=(-3.0, 3.0))\nplt.show()\n\n# ``theta`` is the distribution indirectly returned by the DP.\n# Fetching theta is the same as fetching the Dirichlet process.\ndp = DirichletProcess(alpha, base)\ntheta = Normal(0.0, 1.0, value=tf.cast(dp, tf.float32))\nprint(sess.run([dp, theta]))\nprint(sess.run([dp, theta]))\n\n# DirichletProcess can also take in non-scalar concentrations and bases.\nalpha = tf.constant([0.1, 0.6, 0.4])\nbase = Exponential(lam=tf.ones([5, 2]))\ndp = DirichletProcess(alpha, base)\nprint(dp)\n"
] | [
[
"tensorflow.zeros",
"tensorflow.ones",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.contrib.slim.flatten",
"tensorflow.train.AdamOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.random_normal"
],
[
"numpy.array",
"tensorflow.ones",
"tensorflow.test.main",
"tensorflow.Variable"
],
[
"tensorflow.constant",
"tensorflow.while_loop",
"matplotlib.pyplot.style.use",
"tensorflow.cast",
"tensorflow.ones",
"matplotlib.pyplot.subplot",
"tensorflow.Session",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"1.4",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
ProjectAGI/aha | [
"53a98ea42526dca56517dc97fffad874772f10f2",
"53a98ea42526dca56517dc97fffad874772f10f2"
] | [
"aha/datasets/omniglot_lake_dataset.py",
"aha/components/hopfieldlike_component.py"
] | [
"# Copyright (C) 2019 Project AGI\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"OmniglotLakeDataset class.\"\"\"\n\nimport os\nimport math\nimport zipfile\nimport tempfile\nimport logging\n\nfrom random import shuffle\nfrom six.moves import urllib\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom pagi.datasets.dataset import Dataset\nfrom pagi.utils.tf_utils import tf_invert_values, tf_centre_of_mass\n\n\nclass OmniglotLakeDataset(Dataset):\n \"\"\"\n Omniglot Dataset assembled in a way that can be used for the Lake test\n i.e. unique exemplars, followed by unique exemplars of same classes in same order\n \"\"\"\n\n # Mapping of alphabets (superclasses) to characters (classes) populated when dataset is loaded\n CLASS_MAP = {}\n\n def __init__(self, directory, batch_size, test_classes, instance_mode):\n super(OmniglotLakeDataset, self).__init__(\n name='omniglot',\n directory=directory,\n dataset_shape=[-1, 105, 105, 1],\n train_size=19280,\n test_size=13180,\n num_train_classes=964,\n num_test_classes=659,\n num_classes=1623)\n\n self._batch_size = batch_size\n self._test_classes = test_classes\n self._instance_mode = instance_mode\n\n self._dataset_show_files = []\n self._dataset_show_labels = []\n self._dataset_match_files = []\n self._dataset_match_labels = []\n\n def set_shape(self, height, width):\n self._dataset_shape[1] = height\n self._dataset_shape[2] = width\n\n def get_train(self, preprocess=False):\n \"\"\"\n tf.data.Dataset object for Omniglot training data.\n \"\"\"\n\n if len(self._dataset_show_files) == 0:\n self._create_test_sets()\n return self._dataset(self._dataset_show_files, self._dataset_show_labels)\n\n def get_test(self, preprocess=False):\n \"\"\"\n tf.data.Dataset object for Omniglot test data.\n \"\"\"\n if len(self._dataset_match_files) == 0:\n self._create_test_sets()\n return self._dataset(self._dataset_match_files, self._dataset_match_labels)\n\n def _create_test_sets(self, preprocess=False):\n \"\"\"\n tf.data.Dataset object for Omniglot test data.\n\n The order of samples is such that they are divided into batches,\n and always have one of each of the first `batch_size` classes from `test_show_classes`\n \"\"\"\n\n # assert that batch_size <= test_classes\n # assert that batches <= len(labels) / batch_size\n\n # 1) Get full list of possible samples, filename and label\n # ------------------------------------------------------------------------------------\n images_folder = self._download(self._directory, 'images_background')\n files, labels = self._filenames_and_labels(images_folder)\n\n # filter the filenames, labels by the list in the_classes\n the_classes = self.get_classes_by_superclass(self._test_classes)\n\n files_filtered = []\n labels_filtered = []\n for file, label in zip(files, labels):\n if label in the_classes:\n files_filtered.append(file)\n labels_filtered.append(label)\n files = files_filtered\n labels = labels_filtered\n\n # 2) Sort the full 
list 'labels' into batches of unique classes\n # ------------------------------------------------------------------------------------\n self._dataset_show = []\n self._dataset_match = []\n\n # first shuffle the order\n dataset = list(zip(files, labels))\n np.random.shuffle(dataset)\n files, labels = map(list, zip(*dataset))\n\n # then repeatedly sample with removal, assembling all batches\n data_show = []\n data_match = []\n\n end_batches = False\n batch_num = -1\n while not end_batches:\n batch_num += 1\n\n if batch_num >= 20:\n break\n\n # build a new batch\n batch_labels = []\n batches_labels = []\n batch_label_index = -1\n batch_label = ''\n for i, sample in enumerate(range(self._batch_size)):\n\n # if instance mode, then we only want one class, repeated batch_size times for train.\n # 'test' should be a copy (but usually done in workflow anyway)\n if self._instance_mode:\n\n # get the next index\n # ----------------------------\n\n index = -1\n # first item in batch, sample a random label that has not been chosen previously\n if batch_label_index == -1:\n # select first sample that is not in all batches so far (we want each batch to be a unique class)\n for idx, label in enumerate(labels):\n if label not in batches_labels:\n batch_label_index = idx\n batch_label = label\n batches_labels.append(batch_label) # remember which labels we added to all batches\n index = idx\n break\n\n logging.debug(\"====================== Batch={}, label={}\".format(batch_num, batch_label))\n\n # from then on, choose another exemplar from the same class\n else:\n # select same class for a 'match' sample\n if batch_label in labels:\n index = labels.index(batch_label)\n\n logging.debug(\"================== ----> Batch={}, index={}\".format(batch_num, index))\n\n # detect reaching the end of the dataset i.e. not able to assemble a new batch\n if index == -1:\n logging.info('Not able to find a unique class to assemble a new batch, '\n 'on batch={0}, sample={1}'.format(batch_num, sample))\n end_batches = True\n break\n\n # add to the datasets\n file = files.pop(index)\n label = labels.pop(index)\n data_show.append([file, label])\n data_match.append([file, label])\n\n else:\n # select first sample that is not in batch so far (to get unique)\n index = -1\n for idx, label in enumerate(labels):\n if label not in batch_labels:\n index = idx\n break\n\n # detect reaching the end of the dataset i.e. not able to assemble a new batch\n if index == -1:\n logging.info('Not able to find a unique class to assemble a new batch, '\n 'on batch={0}, sample={1}'.format(batch_num, sample))\n end_batches = True\n break\n\n # add to the 'show' dataset\n file = files.pop(index)\n label = labels.pop(index)\n data_show.append([file, label])\n\n batch_labels.append(label) # remember which labels we added to this batch\n\n # select same class for a 'match' sample\n index = labels.index(label)\n\n # add to the 'match' dataset\n label = labels.pop(index)\n file = files.pop(index)\n data_match.append([file, label])\n\n # convert from array of pairs, to pair of arrays\n self._dataset_show_files, self._dataset_show_labels = map(list, zip(*data_show))\n self._dataset_match_files, self._dataset_match_labels = map(list, zip(*data_match))\n\n def get_classes_by_superclass(self, superclasses, proportion=1.0):\n \"\"\"\n Retrieves a proportion of classes belonging to a particular superclass, defaults to retrieving all classes\n i.e. 
proportion=1.0.\n\n Arguments:\n superclasses: A single or list of the names of superclasses, or a single name of a superclass.\n proportion: A float that indicates the proportion of sub-classes to retrieve (default=1.0)\n \"\"\"\n if not self.CLASS_MAP:\n raise ValueError('Superclass to class mapping (CLASS_MAP) is not populated yet.')\n\n def filter_classes(classes, proportion, do_shuffle=True):\n \"\"\"Filters the list of classes by retrieving a proportion of shuffled classes.\"\"\"\n if do_shuffle:\n shuffle(classes)\n num_classes = math.ceil(len(classes) * float(proportion))\n return classes[:num_classes]\n\n classes = []\n if superclasses is None or (isinstance(superclasses, list) and len(superclasses) == 0):\n for superclass in self.CLASS_MAP.keys():\n subclasses = filter_classes(self.CLASS_MAP[superclass], proportion)\n classes.extend(subclasses)\n elif isinstance(superclasses, list):\n for superclass in superclasses:\n subclasses = filter_classes(self.CLASS_MAP[superclass], proportion)\n classes.extend(subclasses)\n else: # string - single superclass specified\n classes = filter_classes(self.CLASS_MAP[superclasses], proportion)\n\n return classes\n\n def _dataset(self, filenames, labels):\n\n def parse_function(filenames, label):\n return OmniglotLakeDataset.parse_function(filenames, label, self.shape, self._dataset_shape, centre=True)\n\n dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n dataset = dataset.map(parse_function, num_parallel_calls=4)\n return dataset\n\n def _dataset_by_filename(self, directory, images_file):\n \"\"\"Download and parse Omniglot dataset.\"\"\"\n images_folder = self._download(directory, images_file)\n filenames, labels = self._filenames_and_labels(images_folder)\n dataset = self._dataset(filenames, labels)\n return dataset\n\n def _download(self, directory, filename):\n \"\"\"Download (and unzip) a file from the Omniglot dataset if not already done.\"\"\"\n dirpath = os.path.join(directory, self.name)\n filepath = os.path.join(dirpath, filename)\n if tf.gfile.Exists(filepath):\n return filepath\n if not tf.gfile.Exists(dirpath):\n tf.gfile.MakeDirs(dirpath)\n\n url = 'https://github.com/brendenlake/omniglot/raw/master/python/' + (\n filename + '.zip')\n _, zipped_filepath = tempfile.mkstemp(suffix='.zip')\n logging.info('Downloading %s to %s', url, zipped_filepath)\n urllib.request.urlretrieve(url, zipped_filepath)\n\n zip_ref = zipfile.ZipFile(zipped_filepath, 'r')\n zip_ref.extractall(dirpath)\n zip_ref.close()\n\n os.remove(zipped_filepath)\n return filepath\n\n def _filenames_and_labels(self, image_folder):\n \"\"\"Get the image filename and label for each Omniglot character.\"\"\"\n # Compute list of characters (each is a folder full of images)\n character_folders = []\n for family in os.listdir(image_folder):\n if os.path.isdir(os.path.join(image_folder, family)):\n append_characters = False\n if family not in self.CLASS_MAP:\n self.CLASS_MAP[family] = []\n append_characters = True\n for character in os.listdir(os.path.join(image_folder, family)):\n character_folder = os.path.join(image_folder, family, character)\n if append_characters and os.path.isdir(character_folder):\n character_file = os.listdir(character_folder)[0]\n character_label = int(character_file.split('_')[0])\n self.CLASS_MAP[family].append(character_label)\n character_folders.append(character_folder)\n else:\n logging.warning('Path to alphabet is not a directory: %s', os.path.join(image_folder, family))\n\n # Count number of images\n num_images = 0\n for path 
in character_folders:\n if os.path.isdir(path):\n for file in os.listdir(path):\n num_images += 1\n\n # Put them in one big array, and one for labels\n # A 4D uint8 numpy array [index, y, x, depth].\n idx = 0\n filename_arr = []\n label_arr = np.zeros([num_images], dtype=np.int32)\n\n for path in character_folders:\n if os.path.isdir(path):\n for file in os.listdir(path):\n filename_arr.append(os.path.join(path, file))\n label_arr[idx] = file.split('_')[0]\n idx += 1\n\n return filename_arr, label_arr\n\n @staticmethod\n def parse_function(filename, label, shape, dataset_shape, centre=True):\n\n if centre:\n \"\"\"Read and parse the image from a filepath.\"\"\"\n image_string = tf.read_file(filename)\n\n # Don't use tf.image.decode_image, or the output shape will be undefined\n image = tf.image.decode_jpeg(image_string, channels=shape[3])\n\n # This will convert to float values in [0, 1] result shape ?,?,1\n image = tf.image.convert_image_dtype(image, tf.float32)\n\n # Resize image\n image = tf.image.resize_images(image, [shape[1], shape[2]])\n\n # Invert foreground/background so digit is 1 and background is 0\n image = tf_invert_values(image, None)\n\n # Centre image\n centre = [shape[1] * 0.5, shape[2] * 0.5]\n centre_of_mass = tf_centre_of_mass([image], [1, shape[1], shape[2], 1])\n translation = centre - centre_of_mass # e.g. Com = 27, centre = 25, 25-27 = -2\n\n # Translate [dx, dy]\n image = tf.contrib.image.translate([image], [translation], interpolation='BILINEAR')\n\n # flatten feature dimension\n image = tf.reshape(image, dataset_shape[1:])\n\n return image, label\n\n else:\n \"\"\"Read and parse the image from a filepath.\"\"\"\n image_string = tf.read_file(filename)\n\n # Don't use tf.image.decode_image, or the output shape will be undefined\n image = tf.image.decode_jpeg(image_string, channels=shape[3])\n\n # This will convert to float values in [0, 1]\n image = tf.image.convert_image_dtype(image, tf.float32)\n\n # Resize image and flatten feature dimension\n image = tf.image.resize_images(image, [shape[1], shape[2]])\n image = tf.reshape(image, dataset_shape[1:])\n\n return image, label\n",
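The `_create_test_sets` routine above pairs every 'show' exemplar with a different exemplar of the same class, presented in the same order, so that each batch covers `batch_size` unique classes. A minimal standalone sketch of that pairing idea (illustrative only: `make_show_match_batches` and its arguments are assumptions, not part of the class above, and it omits the instance-mode and end-of-data handling):

```python
from collections import defaultdict
import random

def make_show_match_batches(samples, batch_size):
    """samples: iterable of (filename, label) pairs -> equal-length 'show' and 'match' lists."""
    by_label = defaultdict(list)
    for filename, label in samples:
        by_label[label].append(filename)
    # Only classes with at least two exemplars can supply a show/match pair.
    labels = [label for label, files in by_label.items() if len(files) >= 2]
    random.shuffle(labels)
    show, match = [], []
    # Each batch uses batch_size unique classes; the match half repeats them in the same order.
    for start in range(0, len(labels) - batch_size + 1, batch_size):
        for label in labels[start:start + batch_size]:
            first, second = random.sample(by_label[label], 2)
            show.append((first, label))
            match.append((second, label))
    return show, match
```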
"# Copyright (C) 2019 Project AGI\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"HopfieldlikeComponent class.\"\"\"\n\nimport logging\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\nfrom pagi.utils.dual import DualData\nfrom pagi.utils import image_utils, generic_utils, tf_utils\nfrom pagi.utils.image_utils import add_square_as_square, square_image_shape_from_1d, add_op_images\nfrom pagi.utils.layer_utils import activation_fn, type_activation_fn\n\nfrom pagi.components.component import Component\nfrom pagi.components.summarize_levels import SummarizeLevels\n\nfrom aha.utils.generic_utils import build_kernel_initializer, normalize_minmax, print_minmax\n\n########################################################################################\n\n# The following methods convert signals between spaces\n# i.e. from real values between 0,1 to binary values of values -1 or 1\n# The current implementations are for ONE setup that we are currently using, they are not flexible\n# If we are to change the DG outputs, we'll need to change these or parameterise\n#\n# PC space = the memory to memorise\n# PC cue space = cue to use for pr (can be real valued)\n#\n# Assumes:\n# DG = [0, 1] real valued, sparse (i.e. there are definitely units with 0 output)\n# PC = [-1, 1] binary valued\n# PC_cue = [-1, 1] real valued\n\n\ndef pc_to_unit(tensor):\n \"\"\"\n From b[-1,1] to [0,1]\n This implementation only works assuming tensor is binary\n \"\"\"\n # if >0, will be 1, if ==-1 (<0), will be 0\n tensor = tf.to_float(tf.greater(tensor, 0)) # 1.0(True) where =curr_min, 0.(False) where !=curr_min\n return tensor\n\n\ndef unit_to_pc_linear(tensor):\n \"\"\"Input assumed to be unit range. Linearly scales to -1 <= x <= 1\"\"\"\n result = (tensor * 2.0) - 1.0 # Theoretical range limits -1 : 1\n return result\n\n\ndef unit_to_pc_sparse(tensor):\n \"\"\" From b[0,1] to b[-1,1] or [0,1] to r[-1,1] \"\"\"\n tensor, _ = tf_utils.tf_set_min(tensor, None, tgt_min=-1, current_min=0) # set 0's to -1, so that the range is -1, 1\n return tensor\n\n\ndef get_pc_topk_shift(tensor, sparsity):\n \"\"\"Input tensor must be batch of vectors.\n Returns a vector per batch sample of the shift required to make Hopfield converge.\n Assumes knowledge of Hopfield fixed sparsity.\"\"\"\n\n # Intuition: The output distribution must straddle the zero point to make hopfield work.\n # These are the values that should be positive.\n tensor_shape = tensor.get_shape().as_list()\n batch_size = tensor_shape[0]\n num_features = tensor_shape[1]\n cue_top_k_mask = tf_utils.tf_build_top_k_mask_op(input_tensor=tensor,\n k=int(sparsity+1), # NOTE: k+1th elem := 0\n batch_size=batch_size,\n input_area=num_features)\n y = tensor\n # Worked example:\n # e.g. 
k = 2\n # 0, 0.1, 0.3, 0.5 y\n # 0 0 1 1 mask\n # 1-y:\n # 1 0.9 0.7 0.5 y_inv\n # * mask\n # 0, 0 , 0.7, 0.5 y_inv_masked\n # max: 0.7\n # 1-max: 0.3\n y_inv = 1.0 - y\n y_inv_masked = y_inv * cue_top_k_mask\n y_inv_masked_max = tf.reduce_max(y_inv_masked, axis=1) # max per batch sample\n y_masked_min = 1.0 - y_inv_masked_max\n\n # convert this to tanh range\n # cue_tanh_min: -0.5 -0.1 0.0 0.1 0.5\n # 0-x +0.5 +0.1 0.0 -0.1 -0.5\n # so e.g.\n # -0.5 + 0.5 = 0\n # 0.5 + -0.5 = 0 this bit value has become zero.\n cue_tanh_masked_min = (y_masked_min * 2.0) - 1.0 # scale these values\n shift = tf.expand_dims(0.0 - cue_tanh_masked_min, 1)\n return shift\n\n\ndef dg_to_pc(tensor):\n \"\"\" From sparse r[0,1] to b[-1,1]\"\"\"\n tensor = tf.to_float(tf.greater(tensor, 0.0))\n tensor, _ = tf_utils.tf_set_min(tensor, None, -1, current_min=0) # set 0's to -1, so that the range is -1, 1\n return tensor\n\n\n# def dg_to_pc_numpy(arr):\n#\n# arr = np.greater(arr, 0.0).astype(float)\n#\n# minval = -1\n# source_zeros = np.equal(arr, 0).astype(float) # 1.0(True) where =0, 0.(False) where !=0\n# minvals_inplace = minval * source_zeros\n# target = arr + minvals_inplace\n#\n# return target\n\n########################################################################################\n\n\nclass HopfieldlikeComponent(Component):\n \"\"\"\n Hopfield net inspired component. Main ideas of Hopfield network are implemented.\n The training differs though, this version uses gradient descent to minimise the diff\n between input and output calculated simply by one pass through, no activation function.\n i.e. Y = MX, loss = Y - X\n\n pr PATH\n If an input_cue is specified when build() is called, then `use_input_cue mode=True`,\n and the Hopfield learns to map an external to internal cue for retrieval.\n This can be done via a pseudoinverse or NN (set by internal static constant)\n Pseudoinverse is not recommended as it is symmetrical, which is pathological, but it works ok for simple cases.\n\n Batch types are:\n - training : memorise the samples in a batch in the fb weights, & for `use_input_cue mode`, learn to map external\n cue `x_cue` to internal cue `z_cue` which is used for retrieval.\n - encoding : 'retrieval' but use 'encoding' for compatibility other components. Recursive steps to produce output.\n\n Terminology:\n\n x_ext = external input (from DG). Used to memorise batch.\n\n x_cue = external cue input (from EC). Any dimensions. Mapped to z_cue which is used for retrieval.\n z_cue = output of cue mapping (x_cue to Hopfield dimensions)\n\n x_fb = feedback = y(t-1), recursive iterations get Hopfield converging on basins of attraction\n\n y = output of net at time t\n\n WARNINGS\n # Builds ONE 'recursive' summary subgraph (i.e. 
these may be produced in the context of any batch type)\n\n \"\"\"\n\n @staticmethod\n def default_hparams():\n \"\"\"Builds an HParam object with default hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n batch_size=1,\n learning_rate=0.0001,\n optimizer='adam',\n momentum=0.9,\n momentum_nesterov=False,\n use_feedback=True,\n memorise_method='pinv', # pinv = pseudoinverse, otherwise tf optimisation\n nonlinearity='tanh',\n update_n_neurons=-1, # number of neurons to update each iteration, -1 = all\n gain=2.7, # applied to weights during iterations\n\n pr_type='pinv', # 'nn' or 'pinv': type of cue mapping in pr path\n pm_type='none', # 'none' or 'nn': map stable PC patterns back to EC\n pm_raw_type='none', # 'none' or 'nn': map stable PC patterns back to VC input (image space)\n pm_l1_size=100, # hidden layer of PM path (pattern mapping)\n pm_raw_l1_size=100,\n pm_raw_l2_regularizer=0.0,\n pm_raw_nonlinearity='leaky_relu',\n pm_noise_type='s', # 's' for salt, 'sp' for salt + pepper\n pm_train_with_noise=0.0,\n pm_train_with_noise_pp=0.0,\n\n cue_nn_learning_rate=0.0001, # 1.0=off: dropout in nn that learns the cue from EC\n cue_nn_train_dropout_keep_prob=1.0, # 1.0=off: dropout in nn that learns the cue from EC\n cue_nn_test_with_noise=0.0, # 0.0=off: noise to EC for testing generalisation of learning cue with nn\n cue_nn_train_with_noise=0.0, # 0.0=off: noise to EC for testing generalisation of learning cue with nn\n cue_nn_train_with_noise_pp=0.0,\n cue_nn_test_with_noise_pp=0.0,\n cue_nn_label_sparsity=10,\n cue_nn_hidden_size=500,\n cue_nn_sparsity_boost=1.2, # let more bits through, can tolerate false positives better\n cue_nn_non_linearity='sigmoid',\n cue_nn_last_layer='softmax_ce', # 'softmax_ce', 'sigmoid_mse', 'relu_mse', 'linear_mse'\n cue_nn_gain=1.0, # 'softmax_ce', 'sigmoid_mse', 'relu_mse', 'linear_mse'\n cue_nn_sum_norm=10.0,\n cue_nn_softmax=False,\n cue_nn_sparsen=False,\n cue_nn_l2_regularizer=0.0,\n\n summarize_level=SummarizeLevels.ALL.value,\n max_outputs=3\n )\n\n def __init__(self):\n self._name = None\n self._hidden_name = None\n self._hparams = None\n self._dual = None\n self._input_summary_shape = None\n self._input_values = None\n self._input_cue = None\n self._input_cue_raw = None\n self._use_input_cue = None\n self._use_pm = False\n self._use_pm_raw = False\n self._summary_values = None\n self._summary_recursive_values = None\n self._input_size = None\n self._input_values_shape = None\n self._batch_type = None\n\n self._debug_input_cue = False\n\n def reset(self):\n self._dual.get('y').set_values_to(0.0)\n\n loss = self._dual.get('loss_memorise')\n loss.set_values_to(0.0)\n\n if self._use_input_cue:\n pr_loss = self._dual.get('pr_loss')\n pr_loss.set_values_to(0.0)\n\n # -------------------------------------------------------\n # used for playing around with pinv\n\n def get_cue_target(self):\n\n # how we did it before\n x = self._dual.get_op('x_ext') # DG - primary, intended output (already set to pc space)\n\n # using raw so that it learns based on signal (0, 1) i.e. no negative weights. 
We'll map it afterwards.\n # x = self._dual.get_op('x_ext_raw') # DG - primary, intended output\n\n return x\n\n def modify_pr_out(self, x):\n # return dg_to_pc(x)\n return x\n\n # -------------------------------------------------------\n\n @property\n def name(self):\n return self._name\n\n @property\n def use_input_cue(self):\n return self._use_input_cue\n\n @property\n def use_pm(self):\n return self._use_pm\n\n @property\n def use_pm_raw(self):\n return self._use_pm_raw\n\n @property\n def use_inhibition(self):\n return True\n\n def get_loss(self):\n \"\"\"Loss from memorisation of samples in fb weights \"\"\"\n return self._dual.get_values('loss_memorise')\n\n def get_loss_pr(self, default=0):\n \"\"\"Loss from mapping in pr branch (from EC external cue to PC internal cue\"\"\"\n loss = self._dual.get_values('pr_loss_mismatch')\n if loss is None:\n loss = default\n return loss\n\n def get_loss_pr_range(self):\n \"\"\"Minimum possible loss returned from get_pr_loss()\"\"\"\n sparsity = self._hparams.cue_nn_label_sparsity\n boost = self._hparams.cue_nn_sparsity_boost\n min = sparsity - int(sparsity * boost) # they are exactly the same, except for the additional allowed bits\n max = sparsity + int(sparsity * boost) # no overlap in the label and prediction\n return min, max\n\n def get_losses_pm(self, default=0):\n loss = self._dual.get_values('pm_loss')\n loss_raw = self._dual.get_values('pm_loss_raw')\n\n if loss is None:\n loss = default\n\n if loss_raw is None:\n loss_raw = default\n\n return loss, loss_raw\n\n def get_dual(self):\n return self._dual\n\n def get_decoding(self):\n \"\"\"For consistency with other components, 'decoding' is output, y. Reshaped to input dimensions.\"\"\"\n return self._dual.get_values('decoding')\n\n def get_decoding_op(self):\n \"\"\"For consistency with other components, 'decoding' is output, y. 
Reshaped to input dimensions.\"\"\"\n return self._dual.get_op('decoding')\n\n def get_input(self, batch_type):\n \"\"\"\n batch_type: 'training' = memorization, 'encoding' = retrieval\n\n For memorisation, input = x_ext\n For retrieval, input depends on whether PR is in use\n \"\"\"\n\n if batch_type == 'training':\n return self._dual.get_values('x_ext')\n else:\n if self._use_input_cue:\n return self._dual.get_values('pr_out_direct')\n #return self._dual.get_values('pr_out')\n else:\n return self._dual.get_values('x_direct')\n\n def get_ec_out_raw_op(self):\n return self._dual.get_op('ec_out_raw')\n\n def get_ec_out_raw(self):\n return self._dual.get_values('ec_out_raw')\n\n def update_feed_dict(self, feed_dict, batch_type='training'):\n if batch_type == 'training':\n self.update_training_dict(feed_dict)\n if batch_type == 'encoding':\n self.update_encoding_dict(feed_dict)\n\n def _update_dict_fb(self, feed_dict):\n # set feedback from previous y output\n x_next = self._dual.get('y').get_values()\n x_fb = self._dual.get_pl('x_fb')\n feed_dict.update({\n x_fb: x_next\n })\n\n def update_feed_dict_input_gain_pl(self, feed_dict, gain):\n input_gain_pl = self._dual.get('input_gain').get_pl()\n feed_dict.update({\n input_gain_pl: [gain]\n })\n\n def add_fetches(self, fetches, batch_type='training'):\n if batch_type == 'training':\n self.add_training_fetches(fetches)\n if batch_type == 'encoding':\n self.add_encoding_fetches(fetches)\n\n summary_op = self._dual.get_op(generic_utils.summary_name(batch_type))\n if summary_op is not None:\n fetches[self._name]['summaries'] = summary_op\n\n summary_op = self._dual.get_op(generic_utils.summary_name('recursive'))\n if summary_op is not None:\n fetches[self._name]['summaries_recursive'] = summary_op\n\n def set_fetches(self, fetched, batch_type='training'):\n if batch_type == 'training':\n self.set_training_fetches(fetched)\n if batch_type == 'encoding':\n self.set_encoding_fetches(fetched)\n\n summary_op = self._dual.get_op(generic_utils.summary_name(batch_type))\n if summary_op is not None:\n self._summary_values = fetched[self._name]['summaries']\n\n summary_recursive_op = self._dual.get_op(generic_utils.summary_name('recursive'))\n if summary_recursive_op is not None:\n self._summary_recursive_values = fetched[self._name]['summaries_recursive']\n\n def build_summaries(self, batch_types=None, scope=None):\n \"\"\"Builds all summaries.\"\"\"\n if not scope:\n scope = self._name + '/summaries/'\n with tf.name_scope(scope):\n for batch_type in batch_types:\n\n # build 'batch_type' summary subgraph\n with tf.name_scope(batch_type):\n summaries = self._build_summaries(batch_type)\n if summaries and len(summaries) > 0:\n self._dual.set_op(generic_utils.summary_name(batch_type), tf.summary.merge(summaries))\n\n # WARNING: Build ONE 'recursive' summary subgraph (i.e. 
these may be produced in the context of any batch type)\n with tf.name_scope('recursive'):\n summaries = self._build_recursive_summaries()\n if len(summaries) > 0:\n self._dual.set_op(generic_utils.summary_name('recursive'), tf.summary.merge(summaries))\n\n def write_summaries(self, step, writer, batch_type='training'):\n \"\"\"Write the summaries fetched into _summary_values\"\"\"\n if self._summary_values is not None:\n writer.add_summary(self._summary_values, step)\n writer.flush()\n\n def write_recursive_summaries(self, step, writer, batch_type='training'):\n \"\"\"\n Only write summaries for encoding batch_type (retrieval)\n \"\"\"\n if batch_type == 'encoding':\n if self._summary_recursive_values is not None:\n writer.add_summary(self._summary_recursive_values, step)\n writer.flush()\n\n# ---------------- build methods\n\n def build(self, input_values, input_summary_shape, hparams, name, input_cue=None, input_cue_raw=None):\n \"\"\"Builds the network and optimiser.\"\"\"\n self._input_values = input_values\n self._input_summary_shape = input_summary_shape\n self._hparams = hparams\n self._name = name\n\n self._input_values_shape = self._input_values.get_shape().as_list()\n self._input_size = np.prod(self._input_values_shape[1:])\n\n if self._debug_input_cue:\n input_cue = input_values\n\n # if input_cue is provided, then set flag to build PR\n if input_cue is not None:\n self._input_cue = input_cue\n self._use_input_cue = True\n else:\n self._use_input_cue = False\n\n # if hyperparam specifies and input_cue or input_cue_raw provided, set flag to build PM\n if self._hparams.pm_type != 'none' and input_cue is not None:\n self._use_pm = True\n\n if self._hparams.pm_raw_type != 'none' and input_cue_raw is not None:\n self._input_cue_raw = input_cue_raw\n self._use_pm_raw = True\n\n self._dual = DualData(self._name)\n\n with tf.variable_scope(self._name, reuse=tf.AUTO_REUSE):\n\n self._batch_type = tf.placeholder_with_default(input='training', shape=[], name='batch_type')\n\n # 0) setup inputs for other build methods\n # ---------------------------------------------------\n self._setup_inputs() # sets: x_cue, x_ext, x_fb\n\n self._random_recall = self._dual.add('random_recall',\n shape=[],\n default_value='').add_pl(default=True, dtype=tf.string)\n\n # 1) build cue mapping for retrieval (if relevant)\n # ---------------------------------------------------\n if self._use_input_cue:\n if self.use_nn_in_pr_path():\n self._build_pr_nn() # sets: vc_to_pc, loss_learn_cue, uses: x_cue, x_ext\n else:\n self._build_pr_pinv(self._input_size) # sets: w_p, z_cue\n\n # 2) build core Hopfield for retrieval - recursive retrieval network\n # ---------------------------------------------------\n self._build_retrieval() # uses: x_cue, x_ext, x_fb, vc_to_pc [with cue_pinv: w_p, z_cue]\n\n # 3) build cue retrieval memorisation for pinv variant\n # note: must be after build_retrieval()\n # ---------------------------------------------------\n if self._use_input_cue:\n if not self.use_nn_in_pr_path():\n self._build_pr_pinv_memorise() # uses: x_ext, x_cue\n\n # 4) build Pattern Mapping (PC patterns to corresponding EC)\n # ---------------------------------------------------\n if self._use_pm or self._use_pm_raw:\n self._build_pm()\n\n # 5) build Hopfield fb weights - memorisation of x_ext (DG) inputs\n # ---------------------------------------------------\n if self._is_pinv():\n self._build_memorise_pinv()\n\n self.reset()\n\n def _setup_inputs(self):\n \"\"\"Prepare external input by reshaping and optionally 
applying an input gain.\"\"\"\n input_values_shape = self._input_values.get_shape().as_list()\n input_size = np.prod(input_values_shape[1:])\n input_shape = [self._hparams.batch_size, input_size]\n\n # external input (from DG)\n x_ext = tf.reshape(self._input_values, input_shape)\n\n self._dual.set_op('x_ext_raw', x_ext)\n\n x_ext = dg_to_pc(x_ext)\n\n # apply input gain (can be used to amplify or attenuate external input)\n input_gain = self._dual.add('input_gain', shape=[1], default_value=1.0).add_pl(default=True)\n x_ext = tf.multiply(x_ext, input_gain)\n self._dual.set_op('x_ext', x_ext)\n\n # placeholder for getting feedback signal\n self._dual.add('x_fb', shape=input_shape, default_value=0.0).add_pl(default=True)\n\n # input cue (from EC)\n if self._use_input_cue:\n input_cue_shape = self._input_cue.get_shape().as_list()\n input_cue_size = np.prod(input_cue_shape[1:])\n x_cue_shape = [self._hparams.batch_size, input_cue_size]\n x_cue = tf.reshape(self._input_cue, x_cue_shape)\n\n self._dual.set_op('x_cue', x_cue)\n\n def _w_variable(self, shape, trainable=False):\n w_default = 0.01\n w_initializer = w_default * tf.random_uniform(shape)\n\n if not self._is_pinv() and not self._is_pinv_hybrid():\n trainable = True\n\n # Apply a constraint to zero out single cell circular weights (i.e. cell 1 to cell 1)\n return tf.get_variable(name='w', initializer=w_initializer, constraint=self._remove_diagonal,\n trainable=trainable)\n\n def _neuron_update(self, input_size, x_fb, y_potential):\n\n # compute mask\n length_update = self._hparams.update_n_neurons\n if length_update == -1:\n length_update = input_size\n\n seed_np = np.ones(input_size)\n seed_np[length_update:] = 0\n seed = tf.convert_to_tensor(seed_np, dtype=tf.float32)\n mask = tf.random_shuffle(seed) # 1 in the bits to update\n\n # apply masked update\n y_update = tf.multiply(y_potential, mask) # zero out the non-update neurons\n y_purge_mask = 1 - mask\n y_temp = tf.multiply(y_purge_mask, x_fb) # x_fb is baseline, now zero out the neuron to be updated\n y = y_temp + y_update # apply update selectively on that neuron\n\n return y\n\n def _build_retrieval(self):\n \"\"\"\n Initialises variables and builds the Hopfield-like network.\n\n Retrieve with x_direct and x_fb\n\n When `_use_input_cue` enabled:\n - x_direct = z_cue (output of mapping from external cue (EC) to internal cue)\n Else:\n - x_direct = x_ext (DG output)\n\n \"\"\"\n\n input_values_shape = self._input_values_shape\n input_size = self._input_size\n\n # create variables\n w = self._w_variable(shape=[input_size, input_size])\n\n # setup network\n if self._use_input_cue:\n x_direct = self._dual.get_op('z_cue')\n else:\n x_ext = self._dual.get_op('x_ext')\n x_direct = x_ext # no weights, one-to-one mapping, so they are the same\n\n pc_noise = self._dual.add('pc_noise',\n shape=x_direct.shape,\n default_value=0.0).add_pl(default=True, dtype=x_direct.dtype)\n\n # Swap 'x_direct' during random recall at PC\n x_direct = tf.cond(tf.equal(self._random_recall, 'pc'), lambda: pc_noise, lambda: x_direct)\n\n x_fb = self._dual.get_pl('x_fb')\n z = tf.matmul(x_fb, w) + x_direct # weighted sum + bias\n y_potential, _ = activation_fn(self._hparams.gain * z, self._hparams.nonlinearity) # non-linearity\n\n # only update the relevant neurons\n y = self._neuron_update(input_size, x_fb, y_potential)\n\n # calculate Hopfield Energy\n e = -0.5 * tf.matmul(tf.matmul(y, w), tf.transpose(y)) - tf.matmul(y, tf.transpose(x_direct))\n\n # 'decoding' for output in same dimensions as input, and for 
consistency with other components\n y_reshaped = tf.reshape(y, input_values_shape)\n\n # Normalize the decoding output\n y_reshaped = normalize_minmax(y_reshaped)\n\n # remember values for later use\n self._dual.set_op('w', w)\n self._dual.set_op('y', y)\n self._dual.set_op('decoding', y_reshaped)\n self._dual.set_op('e', e)\n self._dual.set_op('x_direct', x_direct)\n\n return y\n\n def _build_pm(self):\n \"\"\"Preprocess the inputs and build the pattern mapping components.\"\"\"\n\n # map to input\n # pc_out = self._dual.get_op('y') # output of Hopfield (PC)\n pc_out = self._dual.get_op('decoding') # output of Hopfield (PC)\n # pc_out = normalize_minmax(pc_out)\n\n pc_target = self._dual.get_op('pr_target')\n\n x_nn = tf.cond(tf.equal(self._batch_type, 'training'),\n lambda: pc_target, # training\n lambda: pc_out) # encoding\n\n # Apply noise during training, to regularise / test generalisation\n # --------------------------------------------------------------------------\n if self._hparams.pm_noise_type == 's': # salt noise\n x_nn = tf.cond(\n tf.equal(self._batch_type, 'training'),\n lambda: image_utils.add_image_salt_noise_flat(x_nn, None,\n noise_val=self._hparams.pm_train_with_noise,\n noise_factor=self._hparams.pm_train_with_noise_pp,\n mode='replace'),\n lambda: x_nn\n )\n\n elif self._hparams.pm_noise_type == 'sp': # salt + pepper noise\n # Inspired by denoising AE.\n # Add salt+pepper noise to mimic missing/extra bits in PC space.\n # Use a fairly high rate of noising to mitigate few training iters.\n x_nn = tf.cond(\n tf.equal(self._batch_type, 'training'),\n lambda: image_utils.add_image_salt_pepper_noise_flat(x_nn, None,\n salt_val=self._hparams.pm_train_with_noise,\n pepper_val=-self._hparams.pm_train_with_noise,\n noise_factor=self._hparams.pm_train_with_noise_pp),\n lambda: x_nn\n )\n\n else:\n raise NotImplementedError('PM noise type not supported: ' + str(self._hparams.noise_type))\n\n # Build PM\n # --------------------------------------------------------------------------\n if self.use_pm:\n ec_in = self._input_cue\n output_nonlinearity = type_activation_fn('leaky_relu')\n ec_out = self._build_pm_core(x=x_nn, target=ec_in,\n l1_size=self._hparams.pm_l1_size,\n non_linearity1=tf.nn.leaky_relu,\n non_linearity2=output_nonlinearity,\n loss_fn=tf.losses.mean_squared_error)\n self._dual.set_op('ec_out', ec_out)\n\n if self._use_pm_raw:\n ec_in = self._input_cue_raw\n output_nonlinearity = type_activation_fn(self._hparams.pm_raw_nonlinearity)\n ec_out_raw = self._build_pm_core(x=x_nn, target=ec_in,\n l1_size=self._hparams.pm_raw_l1_size,\n non_linearity1=tf.nn.leaky_relu,\n non_linearity2=output_nonlinearity,\n loss_fn=tf.losses.mean_squared_error,\n name_suffix=\"_raw\")\n self._dual.set_op('ec_out_raw', ec_out_raw)\n\n def _build_pm_core(self, x, target, l1_size, non_linearity1, non_linearity2, loss_fn, name_suffix=\"\"):\n \"\"\"Build the layers of the PM network, with optional L2 regularization.\"\"\"\n target_shape = target.get_shape().as_list()\n target_size = np.prod(target_shape[1:])\n l2_size = target_size\n\n use_bias = True\n\n weights = []\n scope = 'pm' + name_suffix\n with tf.variable_scope(scope):\n y1_layer = tf.layers.Dense(units=l1_size, activation=non_linearity1, use_bias=use_bias,\n kernel_initializer=build_kernel_initializer('xavier'))\n y1 = y1_layer(x)\n\n f_layer = tf.layers.Dense(units=l2_size, activation=non_linearity2, use_bias=use_bias,\n kernel_initializer=build_kernel_initializer('xavier'))\n f = f_layer(y1)\n\n 
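      # The PM mapping built above is a plain two-layer MLP: a hidden layer of l1_size units
      # followed by an output layer sized to the flattened target (EC output, or the raw image
      # space for the "_raw" variant). Its weights and biases are collected next so they can be
      # L2-regularised below when pm_raw_l2_regularizer > 0.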
weights.append(y1_layer.weights[0])\n weights.append(f_layer.weights[0])\n\n if use_bias:\n weights.append(y1_layer.weights[1])\n weights.append(f_layer.weights[1])\n\n y = tf.stop_gradient(f) # ensure gradients don't leak into other nn's in PC\n\n target_flat = tf.reshape(target, shape=[-1, target_size])\n loss = loss_fn(f, target_flat)\n self._dual.set_op('pm_loss' + name_suffix, loss)\n\n if self._hparams.pm_raw_l2_regularizer > 0.0:\n all_losses = [loss]\n\n for weight in weights:\n weight_loss = tf.nn.l2_loss(weight)\n weight_loss_sum = tf.reduce_sum(weight_loss)\n weight_loss_scaled = weight_loss_sum * self._hparams.pm_raw_l2_regularizer\n all_losses.append(weight_loss_scaled)\n\n all_losses_op = tf.add_n(all_losses, name='total_pm_loss')\n self._build_optimizer(all_losses_op, 'training_pm' + name_suffix, scope)\n else:\n self._build_optimizer(loss, 'training_pm' + name_suffix, scope)\n\n return y\n\n def _build_pr_pinv(self, input_size):\n input_cue_shape = self._input_cue.get_shape().as_list()\n input_cue_size = np.prod(input_cue_shape[1:])\n w_p = tf.get_variable(name='w_p', shape=(input_cue_size, input_size), trainable=False)\n self._dual.set_op('w_p', w_p)\n\n x_cue = self._dual.get_op('x_cue')\n z_cue = tf.matmul(x_cue, w_p)\n\n z_cue = self.modify_pr_out(z_cue)\n\n self._dual.set_op('z_cue', z_cue)\n\n return z_cue\n\n def _build_memorise_pinv(self):\n \"\"\"Pseudoinverse-based optimisation.\"\"\"\n input_values_shape = self._input_values.get_shape().as_list()\n input_size = np.prod(input_values_shape[1:])\n\n x = self._dual.get_op('x_ext')\n\n w_ref = self._dual.get_op('w')\n\n batches = input_values_shape[0]\n x_matrix = tf.reshape(x, [1, batches, input_size]) # 1 matrix of x vol vecs (expressed as 1 batch)\n xinv = tfp.math.pinv(x_matrix, rcond=None, validate_args=False, name=None) # this is XT-1 (transposed already)\n w_batches = tf.matmul(xinv, x_matrix)\n w_val = tf.reshape(w_batches, [input_size, input_size]) # strip out the batch dimension\n w_val = self._remove_diagonal(w_val) # remove self-connections (also a constraint if using gradient training)\n w = tf.assign(w_ref, w_val, name='w_assign')\n\n y_memorise = tf.matmul(x, w)\n loss_memorise = tf.reduce_sum(tf.square(x - y_memorise))\n\n self._dual.set_op('y_memorise', y_memorise)\n self._dual.set_op('loss_memorise', loss_memorise)\n\n def _build_pr_nn(self):\n \"\"\"\n Teach a NN to transform x_cue to x (=z_cue)\n From the NN perspective:\n\n x_nn = x_cue\n t_nn = x (target)\n\n \"\"\"\n\n # 0) nn params\n # ------------------------------------\n sparsity = self._hparams.cue_nn_label_sparsity\n pr_sparsity_boost = self._hparams.cue_nn_sparsity_boost\n non_linearity = self._hparams.cue_nn_non_linearity\n hidden_size = self._hparams.cue_nn_hidden_size\n\n # 1) organise inputs to network\n # ------------------------------------\n x_ext = self._dual.get_op('x_ext') # DG - primary, converted to PC space (-1<=n<=1)\n\n t_nn = pc_to_unit(x_ext) # This means unit range, sparse\n self._dual.set_op('pr_target', t_nn)\n\n t_nn_shape = x_ext.get_shape().as_list()\n t_nn_size = np.prod(t_nn_shape[1:])\n\n x_nn = self._dual.get_op('x_cue') # EC - secondary : vc output\n x_nn_shape = x_nn.get_shape().as_list()\n x_nn_size = np.prod(x_nn_shape[1:])\n\n pr_noise = self._dual.add('pr_noise',\n shape=x_nn.shape,\n default_value=0.0).add_pl(default=True, dtype=x_nn.dtype)\n\n inhibition = self._dual.add('inhibition',\n shape=x_nn.shape,\n default_value=0.0).add_pl(default=True, dtype=x_nn.dtype)\n\n use_inhibition_pl = 
self._dual.add('use_inhibition',\n shape=[],\n default_value=False).add_pl(default=True, dtype=tf.bool)\n\n if self.use_inhibition:\n decay = 0.5\n inhibition_decayed = decay * inhibition + (1 - decay) * x_nn\n self._dual.set_op('inhibition', inhibition_decayed)\n\n inhibition_noise = pr_noise + inhibition_decayed\n random_cue = tf.cond(tf.equal(use_inhibition_pl, True), lambda: inhibition_noise, lambda: pr_noise)\n else:\n random_cue = pr_noise\n\n # Swap 'x_nn' during random recall\n x_nn = tf.cond(tf.equal(self._random_recall, 'pr'), lambda: random_cue, lambda: x_nn)\n\n x_nn = x_nn\n\n # 2) build the network\n # ------------------------------------\n\n # apply noise at train and/or test time, to regularise / test generalisation\n x_nn = tf.cond(tf.equal(self._batch_type, 'encoding'),\n lambda: image_utils.add_image_salt_noise_flat(x_nn, None,\n noise_val=self._hparams.cue_nn_test_with_noise,\n noise_factor=self._hparams.cue_nn_test_with_noise_pp),\n lambda: x_nn)\n\n x_nn = tf.cond(tf.equal(self._batch_type, 'training'),\n lambda: image_utils.add_image_salt_noise_flat(x_nn, None,\n noise_val=self._hparams.cue_nn_train_with_noise,\n noise_factor=self._hparams.cue_nn_train_with_noise_pp),\n lambda: x_nn)\n\n # apply dropout during training\n keep_prob = self._hparams.cue_nn_train_dropout_keep_prob\n x_nn = tf.cond(tf.equal(self._batch_type, 'training'),\n lambda: tf.nn.dropout(x_nn, keep_prob),\n lambda: x_nn)\n\n # Already normalized in episodic_component\n #def normalize(x):\n # return (x - tf.reduce_min(x)) / (tf.reduce_max(x) - tf.reduce_min(x))\n #x_nn = normalize(x_nn)\n\n self._dual.set_op('x_pr_memorise', x_nn) # input to the pr path nn\n\n # Hidden layer[s]\n weights = []\n\n if hidden_size > 0:\n\n kernel_initializer = build_kernel_initializer('xavier')\n\n # hidden_out = tf.layers.dense(inputs=x_nn, units=hidden_size, activation=type_activation_fn(non_linearity),\n # name=\"cue_nn_hidden\")\n layer_hidden = tf.layers.Dense(units=hidden_size,\n activation=type_activation_fn(non_linearity),\n name=\"cue_nn_hidden\",\n kernel_initializer=kernel_initializer)\n hidden_out = layer_hidden(x_nn)\n weights.append(layer_hidden.weights[0])\n weights.append(layer_hidden.weights[1])\n\n # Optional dropout on hidden layer in addition to input dropout, potentially at different rate.\n keep_prob = 1.0\n if keep_prob < 1.0:\n hidden_out = tf.cond(tf.equal(self._batch_type, 'training'),\n lambda: tf.nn.dropout(hidden_out, keep_prob),\n lambda: hidden_out)\n else:\n hidden_out = x_nn\n\n # Final layer - no dropout (because we don't want to damage the output signal, there's no benefit)\n # No nonlinearity (yet)\n\n kernel_initializer = build_kernel_initializer('xavier')\n\n layer_out = tf.layers.Dense(units=t_nn_size,\n name=\"cue_nn_logits\",\n kernel_initializer=kernel_initializer) # units = number of logits\n logits = layer_out(hidden_out)\n weights.append(layer_out.weights[0])\n weights.append(layer_out.weights[1])\n #logits = tf.layers.dense(inputs=hidden_out, units=t_nn_size, name=\"cue_nn_logits\") # units = number of logits\n\n last_layer = self._hparams.cue_nn_last_layer\n\n if last_layer == 'relu_softmax_ce':\n f = tf.nn.relu(logits)\n probs = tf.nn.softmax(f)\n loss = tf.losses.sigmoid_cross_entropy(t_nn, f)\n f = probs * sparsity # adjust output magnitudes: sums to 1, restore magnitudes of individual bits to range=(0,1)\n elif last_layer == 'sigmoid_softmax_ce':\n f = tf.nn.sigmoid(logits)\n probs = tf.nn.softmax(f)\n loss = tf.losses.sigmoid_cross_entropy(t_nn, f)\n f = probs * 
sparsity # adjust output magnitudes: sums to 1, restore magnitudes of individual bits to range=(0,1)\n elif last_layer == 'softmax_ce': # Original mode\n probs = tf.nn.softmax(logits)\n loss = tf.losses.sigmoid_cross_entropy(t_nn, logits)\n f = probs * sparsity # adjust output magnitudes: sums to 1, restore magnitudes of individual bits to range=(0,1)\n elif last_layer == 'sigmoid_ce': # Dave's mode. Treat each output px as a class. problem\n f = tf.nn.sigmoid(logits) # Unit range\n loss = tf.losses.sigmoid_cross_entropy(t_nn, logits)\n elif last_layer == 'sigmoid_mse': # Alternate training regime to try, same output\n #y = logits #tf.nn.sigmoid(logits) WORKS\n f = tf.nn.sigmoid(logits)\n loss = tf.losses.mean_squared_error(t_nn, f)\n elif last_layer == 'lrelu_mse': # Alternate training regime to try, same output\n f = tf.nn.leaky_relu(logits)\n loss = tf.losses.mean_squared_error(t_nn, f)\n else:\n raise RuntimeError(\"cue_nn_last_layer hparam option '{}' not implemented\".format(last_layer))\n\n y = tf.stop_gradient(f)\n #pr_out_direct = y\n\n if self._hparams.cue_nn_l2_regularizer > 0.0:\n all_losses = [loss]\n\n for weight in weights:\n weight_loss = tf.nn.l2_loss(weight)\n weight_loss_sum = tf.reduce_sum(weight_loss)\n weight_loss_scaled = weight_loss_sum * self._hparams.cue_nn_l2_regularizer\n all_losses.append(weight_loss_scaled)\n\n all_losses_op = tf.add_n(all_losses, name='total_pr_loss')\n self._build_optimizer(all_losses_op, 'training_pr', scope='pr')\n else:\n self._build_optimizer(loss, 'training_pr', scope='pr')\n\n # Swap 'y' during replay\n # y = tf.cond(tf.equal(replay, True), lambda: replay_input, lambda: y)\n\n self._dual.set_op('pr_probs', y) # badly named for historical reasons\n\n if (last_layer == 'sigmoid_ce') or (last_layer == 'sigmoid_mse') or (last_layer == 'lrelu_mse'): # New modes\n logging.info('PR New last layer pathway enabled.')\n\n # Clip\n y = tf.clip_by_value(y, 0.0, 1.0)\n\n # Sparsen\n if self._hparams.cue_nn_sparsen is True:\n k_pr = int(sparsity * pr_sparsity_boost)\n logging.info('PR Sparsen enabled k=' + str(k_pr))\n mask = tf_utils.tf_build_top_k_mask_op(input_tensor=y,\n k=k_pr,\n batch_size=self._hparams.batch_size,\n input_area=self._input_size)\n y = y * mask\n\n # Sum norm (all input is positive)\n # We expect a lot of zeros, or near zeros, and a few larger values.\n # 0, 0.1, 0.1, ... 0.8, 0.9, 0.91\n # 5 / 5 * 20 = \n # 1/5 = 0.2\n # 5 * 0.2 * 20 = 20\n # This sum norm is like a softmax on the classification - the mass must be evenly distributed\n if self._hparams.cue_nn_sum_norm > 0.0:\n logging.info('PR Sum-norm enabled')\n y_sum = tf.reduce_sum(y, axis=1, keepdims=True)\n reciprocal = 1.0 / y_sum + 0.0000000000001\n y = y * reciprocal * self._hparams.cue_nn_sum_norm\n\n # Softmax norm DEFAULT: false\n if self._hparams.cue_nn_softmax is True:\n logging.info('PR Softmax enabled')\n y = tf.nn.softmax(y)\n #y = y * 50.0 # Softmax makes the output *very* small. 
This hurts Hopfield reconstruction.\n\n # After norm DEFAULT: 1.0 (no gain)\n if self._hparams.cue_nn_gain != 1.0:\n logging.info('PR Gain enabled')\n y = y * self._hparams.cue_nn_gain\n\n # Range shift from unit to signed unit\n pr_out = y # Unit range\n\n # This would be a better point to tee.\n pr_out_direct = y\n\n z_cue_in = unit_to_pc_linear(y) # Theoretical range limits -1 : 1\n target_sparsity = self._hparams.cue_nn_label_sparsity\n shift = get_pc_topk_shift(y, target_sparsity)\n\n # shift until k bits are > 0, i.e.\n # min *masked* value should become equal to zero.\n z_cue_shift = z_cue_in + shift\n\n # scaling\n # say scale was y_sum. This is a unit value, so it's always pos.\n # (it sounds like we should have norms on exit of VC etc.)\n # sum = 0.25 0.5\n # -0.5 / 0.25=-2\n # 0.5 / 0.25=2\n # y_sum = tf.reduce_sum(y, axis=1) # magnitude of output, per batch sample\n # TODO now apply magnitude (y_sum) to the z_cue. just * it?\n\n else:\n # Old conditioning for PC/Hopfield\n # filter and binarise to get crisp output\n pr_out_mask = tf_utils.tf_build_top_k_mask_op(input_tensor=y,\n k=int(sparsity * pr_sparsity_boost),\n batch_size=self._hparams.batch_size,\n input_area=self._input_size)\n\n # pr_out = tf.to_float(tf.greater(y, 0.02)) * y # anything less than 0.2 is 0\n\n pr_out = pr_out_mask * y # use top k mask on y\n z_cue_in = unit_to_pc_sparse(pr_out) # convert all 0's to -1 for Hopfield\n z_cue_shift = z_cue_in\n\n self._dual.set_op('pr_out_direct', pr_out_direct) # raw output of cue_nn\n self._dual.set_op('pr_out', pr_out) # output of cue_nn after conditioning for PC in retrieval mode\n self._dual.set_op('z_cue_in', z_cue_in) # modified to pc range and type (binary or real)\n self._dual.set_op('z_cue', z_cue_shift) # modified to pc range and type (binary or real)\n self._dual.set_op('t_nn', t_nn) # Target for NN\n self._dual.set_op('x_nn', x_nn) # Target for NN\n\n # store losses for visualisation\n # ------------------------------------\n # cross entropy loss that is optimized\n loss_pr_memorise = tf.reduce_sum(loss)\n self._dual.set_op('pr_loss', loss_pr_memorise) # reduced loss\n\n # number of bits that are mismatched per sample: binary signals, so sum of diff is equal to count of mismatch\n loss_mismatch = tf.reduce_sum(tf.abs(t_nn - pr_out))/self._hparams.batch_size\n self._dual.set_op('pr_loss_mismatch', loss_mismatch)\n\n def _build_pr_pinv_memorise(self):\n \"\"\"\n Calculate weights that 'connect' a secondary input to the stored memories\n Use the same approach as pinv.\n (This is used for EC --> CA3 in AMTL project)\n\n dg = primary (Dentate Gyrus)\n ec = secondary (Entorhinal cortex)\n wp = weights for mapping x_secondary -> hopfield state (perforant)\n \"\"\"\n\n x_shape = self._input_values.get_shape().as_list()\n x_size = np.prod(x_shape[1:])\n\n x = self.get_cue_target()\n\n self._dual.set_op('pr_target', x) # in this PR, there is no change between x_ext and pr_target\n\n x_cue_shape = self._input_cue.get_shape().as_list()\n x_cue_size = np.prod(x_cue_shape[1:])\n x_cue = self._dual.get_op('x_cue') # EC - secondary, input\n\n batches = x_shape[0]\n x_matrix = tf.reshape(x, [1, batches, x_size]) # 1 matrix of x vol vecs (expressed as 1 batch)\n x_cue_matrix = tf.reshape(x_cue, [1, batches, x_cue_size]) # 1 matrix of x vol vecs (expressed as 1 batch)\n x_cue_inv = tfp.math.pinv(x_cue_matrix, rcond=None, validate_args=False, name=None) # this is XT-1 (transposed already)\n w_batches = tf.matmul(x_cue_inv, x_matrix)\n w_p_val = tf.reshape(w_batches, [x_cue_size, 
x_size]) # strip out the batch dimension\n\n adjust = 'none'\n # if adjust == 'outgoing':\n # w_p_exps = tf.exp(w_p_val)\n # row_sums = tf.reduce_sum(w_p_exps, axis=0, keepdims=True)\n # w_p_val = tf.divide(w_p_exps, row_sums)\n # elif adjust == 'incoming':\n # w_p_exps = tf.exp(w_p_val)\n # col_sums = tf.reduce_sum(w_p_exps, axis=1, keepdims=True)\n # w_p_val = tf.divide(w_p_exps, col_sums)\n\n if adjust == 'outgoing':\n # last dimension is columns, or outgoing weights for a given vc neuron\n k_pos = int(self._hparams.cue_nn_label_sparsity * 2)\n k_neg = int(self._hparams.cue_nn_label_sparsity * 10)\n\n values, indices = tf.nn.top_k(w_p_val, k=k_pos) # mask for top k\n min_topk = tf.reduce_min(values, axis=1) # minimum of top k for each column\n\n values, indices = tf.nn.top_k(-w_p_val, k=k_neg) # mask for bottom k (topk of negative)\n min_botk = tf.reduce_max(-values, axis=1) # minimum of top k for each column\n\n w_p_val = tf.transpose(w_p_val) # transpose so we can broadcast 'greater' across cols for each row (vc neuron)\n w_mask_topk = tf.to_float(tf.greater_equal(w_p_val, min_topk)) # if it is in the topk, mask it on\n # w_mask_botk = tf.to_float(tf.less_equal(w_p_val, min_botk)) # if it is in the bottk, mask it on\n\n # # combine masks and apply\n # w_mask = w_mask_topk + w_mask_botk\n\n # mask for all neg values\n w_mask_neg = tf.to_float(tf.less(w_p_val, 0))\n w_mask = w_mask_topk + w_mask_neg\n\n w_p_val = tf.multiply(w_mask, w_p_val)\n w_p_val = tf.transpose(w_p_val) # transpose it back\n\n w_p_ref = self._dual.get_op('w_p')\n w_p = tf.assign(w_p_ref, w_p_val, name='w_p_assign')\n self._dual.set_op('w_p_assign', w_p)\n\n z_cue_memorise = tf.matmul(x_cue, w_p)\n pr_loss = tf.reduce_sum(tf.square(x - z_cue_memorise))\n\n self._dual.set_op('z_cue_memorise', z_cue_memorise)\n self._dual.set_op('pr_loss', pr_loss)\n\n def _build_optimizer(self, loss_op, training_op_name, scope=None, learning_rate=None):\n \"\"\"Minimise loss using initialised a tf.train.Optimizer.\"\"\"\n\n logging.info(\"-----------> Adding optimiser for op %s\", loss_op)\n\n if scope is not None:\n scope = 'optimizer/' + str(scope)\n else:\n scope = 'optimizer'\n\n with tf.variable_scope(scope):\n optimizer = self._setup_optimizer(learning_rate)\n training = optimizer.minimize(loss_op, global_step=tf.train.get_or_create_global_step())\n\n self._dual.set_op(training_op_name, training)\n\n def _setup_optimizer(self, learning_rate=None):\n \"\"\"Initialise the Optimizer class specified by a hyperparameter.\"\"\"\n optimizer_learning_rate = self._hparams.learning_rate # Default\n if learning_rate is not None:\n optimizer_learning_rate = learning_rate\n\n if self._hparams.optimizer == 'adam':\n logging.debug('Adam Opt., Hopfield learning rate: ' + str(optimizer_learning_rate))\n optimizer = tf.train.AdamOptimizer(optimizer_learning_rate)\n elif self._hparams.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(optimizer_learning_rate, self._hparams.momentum,\n use_nesterov=self._hparams.momentum_nesterov)\n elif self._hparams.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(optimizer_learning_rate)\n else:\n raise NotImplementedError('Optimizer not implemented: ' + str(self._hparams.optimizer))\n\n return optimizer\n\n # ---------------- helpers\n\n @staticmethod\n def _remove_diagonal(tensor):\n mask = np.ones(tensor.get_shape(), dtype=np.float32)\n np.fill_diagonal(mask, 0)\n diagonal_mask = tf.convert_to_tensor(mask)\n weights_updated = tf.multiply(tensor, diagonal_mask) # must be 
element-wise\n return weights_updated\n\n @staticmethod\n def _enforce_symmetry(tensor):\n weights_updated = tf.matrix_band_part(tensor, 0, -1)\n weights_updated = 0.5 * (weights_updated + tf.transpose(weights_updated))\n return weights_updated\n\n def _is_pinv(self):\n return self._hparams.memorise_method == 'pinv'\n\n def _is_pinv_hybrid(self):\n return self._hparams.memorise_method == 'pinv_hybrid'\n\n def use_nn_in_pr_path(self):\n return self._hparams.pr_type == 'nn'\n\n # ---------------- training\n\n def update_training_dict(self, feed_dict):\n names = []\n if self.use_inhibition:\n names.extend(['inhibition'])\n self._dual.update_feed_dict(feed_dict, names)\n\n feed_dict.update({\n self._batch_type: 'training'\n })\n\n\n def add_training_fetches(self, fetches):\n\n names = ['loss_memorise', 'y', 'z_cue', 'pr_out', 'pr_out_direct', 'x_direct', 'x_ext']\n\n if self._is_pinv():\n names.extend(['y_memorise', 'w']) # need y_memorise to ensure w is assigned\n\n if self._use_input_cue:\n names.extend(['pr_loss'])\n if self.use_nn_in_pr_path():\n names.extend(['training_pr', 'pr_loss_mismatch']) # this mismatch loss is more interesting\n else:\n names.extend(['w_p_assign'])\n\n if self._use_pm:\n names.extend(['training_pm', 'ec_out'])\n\n if self._use_pm_raw:\n names.extend(['training_pm_raw', 'ec_out_raw'])\n\n if self.use_inhibition:\n names.extend(['inhibition'])\n\n # this needs to be done once, because it replaces the fetches, instead of adding to them\n self._dual.add_fetches(fetches, names)\n\n def set_training_fetches(self, fetched):\n\n names = ['loss_memorise', 'y', 'z_cue', 'pr_out', 'pr_out_direct', 'x_direct', 'x_ext']\n\n if self._is_pinv():\n names.extend(['w'])\n\n if self._use_input_cue:\n names.extend(['pr_loss']) # optional\n if self.use_nn_in_pr_path():\n names.extend(['pr_loss_mismatch']) # this mismatch loss is more interesting\n\n if self._use_pm:\n names.extend(['ec_out'])\n\n if self._use_pm_raw:\n names.extend(['ec_out_raw'])\n\n if self.use_inhibition:\n names.extend(['inhibition'])\n\n self._dual.set_fetches(fetched, names)\n\n# ---------------- inference (encoding)\n\n def update_encoding_dict(self, feed_dict):\n names = []\n if self.use_inhibition:\n names.extend(['inhibition'])\n self._dual.update_feed_dict(feed_dict, names)\n\n self._update_dict_fb(feed_dict)\n\n feed_dict.update({\n self._batch_type: 'encoding'\n })\n\n def add_encoding_fetches(self, fetches):\n\n names = ['decoding', 'y', 'z_cue', 'pr_out', 'pr_out_direct', 'x_direct', 'x_ext']\n\n if self._is_pinv():\n names.extend(['w'])\n\n if self._use_input_cue:\n if self.use_nn_in_pr_path():\n names.extend(['pr_loss_mismatch']) # this mismatch loss is more interesting\n names.extend(['pr_loss'])\n\n if self._use_pm:\n names.extend(['pm_loss', 'ec_out'])\n\n if self._use_pm_raw:\n names.extend(['pm_loss_raw', 'ec_out_raw'])\n\n if self.use_inhibition:\n names.extend(['inhibition'])\n\n self._dual.add_fetches(fetches, names)\n\n def set_encoding_fetches(self, fetched):\n names = ['decoding', 'y', 'z_cue', 'pr_out', 'pr_out_direct', 'x_direct', 'x_ext']\n\n if self._is_pinv():\n names.extend(['w'])\n\n if self._use_input_cue:\n if self.use_nn_in_pr_path():\n names.extend(['pr_loss_mismatch']) # this mismatch loss is more interesting\n names.extend(['pr_loss'])\n\n if self._use_pm:\n names.extend(['pm_loss', 'ec_out'])\n\n if self._use_pm_raw:\n names.extend(['pm_loss_raw', 'ec_out_raw'])\n\n if self.use_inhibition:\n names.extend(['inhibition'])\n\n self._dual.set_fetches(fetched, names)\n\n# 
-------------- build summaries\n\n def _build_summaries(self, batch_type='training'):\n \"\"\"Assumes appropriate name scope has been set.\"\"\"\n summaries = []\n\n if self._hparams.summarize_level == SummarizeLevels.OFF.value:\n return summaries\n\n if batch_type == 'training':\n summaries = self._build_summaries_memorise(summaries, verbose=False)\n if batch_type == 'encoding':\n summaries = self._build_summaries_retrieve(summaries, verbose=False)\n return summaries\n\n def _build_recursive_summaries(self):\n \"\"\"\n Assumes appropriate name scope has been set. Same level as _build_summaries.\n\n Build same summaries as retrieval.\n \"\"\"\n summaries = []\n\n if self._hparams.summarize_level == SummarizeLevels.OFF.value:\n return summaries\n\n summaries = self._build_summaries_retrieve(summaries, verbose=False)\n return summaries\n\n def _build_summarise_pm(self, summaries, max_outputs):\n\n if not (self.use_pm_raw or self._use_pm):\n return\n\n with tf.name_scope('pm'):\n\n # original vc for visuals\n if self.use_pm_raw:\n ec_in = self._input_cue_raw\n ec_out = self._dual.get_op('ec_out_raw')\n ec_recon = image_utils.concat_images([ec_in, ec_out], self._hparams.batch_size)\n summaries.append(tf.summary.image('ec_recon_raw', ec_recon, max_outputs=max_outputs))\n\n pm_loss_raw = self._dual.get_op('pm_loss_raw')\n summaries.append(tf.summary.scalar('pm_loss_raw', pm_loss_raw))\n\n # actual input received to PC from EC (vc out)\n if self.use_pm:\n ec_in = self._input_cue\n ec_out = self._dual.get_op('ec_out')\n ec_recon = image_utils.concat_images([ec_in, ec_out], self._hparams.batch_size)\n summaries.append(tf.summary.image('ec_recon', ec_recon, max_outputs=max_outputs))\n\n\n\n # visualise losses\n pm_loss = self._dual.get_op('pm_loss')\n summaries.append(tf.summary.scalar('pm_loss', pm_loss))\n\n def _build_summarise_cue_learning(self, summaries, summary_input_shape, batch_type, max_outputs):\n\n # Terminology:\n # pr = the path to create a cue from VC\n # xcue = the input to the pr path\n # cue = the output of the pr path, used to retrieve a memory from the Hopfield\n\n if not self._use_input_cue:\n return\n\n debug_scalar_stats = False\n\n with tf.name_scope('pr'):\n\n # nn method\n if self.use_nn_in_pr_path():\n\n pr_out = self._dual.get_op('pr_out') # unit\n pr_probs = self._dual.get_op('pr_probs') # unit\n pr_target = self._dual.get_op('pr_target') # unit\n\n # 1) *Mod to input to NN*: show concat of: [x_nn / modified x_nn = x_pr_memorise]\n # modification: may be dropout or noise affected, depending on training/encoding and hparams\n # -------------------------------------------------------------------------------------------\n x_cue = self._dual.get_op('x_cue')\n x_cue_shape = x_cue.get_shape().as_list()\n x_cue_summary_shape, _ = square_image_shape_from_1d(x_cue_shape[1])\n x_cue = tf.reshape(x_cue, x_cue_summary_shape) # output\n x_pr_memorise = self._dual.get_op('x_pr_memorise')\n x_pr_reshape = tf.reshape(x_pr_memorise, x_cue_summary_shape) # output\n diff = tf.abs(tf.subtract(x_cue, x_pr_reshape))\n x_cue_mod = image_utils.concat_images([x_cue, x_pr_reshape, diff], self._hparams.batch_size)\n summaries.append(tf.summary.image('xcue_mod', x_cue_mod, max_outputs=max_outputs))\n\n # 2) *Mod to output of NN* show concat of: [softmax probs / filtered output]\n # -------------------------------------------------------------------------------------------\n prob_out = image_utils.concat_images([pr_probs, pr_out],\n self._hparams.batch_size,\n summary_input_shape)\n\n 
summaries.append(tf.summary.image('prob_out', prob_out, max_outputs=max_outputs))\n\n # 3) *Results of NN*: show concat of: [labels / filtered output / diff]\n # -------------------------------------------------------------------------------------------\n diff = tf.abs(tf.subtract(pr_target, pr_out))\n concat_image = image_utils.concat_images([pr_target, pr_out, diff], self._hparams.batch_size,\n summary_input_shape)\n summaries.append(tf.summary.image('nn_label_out', concat_image, max_outputs=max_outputs))\n\n # 4) *Results of pr including space conversions*: show concat of: [raw labels / final output / diff]\n # -------------------------------------------------------------------------------------------\n target = self._dual.get_op('x_ext')\n out = self._dual.get_op('z_cue')\n diff = tf.abs(tf.subtract(target, out))\n concat_image = image_utils.concat_images([target, out, diff], self._hparams.batch_size, summary_input_shape)\n summaries.append(tf.summary.image('pr_tgt_cue', concat_image, max_outputs=max_outputs))\n\n # 5) *Results of Hopfield given the cue: [labels / internal cue / pc output]\n # -------------------------------------------------------------------------------------------\n y = self._dual.get_op('y')\n concat_image = image_utils.concat_images([pr_target, out, y], self._hparams.batch_size, summary_input_shape)\n summaries.append(tf.summary.image('label_zcue_y', concat_image, max_outputs=max_outputs))\n\n # pinv method\n else:\n z_cue = self._dual.get_op('z_cue') # onehot space\n pr_target = self._dual.get_op('pr_target') # onehot\n\n show_pr_memorise_with_target = True\n if not show_pr_memorise_with_target:\n summaries.append(tf.summary.image('z_cue', z_cue, max_outputs=max_outputs))\n else:\n label_cue = image_utils.concat_images([pr_target, z_cue], self._hparams.batch_size, summary_input_shape)\n summaries.append(tf.summary.image('label_cue', label_cue, max_outputs=max_outputs))\n\n # nn method\n if self.use_nn_in_pr_path():\n # visualise losses\n summaries.append(tf.summary.scalar('loss', self._dual.get_op('pr_loss')))\n\n # count of mismatched bits\n summaries.append(tf.summary.scalar('loss_mismatch', self._dual.get_op('pr_loss_mismatch')))\n\n summaries.append(tf.summary.histogram('pr_out', self._dual.get_op('pr_out')))\n\n # pinv method\n else:\n z_cue = self._dual.get_op('z_cue')\n w_p = self._dual.get_op('w_p')\n\n add_square_as_square(summaries, w_p, 'w_p')\n\n summaries.append(tf.summary.histogram('w_p', w_p))\n\n if batch_type == 'training':\n summaries.append(tf.summary.scalar('loss', self._dual.get_op('pr_loss')))\n\n if z_cue is not None:\n z_cue_reshape = tf.reshape(z_cue, summary_input_shape)\n summaries.append(tf.summary.image('z_cue', z_cue_reshape, max_outputs=max_outputs))\n\n if debug_scalar_stats:\n summaries.append(tf_utils.tf_build_stats_summaries(self._dual.get_op('x_ext'), 'x_ext'))\n summaries.append(tf_utils.tf_build_stats_summaries(self._dual.get_op('x_cue'), 'x_cue'))\n if self._use_input_cue:\n summaries.append(tf_utils.tf_build_stats_summaries(self._dual.get_op('pr_target'), 'pr_target'))\n summaries.append(tf_utils.tf_build_stats_summaries(self._dual.get_op('pr_out'), 'pr_out'))\n summaries.append(tf_utils.tf_build_stats_summaries(self._dual.get_op('z_cue'), 'z_cue'))\n\n def _build_summaries_retrieve(self, summaries, verbose=False):\n \"\"\"Build summaries for retrieval.\"\"\"\n\n # summarise_stuff = ['pm', 'pr', 'general']\n # summarise_stuff = ['pm']\n summarise_stuff = ['general']\n\n max_outputs = self._hparams.max_outputs\n 
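    # Only the 'general' summary group is active by default; the 'pr' and 'pm' groups can be
    # re-enabled via the commented-out summarise_stuff lists above when debugging those paths.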
summary_input_shape = image_utils.get_image_summary_shape(self._input_summary_shape)\n\n if 'general' in summarise_stuff:\n\n x_direct = self._dual.get_op('x_direct')\n y = self._dual.get_op('y')\n w = self._dual.get_op('w')\n\n with tf.name_scope('vars'):\n if verbose:\n add_square_as_square(summaries, w, 'w')\n\n w_p = self._dual.get_op('w_p')\n if w_p is not None:\n add_square_as_square(summaries, w_p, 'w_p')\n\n if verbose:\n # Inspect data ranges\n ##########################\n x_nn = self._dual.get_op('x_nn')\n t_nn = self._dual.get_op('t_nn')\n p_nn = self._dual.get_op('z_cue_in') # modified to pc range and type (binary or real)\n l_nn = self._dual.get_op('pr_out')\n\n summaries.append(tf.summary.histogram('PR_input', x_nn))\n\n summaries.append(tf.summary.histogram('PR_target', t_nn))\n summaries.append(tf.summary.histogram('PR_predict', p_nn))\n summaries.append(tf.summary.histogram('PR_output', l_nn))\n concat_image = image_utils.concat_images([t_nn, l_nn, y], self._hparams.batch_size, summary_input_shape)\n summaries.append(tf.summary.image('label_zcue_y', concat_image, max_outputs=max_outputs))\n\n if verbose:\n ops = ['x_ext', 'x_direct', 'z_cue', 'x_fb']\n add_op_images(self._dual, ops, summary_input_shape, max_outputs, summaries)\n\n x_reshape = tf.reshape(x_direct, summary_input_shape)\n y_reshape = tf.reshape(y, summary_input_shape)\n\n # output of the net\n summaries.append(tf.summary.image('y', y_reshape, max_outputs=max_outputs))\n\n show_as_recon_also = True\n if show_as_recon_also:\n diff = tf.abs(tf.subtract(x_reshape, y_reshape))\n x_y = tf.concat([tf.concat([x_reshape, y_reshape], axis=1), diff], axis=1)\n summaries.append(tf.summary.image('x_y_diff', x_y, max_outputs=max_outputs))\n\n if verbose:\n with tf.name_scope('distr'):\n\n def add_op_histograms(ops):\n for op_name in ops:\n op = self._dual.get_op(op_name)\n summaries.append(tf.summary.histogram(op_name, op))\n\n ops = ['w', 'x_direct', 'y']\n add_op_histograms(ops)\n\n with tf.name_scope('performance'):\n e = self._dual.get_op('e')\n summaries.append(tf.summary.scalar('Energy_total', tf.reduce_sum(e)))\n\n for idx in range(max_outputs):\n summaries.append(tf.summary.scalar('Energy_'+str(idx), tf.reduce_sum(e[idx])))\n\n if 'pr' in summarise_stuff:\n self._build_summarise_cue_learning(summaries, summary_input_shape, 'encoding', max_outputs)\n\n if 'pm' in summarise_stuff:\n self._build_summarise_pm(summaries, max_outputs)\n\n return summaries\n\n def _build_summaries_memorise(self, summaries, verbose=False):\n \"\"\"Build summaries for memorisation.\"\"\"\n\n # summarise_stuff = ['pm', 'pr', 'general']\n summarise_stuff = ['pm']\n max_outputs = self._hparams.max_outputs\n summary_input_shape = image_utils.get_image_summary_shape(self._input_summary_shape)\n\n if 'general' in summarise_stuff:\n w = self._dual.get_op('w')\n k = self._dual.get_op('k')\n b = self._dual.get_op('b')\n loss_memorise = self._dual.get_op('loss_memorise')\n x_ext = self._dual.get_op('x_ext')\n x_direct = self._dual.get_op('x_direct')\n y = self._dual.get_op('y')\n y_memorise = self._dual.get_op('y_memorise')\n\n ##########################\n # Inspect data ranges\n ##########################\n x_nn = self._dual.get_op('x_nn')\n t_nn = self._dual.get_op('t_nn')\n p_nn = self._dual.get_op('z_cue_in') # modified to pc range and type (binary or real)\n l_nn = self._dual.get_op('pr_out')\n summaries.append(tf.summary.histogram('PR_input', x_nn))\n summaries.append(tf.summary.histogram('PR_target', t_nn))\n 
summaries.append(tf.summary.histogram('PR_predict', p_nn))\n summaries.append(tf.summary.histogram('PR_output', l_nn))\n concat_image = image_utils.concat_images([t_nn, l_nn, y], self._hparams.batch_size, summary_input_shape)\n summaries.append(tf.summary.image('label_zcue_y', concat_image, max_outputs=max_outputs))\n ##########################\n # Inspect data ranges\n ##########################\n\n x_ext_reshaped = tf.reshape(x_ext, summary_input_shape)\n summaries.append(tf.summary.image('x_ext', x_ext_reshaped, max_outputs=max_outputs))\n\n x_reshape = tf.reshape(x_direct, summary_input_shape)\n summaries.append(tf.summary.image('x_direct', x_reshape, max_outputs=max_outputs))\n\n if y is not None and not self._is_pinv():\n # doesn't mean anything on pinv, where only one iteration of training, and input was indeterminate\n y_reshape = tf.reshape(y, summary_input_shape)\n summaries.append(tf.summary.image('y', y_reshape, max_outputs=max_outputs))\n\n if y_memorise is not None:\n y_reshape = tf.reshape(y_memorise, summary_input_shape)\n summaries.append(tf.summary.image('y_memorise', y_reshape, max_outputs=max_outputs))\n\n show_as_recon_also = True\n if show_as_recon_also:\n diff = tf.abs(tf.subtract(x_reshape, y_reshape))\n x_y = tf.concat([tf.concat([x_reshape, y_reshape], axis=1), diff], axis=1)\n summaries.append(tf.summary.image('x_y_diff', x_y, max_outputs=max_outputs))\n\n with tf.name_scope('vars'):\n # images\n add_square_as_square(summaries, w, 'w')\n\n # Monitor parameter sum over time\n with tf.name_scope('sum'):\n w_sum_summary = tf.summary.scalar('w', tf.reduce_sum(tf.abs(w)))\n summaries.extend([w_sum_summary])\n\n # histograms\n with tf.name_scope('hist'):\n summaries.append(tf.summary.histogram('w', w))\n\n with tf.name_scope('opt'):\n\n if loss_memorise is not None:\n summaries.append(tf.summary.scalar('loss_memorise', loss_memorise))\n\n if 'pr' in summarise_stuff:\n self._build_summarise_cue_learning(summaries, summary_input_shape, 'training', max_outputs)\n\n if 'pm' in summarise_stuff:\n self._build_summarise_pm(summaries, max_outputs)\n\n return summaries\n\n def variables_networks(self, outer_scope):\n vars_nets = []\n\n # Selectively include/exclude optimizer parameters\n optim_pr = True\n optim_pm = False\n optim_pm_raw = True\n\n if self._use_input_cue:\n vars_nets += self._variables_cue_nn(outer_scope)\n if optim_pr:\n vars_nets += self._variables_cue_nn_optimizer(outer_scope)\n\n if self._use_pm:\n vars_nets += self._variables_pm(outer_scope)\n if optim_pm:\n vars_nets += self._variables_pm_optimizer(outer_scope)\n\n if self.use_pm_raw:\n vars_nets += self._variables_pm_raw(outer_scope)\n if optim_pm_raw:\n vars_nets += self._variables_pm_raw_optimizer(outer_scope)\n\n return vars_nets\n\n @staticmethod\n def _variables_cue_nn(outer_scope):\n cue_nn_hidden = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/cue_nn_hidden\"\n )\n cue_nn_logits = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/cue_nn_logits\"\n )\n cue_nn = cue_nn_hidden + cue_nn_logits\n return cue_nn\n\n @staticmethod\n def _variables_pm(outer_scope):\n pm = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/pm\"\n )\n return pm\n\n @staticmethod\n def _variables_pm_raw(outer_scope):\n pm_raw = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/pm_raw\"\n )\n return pm_raw\n\n @staticmethod\n def _variables_cue_nn_optimizer(outer_scope):\n return tf.get_collection(\n 
tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/optimizer/pr\")\n\n @staticmethod\n def _variables_pm_optimizer(outer_scope):\n return tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/optimizer/pm\")\n\n @staticmethod\n def _variables_pm_raw_optimizer(outer_scope):\n return tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES,\n scope=outer_scope + \"/optimizer/pm_raw\")\n"
] | [
[
"tensorflow.read_file",
"tensorflow.gfile.Exists",
"tensorflow.image.resize_images",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.contrib.image.translate",
"tensorflow.reshape",
"numpy.random.shuffle",
"tensorflow.gfile.MakeDirs",
"tensorflow.image.convert_image_dtype",
"numpy.zeros",
"tensorflow.image.decode_jpeg"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.matrix_band_part",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.nn.l2_loss",
"numpy.fill_diagonal",
"tensorflow.train.AdamOptimizer",
"tensorflow.random_shuffle",
"tensorflow.add_n",
"tensorflow.summary.scalar",
"tensorflow.greater",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.placeholder_with_default",
"tensorflow.train.get_or_create_global_step",
"tensorflow.stop_gradient",
"tensorflow.subtract",
"tensorflow.nn.top_k",
"tensorflow.train.MomentumOptimizer",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.less",
"tensorflow.layers.Dense",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.contrib.training.HParams",
"tensorflow.summary.merge",
"tensorflow.summary.histogram",
"tensorflow.clip_by_value",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.losses.mean_squared_error",
"tensorflow.nn.leaky_relu",
"tensorflow.reshape",
"tensorflow.assign",
"tensorflow.expand_dims",
"tensorflow.losses.sigmoid_cross_entropy",
"numpy.ones",
"tensorflow.reduce_min",
"numpy.prod",
"tensorflow.variable_scope",
"tensorflow.greater_equal",
"tensorflow.random_uniform",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Gavin-Hoang/mindspore | [
"f745ae0799a0840ebba18021c250f0089325a414",
"f745ae0799a0840ebba18021c250f0089325a414"
] | [
"tests/ut/python/dataset/test_concatenate_op.py",
"tests/st/ops/gpu/test_resize_nearest_neighbor_op.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting concatenate op\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.c_transforms as data_trans\n\n\ndef test_concatenate_op_all():\n def gen():\n yield (np.array([5., 6., 7., 8.], dtype=np.float),)\n\n prepend_tensor = np.array([1.4, 2., 3., 4., 4.5], dtype=np.float)\n append_tensor = np.array([9., 10.3, 11., 12.], dtype=np.float)\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n concatenate_op = data_trans.Concatenate(0, prepend_tensor, append_tensor)\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n expected = np.array([1.4, 2., 3., 4., 4.5, 5., 6., 7., 8., 9., 10.3,\n 11., 12.])\n for data_row in data:\n np.testing.assert_array_equal(data_row[0], expected)\n\n\ndef test_concatenate_op_none():\n def gen():\n yield (np.array([5., 6., 7., 8.], dtype=np.float),)\n\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n concatenate_op = data_trans.Concatenate()\n\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n for data_row in data:\n np.testing.assert_array_equal(data_row[0], np.array([5., 6., 7., 8.], dtype=np.float))\n\n\ndef test_concatenate_op_string():\n def gen():\n yield (np.array([\"ss\", \"ad\"], dtype='S'),)\n\n prepend_tensor = np.array([\"dw\", \"df\"], dtype='S')\n append_tensor = np.array([\"dwsdf\", \"df\"], dtype='S')\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n concatenate_op = data_trans.Concatenate(0, prepend_tensor, append_tensor)\n\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n expected = np.array([\"dw\", \"df\", \"ss\", \"ad\", \"dwsdf\", \"df\"], dtype='S')\n for data_row in data:\n np.testing.assert_array_equal(data_row[0], expected)\n\n\ndef test_concatenate_op_multi_input_string():\n prepend_tensor = np.array([\"dw\", \"df\"], dtype='S')\n append_tensor = np.array([\"dwsdf\", \"df\"], dtype='S')\n\n data = ([[\"1\", \"2\", \"d\"]], [[\"3\", \"4\", \"e\"]])\n data = ds.NumpySlicesDataset(data, column_names=[\"col1\", \"col2\"])\n\n concatenate_op = data_trans.Concatenate(0, prepend=prepend_tensor, append=append_tensor)\n\n data = data.map(input_columns=[\"col1\", \"col2\"], columns_order=[\"out1\"], output_columns=[\"out1\"],\n operations=concatenate_op)\n expected = np.array([\"dw\", \"df\", \"1\", \"2\", \"d\", \"3\", \"4\", \"e\", \"dwsdf\", \"df\"], dtype='S')\n for data_row in data:\n np.testing.assert_array_equal(data_row[0], expected)\n\n\ndef test_concatenate_op_multi_input_numeric():\n prepend_tensor = np.array([3, 5])\n\n data = ([[1, 2]], [[3, 4]])\n data = ds.NumpySlicesDataset(data, column_names=[\"col1\", \"col2\"])\n\n concatenate_op = data_trans.Concatenate(0, prepend=prepend_tensor)\n\n data = data.map(input_columns=[\"col1\", \"col2\"], columns_order=[\"out1\"], output_columns=[\"out1\"],\n 
operations=concatenate_op)\n expected = np.array([3, 5, 1, 2, 3, 4])\n for data_row in data:\n np.testing.assert_array_equal(data_row[0], expected)\n\n\ndef test_concatenate_op_type_mismatch():\n def gen():\n yield (np.array([3, 4], dtype=np.float),)\n\n prepend_tensor = np.array([\"ss\", \"ad\"], dtype='S')\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n concatenate_op = data_trans.Concatenate(0, prepend_tensor)\n\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n with pytest.raises(RuntimeError) as error_info:\n for _ in data:\n pass\n assert \"Tensor types do not match\" in str(error_info.value)\n\n\ndef test_concatenate_op_type_mismatch2():\n def gen():\n yield (np.array([\"ss\", \"ad\"], dtype='S'),)\n\n prepend_tensor = np.array([3, 5], dtype=np.float)\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n concatenate_op = data_trans.Concatenate(0, prepend_tensor)\n\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n with pytest.raises(RuntimeError) as error_info:\n for _ in data:\n pass\n assert \"Tensor types do not match\" in str(error_info.value)\n\n\ndef test_concatenate_op_incorrect_dim():\n def gen():\n yield (np.array([[\"ss\", \"ad\"], [\"ss\", \"ad\"]], dtype='S'),)\n\n prepend_tensor = np.array([3, 5], dtype=np.float)\n concatenate_op = data_trans.Concatenate(0, prepend_tensor)\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n with pytest.raises(RuntimeError) as error_info:\n for _ in data:\n pass\n assert \"Only 1D tensors supported\" in str(error_info.value)\n\n\ndef test_concatenate_op_wrong_axis():\n with pytest.raises(ValueError) as error_info:\n data_trans.Concatenate(2)\n assert \"only 1D concatenation supported.\" in str(error_info.value)\n\n\ndef test_concatenate_op_negative_axis():\n def gen():\n yield (np.array([5., 6., 7., 8.], dtype=np.float),)\n\n prepend_tensor = np.array([1.4, 2., 3., 4., 4.5], dtype=np.float)\n append_tensor = np.array([9., 10.3, 11., 12.], dtype=np.float)\n data = ds.GeneratorDataset(gen, column_names=[\"col\"])\n concatenate_op = data_trans.Concatenate(-1, prepend_tensor, append_tensor)\n data = data.map(input_columns=[\"col\"], operations=concatenate_op)\n expected = np.array([1.4, 2., 3., 4., 4.5, 5., 6., 7., 8., 9., 10.3,\n 11., 12.])\n for data_row in data:\n np.testing.assert_array_equal(data_row[0], expected)\n\n\ndef test_concatenate_op_incorrect_input_dim():\n prepend_tensor = np.array([[\"ss\", \"ad\"], [\"ss\", \"ad\"]], dtype='S')\n\n with pytest.raises(ValueError) as error_info:\n data_trans.Concatenate(0, prepend_tensor)\n assert \"can only prepend 1D arrays.\" in str(error_info.value)\n\n\nif __name__ == \"__main__\":\n test_concatenate_op_all()\n test_concatenate_op_none()\n test_concatenate_op_string()\n test_concatenate_op_multi_input_string()\n test_concatenate_op_multi_input_numeric()\n test_concatenate_op_type_mismatch()\n test_concatenate_op_type_mismatch2()\n test_concatenate_op_incorrect_dim()\n test_concatenate_op_negative_axis()\n test_concatenate_op_wrong_axis()\n test_concatenate_op_incorrect_input_dim()\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\n\nclass ResizeNearestNeighborAlignCornerT(nn.Cell):\n def __init__(self, size):\n super(ResizeNearestNeighborAlignCornerT, self).__init__()\n self.ResizeNearestNeighborAlignCornerT = P.ResizeNearestNeighbor(size, align_corners=True)\n\n def construct(self, x):\n return self.ResizeNearestNeighborAlignCornerT(x)\n\nclass ResizeNearestNeighborAlignCornerF(nn.Cell):\n def __init__(self, size):\n super(ResizeNearestNeighborAlignCornerF, self).__init__()\n self.ResizeNearestNeighborAlignCornerF = P.ResizeNearestNeighbor(size, align_corners=False)\n\n def construct(self, x):\n return self.ResizeNearestNeighborAlignCornerF(x)\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_ResizeNearestNeighborAlignCornerT():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float32))\n expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float32)\n rnn = ResizeNearestNeighborAlignCornerT((4, 4))\n output = rnn(input_tensor)\n assert np.all(output.asnumpy() == expect)\n input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float16))\n expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float16)\n rnn = ResizeNearestNeighborAlignCornerT((4, 4))\n output = rnn(input_tensor)\n assert np.all(output.asnumpy() == expect)\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_ResizeNearestNeighborAlignCornerF():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float32))\n expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float32)\n rnn = ResizeNearestNeighborAlignCornerF((4, 4))\n output = rnn(input_tensor)\n assert np.all(output.asnumpy() == expect)\n input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float16))\n expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float16)\n rnn = ResizeNearestNeighborAlignCornerF((4, 4))\n output = rnn(input_tensor)\n assert np.all(output.asnumpy() == expect)\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rdutra/multi-class-text-classification-cnn | [
"bdb4403166e8b350fbc2b1073276755e46af9603"
] | [
"train.py"
] | [
"import os\nimport sys\nimport json\nimport time\nimport logging\nimport data_helper\nimport numpy as np\nimport tensorflow as tf\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\nfrom sklearn.model_selection import train_test_split\n\nlogging.getLogger().setLevel(logging.INFO)\n\ndef train_cnn():\n\t\"\"\"Step 0: load sentences, labels, and training parameters\"\"\"\n\ttrain_file = sys.argv[1]\n\tx_raw, y_raw, df, labels = data_helper.load_data_and_labels(train_file)\n\n\tparameter_file = sys.argv[2]\n\tparams = json.loads(open(parameter_file).read())\n\n\t\"\"\"Step 1: pad each sentence to the same length and map each word to an id\"\"\"\n\tmax_document_length = max([len(x.split(' ')) for x in x_raw])\n\tlogging.info('The maximum length of all sentences: {}'.format(max_document_length))\n\tvocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\n\tx = np.array(list(vocab_processor.fit_transform(x_raw)))\n\ty = np.array(y_raw)\n\n\t\"\"\"Step 2: split the original dataset into train and test sets\"\"\"\n\tx_, x_test, y_, y_test = train_test_split(x, y, test_size=0.1, random_state=42)\n\n\t\"\"\"Step 3: shuffle the train set and split the train set into train and dev sets\"\"\"\n\tshuffle_indices = np.random.permutation(np.arange(len(y_)))\n\tx_shuffled = x_[shuffle_indices]\n\ty_shuffled = y_[shuffle_indices]\n\tx_train, x_dev, y_train, y_dev = train_test_split(x_shuffled, y_shuffled, test_size=0.1)\n\n\t\"\"\"Step 4: save the labels into labels.json since predict.py needs it\"\"\"\n\twith open('./labels.json', 'w') as outfile:\n\t\tjson.dump(labels, outfile, indent=4)\n\n\tlogging.info('x_train: {}, x_dev: {}, x_test: {}'.format(len(x_train), len(x_dev), len(x_test)))\n\tlogging.info('y_train: {}, y_dev: {}, y_test: {}'.format(len(y_train), len(y_dev), len(y_test)))\n\n\t\"\"\"Step 5: build a graph and cnn object\"\"\"\n\tgraph = tf.Graph()\n\twith graph.as_default():\n\t\tsession_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n\t\tsess = tf.Session(config=session_conf)\n\t\twith sess.as_default():\n\t\t\tcnn = TextCNN(\n\t\t\t\tsequence_length=x_train.shape[1],\n\t\t\t\tnum_classes=y_train.shape[1],\n\t\t\t\tvocab_size=len(vocab_processor.vocabulary_),\n\t\t\t\tembedding_size=params['embedding_dim'],\n\t\t\t\tfilter_sizes=list(map(int, params['filter_sizes'].split(\",\"))),\n\t\t\t\tnum_filters=params['num_filters'],\n\t\t\t\tl2_reg_lambda=params['l2_reg_lambda'])\n\n\t\t\tglobal_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\t\t\toptimizer = tf.train.AdamOptimizer(1e-3)\n\t\t\tgrads_and_vars = optimizer.compute_gradients(cnn.loss)\n\t\t\ttrain_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n\t\t\ttimestamp = str(int(time.time()))\n\t\t\tout_dir = os.path.abspath(os.path.join(os.path.curdir, \"trained_model_\" + timestamp))\n\n\t\t\tcheckpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n\t\t\tcheckpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n\t\t\tif not os.path.exists(checkpoint_dir):\n\t\t\t\tos.makedirs(checkpoint_dir)\n\t\t\tsaver = tf.train.Saver(tf.all_variables())\n\n\t\t\t# One training step: train the model with one batch\n\t\t\tdef train_step(x_batch, y_batch):\n\t\t\t\tfeed_dict = {\n\t\t\t\t\tcnn.input_x: x_batch,\n\t\t\t\t\tcnn.input_y: y_batch,\n\t\t\t\t\tcnn.dropout_keep_prob: params['dropout_keep_prob']}\n\t\t\t\t_, step, loss, acc = sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)\n\n\t\t\t# One evaluation 
step: evaluate the model with one batch\n\t\t\tdef dev_step(x_batch, y_batch):\n\t\t\t\tfeed_dict = {cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.dropout_keep_prob: 1.0}\n\t\t\t\tstep, loss, acc, num_correct = sess.run([global_step, cnn.loss, cnn.accuracy, cnn.num_correct], feed_dict)\n\t\t\t\treturn num_correct\n\n\t\t\t# Save the word_to_id map since predict.py needs it\n\t\t\tvocab_processor.save(os.path.join(out_dir, \"vocab.pickle\"))\n\t\t\tsess.run(tf.initialize_all_variables())\n\n\t\t\t# Training starts here\n\t\t\ttrain_batches = data_helper.batch_iter(list(zip(x_train, y_train)), params['batch_size'], params['num_epochs'])\n\t\t\tbest_accuracy, best_at_step = 0, 0\n\n\t\t\t\"\"\"Step 6: train the cnn model with x_train and y_train (batch by batch)\"\"\"\n\t\t\tfor train_batch in train_batches:\n\t\t\t\tx_train_batch, y_train_batch = zip(*train_batch)\n\t\t\t\ttrain_step(x_train_batch, y_train_batch)\n\t\t\t\tcurrent_step = tf.train.global_step(sess, global_step)\n\n\t\t\t\t\"\"\"Step 6.1: evaluate the model with x_dev and y_dev (batch by batch)\"\"\"\n\t\t\t\tif current_step % params['evaluate_every'] == 0:\n\t\t\t\t\tdev_batches = data_helper.batch_iter(list(zip(x_dev, y_dev)), params['batch_size'], 1)\n\t\t\t\t\ttotal_dev_correct = 0\n\t\t\t\t\tfor dev_batch in dev_batches:\n\t\t\t\t\t\tx_dev_batch, y_dev_batch = zip(*dev_batch)\n\t\t\t\t\t\tnum_dev_correct = dev_step(x_dev_batch, y_dev_batch)\n\t\t\t\t\t\ttotal_dev_correct += num_dev_correct\n\n\t\t\t\t\tdev_accuracy = float(total_dev_correct) / len(y_dev)\n\t\t\t\t\tlogging.critical('Accuracy on dev set: {}'.format(dev_accuracy))\n\n\t\t\t\t\t\"\"\"Step 6.2: save the model if it is the best based on accuracy of the dev set\"\"\"\n\t\t\t\t\tif dev_accuracy >= best_accuracy:\n\t\t\t\t\t\tbest_accuracy, best_at_step = dev_accuracy, current_step\n\t\t\t\t\t\tpath = saver.save(sess, checkpoint_prefix, global_step=current_step)\n\t\t\t\t\t\tlogging.critical('Saved model {} at step {}'.format(path, best_at_step))\n\t\t\t\t\t\tlogging.critical('Best accuracy {} at step {}'.format(best_accuracy, best_at_step))\n\n\t\t\t\"\"\"Step 7: predict x_test (batch by batch)\"\"\"\n\t\t\ttest_batches = data_helper.batch_iter(list(zip(x_test, y_test)), params['batch_size'], 1)\n\t\t\ttotal_test_correct = 0\n\t\t\tfor test_batch in test_batches:\n\t\t\t\tx_test_batch, y_test_batch = zip(*test_batch)\n\t\t\t\tnum_test_correct = dev_step(x_test_batch, y_test_batch)\n\t\t\t\ttotal_test_correct += num_test_correct\n\n\t\t\ttest_accuracy = float(total_test_correct) / len(y_test)\n\t\t\tlogging.critical('Accuracy on test set is {} based on the best model {}'.format(test_accuracy, path))\n\t\t\tlogging.critical('The training is complete')\n\nif __name__ == '__main__':\n\t# python train.py ./data/repsolutions.csv.zip ./parameters.json\n\ttrain_cnn()\n"
] | [
[
"tensorflow.train.global_step",
"tensorflow.Graph",
"tensorflow.all_variables",
"tensorflow.Variable",
"sklearn.model_selection.train_test_split",
"tensorflow.ConfigProto",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"numpy.array",
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cnzeki/DeepLoader | [
"cec3f47692bc77fbdcb397ad7ec21c994328fc00",
"cec3f47692bc77fbdcb397ad7ec21c994328fc00"
] | [
"deeploader/eval/extractor.py",
"deeploader/util/alignment.py"
] | [
"# -*- coding:utf-8 -*-\r\nfrom __future__ import print_function\r\nimport math\r\nimport numpy as np\r\nimport logging\r\nimport time\r\nimport os\r\nimport argparse\r\nimport cv2\r\nimport sys\r\n\r\nfrom deeploader.eval.run_verify import *\r\n \r\n \r\ndef _extract_feature_each(extractor, img_list):\r\n feat_list = []\r\n n = len(img_list)\r\n idx = 1\r\n for img in img_list:\r\n feat = extractor.extract(img)\r\n feat_list.append(feat)\r\n if idx > 1:\r\n print('{}'.format('\\b'*10))\r\n print('{}/{}'.format(idx, n), end='')\r\n idx += 1\r\n return feat_list\r\n\r\n\r\ndef _extract_feature_batch(extractor, pair_list, size = 0):\r\n batch_size = extractor.batch_size\r\n feat_list = []\r\n npairs = len(pair_list)\r\n if size == 0:\r\n size = npairs\r\n size = min(size, npairs)\r\n nbatch = (size + batch_size - 1) // batch_size\r\n\r\n for batch in range(nbatch):\r\n # make a batch\r\n x_list = []\r\n for i in range(batch_size):\r\n pairid = (batch * batch_size + i)\r\n if pairid >= npairs:\r\n pairid = npairs - 1\r\n x_list.append(pair_list[pairid])\r\n #\r\n x_batch = np.stack(x_list, axis=0)\r\n feat = extractor.extract(x_batch)\r\n \r\n for i in range(batch_size):\r\n a = feat[i,:]\r\n if len(feat_list) < size:\r\n feat_list.append(a)\r\n \r\n return feat_list \r\n\r\n \r\ndef extract_list(extractor, img_list, size = 0):\r\n batch_size = extractor.batch_size\r\n if batch_size > 1:\r\n return _extract_feature_batch(extractor, img_list, size)\r\n return _extract_feature_each(extractor, img_list)\r\n \r\n \r\ndef crop_image_list(img_list, imsize):\r\n \"\"\"\r\n crop images\r\n \"\"\"\r\n out_list = []\r\n h, w, c = img_list[0].shape\r\n x1 = (w - imsize[0])/2\r\n y1 = (h - imsize[1])/2\r\n for pair in img_list:\r\n img1 = pair\r\n img1 = img1[y1:(y1+imsize[1]),x1:(x1+imsize[0]),:]\r\n out_list.append(img1)\r\n #print(img1.shape)\r\n return out_list\r\n \r\n\r\ndef norm_image_list(img_list):\r\n \"\"\"\r\n norm images\r\n \"\"\"\r\n out_list = []\r\n for pair in img_list:\r\n img1 = pair\r\n img1 = ( np.float32(img1) - 127.5 ) / 128\r\n out_list.append(img1)\r\n return out_list\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--type\", help=\"caffe | tensorflow | mxnet\")\r\n parser.add_argument(\"--test_set\", help=\"lfw | ytf\")\r\n parser.add_argument(\"--data\", help=\"lfw.np or pair.txt\")\r\n parser.add_argument(\"--prefix\", help=\"data prefix\")\r\n parser.add_argument(\"--model_path\", help= 'specify which model to test ')\r\n parser.add_argument('--image_size', default=\"112, 96\", help='image size height, width')\r\n parser.add_argument('--net_depth', default=50, help='resnet depth, default is 50')\r\n parser.add_argument('--batch_size', default=64, help='batch size to train network')\r\n parser.add_argument(\"--model_name\", help= 'specify which model to test \\n'\r\n ' centerface\\n'\r\n ' sphereface\\n'\r\n ' AMSoftmax\\n'\r\n ' arcface\\n'\r\n ' yours \\n')\r\n parser.add_argument(\"--dist_type\", default='cosine', help=\"distance measure ['cosine', 'L2', 'SSD']\")\r\n parser.add_argument(\"--do_mirror\", default=False, help=\"mirror image and concatinate features\")\r\n parser.add_argument(\"--do_norm\", default=True, help=\"norm image before feed to nets\")\r\n parser.add_argument(\"--embed_name\", help= 'specify output blob name')\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef build_extractor(args):\r\n image_size = args.image_size.split(',')\r\n image_size = (int(image_size[1]), int(image_size[0])) \r\n model_name = 
args.model_name\r\n do_mirror = args.do_mirror\r\n if args.type == 'caffe':\r\n from plats.caffe.caffe_model_factory import model_factory\r\n extractor, image_size = model_factory(model_name, do_mirror)\r\n print('Testing model\\t: %s' % (extractor.weight))\r\n # do norm\r\n args.do_norm = True\r\n elif args.type == 'tensorflow':\r\n from plats.tensorflow.resnet50_extractor import get_extractor\r\n image_size = [image_size[1], image_size[0]]\r\n extractor = get_extractor(args)\r\n # do norm\r\n args.do_norm = True\r\n elif args.type == 'mxnet':\r\n from plats.mxnet.mxnet_extractor import MxnetExtractor\r\n extractor = MxnetExtractor(args.model_path, args.batch_size, image_size, args.embed_name)\r\n \r\n args.image_size = image_size\r\n return extractor\r\n \r\nif __name__ == '__main__':\r\n args = parse_args()\r\n output_dir = '.'\r\n # parse args \r\n image_size = args.image_size.split(',')\r\n image_size = (int(image_size[1]), int(image_size[0])) \r\n model_name = args.model_name\r\n test_set = args.test_set\r\n dist_type = args.dist_type\r\n do_mirror = args.do_mirror\r\n print('Dataset \\t: %s (%s,%s)' % (args.test_set, args.data, args.prefix))\r\n print('Testing \\t: %s' % model_name)\r\n print('Distance \\t: %s' % dist_type)\r\n print('Do mirror\\t: {}'.format(do_mirror))\r\n \r\n # load images\r\n if args.data.find('.np') > 0:\r\n pos_img, neg_img = pickle.load(open(args.data, 'rb'))\r\n #pos_img, neg_img = pickle.load(open(lfw_data, 'rb'), encoding='iso-8859-1')\r\n else:\r\n if args.test_set == 'lfw':\r\n pos_img, neg_img = load_image_paris(args.data, args.prefix)\r\n else:\r\n pos_img, neg_img = load_ytf_pairs(args.data, args.prefix)\r\n \r\n # crop image\r\n pos_img = crop_image_list(pos_img, image_size)\r\n neg_img = crop_image_list(neg_img, image_size)\r\n # model\r\n if args.type == 'caffe':\r\n from plats.caffe.caffe_model_factory import model_factory\r\n extractor, image_size = model_factory(model_name, do_mirror)\r\n print('Testing model\\t: %s' % (extractor.weight))\r\n # do norm\r\n args.do_norm = True\r\n elif args.type == 'tensorflow':\r\n from plats.tensorflow.resnet50_extractor import get_extractor\r\n args.image_size = [image_size[1], image_size[0]]\r\n extractor = get_extractor(args)\r\n # do norm\r\n args.do_norm = True\r\n elif args.type == 'mxnet':\r\n from plats.mxnet.mxnet_extractor import MxnetExtractor\r\n extractor = MxnetExtractor(args.model_path, args.batch_size, image_size, args.embed_name)\r\n print('Image size\\t: {}'.format(image_size))\r\n print('Do norm \\t: {}'.format(args.do_norm))\r\n if args.do_norm == True:\r\n print('Norm images')\r\n pos_img = norm_image_list(pos_img)\r\n neg_img = norm_image_list(neg_img)\r\n # compute feature\r\n print('Extracting features ...')\r\n pos_list = extract_feature(extractor, pos_img)\r\n print(' Done positive pairs')\r\n neg_list = extract_feature(extractor, neg_img)\r\n print(' Done negative pairs')\r\n\r\n # evaluate\r\n print('Evaluating ...')\r\n precision, std, threshold, pos, neg, _ = verification(pos_list, neg_list, dist_type = dist_type) \r\n # _, title = os.path.split(extractor.weight)\r\n #draw_chart(title, output_dir, {'pos': pos, 'neg': neg}, precision, threshold)\r\n print('------------------------------------------------------------')\r\n print('Precision on %s : %1.5f+-%1.5f \\nBest threshold : %f' % (args.test_set, precision, std, threshold))\r\n \r\n \r\n\r\n",
"# coding: utf-8\nimport os\nimport numpy as np\nimport math\nimport cv2\n\ndef compute_affine_transform(points, refpoints, w = None):\n '''\n compute the affine tranform matrix\n '''\n if w == None:\n w = [1] * (len(points) * 2)\n assert(len(w) == 2*len(points))\n y = []\n for n, p in enumerate(refpoints):\n y += [p[0]/w[n*2], p[1]/w[n*2+1]]\n A = []\n for n, p in enumerate(points):\n A.extend([ [p[0]/w[n*2], p[1]/w[n*2], 0, 0, 1/w[n*2], 0], [0, 0, p[0]/w[n*2+1], p[1]/w[n*2+1], 0, 1/w[n*2+1]] ])\n \n lstsq = cv2.solve(np.array(A, dtype=np.float), np.array(y, dtype=np.float), flags=cv2.DECOMP_SVD)\n h11, h12, h21, h22, dx, dy = lstsq[1]\n\n #R = np.array([[h11, h12, dx], [h21, h22, dy]])\n # The row above works too - but creates a redundant dimension\n R = np.array([[h11[0], h12[0], dx[0]], [h21[0], h22[0], dy[0]]])\n return R\n\n \ndef compute_similarity_transform(src, dst):\n '''\n compute the similarity tranform matrix\n '''\n assert len(src) == len(dst)\n N = len(src)\n A = np.zeros((N*2, 4), dtype=np.float)\n B = np.zeros((N*2, 1), dtype=np.float)\n for i in range(N):\n # x'\n row = i * 2\n A[row][0] = src[i][0]\n A[row][1] = -src[i][1]\n A[row][2] = 1\n A[row][3] = 0\n B[row][0] = dst[i][0]\n # y'\n row += 1\n A[row][0] = src[i][1]\n A[row][1] = src[i][0]\n A[row][2] = 0\n A[row][3] = 1\n B[row][0] = dst[i][1]\n AT = np.transpose(A)\n invAA = np.linalg.inv(np.dot(AT,A))\n AAT = np.dot(invAA,AT)\n X = np.dot(AAT,B) \n \n R = np.array([[X[0], -X[1], X[2]], [X[1], X[0], X[3]]])\n return R\n\ndef cv2_imread(path):\n img = cv2.imread(path)\n if img is not None:\n return img\n # try .png\n print('Not find:%s try:%s' % (path, path+'.png'))\n img = cv2.imread(path+'.png')\n return img\n \n\ndef cv2_imwrite(path,img):\n ret = True\n title, ext = os.path.splitext(path)\n ext = ext.lower()\n makedirs(path)\n # append gif with .png\n if ext == '.gif':\n ext = '.png'\n path = path+'.png'\n elif ext == '':\n path = path+'.png'\n \n try:\n cv2.imwrite(path, img)\n except:\n ret = False\n \n return ret\n\n \ndef mean_shape_96x112():\n mean_face_shape_x = [30.2946, 65.5318, 48.0252, 33.5493, 62.7299]\n mean_face_shape_y = [51.6963, 51.5014, 71.7366, 92.3655, 92.2041]\n return mean_face_shape_x, mean_face_shape_y\n\n \ndef mean_shape_112x112():\n _x, _y = mean_shape_96x112()\n _x = [x+8 for x in _x]\n return _x, _y\n \n\ndef landmark68_to_5(landmark68):\n landmark5 = [0]*10 \n pts = []\n # left eye\n eyel = [0, 0]\n for i in range(36, 42):\n eyel[0] += landmark68[i]['x']\n eyel[1] += landmark68[i]['y']\n eyel = [x/6 for x in eyel]\n pts.append(eyel)\n # right eye\n eyer = [0, 0]\n for i in range(42, 48):\n eyer[0] += landmark68[i]['x']\n eyer[1] += landmark68[i]['y']\n eyer = [x/6 for x in eyer]\n pts.append(eyer)\n pts.append([landmark68[30]['x'], landmark68[30]['y']])\n pts.append([landmark68[48]['x'], landmark68[48]['y']])\n pts.append([landmark68[54]['x'], landmark68[54]['y']])\n for i in range(5):\n landmark5[i] = pts[i][0]\n landmark5[i+5] = pts[i][1]\n return landmark5\n\n \ndef alignface_96x112(img, points, pading=0, trans_type = 'similarity'):\n \"\"\"\n crop and align face\n Parameters:\n ----------\n img: numpy array, bgr order of shape (1, 3, n, m)\n input image\n points: numpy array, n x 10 (x1, x2 ... 
x5, y1, y2 ..y5)\n padding: default 0\n trans_type: similarity OR affine, default similarity\n Return:\n -------\n crop_imgs: list, n\n cropped and aligned faces \n \"\"\"\n # average positions of face points\n mean_face_shape_x = [30.2946, 65.5318, 48.0252, 33.5493, 62.7299]\n mean_face_shape_y = [51.6963, 51.5014, 71.7366, 92.3655, 92.2041]\n # tranform\n tranform = compute_similarity_transform\n if trans_type == 'affine' :\n tranform = compute_affine_transform\n # do the job\n crop_imgs = []\n for p in points:\n shape =[]\n for k in range(int(len(p)/2)):\n shape.append(p[k])\n shape.append(p[k+5])\n\n from_points = []\n to_points = []\n\n for i in range(int(len(shape)/2)):\n x = mean_face_shape_x[i] + pading\n y = mean_face_shape_y[i] + pading\n to_points.append([x, y])\n from_points.append([shape[2*i], shape[2*i+1]])\n \n N = tranform(from_points,to_points)\n chips = cv2.warpAffine(img, N, (96+2*pading, 112+2*pading) )\n crop_imgs.append(chips)\n\n return crop_imgs\n\n \ndef align_to_96x112(img, points, pading=0, trans_type = 'similarity'):\n \"\"\"\n crop and align face\n Parameters:\n ----------\n img: numpy array, bgr order of shape (1, 3, n, m)\n input image\n points: list, 1 x 10 (x1, x2 ... x5, y1, y2 ..y5)\n padding: default 0\n trans_type: similarity OR affine, default similarity\n Return:\n -------\n cropped and aligned face\n \"\"\"\n # average positions of face points\n mean_face_shape_x = [30.2946, 65.5318, 48.0252, 33.5493, 62.7299]\n mean_face_shape_y = [51.6963, 51.5014, 71.7366, 92.3655, 92.2041]\n # tranform\n tranform = compute_similarity_transform\n if trans_type == 'affine' :\n tranform = compute_affine_transform\n # do the job\n from_points = []\n to_points = []\n\n for i in range(int(len(points)/2)):\n if points[i] == None:\n continue\n x = mean_face_shape_x[i] + pading\n y = mean_face_shape_y[i] + pading\n to_points.append([x, y])\n from_points.append([points[i], points[i + 5]])\n \n N = tranform(from_points,to_points)\n chip = cv2.warpAffine(img, N, (96+2*pading, 112+2*pading) )\n return chip\n\n \ndef align_to_112x112(img, points, pading=0, trans_type = 'similarity'):\n \"\"\"\n crop and align face\n Parameters:\n ----------\n img: numpy array, bgr order of shape (1, 3, n, m)\n input image\n points: list, 1 x 10 (x1, x2 ... x5, y1, y2 ..y5)\n padding: default 0\n trans_type: similarity OR affine, default similarity\n Return:\n -------\n cropped and aligned face\n \"\"\"\n # average positions of face points\n mean_face_shape_x = [30.2946, 65.5318, 48.0252, 33.5493, 62.7299]\n mean_face_shape_y = [51.6963, 51.5014, 71.7366, 92.3655, 92.2041]\n # tranform\n tranform = compute_similarity_transform\n if trans_type == 'affine' :\n tranform = compute_affine_transform\n # do the job\n from_points = []\n to_points = []\n\n for i in range(int(len(points)/2)):\n if points[i] == None:\n continue\n x = mean_face_shape_x[i] + pading + 8.0\n y = mean_face_shape_y[i] + pading\n to_points.append([x, y])\n from_points.append([points[i], points[i + 5]])\n \n N = tranform(from_points,to_points)\n chip = cv2.warpAffine(img, N, (112+2*pading, 112+2*pading) )\n return chip\n \n "
] | [
[
"numpy.float32",
"numpy.stack"
],
[
"numpy.dot",
"numpy.array",
"numpy.zeros",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nikhil12-cmd/nikhil12345678 | [
"1aa14b385b6a18b8b4c7e323642448e9bb0ff41b"
] | [
"anprLabelProcessor.py"
] | [
"# import the necessary packages\nimport numpy as np\nfrom sklearn.preprocessing import LabelBinarizer\n\nclass AnprLabelProcessor:\n # init the label binarizers. Maps classes to a set of one-hot vectors\n def __init__(self, plateChars, plateLens):\n # convert the labels from integers to vectors\n self.plate_lb = LabelBinarizer().fit(plateChars)\n self.charCnt_lb = LabelBinarizer().fit(plateLens)\n self.numClassesPerChar = len(plateChars)\n self.maxPlateLen = plateLens[-1]\n\n # Generate one-hot vectors for every plate\n def transform(self, labels):\n # Create a list of chars for each plate\n plateLabel = np.empty((len(labels), self.maxPlateLen), dtype=np.unicode_)\n for (i, label) in enumerate(labels):\n for j in range(0, self.maxPlateLen):\n plateLabel[i, j] = label[j]\n\n # Create a list of plate lengths for each plate\n #plateLenLabel = np.zeros((len(labels), 1), dtype=int)\n #for (i, label) in enumerate(labels):\n # plateLenLabel[i, 0] = label[7]\n\n # Create the one hot labels for each plate\n #plateLabelsOneHot = np.zeros((len(labels), (37 * 7) + 7), dtype=int)\n plateLabelsOneHot = np.zeros((len(labels), (self.numClassesPerChar * self.maxPlateLen)), dtype=int)\n for i in range(len(labels)):\n oneHotText = self.plate_lb.transform(plateLabel[i])\n #oneHotCharCnt = self.charCnt_lb.transform(plateLenLabel[i])\n #plateLabelsOneHot[i] = np.concatenate((oneHotText.flatten(), oneHotCharCnt.flatten()))\n plateLabelsOneHot[i] = oneHotText.flatten()\n\n return plateLabelsOneHot\n\n # for every plate generate license plate chars, and license plate length\n def inverse_transform(self,oneHotLabels):\n plates = []\n plateLens = []\n oneHotLenDemuxed = []\n for i in range(len(oneHotLabels)):\n oneHotDemuxed = []\n for j in range(self.maxPlateLen):\n onehotDemux = np.array(oneHotLabels[i,j])\n oneHotDemuxed.append(onehotDemux)\n oneHotDemuxed = np.array(oneHotDemuxed)\n plate = self.plate_lb.inverse_transform(oneHotDemuxed)\n plates.append(plate)\n #oneHotLenDemux = np.array(oneHotLabels[i, 37 * 7:])\n #oneHotLenDemuxed.append(oneHotLenDemux)\n #oneHotLenDemuxed = np.array(oneHotLenDemuxed)\n #plateLens = (self.charCnt_lb.inverse_transform(oneHotLenDemuxed))\n\n #return plates, plateLens\n return plates"
] | [
[
"numpy.array",
"sklearn.preprocessing.LabelBinarizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CC1st/step-by-step-mindspore | [
"df37ffa60a17d4951814c2bce675809f6aff8f21"
] | [
"src/emb/fact_network.py"
] | [
"\"\"\"\n Copyright (c) 2018, salesforce.com, inc.\n All rights reserved.\n SPDX-License-Identifier: BSD-3-Clause\n For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n \n Fact scoring networks.\n Code adapted from https://github.com/TimDettmers/ConvE/blob/master/model.py\n\"\"\"\n\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport mindspore\nimport mindspore.nn as ms_nn\nimport mindspore.ops.functional\n\nclass TripleE(nn.Module):\n def __init__(self, args, num_entities):\n super(TripleE, self).__init__()\n conve_args = copy.deepcopy(args)\n conve_args.model = 'conve'\n self.conve_nn = ConvE(conve_args, num_entities)\n conve_state_dict = torch.load(args.conve_state_dict_path)\n conve_nn_state_dict = get_conve_nn_state_dict(conve_state_dict)\n self.conve_nn.load_state_dict(conve_nn_state_dict)\n\n complex_args = copy.deepcopy(args)\n complex_args.model = 'complex'\n self.complex_nn = ComplEx(complex_args)\n\n distmult_args = copy.deepcopy(args)\n distmult_args.model = 'distmult'\n self.distmult_nn = DistMult(distmult_args)\n\n def forward(self, e1, r, conve_kg, secondary_kgs):\n complex_kg = secondary_kgs[0]\n distmult_kg = secondary_kgs[1]\n return (self.conve_nn.forward(e1, r, conve_kg)\n + self.complex_nn.forward(e1, r, complex_kg)\n + self.distmult_nn.forward(e1, r, distmult_kg)) / 3\n\n def forward_fact(self, e1, r, conve_kg, secondary_kgs):\n complex_kg = secondary_kgs[0]\n distmult_kg = secondary_kgs[1]\n return (self.conve_nn.forward_fact(e1, r, conve_kg)\n + self.complex_nn.forward_fact(e1, r, complex_kg)\n + self.distmult_nn.forward_fact(e1, r, distmult_kg)) / 3\n\n\nclass HyperE(nn.Module):\n def __init__(self, args, num_entities):\n super(HyperE, self).__init__()\n self.conve_nn = ConvE(args, num_entities)\n conve_state_dict = torch.load(args.conve_state_dict_path)\n conve_nn_state_dict = get_conve_nn_state_dict(conve_state_dict)\n self.conve_nn.load_state_dict(conve_nn_state_dict)\n\n complex_args = copy.deepcopy(args)\n complex_args.model = 'complex'\n self.complex_nn = ComplEx(complex_args)\n\n def forward(self, e1, r, conve_kg, secondary_kgs):\n complex_kg = secondary_kgs[0]\n return (self.conve_nn.forward(e1, r, conve_kg)\n + self.complex_nn.forward(e1, r, complex_kg)) / 2\n\n def forward_fact(self, e1, r, e2, conve_kg, secondary_kgs):\n complex_kg = secondary_kgs[0]\n return (self.conve_nn.forward_fact(e1, r, e2, conve_kg)\n + self.complex_nn.forward_fact(e1, r, e2, complex_kg)) / 2\n\n\n#class ComplEx(nn.Module):\nclass ComplEx(mindspore.nn.Cell):\n def __init__(self, args):\n super(ComplEx, self).__init__()\n #mindspore:torch.nn.function.sigmoid()\n self.ms_sigmoid=mindspore.ops.Sigmoid()\n #mindspore:torch.mm\n self.ms_matmul=mindspore.ops.MatMul()\n #mindspore: torch.sum()\n self.ms_reduceSum=mindspore.ops.ReduceSum(keep_dims=True)\n\n def forward(self, e1, r, kg):\n def dist_mult(E1, R, E2):\n #return torch.mm(E1 * R, E2.transpose(1, 0))\n return mindspore.ops.MatMul(False, True)(E1 * R, E2)\n\n E1_real = kg.get_entity_embeddings(e1)\n R_real = kg.get_relation_embeddings(r)\n E2_real = kg.get_all_entity_embeddings()\n E1_img = kg.get_entity_img_embeddings(e1)\n R_img = kg.get_relation_img_embeddings(r)\n E2_img = kg.get_all_entity_img_embeddings()\n\n rrr = dist_mult(R_real, E1_real, E2_real)\n rii = dist_mult(R_real, E1_img, E2_img)\n iri = dist_mult(R_img, E1_real, E2_img)\n iir = dist_mult(R_img, E1_img, E2_real)\n S = rrr + rii + iri - iir\n #S = F.sigmoid(S)\n S 
= self.ms_sigmoid(S)\n return S\n\n #mindspore\n def construct(self,e1, r, kg):\n out=self.forward(e1, r, kg)\n return out\n\n def forward_fact(self, e1, r, e2, kg):\n def dist_mult_fact(E1, R, E2):\n #return torch.sum(E1 * R * E2, dim=1, keepdim=True)\n return mindspore.ops.ReduceSum(True)(E1 * R * E2, 1)\n\n E1_real = kg.get_entity_embeddings(e1)\n R_real = kg.get_relation_embeddings(r)\n E2_real = kg.get_entity_embeddings(e2)\n E1_img = kg.get_entity_img_embeddings(e1)\n R_img = kg.get_relation_img_embeddings(r)\n E2_img = kg.get_entity_img_embeddings(e2)\n\n rrr = dist_mult_fact(R_real, E1_real, E2_real)\n rii = dist_mult_fact(R_real, E1_img, E2_img)\n iri = dist_mult_fact(R_img, E1_real, E2_img)\n iir = dist_mult_fact(R_img, E1_img, E2_real)\n S = rrr + rii + iri - iir\n #S = F.sigmoid(S)\n S = self.ms_sigmoid(S)\n return S\n\n #mindspore:construct\n def construct_fact(self,e1, r, e2, kg):\n out=self.forward_fact(e1, r, e2, kg)\n return out\n\n\nclass ConvE(nn.Module):\n def __init__(self, args, num_entities):\n super(ConvE, self).__init__()\n self.entity_dim = args.entity_dim # 200\n self.relation_dim = args.relation_dim # 200\n assert (args.emb_2D_d1 * args.emb_2D_d2 == args.entity_dim)\n assert (args.emb_2D_d1 * args.emb_2D_d2 == args.relation_dim)\n self.emb_2D_d1 = args.emb_2D_d1 # 10\n self.emb_2D_d2 = args.emb_2D_d2 # 20\n self.num_out_channels = args.num_out_channels # 32\n self.w_d = args.kernel_size\n self.HiddenDropout = nn.Dropout(args.hidden_dropout_rate)\n self.FeatureDropout = nn.Dropout(args.feat_dropout_rate)\n\n # stride = 1, padding = 0, dilation = 1, groups = 1\n self.conv1 = nn.Conv2d(1, self.num_out_channels, (self.w_d, self.w_d), 1, 0)\n self.bn0 = nn.BatchNorm2d(1)\n self.bn1 = nn.BatchNorm2d(self.num_out_channels)\n self.bn2 = nn.BatchNorm1d(self.entity_dim)\n self.register_parameter('b', nn.Parameter(torch.zeros(num_entities)))\n h_out = 2 * self.emb_2D_d1 - self.w_d + 1\n w_out = self.emb_2D_d2 - self.w_d + 1\n self.feat_dim = self.num_out_channels * h_out * w_out\n self.fc = nn.Linear(self.feat_dim, self.entity_dim)\n\n def forward(self, e1, r, kg):\n # f(h, r, t) = sigmoid((vec(h; r) * w) * W) * t)\n E1 = kg.get_entity_embeddings(e1).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)\n R = kg.get_relation_embeddings(r).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)\n E2 = kg.get_all_entity_embeddings()\n\n stacked_inputs = torch.cat([E1, R], 2)\n stacked_inputs = self.bn0(stacked_inputs)\n\n X = self.conv1(stacked_inputs)\n # X = self.bn1(X)\n X = F.relu(X)\n X = self.FeatureDropout(X)\n X = X.view(-1, self.feat_dim)\n X = self.fc(X)\n X = self.HiddenDropout(X)\n X = self.bn2(X)\n X = F.relu(X)\n X = torch.mm(X, E2.transpose(1, 0))\n X += self.b.expand_as(X)\n\n S = F.sigmoid(X)\n return S\n\n def forward_fact(self, e1, r, e2, kg):\n \"\"\"\n Compute network scores of the given facts.\n :param e1: [batch_size]\n :param r: [batch_size]\n :param e2: [batch_size]\n :param kg:\n \"\"\"\n # print(e1.size(), r.size(), e2.size())\n # print(e1.is_contiguous(), r.is_contiguous(), e2.is_contiguous())\n # print(e1.min(), r.min(), e2.min())\n # print(e1.max(), r.max(), e2.max())\n E1 = kg.get_entity_embeddings(e1).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)\n R = kg.get_relation_embeddings(r).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)\n E2 = kg.get_entity_embeddings(e2)\n\n stacked_inputs = torch.cat([E1, R], 2)\n stacked_inputs = self.bn0(stacked_inputs)\n\n X = self.conv1(stacked_inputs)\n # X = self.bn1(X)\n X = F.relu(X)\n X = self.FeatureDropout(X)\n X = X.view(-1, 
self.feat_dim)\n X = self.fc(X)\n X = self.HiddenDropout(X)\n X = self.bn2(X)\n X = F.relu(X)\n X = torch.matmul(X.unsqueeze(1), E2.unsqueeze(2)).squeeze(2)\n X += self.b[e2].unsqueeze(1)\n\n S = F.sigmoid(X)\n return S\n\n\nclass DistMult(nn.Module):\n def __init__(self, args):\n super(DistMult, self).__init__()\n\n def forward(self, e1, r, kg):\n E1 = kg.get_entity_embeddings(e1)\n R = kg.get_relation_embeddings(r)\n E2 = kg.get_all_entity_embeddings()\n S = torch.mm(E1 * R, E2.transpose(1, 0))\n S = F.sigmoid(S)\n return S\n\n def forward_fact(self, e1, r, e2, kg):\n E1 = kg.get_entity_embeddings(e1)\n R = kg.get_relation_embeddings(r)\n E2 = kg.get_entity_embeddings(e2)\n S = torch.sum(E1 * R * E2, dim=1, keepdim=True)\n S = F.sigmoid(S)\n return S\n\n\ndef get_conve_nn_state_dict(state_dict):\n conve_nn_state_dict = {}\n for param_name in ['mdl.b', 'mdl.conv1.weight', 'mdl.conv1.bias', 'mdl.bn0.weight', 'mdl.bn0.bias',\n 'mdl.bn0.running_mean', 'mdl.bn0.running_var', 'mdl.bn1.weight', 'mdl.bn1.bias',\n 'mdl.bn1.running_mean', 'mdl.bn1.running_var', 'mdl.bn2.weight', 'mdl.bn2.bias',\n 'mdl.bn2.running_mean', 'mdl.bn2.running_var', 'mdl.fc.weight', 'mdl.fc.bias']:\n conve_nn_state_dict[param_name.split('.', 1)[1]] = state_dict['state_dict'][param_name]\n return conve_nn_state_dict\n\n\ndef get_conve_kg_state_dict(state_dict):\n kg_state_dict = dict()\n for param_name in ['kg.entity_embeddings.weight', 'kg.relation_embeddings.weight']:\n kg_state_dict[param_name.split('.', 1)[1]] = state_dict['state_dict'][param_name]\n return kg_state_dict\n\n\ndef get_complex_kg_state_dict(state_dict):\n kg_state_dict = dict()\n for param_name in ['kg.entity_embeddings.weight', 'kg.relation_embeddings.weight',\n 'kg.entity_img_embeddings.weight', 'kg.relation_img_embeddings.weight']:\n kg_state_dict[param_name.split('.', 1)[1]] = state_dict['state_dict'][param_name]\n return kg_state_dict\n\n\ndef get_distmult_kg_state_dict(state_dict):\n kg_state_dict = dict()\n for param_name in ['kg.entity_embeddings.weight', 'kg.relation_embeddings.weight']:\n kg_state_dict[param_name.split('.', 1)[1]] = state_dict['state_dict'][param_name]\n return kg_state_dict\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.load",
"torch.cat",
"torch.zeros",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XZ-X/rose6icse | [
"5710dc0e39509f79e42535e0b5ca5e41cbd90fc2"
] | [
"submissions/available/CPC/CPC-what-property/classification/getCommentVecByMean.py"
] | [
"from cleanSent import cleanSentence\nimport numpy as np\nimport pandas as pd\n\n\ndef sent2vec(s, model):\n res = np.zeros(200)\n # sent = cleanSentence(s)\n words = s.split()\n num = 0\n for w in words:\n if model.__contains__(w):\n res += model[w]\n else:\n res += model[\"method\"]\n num += 1.0\n if num == 0:\n return np.zeros(200)\n else:\n return res/num\n\n\ndef getVecByMean(data, model, commentVec, commentDict):\n for i ,sent in enumerate(data['cleanB'].values):\n commentVec[i, :] = sent2vec(sent, model)\n commentDict[(str)(sent2vec(sent, model))] = sent\n return commentDict\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mardillu/gps-polygon-smoothers | [
"7f6c3dd9daa379c4b1c9d4816d489bd3f81b59f1"
] | [
"PolygonSmoother.py"
] | [
"from geopy import distance\nfrom trianglesolver import solve\nfrom RDP import rdp\nfrom math import degrees\nfrom scipy.spatial import ConvexHull\n\n\n\ndef remove_duplicates(coords = []):\n '''Steps through the list of coordinates to find and remove coordinates that appear more than once\n\n Parameters\n ----------\n coords : list\n List of coordinates to remove duplicates from\n\n Returns\n -------\n list\n the original list of coordinates without duplicates\n '''\n new_list = []\n for point in coords:\n if point not in new_list:\n new_list.append(point)\n return new_list\n\ndef nearest_neighbor_smoother(coords=[]):\n ''' A nearest neighbor smoother steps through the list of coordinates and adjust each coordinate\n based on the values of the previoys and next values, wrapping around when necessary.\n\n This approach affects the overall area of the polygon since each coordinate gets adjusted to a new coordinate \n\n Parameters\n ----------\n coords : list\n List of coordinates to smooth\n\n Returns\n -------\n list\n A new list of smoothed out coordinates\n '''\n for n in range(len(coords)):\n prevIndex = n-1\n nextIndex = n+1\n if prevIndex == -1:\n prevIndex = len(coords)-1\n if nextIndex >= len(coords):\n nextIndex = 0\n \n coords[n][0] = coords[prevIndex][0] * 0.3 + coords[n][0] * .4 + coords[nextIndex][0] * .3\n coords[n][1] = coords[prevIndex][1] * 0.3 + coords[n][1] * .4 + coords[nextIndex][1] * .3\n\n return coords\n\ndef remove_sharp_angle(coords = [], deg_threshold = 30.0):\n ''' Detect and remove sharp angles from a polygon\n Uses harversine formaular or spherical law of cosine to calculate distance between 2 coordinates\n Uses the law of cosines to detect and remove angles sharper than the specified angles\n\n Parameters\n ----------\n coords : list\n List of coordinates to detect angles for\n deg_threshold: float\n the maximum angle to allow. 
Coordinates that that form angles below this value will be removed\n\n Returns\n -------\n list\n the original list of coordinates with sharp angles trimmed\n '''\n list_to_modify = [x for x in coords]\n coords_count = len(coords)\n if(coords_count <= 3):\n return coords\n\n for i in range(coords_count):\n if(i+1 >= coords_count or i+2 >= coords_count):\n return list_to_modify\n\n coords_1 = coords[i]\n coords_2 = coords[i+1]\n coords_3 = coords[i+2]\n d1 = distance.distance(coords_1, coords_2)\n d2 = distance.distance(coords_2, coords_3)\n d3 = distance.distance(coords_3, coords_1)\n d1 = float(str(d1)[:-3])*1000\n d2 = float(str(d2)[:-3])*1000\n d3 = float(str(d3)[:-3])*1000\n if d1 > 0.01 and d2 > 0.01 and d3 > 0.01: # if they are 0, there will be an error\n a,b,c,A,B,C = solve(a=d1, b=d2, c=d3) # Calculate the angles from the sides\n A,B,C = degrees(A), degrees(B), degrees(C) # Convert to math.degrees\n if (360.0 - C) < deg_threshold or C < deg_threshold:\n list_to_modify.remove(coords[i+1])\n\ndef remove_lakes_islands(coords = [], deg_threshold = 180.0):\n list_to_modify = [x for x in coords]\n coords_count = len(coords)\n if(coords_count <= 3):\n return coords\n\n for i in range(coords_count):\n if(i+1 >= coords_count or i+2 >= coords_count):\n return list_to_modify\n\n coords_1 = coords[i]\n coords_2 = coords[i+1]\n coords_3 = coords[i+2]\n d1 = distance.distance(coords_1, coords_2)\n d2 = distance.distance(coords_2, coords_3)\n d3 = distance.distance(coords_3, coords_1)\n d1 = float(str(d1)[:-3])*1000\n d2 = float(str(d2)[:-3])*1000\n d3 = float(str(d3)[:-3])*1000\n\n if d1 > 0.01 and d2 > 0.01 and d3 > 0.01: # if they are 0, there will be an error\n a,b,c,A,B,C = solve(a=d1, b=d2, c=d3) # Calculate the angles from the sides\n A,B,C = degrees(A), degrees(B), degrees(C) # Convert to math.degrees\n if C > deg_threshold:\n #spike= True\n list_to_modify.remove(coords[i+1])\n\ndef run_angle_remover(iterations=3, coords = [], deg_threshold=30.0):\n for i in range(3):\n coords = remove_sharp_angle(coords=coords, deg_threshold=deg_threshold)\n\n return coords\n\ndef k_sample_smoother(k=3, coords=[], strategy='delete'):\n for i in range(0,len(coords),3):\n index1 = i\n index2 = i+1\n index3 = i+2\n index4 = i+3\n index5 = i+4\n if index2 >= len(coords):\n index2 = 0\n if index3 >= len(coords):\n index3 = 1\n if index4 >= len(coords):\n index4 = 2\n if index5 >= len(coords):\n index5 = 3\n\n pA = coords[i]\n pB = coords[index2]\n pC = coords[index3]\n pD = coords[index4]\n pE = coords[index5]\n\n AB = distance.distance(pA, pB)\n AC = distance.distance(pA, pC)\n AD = distance.distance(pA, pD)\n AE = distance.distance(pA, pE)\n BD = distance.distance(pB, pD)\n BE = distance.distance(pB, pE)\n BC = distance.distance(pB, pC)\n CD = distance.distance(pC, pD)\n CE = distance.distance(pC, pE)\n DE = distance.distance(pD, pE)\n \n ATOTAL = sum([float(str(AB)[:-3])*1000, float(str(AC)[:-3])*1000, float(str(AD)[:-3])*1000, float(str(AE)[:-3])*1000])\n BTOTAL = sum([float(str(AB)[:-3])*1000, float(str(BC)[:-3])*1000, float(str(BD)[:-3])*1000, float(str(BE)[:-3])*1000])\n CTOTAL = sum([float(str(AC)[:-3])*1000, float(str(BC)[:-3])*1000, float(str(CD)[:-3])*1000, float(str(CE)[:-3])*1000])\n DTOTAL = sum([float(str(AD)[:-3])*1000, float(str(BD)[:-3])*1000, float(str(CD)[:-3])*1000, float(str(DE)[:-3])*1000])\n ETOTAL = sum([float(str(AE)[:-3])*1000, float(str(BE)[:-3])*1000, float(str(CE)[:-3])*1000, float(str(DE)[:-3])*1000])\n\n # If BTOTAL is largest you would replace point B with D if BD = min { AB 
AC AD AE }\n if ATOTAL > BTOTAL and ATOTAL > CTOTAL and ATOTAL > DTOTAL and ATOTAL > ETOTAL: #AB AC AD AE\n if AB < AC and AB < AD and AB < AE:\n coords[i] = pB\n elif AC < AB and AC < AD and AC < AE:\n coords[i] = pC\n elif AD < AB and AD < AC and AD < AE:\n coords[i] = pD\n elif AE < AB and AE < AC and AE < AD:\n coords[i] = pE\n elif BTOTAL > ATOTAL and BTOTAL > CTOTAL and BTOTAL > DTOTAL and BTOTAL > ETOTAL: #AB BC BD BE\n if AB < BC and AB < BD and AB < BE:\n coords[index2] = pB\n elif BC < AB and BC < BD and BC < BE:\n coords[index2] = pC\n elif BD < AB and BD < BC and BD < BE:\n coords[index2] = pD\n elif BE < AB and BE < BC and BE < BD:\n coords[index2] = pE\n elif BTOTAL > ATOTAL and BTOTAL > CTOTAL and BTOTAL > DTOTAL and BTOTAL > ETOTAL: #AC BC CD CE\n if AC < BC and AC < CD and AC < CE:\n coords[index3] = pC\n elif BC < AC and BC < CD and BC < CE:\n coords[index3] = pC\n elif CD < AC and CD < BC and CD < CE:\n coords[index3] = pD\n elif CE < AC and CE < BC and CE < CD:\n coords[index3] = pE\n elif BTOTAL > ATOTAL and BTOTAL > CTOTAL and BTOTAL > DTOTAL and BTOTAL > ETOTAL: #DA DB DC DE\n if DA < DB and DA < DC and DA < DE:\n coords[index3] = pA\n elif DB < DA and DB < DC and DB < DE:\n coords[index3] = pB\n elif DC < DA and DC < DB and DC < DE:\n coords[index3] = pC\n elif DE < DA and DE < DB and DE < DC:\n coords[index3] = pE\n elif BTOTAL > ATOTAL and BTOTAL > CTOTAL and BTOTAL > DTOTAL and BTOTAL > ETOTAL: # EA EB EC DE\n if EA < EB and EA < EC and EA < DE:\n coords[index3] = pA\n elif EB < EA and EB < EC and EB < DE:\n coords[index3] = pB\n elif EC < EA and EC < EB and EC < DE:\n coords[index3] = pC\n elif DE < EA and DE < EB and DE < EC:\n coords[index3] = pE\n\n return coords\n\n'''\nThe Ramer-Douglas-Peucker algorithm is an algorithm for reducing the number \nof points in a curve that is approximated by a series of points.\n'''\ndef compute_significant_points(epsilon = 4, coords = []):\n return rdp(coords, 1.0)\n\ndef reduce_point(coords=[], epsilon = 4):\n ''' A function to smartly reduced the number of coordinates in a polygon to a desired number\n by ranking cordinates based on their distances from each other. The idea is to find the most\n useful n (epsilon) points that can still represent the original polygon\n Coordinates that are farther away from each other will be ranked higher\n While coordinates that are closer to each other will be ranked lower\n The function returns the first n (epsilon) highest ranked coordinates\n If you wish to maintain the shape of the original polygon then do not make this value too low, \n otherwise, this function is capable of repres\n \n NOTE: Only call this function with an already smoothed polygon (i.e. coords should be the result of a call to\n either a convex_hull() or map_cleaner() function)\n In addition, the function assumes that the polygon is smoothed and that epsilon >= 4\n\n Parameters\n ----------\n coords : list\n Coordinates of the polygin to be reduced\n epsilon:\n The number of coordinates to keep. 
If you wish to maintain the shape\n of the original polygon then do not make this value too low.\n epsilon must be an even number\n\n Returns\n -------\n list\n the polygon with reduced coordinates\n '''\n if epsilon % 2 != 0:\n epsilon -= 1\n\n coordinate_count = len(coords)\n if coordinate_count == 0 or coordinate_count == 4:\n return coords\n\n coords = remove_duplicates(coords)\n coordinate_count = len(coords)\n dist_counter = 1\n for h in range(coordinate_count):\n max_distance = 0.0\n x = 0 \n y = 0\n for i in range(coordinate_count):\n for j in range (i+1, coordinate_count):\n if len(coords[i]) < 3 and len(coords[j]) < 3:\n d1 = distance.distance(coords[i], coords[j])\n d1 = float(str(d1)[:-3])*1000\n if d1 > max_distance:\n max_distance = d1\n x = i\n y = j\n if x == 0 and y == 0:\n break\n\n if len(coords[x]) < 3:\n coords[x].append(dist_counter)\n if y < coordinate_count and len(coords[y]) < 3:\n coords[y].append(dist_counter)\n dist_counter += 1\n\n\n contracted_coords = []\n for i in range(coordinate_count):\n if len(coords[i]) == 3 and coords[i][2] <= epsilon:\n coords[i].pop(2)\n contracted_coords.append(coords[i])\n\n contracted_coords.append(contracted_coords[0])\n return contracted_coords\n\ndef remove_outliers(coords = [], dist_threshold = 1000):\n ''' Detects outliers by computing distance between every two cordinates\n Any point that's more than dist_threshold(Km) away from any other point is an outlier\n This approach can be problematic because this action can create fake outliers in the resulting polygon\n \n \n Parameters\n ----------\n coords : list\n List of coordinates to smooth\n\n dist_threshold: int\n the minimum distance to use to classify a point as an outlier\n\n Returns\n -------\n list\n A new list without outliers\n '''\n smooth_coodinates = list()\n for i in range(len(coords)):\n nextIndex = i+1\n if nextIndex >= len(coords):\n nextIndex = 0\n d1 = distance.distance(coords[i], coords[nextIndex])\n if d1 < dist_threshold:\n smooth_coodinates.append(coords[i])\n\n return smooth_coodinates\n\ndef convex_hull(coords=[]):\n ''' Compute the convex hull of given set of coordinate points\n \n Parameters\n ----------\n coords : list\n List of coordinates to smooth\n\n Returns\n -------\n list\n A new list of smoothed out coordinates\n '''\n hull = ConvexHull(coords)\n conv_hull = list()\n for v in sorted(hull.vertices):\n conv_hull.append(coords[v])\n \n return conv_hull"
] | [
[
"scipy.spatial.ConvexHull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
77loopin/ray | [
"c18caa4db36d466718bdbcb2229aa0b2dc03da1f",
"9322f6aab53f4ca5baf5a3573e1ffde12feae519",
"c18caa4db36d466718bdbcb2229aa0b2dc03da1f",
"c18caa4db36d466718bdbcb2229aa0b2dc03da1f",
"9322f6aab53f4ca5baf5a3573e1ffde12feae519",
"9322f6aab53f4ca5baf5a3573e1ffde12feae519",
"c18caa4db36d466718bdbcb2229aa0b2dc03da1f",
"9322f6aab53f4ca5baf5a3573e1ffde12feae519",
"9322f6aab53f4ca5baf5a3573e1ffde12feae519",
"c18caa4db36d466718bdbcb2229aa0b2dc03da1f"
] | [
"release/tune_tests/scalability_tests/create_test_data.py",
"python/ray/data/impl/lazy_block_list.py",
"python/ray/util/collective/tests/single_node_cpu_tests/test_allgather.py",
"python/ray/util/sgd/torch/examples/pytorch_pbt_failure.py",
"python/ray/util/horovod/tests/test_horovod.py",
"python/ray/tune/schedulers/hyperband.py",
"rllib/policy/view_requirement.py",
"python/ray/experimental/shuffle.py",
"python/ray/experimental/raysort/main.py",
"rllib/models/jax/fcnet.py"
] | [
"import argparse\nimport numpy as np\nimport os\n\nfrom xgboost_ray.tests.utils import create_parquet\n\nif __name__ == \"__main__\":\n if \"OMP_NUM_THREADS\" in os.environ:\n del os.environ[\"OMP_NUM_THREADS\"]\n\n parser = argparse.ArgumentParser(description=\"Create fake data.\")\n parser.add_argument(\n \"filename\", type=str, default=\"/data/parted.parquet/\", help=\"ray/dask\")\n parser.add_argument(\n \"-r\",\n \"--num-rows\",\n required=False,\n type=int,\n default=1e8,\n help=\"num rows\")\n parser.add_argument(\n \"-p\",\n \"--num-partitions\",\n required=False,\n type=int,\n default=100,\n help=\"num partitions\")\n parser.add_argument(\n \"-c\",\n \"--num-cols\",\n required=False,\n type=int,\n default=4,\n help=\"num columns (features)\")\n parser.add_argument(\n \"-C\",\n \"--num-classes\",\n required=False,\n type=int,\n default=2,\n help=\"num classes\")\n parser.add_argument(\n \"-s\",\n \"--seed\",\n required=False,\n type=int,\n default=1234,\n help=\"random seed\")\n\n args = parser.parse_args()\n\n if os.path.exists(args.filename):\n print(f\"File already exists: {args.filename}. Skipping creation.\")\n\n np.random.seed(args.seed)\n create_parquet(\n args.filename,\n num_rows=int(args.num_rows),\n num_partitions=int(args.num_partitions),\n num_features=int(args.num_cols),\n num_classes=int(args.num_classes))\n",
"import math\nfrom typing import Callable, List\n\nimport numpy as np\n\nfrom ray.types import ObjectRef\nfrom ray.data.block import Block, BlockMetadata, T\nfrom ray.data.impl.block_list import BlockList\n\n\nclass LazyBlockList(BlockList[T]):\n def __init__(self, calls: Callable[[], ObjectRef[Block]],\n metadata: List[BlockMetadata]):\n assert len(calls) == len(metadata), (calls, metadata)\n self._calls = calls\n self._blocks = [calls[0]()] if calls else []\n self._metadata = metadata\n\n def split(self, split_size: int) -> List[\"LazyBlockList\"]:\n num_splits = math.ceil(len(self._calls) / split_size)\n calls = np.array_split(self._calls, num_splits)\n meta = np.array_split(self._metadata, num_splits)\n output = []\n for c, m in zip(calls, meta):\n output.append(LazyBlockList(c.tolist(), m.tolist()))\n return output\n\n def __len__(self):\n return len(self._calls)\n\n def __iter__(self):\n outer = self\n\n class Iter:\n def __init__(self):\n self._pos = -1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self._pos += 1\n if self._pos < len(outer._calls):\n return outer._get_or_compute(self._pos)\n raise StopIteration\n\n return Iter()\n\n def _get_or_compute(self, i: int) -> ObjectRef[Block]:\n assert i < len(self._calls), i\n # Check if we need to compute more blocks.\n if i >= len(self._blocks):\n start = len(self._blocks)\n # Exponentially increase the number of blocks computed per batch.\n for c in self._calls[start:max(i + 1, start * 2)]:\n self._blocks.append(c())\n return self._blocks[i]\n",
"\"\"\"Test the collective allgather API.\"\"\"\nimport numpy as np\nimport pytest\nimport ray\nimport torch\nfrom ray.util.collective.tests.cpu_util import create_collective_workers, \\\n init_tensors_for_gather_scatter\nfrom ray.util.collective.types import Backend\n\n\[email protected](\"backend\", [Backend.GLOO])\[email protected](\"tensor_backend\", [\"numpy\", \"torch\"])\[email protected](\"array_size\",\n [2, 2**5, 2**10, 2**15, 2**20, [2, 2], [5, 5, 5]])\ndef test_allgather_different_array_size(ray_start_single_node, array_size,\n tensor_backend, backend):\n world_size = 2\n actors, _ = create_collective_workers(world_size, backend=backend)\n init_tensors_for_gather_scatter(\n actors, array_size=array_size, tensor_backend=tensor_backend)\n results = ray.get([a.do_allgather.remote() for a in actors])\n for i in range(world_size):\n for j in range(world_size):\n if tensor_backend == \"numpy\":\n assert (results[i][j] == np.ones(array_size, dtype=np.float32)\n * (j + 1)).all()\n else:\n assert (results[i][j] == torch.ones(\n array_size, dtype=torch.float32) * (j + 1)).all()\n\n\[email protected](\"backend\", [Backend.GLOO])\[email protected](\"dtype\",\n [np.uint8, np.float16, np.float32, np.float64])\ndef test_allgather_different_dtype(ray_start_single_node, dtype, backend):\n world_size = 2\n actors, _ = create_collective_workers(world_size, backend=backend)\n init_tensors_for_gather_scatter(actors, dtype=dtype)\n results = ray.get([a.do_allgather.remote() for a in actors])\n for i in range(world_size):\n for j in range(world_size):\n assert (results[i][j] == np.ones(10, dtype=dtype) * (j + 1)).all()\n\n\[email protected](\"backend\", [Backend.GLOO])\[email protected](\"length\", [0, 1, 2, 3])\ndef test_unmatched_tensor_list_length(ray_start_single_node, length, backend):\n world_size = 2\n actors, _ = create_collective_workers(world_size, backend=backend)\n list_buffer = [np.ones(10, dtype=np.float32) for _ in range(length)]\n ray.wait(\n [a.set_list_buffer.remote(list_buffer, copy=True) for a in actors])\n if length != world_size:\n with pytest.raises(RuntimeError):\n ray.get([a.do_allgather.remote() for a in actors])\n else:\n ray.get([a.do_allgather.remote() for a in actors])\n\n\[email protected](\"backend\", [Backend.GLOO])\[email protected](\"shape\", [10, 20, [4, 5], [1, 3, 5, 7]])\ndef test_unmatched_tensor_shape(ray_start_single_node, shape, backend):\n world_size = 2\n actors, _ = create_collective_workers(world_size, backend=backend)\n init_tensors_for_gather_scatter(actors, array_size=10)\n list_buffer = [np.ones(shape, dtype=np.float32) for _ in range(world_size)]\n ray.get([a.set_list_buffer.remote(list_buffer, copy=True) for a in actors])\n if shape != 10:\n with pytest.raises(RuntimeError):\n ray.get([a.do_allgather.remote() for a in actors])\n else:\n ray.get([a.do_allgather.remote() for a in actors])\n\n\[email protected](\"backend\", [Backend.GLOO])\ndef test_allgather_torch_numpy(ray_start_single_node, backend):\n world_size = 2\n shape = [10, 10]\n actors, _ = create_collective_workers(world_size, backend=backend)\n\n # tensor is pytorch, list is numpy\n for i, a in enumerate(actors):\n t = torch.ones(shape, dtype=torch.float32) * (i + 1)\n ray.wait([a.set_buffer.remote(t)])\n list_buffer = [\n np.ones(shape, dtype=np.float32) for _ in range(world_size)\n ]\n ray.wait([a.set_list_buffer.remote(list_buffer, copy=True)])\n results = ray.get([a.do_allgather.remote() for a in actors])\n for i in range(world_size):\n for j in range(world_size):\n assert 
(results[i][j] == np.ones(shape, dtype=np.float32) *\n (j + 1)).all()\n\n # tensor is numpy, list is pytorch\n for i, a in enumerate(actors):\n t = np.ones(shape, dtype=np.float32) * (i + 1)\n ray.wait([a.set_buffer.remote(t)])\n list_buffer = [\n torch.ones(shape, dtype=torch.float32) for _ in range(world_size)\n ]\n ray.wait([a.set_list_buffer.remote(list_buffer, copy=True)])\n results = ray.get([a.do_allgather.remote() for a in actors])\n for i in range(world_size):\n for j in range(world_size):\n assert (results[i][j] == torch.ones(shape, dtype=torch.float32) *\n (j + 1)).all()\n\n # some tensors in the list are pytorch, some are numpy\n for i, a in enumerate(actors):\n t = np.ones(shape, dtype=np.float32) * (i + 1)\n ray.wait([a.set_buffer.remote(t)])\n list_buffer = []\n for j in range(world_size):\n if j % 2 == 0:\n list_buffer.append(torch.ones(shape, dtype=torch.float32))\n else:\n list_buffer.append(np.ones(shape, dtype=np.float32))\n ray.wait([a.set_list_buffer.remote(list_buffer, copy=True)])\n results = ray.get([a.do_allgather.remote() for a in actors])\n for i in range(world_size):\n for j in range(world_size):\n if j % 2 == 0:\n assert (results[i][j] == torch.ones(\n shape, dtype=torch.float32) * (j + 1)).all()\n else:\n assert (results[i][j] == np.ones(shape, dtype=np.float32) *\n (j + 1)).all()\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n",
"import argparse\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Subset\nfrom torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\n\nimport ray\nfrom ray import tune\nfrom ray.tune import CLIReporter\nfrom ray.tune.schedulers import PopulationBasedTraining\nfrom ray.tune.utils.mock import FailureInjectorCallback\nfrom ray.util.sgd.torch import TorchTrainer, TrainingOperator\nfrom ray.util.sgd.torch.resnet import ResNet18\nfrom ray.util.sgd.utils import BATCH_SIZE\n\nfrom ray.tune.utils.release_test_util import ProgressCallback\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--smoke-test\",\n action=\"store_true\",\n default=False,\n help=\"Finish quickly for training.\")\nargs = parser.parse_args()\n\n\ndef initialization_hook():\n # Need this for avoiding a connection restart issue on AWS.\n os.environ[\"NCCL_SOCKET_IFNAME\"] = \"^docker0,lo\"\n os.environ[\"NCCL_LL_THRESHOLD\"] = \"0\"\n\n # set the below if needed\n # print(\"NCCL DEBUG SET\")\n # os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n\n\ndef cifar_creator(config):\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ]) # meanstd transformation\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = CIFAR10(\n root=\"~/data\", train=True, download=True, transform=transform_train)\n validation_dataset = CIFAR10(\n root=\"~/data\", train=False, download=False, transform=transform_test)\n\n if config.get(\"test_mode\"):\n train_dataset = Subset(train_dataset, list(range(64)))\n validation_dataset = Subset(validation_dataset, list(range(64)))\n\n train_loader = DataLoader(\n train_dataset, batch_size=config[BATCH_SIZE], num_workers=2)\n validation_loader = DataLoader(\n validation_dataset, batch_size=config[BATCH_SIZE], num_workers=2)\n return train_loader, validation_loader\n\n\ndef optimizer_creator(model, config):\n \"\"\"Returns optimizer\"\"\"\n return torch.optim.SGD(\n model.parameters(),\n lr=config.get(\"lr\", 0.1),\n momentum=config.get(\"momentum\", 0.9))\n\n\nray.init(address=\"auto\" if not args.smoke_test else None, log_to_driver=True)\nnum_training_workers = 1 if args.smoke_test else 3\n\nCustomTrainingOperator = TrainingOperator.from_creators(\n model_creator=ResNet18,\n optimizer_creator=optimizer_creator,\n data_creator=cifar_creator,\n loss_creator=nn.CrossEntropyLoss)\n\nTorchTrainable = TorchTrainer.as_trainable(\n training_operator_cls=CustomTrainingOperator,\n initialization_hook=initialization_hook,\n num_workers=num_training_workers,\n config={\n \"test_mode\": args.smoke_test,\n BATCH_SIZE: 128 * num_training_workers,\n },\n use_gpu=not args.smoke_test,\n backend=\"gloo\", # This should also work with NCCL\n)\n\npbt_scheduler = PopulationBasedTraining(\n time_attr=\"training_iteration\",\n metric=\"val_loss\",\n mode=\"min\",\n perturbation_interval=1,\n hyperparam_mutations={\n # distribution for resampling\n \"lr\": lambda: np.random.uniform(0.001, 1),\n # allow perturbations within this set of categorical values\n \"momentum\": [0.8, 0.9, 0.99],\n })\n\nreporter = CLIReporter()\nreporter.add_metric_column(\"val_loss\", \"loss\")\nreporter.add_metric_column(\"val_accuracy\", \"acc\")\n\nanalysis = tune.run(\n TorchTrainable,\n 
num_samples=4,\n config={\n \"lr\": tune.choice([0.001, 0.01, 0.1]),\n \"momentum\": 0.8,\n \"head_location\": None,\n \"worker_locations\": None\n },\n max_failures=-1, # used for fault tolerance\n checkpoint_freq=2, # used for fault tolerance\n progress_reporter=reporter,\n scheduler=pbt_scheduler,\n callbacks=[\n FailureInjectorCallback(time_between_checks=90),\n ProgressCallback()\n ],\n stop={\"training_iteration\": 1} if args.smoke_test else None)\n\nprint(analysis.get_best_config(metric=\"val_loss\", mode=\"min\"))\n",
"import pytest\nimport torch\nimport ray\nfrom ray.util.client.ray_client_helpers import ray_start_client_server\n\npytest.importorskip(\"horovod\")\n\ntry:\n from horovod.ray.runner import RayExecutor\n from horovod.common.util import gloo_built\nexcept ImportError:\n pass # This shouldn't be reached - the test should be skipped.\n\n\n# For each test, run it once with ray.init() and again with ray client.\[email protected](params=[False, True])\ndef ray_start_4_cpus(request):\n if request.param:\n\n def ray_connect_handler(job_config=None):\n # Ray client will disconnect from ray when\n # num_clients == 0.\n if ray.is_initialized():\n return\n else:\n return ray.init(job_config=job_config, num_cpus=4)\n\n assert not ray.util.client.ray.is_connected()\n with ray_start_client_server(ray_connect_handler=ray_connect_handler):\n assert ray.util.client.ray.is_connected()\n yield\n else:\n ray.init(num_cpus=4)\n yield\n ray.shutdown()\n\n\ndef _train(batch_size=32, batch_per_iter=10):\n import torch.nn.functional as F\n import torch.optim as optim\n import torch.utils.data.distributed\n import horovod.torch as hvd\n import timeit\n\n hvd.init()\n\n # Set up fixed fake data\n data = torch.randn(batch_size, 2)\n target = torch.LongTensor(batch_size).random_() % 2\n\n model = torch.nn.Sequential(torch.nn.Linear(2, 2))\n optimizer = optim.SGD(model.parameters(), lr=0.01)\n\n # Horovod: wrap optimizer with DistributedOptimizer.\n optimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters())\n\n # Horovod: broadcast parameters & optimizer state.\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n def benchmark_step():\n optimizer.zero_grad()\n output = model(data)\n loss = F.cross_entropy(output, target)\n loss.backward()\n optimizer.step()\n\n timeit.timeit(benchmark_step, number=batch_per_iter)\n return hvd.local_rank()\n\n\[email protected](\n not gloo_built(), reason=\"Gloo is required for Ray integration\")\ndef test_train(ray_start_4_cpus):\n def simple_fn(worker):\n local_rank = _train()\n return local_rank\n\n setting = RayExecutor.create_settings(timeout_s=30)\n hjob = RayExecutor(\n setting, num_workers=3, use_gpu=torch.cuda.is_available())\n hjob.start()\n result = hjob.execute(simple_fn)\n assert set(result) == {0, 1, 2}\n result = ray.get(hjob.run_remote(simple_fn, args=[None]))\n assert set(result) == {0, 1, 2}\n hjob.shutdown()\n\n\[email protected](\n not gloo_built(), reason=\"Gloo is required for Ray integration\")\ndef test_horovod_example(ray_start_4_cpus):\n from ray.util.horovod.horovod_example import main\n kwargs = {\n \"data_dir\": \"./data\",\n \"num_epochs\": 1,\n }\n\n main(num_workers=1, use_gpu=False, kwargs=kwargs)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__] + sys.argv[1:]))\n",
"import collections\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nimport logging\n\nfrom ray.tune import trial_runner\nfrom ray.tune.result import DEFAULT_METRIC\nfrom ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler\nfrom ray.tune.trial import Trial\nfrom ray.tune.error import TuneError\n\nlogger = logging.getLogger(__name__)\n\n\n# Implementation notes:\n# This implementation contains 3 logical levels.\n# Each HyperBand iteration is a \"band\". There can be multiple\n# bands running at once, and there can be 1 band that is incomplete.\n#\n# In each band, there are at most `s` + 1 brackets.\n# `s` is a value determined by given parameters, and assigned on\n# a cyclic basis.\n#\n# In each bracket, there are at most `n(s)` trials, indicating that\n# `n` is a function of `s`. These trials go through a series of\n# halving procedures, dropping lowest performers. Multiple\n# brackets are running at once.\n#\n# Trials added will be inserted into the most recent bracket\n# and band and will spill over to new brackets/bands accordingly.\n#\n# This maintains the bracket size and max trial count per band\n# to 5 and 117 respectively, which correspond to that of\n# `max_attr=81, eta=3` from the blog post. Trials will fill up\n# from smallest bracket to largest, with largest\n# having the most rounds of successive halving.\nclass HyperBandScheduler(FIFOScheduler):\n \"\"\"Implements the HyperBand early stopping algorithm.\n\n HyperBandScheduler early stops trials using the HyperBand optimization\n algorithm. It divides trials into brackets of varying sizes, and\n periodically early stops low-performing trials within each bracket.\n\n To use this implementation of HyperBand with Tune, all you need\n to do is specify the max length of time a trial can run `max_t`, the time\n units `time_attr`, the name of the reported objective value `metric`,\n and if `metric` is to be maximized or minimized (`mode`).\n We automatically determine reasonable values for the other\n HyperBand parameters based on the given values.\n\n For example, to limit trials to 10 minutes and early stop based on the\n `episode_mean_reward` attr, construct:\n\n ``HyperBand('time_total_s', 'episode_reward_mean', max_t=600)``\n\n Note that Tune's stopping criteria will be applied in conjunction with\n HyperBand's early stopping mechanisms.\n\n See also: https://people.eecs.berkeley.edu/~kjamieson/hyperband.html\n\n Args:\n time_attr (str): The training result attr to use for comparing time.\n Note that you can pass in something non-temporal such as\n `training_iteration` as a measure of progress, the only requirement\n is that the attribute should increase monotonically.\n metric (str): The training result objective value attribute. Stopping\n procedures will use this attribute. If None but a mode was passed,\n the `ray.tune.result.DEFAULT_METRIC` will be used per default.\n mode (str): One of {min, max}. Determines whether objective is\n minimizing or maximizing the metric attribute.\n max_t (int): max time units per trial. Trials will be stopped after\n max_t time units (determined by time_attr) have passed.\n The scheduler will terminate trials after this time has passed.\n Note that this is different from the semantics of `max_t` as\n mentioned in the original HyperBand paper.\n reduction_factor (float): Same as `eta`. 
Determines how sharp\n the difference is between bracket space-time allocation ratios.\n stop_last_trials (bool): Whether to terminate the trials after\n reaching max_t. Defaults to True.\n \"\"\"\n\n def __init__(self,\n time_attr: str = \"training_iteration\",\n reward_attr: Optional[str] = None,\n metric: Optional[str] = None,\n mode: Optional[str] = None,\n max_t: int = 81,\n reduction_factor: float = 3,\n stop_last_trials: bool = True):\n assert max_t > 0, \"Max (time_attr) not valid!\"\n if mode:\n assert mode in [\"min\", \"max\"], \"`mode` must be 'min' or 'max'!\"\n\n if reward_attr is not None:\n mode = \"max\"\n metric = reward_attr\n logger.warning(\n \"`reward_attr` is deprecated and will be removed in a future \"\n \"version of Tune. \"\n \"Setting `metric={}` and `mode=max`.\".format(reward_attr))\n\n FIFOScheduler.__init__(self)\n self._eta = reduction_factor\n self._s_max_1 = int(\n np.round(np.log(max_t) / np.log(reduction_factor))) + 1\n self._max_t_attr = max_t\n # bracket max trials\n self._get_n0 = lambda s: int(\n np.ceil(self._s_max_1 / (s + 1) * self._eta**s))\n # bracket initial iterations\n self._get_r0 = lambda s: int((max_t * self._eta**(-s)))\n self._hyperbands = [[]] # list of hyperband iterations\n self._trial_info = {} # Stores Trial -> Bracket, Band Iteration\n\n # Tracks state for new trial add\n self._state = {\"bracket\": None, \"band_idx\": 0}\n self._num_stopped = 0\n self._metric = metric\n self._mode = mode\n self._metric_op = None\n\n if self._mode == \"max\":\n self._metric_op = 1.\n elif self._mode == \"min\":\n self._metric_op = -1.\n self._time_attr = time_attr\n self._stop_last_trials = stop_last_trials\n\n def set_search_properties(self, metric: Optional[str],\n mode: Optional[str]) -> bool:\n if self._metric and metric:\n return False\n if self._mode and mode:\n return False\n\n if metric:\n self._metric = metric\n if mode:\n self._mode = mode\n\n if self._mode == \"max\":\n self._metric_op = 1.\n elif self._mode == \"min\":\n self._metric_op = -1.\n\n if self._metric is None and self._mode:\n # If only a mode was passed, use anonymous metric\n self._metric = DEFAULT_METRIC\n\n return True\n\n def on_trial_add(self, trial_runner: \"trial_runner.TrialRunner\",\n trial: Trial):\n \"\"\"Adds new trial.\n\n On a new trial add, if current bracket is not filled,\n add to current bracket. Else, if current band is not filled,\n create new bracket, add to current bracket.\n Else, create new iteration, create new bracket, add to bracket.\"\"\"\n if not self._metric or not self._metric_op:\n raise ValueError(\n \"{} has been instantiated without a valid `metric` ({}) or \"\n \"`mode` ({}) parameter. 
Either pass these parameters when \"\n \"instantiating the scheduler, or pass them as parameters \"\n \"to `tune.run()`\".format(self.__class__.__name__, self._metric,\n self._mode))\n\n cur_bracket = self._state[\"bracket\"]\n cur_band = self._hyperbands[self._state[\"band_idx\"]]\n if cur_bracket is None or cur_bracket.filled():\n retry = True\n while retry:\n # if current iteration is filled, create new iteration\n if self._cur_band_filled():\n cur_band = []\n self._hyperbands.append(cur_band)\n self._state[\"band_idx\"] += 1\n\n # cur_band will always be less than s_max_1 or else filled\n s = len(cur_band)\n assert s < self._s_max_1, \"Current band is filled!\"\n if self._get_r0(s) == 0:\n logger.info(\"Bracket too small - Retrying...\")\n cur_bracket = None\n else:\n retry = False\n cur_bracket = self._create_bracket(s)\n cur_band.append(cur_bracket)\n self._state[\"bracket\"] = cur_bracket\n\n self._state[\"bracket\"].add_trial(trial)\n self._trial_info[trial] = cur_bracket, self._state[\"band_idx\"]\n\n def _create_bracket(self, s):\n return Bracket(\n time_attr=self._time_attr,\n max_trials=self._get_n0(s),\n init_t_attr=self._get_r0(s),\n max_t_attr=self._max_t_attr,\n eta=self._eta,\n s=s,\n stop_last_trials=self._stop_last_trials)\n\n def _cur_band_filled(self) -> bool:\n \"\"\"Checks if the current band is filled.\n\n The size of the current band should be equal to s_max_1\"\"\"\n\n cur_band = self._hyperbands[self._state[\"band_idx\"]]\n return len(cur_band) == self._s_max_1\n\n def on_trial_result(self, trial_runner: \"trial_runner.TrialRunner\",\n trial: Trial, result: Dict):\n \"\"\"If bracket is finished, all trials will be stopped.\n\n If a given trial finishes and bracket iteration is not done,\n the trial will be paused and resources will be given up.\n\n This scheduler will not start trials but will stop trials.\n The current running trial will not be handled,\n as the trialrunner will be given control to handle it.\"\"\"\n\n bracket, _ = self._trial_info[trial]\n bracket.update_trial_stats(trial, result)\n\n if bracket.continue_trial(trial):\n return TrialScheduler.CONTINUE\n\n action = self._process_bracket(trial_runner, bracket)\n logger.debug(f\"{action} for {trial} on \"\n f\"{self._time_attr}={result.get(self._time_attr)}\")\n return action\n\n def _process_bracket(self, trial_runner: \"trial_runner.TrialRunner\",\n bracket: \"Bracket\") -> str:\n \"\"\"This is called whenever a trial makes progress.\n\n When all live trials in the bracket have no more iterations left,\n Trials will be successively halved. 
If bracket is done, all\n non-running trials will be stopped and cleaned up,\n and during each halving phase, bad trials will be stopped while good\n trials will return to \"PENDING\".\"\"\"\n\n action = TrialScheduler.PAUSE\n if bracket.cur_iter_done():\n if bracket.finished():\n bracket.cleanup_full(trial_runner)\n return TrialScheduler.STOP\n\n good, bad = bracket.successive_halving(self._metric,\n self._metric_op)\n # kill bad trials\n self._num_stopped += len(bad)\n for t in bad:\n if t.status == Trial.PAUSED:\n trial_runner.stop_trial(t)\n elif t.status == Trial.RUNNING:\n bracket.cleanup_trial(t)\n action = TrialScheduler.STOP\n else:\n raise TuneError(f\"Trial with unexpected bad status \"\n f\"encountered: {t.status}\")\n\n # ready the good trials - if trial is too far ahead, don't continue\n for t in good:\n if t.status not in [Trial.PAUSED, Trial.RUNNING]:\n raise TuneError(f\"Trial with unexpected good status \"\n f\"encountered: {t.status}\")\n if bracket.continue_trial(t):\n if t.status == Trial.PAUSED:\n self._unpause_trial(trial_runner, t)\n elif t.status == Trial.RUNNING:\n action = TrialScheduler.CONTINUE\n return action\n\n def on_trial_remove(self, trial_runner: \"trial_runner.TrialRunner\",\n trial: Trial):\n \"\"\"Notification when trial terminates.\n\n Trial info is removed from bracket. Triggers halving if bracket is\n not finished.\"\"\"\n bracket, _ = self._trial_info[trial]\n bracket.cleanup_trial(trial)\n if not bracket.finished():\n self._process_bracket(trial_runner, bracket)\n\n def on_trial_complete(self, trial_runner: \"trial_runner.TrialRunner\",\n trial: Trial, result: Dict):\n \"\"\"Cleans up trial info from bracket if trial completed early.\"\"\"\n self.on_trial_remove(trial_runner, trial)\n\n def on_trial_error(self, trial_runner: \"trial_runner.TrialRunner\",\n trial: Trial):\n \"\"\"Cleans up trial info from bracket if trial errored early.\"\"\"\n self.on_trial_remove(trial_runner, trial)\n\n def choose_trial_to_run(\n self, trial_runner: \"trial_runner.TrialRunner\") -> Optional[Trial]:\n \"\"\"Fair scheduling within iteration by completion percentage.\n\n List of trials not used since all trials are tracked as state\n of scheduler. If iteration is occupied (ie, no trials to run),\n then look into next iteration.\n \"\"\"\n\n for hyperband in self._hyperbands:\n # band will have None entries if no resources\n # are to be allocated to that bracket.\n scrubbed = [b for b in hyperband if b is not None]\n for bracket in sorted(\n scrubbed, key=lambda b: b.completion_percentage()):\n for trial in bracket.current_trials():\n if (trial.status == Trial.PENDING\n and trial_runner.has_resources_for_trial(trial)):\n return trial\n return None\n\n def debug_string(self) -> str:\n \"\"\"This provides a progress notification for the algorithm.\n\n For each bracket, the algorithm will output a string as follows:\n\n Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):\n {PENDING: 2, RUNNING: 3, TERMINATED: 2}\n\n \"Max Size\" indicates the max number of pending/running experiments\n set according to the Hyperband algorithm.\n\n \"Milestone\" indicates the iterations a trial will run for before\n the next halving will occur.\n\n \"Completed\" indicates an approximate progress metric. 
Some brackets,\n like ones that are unfilled, will not reach 100%.\n \"\"\"\n out = \"Using HyperBand: \"\n out += \"num_stopped={} total_brackets={}\".format(\n self._num_stopped, sum(len(band) for band in self._hyperbands))\n for i, band in enumerate(self._hyperbands):\n out += \"\\nRound #{}:\".format(i)\n for bracket in band:\n if bracket:\n out += \"\\n {}\".format(bracket)\n return out\n\n def state(self) -> Dict[str, int]:\n return {\n \"num_brackets\": sum(len(band) for band in self._hyperbands),\n \"num_stopped\": self._num_stopped\n }\n\n def _unpause_trial(self, trial_runner: \"trial_runner.TrialRunner\",\n trial: Trial):\n trial_runner.trial_executor.unpause_trial(trial)\n\n\nclass Bracket:\n \"\"\"Logical object for tracking Hyperband bracket progress. Keeps track\n of proper parameters as designated by HyperBand.\n\n Also keeps track of progress to ensure good scheduling.\n \"\"\"\n\n def __init__(self,\n time_attr: str,\n max_trials: int,\n init_t_attr: int,\n max_t_attr: int,\n eta: float,\n s: int,\n stop_last_trials: bool = True):\n self._live_trials = {} # maps trial -> current result\n self._all_trials = []\n self._time_attr = time_attr # attribute to\n\n self._n = self._n0 = max_trials\n self._r = self._r0 = init_t_attr\n self._max_t_attr = max_t_attr\n self._cumul_r = self._r0\n\n self._eta = eta\n self._halves = s\n\n self._total_work = self._calculate_total_work(self._n0, self._r0, s)\n self._completed_progress = 0\n self.stop_last_trials = stop_last_trials\n\n def add_trial(self, trial: Trial):\n \"\"\"Add trial to bracket assuming bracket is not filled.\n\n At a later iteration, a newly added trial will be given equal\n opportunity to catch up.\"\"\"\n assert not self.filled(), \"Cannot add trial to filled bracket!\"\n self._live_trials[trial] = None\n self._all_trials.append(trial)\n\n def cur_iter_done(self) -> bool:\n \"\"\"Checks if all iterations have completed.\n\n TODO(rliaw): also check that `t.iterations == self._r`\"\"\"\n return all(\n self._get_result_time(result) >= self._cumul_r\n for result in self._live_trials.values())\n\n def finished(self) -> bool:\n if not self.stop_last_trials:\n return False\n return self._halves == 0 and self.cur_iter_done()\n\n def current_trials(self) -> List[Trial]:\n return list(self._live_trials)\n\n def continue_trial(self, trial: Trial) -> bool:\n result = self._live_trials[trial]\n if not self.stop_last_trials and self._halves == 0:\n return True\n elif self._get_result_time(result) < self._cumul_r:\n return True\n return False\n\n def filled(self) -> bool:\n \"\"\"Checks if bracket is filled.\n\n Only let new trials be added at current level minimizing the need\n to backtrack and bookkeep previous medians.\"\"\"\n\n return len(self._live_trials) == self._n\n\n def successive_halving(self, metric: str, metric_op: float\n ) -> Tuple[List[Trial], List[Trial]]:\n if self._halves == 0 and not self.stop_last_trials:\n return self._live_trials, []\n assert self._halves > 0\n self._halves -= 1\n self._n /= self._eta\n self._n = int(np.ceil(self._n))\n\n self._r *= self._eta\n self._r = int(min(self._r, self._max_t_attr - self._cumul_r))\n self._cumul_r = self._r\n sorted_trials = sorted(\n self._live_trials,\n key=lambda t: metric_op * self._live_trials[t][metric])\n\n good, bad = sorted_trials[-self._n:], sorted_trials[:-self._n]\n return good, bad\n\n def update_trial_stats(self, trial: Trial, result: Dict):\n \"\"\"Update result for trial. 
Called after trial has finished\n an iteration - will decrement iteration count.\n\n TODO(rliaw): The other alternative is to keep the trials\n in and make sure they're not set as pending later.\"\"\"\n\n assert trial in self._live_trials\n assert self._get_result_time(result) >= 0\n observed_time = self._get_result_time(result)\n last_observed = self._get_result_time(self._live_trials[trial])\n\n delta = observed_time - last_observed\n if delta <= 0:\n logger.info(\"Restoring from a previous point in time. \"\n \"Previous={}; Now={}\".format(last_observed,\n observed_time))\n self._completed_progress += delta\n self._live_trials[trial] = result\n\n def cleanup_trial(self, trial: Trial):\n \"\"\"Clean up statistics tracking for terminated trials (either by force\n or otherwise).\n\n This may cause bad trials to continue for a long time, in the case\n where all the good trials finish early and there are only bad trials\n left in a bracket with a large max-iteration.\"\"\"\n assert trial in self._live_trials\n del self._live_trials[trial]\n\n def cleanup_full(self, trial_runner: \"trial_runner.TrialRunner\"):\n \"\"\"Cleans up bracket after bracket is completely finished.\n\n Lets the last trial continue to run until termination condition\n kicks in.\"\"\"\n for trial in self.current_trials():\n if (trial.status == Trial.PAUSED):\n trial_runner.stop_trial(trial)\n\n def completion_percentage(self) -> float:\n \"\"\"Returns a progress metric.\n\n This will not be always finish with 100 since dead trials\n are dropped.\"\"\"\n if self.finished():\n return 1.0\n return min(self._completed_progress / self._total_work, 1.0)\n\n def _get_result_time(self, result: Dict) -> float:\n if result is None:\n return 0\n return result[self._time_attr]\n\n def _calculate_total_work(self, n: int, r: float, s: int):\n work = 0\n cumulative_r = r\n for _ in range(s + 1):\n work += int(n) * int(r)\n n /= self._eta\n n = int(np.ceil(n))\n r *= self._eta\n r = int(min(r, self._max_t_attr - cumulative_r))\n return work\n\n def __repr__(self) -> str:\n status = \", \".join([\n \"Max Size (n)={}\".format(self._n),\n \"Milestone (r)={}\".format(self._cumul_r),\n \"completed={:.1%}\".format(self.completion_percentage())\n ])\n counts = collections.Counter([t.status for t in self._all_trials])\n trial_statuses = \", \".join(\n sorted(\"{}: {}\".format(k, v) for k, v in counts.items()))\n return \"Bracket({}): {{{}}} \".format(status, trial_statuses)\n",
"import gym\nimport numpy as np\nfrom typing import List, Optional, Union\n\nfrom ray.rllib.utils.framework import try_import_torch\n\ntorch, _ = try_import_torch()\n\n\nclass ViewRequirement:\n \"\"\"Single view requirement (for one column in an SampleBatch/input_dict).\n\n Policies and ModelV2s return a Dict[str, ViewRequirement] upon calling\n their `[train|inference]_view_requirements()` methods, where the str key\n represents the column name (C) under which the view is available in the\n input_dict/SampleBatch and ViewRequirement specifies the actual underlying\n column names (in the original data buffer), timestep shifts, and other\n options to build the view.\n\n Examples:\n >>> # The default ViewRequirement for a Model is:\n >>> req = [ModelV2].view_requirements\n >>> print(req)\n {\"obs\": ViewRequirement(shift=0)}\n \"\"\"\n\n def __init__(self,\n data_col: Optional[str] = None,\n space: gym.Space = None,\n shift: Union[int, str, List[int]] = 0,\n index: Optional[int] = None,\n batch_repeat_value: int = 1,\n used_for_compute_actions: bool = True,\n used_for_training: bool = True):\n \"\"\"Initializes a ViewRequirement object.\n\n Args:\n data_col (Optional[str]): The data column name from the SampleBatch\n (str key). If None, use the dict key under which this\n ViewRequirement resides.\n space (gym.Space): The gym Space used in case we need to pad data\n in inaccessible areas of the trajectory (t<0 or t>H).\n Default: Simple box space, e.g. rewards.\n shift (Union[int, str, List[int]]): Single shift value or\n list of relative positions to use (relative to the underlying\n `data_col`).\n Example: For a view column \"prev_actions\", you can set\n `data_col=\"actions\"` and `shift=-1`.\n Example: For a view column \"obs\" in an Atari framestacking\n fashion, you can set `data_col=\"obs\"` and\n `shift=[-3, -2, -1, 0]`.\n Example: For the obs input to an attention net, you can specify\n a range via a str: `shift=\"-100:0\"`, which will pass in\n the past 100 observations plus the current one.\n index (Optional[int]): An optional absolute position arg,\n used e.g. for the location of a requested inference dict within\n the trajectory. Negative values refer to counting from the end\n of a trajectory.\n used_for_compute_actions (bool): Whether the data will be used for\n creating input_dicts for `Policy.compute_actions()` calls (or\n `Policy.compute_actions_from_input_dict()`).\n used_for_training (bool): Whether the data will be used for\n training. If False, the column will not be copied into the\n final train batch.\n \"\"\"\n self.data_col = data_col\n self.space = space if space is not None else gym.spaces.Box(\n float(\"-inf\"), float(\"inf\"), shape=())\n\n self.shift = shift\n if isinstance(self.shift, (list, tuple)):\n self.shift = np.array(self.shift)\n\n # Special case: Providing a (probably larger) range of indices, e.g.\n # \"-100:0\" (past 100 timesteps plus current one).\n self.shift_from = self.shift_to = None\n if isinstance(self.shift, str):\n f, t = self.shift.split(\":\")\n self.shift_from = int(f)\n self.shift_to = int(t)\n\n self.index = index\n self.batch_repeat_value = batch_repeat_value\n\n self.used_for_compute_actions = used_for_compute_actions\n self.used_for_training = used_for_training\n",
"\"\"\"A simple distributed shuffle implementation in Ray.\n\nThis utility provides a `simple_shuffle` function that can be used to\nredistribute M input partitions into N output partitions. It does this with\na single wave of shuffle map tasks followed by a single wave of shuffle reduce\ntasks. Each shuffle map task generates O(N) output objects, and each shuffle\nreduce task consumes O(M) input objects, for a total of O(N*M) objects.\n\nTo try an example 10GB shuffle, run:\n\n $ python -m ray.experimental.shuffle \\\n --num-partitions=50 --partition-size=200e6 \\\n --object-store-memory=1e9\n\nThis will print out some statistics on the shuffle execution such as:\n\n --- Aggregate object store stats across all nodes ---\n Plasma memory usage 0 MiB, 0 objects, 0.0% full\n Spilled 9487 MiB, 2487 objects, avg write throughput 1023 MiB/s\n Restored 9487 MiB, 2487 objects, avg read throughput 1358 MiB/s\n Objects consumed by Ray tasks: 9537 MiB.\n\n Shuffled 9536 MiB in 16.579771757125854 seconds\n\"\"\"\n\nimport time\nfrom typing import List, Iterable, Tuple, Callable, Any, Union\n\nimport ray\nfrom ray.cluster_utils import Cluster\nfrom ray import ObjectRef\n\n# TODO(ekl) why doesn't TypeVar() deserialize properly in Ray?\n# The type produced by the input reader function.\nInType = Any\n# The type produced by the output writer function.\nOutType = Any\n# Integer identifying the partition number.\nPartitionID = int\n\n\nclass ObjectStoreWriter:\n \"\"\"This class is used to stream shuffle map outputs to the object store.\n\n It can be subclassed to optimize writing (e.g., batching together small\n records into larger objects). This will be performance critical if your\n input records are small (the example shuffle uses very large records, so\n the naive strategy works well).\n \"\"\"\n\n def __init__(self):\n self.results = []\n\n def add(self, item: InType) -> None:\n \"\"\"Queue a single item to be written to the object store.\n\n This base implementation immediately writes each given item to the\n object store as a standalone object.\n \"\"\"\n self.results.append(ray.put(item))\n\n def finish(self) -> List[ObjectRef]:\n \"\"\"Return list of object refs representing written items.\"\"\"\n return self.results\n\n\nclass ObjectStoreWriterNonStreaming(ObjectStoreWriter):\n def __init__(self):\n self.results = []\n\n def add(self, item: InType) -> None:\n self.results.append(item)\n\n def finish(self) -> List[Any]:\n return self.results\n\n\ndef round_robin_partitioner(input_stream: Iterable[InType], num_partitions: int\n ) -> Iterable[Tuple[PartitionID, InType]]:\n \"\"\"Round robin partitions items from the input reader.\n\n You can write custom partitioning functions for your use case.\n\n Args:\n input_stream: Iterator over items from the input reader.\n num_partitions: Number of output partitions.\n\n Yields:\n Tuples of (partition id, input item).\n \"\"\"\n i = 0\n for item in input_stream:\n yield (i, item)\n i += 1\n i %= num_partitions\n\n\[email protected]\nclass _StatusTracker:\n def __init__(self):\n self.num_map = 0\n self.num_reduce = 0\n\n def inc(self):\n self.num_map += 1\n\n def inc2(self):\n self.num_reduce += 1\n\n def get_progress(self):\n return self.num_map, self.num_reduce\n\n\ndef render_progress_bar(tracker, input_num_partitions, output_num_partitions):\n from tqdm import tqdm\n num_map = 0\n num_reduce = 0\n map_bar = tqdm(total=input_num_partitions, position=0)\n map_bar.set_description(\"Map Progress.\")\n reduce_bar = tqdm(total=output_num_partitions, 
position=1)\n reduce_bar.set_description(\"Reduce Progress.\")\n\n while (num_map < input_num_partitions\n or num_reduce < output_num_partitions):\n new_num_map, new_num_reduce = ray.get(tracker.get_progress.remote())\n map_bar.update(new_num_map - num_map)\n reduce_bar.update(new_num_reduce - num_reduce)\n num_map = new_num_map\n num_reduce = new_num_reduce\n time.sleep(0.1)\n map_bar.close()\n reduce_bar.close()\n\n\ndef simple_shuffle(*,\n input_reader: Callable[[PartitionID], Iterable[InType]],\n input_num_partitions: int,\n output_num_partitions: int,\n output_writer: Callable[\n [PartitionID, List[Union[ObjectRef, Any]]], OutType],\n partitioner: Callable[[Iterable[InType], int], Iterable[\n PartitionID]] = round_robin_partitioner,\n object_store_writer: ObjectStoreWriter = ObjectStoreWriter,\n tracker: _StatusTracker = None,\n streaming: bool = True) -> List[OutType]:\n \"\"\"Simple distributed shuffle in Ray.\n\n Args:\n input_reader: Function that generates the input items for a\n partition (e.g., data records).\n input_num_partitions: The number of input partitions.\n output_num_partitions: The desired number of output partitions.\n output_writer: Function that consumes a iterator of items for a\n given output partition. It returns a single value that will be\n collected across all output partitions.\n partitioner: Partitioning function to use. Defaults to round-robin\n partitioning of input items.\n object_store_writer: Class used to write input items to the\n object store in an efficient way. Defaults to a naive\n implementation that writes each input record as one object.\n tracker: Tracker actor that is used to display the progress bar.\n streaming: Whether or not if the shuffle will be streaming.\n\n Returns:\n List of outputs from the output writers.\n \"\"\"\n\n @ray.remote(num_returns=output_num_partitions)\n def shuffle_map(i: PartitionID) -> List[List[Union[Any, ObjectRef]]]:\n writers = [object_store_writer() for _ in range(output_num_partitions)]\n for out_i, item in partitioner(input_reader(i), output_num_partitions):\n writers[out_i].add(item)\n return [c.finish() for c in writers]\n\n @ray.remote\n def shuffle_reduce(\n i: PartitionID,\n *mapper_outputs: List[List[Union[Any, ObjectRef]]]) -> OutType:\n input_objects = []\n assert len(mapper_outputs) == input_num_partitions\n for obj_refs in mapper_outputs:\n for obj_ref in obj_refs:\n input_objects.append(obj_ref)\n return output_writer(i, input_objects)\n\n shuffle_map_out = [\n shuffle_map.remote(i) for i in range(input_num_partitions)\n ]\n\n shuffle_reduce_out = [\n shuffle_reduce.remote(\n j, *[shuffle_map_out[i][j] for i in range(input_num_partitions)])\n for j in range(output_num_partitions)\n ]\n\n if tracker:\n render_progress_bar(tracker, input_num_partitions,\n output_num_partitions)\n\n return ray.get(shuffle_reduce_out)\n\n\ndef build_cluster(num_nodes, num_cpus, object_store_memory):\n cluster = Cluster()\n for _ in range(num_nodes):\n cluster.add_node(\n num_cpus=num_cpus, object_store_memory=object_store_memory)\n cluster.wait_for_nodes()\n return cluster\n\n\ndef main(ray_address=None,\n object_store_memory=1e9,\n num_partitions=5,\n partition_size=200e6,\n num_nodes=None,\n num_cpus=8,\n no_streaming=False,\n use_wait=False):\n import argparse\n import numpy as np\n import time\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ray-address\", type=str, default=ray_address)\n parser.add_argument(\n \"--object-store-memory\", type=float, default=object_store_memory)\n 
parser.add_argument(\"--num-partitions\", type=int, default=num_partitions)\n parser.add_argument(\"--partition-size\", type=float, default=partition_size)\n parser.add_argument(\"--num-nodes\", type=int, default=num_nodes)\n parser.add_argument(\"--num-cpus\", type=int, default=num_cpus)\n parser.add_argument(\n \"--no-streaming\", action=\"store_true\", default=no_streaming)\n parser.add_argument(\"--use-wait\", action=\"store_true\", default=use_wait)\n args = parser.parse_args()\n\n is_multi_node = args.num_nodes\n if args.ray_address:\n print(\"Connecting to a existing cluster...\")\n ray.init(address=args.ray_address)\n elif is_multi_node:\n print(\"Emulating a cluster...\")\n print(f\"Num nodes: {args.num_nodes}\")\n print(f\"Num CPU per node: {args.num_cpus}\")\n print(f\"Object store memory per node: {args.object_store_memory}\")\n cluster = build_cluster(args.num_nodes, args.num_cpus,\n args.object_store_memory)\n ray.init(address=cluster.address)\n else:\n print(\"Start a new cluster...\")\n ray.init(\n num_cpus=args.num_cpus,\n object_store_memory=args.object_store_memory)\n\n partition_size = int(args.partition_size)\n num_partitions = args.num_partitions\n rows_per_partition = partition_size // (8 * 2)\n tracker = _StatusTracker.remote()\n use_wait = args.use_wait\n\n def input_reader(i: PartitionID) -> Iterable[InType]:\n for _ in range(num_partitions):\n yield np.ones(\n (rows_per_partition // num_partitions, 2), dtype=np.int64)\n tracker.inc.remote()\n\n def output_writer(i: PartitionID,\n shuffle_inputs: List[ObjectRef]) -> OutType:\n total = 0\n if not use_wait:\n for obj_ref in shuffle_inputs:\n arr = ray.get(obj_ref)\n total += arr.size * arr.itemsize\n else:\n while shuffle_inputs:\n [ready], shuffle_inputs = ray.wait(\n shuffle_inputs, num_returns=1)\n arr = ray.get(ready)\n total += arr.size * arr.itemsize\n\n tracker.inc2.remote()\n return total\n\n def output_writer_non_streaming(i: PartitionID,\n shuffle_inputs: List[Any]) -> OutType:\n total = 0\n for arr in shuffle_inputs:\n total += arr.size * arr.itemsize\n tracker.inc2.remote()\n return total\n\n if args.no_streaming:\n output_writer_callable = output_writer_non_streaming\n object_store_writer = ObjectStoreWriterNonStreaming\n else:\n object_store_writer = ObjectStoreWriter\n output_writer_callable = output_writer\n\n start = time.time()\n output_sizes = simple_shuffle(\n input_reader=input_reader,\n input_num_partitions=num_partitions,\n output_num_partitions=num_partitions,\n output_writer=output_writer_callable,\n object_store_writer=object_store_writer,\n tracker=tracker)\n delta = time.time() - start\n\n time.sleep(.5)\n print()\n print(ray.internal.internal_api.memory_summary(stats_only=True))\n print()\n print(\"Shuffled\", int(sum(output_sizes) / (1024 * 1024)), \"MiB in\", delta,\n \"seconds\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"import argparse\nimport csv\nimport logging\nimport os\nimport random\nimport subprocess\nfrom typing import Iterable, List\n\nimport numpy as np\nimport ray\n\nfrom ray.experimental.raysort import constants\nfrom ray.experimental.raysort import logging_utils\nfrom ray.experimental.raysort import sortlib\nfrom ray.experimental.raysort import tracing_utils\nfrom ray.experimental.raysort.types import BlockInfo, ByteCount, RecordCount, PartId, PartitionInfo, Path # noqa: E501\n\n# ------------------------------------------------------------\n# Parse Arguments\n# ------------------------------------------------------------\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--ray_address\",\n default=\"auto\",\n type=str,\n help=\"if set to None, will launch a local Ray cluster\",\n )\n parser.add_argument(\n \"--total_data_size\",\n default=1_000_000_000,\n type=ByteCount,\n help=\"partition size in bytes\",\n )\n parser.add_argument(\n \"--num_mappers\",\n default=4,\n type=int,\n help=\"number of map tasks\",\n )\n parser.add_argument(\n \"--num_reducers\",\n default=4,\n type=int,\n help=\"number of reduce tasks\",\n )\n parser.add_argument(\n \"--reducer_batch_num_records\",\n default=1_000_000,\n type=RecordCount,\n help=\"number of bytes to buffer before writing the output to EBS\",\n )\n parser.add_argument(\n \"--skip_sorting\",\n default=False,\n action=\"store_true\",\n help=\"if set, no sorting is actually performed\",\n )\n parser.add_argument(\n \"--skip_input\",\n default=False,\n action=\"store_true\",\n help=\"if set, mappers will not read data from disk\",\n )\n parser.add_argument(\n \"--skip_output\",\n default=False,\n action=\"store_true\",\n help=\"if set, reducers will not write out results to disk\",\n )\n # Which tasks to run?\n tasks_group = parser.add_argument_group(\n \"tasks to run\", \"if no task is specified, will run all tasks\")\n tasks = [\"generate_input\", \"sort\", \"validate_output\"]\n for task in tasks:\n tasks_group.add_argument(\n f\"--{task}\", action=\"store_true\", help=f\"run task {task}\")\n\n args = parser.parse_args()\n # Derive additional arguments.\n args.input_part_size = ByteCount(args.total_data_size / args.num_mappers)\n args.output_part_size = ByteCount(args.total_data_size / args.num_reducers)\n args.mount_points = _get_mount_points()\n # If no tasks are specified, run all tasks.\n args_dict = vars(args)\n if not any(args_dict[task] for task in tasks):\n for task in tasks:\n args_dict[task] = True\n return args\n\n\ndef _get_mount_points():\n mnt = \"/mnt\"\n if not os.path.exists(mnt):\n return []\n return [os.path.join(mnt, d) for d in os.listdir(mnt)]\n\n\nargs = None\n\n# ------------------------------------------------------------\n# Generate Input\n# ------------------------------------------------------------\n\n\ndef _make_partition_info(part_id: PartId, kind=\"input\") -> PartitionInfo:\n node = ray.worker.global_worker.node_ip_address\n mnt = random.choice(args.mount_points)\n filepath = _get_part_path(mnt, part_id, kind)\n return PartitionInfo(part_id, node, filepath)\n\n\ndef _get_part_path(mnt: Path, part_id: PartId, kind=\"input\") -> Path:\n assert kind in {\"input\", \"output\"}\n dir_fmt = constants.DATA_DIR_FMT[kind]\n dirpath = dir_fmt.format(mnt=mnt)\n os.makedirs(dirpath, exist_ok=True)\n filename_fmt = constants.FILENAME_FMT[kind]\n filename = filename_fmt.format(part_id=part_id)\n filepath = os.path.join(dirpath, filename)\n return filepath\n\n\[email protected]\ndef 
generate_part(part_id: PartId, size: RecordCount,\n offset: RecordCount) -> PartitionInfo:\n logging_utils.init()\n pinfo = _make_partition_info(part_id)\n if not args.skip_input:\n subprocess.run(\n [constants.GENSORT_PATH, f\"-b{offset}\", f\"{size}\", pinfo.path],\n check=True)\n logging.info(f\"Generated input {pinfo}\")\n return pinfo\n\n\ndef generate_input():\n if args.skip_input:\n return\n size = constants.bytes_to_records(args.input_part_size)\n offset = 0\n tasks = []\n for part_id in range(args.num_mappers):\n tasks.append(generate_part.remote(part_id, size, offset))\n offset += size\n assert offset == constants.bytes_to_records(args.total_data_size), args\n logging.info(f\"Generating {len(tasks)} partitions\")\n parts = ray.get(tasks)\n with open(constants.INPUT_MANIFEST_FILE, \"w\") as fout:\n writer = csv.writer(fout)\n writer.writerows(parts)\n\n\n# ------------------------------------------------------------\n# Sort\n# ------------------------------------------------------------\n\n\ndef _load_manifest(path: Path) -> List[PartitionInfo]:\n if args.skip_input:\n return _load_dummy_manifest()\n with open(path) as fin:\n reader = csv.reader(fin)\n return [\n PartitionInfo(int(part_id), node, path)\n for part_id, node, path in reader\n ]\n\n\ndef _load_dummy_manifest() -> List[PartitionInfo]:\n return [PartitionInfo(i, \"\", \"\") for i in range(args.num_mappers)]\n\n\ndef _load_partition(path: Path) -> np.ndarray:\n return np.fromfile(path, dtype=np.uint8)\n\n\ndef _dummy_sort_and_partition(part: np.ndarray,\n boundaries: List[int]) -> List[BlockInfo]:\n N = len(boundaries)\n offset = 0\n size = int(np.ceil(part.size / N))\n blocks = []\n for _ in range(N):\n blocks.append((offset, size))\n offset += size\n return blocks\n\n\[email protected]\ndef mapper(boundaries: List[int], mapper_id: PartId,\n path: Path) -> List[ray.ObjectRef]:\n logging_utils.init()\n task_id = f\"M-{mapper_id} Mapper\"\n logging.info(f\"{task_id} starting {args}\")\n if args.skip_input:\n block_size = int(np.ceil(args.input_part_size / args.num_reducers))\n return [\n ray.put(\n np.frombuffer(np.random.bytes(block_size), dtype=np.uint8))\n for _ in range(args.num_reducers)\n ]\n\n part = _load_partition(path)\n sort_fn = _dummy_sort_and_partition \\\n if args.skip_sorting else sortlib.sort_and_partition\n blocks = sort_fn(part, boundaries)\n logging.info(f\"{task_id} saving to object store\")\n return [ray.put(part[offset:offset + size]) for offset, size in blocks]\n\n\ndef _dummy_merge(blocks: List[np.ndarray], _n: int) -> Iterable[memoryview]:\n for block in blocks:\n yield block\n\n\[email protected]\ndef reducer(reducer_id: PartId, *blocks: List[ray.ObjectRef]) -> PartitionInfo:\n logging_utils.init()\n task_id = f\"R-{reducer_id} Reducer\"\n logging.info(f\"{task_id} starting\")\n blocks = [np.copy(ray.get(block)) for block in blocks]\n merge_fn = _dummy_merge if args.skip_sorting else sortlib.merge_partitions\n merger = merge_fn(blocks, args.reducer_batch_num_records)\n if args.skip_output:\n for datachunk in merger:\n del datachunk\n logging.info(f\"{task_id} done\")\n return None\n else:\n pinfo = _make_partition_info(reducer_id, \"output\")\n with open(pinfo.path, \"wb\") as fout:\n for datachunk in merger:\n fout.write(datachunk)\n logging.info(f\"{task_id} done\")\n return pinfo\n\n\n@tracing_utils.timeit(\"sorting\")\ndef sort_main():\n partitions = _load_manifest(constants.INPUT_MANIFEST_FILE)\n boundaries = sortlib.get_boundaries(args.num_reducers)\n mapper_results = np.empty(\n 
(args.num_mappers, args.num_reducers), dtype=object)\n for part_id, node, path in partitions:\n opt = {} if args.skip_input else {\n \"resources\": {\n f\"node:{node}\": 1 / args.num_mappers\n },\n \"memory\": args.input_part_size * 1.2,\n }\n opt.update(num_returns=args.num_reducers)\n mapper_results[part_id, :] = mapper.options(**opt).remote(\n boundaries, part_id, path)\n\n reducer_results = []\n for r in range(args.num_reducers):\n opt = {\n \"memory\": args.output_part_size * 1.0,\n }\n blocks = mapper_results[:, r].tolist()\n ret = reducer.options(**opt).remote(r, *blocks)\n reducer_results.append(ret)\n\n reducer_results = ray.get(reducer_results)\n if not args.skip_output:\n with open(constants.OUTPUT_MANIFEST_FILE, \"w\") as fout:\n writer = csv.writer(fout)\n writer.writerows(reducer_results)\n\n\n# ------------------------------------------------------------\n# Validate Output\n# ------------------------------------------------------------\n\n\[email protected]\ndef validate_part(path: Path):\n logging_utils.init()\n proc = subprocess.run([constants.VALSORT_PATH, path], capture_output=True)\n if proc.returncode != 0:\n logging.critical(\"\\n\" + proc.stderr.decode(\"ascii\"))\n raise RuntimeError(f\"Validation failed: {path}\")\n logging.info(f\"Validated output {path}\")\n\n\ndef validate_output():\n if args.skip_output:\n return\n partitions = _load_manifest(constants.OUTPUT_MANIFEST_FILE)\n tasks = []\n for _, node, path in partitions:\n tasks.append(\n validate_part.options(resources={\n f\"node:{node}\": 1 / args.num_reducers\n }).remote(path))\n logging.info(f\"Validating {len(tasks)} partitions\")\n ray.get(tasks)\n logging.info(\"All done!\")\n\n\n# ------------------------------------------------------------\n# Main\n# ------------------------------------------------------------\n\n\ndef init():\n if args.ray_address is None:\n ray.init()\n else:\n ray.init(address=args.ray_address)\n logging_utils.init()\n logging.info(args)\n logging.info(ray.available_resources())\n os.makedirs(constants.WORK_DIR, exist_ok=True)\n\n\ndef main():\n init()\n\n if args.generate_input:\n generate_input()\n\n if args.sort:\n sort_main()\n\n if args.validate_output:\n validate_output()\n\n\nif __name__ == \"__main__\":\n args = get_args()\n main()\n",
"import logging\nimport numpy as np\nimport time\n\nfrom ray.rllib.models.jax.jax_modelv2 import JAXModelV2\nfrom ray.rllib.models.jax.misc import SlimFC\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.framework import try_import_jax\n\njax, flax = try_import_jax()\n\nlogger = logging.getLogger(__name__)\n\n\nclass FullyConnectedNetwork(JAXModelV2):\n \"\"\"Generic fully connected network.\"\"\"\n\n def __init__(self, obs_space, action_space, num_outputs, model_config,\n name):\n super().__init__(obs_space, action_space, num_outputs, model_config,\n name)\n\n self.key = jax.random.PRNGKey(int(time.time()))\n\n activation = model_config.get(\"fcnet_activation\")\n hiddens = model_config.get(\"fcnet_hiddens\", [])\n no_final_linear = model_config.get(\"no_final_linear\")\n self.vf_share_layers = model_config.get(\"vf_share_layers\")\n self.free_log_std = model_config.get(\"free_log_std\")\n\n # Generate free-floating bias variables for the second half of\n # the outputs.\n if self.free_log_std:\n assert num_outputs % 2 == 0, (\n \"num_outputs must be divisible by two\", num_outputs)\n num_outputs = num_outputs // 2\n\n self._hidden_layers = []\n prev_layer_size = int(np.product(obs_space.shape))\n self._logits = None\n\n # Create layers 0 to second-last.\n for size in hiddens[:-1]:\n self._hidden_layers.append(\n SlimFC(\n in_size=prev_layer_size,\n out_size=size,\n activation_fn=activation))\n prev_layer_size = size\n\n # The last layer is adjusted to be of size num_outputs, but it's a\n # layer with activation.\n if no_final_linear and num_outputs:\n self._hidden_layers.append(\n SlimFC(\n in_size=prev_layer_size,\n out_size=num_outputs,\n activation_fn=activation))\n prev_layer_size = num_outputs\n # Finish the layers with the provided sizes (`hiddens`), plus -\n # iff num_outputs > 0 - a last linear layer of size num_outputs.\n else:\n if len(hiddens) > 0:\n self._hidden_layers.append(\n SlimFC(\n in_size=prev_layer_size,\n out_size=hiddens[-1],\n activation_fn=activation))\n prev_layer_size = hiddens[-1]\n if num_outputs:\n self._logits = SlimFC(\n in_size=prev_layer_size,\n out_size=num_outputs,\n activation_fn=None)\n else:\n self.num_outputs = (\n [int(np.product(obs_space.shape))] + hiddens[-1:])[-1]\n\n # Layer to add the log std vars to the state-dependent means.\n if self.free_log_std and self._logits:\n raise ValueError(\"`free_log_std` not supported for JAX yet!\")\n\n self._value_branch_separate = None\n if not self.vf_share_layers:\n # Build a parallel set of hidden layers for the value net.\n prev_vf_layer_size = int(np.product(obs_space.shape))\n vf_layers = []\n for size in hiddens:\n vf_layers.append(\n SlimFC(\n in_size=prev_vf_layer_size,\n out_size=size,\n activation_fn=activation,\n ))\n prev_vf_layer_size = size\n self._value_branch_separate = vf_layers\n\n self._value_branch = SlimFC(\n in_size=prev_layer_size, out_size=1, activation_fn=None)\n # Holds the current \"base\" output (before logits layer).\n self._features = None\n # Holds the last input, in case value branch is separate.\n self._last_flat_in = None\n\n @override(JAXModelV2)\n def forward(self, input_dict, state, seq_lens):\n self._last_flat_in = input_dict[\"obs_flat\"]\n x = self._last_flat_in\n for layer in self._hidden_layers:\n x = layer(x)\n self._features = x\n logits = self._logits(self._features) if self._logits else \\\n self._features\n if self.free_log_std:\n logits = self._append_free_log_std(logits)\n return logits, state\n\n @override(JAXModelV2)\n def 
value_function(self):\n        assert self._features is not None, \"must call forward() first\"\n        if self._value_branch_separate:\n            # The separate value branch is stored as a list of SlimFC layers,\n            # so apply them sequentially rather than calling the list itself.\n            vf_features = self._last_flat_in\n            for layer in self._value_branch_separate:\n                vf_features = layer(vf_features)\n            return self._value_branch(vf_features).squeeze(1)\n        else:\n            return self._value_branch(self._features).squeeze(1)\n"
] | [
[
"numpy.random.seed"
],
[
"numpy.array_split"
],
[
"torch.ones",
"numpy.ones"
],
[
"numpy.random.uniform",
"torch.utils.data.DataLoader"
],
[
"torch.LongTensor",
"torch.randn",
"torch.nn.functional.cross_entropy",
"torch.nn.Linear",
"torch.cuda.is_available"
],
[
"numpy.ceil",
"numpy.log"
],
[
"numpy.array"
],
[
"numpy.ones"
],
[
"numpy.random.bytes",
"numpy.ceil",
"numpy.fromfile",
"numpy.empty"
],
[
"numpy.product"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suryaavala/tfx | [
"c315e7cf75822088e974e15b43c96fab86746733"
] | [
"tfx/components/transform/executor.py"
] | [
"# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Executor for TensorFlow Transform.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport hashlib\nimport os\nfrom typing import Any, Callable, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Set, Text, Tuple, Union\n\nimport absl\nimport apache_beam as beam\nimport pyarrow as pa\nimport tensorflow as tf\nimport tensorflow_data_validation as tfdv\nimport tensorflow_transform as tft\nfrom tensorflow_transform import impl_helper\nimport tensorflow_transform.beam as tft_beam\nfrom tensorflow_transform.beam import analyzer_cache\nfrom tensorflow_transform.beam import common as tft_beam_common\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import metadata_io\nfrom tensorflow_transform.tf_metadata import schema_utils\nfrom tfx import types\nfrom tfx.components.transform import labels\nfrom tfx.components.transform import stats_options_util\nfrom tfx.components.util import tfxio_utils\nfrom tfx.components.util import udf_utils\nfrom tfx.components.util import value_utils\nfrom tfx.dsl.components.base import base_beam_executor\nfrom tfx.dsl.components.base import base_executor\nfrom tfx.dsl.io import fileio\nfrom tfx.proto import example_gen_pb2\nfrom tfx.proto import transform_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.types import standard_artifacts\nfrom tfx.types import standard_component_specs\nfrom tfx.utils import io_utils\nfrom tfx.utils import json_utils\nfrom tfx.utils import proto_utils\nimport tfx_bsl\nfrom tfx_bsl.tfxio import tfxio as tfxio_module\n\nfrom tensorflow_metadata.proto.v0 import anomalies_pb2\nfrom tensorflow_metadata.proto.v0 import schema_pb2\n\n\n# Key for temp path, for internal use only.\nTEMP_PATH_KEY = 'temp_path'\n\nRAW_EXAMPLE_KEY = 'raw_example'\n\n# Schema to use if the input data should be decoded as raw example.\n_RAW_EXAMPLE_SCHEMA = schema_utils.schema_from_feature_spec(\n {RAW_EXAMPLE_KEY: tf.io.FixedLenFeature([], tf.string)})\n\n# TODO(b/123519698): Simplify the code by removing the key structure.\n_TRANSFORM_INTERNAL_FEATURE_FOR_KEY = '__TFT_PASS_KEY__'\n\n# Default file name prefix for transformed_examples.\n_DEFAULT_TRANSFORMED_EXAMPLES_PREFIX = 'transformed_examples'\n\n# Temporary path inside transform_output used for tft.beam\n# TODO(b/125451545): Provide a safe temp path from base executor instead.\n_TEMP_DIR_IN_TRANSFORM_OUTPUT = '.temp_path'\n\n_TRANSFORM_COMPONENT_DESCRIPTOR = 'Transform'\n_TELEMETRY_DESCRIPTORS = [_TRANSFORM_COMPONENT_DESCRIPTOR]\n\n# TODO(b/37788560): Increase this max, based on results of experimentation with\n# many non-packable analyzers on our benchmarks.\n_MAX_ESTIMATED_STAGES_COUNT = 20000\n\n# Beam extra pip package prefix.\n_BEAM_EXTRA_PACKAGE_PREFIX = '--extra_package='\n\n\n# TODO(b/122478841): Move it to a common 
place that is shared across components.\nclass _Status(object):\n \"\"\"Status that reports success or error status of an execution.\"\"\"\n\n def __init__(self, is_error, error_message=None):\n self._is_error = is_error\n self._error_message = error_message\n\n @classmethod\n def OK(cls):\n \"\"\"Returns an ok Status.\"\"\"\n\n return _Status(False)\n\n @classmethod\n def Error(cls, error_message):\n \"\"\"Returns an error Status with error message.\"\"\"\n\n return _Status(True, error_message)\n\n @property\n def error_message(self):\n return self._error_message\n\n\nclass _Dataset(object):\n \"\"\"Dataset to be analyzed and/or transformed.\n\n It also contains bundle of stages of a single dataset through the transform\n pipeline.\n \"\"\"\n _FILE_PATTERN_SUFFIX_LENGTH = 6\n\n def __init__(self, file_pattern: Text,\n file_format: Union[Text, int],\n data_format: int,\n data_view_uri: Optional[Text],\n stats_output_path: Optional[Text] = None,\n materialize_output_path: Optional[Text] = None):\n \"\"\"Initialize a Dataset.\n\n Args:\n file_pattern: The file pattern of the dataset.\n file_format: The file format of the dataset.\n data_format: The data format of the dataset. One of the enums from\n example_gen_pb2.PayloadFormat.\n data_view_uri: URI to the DataView used to parse the data.\n stats_output_path: The file path where to write stats for the dataset.\n materialize_output_path: The file path where to write the dataset.\n \"\"\"\n self._file_pattern = file_pattern\n file_pattern_suffix = os.path.join(\n *file_pattern.split(os.sep)[-self._FILE_PATTERN_SUFFIX_LENGTH:])\n dataset_identifier = file_pattern_suffix + '-' + hashlib.sha256(\n file_pattern.encode()).hexdigest()\n self._dataset_key = analyzer_cache.DatasetKey(dataset_identifier)\n self._file_format = file_format\n self._data_format = data_format\n self._data_view_uri = data_view_uri\n self._stats_output_path = stats_output_path\n self._materialize_output_path = materialize_output_path\n self._index = None\n self._standardized = None\n self._transformed = None\n self._transformed_and_standardized = None\n self._tfxio = None\n\n @property\n def file_pattern(self):\n assert self._file_pattern\n return self._file_pattern\n\n @property\n def stats_output_path(self):\n assert self._stats_output_path\n return self._stats_output_path\n\n @property\n def materialize_output_path(self):\n assert self._materialize_output_path\n return self._materialize_output_path\n\n @property\n def index(self):\n assert self._index is not None\n return self._index\n\n @property\n def dataset_key(self):\n assert self._dataset_key\n return self._dataset_key\n\n @property\n def data_format(self):\n assert self._data_format\n return self._data_format\n\n @property\n def data_view_uri(self):\n return self._data_view_uri\n\n @property\n def file_format(self):\n assert self._file_format\n return self._file_format\n\n @property\n def standardized(self):\n assert self._standardized is not None\n return self._standardized\n\n @property\n def transformed(self):\n assert self._transformed is not None\n return self._transformed\n\n @property\n def transformed_and_standardized(self):\n assert self._transformed_and_standardized is not None\n return self._transformed_and_standardized\n\n @property\n def tfxio(self):\n assert self._tfxio is not None\n return self._tfxio\n\n @index.setter\n def index(self, val):\n self._index = val\n\n @standardized.setter\n def standardized(self, val):\n self._standardized = val\n\n @transformed.setter\n def transformed(self, val):\n 
self._transformed = val\n\n @transformed_and_standardized.setter\n def transformed_and_standardized(self, val):\n self._transformed_and_standardized = val\n\n @tfxio.setter\n def tfxio(self, val):\n self._tfxio = val\n\n\ndef _InvokeStatsOptionsUpdaterFn(\n stats_options_updater_fn: Callable[\n [stats_options_util.StatsType, tfdv.StatsOptions], tfdv.StatsOptions],\n stats_type: stats_options_util.StatsType,\n schema: Optional[schema_pb2.Schema] = None,\n asset_map: Optional[Dict[Text, Text]] = None,\n transform_output_path: Optional[Text] = None) -> tfdv.StatsOptions:\n \"\"\"Invokes the provided stats_options_updater_fn.\n\n Args:\n stats_options_updater_fn: The function to call.\n stats_type: The stats_type use in the function call.\n schema: The input schema to use in the function call.\n asset_map: A dictionary containing key to filename mappings.\n transform_output_path: The path to the transform output.\n\n Returns:\n The updated tfdv.StatsOptions.\n \"\"\"\n options = {}\n if schema is not None:\n schema_copy = schema_pb2.Schema()\n schema_copy.CopyFrom(schema)\n options['schema'] = schema_copy\n if asset_map is not None:\n asset_path = os.path.join(transform_output_path, 'transform_fn',\n tf.saved_model.ASSETS_DIRECTORY)\n vocab_paths = {k: os.path.join(asset_path, v) for k, v in asset_map.items()}\n options['vocab_paths'] = vocab_paths\n return stats_options_updater_fn(stats_type, tfdv.StatsOptions(**options))\n\n\nclass Executor(base_beam_executor.BaseBeamExecutor):\n \"\"\"Transform executor.\"\"\"\n\n def __init__(\n self, context: Optional[base_executor.BaseExecutor.Context] = None):\n super(Executor, self).__init__(context)\n self._pip_dependencies = []\n\n def Do(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]) -> None:\n \"\"\"TensorFlow Transform executor entrypoint.\n\n This implements BaseExecutor.Do() and is invoked by orchestration systems.\n This is not inteded for manual usage or further customization. Please use\n the Transform() function which takes an input format with no artifact\n dependency.\n\n Args:\n input_dict: Input dict from input key to a list of artifacts, including:\n - examples: A list of type `standard_artifacts.Examples` which should\n contain custom splits specified in splits_config. If custom split is\n not provided, this should contain two splits 'train' and 'eval'.\n - schema: A list of type `standard_artifacts.Schema` which should\n contain a single schema artifact.\n - analyzer_cache: Cache input of 'tf.Transform', where cached\n information for analyzed examples from previous runs will be read.\n output_dict: Output dict from key to a list of artifacts, including:\n - transform_graph: Output of 'tf.Transform', which includes an exported\n Tensorflow graph suitable for both training and serving;\n - transformed_examples: Materialized transformed examples, which\n includes transform splits as specified in splits_config. If custom\n split is not provided, this should include both 'train' and 'eval'\n splits.\n - updated_analyzer_cache: Cache output of 'tf.Transform', where\n cached information for analyzed examples will be written.\n exec_properties: A dict of execution properties, including:\n - module_file: The file path to a python module file, from which the\n 'preprocessing_fn' function will be loaded. 
Exactly one of\n 'module_file', 'module_path' and 'preprocessing_fn' should be set.\n - module_path: The python module path, from which the\n 'preprocessing_fn' function will be loaded. Exactly one of\n 'module_file', 'module_path' and 'preprocessing_fn' should be set.\n - preprocessing_fn: The module path to a python function that\n implements 'preprocessing_fn'. Exactly one of 'module_file',\n 'module_path' and 'preprocessing_fn' should be set.\n - splits_config: A transform_pb2.SplitsConfig instance, providing splits\n that should be analyzed and splits that should be transformed. Note\n analyze and transform splits can have overlap. Default behavior (when\n splits_config is not set) is analyze the 'train' split and transform\n all splits. If splits_config is set, analyze cannot be empty.\n - force_tf_compat_v1: Whether to use TF in compat.v1 mode\n irrespective of installed/enabled TF behaviors.\n - disable_statistics: Whether to disable computation of pre-transform\n and post-transform statistics.\n\n Returns:\n None\n \"\"\"\n self._log_startup(input_dict, output_dict, exec_properties)\n\n # TODO(b/175426744): use executor util to create output artifact.\n # Create output artifacts when input Examples Channel contains more than\n # one artifacts.\n num_examples = len(input_dict[standard_component_specs.EXAMPLES_KEY])\n if num_examples > 1 and len(\n output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY]) == 1:\n transformed_examples = artifact_utils.get_single_instance(\n output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY])\n transformed_examples_list = []\n for i in range(num_examples):\n tft_examples = standard_artifacts.Examples()\n tft_examples.copy_from(transformed_examples)\n tft_examples.uri = os.path.join(transformed_examples.uri, str(i))\n transformed_examples_list.append(tft_examples)\n output_dict[standard_component_specs\n .TRANSFORMED_EXAMPLES_KEY] = transformed_examples_list\n\n splits_config = transform_pb2.SplitsConfig()\n if exec_properties.get(standard_component_specs.SPLITS_CONFIG_KEY, None):\n proto_utils.json_to_proto(\n exec_properties[standard_component_specs.SPLITS_CONFIG_KEY],\n splits_config)\n if not splits_config.analyze:\n raise ValueError('analyze cannot be empty when splits_config is set.')\n else:\n splits_config.analyze.append('train')\n\n # All input artifacts should have the same set of split names.\n split_names = artifact_utils.decode_split_names(\n input_dict[standard_component_specs.EXAMPLES_KEY][0].split_names)\n split_names_set = set(split_names)\n\n for artifact in input_dict[standard_component_specs.EXAMPLES_KEY]:\n artifact_split_names = artifact_utils.decode_split_names(\n artifact.split_names)\n if split_names_set != set(artifact_split_names):\n raise ValueError(\n 'Not all input artifacts have the same split names: (%s, %s)' %\n (split_names, artifact_split_names))\n\n splits_config.transform.extend(split_names)\n absl.logging.info(\n \"Analyze the 'train' split and transform all splits when \"\n 'splits_config is not set.')\n\n payload_format, data_view_uri = (\n tfxio_utils.resolve_payload_format_and_data_view_uri(\n input_dict[standard_component_specs.EXAMPLES_KEY]))\n schema_file = io_utils.get_only_uri_in_dir(\n artifact_utils.get_single_uri(\n input_dict[standard_component_specs.SCHEMA_KEY]))\n transform_output = artifact_utils.get_single_uri(\n output_dict[standard_component_specs.TRANSFORM_GRAPH_KEY])\n\n temp_path = os.path.join(transform_output, _TEMP_DIR_IN_TRANSFORM_OUTPUT)\n absl.logging.debug('Using temp 
path %s for tft.beam', temp_path)\n\n analyze_data_paths = []\n for split in splits_config.analyze:\n data_uris = artifact_utils.get_split_uris(\n input_dict[standard_component_specs.EXAMPLES_KEY], split)\n for data_uri in data_uris:\n analyze_data_paths.append(io_utils.all_files_pattern(data_uri))\n\n transform_data_paths = []\n for split in splits_config.transform:\n data_uris = artifact_utils.get_split_uris(\n input_dict[standard_component_specs.EXAMPLES_KEY], split)\n for data_uri in data_uris:\n transform_data_paths.append(io_utils.all_files_pattern(data_uri))\n\n materialize_output_paths = []\n if output_dict.get(\n standard_component_specs.TRANSFORMED_EXAMPLES_KEY) is not None:\n for transformed_example_artifact in output_dict[\n standard_component_specs.TRANSFORMED_EXAMPLES_KEY]:\n transformed_example_artifact.split_names = (\n artifact_utils.encode_split_names(list(splits_config.transform)))\n\n for split in splits_config.transform:\n\n transformed_example_uris = artifact_utils.get_split_uris(\n output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY],\n split)\n for output_uri in transformed_example_uris:\n materialize_output_paths.append(\n os.path.join(output_uri, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX))\n\n def _GetCachePath(label, params_dict):\n if params_dict.get(label) is None:\n return None\n else:\n return artifact_utils.get_single_uri(params_dict[label])\n\n force_tf_compat_v1 = bool(\n exec_properties.get(standard_component_specs.FORCE_TF_COMPAT_V1_KEY, 0))\n\n disable_statistics = bool(\n exec_properties.get(standard_component_specs.DISABLE_STATISTICS_KEY, 0))\n\n # Make sure user packages get propagated to the remote Beam worker.\n user_module_key = exec_properties.get(\n standard_component_specs.MODULE_PATH_KEY, None)\n _, extra_pip_packages = udf_utils.decode_user_module_key(user_module_key)\n for pip_package_path in extra_pip_packages:\n local_pip_package_path = io_utils.ensure_local(pip_package_path)\n self._beam_pipeline_args.append(_BEAM_EXTRA_PACKAGE_PREFIX +\n local_pip_package_path)\n self._pip_dependencies.append(local_pip_package_path)\n\n label_inputs = {\n labels.DISABLE_STATISTICS_LABEL:\n disable_statistics,\n labels.SCHEMA_PATH_LABEL:\n schema_file,\n labels.EXAMPLES_DATA_FORMAT_LABEL:\n payload_format,\n labels.DATA_VIEW_LABEL:\n data_view_uri,\n labels.ANALYZE_DATA_PATHS_LABEL:\n analyze_data_paths,\n labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD] *\n len(analyze_data_paths),\n labels.TRANSFORM_DATA_PATHS_LABEL:\n transform_data_paths,\n labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL: [labels.FORMAT_TFRECORD] *\n len(transform_data_paths),\n labels.MODULE_FILE:\n exec_properties.get(standard_component_specs.MODULE_FILE_KEY, None),\n labels.MODULE_PATH:\n user_module_key,\n labels.PREPROCESSING_FN:\n exec_properties.get(standard_component_specs.PREPROCESSING_FN_KEY,\n None),\n labels.CUSTOM_CONFIG:\n exec_properties.get(standard_component_specs.CUSTOM_CONFIG_KEY,\n None),\n labels.FORCE_TF_COMPAT_V1_LABEL:\n force_tf_compat_v1,\n }\n cache_input = _GetCachePath(standard_component_specs.ANALYZER_CACHE_KEY,\n input_dict)\n if cache_input is not None:\n label_inputs[labels.CACHE_INPUT_PATH_LABEL] = cache_input\n\n label_outputs = {\n labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: transform_output,\n labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL:\n materialize_output_paths,\n labels.TEMP_OUTPUT_LABEL: str(temp_path),\n }\n cache_output = _GetCachePath(\n standard_component_specs.UPDATED_ANALYZER_CACHE_KEY, output_dict)\n if cache_output 
is not None:\n label_outputs[labels.CACHE_OUTPUT_PATH_LABEL] = cache_output\n status_file = 'status_file' # Unused\n self.Transform(label_inputs, label_outputs, status_file)\n absl.logging.debug('Cleaning up temp path %s on executor success',\n temp_path)\n io_utils.delete_dir(temp_path)\n\n @staticmethod\n @beam.ptransform_fn\n @beam.typehints.with_input_types(beam.Pipeline)\n @beam.typehints.with_output_types(beam.pvalue.PDone)\n def _IncrementPipelineMetrics(\n pipeline: beam.Pipeline, total_columns_count: int,\n analyze_columns_count: int, transform_columns_count: int,\n analyze_paths_count: int, analyzer_cache_enabled: bool,\n disable_statistics: bool, materialize: bool):\n \"\"\"A beam PTransform to increment counters of column usage.\"\"\"\n\n def _MakeAndIncrementCounters(unused_element):\n \"\"\"Increment column usage counters.\"\"\"\n del unused_element\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'total_columns_count').inc(total_columns_count)\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'analyze_columns_count').inc(analyze_columns_count)\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'transform_columns_count').inc(transform_columns_count)\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'analyze_paths_count').inc(analyze_paths_count)\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'analyzer_cache_enabled').inc(int(analyzer_cache_enabled))\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'disable_statistics').inc(int(disable_statistics))\n beam.metrics.Metrics.counter(\n tft_beam_common.METRICS_NAMESPACE,\n 'materialize').inc(int(materialize))\n return beam.pvalue.PDone(pipeline)\n\n return (\n pipeline\n | 'CreateSole' >> beam.Create([None])\n | 'Count' >> beam.Map(_MakeAndIncrementCounters))\n\n @staticmethod\n @beam.ptransform_fn\n @beam.typehints.with_input_types(Tuple[Optional[bytes], bytes])\n @beam.typehints.with_output_types(beam.pvalue.PDone)\n def _WriteExamples(pcoll: beam.pvalue.PCollection, file_format: Text,\n transformed_example_path: Text) -> beam.pvalue.PDone:\n \"\"\"Writes transformed examples compressed in gzip format.\n\n Args:\n pcoll: PCollection of serialized transformed examples.\n file_format: The output file format.\n transformed_example_path: path to write to.\n\n Returns:\n beam.pvalue.PDone.\n \"\"\"\n assert file_format == labels.FORMAT_TFRECORD, file_format\n\n # TODO(b/139538871): Implement telemetry, on top of pa.Table once available.\n return (\n pcoll\n | 'Values' >> beam.Values()\n | 'Write' >> beam.io.WriteToTFRecord(\n transformed_example_path, file_name_suffix='.gz'))\n\n def _GetSchema(self, schema_path: Text) -> schema_pb2.Schema:\n \"\"\"Gets a tf.metadata schema.\n\n Args:\n schema_path: Path to schema file.\n\n Returns:\n A tf.metadata schema.\n \"\"\"\n schema_reader = io_utils.SchemaReader()\n return schema_reader.read(schema_path)\n\n def _ReadMetadata(self, data_format: int,\n schema_path: Text) -> dataset_metadata.DatasetMetadata:\n \"\"\"Returns a dataset_metadata.DatasetMetadata for the input data.\n\n Args:\n data_format: The data format of the dataset. 
One of the enums from\n example_gen_pb2.PayloadFormat.\n schema_path: path to schema file.\n\n Returns:\n A dataset_metadata.DatasetMetadata representing the provided set of\n columns.\n \"\"\"\n\n if (self._IsDataFormatSequenceExample(data_format) or\n self._IsDataFormatProto(data_format)):\n return dataset_metadata.DatasetMetadata(_RAW_EXAMPLE_SCHEMA)\n schema_proto = self._GetSchema(schema_path)\n return dataset_metadata.DatasetMetadata(schema_proto)\n\n @staticmethod\n @beam.ptransform_fn\n @beam.typehints.with_input_types(pa.RecordBatch)\n @beam.typehints.with_output_types(Tuple[beam.pvalue.PDone,\n Optional[beam.pvalue.PDone],\n Optional[beam.pvalue.PDone]])\n def _GenerateAndMaybeValidateStats(\n pcoll: beam.pvalue.PCollection, stats_output_path: Text,\n stats_options: tfdv.StatsOptions, enable_validation: bool\n ) -> Tuple[beam.pvalue.PDone, Optional[beam.pvalue.PDone],\n Optional[beam.pvalue.PDone]]:\n \"\"\"Generates statistics.\n\n Args:\n pcoll: PCollection of examples.\n stats_output_path: path where statistics is written to.\n stats_options: An instance of `tfdv.StatsOptions()` used when computing\n statistics.\n enable_validation: Whether to enable stats validation.\n\n Returns:\n A tuple containing the beam.pvalue.PDones for generating the stats,\n writing the schema, and writing the validation, in that order. If the\n schema is not present or validation is not enabled, the corresponding\n values are replaced with Nones.\n \"\"\"\n generated_stats = (\n pcoll\n | 'FilterInternalColumn' >> beam.Map(Executor._FilterInternalColumn)\n | 'GenerateStatistics' >> tfdv.GenerateStatistics(stats_options))\n\n stats_result = (\n generated_stats\n | 'WriteStats' >>\n tfdv.WriteStatisticsToBinaryFile(output_path=stats_output_path))\n\n if stats_options.schema is None:\n return (stats_result, None, None)\n\n stats_dir = os.path.dirname(stats_output_path)\n schema_output_path = os.path.join(stats_dir, 'Schema.pb')\n # TODO(b/186867968): See if we should switch to common libraries.\n schema_result = (\n pcoll.pipeline\n | 'CreateSchema' >> beam.Create([stats_options.schema])\n | 'WriteSchema' >> beam.io.WriteToText(\n schema_output_path,\n append_trailing_newlines=False,\n shard_name_template='', # To force unsharded output.\n coder=beam.coders.ProtoCoder(schema_pb2.Schema)))\n\n if not enable_validation:\n return (stats_result, schema_result, None)\n\n validation_output_path = os.path.join(stats_dir, 'SchemaDiff.pb')\n # TODO(b/186867968): See if we should switch to common libraries.\n validation_result = (\n generated_stats\n | 'ValidateStatistics' >> beam.Map(\n lambda stats: tfdv.validate_statistics(stats, stats_options.schema))\n | 'WriteValidation' >> beam.io.WriteToText(\n validation_output_path,\n append_trailing_newlines=False,\n shard_name_template='', # To force unsharded output.\n coder=beam.coders.ProtoCoder(anomalies_pb2.Anomalies)))\n\n return (stats_result, schema_result, validation_result)\n\n # TODO(b/130807807): This is still used by pre-transform stats to decode\n # sequence example as tf.example. 
Once the support is implemented this can be\n # removed.\n @beam.typehints.with_input_types(List[bytes])\n @beam.typehints.with_output_types(pa.RecordBatch)\n class _ToArrowRecordBatchesFn(beam.DoFn):\n \"\"\"Converts a batch of serialized examples to an Arrow RecordBatch.\"\"\"\n\n def __init__(self, schema: Optional[schema_pb2.Schema]):\n self._serialized_schema = schema.SerializeToString() if schema else None\n\n def setup(self):\n args = ([] if self._serialized_schema is None\n else [self._serialized_schema])\n self._decoder = (\n tfx_bsl.coders.example_coder.ExamplesToRecordBatchDecoder(*args))\n\n def process(self, element: List[bytes]) -> Iterable[pa.RecordBatch]:\n yield self._decoder.DecodeBatch(element)\n\n @beam.typehints.with_input_types(beam.Pipeline)\n class _OptimizeRun(beam.PTransform):\n \"\"\"Utilizes TFT cache if applicable and removes unused datasets.\"\"\"\n\n # pyformat: disable\n def __init__(self,\n input_cache_dir: Text,\n output_cache_dir: Text,\n analyze_data_list: List[_Dataset],\n typespecs: Mapping[Text, tf.TypeSpec],\n preprocessing_fn: Any,\n cache_source: beam.PTransform,\n force_tf_compat_v1: bool):\n # pyformat: enable\n self._input_cache_dir = input_cache_dir\n self._output_cache_dir = output_cache_dir\n self._analyze_data_list = analyze_data_list\n self._feature_spec_or_typespec = typespecs\n self._preprocessing_fn = preprocessing_fn\n self._cache_source = cache_source\n self._force_tf_compat_v1 = force_tf_compat_v1\n\n # TODO(zoy): Remove this method once beam no longer pickles PTransforms,\n # once https://issues.apache.org/jira/browse/BEAM-3812 is resolved.\n def to_runner_api_pickled(self, context):\n # Overriding to_runner_api_pickled and calling to_runner_api_parameter\n # instead to make sure that beam doesn't try to pickle the\n # preprocessing_fn with the PTransform instance since it may not be\n # picklable.\n return self.to_runner_api_parameter(context)\n\n def expand(\n self, pipeline\n ) -> Tuple[Dict[Text, Optional[_Dataset]], Optional[Dict[Text, Dict[\n Text, beam.pvalue.PCollection]]]]:\n # TODO(b/170304777): Remove this Create once the issue is fixed in beam.\n # Forcing beam to treat this PTransform as non-primitive.\n _ = pipeline | 'WorkaroundForBug170304777' >> beam.Create([None])\n\n dataset_keys_list = [\n dataset.dataset_key for dataset in self._analyze_data_list\n ]\n # TODO(b/37788560): Remove this restriction when a greater number of\n # stages can be handled efficiently.\n cache_entry_keys = (\n tft_beam.analysis_graph_builder.get_analysis_cache_entry_keys(\n self._preprocessing_fn, self._feature_spec_or_typespec,\n dataset_keys_list, self._force_tf_compat_v1))\n # We estimate the number of stages in the pipeline to be roughly:\n # analyzers * analysis_paths * 10.\n if (len(cache_entry_keys) * len(dataset_keys_list) * 10 >\n _MAX_ESTIMATED_STAGES_COUNT):\n absl.logging.warning(\n 'Disabling cache because otherwise the number of stages might be '\n 'too high ({} analyzers, {} analysis paths)'.format(\n len(cache_entry_keys), len(dataset_keys_list)))\n # Returning None as the input cache here disables both input and output\n # cache.\n return ({d.dataset_key: d for d in self._analyze_data_list}, None)\n\n if self._input_cache_dir is not None:\n absl.logging.info('Reading the following analysis cache entry keys: %s',\n cache_entry_keys)\n input_cache = (\n pipeline\n | 'ReadCache' >> analyzer_cache.ReadAnalysisCacheFromFS(\n self._input_cache_dir,\n dataset_keys_list,\n source=self._cache_source,\n 
cache_entry_keys=cache_entry_keys))\n elif self._output_cache_dir is not None:\n input_cache = {}\n else:\n # Using None here to indicate that this pipeline will not read or write\n # cache.\n input_cache = None\n\n if input_cache is None:\n # Cache is disabled so we won't be filtering out any datasets, and will\n # always perform a flatten over all of them.\n filtered_analysis_dataset_keys = dataset_keys_list\n else:\n filtered_analysis_dataset_keys = (\n tft_beam.analysis_graph_builder.get_analysis_dataset_keys(\n self._preprocessing_fn, self._feature_spec_or_typespec,\n dataset_keys_list, input_cache, self._force_tf_compat_v1))\n\n new_analyze_data_dict = {}\n for dataset in self._analyze_data_list:\n if dataset.dataset_key in filtered_analysis_dataset_keys:\n new_analyze_data_dict[dataset.dataset_key] = dataset\n else:\n new_analyze_data_dict[dataset.dataset_key] = None\n\n return (new_analyze_data_dict, input_cache)\n\n def _MaybeBindCustomConfig(self, inputs: Mapping[Text, Any],\n fn: Any) -> Callable[..., Any]:\n # For compatibility, only bind custom config if it's in the signature.\n if value_utils.FunctionHasArg(fn, labels.CUSTOM_CONFIG):\n custom_config_json = value_utils.GetSoleValue(inputs,\n labels.CUSTOM_CONFIG)\n custom_config = (json_utils.loads(custom_config_json)\n if custom_config_json else {}) or {}\n fn = functools.partial(fn, custom_config=custom_config)\n return fn\n\n def _GetPreprocessingFn(\n self, inputs: Mapping[Text, Any],\n unused_outputs: Mapping[Text, Any]) -> Callable[..., Any]:\n \"\"\"Returns a user defined preprocessing_fn.\n\n If a custom config is provided in inputs, and also needed in\n preprocessing_fn, bind it to preprocessing_fn.\n\n Args:\n inputs: A dictionary of labelled input values.\n unused_outputs: A dictionary of labelled output values.\n\n Returns:\n User defined function, optionally bound with a custom config.\n\n Raises:\n ValueError: When neither or both of MODULE_FILE and PREPROCESSING_FN\n are present in inputs.\n \"\"\"\n has_module_file = bool(\n value_utils.GetSoleValue(inputs, labels.MODULE_FILE, strict=False))\n has_module_path = bool(\n value_utils.GetSoleValue(inputs, labels.MODULE_PATH, strict=False))\n has_preprocessing_fn = bool(\n value_utils.GetSoleValue(inputs, labels.PREPROCESSING_FN, strict=False))\n\n if (int(has_module_file) + int(has_module_path) +\n int(has_preprocessing_fn)) != 1:\n raise ValueError(\n 'Exactly one of MODULE_FILE, MODULE_PATH or PREPROCESSING_FN should '\n 'be supplied in inputs.')\n\n fn = udf_utils.get_fn(\n {\n standard_component_specs.MODULE_FILE_KEY:\n value_utils.GetSoleValue(\n inputs, labels.MODULE_FILE, strict=False),\n standard_component_specs.MODULE_PATH_KEY:\n value_utils.GetSoleValue(\n inputs, labels.MODULE_PATH, strict=False),\n standard_component_specs.PREPROCESSING_FN_KEY:\n value_utils.GetSoleValue(\n inputs, labels.PREPROCESSING_FN, strict=False),\n }, standard_component_specs.PREPROCESSING_FN_KEY)\n\n return self._MaybeBindCustomConfig(inputs, fn)\n\n def _GetStatsOptionsUpdaterFn(\n self, inputs: Mapping[Text, Any]\n ) -> Optional[Callable[[stats_options_util.StatsType, tfdv.StatsOptions],\n tfdv.StatsOptions]]:\n \"\"\"Returns the user-defined stats_options_updater_fn.\n\n If a custom config is provided in inputs, and also needed in\n stats_options_updater_fn, bind it to stats_options_updater_fn.\n\n Args:\n inputs: A dictionary of labelled input values.\n\n Returns:\n User defined function, optionally bound with a custom config.\n \"\"\"\n has_module_file = bool(\n 
value_utils.GetSoleValue(inputs, labels.MODULE_FILE, strict=False))\n has_module_path = bool(\n value_utils.GetSoleValue(inputs, labels.MODULE_PATH, strict=False))\n\n if has_module_file and has_module_path:\n raise ValueError(\n 'At most one of MODULE_FILE or MODULE_PATH should be '\n 'supplied in inputs.')\n\n if not has_module_file and not has_module_path:\n return None\n\n fn = udf_utils.try_get_fn(\n {\n standard_component_specs.MODULE_FILE_KEY:\n value_utils.GetSoleValue(\n inputs, labels.MODULE_FILE, strict=False),\n standard_component_specs.MODULE_PATH_KEY:\n value_utils.GetSoleValue(\n inputs, labels.MODULE_PATH, strict=False),\n }, standard_component_specs.STATS_OPTIONS_UPDATER_FN_KEY)\n if fn is None:\n return fn\n return self._MaybeBindCustomConfig(inputs, fn)\n\n # TODO(b/122478841): Refine this API in following cls.\n # Note: This API is up to change.\n def Transform(self, inputs: Mapping[Text, Any], outputs: Mapping[Text, Any],\n status_file: Text) -> None:\n \"\"\"Executes on request.\n\n This is the implementation part of transform executor. This is intended for\n using or extending the executor without artifact dependency.\n\n Args:\n inputs: A dictionary of labelled input values, including:\n - labels.DISABLE_STATISTICS_LABEL: Whether disable statistics\n compuatation.\n - labels.SCHEMA_PATH_LABEL: Path to schema file.\n - labels.EXAMPLES_DATA_FORMAT_LABEL: Example data format, one of the\n enums from example_gen_pb2.PayloadFormat.\n - labels.ANALYZE_DATA_PATHS_LABEL: Paths or path patterns to analyze\n data.\n - labels.ANALYZE_PATHS_FILE_FORMATS_LABEL: File formats of paths to\n analyze data.\n - labels.TRANSFORM_DATA_PATHS_LABEL: Paths or path patterns to transform\n data.\n - labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL: File formats of paths to\n transform data.\n - labels.MODULE_FILE: Path to a Python module that contains the\n preprocessing_fn, optional.\n - labels.MODULE_PATH: Python module path that contains the\n preprocessing_fn, optional.\n - labels.PREPROCESSING_FN: Path to a Python function that implements\n preprocessing_fn, optional.\n - labels.CUSTOM_CONFIG: Dictionary of additional parameters for\n preprocessing_fn, optional.\n - labels.DATA_VIEW_LABEL: DataView to be used to read the Example,\n optional\n - labels.FORCE_TF_COMPAT_V1_LABEL: Whether to use TF in compat.v1 mode\n irrespective of installed/enabled TF behaviors.\n outputs: A dictionary of labelled output values, including:\n - labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: Paths to statistics output,\n optional.\n - labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: A path to\n TFTransformOutput output.\n - labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: Paths to transform\n materialization.\n - labels.TEMP_OUTPUT_LABEL: A path to temporary directory.\n status_file: Where the status should be written (not yet implemented)\n \"\"\"\n del status_file # unused\n\n absl.logging.debug(\n 'Inputs to executor.Transform function: {}'.format(inputs))\n absl.logging.debug(\n 'Outputs to executor.Transform function: {}'.format(outputs))\n\n disable_statistics = value_utils.GetSoleValue(\n inputs, labels.DISABLE_STATISTICS_LABEL)\n transform_output_path = value_utils.GetSoleValue(\n outputs, labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL)\n raw_examples_data_format = value_utils.GetSoleValue(\n inputs, labels.EXAMPLES_DATA_FORMAT_LABEL)\n schema = value_utils.GetSoleValue(inputs, labels.SCHEMA_PATH_LABEL)\n input_dataset_metadata = self._ReadMetadata(raw_examples_data_format,\n schema)\n materialize_output_paths = 
value_utils.GetValues(\n outputs, labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL)\n preprocessing_fn = self._GetPreprocessingFn(inputs, outputs)\n stats_options_updater_fn = self._GetStatsOptionsUpdaterFn(inputs)\n per_set_stats_output_paths = value_utils.GetValues(\n outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL)\n analyze_data_paths = value_utils.GetValues(inputs,\n labels.ANALYZE_DATA_PATHS_LABEL)\n analyze_paths_file_formats = value_utils.GetValues(\n inputs, labels.ANALYZE_PATHS_FILE_FORMATS_LABEL)\n transform_data_paths = value_utils.GetValues(\n inputs, labels.TRANSFORM_DATA_PATHS_LABEL)\n transform_paths_file_formats = value_utils.GetValues(\n inputs, labels.TRANSFORM_PATHS_FILE_FORMATS_LABEL)\n input_cache_dir = value_utils.GetSoleValue(\n inputs, labels.CACHE_INPUT_PATH_LABEL, strict=False)\n output_cache_dir = value_utils.GetSoleValue(\n outputs, labels.CACHE_OUTPUT_PATH_LABEL, strict=False)\n per_set_stats_output_paths = value_utils.GetValues(\n outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL)\n temp_path = value_utils.GetSoleValue(outputs, labels.TEMP_OUTPUT_LABEL)\n data_view_uri = value_utils.GetSoleValue(\n inputs, labels.DATA_VIEW_LABEL, strict=False)\n force_tf_compat_v1 = value_utils.GetSoleValue(\n inputs, labels.FORCE_TF_COMPAT_V1_LABEL)\n\n absl.logging.debug('Force tf.compat.v1: %s', force_tf_compat_v1)\n absl.logging.debug('Analyze data patterns: %s',\n list(enumerate(analyze_data_paths)))\n absl.logging.debug('Transform data patterns: %s',\n list(enumerate(transform_data_paths)))\n absl.logging.debug('Transform materialization output paths: %s',\n list(enumerate(materialize_output_paths)))\n absl.logging.debug('Transform output path: %s', transform_output_path)\n\n if len(analyze_data_paths) != len(analyze_paths_file_formats):\n raise ValueError(\n 'size of analyze_data_paths and '\n 'analyze_paths_file_formats do not match: {} v.s {}'.format(\n len(analyze_data_paths), len(analyze_paths_file_formats)))\n if len(transform_data_paths) != len(transform_paths_file_formats):\n raise ValueError(\n 'size of transform_data_paths and '\n 'transform_paths_file_formats do not match: {} v.s {}'.format(\n len(transform_data_paths), len(transform_paths_file_formats)))\n\n can_process_analysis_jointly = not bool(output_cache_dir)\n analyze_data_list = self._MakeDatasetList(analyze_data_paths,\n analyze_paths_file_formats,\n raw_examples_data_format,\n data_view_uri,\n can_process_analysis_jointly)\n if not analyze_data_list:\n raise ValueError('Analyze data list must not be empty.')\n\n can_process_transform_jointly = not bool(per_set_stats_output_paths or\n materialize_output_paths)\n transform_data_list = self._MakeDatasetList(transform_data_paths,\n transform_paths_file_formats,\n raw_examples_data_format,\n data_view_uri,\n can_process_transform_jointly,\n per_set_stats_output_paths,\n materialize_output_paths)\n\n all_datasets = analyze_data_list + transform_data_list\n for d in all_datasets:\n d.tfxio = self._CreateTFXIO(d, input_dataset_metadata.schema)\n self._AssertSameTFXIOSchema(all_datasets)\n typespecs = all_datasets[0].tfxio.TensorAdapter().OriginalTypeSpecs()\n\n # Inspecting the preprocessing_fn even if we know we need a full pass in\n # order to fail faster if it fails.\n analyze_input_columns = tft.get_analyze_input_columns(\n preprocessing_fn, typespecs, force_tf_compat_v1=force_tf_compat_v1)\n\n if (disable_statistics and not materialize_output_paths and\n stats_options_updater_fn is None):\n if analyze_input_columns:\n absl.logging.warning(\n 'Not using 
the in-place Transform because the following features '\n 'require analyzing: {}'.format(\n tuple(c for c in analyze_input_columns)))\n else:\n absl.logging.warning(\n 'Using the in-place Transform since disable_statistics=True, '\n 'it does not materialize transformed data, and the configured '\n 'preprocessing_fn appears to not require analyzing the data.')\n self._RunInPlaceImpl(preprocessing_fn, force_tf_compat_v1,\n input_dataset_metadata, typespecs,\n transform_output_path)\n # TODO(b/122478841): Writes status to status file.\n return\n\n stats_options_updater_fn = (stats_options_updater_fn\n if stats_options_updater_fn else lambda _, x: x)\n\n materialization_format = (\n transform_paths_file_formats[-1] if materialize_output_paths else None)\n self._RunBeamImpl(analyze_data_list, transform_data_list, preprocessing_fn,\n stats_options_updater_fn, force_tf_compat_v1,\n input_dataset_metadata, transform_output_path,\n raw_examples_data_format, temp_path, input_cache_dir,\n output_cache_dir, disable_statistics,\n per_set_stats_output_paths, materialization_format,\n len(analyze_data_paths))\n # TODO(b/122478841): Writes status to status file.\n\n # pylint: disable=expression-not-assigned, no-value-for-parameter\n def _RunBeamImpl(self, analyze_data_list: List[_Dataset],\n transform_data_list: List[_Dataset], preprocessing_fn: Any,\n stats_options_updater_fn: Callable[\n [stats_options_util.StatsType, tfdv.StatsOptions],\n tfdv.StatsOptions], force_tf_compat_v1: bool,\n input_dataset_metadata: dataset_metadata.DatasetMetadata,\n transform_output_path: Text, raw_examples_data_format: int,\n temp_path: Text, input_cache_dir: Optional[Text],\n output_cache_dir: Optional[Text], disable_statistics: bool,\n per_set_stats_output_paths: Sequence[Text],\n materialization_format: Optional[Text],\n analyze_paths_count: int) -> _Status:\n \"\"\"Perform data preprocessing with TFT.\n\n Args:\n analyze_data_list: List of datasets for analysis.\n transform_data_list: List of datasets for transform.\n preprocessing_fn: The tf.Transform preprocessing_fn.\n stats_options_updater_fn: The user-specified function for updating stats\n options.\n force_tf_compat_v1: If True, call Transform's API to use Tensorflow in\n tf.compat.v1 mode.\n input_dataset_metadata: A DatasetMetadata object for the input data.\n transform_output_path: An absolute path to write the output to.\n raw_examples_data_format: The data format of the raw examples. One of the\n enums from example_gen_pb2.PayloadFormat.\n temp_path: A path to a temporary dir.\n input_cache_dir: A dir containing the input analysis cache. May be None.\n output_cache_dir: A dir to write the analysis cache to. May be None.\n disable_statistics: A bool indicating whether or to disable statistics.\n per_set_stats_output_paths: Paths to per-set statistics output. 
If empty,\n per-set statistics is not produced.\n materialization_format: A string describing the format of the materialized\n data or None if materialization is not enabled.\n analyze_paths_count: An integer, the number of paths that should be used\n for analysis.\n\n Returns:\n Status of the execution.\n \"\"\"\n self._AssertSameTFXIOSchema(analyze_data_list)\n unprojected_typespecs = (\n analyze_data_list[0].tfxio.TensorAdapter().OriginalTypeSpecs())\n\n analyze_input_columns = tft.get_analyze_input_columns(\n preprocessing_fn,\n unprojected_typespecs,\n force_tf_compat_v1=force_tf_compat_v1)\n analyze_columns_count = len(analyze_input_columns)\n\n transform_input_columns = tft.get_transform_input_columns(\n preprocessing_fn,\n unprojected_typespecs,\n force_tf_compat_v1=force_tf_compat_v1)\n # Use the same dataset (same columns) for AnalyzeDataset and computing\n # pre-transform stats so that the data will only be read once for these\n # two operations.\n if not disable_statistics:\n analyze_input_columns = list(\n set(list(analyze_input_columns) + list(transform_input_columns)))\n\n for d in analyze_data_list:\n d.tfxio = d.tfxio.Project(analyze_input_columns)\n\n self._AssertSameTFXIOSchema(analyze_data_list)\n analyze_data_tensor_adapter_config = (\n analyze_data_list[0].tfxio.TensorAdapterConfig())\n\n for d in transform_data_list:\n d.tfxio = d.tfxio.Project(transform_input_columns)\n\n desired_batch_size = self._GetDesiredBatchSize(raw_examples_data_format)\n\n # TempPipInstallContext is needed here so that subprocesses (which\n # may be created by the Beam multi-process DirectRunner) can find the\n # needed dependencies.\n # TODO(b/187122662): Move this to the ExecutorOperator or Launcher and\n # remove the `_pip_dependencies` attribute.\n with udf_utils.TempPipInstallContext(self._pip_dependencies):\n with self._CreatePipeline(transform_output_path) as pipeline:\n with tft_beam.Context(\n temp_dir=temp_path,\n desired_batch_size=desired_batch_size,\n passthrough_keys=self._GetTFXIOPassthroughKeys(),\n use_deep_copy_optimization=True,\n force_tf_compat_v1=force_tf_compat_v1):\n (new_analyze_data_dict, input_cache) = (\n pipeline\n | 'OptimizeRun' >> self._OptimizeRun(\n input_cache_dir, output_cache_dir,\n analyze_data_list, unprojected_typespecs, preprocessing_fn,\n self._GetCacheSource(), force_tf_compat_v1))\n\n _ = (\n pipeline\n | 'IncrementPipelineMetrics' >> self._IncrementPipelineMetrics(\n total_columns_count=len(unprojected_typespecs),\n analyze_columns_count=analyze_columns_count,\n transform_columns_count=len(transform_input_columns),\n analyze_paths_count=analyze_paths_count,\n analyzer_cache_enabled=input_cache is not None,\n disable_statistics=disable_statistics,\n materialize=materialization_format is not None))\n\n if input_cache:\n absl.logging.debug('Analyzing data with cache.')\n\n full_analyze_dataset_keys_list = [\n dataset.dataset_key for dataset in analyze_data_list\n ]\n\n # Removing unneeded datasets if they won't be needed for statistics or\n # materialization.\n if materialization_format is None and disable_statistics:\n if None in new_analyze_data_dict.values():\n absl.logging.debug(\n 'Not reading the following datasets due to cache: %s', [\n dataset.file_pattern\n for dataset in analyze_data_list\n if new_analyze_data_dict[dataset.dataset_key] is None\n ])\n analyze_data_list = [\n d for d in new_analyze_data_dict.values() if d is not None\n ]\n\n for dataset in analyze_data_list:\n infix = 'AnalysisIndex{}'.format(dataset.index)\n 
dataset.standardized = (\n pipeline\n | 'TFXIOReadAndDecode[{}]'.format(infix) >>\n dataset.tfxio.BeamSource(desired_batch_size))\n\n input_analysis_data = {}\n for key, dataset in new_analyze_data_dict.items():\n input_analysis_data[key] = (None if dataset is None else\n dataset.standardized)\n\n transform_fn, cache_output = (\n (input_analysis_data, input_cache,\n analyze_data_tensor_adapter_config)\n | 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(\n preprocessing_fn, pipeline=pipeline))\n\n # Write the raw/input metadata.\n (input_dataset_metadata\n | 'WriteMetadata' >> tft_beam.WriteMetadata(\n os.path.join(transform_output_path,\n tft.TFTransformOutput.RAW_METADATA_DIR), pipeline))\n\n # WriteTransformFn writes transform_fn and metadata to subdirectories\n # tensorflow_transform.SAVED_MODEL_DIR and\n # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.\n completed_transform = (\n transform_fn\n | 'WriteTransformFn' >>\n tft_beam.WriteTransformFn(transform_output_path))\n\n if output_cache_dir is not None and cache_output is not None:\n fileio.makedirs(output_cache_dir)\n absl.logging.debug('Using existing cache in: %s', input_cache_dir)\n if input_cache_dir is not None:\n # Only copy cache that is relevant to this iteration. This is\n # assuming that this pipeline operates on rolling ranges, so those\n # cache entries may also be relevant for future iterations.\n for span_cache_dir in input_analysis_data:\n full_span_cache_dir = os.path.join(input_cache_dir,\n span_cache_dir.key)\n if fileio.isdir(full_span_cache_dir):\n self._CopyCache(\n full_span_cache_dir,\n os.path.join(output_cache_dir, span_cache_dir.key))\n\n # TODO(b/157479287, b/171165988): Remove this condition when beam\n # 2.26 is used.\n if cache_output:\n (cache_output\n | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(\n pipeline=pipeline,\n cache_base_dir=output_cache_dir,\n sink=self._GetCacheSink(),\n dataset_keys=full_analyze_dataset_keys_list))\n\n if not disable_statistics or materialization_format is not None:\n # Do not compute pre-transform stats if the input format is raw\n # proto, as StatsGen would treat any input as tf.Example. 
Note that\n # tf.SequenceExamples are wire-format compatible with tf.Examples.\n if (not disable_statistics and\n not self._IsDataFormatProto(raw_examples_data_format)):\n # Aggregated feature stats before transformation.\n pre_transform_feature_stats_path = os.path.join(\n transform_output_path,\n tft.TFTransformOutput.PRE_TRANSFORM_FEATURE_STATS_PATH)\n\n if self._IsDataFormatSequenceExample(raw_examples_data_format):\n schema_proto = None\n else:\n schema_proto = input_dataset_metadata.schema\n\n if self._IsDataFormatSequenceExample(raw_examples_data_format):\n\n def _ExtractRawExampleBatches(record_batch):\n return record_batch.column(\n record_batch.schema.get_field_index(\n RAW_EXAMPLE_KEY)).flatten().to_pylist()\n\n # Make use of the fact that tf.SequenceExample is wire-format\n # compatible with tf.Example\n stats_input = []\n for dataset in analyze_data_list:\n infix = 'AnalysisIndex{}'.format(dataset.index)\n stats_input.append(\n dataset.standardized\n | 'ExtractRawExampleBatches[{}]'.format(\n infix) >> beam.Map(_ExtractRawExampleBatches)\n | 'DecodeSequenceExamplesAsExamplesIntoRecordBatches[{}]'\n .format(infix) >> beam.ParDo(\n self._ToArrowRecordBatchesFn(schema_proto)))\n else:\n stats_input = [\n dataset.standardized for dataset in analyze_data_list\n ]\n\n pre_transform_stats_options = _InvokeStatsOptionsUpdaterFn(\n stats_options_updater_fn,\n stats_options_util.StatsType.PRE_TRANSFORM, schema_proto)\n pre_transform_stats_options.experimental_use_sketch_based_topk_uniques = (\n self._TfdvUseSketchBasedTopKUniques())\n\n (stats_input\n | 'FlattenAnalysisDatasets' >> beam.Flatten(pipeline=pipeline)\n | 'GenerateStats[FlattenedAnalysisDataset]' >>\n self._GenerateAndMaybeValidateStats(\n pre_transform_feature_stats_path,\n stats_options=pre_transform_stats_options,\n enable_validation=False))\n\n # transform_data_list is a superset of analyze_data_list, we pay the\n # cost to read the same dataset (analyze_data_list) again here to\n # prevent certain beam runner from doing large temp materialization.\n for dataset in transform_data_list:\n infix = 'TransformIndex{}'.format(dataset.index)\n dataset.standardized = (\n pipeline | 'TFXIOReadAndDecode[{}]'.format(infix) >>\n dataset.tfxio.BeamSource(desired_batch_size))\n (dataset.transformed, metadata) = (\n ((dataset.standardized, dataset.tfxio.TensorAdapterConfig()),\n transform_fn)\n | 'Transform[{}]'.format(infix) >>\n tft_beam.TransformDataset(output_record_batches=True))\n\n if not disable_statistics:\n # Aggregated feature stats after transformation.\n _, metadata = transform_fn\n\n # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in\n # schema. 
Currently input dataset schema only contains dtypes,\n # and other metadata is dropped due to roundtrip to tensors.\n transformed_schema_proto = metadata.schema\n\n for dataset in transform_data_list:\n infix = 'TransformIndex{}'.format(dataset.index)\n dataset.transformed_and_standardized = (\n dataset.transformed\n | 'ExtractRecordBatches[{}]'.format(infix) >> beam.Keys())\n\n post_transform_feature_stats_path = os.path.join(\n transform_output_path,\n tft.TFTransformOutput.POST_TRANSFORM_FEATURE_STATS_PATH)\n\n post_transform_stats_options = _InvokeStatsOptionsUpdaterFn(\n stats_options_updater_fn,\n stats_options_util.StatsType.POST_TRANSFORM,\n transformed_schema_proto, metadata.asset_map,\n transform_output_path)\n\n post_transform_stats_options.experimental_use_sketch_based_topk_uniques = (\n self._TfdvUseSketchBasedTopKUniques())\n ([\n dataset.transformed_and_standardized\n for dataset in transform_data_list\n ]\n | 'FlattenTransformedDatasets' >> beam.Flatten(pipeline=pipeline)\n | 'WaitForTransformWrite' >> beam.Map(\n lambda x, completion: x,\n completion=beam.pvalue.AsSingleton(completed_transform))\n | 'GenerateAndValidateStats[FlattenedTransformedDatasets]' >>\n self._GenerateAndMaybeValidateStats(\n post_transform_feature_stats_path,\n stats_options=post_transform_stats_options,\n enable_validation=True))\n\n if per_set_stats_output_paths:\n # TODO(b/130885503): Remove duplicate stats gen compute that is\n # done both on a flattened view of the data, and on each span\n # below.\n for dataset in transform_data_list:\n infix = 'TransformIndex{}'.format(dataset.index)\n (dataset.transformed_and_standardized\n | 'WaitForTransformWrite[{}]'.format(infix) >> beam.Map(\n lambda x, completion: x,\n completion=beam.pvalue.AsSingleton(completed_transform))\n | 'GenerateAndValidateStats[{}]'.format(infix) >>\n self._GenerateAndMaybeValidateStats(\n dataset.stats_output_path,\n stats_options=post_transform_stats_options,\n enable_validation=True))\n\n if materialization_format is not None:\n for dataset in transform_data_list:\n infix = 'TransformIndex{}'.format(dataset.index)\n (dataset.transformed\n | 'EncodeAndSerialize[{}]'.format(infix) >> beam.FlatMap(\n Executor._RecordBatchToExamples)\n | 'Materialize[{}]'.format(infix) >> self._WriteExamples(\n materialization_format, dataset.materialize_output_path))\n\n return _Status.OK()\n # pylint: enable=expression-not-assigned, no-value-for-parameter\n\n def _RunInPlaceImpl(self, preprocessing_fn: Any, force_tf_compat_v1: bool,\n metadata: dataset_metadata.DatasetMetadata,\n typespecs: Dict[Text, tf.TypeSpec],\n transform_output_path: Text) -> _Status:\n \"\"\"Runs a transformation iteration in-place without looking at the data.\n\n Args:\n preprocessing_fn: The tf.Transform preprocessing_fn.\n force_tf_compat_v1: If True, call Transform's API to use Tensorflow in\n tf.compat.v1 mode.\n metadata: A DatasetMetadata object for the input data.\n typespecs: a Dict[Text, tf.TypeSpec]\n transform_output_path: An absolute path to write the output to.\n\n Returns:\n Status of the execution.\n \"\"\"\n\n absl.logging.debug('Processing an in-place transform')\n\n raw_metadata_dir = os.path.join(transform_output_path,\n tft.TFTransformOutput.RAW_METADATA_DIR)\n metadata_io.write_metadata(metadata, raw_metadata_dir)\n # TODO(b/149997088): Use typespecs for the tf.compat.v1 path as well.\n feature_specs = schema_utils.schema_as_feature_spec(\n metadata.schema).feature_spec\n impl_helper.analyze_in_place(preprocessing_fn, force_tf_compat_v1,\n 
feature_specs, typespecs,\n transform_output_path)\n\n return _Status.OK()\n\n def _CreatePipeline(\n self, unused_transform_output_path: Text) -> beam.Pipeline:\n \"\"\"Creates beam pipeline.\n\n Args:\n unused_transform_output_path: unused.\n\n Returns:\n Beam pipeline.\n \"\"\"\n return self._make_beam_pipeline()\n\n # TODO(b/114444977): Remove the unused can_process_jointly argument.\n def _MakeDatasetList(\n self,\n file_patterns: Sequence[Union[Text, int]],\n file_formats: Sequence[Union[Text, int]],\n data_format: int,\n data_view_uri: Optional[Text],\n can_process_jointly: bool,\n stats_output_paths: Optional[Sequence[Text]] = None,\n materialize_output_paths: Optional[Sequence[Text]] = None,\n ) -> List[_Dataset]:\n \"\"\"Makes a list of Dataset from the given `file_patterns`.\n\n Args:\n file_patterns: A list of file patterns where each pattern corresponds to\n one `_Dataset`.\n file_formats: A list of file format where each format corresponds to one\n `_Dataset`. Must have the same size as `file_patterns`.\n data_format: The data format of the datasets. One of the enums from\n example_gen_pb2.PayloadFormat.\n data_view_uri: URI to the DataView to be used to parse the data.\n can_process_jointly: Whether paths can be processed jointly, unused.\n stats_output_paths: The statistics output paths, if applicable.\n materialize_output_paths: The materialization output paths, if applicable.\n\n Returns:\n A list of `_Dataset` sorted by their dataset_key property.\n \"\"\"\n assert len(file_patterns) == len(file_formats)\n if stats_output_paths:\n assert len(file_patterns) == len(stats_output_paths)\n else:\n stats_output_paths = [None] * len(file_patterns)\n if materialize_output_paths:\n assert len(file_patterns) == len(materialize_output_paths)\n else:\n materialize_output_paths = [None] * len(file_patterns)\n\n datasets = [\n _Dataset(p, f, data_format, data_view_uri, s, m)\n for p, f, s, m in zip(file_patterns, file_formats, stats_output_paths,\n materialize_output_paths)\n ]\n result = sorted(datasets, key=lambda dataset: dataset.dataset_key)\n for index, dataset in enumerate(result):\n dataset.index = index\n return result\n\n def _ShouldDecodeAsRawExample(self, data_format: int,\n data_view_uri: Optional[Text]) -> bool:\n \"\"\"Returns true if data format should be decoded as raw example.\n\n Args:\n data_format: One of the enums from example_gen_pb2.PayloadFormat.\n data_view_uri: URI to the DataView to be used to parse the data.\n\n Returns:\n True if data format should be decoded as raw example.\n \"\"\"\n return (self._IsDataFormatSequenceExample(data_format) or\n (self._IsDataFormatProto(data_format) and data_view_uri is None))\n\n @staticmethod\n def _IsDataFormatSequenceExample(data_format: int) -> bool:\n \"\"\"Returns true if data format is sequence example.\n\n Args:\n data_format: One of the enums from example_gen_pb2.PayloadFormat.\n\n Returns:\n True if data format is sequence example.\n \"\"\"\n return data_format == example_gen_pb2.FORMAT_TF_SEQUENCE_EXAMPLE\n\n @staticmethod\n def _IsDataFormatProto(data_format: int) -> bool:\n \"\"\"Returns true if data format is protocol buffer.\n\n Args:\n data_format: One of the enums from example_gen_pb2.PayloadFormat.\n\n Returns:\n True if data format is protocol buffer.\n \"\"\"\n return data_format == example_gen_pb2.FORMAT_PROTO\n\n def _GetDesiredBatchSize(self, data_format: int) -> Optional[int]:\n \"\"\"Returns batch size.\n\n Args:\n data_format: One of the enums from example_gen_pb2.PayloadFormat.\n\n Returns:\n 
Batch size or None.\n \"\"\"\n if self._IsDataFormatSequenceExample(data_format):\n return 1\n return None\n\n @staticmethod\n def _GetCacheSource():\n return None\n\n @staticmethod\n def _GetCacheSink():\n return None\n\n @staticmethod\n def _CopyCache(src, dst):\n # TODO(b/37788560): Make this more efficient.\n io_utils.copy_dir(src, dst)\n\n def _CreateTFXIO(self, dataset: _Dataset,\n schema: schema_pb2.Schema) -> tfxio_module.TFXIO:\n \"\"\"Creates a TFXIO instance for `dataset`.\"\"\"\n read_as_raw_records = self._ShouldDecodeAsRawExample(\n dataset.data_format, dataset.data_view_uri)\n return tfxio_utils.make_tfxio(\n file_pattern=dataset.file_pattern,\n telemetry_descriptors=_TELEMETRY_DESCRIPTORS,\n payload_format=dataset.data_format,\n data_view_uri=dataset.data_view_uri,\n schema=schema,\n read_as_raw_records=read_as_raw_records)\n\n def _AssertSameTFXIOSchema(self, datasets: Sequence[_Dataset]) -> None:\n if not datasets:\n return\n for dataset in datasets[1:]:\n assert (datasets[0].tfxio.ArrowSchema().equals(\n dataset.tfxio.ArrowSchema()))\n\n @staticmethod\n def _GetTFXIOPassthroughKeys() -> Optional[Set[Text]]:\n \"\"\"Always returns None.\"\"\"\n return None\n\n @staticmethod\n def _FilterInternalColumn(\n record_batch: pa.RecordBatch,\n internal_column_index: Optional[int] = None) -> pa.RecordBatch:\n \"\"\"Returns shallow copy of a batch with internal column removed.\"\"\"\n if (internal_column_index is None and\n _TRANSFORM_INTERNAL_FEATURE_FOR_KEY not in record_batch.schema.names):\n return record_batch\n else:\n internal_column_index = (\n internal_column_index or\n record_batch.schema.names.index(_TRANSFORM_INTERNAL_FEATURE_FOR_KEY))\n # Making shallow copy since input modification is not allowed.\n filtered_columns = list(record_batch.columns)\n filtered_columns.pop(internal_column_index)\n filtered_schema = record_batch.schema.remove(internal_column_index)\n return pa.RecordBatch.from_arrays(\n filtered_columns, schema=filtered_schema)\n\n @staticmethod\n def _RecordBatchToExamples(\n data_batch: Tuple[pa.RecordBatch, Dict[str, pa.Array]]\n ) -> Generator[Tuple[Any, bytes], None, None]:\n \"\"\"Maps `pa.RecordBatch` to a generator of serialized `tf.Example`s.\"\"\"\n record_batch, unary_passthrough_features = data_batch\n if _TRANSFORM_INTERNAL_FEATURE_FOR_KEY in record_batch.schema.names:\n keys_index = record_batch.schema.names.index(\n _TRANSFORM_INTERNAL_FEATURE_FOR_KEY)\n keys = record_batch.column(keys_index).to_pylist()\n # Filter the record batch to make sure that the internal column doesn't\n # get encoded.\n record_batch = Executor._FilterInternalColumn(record_batch, keys_index)\n examples = tfx_bsl.coders.example_coder.RecordBatchToExamples(\n record_batch)\n for key, example in zip(keys, examples):\n yield (None if key is None else key[0], example)\n else:\n # Internal feature key is not present in the record batch but may be\n # present in the unary pass-through features dict.\n key = unary_passthrough_features.get(_TRANSFORM_INTERNAL_FEATURE_FOR_KEY,\n None)\n if key is not None:\n key = None if key.to_pylist()[0] is None else key.to_pylist()[0][0]\n examples = tfx_bsl.coders.example_coder.RecordBatchToExamples(\n record_batch)\n for example in examples:\n yield (key, example)\n\n # TODO(b/130885503): clean this up once the sketch-based generator is the\n # default.\n @staticmethod\n def _TfdvUseSketchBasedTopKUniques():\n return False\n"
] | [
[
"tensorflow.io.FixedLenFeature"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pasinb/tensorflow_object_counting_api | [
"db7f2f985783220311d11075af594ced7693f7d2"
] | [
"smurf_counting.py"
] | [
"#----------------------------------------------\n#--- Author : Ahmet Ozlu\n#--- Mail : [email protected]\n#--- Date : 27th July 2019\n#----------------------------------------------\n\n# Imports\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n# Object detection imports\nfrom utils import backbone\nfrom api import object_counting_api\n\ninput_video = \"./input_images_and_videos/smurf_input.avi\"\n\n# By default I use an \"SSD with Mobilenet\" model here. See the detection model zoo (https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\ndetection_graph, category_index = backbone.set_model('custom_frozen_inference_graph', 'detection.pbtxt')\n\nis_color_recognition_enabled = False # set it to true for enabling the color prediction for the detected objects\n\nobject_counting_api.object_counting(input_video, detection_graph, category_index, is_color_recognition_enabled)\n"
] | [
[
"tensorflow.compat.v1.disable_v2_behavior"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cvmlarun/RANet | [
"3f67a3f36aaacd9cc7fb98ec79f77db8f1ebdc60"
] | [
"codes/RANet_model.py"
] | [
"# ************************************\n# Author: Ziqin Wang\n# Email: [email protected]\n# Github: https://github.com/Storife\n# ************************************\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom numpy.random import normal\nfrom numpy.linalg import svd\nfrom math import sqrt\nfrom torch.nn import functional as f\nfrom torch.autograd import Variable\nimport random\nfrom torch.nn import DataParallel as DP\nfrom RANet_lib.RANet_Model_imagenet import *\nimport time\nfrom typing import List\n\ndef make_layer2(input_feature, out_feature, up_scale=1, ksize=3, d=1, groups=1):\n p = int((ksize - 1) / 2)\n if up_scale == 1:\n return nn.Sequential(\n nn.InstanceNorm2d(input_feature),\n nn.ReLU(),\n nn.Conv2d(input_feature, out_feature, ksize, padding=p, dilation=d, groups=groups),\n )\n return nn.Sequential(\n nn.InstanceNorm2d(input_feature),\n nn.ReLU(),\n nn.Conv2d(input_feature, out_feature, ksize, padding=p),\n nn.UpsamplingBilinear2d(scale_factor=up_scale),\n )\n\nclass ResBlock2(nn.Module):\n def __init__(self, input_feature, planes, dilated=1, group=1):\n super(ResBlock2, self).__init__()\n self.conv1 = nn.Conv2d(input_feature, planes, kernel_size=1, bias=False, groups=group)\n self.bn1 = nn.InstanceNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1 * dilated, bias=False, dilation=dilated, groups=group)\n self.bn2 = nn.InstanceNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, input_feature, kernel_size=1, bias=False, groups=group)\n self.bn3 = nn.InstanceNorm2d(input_feature)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass ResBlock_f(nn.Module):\n def __init__(self, input_feature, planes, dilated=1, group=1):\n super(ResBlock2, self).__init__()\n self.dilated = dilated\n self.conv1 = nn.Conv2d(input_feature, planes, kernel_size=1, bias=False, groups=group)\n self.bn1 = nn.InstanceNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False, groups=group)\n self.bn2 = nn.InstanceNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, input_feature, kernel_size=1, bias=False, groups=group)\n self.bn3 = nn.InstanceNorm2d(input_feature)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = f.avg_pool2d(out, self.dilated)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass MS_Block(nn.Module):\n def __init__(self, input_feature, out_feature, d=[1, 2, 4], group=1):\n super(MS_Block, self).__init__()\n self.l1 = nn.Conv2d(input_feature, out_feature, 3, padding=d[0], dilation=d[0], bias=False, groups=group)\n self.l2 = nn.Conv2d(input_feature, out_feature, 3, padding=d[1], dilation=d[1], bias=False, groups=group)\n self.l3 = nn.Conv2d(input_feature, out_feature, 3, padding=d[2], dilation=d[2], bias=False, groups=group)\n def forward(self, x):\n out = self.l1(x) + self.l2(x) + self.l3(x)\n return out\n\n\nclass RANet(ResNet101):\n def __init__(self, with_relu=0, pretrained=True, type='single_object'):\n super(RANet, self).__init__(with_relu=with_relu, pretrained=pretrained)\n self.fp16 = False\n 
self.net_type = type\n #self._init_net()\n self.p_1 = make_layer2(256, 256)\n self.res_1 = ResBlock2(256, 128, 1)\n self.p_2 = make_layer2(256, 128)\n\n self.p_1b = make_layer2(256, 256)\n self.res_1b = ResBlock2(256, 128, 1)\n self.p_2b = make_layer2(256, 128)\n self.ls13 = make_layer2(512, 32, up_scale=1, ksize=1)\n self.ls14 = make_layer2(1024, 16, up_scale=2, ksize=1)\n self.ls15 = make_layer2(2048, 16, up_scale=4, ksize=1)\n\n self.ls22 = make_layer2(256, 32, up_scale=1, ksize=1)\n self.ls23 = make_layer2(512, 16, up_scale=2, ksize=1)\n self.ls24 = make_layer2(1024, 16, up_scale=4, ksize=1)\n\n self.ls31 = make_layer2(64, 32, up_scale=1, ksize=1)\n self.ls32 = make_layer2(256, 16, up_scale=2, ksize=1)\n self.ls33 = make_layer2(512, 16, up_scale=4, ksize=1)\n\n self.R2 = nn.Sequential(make_layer2(128 + 64, 128),\n make_layer2(128, 64),\n MS_Block(64, 32, d=[1,3,6]),\n ResBlock2(32, 16),\n nn.UpsamplingBilinear2d(scale_factor=2))\n self.R3 = nn.Sequential(make_layer2(32 + 64, 64),\n make_layer2(64, 32),\n MS_Block(32, 16, d=[1,3,6]),\n nn.UpsamplingBilinear2d(scale_factor=2),\n ResBlock2(16, 8),\n nn.Conv2d(16, 1, 3, padding=1)\n )\n self.R1 = nn.Sequential(make_layer2(256 + 64 + 1, 256),\n make_layer2(256, 256),\n MS_Block(256, 128, d=[1,3,6]),\n ResBlock2(128, 64),\n nn.UpsamplingBilinear2d(scale_factor=2))\n self.L4 = make_layer2(1024, 256, ksize=3)\n self.L5 = make_layer2(2048, 512, ksize=3)\n self.L3 = make_layer2(512, 128, ksize=3)\n self.L_g = make_layer2(512 + 256 + 128, 512)\n self.rank_A = nn.Sequential(nn.Conv1d(2, 8, 1),\n nn.PReLU(),\n nn.Conv1d(8, 1, 1),\n nn.ReLU())\n self.Ranking = nn.Sequential(make_layer2(405, 128), ResBlock2(128, 32, 2), make_layer2(128, 1))\n # self.Ranking = nn.Sequential(nn.Conv2d(405, 16, 1),\n # nn.InstanceNorm2d(16), nn.ReLU(),\n # nn.Conv2d(16, 1, 1, bias=False), nn.ReLU())\n\n def Dtype(self, data):\n return data.float()\n \n \n\n\n def to_kernel(self, feature):\n size = feature.size()\n return feature.view(size[1], size[2] * size[3]).transpose(0, 1).unsqueeze(2).unsqueeze(3).contiguous()\n\n\n @torch.jit.export\n def corr_fun(self, Kernel_tmp, Feature):\n size = Kernel_tmp.size()\n #print(\"Feature :\", len(Feature))\n #if len(Feature) == 1:\n Kernel = Kernel_tmp.view(size[1], size[2] * size[3]).transpose(0, 1)\n Kernel = Kernel.unsqueeze(2).unsqueeze(3)\n\n corr = torch.nn.functional.conv2d(Feature, Kernel.contiguous())\n Kernel = Kernel.unsqueeze(0)\n return corr, Kernel\n \n def correlate(self, Kernel, Feature):\n corr = torch.nn.functional.conv2d(Feature, Kernel,stride=1)\n return corr\n\n def P2masks(self, P, num):\n M = []\n M.append(self.Dtype((P == 0) + (P > int(num))))\n for idx in range(1, num + 1):\n M.append(self.Dtype(P == idx))\n return M\n\n def bbox_uncrop(img, bbox, size, crop_size): # 4D input\n img = F.upsample_bilinear(img, size=crop_size[2::])\n msk = F.pad(img, (bbox[1], 864 - bbox[3], bbox[0], 480 - bbox[2],))\n return msk\n \n @torch.jit.export\n def forward_first(self, Ker):\n x2 = Ker\n base_features2 = self.res_forward(x2)\n Kernel_3 = f.normalize(f.max_pool2d(self.L3(base_features2[2]), 2))\n Kernel_4 = f.normalize(self.L4(base_features2[3]))\n Kernel_5 = f.normalize(f.upsample(self.L5(base_features2[4]), scale_factor=2, mode='bilinear'))\n Kernel_tmp = f.normalize(self.L_g(torch.cat([Kernel_3, Kernel_4, Kernel_5], dim=1)))\n\n Kernel_tmp = f.adaptive_avg_pool2d(Kernel_tmp, [15, 27])\n return [Kernel_tmp] \n \n \n @torch.jit.export\n def forward_post(self, Correlation, base_features1: List[torch.Tensor], msk_p, 
m):\n # Select FG / BG similarity maps\n h_size = 15\n w_size = 27\n c_size = h_size * w_size\n mb = self.Dtype((1 - m).ge(0.9))\n corr = Correlation * m.view(-1, c_size, 1, 1)\n corr_b = Correlation * mb.view(-1, c_size, 1, 1)\n # Ranking attention scores\n\n T_corr = f.max_pool2d(corr, 2).permute(0, 2, 3, 1).view(-1, c_size, h_size, w_size)\n T_corr_b = f.max_pool2d(corr_b, 2).permute(0, 2, 3, 1).view(-1, c_size, h_size, w_size)\n R_map = (f.relu(self.Ranking(T_corr)) * self.Dtype(m != 0)).view(-1, 1, c_size) * 0.2\n R_map_b = (f.relu(self.Ranking(T_corr_b)) * mb).view(-1, 1, c_size) * 0.2\n\n # Rank & select\n co_size = corr.size()[2::]\n max_only, indices = f.max_pool2d(corr, co_size, return_indices=True)\n max_only = max_only.view(-1, 1, c_size) + R_map\n m_sorted, m_sorted_idx = max_only.sort(descending=True, dim=2)\n corr = torch.cat([co.index_select(0, m_sort[0, 0:256]).unsqueeze(0) for co, m_sort in zip(corr, m_sorted_idx)])\n # corr = corr[0].index_select(0, m_sorted_idx[0, 0, 0:256]).unsqueeze(0)\n max_only_b, indices = f.max_pool2d(corr_b, co_size, return_indices=True)\n max_only_b = max_only_b.view(-1, 1, c_size) + R_map_b\n m_sorted, m_sorted_idx = max_only_b.sort(descending=True, dim=2)\n corr_b = torch.cat([co.index_select(0, m_sort[0, 0:256]).unsqueeze(0) for co, m_sort in zip(corr_b, m_sorted_idx)])\n # corr_b = corr_b[0].index_select(0, m_sorted_idx[0, 0, 0:256]).unsqueeze(0)\n # Merge net\n fcorr = self.p_2(self.res_1(self.p_1(f.upsample(corr, scale_factor=2, mode='bilinear'))))\n fcorr_b = self.p_2(self.res_1(self.p_1(f.upsample(corr_b, scale_factor=2, mode='bilinear'))))\n\n # Decoder\n base1 = torch.cat([self.ls13(base_features1[2]),\n self.ls14(base_features1[3]),\n self.ls15(base_features1[4]),\n fcorr,\n fcorr_b,\n f.adaptive_avg_pool2d(msk_p, fcorr.size()[-2::])], 1)\n fea1 = self.R1(base1)\n base2 = torch.cat([self.ls22(base_features1[1]),\n self.ls23(base_features1[2]),\n self.ls24(base_features1[3]),\n fea1], 1)\n fea2 = self.R2(base2)\n base3 = torch.cat([self.ls31(base_features1[0]),\n self.ls32(base_features1[1]),\n self.ls33(base_features1[2]),\n fea2], 1)\n fea3 = self.R3(base3)\n out_R = f.sigmoid(fea3)\n\n features = []\n out = [out_R]\n return out, features\n \n def forward(self, x1, Ker, msk2, msk_p): # vxd feature * msk *2 _feature_Rf\n\n if msk2.max() > 1:\n msk2 = (msk2.ge(1.6)).float()\n msk_p = (msk_p.ge(1.6)).float()\n # Current frame feature\n base_features1 = self.res_forward(x1)\n Feature_3 = f.normalize(f.max_pool2d(self.L3(base_features1[2]), 2))\n Feature_4 = f.normalize(self.L4(base_features1[3]))\n Feature_5 = f.normalize(f.upsample(self.L5(base_features1[4]), scale_factor=2, mode='bilinear'))\n Feature = f.normalize(self.L_g(torch.cat([Feature_3, Feature_4, Feature_5], dim=1)))\n \n '''\n Kernel_tmp = Ker\n m = f.adaptive_avg_pool2d(msk2.detach(), Kernel_tmp.size()[-2::])\n Kernel = Kernel_tmp * m.repeat(1, 512, 1, 1)\n mb = (1 - m).ge(0.9).float()\n Kernel_back = Kernel_tmp * mb.repeat(1, 512, 1, 1).float()\n corr, Kerner = self.corr_fun(Kernel, Feature)\n corr_b, Kerner_b = self.corr_fun(Kernel_back, Feature)\n '''\n # Correlation\n Kernel = Ker\n m = f.adaptive_avg_pool2d(msk2.detach(), Kernel.size()[-2::])\n \n \n return Kernel, Feature, base_features1, msk_p, m\n #Correlation, a = self.corr_fun(Kernel, Feature)\n# # Select FG / BG similarity maps\n# corr = Correlation * m.view(-1, c_size, 1, 1)\n# corr_b = Correlation * mb.view(-1, c_size, 1, 1)\n# # Ranking attention scores\n#\n# T_corr = f.max_pool2d(corr, 2).permute(0, 2, 3, 
1).view(-1, c_size, h_size, w_size)\n# T_corr_b = f.max_pool2d(corr_b, 2).permute(0, 2, 3, 1).view(-1, c_size, h_size, w_size)\n# R_map = (f.relu(self.Ranking(T_corr)) * self.Dtype(m != 0)).view(-1, 1, c_size) * 0.2\n# R_map_b = (f.relu(self.Ranking(T_corr_b)) * mb).view(-1, 1, c_size) * 0.2\n#\n# # Rank & select\n# co_size = corr.size()[2::]\n# max_only, indices = f.max_pool2d(corr, co_size, return_indices=True)\n# max_only = max_only.view(-1, 1, c_size) + R_map\n# m_sorted, m_sorted_idx = max_only.sort(descending=True, dim=2)\n# corr = torch.cat([co.index_select(0, m_sort[0, 0:256]).unsqueeze(0) for co, m_sort in zip(corr, m_sorted_idx)])\n# # corr = corr[0].index_select(0, m_sorted_idx[0, 0, 0:256]).unsqueeze(0)\n# max_only_b, indices = f.max_pool2d(corr_b, co_size, return_indices=True)\n# max_only_b = max_only_b.view(-1, 1, c_size) + R_map_b\n# m_sorted, m_sorted_idx = max_only_b.sort(descending=True, dim=2)\n# corr_b = torch.cat([co.index_select(0, m_sort[0, 0:256]).unsqueeze(0) for co, m_sort in zip(corr_b, m_sorted_idx)])\n# # corr_b = corr_b[0].index_select(0, m_sorted_idx[0, 0, 0:256]).unsqueeze(0)\n# # Merge net\n# fcorr = self.p_2(self.res_1(self.p_1(f.upsample(corr, scale_factor=2, mode='bilinear'))))\n# fcorr_b = self.p_2(self.res_1(self.p_1(f.upsample(corr_b, scale_factor=2, mode='bilinear'))))\n#\n# # Decoder\n# base1 = torch.cat([self.ls13(base_features1[2]),\n# self.ls14(base_features1[3]),\n# self.ls15(base_features1[4]),\n# fcorr,\n# fcorr_b,\n# f.adaptive_avg_pool2d(msk_p, fcorr.size()[-2::])], 1)\n# fea1 = self.R1(base1)\n# base2 = torch.cat([self.ls22(base_features1[1]),\n# self.ls23(base_features1[2]),\n# self.ls24(base_features1[3]),\n# fea1], 1)\n# fea2 = self.R2(base2)\n# base3 = torch.cat([self.ls31(base_features1[0]),\n# self.ls32(base_features1[1]),\n# self.ls33(base_features1[2]),\n# fea2], 1)\n# fea3 = self.R3(base3)\n# out_R = f.sigmoid(fea3)\n#\n# features = []\n# out = [out_R]\n# return out, features\n\n def RANet_Multiple_forward_eval(self, x1, Ker, msk2, msk_p, mode): # vxd feature * msk *2 _feature_Rf\n if mode == 'first':\n # Exact template features\n x2 = Ker\n base_features2 = self.res_forward(x2)\n Kernel_3 = f.normalize(f.max_pool2d(self.L3(base_features2[2]), 2))\n Kernel_4 = f.normalize(self.L4(base_features2[3]))\n Kernel_5 = f.normalize(f.upsample(self.L5(base_features2[4]), scale_factor=2, mode='bilinear'))\n Kernel_tmp = f.normalize(self.L_g(torch.cat([Kernel_3, Kernel_4, Kernel_5], dim=1)))\n Kernel_tmp = f.avg_pool2d(Kernel_tmp, 2)\n return [Kernel_tmp]\n # Current frame feature\n base_features1 = self.res_forward(x1)\n Feature_3 = f.normalize(f.max_pool2d(self.L3(base_features1[2]), 2))\n Feature_4 = f.normalize(self.L4(base_features1[3]))\n Feature_5 = f.normalize(f.upsample(self.L5(base_features1[4]), scale_factor=2, mode='bilinear'))\n Feature = f.normalize(self.L_g(torch.cat([Feature_3, Feature_4, Feature_5], dim=1)))\n\n Kernel_tmp = Ker\n Out_Rs = []\n\n basef1 = torch.cat([self.ls13(base_features1[2]),\n self.ls14(base_features1[3]),\n self.ls15(base_features1[4]), ], 1)\n basef2 = torch.cat([self.ls22(base_features1[1]),\n self.ls23(base_features1[2]),\n self.ls24(base_features1[3]), ], 1)\n basef3 = torch.cat([self.ls31(base_features1[0]),\n self.ls32(base_features1[1]),\n self.ls33(base_features1[2])], 1)\n\n for idx in range(len(Feature)): # batch\n ker = Kernel_tmp[idx: idx + 1]\n feature = Feature[idx: idx + 1]\n m2 = msk2[idx: idx + 1]\n mp = msk_p[idx: idx + 1]\n max_obj = m2.max().int().data.cpu().numpy()\n if max_obj < 
2:\n m2[0, 0, 0, 0] = 2\n max_obj = m2.max().int().data.cpu().numpy()\n M2s = self.P2masks(f.relu(m2 - 1), max_obj - 1)\n M2_all = m2.ge(1.5).float()\n Mps = self.P2masks(f.relu(mp - 1), max_obj - 1)\n Mp_all = mp.ge(1.5).float()\n\n # Correlation\n Corr_subs = []\n ker_R = self.to_kernel(ker)\n corr_R = self.correlate(ker_R, feature)\n\n # Ranking attention scores\n T_corr = f.max_pool2d(corr_R, 2).view(-1, 405, 405).transpose(1, 2).view(-1, 405, 15, 27)\n R_map = f.relu(self.Ranking(T_corr)) * 0.2\n Rmaps = []\n\n for idy in range(max_obj): # make corrs (backgrounds(=1) and objs)\n m2_rep = f.adaptive_avg_pool2d(M2s[idy], ker.size()[-2::])\n corr_sub = m2_rep.view(m2_rep.size()[0], -1, 1, 1) * corr_R\n Corr_subs.append(corr_sub)\n Rmaps.append((R_map * m2_rep).view(-1, 1, 405))\n\n Outs = []\n for idy in range(1, max_obj): # training:with_bg, testing: w/o BG\n corr = Corr_subs[idy]\n co_size = Corr_subs[idy].size()[2::]\n max_only, indices = f.max_pool2d(corr, co_size, return_indices=True)\n max_only = max_only.view(-1, 1, 405) + Rmaps[idy]\n # Rank & select FG\n m_sorted, m_sorted_idx = max_only.sort(descending=True, dim=2)\n corr = torch.cat([co.index_select(0, m_sort[0, 0:256]).unsqueeze(0) for co, m_sort in zip(corr, m_sorted_idx)])\n # Merge net FG\n corr_fores = self.p_2(self.res_1(self.p_1(f.upsample(corr, scale_factor=2, mode='bilinear'))))\n if max_obj == 1: # only bg\n print('missing obj')\n corr_backs = torch.zeros(corr_fores.size()).cuda()\n else:\n backs_idx = Corr_subs[0:idy] + Corr_subs[idy + 1::]\n corr_b = torch.cat(backs_idx, 1)\n R_map_b = Rmaps[0:idy] + Rmaps[idy + 1::]\n R_map_b = torch.cat(R_map_b, 2)\n max_only_b, indices = f.max_pool2d(corr_b, co_size, return_indices=True)\n max_only_b = max_only_b.view(R_map_b.size()[0], 1, -1) + R_map_b\n # Rank & select BG\n m_sorted, m_sorted_idx = max_only_b.sort(descending=True, dim=2)\n corr_b = torch.cat([co.index_select(0, m_sort[0, 0:256]).unsqueeze(0) for co, m_sort in zip(corr_b, m_sorted_idx)])\n # Merge net BG\n corr_backs = self.p_2(self.res_1(self.p_1(f.upsample(corr_b, scale_factor=2, mode='bilinear'))))\n if idy == 0:\n tmp = corr_fores\n corr_fores = corr_backs\n corr_backs = tmp\n m_p = f.adaptive_avg_pool2d(Mp_all, corr_fores.size()[-2::])\n else:\n m_p = f.adaptive_avg_pool2d(Mps[idy], corr_fores.size()[-2::])\n # low level features\n base1 = torch.cat([basef1[idx: idx + 1], corr_fores, corr_backs, m_p], 1)\n fea1 = self.R1(base1)\n base2 = torch.cat([basef2[idx: idx + 1],\n fea1], 1)\n fea2 = self.R2(base2)\n base3 = torch.cat([basef3[idx: idx + 1],\n fea2], 1)\n fea3 = self.R3(base3)\n out = f.sigmoid(fea3)\n Outs.append(out)\n Out = torch.cat(Outs, 1)\n Out_Rs.append(Out)\n features = []\n out = [Out_Rs]\n return out, features\n"
] | [
[
"torch.nn.functional.upsample",
"torch.cat",
"torch.nn.PReLU",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.functional.conv2d",
"torch.nn.UpsamplingBilinear2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.sigmoid",
"torch.nn.functional.relu",
"torch.nn.InstanceNorm2d",
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thomaslu2000/Incremental-Parsing-Representations | [
"1b0ec638e85f0e521a12b53d8b309191c40fe0d3"
] | [
"src/benepar/integrations/nltk_plugin.py"
] | [
"import dataclasses\nimport itertools\nfrom typing import List, Optional, Tuple\n\nimport nltk\nimport torch\n\nfrom .downloader import load_trained_model\nfrom ..parse_base import BaseParser, BaseInputExample\nfrom ..ptb_unescape import ptb_unescape, guess_space_after\n\n\nTOKENIZER_LOOKUP = {\n \"en\": \"english\",\n \"de\": \"german\",\n \"fr\": \"french\",\n \"pl\": \"polish\",\n \"sv\": \"swedish\",\n}\n\nLANGUAGE_GUESS = {\n \"ar\": (\"X\", \"XP\", \"WHADVP\", \"WHNP\", \"WHPP\"),\n \"zh\": (\"VSB\", \"VRD\", \"VPT\", \"VNV\"),\n \"en\": (\"WHNP\", \"WHADJP\", \"SINV\", \"SQ\"),\n \"de\": (\"AA\", \"AP\", \"CCP\", \"CH\", \"CNP\", \"VZ\"),\n \"fr\": (\"P+\", \"P+D+\", \"PRO+\", \"PROREL+\"),\n \"he\": (\"PREDP\", \"SYN_REL\", \"SYN_yyDOT\"),\n \"pl\": (\"formaczas\", \"znakkonca\"),\n \"sv\": (\"PSEUDO\", \"AVP\", \"XP\"),\n}\n\n\ndef guess_language(label_vocab):\n \"\"\"Guess parser language based on its syntactic label inventory.\n\n The parser training scripts are designed to accept arbitrary input tree\n files with minimal language-specific behavior, but at inference time we may\n need to know the language identity in order to invoke other pipeline\n elements, such as tokenizers.\n \"\"\"\n for language, required_labels in LANGUAGE_GUESS.items():\n if all(label in label_vocab for label in required_labels):\n return language\n return None\n\n\[email protected]\nclass InputSentence(BaseInputExample):\n \"\"\"Parser input for a single sentence.\n\n At least one of `words` and `escaped_words` is required for each input\n sentence. The remaining fields are optional: the parser will attempt to\n derive the value for any missing fields using the fields that are provided.\n\n `words` and `space_after` together form a reversible tokenization of the\n input text: they represent, respectively, the Unicode text for each word and\n an indicator for whether the word is followed by whitespace. These are used\n as inputs by the parser.\n\n `tags` is a list of part-of-speech tags, if available prior to running the\n parser. The parser does not actually use these tags as input, but it will\n pass them through to its output. If `tags` is None, the parser will perform\n its own part of speech tagging (if the parser was not trained to also do\n tagging, \"UNK\" part-of-speech tags will be used in the output instead).\n\n `escaped_words` are the representations of each leaf to use in the output\n tree. If `words` is provided, `escaped_words` will not be used by the neural\n network portion of the parser, and will only be incorporated when\n constructing the output tree. Therefore, `escaped_words` may be used to\n accommodate any dataset-specific text encoding, such as transliteration.\n\n Here is an example of the differences between these fields for English PTB:\n (raw text): \"Fly safely.\"\n words: \" Fly safely . \"\n space_after: False True False False False\n tags: `` VB RB . ''\n escaped_words: `` Fly safely . 
''\n \"\"\"\n\n words: Optional[List[str]] = None\n space_after: Optional[List[bool]] = None\n tags: Optional[List[str]] = None\n escaped_words: Optional[List[str]] = None\n\n @property\n def tree(self):\n return None\n\n def leaves(self):\n return self.escaped_words\n\n def pos(self):\n if self.tags is not None:\n return list(zip(self.escaped_words, self.tags))\n else:\n return [(word, \"UNK\") for word in self.escaped_words]\n\n\nclass Parser:\n \"\"\"Berkeley Neural Parser (benepar), integrated with NLTK.\n\n Use this class to apply the Berkeley Neural Parser to pre-tokenized datasets\n and treebanks, or when integrating the parser into an NLP pipeline that\n already performs tokenization, sentence splitting, and (optionally)\n part-of-speech tagging. For parsing starting with raw text, it is strongly\n encouraged that you use spaCy and benepar.BeneparComponent instead.\n\n Sample usage:\n >>> parser = benepar.Parser(\"benepar_en3\")\n >>> input_sentence = benepar.InputSentence(\n words=['\"', 'Fly', 'safely', '.', '\"'],\n space_after=[False, True, False, False, False],\n tags=['``', 'VB', 'RB', '.', \"''\"],\n escaped_words=['``', 'Fly', 'safely', '.', \"''\"],\n )\n >>> parser.parse(input_sentence)\n\n Not all fields of benepar.InputSentence are required, but at least one of\n `words` and `escaped_words` must not be None. The parser will attempt to\n guess the value for missing fields. For example,\n >>> input_sentence = benepar.InputSentence(\n words=['\"', 'Fly', 'safely', '.', '\"'],\n )\n >>> parser.parse(input_sentence)\n\n Although this class is primarily designed for use with data that has already\n been tokenized, to help with interactive use and debugging it also accepts\n simple text string inputs. However, using this class to parse from raw text\n is STRONGLY DISCOURAGED for any application where parsing accuracy matters.\n When parsing from raw text, use spaCy and benepar.BeneparComponent instead.\n The reason is that parser models do not ship with a tokenizer or sentence\n splitter, and some models may not include a part-of-speech tagger either. A\n toolkit must be used to fill in these pipeline components, and spaCy\n outperforms NLTK in all of these areas (sometimes by a large margin).\n >>> parser.parse('\"Fly safely.\"') # For debugging/interactive use only.\n \"\"\"\n\n def __init__(self, name, batch_size=64, language_code=None):\n \"\"\"Load a trained parser model.\n\n Args:\n name (str): Model name, or path to pytorch saved model\n batch_size (int): Maximum number of sentences to process per batch\n language_code (str, optional): language code for the parser (e.g.\n 'en', 'he', 'zh', etc). Our official trained models will set\n this automatically, so this argument is only needed if training\n on new languages or treebanks.\n \"\"\"\n self._parser = load_trained_model(name)\n if torch.cuda.is_available():\n self._parser.cuda()\n if language_code is not None:\n self._language_code = language_code\n else:\n self._language_code = guess_language(self._parser.config[\"label_vocab\"])\n self._tokenizer_lang = TOKENIZER_LOOKUP.get(self._language_code, None)\n\n self.batch_size = batch_size\n\n def parse(self, sentence):\n \"\"\"Parse a single sentence\n\n Args:\n sentence (InputSentence or List[str] or str): Sentence to parse.\n If the input is of List[str], it is assumed to be a sequence of\n words and will behave the same as only setting the `words` field\n of InputSentence. 
If the input is of type str, the sentence will\n be tokenized using the default NLTK tokenizer (not recommended:\n if parsing from raw text, use spaCy and benepar.BeneparComponent\n instead).\n\n Returns:\n nltk.Tree\n \"\"\"\n return list(self.parse_sents([sentence]))[0]\n\n def parse_sents(self, sents):\n \"\"\"Parse multiple sentences in batches.\n\n Args:\n sents (Iterable[InputSentence]): An iterable of sentences to be\n parsed. `sents` may also be a string, in which case it will be\n segmented into sentences using the default NLTK sentence\n splitter (not recommended: if parsing from raw text, use spaCy\n and benepar.BeneparComponent instead). Otherwise, each element\n of `sents` will be treated as a sentence. The elements of\n `sents` may also be List[str] or str: see Parser.parse() for\n documentation regarding these cases.\n\n Yields:\n nltk.Tree objects, one per input sentence.\n \"\"\"\n if isinstance(sents, str):\n if self._tokenizer_lang is None:\n raise ValueError(\n \"No tokenizer available for this language. \"\n \"Please split into individual sentences and tokens \"\n \"before calling the parser.\"\n )\n sents = nltk.sent_tokenize(sents, self._tokenizer_lang)\n\n end_sentinel = object()\n for batch_sents in itertools.zip_longest(\n *([iter(sents)] * self.batch_size), fillvalue=end_sentinel\n ):\n batch_inputs = []\n for sent in batch_sents:\n if sent is end_sentinel:\n break\n elif isinstance(sent, str):\n if self._tokenizer_lang is None:\n raise ValueError(\n \"No word tokenizer available for this language. \"\n \"Please tokenize before calling the parser.\"\n )\n escaped_words = nltk.word_tokenize(sent, self._tokenizer_lang)\n sent = InputSentence(escaped_words=escaped_words)\n elif isinstance(sent, (list, tuple)):\n sent = InputSentence(words=sent)\n elif not isinstance(sent, InputSentence):\n raise ValueError(\n \"Sentences must be one of: InputSentence, list, tuple, or str\"\n )\n batch_inputs.append(self._with_missing_fields_filled(sent))\n\n for inp, output in zip(\n batch_inputs, self._parser.parse(batch_inputs, return_compressed=True)\n ):\n # If pos tags are provided as input, ignore any tags predicted\n # by the parser.\n if inp.tags is not None:\n output = output.without_predicted_tags()\n yield output.to_tree(\n inp.pos(),\n self._parser.decoder.label_from_index,\n self._parser.tag_from_index,\n )\n\n def _with_missing_fields_filled(self, sent):\n if not isinstance(sent, InputSentence):\n raise ValueError(\"Input is not an instance of InputSentence\")\n if sent.words is None and sent.escaped_words is None:\n raise ValueError(\"At least one of words or escaped_words is required\")\n elif sent.words is None:\n sent = dataclasses.replace(sent, words=ptb_unescape(sent.escaped_words))\n elif sent.escaped_words is None:\n escaped_words = [\n word.replace(\"(\", \"-LRB-\")\n .replace(\")\", \"-RRB-\")\n .replace(\"{\", \"-LCB-\")\n .replace(\"}\", \"-RCB-\")\n .replace(\"[\", \"-LSB-\")\n .replace(\"]\", \"-RSB-\")\n for word in sent.words\n ]\n sent = dataclasses.replace(sent, escaped_words=escaped_words)\n else:\n if len(sent.words) != len(sent.escaped_words):\n raise ValueError(\n f\"Length of words ({len(sent.words)}) does not match \"\n f\"escaped_words ({len(sent.escaped_words)})\"\n )\n\n if sent.space_after is None:\n if self._language_code == \"zh\":\n space_after = [False for _ in sent.words]\n elif self._language_code in (\"ar\", \"he\"):\n space_after = [True for _ in sent.words]\n else:\n space_after = guess_space_after(sent.words)\n sent = 
dataclasses.replace(sent, space_after=space_after)\n elif len(sent.words) != len(sent.space_after):\n raise ValueError(\n f\"Length of words ({len(sent.words)}) does not match \"\n f\"space_after ({len(sent.space_after)})\"\n )\n\n assert len(sent.words) == len(sent.escaped_words) == len(sent.space_after)\n return sent\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |