Dataset columns: repo_name (string, lengths 6-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence), possible_versions (list).
ALexanderpu/tf229
[ "eaa1f5c9ed568af850fb7658379226f6d42f1715" ]
[ "tf229/descriminative/softmax_regression.py" ]
[ "\"\"\"Implements stochastic gradient decent on logistic regression as seen in\nStanford 229.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom collections import namedtuple\n\nimport tensorflow as tf\nimport numpy as np\n\ndef train(x, y, **kwargs):\n \"\"\"Implements stochastic gradient decent on softmax regression as seen in\n Stanford 229 (http://cs229.stanford.edu/notes/cs229-notes1.pdf)\n Args:\n x: The covariates or factors of the model in an n by m array (n is number)\n of data points and m is number of factors\n y: The targets or labels of the model in an n by c array, where c is the\n number of classes\n kwargs:\n model_path: the location where the tf model file should be saved\n iterations: The number of steps to train\n batch_size: The number of samples to use per step\n verbosity_step: The number of steps between each printout of the cost\n of the model (negative for no printouts)\n step_size: The distance we step down the gradient each step\n seed: the seed for choosing our batches (0 if no seed)\n Raises:\n TODO\n Returns:\n A (Weights, Bias) tuple\n \"\"\"\n # extract the kwargs\n model_path = kwargs.get(\"model_path\", \"\")\n iterations = kwargs.get(\"iterations\", 100)\n batch_size = kwargs.get(\"batch_size\", 10)\n verbosity_step = kwargs.get(\"verbosity_step\", 20)\n step_size = kwargs.get(\"step_size\", 10e-1)\n seed = kwargs.get(\"seed\", 0)\n\n if seed:\n np.random.seed(seed)\n\n num_predictors = len(x[0])\n num_classes = len(y[0])\n x = np.array(x)\n y = np.array(y)\n\n with tf.Graph().as_default() as _:\n X = tf.placeholder(tf.float32, [batch_size, num_predictors])\n Y = tf.placeholder(tf.float32, [batch_size, num_classes])\n\n Ws = tf.Variable(tf.truncated_normal([num_predictors,num_classes], stddev=0.001))\n bs = tf.Variable(tf.truncated_normal([1,num_classes], stddev=0.001))\n\n saver = tf.train.Saver([Ws, bs])\n\n # batch_size,num_predictors * num_predictors,num_classes => batch_size,num_classes\n weighted_X = tf.matmul(X, Ws) + bs\n\n # sum along the num_predictors axis => batch_size,1\n normalization = tf.reduce_sum(tf.exp(weighted_X), 1, keep_dims=True)\n\n # subtracts the batch normalization from each term in the batch\n logits = weighted_X - tf.log(normalization)\n\n # elementwise multiply. 
logits and Y are the same shape\n # this will only keep the logits that are true\n cost = -tf.reduce_mean(Y * logits)\n\n train_step = tf.train.GradientDescentOptimizer(step_size).minimize(cost)\n\n init = tf.initialize_all_variables()\n\n with tf.Session() as sess:\n sess.run(init)\n\n for i in xrange(iterations):\n sample_indexes = np.random.choice(len(y), batch_size)\n sample_xs = x[sample_indexes]\n sample_ys = y[sample_indexes]\n\n weights, biases, step_cost, _ = sess.run(\n [Ws, bs, cost, train_step],\n feed_dict={X:sample_xs, Y:sample_ys})\n\n if i % verbosity_step == 0:\n print(step_cost)\n\n\n if model_path:\n saver.save(sess, model_path)\n\n Parameters = namedtuple(\"Parameters\", [\"Weights\", \"Biases\"])\n return Parameters(weights, biases)\n\ndef predict(x, num_classes, model_path):\n \"\"\"Predicts targets using a batch of predictors and a model trained by\n the softmax regression train method\n Args:\n x: The covariates or factors of the model in an n by m array (n is number)\n of data points and m is number of factors\n model_path: location of the tf model file\n Raises:\n TODO\n Returns:\n a num data by 1 array of predictions\n \"\"\"\n num_predictors = len(x[0])\n num_data = len(x)\n\n x = np.array(x)\n\n with tf.Graph().as_default() as _:\n X = tf.placeholder(tf.float32, [num_data, num_predictors])\n\n Ws = tf.Variable(tf.truncated_normal([num_predictors,num_classes], stddev=0.001))\n bs = tf.Variable(tf.truncated_normal([1,num_classes], stddev=0.001))\n\n saver = tf.train.Saver([Ws, bs])\n\n # batch_size,num_predictors * num_predictors,num_classes => batch_size,num_classes\n unnormalized = tf.exp(tf.matmul(X, Ws) + bs)\n\n # sum along the num_predictors axis => batch_size,1\n normalization = tf.reduce_sum(unnormalized, 1, keep_dims=True)\n\n Predictions = unnormalized / normalization\n\n with tf.Session() as sess:\n saver.restore(sess, model_path)\n\n predictions = sess.run([Predictions], feed_dict={X:x})\n\n return predictions\n\n\nif __name__ == \"__main__\":\n X_TEST = np.array([[0],[1],[2],[1],[2]])\n # init one hots\n Y_TEST = np.zeros((5,3))\n Y_TEST[np.arange(5), [0,1,2,1,2]] = 1\n\n print(train(X_TEST, Y_TEST, iterations=2000, batch_size=5,\n model_path=\"models/softmax_regression/softmax_regression\"))\n\n print(predict(X_TEST, 3, \"models/softmax_regression/softmax_regression\"))\n" ]
[ [ "tensorflow.matmul", "tensorflow.Graph", "tensorflow.truncated_normal", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.reduce_sum", "numpy.arange", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.initialize_all_variables", "tensorflow.train.GradientDescentOptimizer", "tensorflow.log", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Jh-SYSU/MolRep
[ "b2c802d18d41d7db26c19c6dd644098f945e48a1" ]
[ "MolRep/Featurization/utils/graph_utils.py" ]
[ "# -*- coding: utf-8 -*-\n'''\nCreated on 2020.05.19\n\n@author: Jiahua Rao, Weiming Li, Hui Yang, Jiancong Xie\n'''\n\n\nfrom collections import defaultdict\nimport numpy as np\nimport networkx as nx\n\nimport torch\nfrom torch_geometric import data\nfrom torch_geometric.utils import dense_to_sparse\n\nfrom typing import List, Tuple, Union\n\nfrom rdkit import Chem\n\nclass Graph(nx.Graph):\n def __init__(self, target, smiles, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target = target\n self.smiles = smiles\n self.laplacians = None\n self.v_plus = None\n self.max_num_nodes = 200\n\n def get_edge_index(self):\n adj = torch.Tensor(nx.to_numpy_array(self))\n edge_index, _ = dense_to_sparse(adj)\n return edge_index\n\n def get_edge_attr(self):\n features = []\n for _, _, edge_attrs in self.edges(data=True):\n data = []\n\n if edge_attrs[\"label\"] is not None:\n data.extend(edge_attrs[\"label\"])\n\n if edge_attrs[\"attrs\"] is not None:\n data.extend(edge_attrs[\"attrs\"])\n\n features.append(data)\n features.append(data)\n return torch.Tensor(features)\n\n def get_x(self, use_node_attrs=False, use_node_degree=False, use_one=False):\n features = []\n for node, node_attrs in self.nodes(data=True):\n data = []\n\n if node_attrs[\"label\"] is not None: # r attention !\n data.extend(node_attrs[\"label\"])\n\n if use_node_attrs and node_attrs[\"attrs\"] is not None:\n data.extend(node_attrs[\"attrs\"])\n\n if use_node_degree:\n data.extend([self.degree(node)])\n\n if use_one:\n data.extend([1])\n\n features.append(data)\n\n return torch.Tensor(features)\n\n def get_target(self):\n return np.array(self.target)\n\n def get_smiles(self):\n return self.smiles\n\n @property\n def has_edge_attrs(self):\n _, _, edge_attrs = list(self.edges(data=True))[0]\n return edge_attrs[\"attrs\"] is not None\n\n @property\n def has_edge_labels(self):\n _, _, edge_attrs = list(self.edges(data=True))[0]\n return edge_attrs[\"label\"] is not None\n\n @property\n def has_node_attrs(self):\n _, node_attrs = list(self.node(data=True))[0]\n return node_attrs[\"attrs\"] is not None\n\n @property\n def has_node_labels(self):\n _, node_attrs = list(self.node(data=True))[0]\n return node_attrs[\"label\"] is not None\n\n\ndef one_hot(value, num_classes):\n vec = np.zeros(num_classes)\n vec[value - 1] = 1\n return vec\n\n\ndef parse_tu_data(model_name, temp_dir):\n # setup paths\n indicator_path = temp_dir / f'{model_name}_graph_indicator.txt'\n edges_path = temp_dir / f'{model_name}_A.txt'\n smiles_path = temp_dir / f'{model_name}_SMILES.txt'\n graph_labels_path = temp_dir / f'{model_name}_graph_labels.txt'\n node_labels_path = temp_dir / f'{model_name}_node_labels.txt'\n edge_labels_path = temp_dir / f'{model_name}_edge_labels.txt'\n node_attrs_path = temp_dir / f'{model_name}_node_attributes.txt'\n edge_attrs_path = temp_dir / f'{model_name}_edge_attributes.txt'\n\n unique_node_labels = set()\n unique_edge_labels = set()\n\n indicator, edge_indicator = [-1], [(-1, -1)]\n graph_nodes = defaultdict(list)\n graph_edges = defaultdict(list)\n node_labels = defaultdict(list)\n edge_labels = defaultdict(list)\n node_attrs = defaultdict(list)\n edge_attrs = defaultdict(list)\n\n with open(indicator_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n graph_id = int(line)\n indicator.append(graph_id)\n graph_nodes[graph_id].append(i)\n \n with open(edges_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n edge = [int(e) for e 
in line.split(',')]\n edge_indicator.append(edge)\n\n # edge[0] is a node id, and it is used to retrieve\n # the corresponding graph id to which it belongs to\n # (see README.txt)\n graph_id = indicator[edge[0]]\n\n graph_edges[graph_id].append(edge)\n\n if node_labels_path.exists():\n with open(node_labels_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n node_label = int(line)\n unique_node_labels.add(node_label)\n graph_id = indicator[i]\n node_labels[graph_id].append(node_label)\n\n if edge_labels_path.exists():\n with open(edge_labels_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n edge_label = int(line)\n unique_edge_labels.add(edge_label)\n graph_id = indicator[edge_indicator[i][0]]\n edge_labels[graph_id].append(edge_label)\n\n if node_attrs_path.exists():\n with open(node_attrs_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n nums = line.split(\",\")\n node_attr = np.array([float(n) for n in nums])\n graph_id = indicator[i]\n node_attrs[graph_id].append(node_attr)\n\n if edge_attrs_path.exists():\n with open(edge_attrs_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n nums = line.split(\",\")\n edge_attr = np.array([float(n) for n in nums])\n graph_id = indicator[edge_indicator[i][0]]\n edge_attrs[graph_id].append(edge_attr)\n\n # get graph labels\n graph_labels = []\n with open(graph_labels_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n nums = line.split(\",\")\n targets = np.array([np.nan if n == 'None' else float(n) for n in nums])\n graph_labels.append(targets)\n\n # Shift by one to the left. Apparently this is necessary for multiclass tasks.\n # if min(graph_labels) == 1:\n # graph_labels = [l - 1 for l in graph_labels]\n\n # get SMILES\n smiles_all = []\n with open(smiles_path, \"r\") as f:\n for i, line in enumerate(f.readlines(), 1):\n line = line.rstrip(\"\\n\")\n smiles_all.append(line)\n\n num_node_labels = max(\n unique_node_labels) if unique_node_labels != set() else 0\n # some datasets e.g. 
PROTEINS have labels with value 0\n if num_node_labels != 0 and min(unique_node_labels) == 0:\n num_node_labels += 1\n\n num_edge_labels = max(\n unique_edge_labels) if unique_edge_labels != set() else 0\n if num_edge_labels != 0 and min(unique_edge_labels) == 0:\n num_edge_labels += 1\n\n return {\n \"graph_nodes\": graph_nodes,\n \"graph_edges\": graph_edges,\n \"graph_labels\": graph_labels,\n \"node_labels\": node_labels,\n \"node_attrs\": node_attrs,\n \"edge_labels\": edge_labels,\n \"edge_attrs\": edge_attrs,\n \"smiles\": smiles_all\n }, num_node_labels, num_edge_labels\n\n\ndef create_graph_from_tu_data(graph_data, target, num_node_labels, num_edge_labels, smiles=None):\n nodes = graph_data[\"graph_nodes\"]\n edges = graph_data[\"graph_edges\"] # y list\n\n G = Graph(target=target, smiles=smiles)\n\n for i, node in enumerate(nodes):\n label, attrs = None, None\n\n if graph_data[\"node_labels\"] != []:\n label = one_hot(graph_data[\"node_labels\"][i], num_node_labels)\n\n if graph_data[\"node_attrs\"] != []:\n attrs = graph_data[\"node_attrs\"][i]\n\n G.add_node(node, label=label, attrs=attrs)\n\n for i, edge in enumerate(edges): # y 遍历某个图的所有边\n n1, n2 = edge\n label, attrs = None, None\n\n if graph_data[\"edge_labels\"] != []:\n label = one_hot(graph_data[\"edge_labels\"][i], num_edge_labels)\n if graph_data[\"edge_attrs\"] != []:\n attrs = graph_data[\"edge_attrs\"][i]\n\n G.add_edge(n1, n2, label=label, attrs=attrs)\n\n return G\n\n\nclass Data(data.Data):\n def __init__(self,\n x=None,\n edge_index=None,\n edge_attr=None,\n y=None,\n v_outs=None,\n e_outs=None,\n g_outs=None,\n o_outs=None,\n laplacians=None,\n v_plus=None,\n smiles=None,\n max_num_nodes=200,\n **kwargs):\n\n additional_fields = {\n 'v_outs': v_outs,\n 'e_outs': e_outs,\n 'g_outs': g_outs,\n 'o_outs': o_outs,\n 'laplacians': laplacians,\n 'v_plus': v_plus,\n 'max_num_nodes': max_num_nodes,\n 'smiles': smiles\n }\n super().__init__(x, edge_index, edge_attr, y, **additional_fields)\n\n def set_targets(self, target):\n self.y = target\n\n\n# Atom feature sizes\nMAX_ATOMIC_NUM = 100\nATOM_FEATURES = {\n 'atomic_num': list(range(MAX_ATOMIC_NUM)),\n 'degree': [0, 1, 2, 3, 4, 5],\n 'formal_charge': [-1, -2, 1, 2, 0],\n 'chiral_tag': [0, 1, 2, 3],\n 'num_Hs': [0, 1, 2, 3, 4],\n 'hybridization': [\n Chem.rdchem.HybridizationType.SP,\n Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3,\n Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2\n ],\n}\n\n# Distance feature sizes\nPATH_DISTANCE_BINS = list(range(10))\nTHREE_D_DISTANCE_MAX = 20\nTHREE_D_DISTANCE_STEP = 1\nTHREE_D_DISTANCE_BINS = list(range(0, THREE_D_DISTANCE_MAX + 1, THREE_D_DISTANCE_STEP))\n\n# len(choices) + 1 to include room for uncommon values; + 2 at end for IsAromatic and mass\nATOM_FDIM = sum(len(choices) + 1 for choices in ATOM_FEATURES.values()) + 2\nBOND_FDIM = 14\n\n\ndef onek_encoding_unk(value: int, choices: List[int]) -> List[int]:\n \"\"\"\n Creates a one-hot encoding.\n Args:\n value: The value for which the encoding should be one.\n choices: A list of possible values.\n \n return: \n A one-hot encoding of the value in a list of length len(choices) + 1.\n If value is not in the list of choices, then the final element in the encoding is 1.\n \"\"\"\n encoding = [0] * (len(choices) + 1)\n index = choices.index(value) if value in choices else -1\n encoding[index] = 1\n\n return encoding\n\n\ndef atom_features(atom: Chem.rdchem.Atom, functional_groups: List[int] = None) -> List[Union[bool, int, float]]:\n 
\"\"\"\n Builds a feature vector for an atom.\n Args:\n atom: An RDKit atom.\n functional_groups: A k-hot vector indicating the functional groups the atom belongs to.\n \n return: \n A list containing the atom features.\n \"\"\"\n features = onek_encoding_unk(atom.GetAtomicNum() - 1, ATOM_FEATURES['atomic_num']) + \\\n onek_encoding_unk(atom.GetTotalDegree(), ATOM_FEATURES['degree']) + \\\n onek_encoding_unk(atom.GetFormalCharge(), ATOM_FEATURES['formal_charge']) + \\\n onek_encoding_unk(int(atom.GetChiralTag()), ATOM_FEATURES['chiral_tag']) + \\\n onek_encoding_unk(int(atom.GetTotalNumHs()), ATOM_FEATURES['num_Hs']) + \\\n onek_encoding_unk(int(atom.GetHybridization()), ATOM_FEATURES['hybridization']) + \\\n [1 if atom.GetIsAromatic() else 0] + \\\n [atom.GetMass() * 0.01] # scaled to about the same range as other features\n if functional_groups is not None:\n features += functional_groups\n return features\n\n\ndef bond_features(bond: Chem.rdchem.Bond) -> List[Union[bool, int, float]]:\n \"\"\"\n Builds a feature vector for a bond.\n Args:\n bond: A RDKit bond.\n \n return: \n A list containing the bond features.\n \"\"\"\n if bond is None:\n fbond = [1] + [0] * (BOND_FDIM - 1)\n else:\n bt = bond.GetBondType()\n fbond = [\n 0, # bond is not None\n bt == Chem.rdchem.BondType.SINGLE,\n bt == Chem.rdchem.BondType.DOUBLE,\n bt == Chem.rdchem.BondType.TRIPLE,\n bt == Chem.rdchem.BondType.AROMATIC,\n (bond.GetIsConjugated() if bt is not None else 0),\n (bond.IsInRing() if bt is not None else 0)\n ]\n fbond += onek_encoding_unk(int(bond.GetStereo()), list(range(6)))\n return fbond" ]
[ [ "numpy.array", "numpy.zeros", "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ploriaux/Boruta-Shap
[ "3d6be5ae18aa5dbed836dc78d8558dc834d8cc5f" ]
[ "src/BorutaShap.py" ]
[ "from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, IsolationForest\r\nfrom sklearn.datasets import load_breast_cancer, load_boston\r\nfrom statsmodels.stats.multitest import multipletests\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.cluster import KMeans\r\nfrom scipy.sparse import issparse\r\nfrom scipy.stats import binom_test, ks_2samp\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport random\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom numpy.random import choice\r\nimport seaborn as sns\r\nimport shap\r\nimport os\r\nimport re\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\nclass BorutaShap:\r\n\r\n \"\"\"\r\n BorutaShap is a wrapper feature selection method built on the foundations of both the SHAP and Boruta algorithms.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, model=None, importance_measure='Shap',\r\n classification=True, percentile=100, pvalue=0.05):\r\n\r\n \"\"\"\r\n Parameters\r\n ----------\r\n model: Model Object\r\n If no model specified then a base Random Forest will be returned otherwise the specifed model will\r\n be returned.\r\n\r\n importance_measure: String\r\n Which importance measure too use either Shap or Gini/Gain\r\n\r\n classification: Boolean\r\n if true then the problem is either a binary or multiclass problem otherwise if false then it is regression\r\n\r\n percentile: Int\r\n An integer ranging from 0-100 it changes the value of the max shadow importance values. Thus, lowering its value\r\n would make the algorithm more lenient.\r\n\r\n p_value: float\r\n A float used as a significance level again if the p-value is increased the algorithm will be more lenient making it smaller\r\n would make it more strict also by making the model more strict could impact runtime making it slower. 
As it will be less likley\r\n to reject and accept features.\r\n\r\n \"\"\"\r\n\r\n self.importance_measure = importance_measure.lower()\r\n self.percentile = percentile\r\n self.pvalue = pvalue\r\n self.classification = classification\r\n self.model = model\r\n self.check_model()\r\n\r\n\r\n def check_model(self):\r\n\r\n \"\"\"\r\n Checks that a model object has been passed as a parameter when intiializing the BorutaShap class.\r\n\r\n Returns\r\n -------\r\n Model Object\r\n If no model specified then a base Random Forest will be returned otherwise the specifed model will\r\n be returned.\r\n\r\n Raises\r\n ------\r\n AttirbuteError\r\n If the model object does not have the required attributes.\r\n\r\n \"\"\"\r\n\r\n check_fit = hasattr(self.model, 'fit')\r\n check_predict_proba = hasattr(self.model, 'predict')\r\n\r\n try:\r\n check_feature_importance = hasattr(self.model, 'feature_importances_')\r\n\r\n except:\r\n check_feature_importance = True\r\n\r\n\r\n if self.model is None:\r\n\r\n if self.classification:\r\n self.model = RandomForestClassifier()\r\n else:\r\n self.model = RandomForestRegressor()\r\n\r\n elif check_fit is False and check_predict_proba is False:\r\n raise AttributeError('Model must contain both the fit() and predict() methods')\r\n\r\n elif check_feature_importance is False and self.importance_measure == 'gini':\r\n raise AttributeError('Model must contain the feature_importances_ method to use Gini try Shap instead')\r\n\r\n else:\r\n pass\r\n\r\n\r\n def check_X(self):\r\n\r\n \"\"\"\r\n Checks that the data passed to the BorutaShap instance is a pandas Dataframe\r\n\r\n Returns\r\n -------\r\n Datframe\r\n\r\n Raises\r\n ------\r\n AttirbuteError\r\n If the data is not of the expected type.\r\n\r\n \"\"\"\r\n\r\n if isinstance(self.X, pd.DataFrame) is False:\r\n raise AttributeError('X must be a pandas Dataframe')\r\n\r\n else:\r\n pass\r\n\r\n\r\n def missing_values_y(self):\r\n\r\n \"\"\"\r\n Checks for missing values in target variable.\r\n\r\n Returns\r\n -------\r\n Boolean\r\n\r\n Raises\r\n ------\r\n AttirbuteError\r\n If data is not in the expected format.\r\n\r\n \"\"\"\r\n\r\n if isinstance(self.y, pd.Series):\r\n return self.y.isnull().any().any()\r\n\r\n elif isinstance(self.y, np.ndarray):\r\n return np.isnan(self.y).any()\r\n\r\n else:\r\n raise AttributeError('Y must be a pandas Dataframe or a numpy array')\r\n\r\n\r\n def check_missing_values(self):\r\n\r\n \"\"\"\r\n Checks for missing values in the data.\r\n\r\n Returns\r\n -------\r\n Boolean\r\n\r\n Raises\r\n ------\r\n AttirbuteError\r\n If there are missing values present.\r\n\r\n \"\"\"\r\n\r\n X_missing = self.X.isnull().any().any()\r\n Y_missing = self.missing_values_y()\r\n\r\n models_to_check = ('xgb', 'catboost', 'lgbm', 'lightgbm')\r\n\r\n model_name = str(type(self.model)).lower()\r\n if X_missing or Y_missing:\r\n\r\n if model_name.startswith(models_to_check):\r\n print('Warning there are missing values in your data !')\r\n\r\n else:\r\n raise ValueError('There are missing values in your Data')\r\n\r\n else:\r\n pass\r\n\r\n\r\n\r\n def Check_if_chose_train_or_test_and_train_model(self):\r\n\r\n \"\"\"\r\n Decides to fit the model to either the training data or the test/unseen data a great discussion on the\r\n differences can be found here.\r\n\r\n https://compstat-lmu.github.io/iml_methods_limitations/pfi-data.html#introduction-to-test-vs.training-data\r\n\r\n \"\"\"\r\n\r\n if self.train_or_test.lower() == 'test':\r\n # keeping the same naming convenetion as to not add 
complexit later on\r\n self.X_boruta_train, self.X_boruta_test, self.y_train, self.y_test = train_test_split(self.X_boruta,\r\n self.y,\r\n test_size=0.3,\r\n random_state=self.random_state)\r\n self.Train_model(self.X_boruta_train, self.y_train)\r\n\r\n elif self.train_or_test.lower() == 'train':\r\n # model will be trained and evaluated on the same data\r\n self.Train_model(self.X_boruta, self.y)\r\n\r\n else:\r\n raise ValueError('The train_or_test parameter can only be \"train\" or \"test\"')\r\n\r\n\r\n\r\n def Train_model(self, X, y):\r\n\r\n \"\"\"\r\n Trains Model also checks to see if the model is an instance of catboost as it needs extra parameters\r\n also the try except is for models with a verbose statement\r\n\r\n Parameters\r\n ----------\r\n X: Dataframe\r\n A pandas dataframe of the features.\r\n\r\n y: Series/ndarray\r\n A pandas series or numpy ndarray of the target\r\n\r\n Returns\r\n ----------\r\n fitted model object\r\n\r\n \"\"\"\r\n\r\n if 'catboost' in str(type(self.model)).lower():\r\n self.model.fit(X, y, cat_features = self.X_categorical, verbose=False)\r\n\r\n else:\r\n\r\n try:\r\n self.model.fit(X, y, verbose=False)\r\n\r\n except:\r\n self.model.fit(X, y)\r\n\r\n\r\n\r\n\r\n def fit(self, X, y, n_trials = 20, random_state=0, sample=False,\r\n train_or_test = 'test', normalize=True, verbose=True):\r\n\r\n \"\"\"\r\n The main body of the program this method it computes the following\r\n\r\n 1. Extend the information system by adding copies of all variables (the information system\r\n is always extended by at least 5 shadow attributes, even if the number of attributes in\r\n the original set is lower than 5).\r\n\r\n 2. Shuffle the added attributes to remove their correlations with the response.\r\n\r\n 3. Run a random forest classifier on the extended information system and gather the\r\n Z scores computed.\r\n\r\n 4. Find the maximum Z score among shadow attributes (MZSA), and then assign a hit to\r\n every attribute that scored better than MZSA.\r\n\r\n 5. For each attribute with undetermined importance perform a two-sided test of equality\r\n with the MZSA.\r\n\r\n 6. Deem the attributes which have importance significantly lower than MZSA as ‘unimportant’\r\n and permanently remove them from the information system.\r\n\r\n 7. Deem the attributes which have importance significantly higher than MZSA as ‘important’.\r\n\r\n 8. Remove all shadow attributes.\r\n\r\n 9. Repeat the procedure until the importance is assigned for all the attributes, or the\r\n algorithm has reached the previously set limit of the random forest runs.\r\n\r\n 10. 
Stores results.\r\n\r\n Parameters\r\n ----------\r\n X: Dataframe\r\n A pandas dataframe of the features.\r\n\r\n y: Series/ndarray\r\n A pandas series or numpy ndarray of the target\r\n\r\n random_state: int\r\n A random state for reproducibility of results\r\n\r\n Sample: Boolean\r\n if true then a rowise sample of the data will be used to calculate the feature importance values\r\n\r\n sample_fraction: float\r\n The sample fraction of the original data used in calculating the feature importance values only\r\n used if Sample==True.\r\n\r\n train_or_test: string\r\n Decides whether the feature importance should be calculated on out of sample data see the dicussion here.\r\n https://compstat-lmu.github.io/iml_methods_limitations/pfi-data.html#introduction-to-test-vs.training-data\r\n\r\n normalize: boolean\r\n if true the importance values will be normalized using the z-score formula\r\n\r\n verbose: Boolean\r\n a flag indicator to print out all the rejected or accepted features.\r\n\r\n \"\"\"\r\n\r\n np.random.seed(random_state)\r\n self.starting_X = X.copy()\r\n self.X = X.copy()\r\n self.y = y.copy()\r\n self.n_trials = n_trials\r\n self.random_state = random_state\r\n self.ncols = self.X.shape[1]\r\n self.all_columns = self.X.columns.to_numpy()\r\n self.rejected_columns = []\r\n self.accepted_columns = []\r\n\r\n self.check_X()\r\n self.check_missing_values()\r\n self.sample = sample\r\n self.train_or_test = train_or_test\r\n\r\n self.features_to_remove = []\r\n self.hits = np.zeros(self.ncols)\r\n self.order = self.create_mapping_between_cols_and_indices()\r\n self.create_importance_history()\r\n\r\n if self.sample: self.preds = self.isolation_forest(self.X)\r\n\r\n for trial in tqdm(range(self.n_trials)):\r\n\r\n self.remove_features_if_rejected()\r\n self.columns = self.X.columns.to_numpy()\r\n self.create_shadow_features()\r\n\r\n # early stopping\r\n if self.X.shape[1] == 0:\r\n break\r\n\r\n else:\r\n\r\n self.Check_if_chose_train_or_test_and_train_model()\r\n\r\n self.X_feature_import, self.Shadow_feature_import = self.feature_importance(normalize=normalize)\r\n self.update_importance_history()\r\n self.hits += self.calculate_hits()\r\n self.test_features(iteration=trial+1)\r\n\r\n self.store_feature_importance()\r\n self.calculate_rejected_accepted_tentative(verbose=verbose)\r\n\r\n\r\n def calculate_rejected_accepted_tentative(self, verbose):\r\n\r\n \"\"\"\r\n Figures out which features have been either accepted rejeected or tentative\r\n\r\n Returns\r\n -------\r\n 3 lists\r\n\r\n \"\"\"\r\n\r\n self.rejected = list(set(self.flatten_list(self.rejected_columns))-set(self.flatten_list(self.accepted_columns)))\r\n self.accepted = list(set(self.flatten_list(self.accepted_columns)))\r\n self.tentative = list(set(self.all_columns) - set(self.rejected + self.accepted))\r\n\r\n if verbose:\r\n print(str(len(self.accepted)) + ' attributes confirmed important: ' + str(self.accepted))\r\n print(str(len(self.rejected)) + ' attributes confirmed unimportant: ' + str(self.rejected))\r\n print(str(len(self.tentative)) + ' tentative attributes remains: ' + str(self.tentative))\r\n\r\n\r\n\r\n def create_importance_history(self):\r\n\r\n \"\"\"\r\n Creates a dataframe object to store historical feature importance scores.\r\n\r\n Returns\r\n -------\r\n Datframe\r\n\r\n \"\"\"\r\n\r\n self.history_shadow = np.zeros(self.ncols)\r\n self.history_x = np.zeros(self.ncols)\r\n\r\n\r\n def update_importance_history(self):\r\n\r\n \"\"\"\r\n At each iteration update the datframe object that stores 
the historical feature importance scores.\r\n\r\n Returns\r\n -------\r\n Datframe\r\n\r\n \"\"\"\r\n\r\n padded_history_shadow = np.full((self.ncols), np.NaN)\r\n padded_history_x = np.full((self.ncols), np.NaN)\r\n\r\n for (index, col) in enumerate(self.columns):\r\n map_index = self.order[col]\r\n padded_history_shadow[map_index] = self.Shadow_feature_import[index]\r\n padded_history_x[map_index] = self.X_feature_import[index]\r\n\r\n self.history_shadow = np.vstack((self.history_shadow, padded_history_shadow))\r\n self.history_x = np.vstack((self.history_x, padded_history_x))\r\n\r\n\r\n\r\n def store_feature_importance(self):\r\n\r\n \"\"\"\r\n Reshapes the columns in the historical feature importance scores object also adds the mean, median, max, min\r\n shadow feature scores.\r\n\r\n Returns\r\n -------\r\n Datframe\r\n\r\n \"\"\"\r\n\r\n self.history_x = pd.DataFrame(data=self.history_x,\r\n columns=self.all_columns)\r\n\r\n\r\n self.history_x['Max_Shadow'] = [max(i) for i in self.history_shadow]\r\n self.history_x['Min_Shadow'] = [min(i) for i in self.history_shadow]\r\n self.history_x['Mean_Shadow'] = [np.nanmean(i) for i in self.history_shadow]\r\n self.history_x['Median_Shadow'] = [np.nanmedian(i) for i in self.history_shadow]\r\n self.history_x.dropna(axis=0,inplace=True)\r\n\r\n\r\n def results_to_csv(self, filename='feature_importance'):\r\n\r\n \"\"\"\r\n Saves the historical feature importance scores to csv.\r\n\r\n Parameters\r\n ----------\r\n filname : string\r\n used as the name for the outputed file.\r\n\r\n Returns\r\n -------\r\n comma delimnated file\r\n\r\n \"\"\"\r\n\r\n features = pd.DataFrame(data={'Features':self.history_x.iloc[1:].columns.values,\r\n 'Average Feature Importance':self.history_x.iloc[1:].mean(axis=0).values,\r\n 'Standard Deviation Importance':self.history_x.iloc[1:].std(axis=0).values})\r\n\r\n decision_mapper = self.create_mapping_of_features_to_attribute(maps=['Tentative','Rejected','Accepted', 'Shadow'])\r\n features['Decision'] = features['Features'].map(decision_mapper)\r\n features = features.sort_values(by='Average Feature Importance',ascending=False)\r\n\r\n features.to_csv(filename + '.csv', index=False)\r\n\r\n\r\n def remove_features_if_rejected(self):\r\n\r\n \"\"\"\r\n At each iteration if a feature has been rejected by the algorithm remove it from the process\r\n\r\n \"\"\"\r\n\r\n if len(self.features_to_remove) != 0:\r\n for feature in self.features_to_remove:\r\n try:\r\n self.X.drop(feature, axis = 1, inplace=True)\r\n except:\r\n pass\r\n\r\n else:\r\n pass\r\n\r\n\r\n @staticmethod\r\n def average_of_list(lst):\r\n return sum(lst) / len(lst)\r\n\r\n @staticmethod\r\n def flatten_list(array):\r\n return [item for sublist in array for item in sublist]\r\n\r\n\r\n def create_mapping_between_cols_and_indices(self):\r\n return dict(zip(self.X.columns.to_list(), np.arange(self.X.shape[1])))\r\n\r\n\r\n def calculate_hits(self):\r\n\r\n \"\"\"\r\n If a features importance is greater than the maximum importance value of all the random shadow\r\n features then we assign it a hit.\r\n\r\n Parameters\r\n ----------\r\n Percentile : value ranging from 0-1\r\n can be used to reduce value of the maximum value of the shadow features making the algorithm\r\n more lenient.\r\n\r\n \"\"\"\r\n\r\n shadow_threshold = np.percentile(self.Shadow_feature_import,\r\n self.percentile)\r\n\r\n padded_hits = np.zeros(self.ncols)\r\n hits = self.X_feature_import > shadow_threshold\r\n\r\n for (index, col) in enumerate(self.columns):\r\n map_index = 
self.order[col]\r\n padded_hits[map_index] += hits[index]\r\n\r\n return padded_hits\r\n\r\n\r\n def create_shadow_features(self):\r\n \"\"\"\r\n Creates the random shadow features by shuffling the existing columns.\r\n\r\n Returns:\r\n Datframe with random permutations of the original columns.\r\n \"\"\"\r\n self.X_shadow = self.X.apply(np.random.permutation)\r\n \r\n if isinstance(self.X_shadow, pd.DataFrame):\r\n # append\r\n obj_col = self.X_shadow.select_dtypes(\"object\").columns.tolist()\r\n if obj_col ==[] :\r\n pass\r\n else :\r\n self.X_shadow[obj_col] =self.X_shadow[obj_col].astype(\"category\")\r\n\r\n self.X_shadow.columns = ['shadow_' + feature for feature in self.X.columns]\r\n self.X_boruta = pd.concat([self.X, self.X_shadow], axis = 1)\r\n\r\n col_types = self.X_boruta.dtypes\r\n self.X_categorical = list(col_types[(col_types=='category' ) | (col_types=='object')].index)\r\n\r\n\r\n @staticmethod\r\n def calculate_Zscore(array):\r\n \"\"\"\r\n Calculates the Z-score of an array\r\n\r\n Parameters\r\n ----------\r\n array: array_like\r\n\r\n Returns:\r\n normalised array\r\n \"\"\"\r\n mean_value = np.mean(array)\r\n std_value = np.std(array)\r\n return [(element-mean_value)/std_value for element in array]\r\n\r\n\r\n def feature_importance(self, normalize):\r\n\r\n \"\"\"\r\n Caculates the feature importances scores of the model\r\n\r\n Parameters\r\n ----------\r\n importance_measure: string\r\n allows the user to choose either the Shap or Gini importance metrics\r\n\r\n normalize: boolean\r\n if true the importance values will be normalized using the z-score formula\r\n\r\n Returns:\r\n array of normalized feature importance scores for both the shadow and original features.\r\n\r\n Raise\r\n ----------\r\n ValueError:\r\n If no Importance measure was specified\r\n \"\"\"\r\n\r\n if self.importance_measure == 'shap':\r\n\r\n self.explain()\r\n vals = self.shap_values\r\n\r\n if normalize:\r\n vals = self.calculate_Zscore(vals)\r\n\r\n X_feature_import = vals[:len(self.X.columns)]\r\n Shadow_feature_import = vals[len(self.X_shadow.columns):]\r\n\r\n\r\n elif self.importance_measure == 'gini':\r\n\r\n feature_importances_ = np.abs(self.model.feature_importances_)\r\n\r\n if normalize:\r\n feature_importances_ = self.calculate_Zscore(feature_importances_)\r\n\r\n X_feature_import = feature_importances_[:len(self.X.columns)]\r\n Shadow_feature_import = feature_importances_[len(self.X.columns):]\r\n\r\n else:\r\n\r\n raise ValueError('No Importance_measure was specified select one of (shap, gini)')\r\n\r\n\r\n return X_feature_import, Shadow_feature_import\r\n\r\n\r\n @staticmethod\r\n def isolation_forest(X):\r\n '''\r\n fits isloation forest to the dataset and gives an anomally score to every sample\r\n '''\r\n clf = IsolationForest().fit(X)\r\n preds = clf.score_samples(X)\r\n return preds\r\n\r\n\r\n @staticmethod\r\n def get_5_percent(num):\r\n return round(5 / 100 * num)\r\n\r\n\r\n def get_5_percent_splits(self, length):\r\n '''\r\n splits dataframe into 5% intervals\r\n '''\r\n five_percent = self.get_5_percent(length)\r\n return np.arange(five_percent,length,five_percent)\r\n\r\n\r\n\r\n def find_sample(self):\r\n '''\r\n Finds a sample by comparing the distributions of the anomally scores between the sample and the original\r\n distribution using the KS-test. Starts of a 5% howver will increase to 10% and then 15% etc. 
if a significant sample can not be found\r\n '''\r\n loop = True\r\n iteration = 0\r\n size = self.get_5_percent_splits(self.X.shape[0])\r\n element = 1\r\n while loop:\r\n\r\n sample_indices = choice(np.arange(self.preds.size), size=size[element], replace=False)\r\n sample = np.take(self.preds, sample_indices)\r\n if ks_2samp(self.preds, sample).pvalue > 0.95:\r\n break\r\n\r\n if iteration == 20:\r\n element += 1\r\n iteration = 0\r\n\r\n\r\n return self.X_boruta.iloc[sample_indices]\r\n\r\n\r\n\r\n def explain(self):\r\n\r\n \"\"\"\r\n The shap package has numerous variants of explainers which use different assumptions depending on the model\r\n type this function allows the user to choose explainer\r\n\r\n Returns:\r\n shap values\r\n\r\n Raise\r\n ----------\r\n ValueError:\r\n if no model type has been specified tree as default\r\n \"\"\"\r\n\r\n\r\n explainer = shap.TreeExplainer(self.model, feature_perturbation = \"tree_path_dependent\")\r\n\r\n\r\n if self.sample:\r\n\r\n\r\n if self.classification:\r\n # for some reason shap returns values wraped in a list of length 1\r\n\r\n self.shap_values = np.array(explainer.shap_values(self.find_sample()))\r\n if isinstance(self.shap_values, list):\r\n\r\n class_inds = range(len(self.shap_values))\r\n shap_imp = np.zeros(self.shap_values[0].shape[1])\r\n for i, ind in enumerate(class_inds):\r\n shap_imp += np.abs(self.shap_values[ind]).mean(0)\r\n self.shap_values /= len(self.shap_values)\r\n\r\n elif len(self.shap_values.shape) == 3:\r\n self.shap_values = np.abs(self.shap_values).sum(axis=0)\r\n self.shap_values = self.shap_values.mean(0)\r\n\r\n else:\r\n self.shap_values = np.abs(self.shap_values).mean(0)\r\n\r\n else:\r\n self.shap_values = explainer.shap_values(self.find_sample())\r\n self.shap_values = np.abs(self.shap_values).mean(0)\r\n\r\n else:\r\n\r\n if self.classification:\r\n # for some reason shap returns values wraped in a list of length 1\r\n self.shap_values = np.array(explainer.shap_values(self.X_boruta))\r\n if isinstance(self.shap_values, list):\r\n\r\n class_inds = range(len(self.shap_values))\r\n shap_imp = np.zeros(self.shap_values[0].shape[1])\r\n for i, ind in enumerate(class_inds):\r\n shap_imp += np.abs(self.shap_values[ind]).mean(0)\r\n self.shap_values /= len(self.shap_values)\r\n\r\n elif len(self.shap_values.shape) == 3:\r\n self.shap_values = np.abs(self.shap_values).sum(axis=0)\r\n self.shap_values = self.shap_values.mean(0)\r\n\r\n else:\r\n self.shap_values = np.abs(self.shap_values).mean(0)\r\n\r\n else:\r\n self.shap_values = explainer.shap_values(self.X_boruta)\r\n self.shap_values = np.abs(self.shap_values).mean(0)\r\n\r\n\r\n\r\n @staticmethod\r\n def binomial_H0_test(array, n, p, alternative):\r\n \"\"\"\r\n Perform a test that the probability of success is p.\r\n This is an exact, two-sided test of the null hypothesis\r\n that the probability of success in a Bernoulli experiment is p\r\n \"\"\"\r\n return [binom_test(x, n=n, p=p, alternative=alternative) for x in array]\r\n\r\n\r\n @staticmethod\r\n def symetric_difference_between_two_arrays(array_one, array_two):\r\n set_one = set(array_one)\r\n set_two = set(array_two)\r\n return np.array(list(set_one.symmetric_difference(set_two)))\r\n\r\n\r\n @staticmethod\r\n def find_index_of_true_in_array(array):\r\n length = len(array)\r\n return list(filter(lambda x: array[x], range(length)))\r\n\r\n\r\n @staticmethod\r\n def bonferoni_corrections(pvals, alpha=0.05, n_tests=None):\r\n \"\"\"\r\n used to counteract the problem of multiple comparisons.\r\n 
\"\"\"\r\n pvals = np.array(pvals)\r\n\r\n if n_tests is None:\r\n n_tests = len(pvals)\r\n else:\r\n pass\r\n\r\n alphacBon = alpha / float(n_tests)\r\n reject = pvals <= alphacBon\r\n pvals_corrected = pvals * float(n_tests)\r\n return reject, pvals_corrected\r\n\r\n\r\n def test_features(self, iteration):\r\n\r\n \"\"\"\r\n For each feature with an undetermined importance perform a two-sided test of equality\r\n with the maximum shadow value to determine if it is statistcally better\r\n\r\n Parameters\r\n ----------\r\n hits: an array which holds the history of the number times\r\n this feature was better than the maximum shadow\r\n\r\n Returns:\r\n Two arrays of the names of the accepted and rejected columns at that instance\r\n \"\"\"\r\n\r\n acceptance_p_values = self.binomial_H0_test(self.hits,\r\n n=iteration,\r\n p=0.5,\r\n alternative='greater')\r\n\r\n regect_p_values = self.binomial_H0_test(self.hits,\r\n n=iteration,\r\n p=0.5,\r\n alternative='less')\r\n\r\n # [1] as function returns a tuple\r\n modified_acceptance_p_values = self.bonferoni_corrections(acceptance_p_values,\r\n alpha=0.05,\r\n n_tests=len(self.columns))[1]\r\n\r\n modified_regect_p_values = self.bonferoni_corrections(regect_p_values,\r\n alpha=0.05,\r\n n_tests=len(self.columns))[1]\r\n\r\n # Take the inverse as we want true to keep featrues\r\n rejected_columns = np.array(modified_regect_p_values) < self.pvalue\r\n accepted_columns = np.array(modified_acceptance_p_values) < self.pvalue\r\n\r\n rejected_indices = self.find_index_of_true_in_array(rejected_columns)\r\n accepted_indices = self.find_index_of_true_in_array(accepted_columns)\r\n\r\n rejected_features = self.all_columns[rejected_indices]\r\n accepted_features = self.all_columns[accepted_indices]\r\n\r\n\r\n self.features_to_remove = rejected_features\r\n\r\n\r\n self.rejected_columns.append(rejected_features)\r\n self.accepted_columns.append(accepted_features)\r\n\r\n\r\n def TentativeRoughFix(self):\r\n\r\n \"\"\"\r\n Sometimes no matter how many iterations are run a feature may neither be rejected or\r\n accepted. 
This method is used in this case to make a decision on a tentative feature\r\n by comparing its median importance value with the median max shadow value.\r\n\r\n Parameters\r\n ----------\r\n tentative: an array which holds the names of the tentative attiributes.\r\n\r\n Returns:\r\n Two arrays of the names of the final decision of the accepted and rejected columns.\r\n\r\n \"\"\"\r\n\r\n median_tentaive_values = self.history_x[self.tentative].median(axis=0).values\r\n median_max_shadow = self.history_x['Max_Shadow'].median(axis=0)\r\n\r\n\r\n filtered = median_tentaive_values > median_max_shadow\r\n\r\n self.tentative = np.array(self.tentative)\r\n newly_accepted = self.tentative[filtered]\r\n\r\n if len(newly_accepted) < 1:\r\n newly_rejected = self.tentative\r\n\r\n else:\r\n newly_rejected = self.symetric_difference_between_two_arrays(newly_accepted, self.tentative)\r\n\r\n print(str(len(newly_accepted)) + ' tentative features are now accepted: ' + str(newly_accepted))\r\n print(str(len(newly_rejected)) + ' tentative features are now rejected: ' + str(newly_rejected))\r\n\r\n self.rejected = self.rejected + newly_rejected.tolist()\r\n self.accepted = self.accepted + newly_accepted.tolist()\r\n\r\n\r\n\r\n def Subset(self, tentative=False):\r\n \"\"\"\r\n Returns the subset of desired features\r\n \"\"\"\r\n if tentative:\r\n return self.starting_X[self.accepted + self.tentative.tolist()]\r\n else:\r\n return self.starting_X[self.accepted]\r\n\r\n\r\n @staticmethod\r\n def create_list(array, color):\r\n colors = [color for x in range(len(array))]\r\n return colors\r\n\r\n @staticmethod\r\n def filter_data(data, column, value):\r\n data = data.copy()\r\n return data.loc[(data[column] == value) | (data[column] == 'Shadow')]\r\n\r\n\r\n @staticmethod\r\n def hasNumbers(inputString):\r\n return any(char.isdigit() for char in inputString)\r\n\r\n\r\n @staticmethod\r\n def check_if_which_features_is_correct(my_string):\r\n\r\n my_string = str(my_string).lower()\r\n if my_string in ['tentative','rejected','accepted','all']:\r\n pass\r\n\r\n else:\r\n raise ValueError(my_string + \" is not a valid value did you mean to type 'all', 'tentative', 'accepted' or 'rejected' ?\")\r\n\r\n\r\n\r\n def plot(self, X_rotation=90, X_size=8, figsize=(12,8),\r\n y_scale='log', which_features='all', display=True):\r\n\r\n \"\"\"\r\n creates a boxplot of the feature importances\r\n\r\n Parameters\r\n ----------\r\n X_rotation: int\r\n Controls the orientation angle of the tick labels on the X-axis\r\n\r\n X_size: int\r\n Controls the font size of the tick labels\r\n\r\n y_scale: string\r\n Log transform of the y axis scale as hard to see the plot as it is normally dominated by two or three\r\n features.\r\n\r\n which_features: string\r\n Despite efforts if the number of columns is large the plot becomes cluttered so this parameter allows you to\r\n select subsets of the features like the accepted, rejected or tentative features default is all.\r\n\r\n Display: Boolean\r\n controls if the output is displayed or not, set to false when running test scripts\r\n\r\n \"\"\"\r\n # data from wide to long\r\n data = self.history_x.iloc[1:]\r\n data['index'] = data.index\r\n data = pd.melt(data, id_vars='index', var_name='Methods')\r\n\r\n decision_mapper = self.create_mapping_of_features_to_attribute(maps=['Tentative','Rejected','Accepted', 'Shadow'])\r\n data['Decision'] = data['Methods'].map(decision_mapper)\r\n data.drop(['index'], axis=1, inplace=True)\r\n\r\n\r\n options = { 'accepted' : 
self.filter_data(data,'Decision', 'Accepted'),\r\n 'tentative': self.filter_data(data,'Decision', 'Tentative'),\r\n 'rejected' : self.filter_data(data,'Decision', 'Rejected'),\r\n 'all' : data\r\n }\r\n\r\n self.check_if_which_features_is_correct(which_features)\r\n data = options[which_features.lower()]\r\n\r\n self.box_plot(data=data,\r\n X_rotation=X_rotation,\r\n X_size=X_size,\r\n y_scale=y_scale,\r\n figsize=figsize)\r\n if display:\r\n plt.show()\r\n else:\r\n plt.close()\r\n\r\n\r\n def box_plot(self, data, X_rotation, X_size, y_scale, figsize):\r\n\r\n if y_scale=='log':\r\n minimum = data['value'].min()\r\n if minimum <= 0:\r\n data['value'] += abs(minimum) + 0.01\r\n\r\n order = data.groupby(by=[\"Methods\"])[\"value\"].mean().sort_values(ascending=False).index\r\n my_palette = self.create_mapping_of_features_to_attribute(maps= ['yellow','red','green','blue'])\r\n\r\n # Use a color palette\r\n plt.figure(figsize=figsize)\r\n ax = sns.boxplot(x=data[\"Methods\"], y=data[\"value\"],\r\n order=order, palette=my_palette)\r\n\r\n if y_scale == 'log':ax.set(yscale=\"log\")\r\n ax.set_xticklabels(ax.get_xticklabels(), rotation=X_rotation, size=X_size)\r\n ax.set_title('Feature Importance')\r\n ax.set_ylabel('Z-Score')\r\n ax.set_xlabel('Features')\r\n\r\n\r\n def create_mapping_of_features_to_attribute(self, maps = []):\r\n\r\n rejected = list(self.rejected)\r\n tentative = list(self.tentative)\r\n accepted = list(self.accepted)\r\n shadow = ['Max_Shadow','Median_Shadow','Min_Shadow','Mean_Shadow']\r\n\r\n tentative_map = self.create_list(tentative, maps[0])\r\n rejected_map = self.create_list(rejected, maps[1])\r\n accepted_map = self.create_list(accepted, maps[2])\r\n shadow_map = self.create_list(shadow, maps[3])\r\n\r\n values = tentative_map + rejected_map + accepted_map + shadow_map\r\n keys = tentative + rejected + accepted + shadow\r\n\r\n return self.to_dictionary(keys, values)\r\n\r\n\r\n @staticmethod\r\n def to_dictionary(list_one, list_two):\r\n return dict(zip(list_one, list_two))\r\n\r\n\r\n\r\ndef load_data(data_type='classification'):\r\n\r\n \"\"\"\r\n Load Example datasets for the user to try out the package\r\n \"\"\"\r\n\r\n data_type = data_type.lower()\r\n\r\n if data_type == 'classification':\r\n cancer = load_breast_cancer()\r\n X = pd.DataFrame(np.c_[cancer['data'], cancer['target']], columns = np.append(cancer['feature_names'], ['target']))\r\n y = X.pop('target')\r\n\r\n elif data_type == 'regression':\r\n boston = load_boston()\r\n X = pd.DataFrame(np.c_[boston['data'], boston['target']], columns = np.append(boston['feature_names'], ['target']))\r\n y = X.pop('target')\r\n\r\n else:\r\n raise ValueError(\"No data_type was specified, use either 'classification' or 'regression'\")\r\n\r\n\r\n return X, y\r\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "numpy.nanmedian", "scipy.stats.ks_2samp", "numpy.take", "pandas.DataFrame", "numpy.mean", "numpy.nanmean", "sklearn.datasets.load_boston", "pandas.melt", "sklearn.ensemble.RandomForestClassifier", "numpy.arange", "numpy.full", "numpy.std", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.figure", "pandas.concat", "numpy.isnan", "sklearn.model_selection.train_test_split", "numpy.append", "numpy.array", "matplotlib.pyplot.show", "sklearn.ensemble.IsolationForest", "sklearn.datasets.load_breast_cancer", "numpy.random.seed", "numpy.abs", "scipy.stats.binom_test", "numpy.percentile", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
b1skit/PyTorch-GAN
[ "1bdf9ba3d79434a39cd6b3e9b29d294081d21120" ]
[ "implementation/srgan/processImage.py" ]
[ "\"\"\"\nTest script: Use this to super-sample an image using a trained SRGAN model\n\nNote: This script will fail if network weights cannot be found.\n\"\"\"\n\nimport argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\nimport sys\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image, make_grid\n\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\nfrom models import *\nfrom datasets import *\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nimport time\nimport re\n\n\n# Main script functionality\ndef main(opt):\n \"\"\"\n opt is the result of ArgumentParser's parse_args()\n \"\"\"\n\n outputDir = \"processedOutput\"\n os.makedirs(outputDir, exist_ok=True)\n\n print(\"-------------------\")\n print(\"Processing results:\")\n print(\"-------------------\")\n \n cuda = torch.cuda.is_available()\n\n hr_shape = (opt.hr_height, opt.hr_width)\n\n # Count the number of unique residual layers mentioned in the generator state dict:\n generatorStateDict = torch.load(GetModelDataPath(\"generator\")) # Load the max trained weights from the /saved_models directory\n resBlocks = {}\n for key in generatorStateDict:\n processedKey = re.split(r'^(res_blocks\\.[0-9].)', key)\n if len(processedKey) > 1:\n resBlocks[processedKey[1]] = processedKey[1] # Insert an arbitrary entry: We just care about counting the unique keys\n\n num_residual_blocks = len(resBlocks)\n print(\"Counted \" + str(num_residual_blocks) + \" residual blocks in loaded generator state dict\")\n\n # Initialize generator and discriminator\n generator = GeneratorResNet(n_residual_blocks=num_residual_blocks)\n \n if cuda:\n print(\"Cuda is supported!!!\")\n torch.cuda.empty_cache()\n\n generator = generator.cuda()\n\n # Load pretrained models\n generator.load_state_dict(generatorStateDict)\n\n Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n\n\n #----------------\n # Process images:\n #----------------\n print(\"Processing images using the trained model:\")\n\n torch.cuda.empty_cache()\n\n testStartTime = time.time()\n totalTestTime = 0\n numTests = 0\n\n with torch.no_grad(): # Prevent OOM errors\n\n # Set models to eval mode, so batchnorm is disabled\n generator.eval()\n\n dataPath = GetDataPath(opt.valid_dataset_name)\n\n dataloader = DataLoader(\n ImageLoader(dataPath),\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_cpu,\n )\n\n # Process:\n for i, imgs in enumerate(dataloader):\n testStartTime = time.time()\n\n # Configure model input\n imgs_lr = Variable(imgs[\"img\"].type(Tensor))\n\n # Generate a high resolution image from low resolution input\n gen_hr = generator(imgs_lr)\n\n # --------------\n # Log Progress\n # --------------\n testTime = time.time() - testStartTime\n sys.stdout.write(\n \"[Processed image %d/%d] [Test time: %fs]\\n\"\n % (i, len(dataloader), testTime)\n )\n \n gen_hr = make_grid(gen_hr, nrow=1, normalize=True)\n\n save_image(gen_hr, GetArbitraryPath(outputDir) + (\"0\" if i < 10 else \"\") + \"%d.png\" % (i + 1), normalize=False)\n\n # Record the iteration time:\n totalTestTime = totalTestTime + testTime\n numTests = numTests + 1\n\n\n # ------------\n # Print stats:\n # ------------\n testTime = time.time() - testStartTime\n averageTestTime = totalTestTime / numTests\n\n print(\"\\Processing results:\\n-------------\")\n print(\"Total processing time = \" + str(testTime) + \" (secs) for \" + str(len(dataloader.dataset)) + \" test images\")\n print(\"Average processing time = 
\" + str(averageTestTime) + \" (secs)\")\n\n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epoch\", type=int, default=-1, help=\"epoch to load testing weights of. Loads the highest if argument is < 0\")\n parser.add_argument(\"--valid_dataset_name\", type=str, default=\"testImages\", help=\"name of the folder containing images to process\")\n parser.add_argument(\"--batch_size\", type=int, default=1, help=\"Number of images to process at once\")\n parser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\n parser.add_argument(\"--hr_height\", type=int, default=256, help=\"high res. image height\")\n parser.add_argument(\"--hr_width\", type=int, default=256, help=\"high res. image width\")\n parser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\n opt = parser.parse_args()\n print(opt)\n\n main(opt)\n " ]
[ [ "torch.no_grad", "torch.cuda.empty_cache", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nghia-vo/Savu
[ "1cf7343c141224643b2e1fb2f05e74448bc4fd58" ]
[ "savu/plugins/loaders/mapping_loaders/i22_loaders/i22_tomo_loader.py" ]
[ "# Copyright 2014 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. module:: i22_tomo_loader\n :platform: Unix\n :synopsis: A class for loading I22\n\n.. moduleauthor:: Aaron Parsons <[email protected]>\n\n\"\"\"\n\nfrom savu.plugins.utils import register_plugin\nfrom savu.plugins.loaders.base_loader import BaseLoader\nimport h5py\nimport logging\nimport numpy as np\n\n\n@register_plugin\nclass I22TomoLoader(BaseLoader):\n def __init__(self, name='I22TomoLoader'):\n super(I22TomoLoader, self).__init__(name)\n\n def setup(self):\n \"\"\"\n \"\"\"\n exp = self.exp\n data_obj = exp.create_data_object('in_data', 'tomo')\n data_obj.backing_file = \\\n h5py.File(exp.meta_data.get(\"data_file\"), 'r')\n data_obj.data = data_obj.backing_file['entry/result/data']\n data_obj.set_shape(data_obj.data.shape)\n logging.warning('the data as shape %s' % str(data_obj.data.shape))\n data_obj.set_axis_labels('y.units', 'x.units',\n 'rotation_angle.degrees', 'Q.angstrom^-1')\n\n data_obj.add_pattern('PROJECTION', core_dims=(1, 0), slice_dims=(2, 3))\n data_obj.add_pattern('SINOGRAM', core_dims=(2, 1), slice_dims=(0, 3))\n data_obj.add_pattern('SPECTRUM', core_dims=(3,), slice_dims=(0, 1, 2))\n\n mData = data_obj.meta_data\n mData.set(\"Q\", data_obj.backing_file['entry/result/q'][()])\n mData.set(\"x\", np.arange(data_obj.data.shape[1]))\n mData.set(\"y\", np.arange(data_obj.data.shape[0]))\n mData.set(\"rotation_angle\", data_obj.backing_file[\n 'entry/result/theta'][()])\n\n self.set_data_reduction_params(data_obj)\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
blurry-mood/g2net
[ "62b9c235960844893a220e3f630adac6cca5f6e8" ]
[ "gnet/model/litmodel.py" ]
[ "import pytorch_lightning as pl\nimport torch\n\nfrom .model import model, Paper\nfrom ..utils import get_logger\nfrom ..preprocessing.preprocesser import Preprocessor\n\nfrom torch import nn\n\nfrom deepblocks.loss import FocalLoss, AUCLoss, AUCMarginLoss\nfrom torchmetrics import AUROC, Accuracy, F1\nfrom transformers import AdamW, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\nfrom torch.optim import Adam, SGD\nfrom torch.optim.lr_scheduler import StepLR\n\n_logger = get_logger()\n\n_LOSS = {'celoss': nn.CrossEntropyLoss,\n 'focalloss': FocalLoss, 'bceloss': nn.BCEWithLogitsLoss, 'aucloss': AUCLoss, 'aucmarginloss': AUCMarginLoss}\n_OPT = {'adamw': AdamW, 'adam': Adam, 'sgd': SGD}\n_SCHEDULER = {'linear': get_linear_schedule_with_warmup, 'constant':get_constant_schedule_with_warmup,\n 'step': StepLR, 'cosine': get_cosine_schedule_with_warmup}\n\nclass LitModel(pl.LightningModule):\n\n def __init__(self, config, preprocess_config_name:str):\n super().__init__()\n\n self.save_hyperparameters()\n\n try:\n self.segment = config.segment\n except:\n self.segment = False\n\n self.preprocess = Preprocessor(preprocess_config_name)\n self.model = model(config.model_name,\n config.pretrained, config.num_classes)\n \n self.multi_cls = config.num_classes>1\n self.show_shape = True\n\n # choose loss\n self.loss = _LOSS[config.loss.name]\n if config.loss.args:\n self.loss = self.loss(**dict(config.loss.args))\n else:\n self.loss = self.loss()\n\n # metric\n self.train_auroc = AUROC(compute_on_step=False)\n self.val_auroc = AUROC(compute_on_step=False)\n self.val_acc = Accuracy(compute_on_step=True, threshold=0.3 )\n self.val_f1 = F1(compute_on_step=True, threshold=0.3 )\n\n # log\n _logger.info('The model is created')\n\n def configure_optimizers(self):\n opt = _OPT[self.hparams.config.optimizer.name](\n self.parameters(), **dict(self.hparams.config.optimizer.args))\n scheduler = _SCHEDULER[self.hparams.config.scheduler.name](\n opt, **dict(self.hparams.config.scheduler.args))\n return [opt], [scheduler]\n\n def forward(self, x):\n x = self.preprocess(x)\n x = x[..., :-1]\n x = self.model(x)\n if self.segment:\n x = x.mean(dim=[2, 3])\n return x\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n\n if not self.multi_cls:\n y = y.unsqueeze(1)\n\n probs = torch.softmax(y_hat, dim=1)[:, 1] if self.multi_cls else torch.sigmoid(y_hat)\n\n loss = self.loss(y_hat, y if self.multi_cls else y.float() )\n self.train_auroc(probs, y)\n\n self.log('train_loss', loss, prog_bar=True)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n\n if self.show_shape:\n self.show_shape = False\n _logger.info(\n f'Raw input shape: {x.shape}, mean: {x.mean()}, std: {x.std()}')\n xx = self.preprocess(x)\n _logger.info(\n f'Preprocessed input shape: {xx.shape}, mean: {xx.mean()}, std: {xx.std()}')\n \n y_hat = self(x)\n\n if not self.multi_cls:\n y = y.unsqueeze(1)\n\n probs = torch.softmax(y_hat, dim=1)[:, 1] if self.multi_cls else torch.sigmoid(y_hat)\n\n loss = self.loss(y_hat, y if self.multi_cls else y.float() )\n self.val_auroc(probs, y)\n\n # logs\n self.log('val_loss', loss, prog_bar=True)\n self.log('val_acc', self.val_acc(probs, y), prog_bar=True)\n self.log('val_f1', self.val_f1(probs, y), prog_bar=True)\n self.val_acc.reset()\n self.val_f1.reset()\n return loss\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n\n if not self.multi_cls:\n y = y.unsqueeze(1)\n\n probs = 
torch.softmax(y_hat, dim=1)[:, 1] if self.multi_cls else torch.sigmoid(y_hat)\n\n loss = self.loss(y_hat, y if self.multi_cls else y.float() )\n self.val_auroc(probs, y)\n\n # logs\n self.log('test_loss', loss, prog_bar=True, on_step=True)\n self.log('test_acc', self.val_acc(probs, y), prog_bar=True)\n self.log('test_f1', self.val_f1(probs, y), prog_bar=True)\n self.val_acc.reset()\n self.val_f1.reset()\n return loss\n\n def training_epoch_end(self, outputs) -> None:\n self.log('train_auroc', self.train_auroc.compute(), prog_bar=True)\n self.train_auroc.reset()\n\n def validation_epoch_end(self, outputs):\n self.log('val_auroc', self.val_auroc.compute(), prog_bar=True)\n self.val_auroc.reset()\n\n def test_epoch_end(self, outputs) -> None:\n self.log('test_auroc', self.val_auroc.compute(), prog_bar=True)\n self.val_auroc.reset()\n\n\nclass PredictLitModel(pl.LightningModule):\n\n def __init__(self, config, preprocess_config_name):\n super().__init__()\n\n self.config = config\n self.preprocess = Preprocessor(preprocess_config_name)\n self.model = model(config.model_name,\n config.pretrained, config.num_classes)\n self.softmax = config.num_classes == 2\n\n # log\n _logger.info('The model is created')\n\n def forward(self, x):\n x = self.preprocess(x)\n x = self.model(x)\n if self.softmax:\n x = torch.softmax(x, dim=1)[:, 1:]\n else:\n x = torch.sigmoid(x)\n return x[:,0]\n" ]
[ [ "torch.sigmoid", "torch.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hide-dog/kaggle_titanic
[ "010d8b5621a54e95df9162265b655b91eeee00cf" ]
[ "majority rule/majority_rule.py" ]
[ "import pandas as pd\nimport numpy as np\nimport glob\nimport csv\n\n# ------------------------------------------------\n# main\n# ------------------------------------------------\ndef main():\n ofile = \"majority_rule.csv\"\n \n fff = glob.glob(\"solution*\")\n\n y = np.loadtxt(fff[0], delimiter=',', skiprows = 1, usecols = (1), unpack=True)\n y_majority = np.zeros(len(y), dtype=np.int16)\n\n for i in range(len(fff)):\n x, y = np.loadtxt(fff[i], delimiter=',', skiprows = 1, usecols = (0, 1), unpack=True)\n\n for j in range(len(y)):\n y_majority[j] += y[j]\n \n \n for i in range(len(y_majority)):\n if len(fff)/2 <= y_majority[i]:\n y_majority[i] = 1\n else:\n y_majority[i] = 0\n\n # read file \n test = pd.read_csv(\"test.csv\")\n\n # get PassengerId\n pid = np.array(test[\"PassengerId\"]).astype(int)\n # combination of PassengerId and prediction\n solution = pd.DataFrame(y_majority, pid, columns = [\"Survived\"])\n # output .csv\n solution.to_csv(ofile, index_label = [\"PassengerId\"])\n \n # \n correct = pd.read_csv(\"correct.csv\")\n correct_s = correct[\"Survived\"].values\n \n score = 0.0\n for i in range(len(y_majority)):\n if correct_s[i] - y_majority[i] == 0.0:\n score += 1.0\n \n print( score / len(y_majority) )\n \n \n # output\n \"\"\"\n with open(ofile, 'wt', newline='') as f:\n writer = csv.writer(f)\n writer.writerow([\"PassengerId\", \"Survived\"])\n for i in range(len(y_majority)):\n writer.writerow([x[i], y_majority[i]])\n \"\"\"\n \n\n\n# ------------------------------------------------\n# execution\n# ------------------------------------------------\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.array", "pandas.read_csv", "numpy.loadtxt", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
CharLee674/rvisa_lightlab
[ "b43e36f3436b60c8c5f3088b4cb0896c5360aa4a" ]
[ "lightlab/util/data/one_dim.py" ]
[ "''' One-dimensional data structures with substantial processing abilities\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nfrom lightlab import logger\nfrom IPython import display\nimport lightlab.util.io as io\n\nfrom .peaks import findPeaks, ResonanceFeature\nfrom .basic import rms\nfrom .function_inversion import interpInverse\n\n\ndef prbs_generator(characteristic, state):\n ''' Generator of PRBS bits.\n\n Example:\n polynomial = 0b1000010001 # 1 + X^5 + X^9\n seed = 0b111100000\n\n The above parameters will give you a PRBS9 bit sequence.\n Note: it might be inverted compared to the official definition,\n i.e., 1s are 0s and vice versa.\n '''\n\n def compute_parity(n):\n parity = False\n while n > 0:\n parity ^= (n & 1)\n n >>= 1\n\n return parity # odd means True\n\n order = characteristic.bit_length() - 1\n while True:\n result = state & 1\n state += (compute_parity(state & characteristic) << order)\n state >>= 1\n yield result\n\n\ndef prbs_pattern(polynomial, seed, length=None):\n ''' Returns an array containing a sequence of a PRBS pattern.\n\n If length is not set, the sequence will be 2^n-1 long, corresponding\n to the repeating pattern of the PRBS sequence.\n '''\n order = polynomial.bit_length() - 1\n\n if length is None:\n length = 2 ** order - 1\n\n from itertools import islice\n prbs_pattern = list(islice(iter(prbs_generator(polynomial, seed)), length))\n return ~np.array(prbs_pattern, dtype=np.bool)\n\n\nclass MeasuredFunction(object): # pylint: disable=eq-without-hash\n ''' Array of x,y points.\n This is the workhorse class of ``lightlab`` data structures.\n Examples can be found throughout Test notebooks.\n\n Supports many kinds of operations:\n\n 1. Data access (``mf(x)``, ``len(mf)``, ``mf[i]``, :meth:`getData`)\n Calling the object with x-values interpolates and returns y-values.\n\n 2. Storage (:meth:`copy`, :meth:`save`, :meth:`load`, :meth:`loadFromFile`)\n see method docstrings\n\n 3. x-axis signal processing (:meth:`getSpan`, :meth:`crop`, :meth:`shift`, :meth:`flip`, :meth:`resample`, :meth:`uniformlySample`)\n see method docstrings\n\n 4. y-axis signal processing (:meth:`getRange`, :meth:`clip`, :meth:`debias`, :meth:`unitRms`, :meth:`getMean`, :meth:`moment`)\n see method docstrings\n\n 5. Advanced signal processing (:meth:`invert`, :meth:`lowPass`, :meth:`centerOfMass`, :meth:`findResonanceFeatures`)\n see method docstrings\n\n 6. Binary math (``+``, ``-``, ``*``, ``/``, ``==``)\n Operands must be either\n * the same subclass of MeasuredFunction, or\n * scalar numbers, or\n * functions/bound methods: these must be callable with one argument that is an ndarray\n\n If both are MeasuredFunction, the domain used will be the smaller of the two\n\n 7. Plotting (:meth:`simplePlot`)\n Args and Kwargs are passed to pyplot's plot function.\n Supports live plotting for notebooks\n\n 8. Others (:meth:`deleteSegment`, :meth:`splice`)\n see method docstrings\n '''\n\n # https://stackoverflow.com/questions/14619449/how-can-i-override-comparisons-between-numpys-ndarray-and-my-type\n __array_priority__ = 100 # have numpy call the __*add__ functions first instead of trying to iterate\n\n absc = None #: abscissa, a.k.a. the x-values or domain\n ordi = None #: ordinate, a.k.a. the y-values\n\n def __init__(self, abscissaPoints, ordinatePoints, unsafe=False):\n '''\n Args:\n abscissaPoints (array): abscissa, a.k.a. independent variable, a.k.a. domain\n ordinatePoints (array): ordinate, a.k.a. dependent variable, a.k.a. 
range\n unsafe (bool): if True, faster, give it 1-D np.ndarrays of the same length, or you will get weird errors later on\n '''\n if unsafe:\n self.absc = abscissaPoints\n self.ordi = ordinatePoints\n else:\n checkVals = [None, None]\n for iv, arr in enumerate((abscissaPoints, ordinatePoints)):\n if isinstance(arr, np.ndarray):\n if arr.ndim > 1:\n raise ValueError(\n 'Must be a one dimensional array. Got shape ' + str(arr.shape))\n if arr.ndim == 0:\n arr = np.array([arr])\n checkVals[iv] = arr.copy()\n elif isinstance(arr, (list, tuple)):\n checkVals[iv] = np.array(arr)\n elif np.isscalar(arr):\n checkVals[iv] = np.array([arr])\n else:\n raise TypeError('Unsupported type: ' + str(type(arr)) +\n '. Need np.ndarray, scalar, list, or tuple')\n self.absc, self.ordi = tuple(checkVals)\n if self.absc.shape != self.ordi.shape:\n raise ValueError('Shapes do not match. Got ' +\n str(self.absc.shape) + ' and ' + str(self.ordi.shape))\n\n # Data structuring and usage stuff\n\n def __call__(self, testAbscissa=None):\n ''' Interpolates the discrete function\n\n Args:\n testAbscissa (array): test points in the domain\n\n Returns:\n testOrdinate (array): interpolated output values\n '''\n if np.isscalar(self.absc):\n return self.ordi[0]\n return np.interp(testAbscissa, self.absc, self.ordi)\n\n def __len__(self):\n return len(self.absc)\n\n def __iter__(self):\n raise TypeError(\"{} is not iterable\".format(self.__class__.__qualname__))\n\n def __getitem__(self, sl):\n ''' Slice this function.\n\n Args:\n sl (int, slice): which indeces to pick out\n\n Returns:\n (MeasuredFunction/<childClass>): sliced version\n '''\n if type(sl) not in [int, slice]:\n raise ValueError('MeasuredFunction [] only works with integers and slices. ' +\n 'Got ' + str(sl) + ' (' + str(type(sl)) + ').')\n newAbsc = self.absc[sl]\n newOrdi = self.ordi[sl]\n return self.__newOfSameSubclass(newAbsc, newOrdi)\n\n def getData(self):\n ''' Gives a tuple of the enclosed array data.\n\n It is copied, so you can do what you want with it\n\n Returns:\n tuple(array,array): the enclosed data\n '''\n return np.copy(self.absc), np.copy(self.ordi)\n\n def copy(self):\n ''' Gives a copy, so that further operations can be performed without side effect.\n\n Returns:\n (MeasuredFunction/<childClass>): new object with same properties\n '''\n return self.__newOfSameSubclass(self.absc, self.ordi)\n\n def save(self, savefile):\n io.saveMat(savefile, {'absc': self.absc, 'ordi': self.ordi})\n\n @classmethod\n def load(cls, savefile):\n dataDict = io.loadMat(savefile)\n absc = dataDict['absc']\n ordi = dataDict['ordi']\n return cls(absc, ordi)\n\n def simplePlot(self, *args, livePlot=False, **kwargs):\n r''' Plots on the current axis\n\n Args:\n livePlot (bool): if True, displays immediately in IPython notebook\n \\*args (tuple): arguments passed through to ``pyplot.plot``\n \\*\\*kwargs (dict): arguments passed through to ``pyplot.plot``\n\n Returns:\n Whatever is returned by ``pyplot.plot``\n '''\n curve = plt.plot(*(self.getData() + args), **kwargs)\n plt.autoscale(enable=True, axis='x', tight=True)\n if 'label' in kwargs.keys():\n plt.legend()\n if livePlot:\n display.display(plt.gcf())\n display.clear_output(wait=True)\n return curve\n\n # Simple data handling operations\n\n def __newOfSameSubclass(self, newAbsc, newOrdi):\n ''' Helper functions that ensures proper inheritance of other methods.\n\n Returns a new object of the same type and\n with the same metadata (i.e. 
everything but absc and ordi) as self.\n\n This is useful for many kinds of operations where the returned\n MeasuredFunction or ChildClass is further processed\n\n Args:\n newAbsc (array): abscissa of new MeasuredFunction\n newOrdi (array): ordinate of new MeasuredFunction\n\n Returns:\n (MeasuredFunction): new object, which is a child class of MeasuredFunction\n '''\n newObj = type(self)(newAbsc.copy(), newOrdi.copy(), unsafe=True)\n for attr, val in self.__dict__.items():\n if attr not in ['absc', 'ordi']:\n newObj.__dict__[attr] = val\n return newObj\n\n def subsample(self, newAbscissa):\n ''' Returns a new MeasuredFunction sampled at given points.\n '''\n new_ordi = self.__call__(newAbscissa)\n return self.__newOfSameSubclass(newAbscissa, new_ordi)\n\n def getSpan(self):\n ''' The span of the domain\n\n Returns:\n (list[float,float]): the minimum and maximum abscissa points\n '''\n return [min(self.absc), max(self.absc)]\n\n def abs(self):\n ''' Computes the absolute value of the measured function.\n '''\n return abs(self)\n\n def mean(self):\n return self.getMean()\n\n def max(self):\n ''' Returns the maximum value of the ordinate axis, ignoring NaNs.'''\n return np.nanmax(self.ordi)\n\n def argmax(self):\n ''' Returns the abscissa value at which the ordinate is maximum. '''\n return self.absc[np.argmax(self.ordi)]\n\n def min(self):\n ''' Returns the minimum value of the ordinate axis, ignoring NaNs.'''\n return np.nanmin(self.ordi)\n\n def argmin(self):\n ''' Returns the abscissa value at which the ordinate is minimum. '''\n return self.absc[np.argmin(self.ordi)]\n\n def getRange(self):\n ''' The span of the ordinate\n\n Returns:\n (list[float,float]): the minimum and maximum values\n '''\n return [min(self.ordi), max(self.ordi)]\n\n def crop(self, segment):\n ''' Crop abscissa to segment domain.\n\n Args:\n segment (list[float,float]): the span of the new abscissa domain\n\n Returns:\n MeasuredFunction: new object\n '''\n min_segment, max_segment = segment\n absc_span = self.getSpan()\n\n if min_segment is None:\n min_segment = absc_span[0]\n\n if max_segment is None:\n max_segment = absc_span[1]\n\n # Just in case the user accidentally flipped the segment order\n min_segment, max_segment = min(min_segment, max_segment), max(min_segment, max_segment)\n\n if min_segment <= absc_span[0] and max_segment >= absc_span[1]:\n # do nothing\n return self.copy()\n dx = np.mean(np.diff(np.sort(self.absc)))\n newAbsc = np.arange(min_segment, max_segment, dx)\n return self.__newOfSameSubclass(newAbsc, self(newAbsc))\n\n def clip(self, amin, amax):\n ''' Clip ordinate to min/max range\n\n Args:\n amin (float): minimum value allowed in the new MeasuredFunction\n amax (float): maximum value allowed in the new MeasuredFunction\n\n Returns:\n MeasuredFunction: new object\n '''\n return self.__newOfSameSubclass(self.absc, np.clip(self.ordi, amin, amax))\n\n def shift(self, shiftBy):\n ''' Shift abscissa. 
Good for biasing wavelengths.\n\n Args:\n shiftBy (float): the number that will be added to the abscissa\n\n Returns:\n MeasuredFunction: new object\n '''\n return self.__newOfSameSubclass(self.absc + shiftBy, self.ordi)\n\n def flip(self):\n ''' Flips the abscissa, BUT DOES NOTHING the ordinate.\n\n Usually, this is meant for spectra centered at zero.\n I.e.: flipping would be the same as negating abscissa\n\n Returns:\n MeasuredFunction: new object\n '''\n return self.__newOfSameSubclass(self.absc[::-1], self.ordi)\n\n def reverse(self):\n ''' Flips the ordinate, keeping abscissa in order\n\n Returns:\n MeasuredFunction: new object\n '''\n return self.__newOfSameSubclass(self.absc, self.ordi[::-1])\n\n def debias(self):\n ''' Removes mean from the function\n\n Returns:\n MeasuredFunction: new object\n '''\n bias = np.mean(self.ordi)\n return self.__newOfSameSubclass(self.absc, self.ordi - bias)\n\n def unitRms(self):\n ''' Returns function with unit RMS or power\n '''\n rmsVal = rms(self.debias().ordi)\n return self * (1 / rmsVal)\n\n def getMean(self):\n return np.mean(self.ordi)\n\n def getMedian(self):\n return np.median(self.ordi)\n\n def getVariance(self):\n return np.var(self.ordi)\n\n def getStd(self):\n return np.std(self.ordi)\n\n def resample(self, nsamp=100):\n ''' Resample over the same domain span, but with a different number of points.\n\n Args:\n nsamp (int): number of samples in the new object\n\n Returns:\n MeasuredFunction: new object\n '''\n newAbsc = np.linspace(*self.getSpan(), int(nsamp))\n return self.__newOfSameSubclass(newAbsc, self(newAbsc))\n\n def uniformlySample(self):\n ''' Makes sure samples are uniform\n\n Returns:\n MeasuredFunction: new object\n '''\n dxes = np.diff(self.absc)\n if all(dxes == dxes[0]):\n return self\n else:\n return self.resample(len(self))\n\n def addPoint(self, xyPoint):\n ''' Adds the (x, y) point to the stored absc and ordi\n\n Args:\n xyPoint (tuple): x and y values to be inserted\n\n Returns:\n None: it modifies this object\n '''\n x, y = xyPoint\n for i in range(len(self)):\n if x < self.absc[i]:\n break\n else:\n i = len(self)\n self.absc = np.insert(self.absc, i, x)\n self.ordi = np.insert(self.ordi, i, y)\n\n # Signal processing stuff\n\n def correlate(self, other):\n ''' Correlate signals with scipy.signal.correlate.\n\n Only full mode and direct method is supported for now.\n '''\n new_abscissa = type(self)._maxAbsc(self, other)\n\n # ensure that they are uniformly sampled\n dxes = np.diff(new_abscissa)\n dx = dxes[0]\n assert np.allclose(dxes, dx) # sometimes there are numerical errors in floats\n\n N = len(new_abscissa)\n\n from scipy.signal import correlate\n self_ordi, other_ordi = self(new_abscissa), other(new_abscissa)\n self_ordi_norm = (self_ordi - np.mean(self_ordi)) / np.std(self_ordi)\n self_ordi_norm /= np.linalg.norm(self_ordi_norm)\n other_ordi_norm = (other_ordi - np.mean(other_ordi))\n other_ordi_norm /= np.linalg.norm(other_ordi_norm)\n\n correlated_ordi = correlate(self_ordi_norm,\n other_ordi_norm,\n mode=\"full\", method=\"direct\")\n offset_abscissa = np.arange(-N + 1, N, 1) * dx\n return self.__newOfSameSubclass(offset_abscissa, correlated_ordi)\n\n def lowPass(self, windowWidth=None, mode=None):\n if mode is not None:\n logger.warn(\"lowPass was renamed to movingAverage. 
Now it is an actual Butterworth low-pass filter.\")\n return self.lowPassButterworth(1 / windowWidth)\n\n def movingAverage(self, windowWidth=None, mode='valid'):\n ''' Low pass filter performed by convolving a moving average window.\n\n The convolutional ``mode`` can be one of the following string tokens\n * 'valid': the new span is reduced, but data is good looking\n * 'same': new span is the same as before, but there are edge artifacts\n\n Args:\n windowWidth (float): averaging window width in units of the abscissa\n mode (str): convolutional mode\n\n Returns:\n MeasuredFunction: new object\n '''\n if windowWidth is None:\n windowWidth = (max(self.absc) - min(self.absc)) / 10\n dx = abs(np.diff(self.absc[0:2])[0])\n windPts = np.int(windowWidth / dx)\n if windPts % 2 == 0: # Make sure windPts is odd so that basis doesn't shift\n windPts += 1\n if windPts >= np.size(self.ordi):\n raise Exception('windowWidth is ' + str(windPts) +\n ' wide, which is bigger than the data itself (' + str(np.size(self.ordi)) + ')')\n\n filt = np.ones(windPts) / windPts\n invalidIndeces = int((windPts - 1) / 2)\n\n if mode == 'valid':\n newAbsc = self.absc[invalidIndeces:-invalidIndeces].copy()\n newOrdi = np.convolve(filt, self.ordi, mode='valid')\n elif mode == 'same':\n newAbsc = self.absc.copy()\n newOrdi = self.ordi.copy()\n newOrdi[invalidIndeces:-invalidIndeces] = np.convolve(filt, self.ordi, mode='valid')\n return self.__newOfSameSubclass(newAbsc, newOrdi)\n\n def butterworthFilter(self, fc, order, btype):\n ''' Applies a Butterworth filter to the signal.\n\n Side effects: the waveform will be resampled to have equally-sampled points.\n\n Args:\n fc (float): cutoff frequency of the filter (cf. input to signal.butter)\n\n Returns:\n New object containing the filtered waveform\n '''\n\n uniformly_sampled = self.uniformlySample()\n x, y = uniformly_sampled.absc, uniformly_sampled.ordi\n dxes = np.diff(x)\n sampling_rate = 1 / dxes[0]\n fc = np.array(fc)\n\n b, a = signal.butter(order, fc * 2, btype, fs=sampling_rate) # construct the filter\n # compute initial condition such that the filtered y starts with the same value as y\n zi = signal.lfilter_zi(b, a)\n\n # applies the filter to the ordinate y if it is a low pass filter\n if btype.startswith('low'):\n ordi_filtered, _ = signal.lfilter(b, a, y, zi=zi * y[0])\n # cheat and debias the signal prior to high pass filtering\n # this prevents the initial filtered signal to start from zero\n else:\n mean_y = np.mean(y)\n ordi_filtered, _ = signal.lfilter(b, a, y - mean_y, zi=zi * 0)\n\n uniformly_sampled.ordi = ordi_filtered\n return uniformly_sampled\n\n def lowPassButterworth(self, fc, order=1):\n ''' Applies a low-pass Butterworth filter to the signal.\n\n Side effects: the waveform will be resampled to have equally-sampled points.\n\n Args:\n fc (float): cutoff frequency of the filter\n\n Returns:\n New object containing the filtered waveform\n '''\n\n return self.butterworthFilter(fc, order, 'lowpass')\n\n def highPassButterworth(self, fc, order=1):\n ''' Applies a high-pass Butterworth filter to the signal.\n\n Side effects: the waveform will be resampled to have equally-sampled points.\n\n Args:\n fc (float): cutoff frequency of the filter\n\n Returns:\n New object containing the filtered waveform\n '''\n\n return self.butterworthFilter(fc, order, 'highpass')\n\n def bandPassButterworth(self, fc, order=1):\n ''' Applies a high-pass Butterworth filter to the signal.\n\n Side effects: the waveform will be resampled to have equally-sampled points.\n\n 
Args:\n fc (length-2 float sequence): cutoff frequency of the filter\n\n Returns:\n New object containing the filtered waveform\n '''\n\n return self.butterworthFilter(fc, order, 'bandpass')\n\n def deleteSegment(self, segment):\n ''' Removes the specified segment from the abscissa.\n\n This means calling within this segment will give the first-order interpolation of its edges.\n\n Usually, deleting is followed by splicing in some new data in this span\n\n Args:\n segment (list[float,float]): span over which to delete stored points\n\n Returns:\n MeasuredFunction: new object\n '''\n nonNullInds = np.logical_or(self.absc < min(segment), self.absc > max(segment))\n newAbsc = self.absc[nonNullInds]\n return self.__newOfSameSubclass(newAbsc, self(newAbsc))\n\n def splice(self, other, segment=None):\n ''' Returns a Spectrum that is this one,\n except with the segment replaced with the other one's data\n\n The abscissa of the other matters.\n There is nothing changing (abscissa, ordinate) point pairs,\n only moving them around from ``other`` to ``self``.\n\n If segment is not specified, uses the full domain of the other\n\n Args:\n other (MeasuredFunction): the origin of new data\n segment (list[float,float]): span over which to do splice stored points\n\n Returns:\n MeasuredFunction: new object\n '''\n if segment is None:\n segment = other.getSpan()\n spliceInds = np.logical_and(self.absc > min(segment), self.absc < max(segment))\n newOrdi = self.ordi.copy()\n newOrdi[spliceInds] = other(self.absc[spliceInds])\n return self.__newOfSameSubclass(self.absc, newOrdi)\n\n def invert(self, yVals, directionToDescend=None):\n ''' Descends down the function until yVal is reached in ordi. Returns the absc value\n\n If the function is peaked, you should specify a direction to descend.\n\n If the function is approximately monotonic, don't worry about it.\n\n Args:\n yVals (scalar, ndarray): array of y values to descend to\n directionToDescend (['left', 'right', None]): use if peaked function to tell which side.\n Not used if monotonic\n\n Returns:\n (scalar, ndarray): corresponding x values\n '''\n maxInd = np.argmax(self.ordi)\n minInd = np.argmin(self.ordi)\n if directionToDescend is None:\n if minInd < maxInd:\n directionToDescend = 'left'\n else:\n directionToDescend = 'right'\n if np.isscalar(yVals):\n yValArr = np.array([yVals])\n else:\n yValArr = yVals\n xValArr = np.zeros(yValArr.shape)\n for iVal, y in enumerate(yValArr):\n xValArr[iVal] = interpInverse(*self.getData(),\n startIndex=maxInd,\n direction=directionToDescend,\n threshVal=y)\n if np.isscalar(yVals):\n return xValArr[0]\n else:\n return xValArr\n\n def centerOfMass(self):\n ''' Returns abscissa point where mass is centered '''\n deb = self.debias().clip(0, None)\n weighted = np.multiply(*deb.getData())\n com = np.sum(weighted) / np.sum(deb.ordi)\n return com\n\n def moment(self, order=2, relativeGauss=False):\n ''' The order'th moment of the function\n\n Args:\n order (integer): the polynomial moment of inertia. 
Don't trust the normalization of > 2'th order.\n order = 1: mean\n order = 2: variance\n order = 3: skew\n order = 4: kurtosis\n\n Returns:\n (float): the specified moment\n '''\n mean = np.mean(self.ordi)\n if order == 1:\n return mean\n variance = np.mean(np.power(self.ordi - mean, 2))\n if order == 2:\n return variance\n if order == 4:\n kurtosis = np.mean(np.power(self.ordi - mean, 4))\n kurtosis /= variance ** 2\n if relativeGauss:\n kurtosis -= 3\n return kurtosis\n\n def findResonanceFeatures(self, **kwargs):\n r''' A convenient wrapper for :func:`~lightlab.util.data.peaks.findPeaks`\n\n Args:\n \\*\\*kwargs: passed to :func:`~lightlab.util.data.peaks.findPeaks`\n\n Returns:\n list[ResonanceFeature]: the detected features as nice objects\n '''\n mFun = self.uniformlySample()\n dLam = np.diff(mFun.getSpan())[0] / len(mFun)\n\n xArr, yArr = mFun.getData()\n\n # Use the class-free peakfinder on arrays\n pkInds, pkIndWids = findPeaks(yArr, **kwargs)\n\n # Translate back into units of the original MeasuredFunction\n pkLambdas = xArr[pkInds]\n pkAmps = yArr[pkInds]\n pkWids = pkIndWids * dLam\n\n # Package into resonance objects\n try:\n isPeak = kwargs['isPeak']\n except KeyError:\n isPeak = True\n resonances = np.empty(len(pkLambdas), dtype=object)\n for iPk in range(len(resonances)):\n resonances[iPk] = ResonanceFeature(\n pkLambdas[iPk], pkWids[iPk], pkAmps[iPk], isPeak=isPeak)\n return resonances\n\n # Mathematics\n\n def __binMathHelper(self, other):\n ''' returns the new abcissa and a tuple of arrays: the ordinates to operate on\n '''\n try:\n ab = other.absc\n except AttributeError: # in other.absc\n pass\n else:\n if np.all(ab == self.absc):\n newAbsc = self.absc\n ords = (self.ordi, other.ordi)\n else:\n newAbsc = type(self)._minAbsc(self, other)\n ords = (self(newAbsc), other(newAbsc))\n return newAbsc, ords\n\n newAbsc = self.absc\n try:\n other = float(other)\n except TypeError: # not an int, float, or np.ndarry with all singleton dimensions\n pass\n else:\n ords = (self.ordi, other * np.ones(len(newAbsc)))\n return newAbsc, ords\n\n try:\n othOrd = other(newAbsc)\n except TypeError: # not callable\n pass\n else:\n ords = (self.ordi, othOrd)\n return newAbsc, ords\n\n # time to fail\n if isinstance(other, np.ndarray):\n raise TypeError('Cannot do binary math with MeasuredFunction and numpy array')\n for obj in (self, other):\n if isinstance(obj.ordi, MeasuredFunction):\n raise TypeError('You have an ordinate that is a MeasuredFunction!' +\n ' This is a common error. 
It\\'s in ' + str(obj))\n raise TypeError('Unsupported types for binary math: ' +\n type(self).__name__ + ', ' + type(other).__name__)\n\n def norm(self, ord=None):\n # TODO recompute norm taking into account the possibility that the\n # abscissa is not uniformly sampled.\n return np.linalg.norm(self.ordi - np.mean(self.ordi), ord=ord)\n\n @staticmethod\n def _minAbsc(fa, fb):\n ''' Get the overlapping abscissa of two MeasuredFunctions.\n '''\n fa_span = fa.getSpan()\n fb_span = fb.getSpan()\n\n min_absc, max_absc = [max(fa_span[0], fb_span[0]), min(fa_span[1], fb_span[1])]\n dxa = np.mean(np.abs(np.diff(np.sort(fa.absc))))\n dxb = np.mean(np.abs(np.diff(np.sort(fb.absc))))\n new_dx = min(dxa, dxb)\n\n newAbsc = np.arange(min_absc, max_absc + new_dx, new_dx)\n return newAbsc\n\n @staticmethod\n def _maxAbsc(fa, fb):\n \"\"\" Gets a compact abscissa that includes the domains of both fa and fb.\n\n Assumes that the returned abscissa is uniformly sampled.\n self.correlate depends on this assumption.\n \"\"\"\n fa_span = fa.getSpan()\n fb_span = fb.getSpan()\n\n min_absc, max_absc = [min(fa_span[0], fb_span[0]), max(fa_span[1], fb_span[1])]\n dxa = np.mean(np.abs(np.diff(np.sort(fa.absc))))\n dxb = np.mean(np.abs(np.diff(np.sort(fb.absc))))\n new_dx = min(dxa, dxb)\n\n newAbsc = np.arange(min_absc, max_absc + new_dx, new_dx)\n return newAbsc\n\n def __sub__(self, other):\n ''' Returns the subtraction of the two functions, in the domain of the shortest abscissa object.\n The other object can also be a scalar '''\n newAbsc, ords = self.__binMathHelper(other)\n return self.__newOfSameSubclass(newAbsc, ords[0] - ords[1])\n\n def __rsub__(self, other):\n newAbsc, ords = self.__binMathHelper(other)\n return self.__newOfSameSubclass(newAbsc, ords[1] - ords[0])\n\n def __add__(self, other):\n ''' Returns the subtraction of the two functions, in the domain of the shortest abscissa object.\n The other object can also be a scalar '''\n newAbsc, ords = self.__binMathHelper(other)\n return self.__newOfSameSubclass(newAbsc, ords[0] + ords[1])\n\n def __abs__(self):\n ''' Returns a new object where the abscissa contains the absolute value of the old one.\n '''\n abs_ordi = np.abs(self.ordi)\n return self.__newOfSameSubclass(self.absc, abs_ordi)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __mul__(self, other):\n ''' Returns the product of the two functions, in the domain of the shortest abscissa object.\n The other object can also be a scalar '''\n newAbsc, ords = self.__binMathHelper(other)\n return self.__newOfSameSubclass(newAbsc, ords[0] * ords[1])\n\n def __pow__(self, power):\n ''' Returns the result of exponentiation. 
Can only exponentiate\n with a float number.\n '''\n\n absc = self.absc.copy()\n ordi = self.ordi.copy()\n try:\n new_ordi = ordi ** power # uses numpy's power overload\n except ValueError as err:\n raise ValueError(\"Invalid power {} (not a number)\".format(power)) from err\n\n return self.__newOfSameSubclass(absc, new_ordi)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __truediv__(self, other):\n return self * (1 / other)\n\n def __eq__(self, other):\n if isinstance(self, type(other)):\n return np.all(self.absc == other.absc) and np.all(self.ordi == other.ordi)\n return self.ordi == other\n\n def __repr__(self):\n try:\n return \"{}({:d} pts)\".format(self.__class__.__qualname__, len(self))\n except TypeError: # len(self fails)\n return \"{}({:f},{:f})\".format(self.__class__.__qualname__, self.absc, self.ordi)\n\n\nclass Spectrum(MeasuredFunction):\n ''' Adds handling of linear/dbm units.\n\n Use :meth:`lin` and :meth:`dbm` to make sure what you're getting\n what you expect for things like binary math and peakfinding, etc.\n '''\n\n def __init__(self, nm, power, inDbm=True, unsafe=False):\n '''\n Args:\n nm (array): abscissa\n power (array): ordinate\n inDbm (bool): is the ``power`` in linear or dbm units?\n '''\n super().__init__(nm, power, unsafe=unsafe)\n self._inDbm = inDbm\n\n @property\n def inDbm(self):\n ''' Is it in dbm units currently?\n\n Returns:\n bool:\n '''\n try:\n return self._inDbm\n except AttributeError:\n self._inDbm = self._Spectrum__inDbm\n return self._inDbm\n\n def lin(self):\n ''' The spectrum in linear units\n\n Returns:\n Spectrum: new object\n '''\n if not self.inDbm:\n return type(self)(self.absc.copy(), self.ordi.copy(), inDbm=False)\n else:\n return type(self)(self.absc.copy(), 10 ** (self.ordi.copy() / 10), inDbm=False)\n\n def db(self):\n ''' The spectrum in decibel units\n\n Returns:\n Spectrum: new object\n '''\n if self.inDbm:\n return type(self)(self.absc.copy(), self.ordi.copy(), inDbm=True)\n else:\n clippedOrdi = np.clip(self.ordi, 1e-12, None)\n return type(self)(self.absc.copy(), 10 * np.log10(clippedOrdi), inDbm=True)\n\n def __binMathHelper(self, other):\n ''' Adds a check to make sure lin/db is in the same state '''\n if type(other) is type(self) and other.inDbm is not self.inDbm:\n raise Exception('Can not do binary math on Spectra in different formats')\n return super().__binMathHelper(other)\n\n def simplePlot(self, *args, livePlot=False, **kwargs):\n ''' More often then not, this is db vs. wavelength, so label it\n '''\n super().simplePlot(*args, livePlot=livePlot, **kwargs)\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Transmission ({})'.format('dB' if self.inDbm else 'lin'))\n\n # Peak and trough related\n\n def refineResonanceWavelengths(self, filtShapes, seedRes=None, isPeak=None):\n ''' Convolutional resonance correction to get very robust resonance wavelengths\n\n Does the resonance finding itself, unless an initial approximation is provided.\n\n Also, has some special options for ``Spectrum`` types to make sure db/lin is optimal\n\n Args:\n filtShapes (list[MeasuredFunction]): shapes of each resonance. Must be in order of ascending abscissa/wavelength\n seedRes (list[ResonanceFeature]): rough approximation of resonance properties. 
If None, this method will find them.\n isPeak (bool): required to do peak finding, but not used if ``seedRes`` is specified\n\n Returns:\n list[ResonanceFeature]: the detected and refined features as nice objects\n\n Todo:\n take advantage of fft convolution for speed\n '''\n if seedRes is None:\n if isPeak is None:\n raise Exception('If seed resonance is not specified, isPeak must be specified.')\n seedRes = self.findResonanceFeatures(expectedCnt=len(filtShapes), isPeak=isPeak)\n else:\n isPeak = seedRes[0].isPeak\n fineRes = np.array([r.copy() for r in seedRes])\n\n useFilts = filtShapes.copy()\n if type(self) == Spectrum:\n # For Spectrum objects only\n if isPeak:\n spectFun = self.lin()\n else:\n spectFun = 1 - self.lin()\n for i in range(len(filtShapes)):\n if type(filtShapes[i]).__name__ != 'Spectrum':\n raise Exception(\n 'If calling object is Spectrum, the filter shapes must also be Spectrum types')\n if isPeak:\n useFilts[i] = filtShapes[i].lin()\n else:\n useFilts[i] = 1 - filtShapes[i].lin()\n else:\n spectFun = self\n\n confidence = 1000\n for i, r in enumerate(fineRes):\n thisFilt = useFilts[i]\n cropWind = max(thisFilt.absc) * np.array([-1, 1])\n subSpect = spectFun.shift(-r.lam).crop(cropWind)\n basis = subSpect.absc\n convArr = np.convolve(subSpect(basis), thisFilt(basis)[::-1], 'same')\n lamOffset = basis[np.argmax(convArr)]\n fineRes[i].lam = r.lam + lamOffset\n thisConf = np.max(convArr) / np.sum(thisFilt(basis) ** 2)\n confidence = min(confidence, thisConf)\n return fineRes, confidence\n\n def findResonanceFeatures(self, **kwargs):\n r''' Overloads :meth:`.MeasuredFunction.findResonanceFeatures` to make sure it's in db scale\n\n Args:\n \\*\\*kwargs: kwargs passed to :mod:`~lightlab.util.data.peaks.findPeaks`\n\n Returns:\n list[ResonanceFeature]: the detected features as nice objects\n '''\n kwargs['isDb'] = True\n return MeasuredFunction.findResonanceFeatures(self.db(), **kwargs)\n\n def GHz(self):\n ''' Convert to SpectrumGHz '''\n GHz = 299_792_458 / self.absc\n ordi = np.copy(self.ordi)\n return SpectrumGHz(GHz, ordi, inDbm=self.inDbm)\n\n\nclass SpectrumGHz(Spectrum):\n ''' Spectrum with GHz units in the abscissa\n\n Use :meth:`lin` and :meth:`dbm` to make sure what you're getting\n what you expect for things like binary math and peakfinding, etc.\n '''\n\n def __init__(self, GHz, power, inDbm=True, unsafe=False):\n '''\n Args:\n GHz (array): abscissa\n power (array): ordinate\n inDbm (bool): is the ``power`` in linear or dbm units?\n '''\n MeasuredFunction.__init__(self, GHz, power, unsafe=unsafe)\n self._inDbm = inDbm\n\n def simplePlot(self, *args, livePlot=False, **kwargs):\n ''' More often then not, this is db vs. 
wavelength, so label it\n '''\n super().simplePlot(*args, livePlot=livePlot, **kwargs)\n plt.xlabel('Frequency (GHz)')\n plt.ylabel('Transmission ({})'.format('dB' if self.inDbm else 'lin'))\n\n def nm(self):\n ''' Convert to Spectrum'''\n nm = 299_792_458 / self.absc\n ordi = np.copy(self.ordi)\n return Spectrum(nm, ordi, inDbm=self.inDbm)\n\n\nclass Waveform(MeasuredFunction):\n ''' Typically used for time, voltage functions.\n This is very similar to what is referred to as a \"signal.\"\n\n Use the unit attribute to set units different than Volts.\n\n Has class methods for generating common time-domain signals\n '''\n\n unit = None\n\n def __init__(self, t, v, unit='V', unsafe=False):\n super().__init__(t, v, unsafe=unsafe)\n self.unit = unit\n\n @classmethod\n def pulse(cls, tArr, tOn, tOff):\n vForm = np.zeros(len(tArr))\n vForm[tArr > tOn] = 1.\n vForm[tArr > tOff] = 0.\n return cls(tArr, vForm)\n\n @classmethod\n def whiteNoise(cls, tArr, rmsPow):\n vForm = np.random.randn(len(tArr))\n firstRms = rms(vForm)\n return cls(tArr, vForm * np.sqrt(rmsPow / firstRms))\n" ]
[ [ "numpy.nanmax", "matplotlib.pyplot.legend", "numpy.sqrt", "scipy.signal.correlate", "matplotlib.pyplot.autoscale", "numpy.nanmin", "numpy.all", "numpy.int", "numpy.max", "numpy.mean", "numpy.argmin", "numpy.var", "numpy.allclose", "numpy.clip", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.std", "scipy.signal.butter", "numpy.diff", "numpy.interp", "numpy.insert", "numpy.argmax", "numpy.copy", "numpy.size", "scipy.signal.lfilter", "numpy.zeros", "numpy.power", "numpy.median", "scipy.signal.lfilter_zi", "numpy.log10", "numpy.array", "numpy.sum", "numpy.convolve", "numpy.abs", "numpy.linalg.norm", "numpy.sort", "numpy.ones", "numpy.isscalar", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Cosmos-Break/OpenNMT-py
[ "8d1b5555da65f7e2ebddb07e4532794fd775b482" ]
[ "onmt/bin/translate.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import split_corpus\nfrom onmt.translate.multimodaltranslator import build_translator\nimport numpy as np\nimport onmt.opts as opts\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef translate(opt):\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n\n translator = build_translator(opt, report_score=True)\n # MultimodalTranslator 继承 Translator,主要覆盖了translate函数\n # 有些函数没有覆盖,被我原封不动地拷贝进了MultimodalTranslator,\n # 其实可以直接在子类里面调用父类的方法,\n # 用super(MultimodalTranslator, self).method() \n # 这个以后再改吧。\n\n\n test_img_feats = np.load(opt.path_to_test_img_feats)\n test_img_feats = test_img_feats.astype(np.float32)\n\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n\n for i, (src_shard, tgt_shard) in enumerate(shard_pairs):\n logger.info(\"Translating shard %d.\" % i)\n translator.translate(\n src=src_shard,\n tgt=tgt_shard,\n src_dir=opt.src_dir,\n batch_size=opt.batch_size,\n batch_type=opt.batch_type,\n attn_debug=opt.attn_debug,\n align_debug=opt.align_debug,\n test_img_feats=test_img_feats,\n multimodal_model_type=opt.multimodal_model_type\n )\n\n\ndef _get_parser():\n parser = ArgumentParser(description='translate.py')\n\n opts.config_opts(parser)\n opts.translate_opts(parser)\n opts.mmod_finetune_translate_opts(parser)\n return parser\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n translate(opt)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
katosh/mizani
[ "c67e3665f71b7961c97fc24e2940ec47e3724693" ]
[ "mizani/tests/test_utils.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\n\n\nfrom mizani.utils import (round_any, min_max, match, precision,\n first_element, multitype_sort,\n same_log10_order_of_magnitude)\n\n\ndef test_round_any():\n x = 4.632\n assert round_any(x, 1) == 5\n assert round_any(x, 2) == 4\n assert round_any(x, 3) == 6\n assert round_any(x, 4) == 4\n assert round_any(x, 5) == 5\n assert round_any(x, 1.5) == 4.5\n\n # Maintains the same index\n s = pd.Series([1.1, 2.2, 3.3], index=[3, 2, 1])\n result = round_any(s, 2)\n assert s.index.equals(result.index)\n\n\ndef test_min_max():\n x = [1, 2, 3, 4, 5]\n _min, _max = min_max(x)\n assert _min == 1\n assert _max == 5\n\n x = [1, float('-inf'), 3, 4, 5]\n _min, _max = min_max(x)\n assert _min == 1\n assert _max == 5\n\n _min, _max = min_max(x, finite=False)\n assert _min == float('-inf')\n assert _max == 5\n\n x = [1, 2, float('nan'), 4, 5]\n _min, _max = min_max(x, na_rm=True)\n assert _min == 1\n assert _max == 5\n\n x = [1, 2, float('nan'), 4, 5, float('inf')]\n _min, _max = min_max(x, na_rm=True, finite=False)\n assert _min == 1\n assert _max == float('inf')\n\n _min, _max = min_max(x)\n assert str(_min) == 'nan'\n assert str(_max) == 'nan'\n\n x = [float('nan'), float('nan'), float('nan')]\n _min, _max = min_max(x, na_rm=True)\n assert _min == float('-inf')\n assert _max == float('inf')\n\n\ndef test_match():\n v1 = [0, 1, 2, 3, 4, 5]\n v2 = [5, 4, 3, 2, 1, 0]\n result = match(v1, v2)\n assert result == v2\n\n # Positions of the first match\n result = match(v1, v2+v2)\n assert result == v2\n\n result = match(v1, v2, incomparables=[1, 2])\n assert result == [5, -1, -1, 2, 1, 0]\n\n result = match(v1, v2, start=1)\n assert result == [6, 5, 4, 3, 2, 1]\n\n v2 = [5, 99, 3, 2, 1, 0]\n result = match(v1, v2)\n assert result == [5, 4, 3, 2, -1, 0]\n\n\ndef test_precision():\n assert precision(0.0037) == .001\n assert precision(0.5) == .1\n assert precision(9) == 1\n assert precision(24) == 10\n assert precision(784) == 100\n assert precision([0, 0]) == 1\n\n\ndef test_first_element():\n x = [3, 4, 5]\n s = pd.Series(x)\n a = np.array([3, 4, 5])\n\n assert first_element(x) == 3\n assert first_element(s) == 3\n assert first_element(s[1:]) == 4\n assert first_element(a) == 3\n assert first_element(a[1:]) == 4\n\n with pytest.raises(StopIteration):\n first_element([])\n\n with pytest.raises(RuntimeError):\n first_element(iter(x))\n\n\ndef test_multitype_sort():\n a = ['c', float('nan'), 1, 'b', 'a', 2.0, 0]\n result = multitype_sort(a)\n # Any consecutive elements of the sametype are\n # sorted\n for i, x in enumerate(result[1:], start=1):\n x_prev = result[i-1]\n if (type(x_prev) is type(x)):\n # cannot compare nan with anything\n if (isinstance(x, (float, np.float)) and\n (np.isnan(x_prev) or np.isnan(x))):\n continue\n assert x_prev <= x\n\n\ndef test_same_log10_order_of_magnitude():\n # Default delta\n assert same_log10_order_of_magnitude((2, 8))\n assert same_log10_order_of_magnitude((35, 80.8))\n assert same_log10_order_of_magnitude((232.3, 730))\n\n assert not same_log10_order_of_magnitude((1, 18))\n assert not same_log10_order_of_magnitude((35, 800))\n assert not same_log10_order_of_magnitude((32, 730))\n\n assert not same_log10_order_of_magnitude((1, 9.9))\n assert not same_log10_order_of_magnitude((35, 91))\n assert not same_log10_order_of_magnitude((232.3, 950))\n\n # delta = 0\n assert same_log10_order_of_magnitude((1, 9.9), delta=0)\n assert same_log10_order_of_magnitude((35, 91), delta=0)\n assert same_log10_order_of_magnitude((232.3, 
950), delta=0)\n" ]
[ [ "numpy.isnan", "numpy.array", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
y1a2o6/qedr
[ "a63ecaf2b9538789ca0e761d55608a28d7194c4d" ]
[ "lib/eval/hinton.py" ]
[ "''' \nBased on:\n1) https://github.com/tonysyu/mpltools/blob/master/mpltools/special/hinton.py\n2) http://tonysyu.github.io/mpltools/auto_examples/special/plot_hinton.html\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections\nfrom matplotlib import transforms\nfrom matplotlib import ticker\n\n__all__ = ['hinton']\n\nclass SquareCollection(collections.RegularPolyCollection):\n \"\"\"Return a collection of squares.\"\"\"\n\n def __init__(self, **kwargs):\n super(SquareCollection, self).__init__(4, rotation=np.pi/4., **kwargs)\n\n def get_transform(self):\n \"\"\"Return transform scaling circle areas to data space.\"\"\"\n ax = self.axes\n pts2pixels = 72.0 / ax.figure.dpi\n scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width\n scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height\n return transforms.Affine2D().scale(scale_x, scale_y)\n\n\ndef hinton(inarray, x_label=None, y_label=None, max_value=None, use_default_ticks=True, \n ax=None, fontsize=14):\n \"\"\"Plot Hinton diagram for visualizing the values of a 2D array.\n\n Plot representation of an array with positive and negative values\n represented by white and black squares, respectively. The size of each\n square represents the magnitude of each value.\n\n Unlike the hinton demo in the matplotlib gallery [1]_, this implementation\n uses a RegularPolyCollection to draw squares, which is much more efficient\n than drawing individual Rectangles.\n\n .. note::\n This function inverts the y-axis to match the origin for arrays.\n\n .. [1] http://matplotlib.sourceforge.net/examples/api/hinton_demo.html\n\n Parameters\n ----------\n inarray : array\n Array to plot.\n max_value : float\n Any *absolute* value larger than `max_value` will be represented by a\n unit square.\n use_default_ticks: boolean\n Disable tick-generation and generate them outside this function.\n \"\"\"\n\n ax = ax if ax is not None else plt.gca()\n ax.set_facecolor('gray')\n # make sure we're working with a numpy array, not a numpy matrix\n inarray = np.asarray(inarray)\n height, width = inarray.shape\n if max_value is None:\n max_value = 2**np.ceil(np.log(np.max(np.abs(inarray)))/np.log(2))\n values = np.clip(inarray/max_value, -1, 1)\n rows, cols = np.mgrid[:height, :width]\n\n pos = np.where(values > 0)\n neg = np.where(values < 0)\n for idx, color in zip([pos, neg], ['white', 'black']):\n if len(idx[0]) > 0:\n xy = list(zip(cols[idx], rows[idx]))\n circle_areas = np.pi / 2 * np.abs(values[idx])\n squares = SquareCollection(sizes=circle_areas,\n offsets=xy, transOffset=ax.transData,\n facecolor=color, edgecolor=color)\n ax.add_collection(squares, autolim=True)\n\n ax.axis('scaled')\n # set data limits instead of using xlim, ylim.\n ax.set_xlim(-0.5, width-0.5)\n ax.set_ylim(height-0.5, -0.5)\n ax.grid(False)\n ax.tick_params(direction='in', colors='black')\n ax.spines['bottom'].set_color('black')\n ax.spines['top'].set_color('black')\n ax.spines['right'].set_color('black')\n ax.spines['left'].set_color('black')\n \n if x_label is not None:\n ax.set_xlabel(x_label, fontsize=fontsize)\n if y_label is not None:\n ax.set_ylabel(y_label, fontsize=fontsize)\n\n if use_default_ticks:\n ax.xaxis.set_major_locator(IndexLocator())\n ax.yaxis.set_major_locator(IndexLocator())\n\n\nclass IndexLocator(ticker.Locator):\n\n def __init__(self, max_ticks=10):\n self.max_ticks = max_ticks\n\n def __call__(self):\n \"\"\"Return the locations of the ticks.\"\"\"\n dmin, dmax = 
self.axis.get_data_interval()\n if dmax < self.max_ticks:\n step = 1\n else:\n step = np.ceil(dmax / self.max_ticks)\n return self.raise_if_exceeds(np.arange(0, dmax, step))" ]
[ [ "matplotlib.pyplot.gca", "numpy.log", "numpy.abs", "numpy.clip", "numpy.asarray", "numpy.arange", "matplotlib.transforms.Affine2D", "numpy.ceil", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sutoiku/autostat
[ "b0e6588e587450c4cbdb19a021d847f7571ba466" ]
[ "autostat/plots.py" ]
[ "from matplotlib import pyplot as plt\nimport numpy as np\nfrom numpy.typing import NDArray\nimport typing as ty\n\nfrom .auto_gp_model import AutoGpModel\nfrom .dataset_adapters import Dataset\nfrom .decomposition import DecompositionData\n\n\ndef plot_observations(X, Y, ax):\n ax.plot(X.flatten(), Y.flatten(), \"k.\", markersize=1)\n\n\ndef plot_predictions(pred_x, pred_mean_y, y_std, ax):\n # Plot predictive means as blue line\n ax.plot(pred_x.flatten(), pred_mean_y.flatten(), \"r\")\n # Shade between the lower and upper confidence bounds\n ax.fill_between(\n pred_x.flatten(),\n pred_mean_y - 2 * y_std,\n pred_mean_y + 2 * y_std,\n alpha=0.5,\n )\n\n\ndef plot_model(model: AutoGpModel, data: Dataset):\n fig, ax = plt.subplots(1, 1, figsize=(14, 3))\n plot_observations(data.train_x, data.train_y, ax)\n plot_observations(data.test_x, data.test_y, ax)\n\n y, y_std = model.predict_train()\n\n plot_predictions(data.train_x, y, y_std, ax)\n\n y, y_std = model.predict_test()\n plot_predictions(data.test_x, y, y_std, ax)\n\n return fig, ax\n\n\ndef plot_decomposition(d: DecompositionData):\n num_components = len(d.components)\n\n fig, axes = plt.subplots(\n nrows=num_components, sharex=True, figsize=(14, 3 * num_components)\n )\n if isinstance(axes, plt.Axes):\n axes = [axes]\n\n axes = ty.cast(list[plt.Axes], axes)\n\n for (spec, y_comp, y_std), ax in zip(d.components, axes):\n ax.plot(d.x, y_comp)\n # print(\"y_comp\", y_comp.shape, y_comp)\n # print(\"y_std\", y_std.shape, y_std)\n ax.fill_between(\n d.x.flatten(),\n y_comp.flatten() - 2 * y_std.flatten(),\n y_comp.flatten() + 2 * y_std.flatten(),\n alpha=0.5,\n )\n ax.set_title(spec.spec_str(True, True))\n\n return fig\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
osoco/comprendiendo-software-creando-herramientas
[ "64b07b29a876ce180b3ba03dfd1d1770d5fc6f6e" ]
[ "demos/models/suchai-flight-software/sandbox/log_parser.py" ]
[ "import re\nimport argparse\nimport pandas as pd\n\n# General expressions\nre_error = re.compile(r'\\[ERROR\\]\\[(\\d+)\\]\\[(\\w+)\\](.+)')\nre_warning = re.compile(r'\\[WARN \\]\\[(\\d+)\\]\\[(\\w+)\\](.+)')\nre_info = re.compile(r'\\[INFO \\]\\[(\\d+)\\]\\[(\\w+)\\](.+)')\nre_debug = re.compile(r'\\[DEBUG\\]\\[(\\d+)\\]\\[(\\w+)\\](.+)')\nre_verbose = re.compile(r'\\[VERB \\]\\[(\\d+)\\]\\[(\\w+)\\](.+)')\n\n# Specific expressions\nre_cmd_run = re.compile(r'\\[INFO \\]\\[(\\d+)]\\[Executer\\] Running the command: (.+)')\nre_cmd_result = re.compile(r'\\[INFO \\]\\[(\\d+)]\\[Executer\\] Command result: (\\d+)')\n\n\ndef get_parameters():\n \"\"\"\n Parse script arguments\n \"\"\"\n parser = argparse.ArgumentParser()\n # General expressions\n parser.add_argument('file', type=str, help=\"Log file\")\n parser.add_argument('--error', action=\"store_const\", const=re_error)\n parser.add_argument('--warning', action=\"store_const\", const=re_warning)\n parser.add_argument('--info', action=\"store_const\", const=re_info)\n parser.add_argument('--debug', action=\"store_const\", const=re_debug)\n parser.add_argument('--verbose', action=\"store_const\", const=re_verbose)\n # Specific expressions\n parser.add_argument('--cmd-run', action=\"store_const\", const=re_cmd_run)\n parser.add_argument('--cmd-result', action=\"store_const\", const=re_cmd_result)\n\n return parser.parse_args()\n\n\ndef parse_text(text, regexp):\n return regexp.findall(text)\n\n\ndef save_parsed(logs, file, format=None):\n df = pd.DataFrame(logs)\n # print(df)\n df.to_csv(file)\n\n\nif __name__ == \"__main__\":\n args = get_parameters()\n\n print(\"Reading file {}...\".format(args.file))\n with open(args.file) as logfile:\n text = logfile.read()\n\n args = vars(args)\n print(args)\n\n for type, regexp in args.items():\n if type is not \"file\" and regexp is not None:\n print(\"Parsing {}...\", type)\n logs = parse_text(text, regexp)\n save_parsed(logs, args[\"file\"]+type+\".csv\")\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
kmr0877/pyq
[ "dcad1f5d52f9b0df4a77f2918af4fdd5c00a5d80" ]
[ "src/pyq/_n.py" ]
[ "\"\"\"A helper module for interfacing with numpy\n\nNumpy has four date units\n\nCode\tMeaning\tTime span (relative)\tTime span (absolute)\nY\tyear\t+/- 9.2e18 years\t[9.2e18 BC, 9.2e18 AD]\nM\tmonth\t+/- 7.6e17 years\t[7.6e17 BC, 7.6e17 AD]\nW\tweek\t+/- 1.7e17 years\t[1.7e17 BC, 1.7e17 AD]\nD\tday\t+/- 2.5e16 years\t[2.5e16 BC, 2.5e16 AD]\n\nAnd nine time units:\n\nCode\tMeaning\tTime span (relative)\tTime span (absolute)\nh\thour\t+/- 1.0e15 years\t[1.0e15 BC, 1.0e15 AD]\nm\tminute\t+/- 1.7e13 years\t[1.7e13 BC, 1.7e13 AD]\ns\tsecond\t+/- 2.9e12 years\t[ 2.9e9 BC, 2.9e9 AD]\nms\tmillisecond\t+/- 2.9e9 years\t[ 2.9e6 BC, 2.9e6 AD]\nus\tmicrosecond\t+/- 2.9e6 years\t[290301 BC, 294241 AD]\nns\tnanosecond\t+/- 292 years\t[ 1678 AD, 2262 AD]\nps\tpicosecond\t+/- 106 days\t[ 1969 AD, 1970 AD]\nfs\tfemtosecond\t+/- 2.6 hours\t[ 1969 AD, 1970 AD]\nas\tattosecond\t+/- 9.2 seconds\t[ 1969 AD, 1970 AD]\n\n\nkdb+ has four datetime-like types\n\nnum char q-type c-type\n12 \"p\" timestamp int64_t\n13 \"m\" month int32_t\n14 \"d\" date int32_t\n15 \"z\" datetime double\n\nAnd four timedelta-like types\n\n16 \"n\" timespan int64_t\n17 \"u\" minute int32_t\n18 \"v\" second int32_t\n19 \"t\" time int32_t\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom datetime import date\n\nimport numpy\n\nK_DATE_SHIFT = date(2000, 1, 1).toordinal() - date(1970, 1, 1).toordinal()\nK_STAMP_SHIFT = K_DATE_SHIFT * 24 * 60 * 60 * 10 ** 9\n\n\ndef get_unit(a):\n \"\"\"Extract the time unit from array's dtype\"\"\"\n typestr = a.dtype.str\n i = typestr.find('[')\n if i == -1:\n raise TypeError(\"Expected a datetime64 array, not %s\", a.dtype)\n return typestr[i + 1: -1]\n\n\n_SCALE = {\n 'W': ('floor_divide', 7 * 24 * 60 * 60 * 10 ** 9),\n 'D': ('floor_divide', 24 * 60 * 60 * 10 ** 9),\n 'h': ('floor_divide', 60 * 60 * 10 ** 9),\n 'm': ('floor_divide', 60 * 10 ** 9),\n 's': ('floor_divide', 10 ** 9),\n 'ms': ('floor_divide', 10 ** 6),\n 'us': ('floor_divide', 10 ** 3),\n 'ns': (None, None),\n 'ps': ('multiply', 10 ** 3),\n 'fs': ('multiply', 10 ** 6),\n 'as': ('multiply', 10 ** 9),\n}\n\n_UNIT = {\n 'D': ('date', K_DATE_SHIFT, None, None),\n 'Y': ('year', -1970, None, None),\n 'W': ('date', K_DATE_SHIFT, 'floor_divide', 7),\n 'M': ('month', 30 * 12, None, None),\n 'h': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 60 * 10 ** 9),\n 'm': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 60 * 10 ** 9),\n 's': ('timestamp', K_STAMP_SHIFT, 'floor_divide', 10 ** 9),\n 'ns': ('timestamp', K_STAMP_SHIFT, None, None),\n 'ps': ('timestamp', K_STAMP_SHIFT, 'multiply', 1000),\n}\n\n_DTYPES = [\n \"O\", # 0\n \"?\", # 1 - boolean\n \"16B\", # 2 - guid\n None, # 3 - unused\n \"B\", # 4 - byte\n \"h\", # 5 - short\n \"i\", # 6 - int\n \"q\", # 7 - long\n \"f\", # 8 - real\n \"d\", # 9 - float\n \"S1\", # 10 - char\n \"O\", # 11 - symbol\n \"M8[ns]\", # 12 - timestamp\n \"M8[M]\", # 13 - month\n \"M8[D]\", # 14 - date\n None, # 15 - datetime (unsupported)\n \"m8[ns]\", # 16 - timespan\n \"m8[m]\", # 17 - minute\n \"m8[s]\", # 18 - second\n \"m8[ms]\", # 19 - time\n \"O\", # 20 - `sym$\n]\n\n\ndef dtypeof(x):\n \"\"\"Return the dtype corresponding to a given q object\"\"\"\n t = abs(x._t)\n if t < 20:\n return _DTYPES[t]\n return 'O'\n\n\ndef k2a(a, x):\n \"\"\"Rescale data from a K object x to array a.\n\n \"\"\"\n func, scale = None, 1\n t = abs(x._t)\n # timestamp (12), month (13), date (14) or datetime (15)\n if 12 <= t <= 15:\n unit = get_unit(a)\n attr, shift, func, scale = _UNIT[unit]\n a[:] = getattr(x, attr).data\n a += 
shift\n # timespan (16), minute (17), second (18) or time (19)\n elif 16 <= t <= 19:\n unit = get_unit(a)\n func, scale = _SCALE[unit]\n a[:] = x.timespan.data\n else:\n a[:] = list(x)\n\n if func is not None:\n func = getattr(numpy, func)\n a[:] = func(a.view(dtype='i8'), scale)\n\n\ndef array(self, dtype=None):\n \"\"\"An implementation of __array__()\"\"\"\n t = self._t\n # timestamp (12) through last enum (76)\n if 11 <= t < 77:\n dtype = dtypeof(self)\n a = numpy.empty(len(self), dtype)\n k2a(a, self)\n return a\n # table (98)\n if t == 98:\n if dtype is None:\n dtype = list(zip(self.cols, (dtypeof(c) for c in self.flip.value)))\n dtype = numpy.dtype(dtype)\n a = numpy.empty(int(self.count), dtype)\n for c in dtype.fields:\n k2a(a[c], self[c])\n return a\n return numpy.array(list(self), dtype)\n" ]
[ [ "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jamesthomasgriffin/smm
[ "5f1f6432e17f84f7f793d60cb9831ac5c79991e6" ]
[ "smm/lemm/mppca.py" ]
[ "from smm.lemm.linearlyembeddedmm import LinearlyEmbeddedMM\nfrom smm.rvs.normalsimplexrv import NormalSimplexRV\nimport numpy as np\n\n\nclass MPPCA(LinearlyEmbeddedMM):\n \"\"\"\n A class implementing mixtures of PPCA, described in\n\n Michael E. Tipping and Christopher M. Bishop,\n *Mixtures of Probabilistic Principal Component Analysers*,\n Neural Computation 11(2), pp 443–482, MIT Press, 1999\n\n Parameters\n ----------\n components : integer\n the number of components\n d : integer\n the dimension of components\n n : integer\n the ambient dimension\n rnd : np.random.RandomState\n a choice of random number generator\n \"\"\"\n\n def __init__(self, components, d, n, **kwargs):\n\n m = (d+1) * components\n self.d = d\n self.components = components\n\n simplices = [tuple(range(i*(d+1), (i+1)*(d+1))) for i in range(components)]\n\n rvs = [NormalSimplexRV(m, S) for S in simplices]\n\n LinearlyEmbeddedMM.__init__(self, m, n, rvs, **kwargs)\n\n def initial_V_from_means(self, means, scale):\n means_shape = (self.components, self.n)\n if means.shape != means_shape:\n raise ValueError(f\"means has wrong shape, should be {means_shape}\")\n\n V = np.zeros((self.m, self.n), dtype=means.dtype)\n d = self.d\n for i in range(d+1):\n V[i::d+1, :] = means + \\\n scale * self.rnd.standard_normal(size=means.shape)\n\n return V\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
3e45/minpiler
[ "993bdb38d1e4709a412bb551f7eb213376bfe7d2" ]
[ "minpiler/std/_macro.py" ]
[ "import json\nimport math\nimport textwrap\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Final\n\nfrom minpiler.m_ast import FrozenBuildContext\nfrom minpiler.std import _tripy\n\n\ndef render_svg(filepath: str, x: None, y: None, size: float, *, ctx: FrozenBuildContext):\n import svgpathtools\n filepath = str(ctx.filepath.parent / filepath)\n\n def _is_closable_fixed(self: Any):\n \"\"\"https://github.com/mathandy/svgpathtools/issues/71#issuecomment-736125722\"\"\"\n try:\n return _is_closable(self)\n except IndexError:\n return True\n\n _is_closable = svgpathtools.Path._is_closable\n svgpathtools.Path._is_closable = _is_closable_fixed\n\n paths, attributes = svgpathtools.svg2paths(filepath) # type: ignore\n lines: list[str] = [\"from minpiler.std import M\"]\n xmin = math.inf\n xmax = -math.inf\n ymin = math.inf\n ymax = -math.inf\n for path in paths:\n for segment in path:\n xmin = min(xmin, segment.start.real, segment.end.real)\n xmax = max(xmax, segment.start.real, segment.end.real)\n ymin = min(ymin, segment.start.imag, segment.end.imag)\n ymax = max(ymax, segment.start.imag, segment.end.imag)\n scale = 1 / max(xmax - xmin, ymax - ymin) * size\n\n def scale_segment(segment: Any) -> tuple[tuple[float, float], tuple[float, float]]:\n start = (segment.start - xmin - ymin * 1j) * scale\n end = (segment.end - xmin - ymin * 1j) * scale\n return (\n (start.real, size - start.imag),\n (end.real, size - end.imag),\n )\n for path, attr in zip(paths, attributes):\n if len(path) == 0:\n continue\n style: dict[str, str] = {key_value.split(':')[0]: key_value.split(':')[1] for key_value in attr['style'].split(';')}\n stroke_str = style['stroke-width']\n if style['stroke'] != 'none':\n if stroke_str.endswith('px'):\n stroke_str = stroke_str[:-2]\n stroke = max(1, int(float(stroke_str) * scale))\n lines.append(f'M.draw.stroke({stroke})')\n for segment in path:\n (x0, y0), (x1, y1) = scale_segment(segment)\n lines.append(f'M.draw.line({x0} + x, {y0} + y, {x1} + x, {y1} + y)')\n if style['fill'] != 'none':\n assert style['fill'].startswith('#')\n h = style['fill'][1:]\n lines.append(f'M.draw.color({int(h[:2], 16)}, {int(h[2:4], 16)}, {int(h[4:6], 16)}, 255)')\n points: list[tuple[float, float]] = []\n for v in path[:-1]:\n points.append(scale_segment(v)[0])\n points.extend(scale_segment(path[-1]))\n for (x0, y0), (x1, y1), (x2, y2) in _tripy.earclip(points):\n lines.append(f'M.draw.triangle({x0} + x, {y0} + y, {x1} + x, {y1} + y, {x2} + x, {y2} + y)')\n assert len(lines) >= 2, f\"Failed to parse {filepath}\"\n return \"\\n\".join(lines)\n\n\ndef _shift(font: Any):\n for char in font:\n char['right'] -= char['left']\n if char['bbox'] is not None:\n for point in char['bbox']:\n point[0] -= char['left']\n for line in char['lines']:\n for point in line:\n point[0] -= char['left']\n char['left'] = 0\n\n\n# Converted the font data to json with python-hershey by scruss https://github.com/scruss/python-hershey\n# and reduced the number of points by hand\nfont = json.loads((Path(__file__).parent / 'hershey-futural.json').read_text(encoding='utf-8'))\n_shift(font)\n\nchar_table: Final = r''' !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'''\nfont_data: dict[str, list[str]] = {}\nchar_advance: dict[str, str] = {}\n\nfor font_char, char_name in zip(font, char_table):\n lines: list[tuple[int, int, int, int]] = []\n xs: set[int] = set()\n ys: set[int] = set()\n for line in 
font_char['lines']:\n for (x0, y0), (x1, y1) in zip(line[:-1], line[1:]):\n points = (int(x0), int(-y0 + 13), int(x1), int(-y1 + 13))\n lines.append(points)\n xs.add(points[0])\n ys.add(points[1])\n xs.add(points[2])\n ys.add(points[3])\n\n if char_name == \"'\":\n char_name = \"\\\\'\"\n if char_name == '\\\\':\n char_name = '\\\\\\\\'\n font_data[char_name] = []\n if len(lines) == 0:\n font_data[char_name].append('pass')\n else:\n def name(v: int): return str(v).replace('-', '_')\n for x in sorted(xs):\n font_data[char_name].append(f'x{name(x)} = x + {x} * size')\n for y in sorted(ys):\n font_data[char_name].append(f'y{name(y)} = y + {y} * size')\n for x0, y0, x1, y1 in lines:\n font_data[char_name].append(f'M.draw.line(x{name(x0)}, y{name(y0)}, x{name(x1)}, y{name(y1)})')\n char_advance[char_name] = f\"{font_char['right']} * size\"\n\nfont_lowercase = r''' !?,.0123456789abcdefghijklmnopqrstuvwxyz'''\n\n\ndef render_char_lowercase(char: None, x: None, y: None, size: None, *, ctx: FrozenBuildContext):\n lines: list[str] = [\"from minpiler.std import M\"]\n for k, v in font_data.items():\n if k in font_lowercase:\n lines.append(('if' if len(lines) == 1 else 'elif') + f' char == {json.dumps(k)}:')\n for line in v:\n lines.append(' ' + line)\n return \"\\n\".join(lines)\n\n\ndef render_char(char: None, x: None, y: None, size: None, *, ctx: FrozenBuildContext):\n lines: list[str] = [\"from minpiler.std import M\"]\n for k, v in font_data.items():\n lines.append(('if' if len(lines) == 1 else 'elif') + f' char == {json.dumps(k)}:')\n for line in v:\n lines.append(' ' + line)\n return \"\\n\".join(lines)\n\n\ndef get_char_advance(char: None, size: None, *, ctx: FrozenBuildContext):\n lines: list[str] = [\"from minpiler.std import M\"]\n for k, v in char_advance.items():\n lines.append(('if' if len(lines) == 1 else 'elif') + f' char == {json.dumps(k)}:')\n lines.append(f' advance = {v}')\n lines.append('else:')\n lines.append(f' advance = 0')\n lines.append(f'return advance')\n return \"\\n\".join(lines)\n\n\ndef render_text(text: str, x: None, y: None, size: float, *, ctx: FrozenBuildContext):\n lines: list[str] = [\"from minpiler.std import util\"]\n\n if len(text) > 0:\n lines.append(f'util.render_char({json.dumps(text[0])}, x, y, size)')\n\n left = 0\n font_data = json.loads((Path(__file__).parent / 'hershey-futural.json').read_text(encoding='utf-8'))\n for prev, char in zip(text[:-1], text[1:]):\n advance = font_data[char_table.index(prev)]['right'] - font_data[char_table.index(prev)]['left']\n left += float(advance * size)\n lines.append(f'util.render_char({json.dumps(char)}, x + {left}, y, size)')\n\n return \"\\n\".join(lines)\n\n\ncolor_palette_cache: dict[Path, Any] = {} # dict[Path, np.ndarray]\n\n\ndef _render_image_file(filepath: str, resolution_x: int, resolution_y: int, src_y_start: int, src_y_stop: int, num_colors: int, dst_left: None, dst_top: None, dst_width: float, dst_height: float, display: None, once: bool, *, ctx: FrozenBuildContext):\n import numpy as np\n import PIL.Image\n import PIL.ImageOps\n import scipy.cluster.vq\n import scipy.spatial\n import skimage.color\n\n scale_x: float = dst_width / resolution_x\n scale_y: float = dst_height / resolution_y\n\n real_filepath = (ctx.filepath.parent / filepath).absolute().resolve()\n if not Path(real_filepath).exists():\n raise ctx.CompileError(f'{real_filepath!r} does not exist')\n\n def quantize_pixels(color_palette_: np.ndarray, pixels_: np.ndarray) -> np.ndarray:\n return 
scipy.spatial.KDTree(color_palette_).query(pixels_[np.arange(pixels_.shape[0])])[1] # type: ignore # pixel_id -> color_id\n\n lines: list[str] = [\"from minpiler.std import M, L\"]\n if real_filepath not in color_palette_cache:\n pixels = np.array(PIL.Image.open(real_filepath).convert('RGB').getdata())\n\n def median_cut(bucket: np.ndarray, n: int = num_colors) -> np.ndarray:\n assert n & (n - 1) == 0, f'num_colors must be a power of 2: {num_colors}'\n if n < 2:\n return np.expand_dims(np.mean(bucket, axis=0), axis=0)\n axis = (np.max(bucket, axis=0) - np.min(bucket, axis=0)).argmax()\n median = np.median(bucket, axis=0)[axis]\n children = bucket[np.where(bucket[:, axis] < median)], bucket[np.where(bucket[:, axis] >= median)]\n return np.concatenate((median_cut(children[0], n // 2), median_cut(children[1], n - n // 2)))\n\n def k_means(pixels: np.ndarray):\n stdev = np.std(pixels, axis=0)\n stdev[stdev == 0] = 1.0\n return scipy.cluster.vq.kmeans(pixels / stdev, num_colors, iter=20, seed=0)[0] * stdev\n\n # clustering\n np.random.seed(0)\n color_quantization_algorithm = k_means # median_cut or k_means\n color_palette = skimage.color.lab2rgb(color_quantization_algorithm(skimage.color.rgb2lab(pixels.astype(np.float32) / 255))) * 255 # color_id -> color\n indexed_pixels = quantize_pixels(color_palette, pixels)\n\n # sort the color palette by frequency of use\n rank_to_color_id = np.array([color_id for color_id, _ in sorted([(color_id, np.count_nonzero(indexed_pixels == color_id)) for color_id in range(color_palette.shape[0])], key=lambda v: -v[1])])\n color_palette_cache[real_filepath] = color_palette[rank_to_color_id]\n\n pixels = np.array(PIL.ImageOps.flip(PIL.Image.open(real_filepath).convert('RGB').resize((resolution_x, resolution_y), PIL.Image.BICUBIC)).getdata())\n color_palette: np.ndarray = color_palette_cache[real_filepath]\n indexed_pixels = quantize_pixels(color_palette, pixels)\n\n command_count = 0\n lines.append(f'M.draw_flush(display)')\n last_color_command: str | None = None\n\n def add_draw_command(line: str, set_color: bool):\n nonlocal command_count, last_color_command\n if set_color:\n last_color_command = line\n lines.append(line)\n command_count += 1\n if command_count > 250: # max_graphics_buffer = 256\n lines.append(f'M.draw_flush(display)')\n command_count = 0\n if last_color_command is not None:\n lines.append(last_color_command) # TODO: not the processor but the display holds current color? 
or is it the emulator's bug?\n\n # for each color\n for color_id in range(color_palette.shape[0]):\n r, g, b = color_palette[color_id]\n add_draw_command(f'M.draw.color({r}, {g}, {b}, 255)', True)\n\n def to_segments(swap_axis: bool):\n segments: list[tuple[int, int, int, int, int]] = [] # list[(x_start_min, x_start_max, x_end_min, x_end_max, y)]\n\n INF = max(resolution_x, resolution_y) * 2\n\n @dataclass\n class State1:\n x_start_min: None | int\n\n @dataclass\n class State2:\n x_start_min: int\n x_start_max: int\n x_end_min: int\n x_end_max: int\n\n for y in range(resolution_x) if swap_axis else range(src_y_start, src_y_stop):\n state: State1 | State2 = State1(None)\n for x in range(src_y_start, src_y_stop) if swap_axis else range(resolution_x):\n i = x * resolution_x + y if swap_axis else y * resolution_x + x\n if isinstance(state, State1):\n if indexed_pixels[i] < color_id:\n if state.x_start_min is not None:\n segments.append((state.x_start_min, INF, -INF, x - 1, y))\n state.x_start_min = None\n elif indexed_pixels[i] > color_id:\n if state.x_start_min is None:\n state.x_start_min = x\n else:\n if state.x_start_min is None:\n state = State2(x, x, x, x)\n else:\n state = State2(state.x_start_min, x, x, x)\n else:\n if indexed_pixels[i] < color_id:\n segments.append((state.x_start_min, state.x_start_max, state.x_end_min, state.x_end_max, y)) # type: ignore\n state = State1(None)\n elif indexed_pixels[i] > color_id:\n if once:\n state.x_end_max = x\n else:\n segments.append((state.x_start_min, state.x_start_max, state.x_end_min, state.x_end_max, y)) # type: ignore\n state = State1(None)\n else:\n state.x_end_min = x\n state.x_end_max = x\n if isinstance(state, State1):\n if state.x_start_min is not None:\n segments.append((state.x_start_min, INF, -INF, (src_y_stop if swap_axis else resolution_x) - 1, y))\n else:\n segments.append((state.x_start_min, state.x_start_max, state.x_end_min, state.x_end_max, y)) # type: ignore\n\n def merge_rects(from_bottom: bool = True):\n rects: dict[int, set[tuple[int, int, int, int, int]]] = defaultdict(lambda: set()) # (y_min if from_bottom else y_max) -> set[(x_start_min, x_start_max, x_end_min, x_end_max, height)]\n\n for x_start_min, x_start_max, x_end_min, x_end_max, y in reversed(segments) if from_bottom else segments:\n for (x_start_min2, x_start_max2, x_end_min2, x_end_max2, height2) in rects[y + 1 if from_bottom else y - 1] if once else []:\n if x_start_min2 <= x_start_max and \\\n x_start_min <= x_start_max2 and \\\n x_end_min <= x_end_max2 and \\\n x_end_min2 <= x_end_max:\n rects[y + 1 if from_bottom else y - 1].remove((x_start_min2, x_start_max2, x_end_min2, x_end_max2, height2))\n rects[y].add((max(x_start_min, x_start_min2), min(x_start_max, x_start_max2), max(x_end_min, x_end_min2), min(x_end_max, x_end_max2), height2 + 1))\n break\n else:\n rects[y].add((x_start_min, x_start_max, x_end_min, x_end_max, 1))\n\n # convert to set[(x, y, width, height)]\n if from_bottom:\n return {(x_start_max, y_min, x_end_min - x_start_max + 1, height) for y_min, v in rects.items() for _x_start_min, x_start_max, x_end_min, _x_end_max, height in v if x_start_max <= x_end_min}\n else:\n return {(x_start_max, y_max - height + 1, x_end_min - x_start_max + 1, height) for y_max, v in rects.items() for _x_start_min, x_start_max, x_end_min, _x_end_max, height in v if x_start_max <= x_end_min}\n\n rectsA = merge_rects(False)\n rectsB = merge_rects(True)\n rects_xywh = rectsA if len(rectsA) < len(rectsB) else rectsB\n del segments\n\n diagonal_lines: set[tuple[int, 
int, int, int]] = set() # x0, y0, x1, y1\n\n # Merge known shapes\n def merge_diagonal_line(x0: int, y0: int, x1: int, y1: int):\n if (x0, y0, 1, 1) in rects_xywh and (x1, y1, 1, 1) in rects_xywh:\n diagonal_lines.add((x0, y0, x1, y1))\n rects_xywh.remove((x0, y0, 1, 1))\n rects_xywh.remove((x1, y1, 1, 1))\n\n # TODO: need to be tested and add shapes if it works\n if False and once:\n if scale_x == 1 and scale_y == 1:\n for y in range(src_y_start, src_y_stop):\n for x in range(resolution_x):\n merge_diagonal_line(x, y, x + 1, y + 1)\n merge_diagonal_line(x, y, x - 1, y + 1)\n\n if swap_axis:\n return {(y, x, height, width) for x, y, width, height in rects_xywh}, {(y0, x0, y1, x1) for x0, y0, x1, y1 in diagonal_lines}\n else:\n return rects_xywh, diagonal_lines\n\n round_points_x = np.floor(np.arange(resolution_x + 1) * scale_x)\n round_points_y = np.floor(np.arange(resolution_y + 1) * scale_y)\n\n def round_x(x: float):\n return round_points_x[np.abs(round_points_x - x).argmin()]\n\n def round_y(y: float):\n return round_points_y[np.abs(round_points_y - y).argmin()]\n\n A = to_segments(False)\n B = to_segments(True)\n shapes = A if len(A[0]) + len(A[1]) < len(B[0]) + len(B[1]) else B\n\n # rects\n for x, y, width, height in shapes[0]:\n x_int = round_x(x * scale_x)\n y_int = round_y(y * scale_y)\n add_draw_command(f'M.draw.rect(dst_left + {x_int}, dst_top + {y_int}, {round_x((x + width) * scale_x) - x_int}, {round_y((y + height) * scale_y) - y_int})', False)\n\n # diagonal lines\n for x0, y0, x1, y1 in shapes[1]:\n add_draw_command(f'M.draw.line(dst_left + {round_x(x0 * scale_x)}, dst_top + {round_y(y0 * scale_y)}, {round_y(x1 * scale_x)}, {round_y(y1 * scale_y)})', False)\n\n lines.append(f'M.draw_flush(display)')\n return \"\\n\".join(lines)\n\n\ndef render_image_file(filepath: str, resolution_x: int, resolution_y: int, num_colors: int, dst_left: None, dst_top: None, dst_width: float, dst_height: float, display: None, once: bool, *, ctx: FrozenBuildContext):\n return _render_image_file(filepath, resolution_x, resolution_y, 0, resolution_y, num_colors, dst_left, dst_top, dst_width, dst_height, display, once, ctx=ctx)\n\n\nimage_parts: dict[str, list[tuple[int, int]]] = {}\n\n\ndef split_image_file(filepath: str, resolution_x: int, resolution_y: int, num_colors: int, dst_left: None, dst_top: None, dst_width: float, dst_height: float, display: None, once: bool, *, ctx: FrozenBuildContext) -> int:\n def f(start: int, end: int):\n return _render_image_file(filepath, resolution_x, resolution_y, start, end, num_colors, dst_left, dst_top, dst_width, dst_height, display, once, ctx=ctx)\n\n if filepath not in image_parts:\n parts: list[tuple[int, int]] = []\n size = len(f(0, resolution_y).splitlines())\n if size <= 950:\n parts.append((0, resolution_y))\n else:\n mean = resolution_y * 950 // size\n assert mean > 0, f\"resolution = {(resolution_x, resolution_y)} is too large: {filepath}\"\n\n start = 0\n end = mean\n while start < end:\n size = len(f(start, resolution_y).splitlines())\n while size > 950:\n end = max(start + (end - start) * 950 // size, end - 1)\n assert start < end, f\"resolution = {(resolution_x, resolution_y)} is too large: {filepath}\"\n size = len(f(start, end).splitlines())\n parts.append((start, end))\n start = end\n end = min(start + mean, resolution_y)\n\n image_parts[filepath] = parts\n\n return len(image_parts[filepath])\n\n\ndef render_image_file_multiprocessing(filepath: str, resolution_x: int, resolution_y: int, num_colors: int, dst_left: None, dst_top: None, 
dst_width: float, dst_height: float, display: None, once: bool, *, ctx: FrozenBuildContext):\n num_parts = split_image_file(filepath, resolution_x, resolution_y, num_colors, dst_left, dst_top, dst_width, dst_height, display, once, ctx=ctx)\n result = f'''\\\nfrom minpiler.std import Processors\n\n'''\n for i in range(num_parts):\n result += f'''\\\[email protected]\ndef f{i}():\n{textwrap.indent(_render_image_file(filepath, resolution_x, resolution_y, *image_parts[filepath][i], num_colors, dst_left, dst_top, dst_width, dst_height, display, once, ctx=ctx), \" \")}\n'''\n if once:\n result += '''\\\nwhile True:\n pass\n'''\n return result\n" ]
[ [ "numpy.abs", "numpy.random.seed", "numpy.min", "numpy.arange", "numpy.median", "numpy.max", "numpy.std", "numpy.mean", "numpy.count_nonzero", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
samuelcheang0419/practice-ml-from-scratch
[ "8500044341d3b68633d1639ed3705001d1df6f33" ]
[ "kmeans/kmeans.py" ]
[ "import numpy as np\nimport random\n\nclass KMeans:\n def __init__(self, n_clusters, max_iter, max_convergence_change_cnt, random_seed):\n self.n_clusters = n_clusters\n self.max_iter = max_iter\n self.max_convergence_change_cnt = max_convergence_change_cnt\n self.random_seed = random_seed\n\n def fit(self, X):\n random.seed(self.random_seed)\n\n n_rows = X.shape[0]\n self.grps = np.zeros(n_rows)\n # set initial clusters\n self.grps[random.sample(range(0, n_rows - 1), self.n_clusters)] = np.arange(self.n_clusters)\n self.changed_cnt_each_iteration = []\n\n for i in range(self.max_iter):\n # calculate centroids (use mean for now)\n self.centroids = np.array([np.mean(X[self.grps == grp], axis = 0) for grp in range(self.n_clusters)])\n\n # calculate and reassign to nearest cluster (use Euclidean distance for now)\n new_grps = np.array([np.argmin(np.sum((row - self.centroids)**2, axis = 1)**0.5) for row in X])\n changed_cnt = sum(new_grps != self.grps)\n self.changed_cnt_each_iteration.append(changed_cnt)\n self.grps = new_grps\n self.n_iter = i\n if changed_cnt == 0:\n break\n\n self.inertia_ = np.sum(np.sum((X - self.centroids[self.grps])**2, axis = 1))\n return self\n\n def predict(self, X):\n new_grps = np.array([np.argmin(np.sum((row - self.centroids)**2, axis = 1)**0.5) for row in X])\n return new_grps\n\n\nif __name__ == '__main__':\n pass" ]
[ [ "numpy.arange", "numpy.mean", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lijiansong/lang
[ "e255709da2b12e09dea45f86d54f77a19b96f13b" ]
[ "python/sklearn/linear-regression/workload-analysis/classify/online/roofline/roofline.py" ]
[ "#!/usr/bin/env python3\n\nfrom collections import OrderedDict\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns;\n\ndef get_data(data_file_path):\n data_file_reader = open(data_file_path, 'r')\n data_list = []\n try:\n text_lines = data_file_reader.readlines()\n for line in text_lines:\n line = line.rstrip('\\n')\n source, layer_name,\tlayer_type, exe_time, IN, IC, IH, IW, kernel_size, stride, pad,\tON, OC,\tOH, OW,\tflot_ops, mem_bytes, gflops, mem_band, op_intensity = line.split('\\t')\n print(layer_type, float(gflops), float(op_intensity))\n data_list.append((layer_type, float(gflops), float(op_intensity)))\n finally:\n data_file_reader.close()\n return data_list\n\ndef draw_roofline(data_list, peak_flops, peak_membdw):\n layer_type_set = {record[0] for record in data_list}\n #print(layer_type_set)\n colors = sns.color_palette(\"hls\", n_colors=len(layer_type_set) + 2)\n layer_color_map = {val:i for i, val in enumerate(list(layer_type_set))}\n #print(layer_color_map)\n fig, ax = plt.subplots(figsize=(6, 6))\n\n # 1. plot the <flops, intensity> pairs\n for i in data_list:\n layer_type, flops, intensity = str(i[0]), i[1], i[2]\n if layer_type == 'Convolution':\n ax.plot(intensity, flops, 'x',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='x')\n elif layer_type == 'InnerProduct':\n ax.plot(intensity, flops, 'v',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='v')\n elif layer_type == 'Pooling':\n ax.plot(intensity, flops, '*',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='*')\n elif layer_type == 'Scale':\n ax.plot(intensity, flops, 's',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='s')\n elif layer_type == 'Eltwise':\n ax.plot(intensity, flops, 'd',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='d')\n elif layer_type == 'ReLU':\n ax.plot(intensity, flops, 'p',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='p')\n elif layer_type == 'BatchNorm':\n ax.plot(intensity, flops, 'o',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='o')\n elif layer_type == 'LRN':\n ax.plot(intensity, flops, '^',\n color=colors[layer_color_map[layer_type]], label=layer_type, marker='^')\n\n # 2. plot the roof line\n x1 = peak_flops / peak_membdw\n y1 = peak_flops\n max_op_intensity = max([i[2] for i in data_list])\n ax.hlines(y=y1, xmin=x1,\n xmax=max_op_intensity, linewidth=1.5, color='red')\n min_flops = min([i[1] for i in data_list])\n x2 = min_flops / peak_membdw\n y2 = peak_membdw * x2\n ax.plot([x1, x2], [y1, y2], linewidth=1.5, color='red')\n\n ax.set_yscale('log')\n ax.set_xscale('log')\n #plt.xscale('log', basex=2)\n #plt.yscale('log', basey=2)\n ax.set_ylabel('GFLOPS', fontsize=10)\n ax.set_xlabel('Operational Intensity (FLOPS/Byte)', fontsize=10)\n\n handles, labels = ax.get_legend_handles_labels()\n #print(labels)\n labels_od = OrderedDict(zip(labels, handles))\n ax.legend(labels_od.values(), labels_od.keys(), loc='upper left')\n\n plt.show()\n\nif __name__ == '__main__':\n data = get_data('data.txt')\n # FLOPS: fp16 dense 0.5T, fp16 sparse 2T; int8 dense 1T, int8 sparse 4T\n #peak_flops = 16384\n peak_flops = 512\n # memory bandwidth: 25.6 GB/s, in real only reach 80%\n #peak_mem_bandwidth = 102.4\n peak_mem_bandwidth = 25.6\n draw_roofline(data, peak_flops, peak_mem_bandwidth)\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jingkustc/Doc2EDAG
[ "9d3d2026265cead9247fecebcaaf79b084035a36" ]
[ "dee/dee_helper.py" ]
[ "# -*- coding: utf-8 -*-\n# AUTHOR: Shun Zheng\n# DATE: 19-9-19\n\nimport logging\nimport os\nimport re\nfrom collections import defaultdict, Counter\nimport numpy as np\nimport torch\n\nfrom .dee_metric import measure_event_table_filling\nfrom .event_type import event_type2event_class, BaseEvent, event_type_fields_list, common_fields\nfrom .ner_task import NERExample, NERFeatureConverter\nfrom .utils import default_load_json, default_dump_json, default_dump_pkl, default_load_pkl\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DEEExample(object):\n def __init__(self, annguid, detail_align_dict, only_inference=False):\n self.guid = annguid\n # [sent_text, ...]\n self.sentences = detail_align_dict['sentences']\n self.num_sentences = len(self.sentences)\n\n if only_inference:\n # set empty entity/event information\n self.only_inference = True\n self.ann_valid_mspans = []\n self.ann_mspan2dranges = {}\n self.ann_mspan2guess_field = {}\n self.recguid_eventname_eventdict_list = []\n self.num_events = 0\n self.sent_idx2srange_mspan_mtype_tuples = {}\n self.event_type2event_objs = {}\n else:\n # set event information accordingly\n self.only_inference = False\n\n # [span_text, ...]\n self.ann_valid_mspans = detail_align_dict['ann_valid_mspans']\n # span_text -> [drange_tuple, ...]\n self.ann_mspan2dranges = detail_align_dict['ann_mspan2dranges']\n # span_text -> guessed_field_name\n self.ann_mspan2guess_field = detail_align_dict['ann_mspan2guess_field']\n # [(recguid, event_name, event_dict), ...]\n self.recguid_eventname_eventdict_list = detail_align_dict['recguid_eventname_eventdict_list']\n self.num_events = len(self.recguid_eventname_eventdict_list)\n\n # for create ner examples\n # sentence_index -> [(sent_match_range, match_span, match_type), ...]\n self.sent_idx2srange_mspan_mtype_tuples = {}\n for sent_idx in range(self.num_sentences):\n self.sent_idx2srange_mspan_mtype_tuples[sent_idx] = []\n\n for mspan in self.ann_valid_mspans:\n for drange in self.ann_mspan2dranges[mspan]:\n sent_idx, char_s, char_e = drange\n sent_mrange = (char_s, char_e)\n\n sent_text = self.sentences[sent_idx]\n if sent_text[char_s: char_e] != mspan:\n raise Exception('GUID: {} span range is not correct, span={}, range={}, sent={}'.format(\n annguid, mspan, str(sent_mrange), sent_text\n ))\n\n guess_field = self.ann_mspan2guess_field[mspan]\n\n self.sent_idx2srange_mspan_mtype_tuples[sent_idx].append(\n (sent_mrange, mspan, guess_field)\n )\n\n # for create event objects\n # the length of event_objs should >= 1\n self.event_type2event_objs = {}\n for mrecguid, event_name, event_dict in self.recguid_eventname_eventdict_list:\n event_class = event_type2event_class[event_name]\n event_obj = event_class()\n assert isinstance(event_obj, BaseEvent)\n event_obj.update_by_dict(event_dict, recguid=mrecguid)\n\n if event_obj.name in self.event_type2event_objs:\n self.event_type2event_objs[event_obj.name].append(event_obj)\n else:\n self.event_type2event_objs[event_name] = [event_obj]\n\n def __repr__(self):\n dee_str = 'DEEExample (\\n'\n dee_str += ' guid: {},\\n'.format(repr(self.guid))\n\n if not self.only_inference:\n dee_str += ' span info: (\\n'\n for span_idx, span in enumerate(self.ann_valid_mspans):\n gfield = self.ann_mspan2guess_field[span]\n dranges = self.ann_mspan2dranges[span]\n dee_str += ' {:2} {:20} {:30} {}\\n'.format(span_idx, span, gfield, str(dranges))\n dee_str += ' ),\\n'\n\n dee_str += ' event info: (\\n'\n event_str_list = repr(self.event_type2event_objs).split('\\n')\n for event_str in 
event_str_list:\n dee_str += ' {}\\n'.format(event_str)\n dee_str += ' ),\\n'\n\n dee_str += ' sentences: (\\n'\n for sent_idx, sent in enumerate(self.sentences):\n dee_str += ' {:2} {}\\n'.format(sent_idx, sent)\n dee_str += ' ),\\n'\n\n dee_str += ')\\n'\n\n return dee_str\n\n @staticmethod\n def get_event_type_fields_pairs():\n return list(event_type_fields_list)\n\n @staticmethod\n def get_entity_label_list():\n visit_set = set()\n entity_label_list = [NERExample.basic_entity_label]\n\n for field in common_fields:\n if field not in visit_set:\n visit_set.add(field)\n entity_label_list.extend(['B-' + field, 'I-' + field])\n\n for event_name, fields in event_type_fields_list:\n for field in fields:\n if field not in visit_set:\n visit_set.add(field)\n entity_label_list.extend(['B-' + field, 'I-' + field])\n\n return entity_label_list\n\n\nclass DEEExampleLoader(object):\n def __init__(self, rearrange_sent_flag, max_sent_len):\n self.rearrange_sent_flag = rearrange_sent_flag\n self.max_sent_len = max_sent_len\n\n def rearrange_sent_info(self, detail_align_info):\n if 'ann_valid_dranges' not in detail_align_info:\n detail_align_info['ann_valid_dranges'] = []\n if 'ann_mspan2dranges' not in detail_align_info:\n detail_align_info['ann_mspan2dranges'] = {}\n\n detail_align_info = dict(detail_align_info)\n split_rgx = re.compile('[,::;;))]')\n\n raw_sents = detail_align_info['sentences']\n doc_text = ''.join(raw_sents)\n raw_dranges = detail_align_info['ann_valid_dranges']\n raw_sid2span_char_set = defaultdict(lambda: set())\n for raw_sid, char_s, char_e in raw_dranges:\n span_char_set = raw_sid2span_char_set[raw_sid]\n span_char_set.update(range(char_s, char_e))\n\n # try to split long sentences into short ones by comma, colon, semi-colon, bracket\n short_sents = []\n for raw_sid, sent in enumerate(raw_sents):\n span_char_set = raw_sid2span_char_set[raw_sid]\n if len(sent) > self.max_sent_len:\n cur_char_s = 0\n for mobj in split_rgx.finditer(sent):\n m_char_s, m_char_e = mobj.span()\n if m_char_s in span_char_set:\n continue\n short_sents.append(sent[cur_char_s:m_char_e])\n cur_char_s = m_char_e\n short_sents.append(sent[cur_char_s:])\n else:\n short_sents.append(sent)\n\n # merge adjacent short sentences to compact ones that match max_sent_len\n comp_sents = ['']\n for sent in short_sents:\n prev_sent = comp_sents[-1]\n if len(prev_sent + sent) <= self.max_sent_len:\n comp_sents[-1] = prev_sent + sent\n else:\n comp_sents.append(sent)\n\n # get global sentence character base indexes\n raw_char_bases = [0]\n for sent in raw_sents:\n raw_char_bases.append(raw_char_bases[-1] + len(sent))\n comp_char_bases = [0]\n for sent in comp_sents:\n comp_char_bases.append(comp_char_bases[-1] + len(sent))\n\n assert raw_char_bases[-1] == comp_char_bases[-1] == len(doc_text)\n\n # calculate compact doc ranges\n raw_dranges.sort()\n raw_drange2comp_drange = {}\n prev_comp_sid = 0\n for raw_drange in raw_dranges:\n raw_drange = tuple(raw_drange) # important when json dump change tuple to list\n raw_sid, raw_char_s, raw_char_e = raw_drange\n raw_char_base = raw_char_bases[raw_sid]\n doc_char_s = raw_char_base + raw_char_s\n doc_char_e = raw_char_base + raw_char_e\n assert doc_char_s >= comp_char_bases[prev_comp_sid]\n\n cur_comp_sid = prev_comp_sid\n for cur_comp_sid in range(prev_comp_sid, len(comp_sents)):\n if doc_char_e <= comp_char_bases[cur_comp_sid+1]:\n prev_comp_sid = cur_comp_sid\n break\n comp_char_base = comp_char_bases[cur_comp_sid]\n assert comp_char_base <= doc_char_s < doc_char_e <= 
comp_char_bases[cur_comp_sid+1]\n comp_char_s = doc_char_s - comp_char_base\n comp_char_e = doc_char_e - comp_char_base\n comp_drange = (cur_comp_sid, comp_char_s, comp_char_e)\n\n raw_drange2comp_drange[raw_drange] = comp_drange\n assert raw_sents[raw_drange[0]][raw_drange[1]:raw_drange[2]] == \\\n comp_sents[comp_drange[0]][comp_drange[1]:comp_drange[2]]\n\n # update detailed align info with rearranged sentences\n detail_align_info['sentences'] = comp_sents\n detail_align_info['ann_valid_dranges'] = [\n raw_drange2comp_drange[tuple(raw_drange)] for raw_drange in detail_align_info['ann_valid_dranges']\n ]\n ann_mspan2comp_dranges = {}\n for ann_mspan, mspan_raw_dranges in detail_align_info['ann_mspan2dranges'].items():\n comp_dranges = [\n raw_drange2comp_drange[tuple(raw_drange)] for raw_drange in mspan_raw_dranges\n ]\n ann_mspan2comp_dranges[ann_mspan] = comp_dranges\n detail_align_info['ann_mspan2dranges'] = ann_mspan2comp_dranges\n\n return detail_align_info\n\n def convert_dict_to_example(self, annguid, detail_align_info, only_inference=False):\n if self.rearrange_sent_flag:\n detail_align_info = self.rearrange_sent_info(detail_align_info)\n dee_example = DEEExample(annguid, detail_align_info, only_inference=only_inference)\n\n return dee_example\n\n def __call__(self, dataset_json_path):\n total_dee_examples = []\n annguid_aligninfo_list = default_load_json(dataset_json_path)\n for annguid, detail_align_info in annguid_aligninfo_list:\n # if self.rearrange_sent_flag:\n # detail_align_info = self.rearrange_sent_info(detail_align_info)\n # dee_example = DEEExample(annguid, detail_align_info)\n dee_example = self.convert_dict_to_example(annguid, detail_align_info)\n total_dee_examples.append(dee_example)\n\n return total_dee_examples\n\n\nclass DEEFeature(object):\n def __init__(self, guid, ex_idx, doc_token_id_mat, doc_token_mask_mat, doc_token_label_mat,\n span_token_ids_list, span_dranges_list, event_type_labels, event_arg_idxs_objs_list,\n valid_sent_num=None):\n self.guid = guid\n self.ex_idx = ex_idx # example row index, used for backtracking\n self.valid_sent_num = valid_sent_num\n\n # directly set tensor for dee feature to save memory\n # self.doc_token_id_mat = doc_token_id_mat\n # self.doc_token_mask_mat = doc_token_mask_mat\n # self.doc_token_label_mat = doc_token_label_mat\n self.doc_token_ids = torch.tensor(doc_token_id_mat, dtype=torch.long)\n self.doc_token_masks = torch.tensor(doc_token_mask_mat, dtype=torch.uint8) # uint8 for mask\n self.doc_token_labels = torch.tensor(doc_token_label_mat, dtype=torch.long)\n\n # sorted by the first drange tuple\n # [(token_id, ...), ...]\n # span_idx -> span_token_id tuple\n self.span_token_ids_list = span_token_ids_list\n # [[(sent_idx, char_s, char_e), ...], ...]\n # span_idx -> [drange tuple, ...]\n self.span_dranges_list = span_dranges_list\n\n # [event_type_label, ...]\n # length = the total number of events to be considered\n # event_type_label \\in {0, 1}, 0: no 1: yes\n self.event_type_labels = event_type_labels\n # event_type is denoted by the index of event_type_labels\n # event_type_idx -> event_obj_idx -> event_arg_idx -> span_idx\n # if no event objects, event_type_idx -> None\n self.event_arg_idxs_objs_list = event_arg_idxs_objs_list\n\n # event_type_idx -> event_field_idx -> pre_path -> {span_idx, ...}\n # pre_path is tuple of span_idx\n self.event_idx2field_idx2pre_path2cur_span_idx_set = self.build_dag_info(self.event_arg_idxs_objs_list)\n\n # event_type_idx -> key_sent_idx_set, used for key-event sentence 
detection\n self.event_idx2key_sent_idx_set, self.doc_sent_labels = self.build_key_event_sent_info()\n\n def generate_dag_info_for(self, pred_span_token_tup_list, return_miss=False):\n token_tup2pred_span_idx = {\n token_tup: pred_span_idx for pred_span_idx, token_tup in enumerate(pred_span_token_tup_list)\n }\n gold_span_idx2pred_span_idx = {}\n # pred_span_idx2gold_span_idx = {}\n missed_span_idx_list = [] # in terms of self\n missed_sent_idx_list = [] # in terms of self\n for gold_span_idx, token_tup in enumerate(self.span_token_ids_list):\n if token_tup in token_tup2pred_span_idx:\n pred_span_idx = token_tup2pred_span_idx[token_tup]\n gold_span_idx2pred_span_idx[gold_span_idx] = pred_span_idx\n # pred_span_idx2gold_span_idx[pred_span_idx] = gold_span_idx\n else:\n missed_span_idx_list.append(gold_span_idx)\n for gold_drange in self.span_dranges_list[gold_span_idx]:\n missed_sent_idx_list.append(gold_drange[0])\n missed_sent_idx_list = list(set(missed_sent_idx_list))\n\n pred_event_arg_idxs_objs_list = []\n for event_arg_idxs_objs in self.event_arg_idxs_objs_list:\n if event_arg_idxs_objs is None:\n pred_event_arg_idxs_objs_list.append(None)\n else:\n pred_event_arg_idxs_objs = []\n for event_arg_idxs in event_arg_idxs_objs:\n pred_event_arg_idxs = []\n for gold_span_idx in event_arg_idxs:\n if gold_span_idx in gold_span_idx2pred_span_idx:\n pred_event_arg_idxs.append(\n gold_span_idx2pred_span_idx[gold_span_idx]\n )\n else:\n pred_event_arg_idxs.append(None)\n\n pred_event_arg_idxs_objs.append(tuple(pred_event_arg_idxs))\n pred_event_arg_idxs_objs_list.append(pred_event_arg_idxs_objs)\n\n # event_idx -> field_idx -> pre_path -> cur_span_idx_set\n pred_dag_info = self.build_dag_info(pred_event_arg_idxs_objs_list)\n\n if return_miss:\n return pred_dag_info, missed_span_idx_list, missed_sent_idx_list\n else:\n return pred_dag_info\n\n def get_event_args_objs_list(self):\n event_args_objs_list = []\n for event_arg_idxs_objs in self.event_arg_idxs_objs_list:\n if event_arg_idxs_objs is None:\n event_args_objs_list.append(None)\n else:\n event_args_objs = []\n for event_arg_idxs in event_arg_idxs_objs:\n event_args = []\n for arg_idx in event_arg_idxs:\n if arg_idx is None:\n token_tup = None\n else:\n token_tup = self.span_token_ids_list[arg_idx]\n event_args.append(token_tup)\n event_args_objs.append(event_args)\n event_args_objs_list.append(event_args_objs)\n\n return event_args_objs_list\n\n def build_key_event_sent_info(self):\n assert len(self.event_type_labels) == len(self.event_arg_idxs_objs_list)\n # event_idx -> key_event_sent_index_set\n event_idx2key_sent_idx_set = [set() for _ in self.event_type_labels]\n for key_sent_idx_set, event_label, event_arg_idxs_objs in zip(\n event_idx2key_sent_idx_set, self.event_type_labels, self.event_arg_idxs_objs_list\n ):\n if event_label == 0:\n assert event_arg_idxs_objs is None\n else:\n for event_arg_idxs_obj in event_arg_idxs_objs:\n sent_idx_cands = []\n for span_idx in event_arg_idxs_obj:\n if span_idx is None:\n continue\n span_dranges = self.span_dranges_list[span_idx]\n for sent_idx, _, _ in span_dranges:\n sent_idx_cands.append(sent_idx)\n if len(sent_idx_cands) == 0:\n raise Exception('Event {} has no valid spans'.format(str(event_arg_idxs_obj)))\n sent_idx_cnter = Counter(sent_idx_cands)\n key_sent_idx = sent_idx_cnter.most_common()[0][0]\n key_sent_idx_set.add(key_sent_idx)\n\n doc_sent_labels = [] # 1: key event sentence, 0: otherwise\n for sent_idx in range(self.valid_sent_num): # masked sents will be truncated at the model part\n 
sent_labels = []\n for key_sent_idx_set in event_idx2key_sent_idx_set: # this mapping is a list\n if sent_idx in key_sent_idx_set:\n sent_labels.append(1)\n else:\n sent_labels.append(0)\n doc_sent_labels.append(sent_labels)\n\n return event_idx2key_sent_idx_set, doc_sent_labels\n\n @staticmethod\n def build_dag_info(event_arg_idxs_objs_list):\n # event_idx -> field_idx -> pre_path -> {span_idx, ...}\n # pre_path is tuple of span_idx\n event_idx2field_idx2pre_path2cur_span_idx_set = []\n for event_idx, event_arg_idxs_list in enumerate(event_arg_idxs_objs_list):\n if event_arg_idxs_list is None:\n event_idx2field_idx2pre_path2cur_span_idx_set.append(None)\n else:\n num_fields = len(event_arg_idxs_list[0])\n # field_idx -> pre_path -> {span_idx, ...}\n field_idx2pre_path2cur_span_idx_set = []\n for field_idx in range(num_fields):\n pre_path2cur_span_idx_set = {}\n for event_arg_idxs in event_arg_idxs_list:\n pre_path = event_arg_idxs[:field_idx]\n span_idx = event_arg_idxs[field_idx]\n if pre_path not in pre_path2cur_span_idx_set:\n pre_path2cur_span_idx_set[pre_path] = set()\n pre_path2cur_span_idx_set[pre_path].add(span_idx)\n field_idx2pre_path2cur_span_idx_set.append(pre_path2cur_span_idx_set)\n event_idx2field_idx2pre_path2cur_span_idx_set.append(field_idx2pre_path2cur_span_idx_set)\n\n return event_idx2field_idx2pre_path2cur_span_idx_set\n\n def is_multi_event(self):\n event_cnt = 0\n for event_objs in self.event_arg_idxs_objs_list:\n if event_objs is not None:\n event_cnt += len(event_objs)\n if event_cnt > 1:\n return True\n\n return False\n\n\nclass DEEFeatureConverter(object):\n def __init__(self, entity_label_list, event_type_fields_pairs,\n max_sent_len, max_sent_num, tokenizer,\n ner_fea_converter=None, include_cls=True, include_sep=True):\n self.entity_label_list = entity_label_list\n self.event_type_fields_pairs = event_type_fields_pairs\n self.max_sent_len = max_sent_len\n self.max_sent_num = max_sent_num\n self.tokenizer = tokenizer\n self.truncate_doc_count = 0 # track how many docs have been truncated due to max_sent_num\n self.truncate_span_count = 0 # track how may spans have been truncated\n\n # label not in entity_label_list will be default 'O'\n # sent_len > max_sent_len will be truncated, and increase ner_fea_converter.truncate_freq\n if ner_fea_converter is None:\n self.ner_fea_converter = NERFeatureConverter(entity_label_list, self.max_sent_len, tokenizer,\n include_cls=include_cls, include_sep=include_sep)\n else:\n self.ner_fea_converter = ner_fea_converter\n\n self.include_cls = include_cls\n self.include_sep = include_sep\n\n # prepare entity_label -> entity_index mapping\n self.entity_label2index = {}\n for entity_idx, entity_label in enumerate(self.entity_label_list):\n self.entity_label2index[entity_label] = entity_idx\n\n # prepare event_type -> event_index and event_index -> event_fields mapping\n self.event_type2index = {}\n self.event_type_list = []\n self.event_fields_list = []\n for event_idx, (event_type, event_fields) in enumerate(self.event_type_fields_pairs):\n self.event_type2index[event_type] = event_idx\n self.event_type_list.append(event_type)\n self.event_fields_list.append(event_fields)\n\n def convert_example_to_feature(self, ex_idx, dee_example, log_flag=False):\n annguid = dee_example.guid\n assert isinstance(dee_example, DEEExample)\n\n # 1. 
prepare doc token-level feature\n\n # Size(num_sent_num, num_sent_len)\n doc_token_id_mat = [] # [[token_idx, ...], ...]\n doc_token_mask_mat = [] # [[token_mask, ...], ...]\n doc_token_label_mat = [] # [[token_label_id, ...], ...]\n\n for sent_idx, sent_text in enumerate(dee_example.sentences):\n if sent_idx >= self.max_sent_num:\n # truncate doc whose number of sentences is longer than self.max_sent_num\n self.truncate_doc_count += 1\n break\n\n if sent_idx in dee_example.sent_idx2srange_mspan_mtype_tuples:\n srange_mspan_mtype_tuples = dee_example.sent_idx2srange_mspan_mtype_tuples[sent_idx]\n else:\n srange_mspan_mtype_tuples = []\n\n ner_example = NERExample(\n '{}-{}'.format(annguid, sent_idx), sent_text, srange_mspan_mtype_tuples\n )\n # sentence truncated count will be recorded incrementally\n ner_feature = self.ner_fea_converter.convert_example_to_feature(ner_example, log_flag=log_flag)\n\n doc_token_id_mat.append(ner_feature.input_ids)\n doc_token_mask_mat.append(ner_feature.input_masks)\n doc_token_label_mat.append(ner_feature.label_ids)\n\n assert len(doc_token_id_mat) == len(doc_token_mask_mat) == len(doc_token_label_mat) <= self.max_sent_num\n valid_sent_num = len(doc_token_id_mat)\n\n # 2. prepare span feature\n # spans are sorted by the first drange\n span_token_ids_list = []\n span_dranges_list = []\n mspan2span_idx = {}\n for mspan in dee_example.ann_valid_mspans:\n if mspan in mspan2span_idx:\n continue\n\n raw_dranges = dee_example.ann_mspan2dranges[mspan]\n char_base_s = 1 if self.include_cls else 0\n char_max_end = self.max_sent_len - 1 if self.include_sep else self.max_sent_len\n span_dranges = []\n for sent_idx, char_s, char_e in raw_dranges:\n if char_base_s + char_e <= char_max_end and sent_idx < self.max_sent_num:\n span_dranges.append((sent_idx, char_base_s + char_s, char_base_s + char_e))\n else:\n self.truncate_span_count += 1\n if len(span_dranges) == 0:\n # span does not have any valid location in truncated sequences\n continue\n\n span_tokens = self.tokenizer.char_tokenize(mspan)\n span_token_ids = tuple(self.tokenizer.convert_tokens_to_ids(span_tokens))\n\n mspan2span_idx[mspan] = len(span_token_ids_list)\n span_token_ids_list.append(span_token_ids)\n span_dranges_list.append(span_dranges)\n assert len(span_token_ids_list) == len(span_dranges_list) == len(mspan2span_idx)\n\n if len(span_token_ids_list) == 0 and not dee_example.only_inference:\n logger.warning('Neglect example {}'.format(ex_idx))\n return None\n\n # 3. 
prepare doc-level event feature\n # event_type_labels: event_type_index -> event_type_exist_sign (1: exist, 0: no)\n # event_arg_idxs_objs_list: event_type_index -> event_obj_index -> event_arg_index -> arg_span_token_ids\n\n event_type_labels = [] # event_type_idx -> event_type_exist_sign (1 or 0)\n event_arg_idxs_objs_list = [] # event_type_idx -> event_obj_idx -> event_arg_idx -> span_idx\n for event_idx, event_type in enumerate(self.event_type_list):\n event_fields = self.event_fields_list[event_idx]\n\n if event_type not in dee_example.event_type2event_objs:\n event_type_labels.append(0)\n event_arg_idxs_objs_list.append(None)\n else:\n event_objs = dee_example.event_type2event_objs[event_type]\n\n event_arg_idxs_objs = []\n for event_obj in event_objs:\n assert isinstance(event_obj, BaseEvent)\n\n event_arg_idxs = []\n any_valid_flag = False\n for field in event_fields:\n arg_span = event_obj.field2content[field]\n\n if arg_span is None or arg_span not in mspan2span_idx:\n # arg_span can be none or valid span is truncated\n arg_span_idx = None\n else:\n # when constructing data files,\n # must ensure event arg span is covered by the total span collections\n arg_span_idx = mspan2span_idx[arg_span]\n any_valid_flag = True\n\n event_arg_idxs.append(arg_span_idx)\n\n if any_valid_flag:\n event_arg_idxs_objs.append(tuple(event_arg_idxs))\n\n if event_arg_idxs_objs:\n event_type_labels.append(1)\n event_arg_idxs_objs_list.append(event_arg_idxs_objs)\n else:\n event_type_labels.append(0)\n event_arg_idxs_objs_list.append(None)\n\n dee_feature = DEEFeature(\n annguid, ex_idx, doc_token_id_mat, doc_token_mask_mat, doc_token_label_mat,\n span_token_ids_list, span_dranges_list, event_type_labels, event_arg_idxs_objs_list,\n valid_sent_num=valid_sent_num\n )\n\n return dee_feature\n\n def __call__(self, dee_examples, log_example_num=0):\n \"\"\"Convert examples to features suitable for document-level event extraction\"\"\"\n dee_features = []\n self.truncate_doc_count = 0\n self.truncate_span_count = 0\n self.ner_fea_converter.truncate_count = 0\n\n remove_ex_cnt = 0\n for ex_idx, dee_example in enumerate(dee_examples):\n if ex_idx < log_example_num:\n dee_feature = self.convert_example_to_feature(ex_idx-remove_ex_cnt, dee_example, log_flag=True)\n else:\n dee_feature = self.convert_example_to_feature(ex_idx-remove_ex_cnt, dee_example, log_flag=False)\n\n if dee_feature is None:\n remove_ex_cnt += 1\n continue\n\n dee_features.append(dee_feature)\n\n logger.info('{} documents, ignore {} examples, truncate {} docs, {} sents, {} spans'.format(\n len(dee_examples), remove_ex_cnt,\n self.truncate_doc_count, self.ner_fea_converter.truncate_count, self.truncate_span_count\n ))\n\n return dee_features\n\n\ndef convert_dee_features_to_dataset(dee_features):\n # just view a list of doc_fea as the dataset, that only requires __len__, __getitem__\n assert len(dee_features) > 0 and isinstance(dee_features[0], DEEFeature)\n\n return dee_features\n\n\ndef prepare_doc_batch_dict(doc_fea_list):\n doc_batch_keys = ['ex_idx', 'doc_token_ids', 'doc_token_masks', 'doc_token_labels', 'valid_sent_num']\n doc_batch_dict = {}\n for key in doc_batch_keys:\n doc_batch_dict[key] = [getattr(doc_fea, key) for doc_fea in doc_fea_list]\n\n return doc_batch_dict\n\n\ndef measure_dee_prediction(event_type_fields_pairs, features, event_decode_results,\n dump_json_path=None):\n pred_record_mat_list = []\n gold_record_mat_list = []\n for term in event_decode_results:\n ex_idx, pred_event_type_labels, pred_record_mat = term[:3]\n 
pred_record_mat = [\n [\n [\n tuple(arg_tup) if arg_tup is not None else None\n for arg_tup in pred_record\n ] for pred_record in pred_records\n ] if pred_records is not None else None\n for pred_records in pred_record_mat\n ]\n doc_fea = features[ex_idx]\n assert isinstance(doc_fea, DEEFeature)\n gold_record_mat = [\n [\n [\n tuple(doc_fea.span_token_ids_list[arg_idx]) if arg_idx is not None else None\n for arg_idx in event_arg_idxs\n ] for event_arg_idxs in event_arg_idxs_objs\n ] if event_arg_idxs_objs is not None else None\n for event_arg_idxs_objs in doc_fea.event_arg_idxs_objs_list\n ]\n\n pred_record_mat_list.append(pred_record_mat)\n gold_record_mat_list.append(gold_record_mat)\n\n g_eval_res = measure_event_table_filling(\n pred_record_mat_list, gold_record_mat_list, event_type_fields_pairs, dict_return=True\n )\n\n if dump_json_path is not None:\n default_dump_json(g_eval_res, dump_json_path)\n\n return g_eval_res\n\n\ndef aggregate_task_eval_info(eval_dir_path, target_file_pre='dee_eval', target_file_suffix='.json',\n dump_name='total_task_eval.pkl', dump_flag=False):\n \"\"\"Enumerate the evaluation directory to collect all dumped evaluation results\"\"\"\n logger.info('Aggregate task evaluation info from {}'.format(eval_dir_path))\n data_span_type2model_str2epoch_res_list = {}\n for fn in os.listdir(eval_dir_path):\n fn_splits = fn.split('.')\n if fn.startswith(target_file_pre) and fn.endswith(target_file_suffix) and len(fn_splits) == 6:\n _, data_type, span_type, model_str, epoch, _ = fn_splits\n\n data_span_type = (data_type, span_type)\n if data_span_type not in data_span_type2model_str2epoch_res_list:\n data_span_type2model_str2epoch_res_list[data_span_type] = {}\n model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[data_span_type]\n\n if model_str not in model_str2epoch_res_list:\n model_str2epoch_res_list[model_str] = []\n epoch_res_list = model_str2epoch_res_list[model_str]\n\n epoch = int(epoch)\n fp = os.path.join(eval_dir_path, fn)\n eval_res = default_load_json(fp)\n\n epoch_res_list.append((epoch, eval_res))\n\n for data_span_type, model_str2epoch_res_list in data_span_type2model_str2epoch_res_list.items():\n for model_str, epoch_res_list in model_str2epoch_res_list.items():\n epoch_res_list.sort(key=lambda x: x[0])\n\n if dump_flag:\n dump_fp = os.path.join(eval_dir_path, dump_name)\n logger.info('Dumping {} into {}'.format(dump_name, eval_dir_path))\n default_dump_pkl(data_span_type2model_str2epoch_res_list, dump_fp)\n\n return data_span_type2model_str2epoch_res_list\n\n\ndef print_total_eval_info(data_span_type2model_str2epoch_res_list,\n metric_type='micro',\n span_type='pred_span',\n model_strs=('DCFEE-O', 'DCFEE-M', 'GreedyDec', 'Doc2EDAG'),\n target_set='test'):\n \"\"\"Print the final performance by selecting the best epoch on dev set and emitting performance on test set\"\"\"\n dev_type = 'dev'\n test_type = 'test'\n avg_type2prf1_keys = {\n 'macro': ('MacroPrecision', 'MacroRecall', 'MacroF1'),\n 'micro': ('MicroPrecision', 'MicroRecall', 'MicroF1'),\n }\n\n name_key = 'EventType'\n p_key, r_key, f_key = avg_type2prf1_keys[metric_type]\n\n def get_avg_event_score(epoch_res):\n eval_res = epoch_res[1]\n avg_event_score = eval_res[-1][f_key]\n\n return avg_event_score\n\n dev_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(dev_type, span_type)]\n test_model_str2epoch_res_list = data_span_type2model_str2epoch_res_list[(test_type, span_type)]\n\n has_header = False\n mstr_bepoch_list = []\n print('=' * 15, 'Final Performance 
(%) (avg_type={})'.format(metric_type), '=' * 15)\n for model_str in model_strs:\n if model_str not in dev_model_str2epoch_res_list or model_str not in test_model_str2epoch_res_list:\n continue\n\n # get the best epoch on dev set\n dev_epoch_res_list = dev_model_str2epoch_res_list[model_str]\n best_dev_epoch, best_dev_res = max(dev_epoch_res_list, key=get_avg_event_score)\n\n test_epoch_res_list = test_model_str2epoch_res_list[model_str]\n best_test_epoch = None\n best_test_res = None\n for test_epoch, test_res in test_epoch_res_list:\n if test_epoch == best_dev_epoch:\n best_test_epoch = test_epoch\n best_test_res = test_res\n assert best_test_epoch is not None\n mstr_bepoch_list.append((model_str, best_test_epoch))\n\n if target_set == 'test':\n target_eval_res = best_test_res\n else:\n target_eval_res = best_dev_res\n\n align_temp = '{:20}'\n head_str = align_temp.format('ModelType')\n eval_str = align_temp.format(model_str)\n head_temp = ' \\t {}'\n eval_temp = ' \\t & {:.1f} & {:.1f} & {:.1f}'\n ps = []\n rs = []\n fs = []\n for tgt_event_res in target_eval_res[:-1]:\n head_str += align_temp.format(head_temp.format(tgt_event_res[0][name_key]))\n p, r, f1 = (100 * tgt_event_res[0][key] for key in [p_key, r_key, f_key])\n eval_str += align_temp.format(eval_temp.format(p, r, f1))\n ps.append(p)\n rs.append(r)\n fs.append(f1)\n\n head_str += align_temp.format(head_temp.format('Average'))\n ap, ar, af1 = (x for x in [np.mean(ps), np.mean(rs), np.mean(fs)])\n eval_str += align_temp.format(eval_temp.format(ap, ar, af1))\n\n head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))\n g_avg_res = target_eval_res[-1]\n ap, ar, af1 = (100 * g_avg_res[key] for key in [p_key, r_key, f_key])\n eval_str += align_temp.format(eval_temp.format(ap, ar, af1))\n\n if not has_header:\n print(head_str)\n has_header = True\n print(eval_str)\n\n return mstr_bepoch_list\n\n\n# evaluation dump file name template\n# dee_eval.[DataType].[SpanType].[ModelStr].[Epoch].(pkl|json)\ndecode_dump_template = 'dee_eval.{}.{}.{}.{}.pkl'\neval_dump_template = 'dee_eval.{}.{}.{}.{}.json'\n\n\ndef resume_decode_results(base_dir, data_type, span_type, model_str, epoch):\n decode_fn = decode_dump_template.format(data_type, span_type, model_str, epoch)\n decode_fp = os.path.join(base_dir, decode_fn)\n logger.info('Resume decoded results from {}'.format(decode_fp))\n decode_results = default_load_pkl(decode_fp)\n\n return decode_results\n\n\ndef resume_eval_results(base_dir, data_type, span_type, model_str, epoch):\n eval_fn = eval_dump_template.format(data_type, span_type, model_str, epoch)\n eval_fp = os.path.join(base_dir, eval_fn)\n logger.info('Resume eval results from {}'.format(eval_fp))\n eval_results = default_load_json(eval_fp)\n\n return eval_results\n\n\ndef print_single_vs_multi_performance(mstr_bepoch_list, base_dir, features,\n metric_type='micro', data_type='test', span_type='pred_span'):\n model_str2decode_results = {}\n for model_str, best_epoch in mstr_bepoch_list:\n model_str2decode_results[model_str] = resume_decode_results(\n base_dir, data_type, span_type, model_str, best_epoch\n )\n\n single_eid_set = set([doc_fea.ex_idx for doc_fea in features if not doc_fea.is_multi_event()])\n multi_eid_set = set([doc_fea.ex_idx for doc_fea in features if doc_fea.is_multi_event()])\n event_type_fields_pairs = DEEExample.get_event_type_fields_pairs()\n event_type_list = [x for x, y in event_type_fields_pairs]\n\n name_key = 'EventType'\n avg_type2f1_key = {\n 'micro': 'MicroF1',\n 'macro': 
'MacroF1',\n }\n f1_key = avg_type2f1_key[metric_type]\n\n model_str2etype_sf1_mf1_list = {}\n for model_str, _ in mstr_bepoch_list:\n total_decode_results = model_str2decode_results[model_str]\n\n single_decode_results = [dec_res for dec_res in total_decode_results if dec_res[0] in single_eid_set]\n assert len(single_decode_results) == len(single_eid_set)\n single_eval_res = measure_dee_prediction(\n event_type_fields_pairs, features, single_decode_results\n )\n\n multi_decode_results = [dec_res for dec_res in total_decode_results if dec_res[0] in multi_eid_set]\n assert len(multi_decode_results) == len(multi_eid_set)\n multi_eval_res = measure_dee_prediction(\n event_type_fields_pairs, features, multi_decode_results\n )\n\n etype_sf1_mf1_list = []\n for event_idx, (se_res, me_res) in enumerate(zip(single_eval_res[:-1], multi_eval_res[:-1])):\n assert se_res[0][name_key] == me_res[0][name_key] == event_type_list[event_idx]\n event_type = event_type_list[event_idx]\n single_f1 = se_res[0][f1_key]\n multi_f1 = me_res[0][f1_key]\n\n etype_sf1_mf1_list.append((event_type, single_f1, multi_f1))\n g_avg_se_res = single_eval_res[-1]\n g_avg_me_res = multi_eval_res[-1]\n etype_sf1_mf1_list.append(\n ('Total ({})'.format(metric_type), g_avg_se_res[f1_key], g_avg_me_res[f1_key])\n )\n model_str2etype_sf1_mf1_list[model_str] = etype_sf1_mf1_list\n\n print('=' * 15, 'Single vs. Multi (%) (avg_type={})'.format(metric_type), '=' * 15)\n align_temp = '{:20}'\n head_str = align_temp.format('ModelType')\n head_temp = ' \\t {}'\n eval_temp = ' \\t & {:.1f} & {:.1f} '\n for event_type in event_type_list:\n head_str += align_temp.format(head_temp.format(event_type))\n head_str += align_temp.format(head_temp.format('Total ({})'.format(metric_type)))\n head_str += align_temp.format(head_temp.format('Average'))\n print(head_str)\n\n for model_str, _ in mstr_bepoch_list:\n eval_str = align_temp.format(model_str)\n sf1s = []\n mf1s = []\n for _, single_f1, multi_f1 in model_str2etype_sf1_mf1_list[model_str]:\n eval_str += align_temp.format(eval_temp.format(single_f1*100, multi_f1*100))\n sf1s.append(single_f1)\n mf1s.append(multi_f1)\n avg_sf1 = np.mean(sf1s[:-1])\n avg_mf1 = np.mean(mf1s[:-1])\n eval_str += align_temp.format(eval_temp.format(avg_sf1*100, avg_mf1*100))\n print(eval_str)\n\n\ndef print_ablation_study(mstr_bepoch_list, base_dir, base_mstr, other_mstrs,\n metric_type='micro', data_type='test', span_type='pred_span'):\n model_str2best_epoch = dict(mstr_bepoch_list)\n if base_mstr not in model_str2best_epoch:\n print('No base model type {}'.format(base_mstr))\n return\n\n base_eval = resume_eval_results(base_dir, data_type, span_type, base_mstr, model_str2best_epoch[base_mstr])\n model_str2eval_res = {\n model_str: resume_eval_results(base_dir, data_type, span_type, model_str, model_str2best_epoch[model_str])\n for model_str in other_mstrs if model_str in model_str2best_epoch\n }\n\n event_type_fields_pairs = DEEExample.get_event_type_fields_pairs()\n event_type_list = [x for x, y in event_type_fields_pairs]\n # name_key = 'EventType'\n # f1_key = 'AvgFieldF1'\n avg_type2f1_key = {\n 'micro': 'MicroF1',\n 'macro': 'MacroF1'\n }\n f1_key = avg_type2f1_key[metric_type]\n\n print('=' * 15, 'Ablation Study (avg_type={})'.format(metric_type), '=' * 15)\n align_temp = '{:20}'\n head_str = align_temp.format('ModelType')\n head_temp = ' \\t {}'\n for event_type in event_type_list:\n head_str += align_temp.format(head_temp.format(event_type))\n head_str += align_temp.format(head_temp.format('Average 
({})'.format(metric_type)))\n head_str += align_temp.format(head_temp.format('Average'))\n print(head_str)\n\n eval_temp = ' \\t & {:.1f}'\n eval_str = align_temp.format(base_mstr)\n bf1s = []\n for base_event_res in base_eval[:-1]:\n base_f1 = base_event_res[0][f1_key]\n eval_str += align_temp.format(eval_temp.format(base_f1*100))\n bf1s.append(base_f1)\n g_avg_bf1 = base_eval[-1][f1_key]\n eval_str += align_temp.format(eval_temp.format(g_avg_bf1*100))\n avg_bf1 = np.mean(bf1s)\n eval_str += align_temp.format(eval_temp.format(avg_bf1*100))\n print(eval_str)\n\n inc_temp = ' \\t & +{:.1f}'\n dec_temp = ' \\t & -{:.1f}'\n for model_str in other_mstrs:\n if model_str in model_str2eval_res:\n eval_str = align_temp.format(model_str)\n cur_eval = model_str2eval_res[model_str]\n f1ds = []\n for base_event_res, cur_event_res in zip(base_eval[:-1], cur_eval[:-1]):\n base_f1 = base_event_res[0][f1_key]\n cur_f1 = cur_event_res[0][f1_key]\n f1_diff = cur_f1 - base_f1\n f1ds.append(f1_diff)\n f1_abs = abs(f1_diff)\n if f1_diff >= 0:\n eval_str += align_temp.format(inc_temp.format(f1_abs*100))\n else:\n eval_str += align_temp.format(dec_temp.format(f1_abs*100))\n\n g_avg_f1_diff = cur_eval[-1][f1_key] - base_eval[-1][f1_key]\n g_avg_f1_abs = abs(g_avg_f1_diff)\n if g_avg_f1_diff >= 0:\n eval_str += align_temp.format(inc_temp.format(g_avg_f1_abs*100))\n else:\n eval_str += align_temp.format(dec_temp.format(g_avg_f1_abs*100))\n\n avg_f1_diff = np.mean(f1ds)\n avg_f1_abs = abs(avg_f1_diff)\n if avg_f1_diff >= 0:\n eval_str += align_temp.format(inc_temp.format(avg_f1_abs*100))\n else:\n eval_str += align_temp.format(dec_temp.format(avg_f1_abs*100))\n\n print(eval_str)\n\n" ]
[ [ "numpy.mean", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shalei120/OxLegalReasoning
[ "009bc8249d45d61a3d9f52146e447535005e79d2" ]
[ "LSTM_capIB.py" ]
[ "import torch\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.nn.parameter import Parameter\n\nimport numpy as np\n\nimport datetime\nimport math\n\nfrom Encoder import Encoder\nfrom Decoder import Decoder\nfrom Hyperparameters import args\n\nclass LSTM_capsule_IB_Model(nn.Module):\n \"\"\"\n Implementation of a seq2seq model.\n Architecture:\n Encoder/decoder\n 2 LTSM layers\n \"\"\"\n\n def __init__(self, w2i, i2w):\n \"\"\"\n Args:\n args: parameters of the model\n textData: the dataset object\n \"\"\"\n super(LSTM_capsule_IB_Model, self).__init__()\n print(\"Model creation...\")\n\n self.word2index = w2i\n self.index2word = i2w\n self.max_length = args['maxLengthDeco']\n\n self.NLLloss = torch.nn.NLLLoss(reduction = 'none')\n self.CEloss = torch.nn.CrossEntropyLoss(reduction = 'none')\n\n self.embedding = nn.Embedding(args['vocabularySize'], args['embeddingSize']).to(args['device'])\n\n self.encoder = Encoder(w2i, i2w, self.embedding).to(args['device'])\n\n self.tanh = nn.Tanh()\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim = -1)\n\n # self.x_2_prob_z = nn.Sequential(\n # nn.Linear(args['hiddenSize'], 2),\n # nn.Softmax(dim=-1)\n # ).to(args['device'])\n self.x_2_prob_z_weight = Parameter(torch.rand(args['chargenum'], args['hiddenSize'], 2)).to(args['device'])\n self.z_to_fea = nn.Sequential(\n nn.Linear(args['hiddenSize'], args['hiddenSize']).to(args['device']),\n nn.Tanh()\n ).to(args['device'])\n\n self.ChargeClassifier = nn.Sequential(\n nn.Linear(args['hiddenSize'], 1),\n nn.Sigmoid()\n ).to(args['device'])\n\n '''\n capsule\n '''\n self.cap_Wij = nn.Linear(args['hiddenSize'], args['capsuleSize'],bias=False).to(args['device'])\n\n # self.z2_mean = Parameter(torch.rand(args['chargenum'], args['capsuleSize'])).to(args['device'])\n # self.z2_logvar = Parameter(torch.rand(args['chargenum'], args['capsuleSize'])).to(args['device'])\n\n\n self.q_linear = nn.Linear(args['hiddenSize'], args['hiddenSize']).to(args['device'])\n self.v_linear = nn.Linear(args['hiddenSize'], args['hiddenSize']).to(args['device'])\n self.k_linear = nn.Linear(args['hiddenSize'], args['hiddenSize']).to(args['device'])\n self.z2_hid2mean = nn.Linear(args['hiddenSize'], args['hiddenSize']).to(args['device'])\n self.z2_hid2logvar = nn.Linear(args['hiddenSize'], args['hiddenSize']).to(args['device'])\n \n def sample_gumbel(self, shape, eps=1e-20):\n U = torch.rand(shape).to(args['device'])\n return -torch.log(-torch.log(U + eps) + eps)\n\n def gumbel_softmax_sample(self, logits, temperature):\n y = logits + self.sample_gumbel(logits.size())\n return F.softmax(y / temperature, dim=-1)\n\n def gumbel_softmax(self, logits, temperature = args['temperature']):\n \"\"\"\n ST-gumple-softmax\n input: [*, n_class]\n return: flatten --> [*, n_class] an one-hot vector\n \"\"\"\n y = self.gumbel_softmax_sample(logits, temperature)\n shape = y.size()\n _, ind = y.max(dim=-1)\n y_hard = torch.zeros_like(y).view(-1, shape[-1])\n y_hard.scatter_(1, ind.view(-1, 1), 1)\n y_hard = y_hard.view(*shape)\n y_hard = (y_hard - y).detach() + y\n return y_hard\n\n def mask_softmax(self, logits, mask):\n '''\n :param logits: batch seq classnum\n :param mask: batch seq\n :return: batch seq classnum\n '''\n if len(logits.size()) == 3:\n explogits = torch.exp(logits) * mask.unsqueeze(2)\n elif len(logits.size()) == 2:\n explogits = torch.exp(logits) * mask\n\n explogits_Z = torch.sum(explogits, dim = 1, 
keepdim=True)\n softmax = explogits / explogits_Z\n\n return softmax\n\n def squash(self, s):\n '''\n :param s: batch classnum capsuledim\n :return:\n '''\n s_norm = torch.norm(s, dim = 2, keepdim = True) # batch classnum 1\n v = (s_norm / (1+ s_norm**2)) * s\n return v\n\n def self_attention(self, q, k, v, d_k, mask=None, dropout=None):\n # k = self.k_linear(k) # batch seq hid\n # q = self.q_linear(q)\n # v = self.v_linear(v)\n\n # scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)\n scores = torch.einsum('bsh,bth->bst', q, k) / math.sqrt(d_k)\n\n if mask is not None:\n scores = scores.masked_fill_(mask.unsqueeze(2) == 0, -1e9) # in place\n scores = scores.masked_fill_(mask.unsqueeze(1) == 0, -1e9) # in place\n scores = self.softmax(scores)\n\n if dropout is not None:\n scores = dropout(scores)\n\n output = torch.einsum('bst,bth->bsh',scores, v)\n return output\n\n def attention(self, q, mask=None, dropout=None):\n avg_q = torch.sum(q, dim = 1) / (torch.sum(mask.unsqueeze(2), dim = 1)+1) # batch hid\n scores = torch.einsum('bh,bsh->bs', avg_q,q)\n\n scores = scores.masked_fill_(mask == 0, -1e9) # in place\n scores = self.softmax(scores)\n\n if dropout is not None:\n scores = dropout(scores)\n\n output = scores.unsqueeze(2) * q # batch s hid\n return output\n\n def sample_z(self, mu, log_var):\n eps = Variable(torch.randn(mu.size())).to(args['device'])\n return mu + torch.exp(log_var / 2) * eps\n\n def forward(self, x, eps = 0.000001):\n '''\n :param encoderInputs: [batch, enc_len]\n :param decoderInputs: [batch, dec_len]\n :param decoderTargets: [batch, dec_len]\n :return:\n '''\n\n # print(x['enc_input'])\n self.encoderInputs = x['enc_input'].to(args['device'])\n self.encoder_lengths = x['enc_len']\n self.classifyLabels = x['labels'].to(args['device'])\n self.batch_size = self.encoderInputs.size()[0]\n self.seqlen = self.encoderInputs.size()[1]\n\n mask = torch.sign(self.encoderInputs).float()\n\n en_outputs, en_state = self.encoder(self.encoderInputs, self.encoder_lengths) # batch seq hid\n # print(en_outputs.size())\n\n z_prob = torch.einsum('chy,bsh->bcsy', self.x_2_prob_z_weight, en_outputs)# batch chargenum seq 2\n z_prob = self.softmax(z_prob)\n\n z_prob_fla = z_prob.reshape((self.batch_size * args['chargenum']* self.seqlen, 2))\n sampled_seq = self.gumbel_softmax(z_prob_fla).reshape((self.batch_size, args['chargenum'], self.seqlen, 2))\n # batch chargenum seq 2 //0-1\n sampled_seq = sampled_seq * mask.unsqueeze(1).unsqueeze(3)\n\n # print(sampled_seq)\n\n sampled_num = torch.sum(sampled_seq[:,:,:,1], dim = 1) # batch chargenum\n sampled_num = (sampled_num == 0).to(args['device'], dtype=torch.float32) + sampled_num\n sampled_word = en_outputs.unsqueeze(1) * (sampled_seq[:,:,:,1].unsqueeze(3)) # batch chargenum seq hid\n # s_w_feature = self.z_to_fea(sampled_word)\n # s_w_feature = torch.sum(s_w_feature, dim = 1)/ sampled_num.unsqueeze(1)# batch hid\n\n '''\n z1 -> z2\n '''\n sampled_word_bc = sampled_word.reshape(self.batch_size * args['chargenum'], self.seqlen,args['hiddenSize'] )\n z2_words = self.attention(sampled_word_bc,mask=sampled_seq[:,:,:,1].reshape(self.batch_size * args['chargenum'], self.seqlen))\n z2_words = z2_words.reshape(self.batch_size, args['chargenum'], self.seqlen, args['hiddenSize']).to(args['device'])\n z2_hid = z2_words.sum(dim = 2).to(args['device']) # batch chargenum hid\n z2_mean = self.z2_hid2mean(z2_hid)\n z2_logvar = self.z2_hid2logvar(z2_hid)\n\n z2 = self.sample_z(z2_mean, z2_logvar) # batch chargenum hid\n\n # 
print('z2',torch.sum(z2))\n\n\n I_x_z = torch.mean(-torch.log(z_prob[:,:,:,0]+ eps))\n # print(I_x_z)\n # en_hidden, en_cell = en_state #2 batch hid\n\n '''\n Capsule\n '''\n\n\n capsule_v_norm = self.ChargeClassifier(z2).squeeze() # b chargenum\n # print('capsule_v_norm: ', capsule_v_norm)\n\n m_plus = 0.9\n m_minus = 0.1\n\n cap_pos = self.relu(m_plus - capsule_v_norm) **2 # b chargenum max(0, *)\n cap_neg = self.relu(capsule_v_norm - m_minus) **2\n\n answer = F.one_hot(self.classifyLabels, num_classes=args['chargenum']) # batch chargenum\n lambda_c = 0.5\n\n # print('cap_pos: ', cap_pos.size(), cap_neg.size(), answer.size())\n capsule_loss = answer.float() * cap_pos + lambda_c * (1-answer.float()) * cap_neg\n capsule_loss = torch.mean(torch.sum(capsule_loss,dim = 1))\n # print('caploss: ',capsule_loss)\n\n\n # xz_mock,_ = torch.max(capsule_b ,dim =2 ) # b s\n # xz_mock_p = self.mask_softmax(xz_mock, sampled_seq[:,:,1])\n #\n #\n # # z_regu = - torch.sum(xz_mock_p * torch.log(z_prob[:,:,1]+eps) * sampled_seq[:,:,1], dim = 1)\n # # z_regu = torch.mean(z_regu)\n # xz_mock_p = xz_mock_p.unsqueeze(2) # b s 1\n # diff_xz_mock = torch.triu(xz_mock_p - xz_mock_p.transpose(1,2))\n # pred_z_prob = z_prob[:, :, 1].unsqueeze(2) # b s 1\n # diff_pred_z_prob = torch.triu(pred_z_prob - pred_z_prob.transpose(1,2))\n #\n # # print(diff_xz_mock)\n #\n # z_regu = torch.sum(self.relu(0.1 - diff_xz_mock * diff_pred_z_prob), dim = 2)\n # z_regu = torch.sum(z_regu, dim = 1)\n # z_regu = torch.mean(z_regu)\n\n\n # output = self.ChargeClassifier(s_w_feature).to(args['device']) # batch chargenum\n # recon_loss = self.NLLloss(output, self.classifyLabels).to(args['device'])\n # recon_loss_mean = torch.mean(recon_loss).to(args['device'])\n\n # print(capsule_loss, z_regu)\n loss = capsule_loss + 0.05 * I_x_z\n return loss\n\n def predict(self, x):\n encoderInputs = x['enc_input'].to(args['device'])\n encoder_lengths = x['enc_len']\n\n batch_size = encoderInputs.size()[0]\n seqlen = encoderInputs.size()[1]\n mask = torch.sign(encoderInputs).float()\n\n en_outputs, en_state = self.encoder(encoderInputs, encoder_lengths)\n\n z_prob = torch.einsum('chy,bsh->bcsy', self.x_2_prob_z_weight, en_outputs) # batch chargenum seq 2\n z_prob = self.softmax(z_prob)\n z_prob_fla = z_prob.reshape((batch_size * args['chargenum'] * seqlen, 2))\n sampled_seq = self.gumbel_softmax(z_prob_fla).reshape((batch_size, args['chargenum'], seqlen, 2))\n # batch chargenum seq 2 //0-1\n sampled_seq = sampled_seq * mask.unsqueeze(1).unsqueeze(3)\n\n # print(sampled_seq)\n\n sampled_num = torch.sum(sampled_seq[:, :, :, 1], dim=2) # batch chargenum\n sampled_num = (sampled_num == 0).to(args['device'], dtype=torch.float32) + sampled_num\n sampled_word = en_outputs.unsqueeze(1) * (sampled_seq[:, :, :, 1].unsqueeze(3))\n\n '''\n z1 -> z2\n '''\n sampled_word_bc = sampled_word.reshape(batch_size * args['chargenum'], seqlen, args['hiddenSize'])\n z2_words = self.attention(sampled_word_bc,mask=sampled_seq[:, :, :, 1].reshape(batch_size * args['chargenum'],seqlen))\n\n\n z2_words = z2_words.reshape(batch_size, args['chargenum'], seqlen, args['hiddenSize']).to(args['device'])\n z2_hid = z2_words.sum(dim=2).to(args['device']) # batch chargenum hid\n z2_mean = self.z2_hid2mean(z2_hid)\n z2_logvar = self.z2_hid2logvar(z2_hid)\n\n z2 = self.sample_z(z2_mean, z2_logvar)\n\n '''\n Capsule\n '''\n capsule_v_norm = self.ChargeClassifier(z2)[:,:,0] # b chargenum\n\n wordnum = torch.sum(mask, dim = 1, keepdim=True)\n\n return capsule_v_norm, 
(torch.argmax(capsule_v_norm, dim = -1), sampled_seq[:, :, :, 1], sampled_num/wordnum)\n" ]
[ [ "torch.nn.Softmax", "torch.nn.functional.softmax", "torch.sign", "torch.sum", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "torch.norm", "torch.einsum", "torch.nn.Sigmoid", "torch.rand", "torch.nn.NLLLoss", "torch.zeros_like", "torch.exp", "torch.nn.Linear", "torch.log", "torch.nn.Tanh", "torch.nn.functional.one_hot", "torch.nn.ReLU", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luiszeni/Boosted-OICR
[ "9c787808a3a0e5a2610cde7562eb04bc2ce050b9" ]
[ "code/layers/losses/oicr_losses.py" ]
[ "import torch\nimport torch.nn as nn\nfrom pdb import set_trace as pause \n\nclass OICRLosses(nn.Module):\n\n def forward(self, pcl_probs, labels, cls_loss_weights, gt_assignment, im_labels):\n\n\n eps = 1e-6\n pcl_probs = pcl_probs.clamp(eps, 1-eps).log()\n\n \n cls_loss_weights = cls_loss_weights.repeat(pcl_probs.shape[1],1).permute(1,0).cuda()\n\n\n labels = labels.repeat(pcl_probs.shape[1],1).permute(1,0).long()\n reap = torch.arange(pcl_probs.shape[1])[None,:].repeat(pcl_probs.shape[0], 1).long()\n labels = (reap - labels == 0).float().cuda()\n\n \n loss = labels * cls_loss_weights * pcl_probs\n\n loss = -loss.sum(dim=0).sum() / pcl_probs.size(0)\n\n\n return loss\n\n" ]
[ [ "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
myedibleenso/this-before-that
[ "ddf609ac34e852aee205823928f26d7faa55b5c7" ]
[ "lstm/pitchfork_lstm.py" ]
[ "from gensim.models.word2vec import Word2Vec # make use of pretrained embeddings\nfrom sklearn.cross_validation import StratifiedKFold\nimport theano\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import one_hot, base_filter, Tokenizer\nfrom keras.utils import np_utils # for converting labels vectors to matrices in multi-class cases\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers import Merge\nfrom keras.layers.recurrent import LSTM, SimpleRNN, GRU\nfrom keras.callbacks import EarlyStopping\ntry:\n from keras.utils.visualize_util import model_to_dot, plot\nexcept:\n print(\"Can't import graphviz-based plotting utilities\")\nfrom evaluate import *\nfrom utils import *\nimport yaml\nimport pandas as pd\nimport numpy as np\nnp.random.seed(42) # for reproducibility\n\n\nclass Experiment(object):\n\n def __init__(self, config):\n self.config = config\n # load df\n self.df = self.get_annotations_df()\n # create tokenizer\n self.tokenizer = self.make_tokenizer()\n self.max_features = len(self.tokenizer.word_index) + 1\n self.w2v, self.embedding_weights = self.create_embeddings_weights()\n self.hidden_size = self.w2v.vector_size\n\n def get_annotations_df(self):\n \"\"\"\n Remove bugs and empty annotations\n \"\"\"\n data = pd.read_json(self.config[\"annotations_file\"])\n return data[(data.relation != \"Bug\") & (data.relation != \"\")]\n\n def create_embeddings_weights(self):\n config = self.config\n tk = self.tokenizer\n word2index = tk.word_index\n # reverse index\n index2word = {i:w for (w,i) in tk.word_index.items()}\n max_size = len(index2word) + 1\n # load w2v model\n w2v_vectors_file = config[\"w2v_data\"]\n w2v = Word2Vec.load_word2vec_format(w2v_vectors_file, binary=True)\n word_vector_dims = w2v.vector_size\n embedding_weights = np.zeros((max_size, word_vector_dims))\n\n for i,w in index2word.items():\n try:\n embedding_weights[i,:] = w2v[w]\n except:\n print(\"{} not found\".format(w))\n return (w2v, embedding_weights)\n\n\n def make_tokenizer(self):\n \"\"\"\n \"\"\"\n config = self.config\n tk = Tokenizer(\n # the maximum num. 
of words to retain\n nb_words=None,\n # the characters to filter out from the text\n filters=config[\"custom_filter\"],\n # whether or not to convert the text to lowercase\n lower=True,\n # the character to split on\n split=\" \",\n # whether or not to treat each character as a word\n char_level=False\n )\n data = self.df\n x = data.text.values\n # build tokenizer's vocabulary index\n tk.fit_on_texts(x)\n return tk\n\n def prepare_text(self, x):\n tk = self.tokenizer\n # prepare text\n x = tk.texts_to_sequences(x)\n # pad sequences\n max_len = config[\"max_len\"]\n x = sequence.pad_sequences(x, maxlen=max_len)\n return x\n\n def prepare_labels(self):\n \"\"\"\n get numeric representation of valid labels\n \"\"\"\n data = self.df\n # set labels other than precedence to \"None\"\n label_to_value = config[\"label_LUT\"]\n # filter out bugs and empty relations\n labels = data.relation.replace(label_to_value).values\n return labels\n\n def get_gold_labels(self):\n annotations_path = config[\"annotations_file\"]\n # set labels other than precedence to \"None\"\n label_to_value = config[\"label_LUT\"]\n value_to_label = config[\"value_LUT\"]\n data = self.df\n # deal with only a subset of labels\n gold_labels = data.relation.replace(label_to_value)\n gold_labels = gold_labels.replace(value_to_label)\n # string labels\n return gold_labels.values\n\n # folds are made by preserving the percentage of samples for each class\n def prepare_data(self):\n \"\"\"\n Load annotations from .json,\n discard bugs,\n and replace relations with the relevant classes\n \"\"\"\n config = self.config\n data = self.df\n # relations as labels (numeric representation)\n labels = self.prepare_labels()\n return (x, labels)\n\n def create_embeddings_layer(self):\n config = self.config\n use_pretrained_embeddings = config[\"with_pretraining\"]\n pretrained_embeddings = [self.embedding_weights] if use_pretrained_embeddings else None\n max_features = self.max_features\n hidden_size = self.hidden_size\n max_len = config[\"max_len\"]\n return Embedding(\n input_dim=max_features,\n output_dim=hidden_size,\n input_length=max_len,\n W_regularizer=None,\n #weights=None,\n # use pretrained vectors\n weights=pretrained_embeddings,\n dropout=0.2\n )\n\n def create_lstm_layer(self):\n config = self.config\n hidden_size = self.hidden_size\n # build the lstm layer\n return LSTM(\n #input_dim=max_features,\n output_dim=hidden_size,\n dropout_W=0.2,\n dropout_U=0.2,\n return_sequences=False\n )\n\n def yap(self):\n \"\"\"\n Gives information on model configuration\n \"\"\"\n config = self.config\n use_pretrained_embeddings = config[\"with_pretraining\"]\n pretrained_embeddings = [self.embedding_weights] if use_pretrained_embeddings else None\n print(\"Using pretrained embeddings? 
{}\".format(pretrained_embeddings != None))\n\n def create_fork(self):\n fork = Sequential()\n # embeddings\n fork.add(self.create_embeddings_layer())\n # lstm\n fork.add(self.create_lstm_layer())\n # dropout\n fork.add(Dropout(0.5))\n return fork\n\n def create_model(self):\n \"\"\"\n \"\"\"\n config = self.config\n self.yap()\n\n model = Sequential()\n # E1 text\n left_fork = self.create_fork()\n # minimal text span encompassing E1 + E2\n middle_fork = self.create_fork()\n # E2 text\n right_fork = self.create_fork()\n # build the three inputs\n\n # merge the inputs\n merged = Merge([left_fork, middle_fork, right_fork], mode='concat')\n model.add(merged)\n model.add(Dropout(0.5))\n # size should be equal to the number of classes\n num_classes = config[\"num_classes\"]\n model.add(Dense(num_classes))\n # at the end of the day, we only want one label per input (hence softmax)\n model.add(Activation('softmax'))\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=[\"accuracy\"]\n )\n return model\n\n def write_model_graph(self):\n model = self.create_model()\n # write dot text to file\n with open(config[\"model_dot\"], \"wb\") as out:\n m = model_to_dot(model)\n dot_text = m.create_dot()\n out.write(dot_text)\n print(\"Wrote .dot to {}\".format(config[\"model_dot\"]))\n # write graph to file\n plot(model, to_file=config[\"model_graph\"], show_shapes=True)\n\n def train_and_evaluate_model(\n self,\n model,\n train_indices,\n test_indices,\n dev_indices\n ):\n \"\"\"\n \"\"\"\n # prep text serving as input\n e1_text = self.prepare_text(self.df[\"e1-sentence\"].values)\n e2_text = self.prepare_text(self.df[\"e2-sentence\"].values)\n encompassing_text = self.prepare_text(self.df[\"text\"].values)\n\n config = self.config\n num_epochs = config[\"num_epochs\"]\n batch_size = config[\"batch_size\"]\n #validation_split = config[\"validation_split\"]\n num_classes = config[\"num_classes\"]\n\n labels = self.prepare_labels()\n # convert class vectors to binary class matrices\n labels = np_utils.to_categorical(labels, num_classes)\n\n # prepare training data\n train_labels = labels[train_indices]\n train_data = [e1_text[train_indices], encompassing_text[train_indices], e2_text[train_indices]]\n # prepare test data\n test_data = [e1_text[test_indices], encompassing_text[test_indices], e2_text[test_indices]]\n test_labels = labels[test_indices]\n # prepare validation dataset\n dev_data = [e1_text[dev_indices], encompassing_text[dev_indices], e2_text[dev_indices]]\n dev_labels = labels[dev_indices]\n # add early stopping to help avoid overfitting\n early_stopping = EarlyStopping(monitor='val_loss', patience=2)\n\n # train\n model.fit(\n # input\n x=train_data,\n # target labels\n y=train_labels,\n # how many examples to consider at once\n batch_size=batch_size,\n # the number of epochs to train\n nb_epoch=num_epochs,\n # 0 for no logging, 1 for progress bar logging, 2 for one log line per epoch\n verbose=1,\n # the validation data to use,\n validation_data=(dev_data, dev_labels),\n # how much data to reserve for validation (takes n% starting at the end of the dataset)\n #validation_split=0.25,\n # should the training data be shuffled?\n shuffle=True,\n # dict mapping classes to weight for scaling in loss function\n class_weight=None,\n callbacks=[early_stopping]\n )\n\n # evaluate\n test_predictions = model.predict_classes(test_data, batch_size=batch_size, verbose=0)\n\n def convert_predictions(predictions):\n \"\"\"\n converts values in a numpy array to their 
corresponding label\n \"\"\"\n value_to_label_LUT = config[\"value_LUT\"]\n for p in predictions:\n yield value_to_label_LUT.get(p, \"None\")\n\n test_predictions = list(convert_predictions(test_predictions))\n return test_predictions\n\n def write_predictions_to_file(self, predictions):\n clf_results = config[\"classifier_results\"]\n # load gold\n gold = self.get_gold_labels()\n df = pd.DataFrame({\"Gold\":gold, \"Predicted\":predictions})\n df.to_csv(config[\"classifier_results\"], sep=\"\\t\")\n\n def evaluate(self):\n \"\"\"\n \"\"\"\n clf_results = config[\"classifier_results\"]\n\n evaluator = Evaluator(clf_results)\n classifier_performance = evaluator.generate_scores_df()\n\n print(\"Classifier performance\")\n print(classifier_performance.round(2))\n print()\n\n def run_kfold(self):\n tk = self.tokenizer\n w2v = self.w2v\n max_features = self.max_features\n print(\"Max features: {}\".format(max_features))\n # the number of hidden units\n folds = config[\"folds\"]\n # get text and labels\n # includes preprocessing of text (tokenize, etc.)\n labels = self.prepare_labels()\n print(\"max # epochs: {}\".format(config[\"num_epochs\"]))\n skf = StratifiedKFold(labels, n_folds=folds, shuffle=True)\n\n predictions = dict()\n skf = list(skf)\n for i, (train, test) in enumerate(skf):\n print(\"Running fold {} / {} ...\".format(i+1, folds))\n model = None\n model = self.create_model()\n # use next fold for validation\n if i == 0 and i + 1 < len(skf):\n (dev, _) = skf[i+1]\n else:\n (dev, _) = skf[i-1]\n test_predictions = self.train_and_evaluate_model(model, train, test, dev)\n # store predictions\n for i in range(len(test)):\n # check each test index\n test_index = test[i]\n # the ith item in the test_predictions corresponds to label for the test_index\n predictions[test_index] = test_predictions[i]\n\n # get ordered predictions\n predictions = [predictions[i] for i in range(len(predictions))]\n self.write_predictions_to_file(predictions)\n self.evaluate()\n\nif __name__ == \"__main__\":\n theano.config.openmp = True\n OMP_NUM_THREADS=4\n args = get_args()\n config_file = expand_path(args.config_file)\n print(\"Loading {}\".format(config_file))\n config = yaml.load(open(config_file, \"r\"))\n experiment = Experiment(config)\n # plot model\n try:\n experiment.write_model_graph()\n except:\n print(\"Problem writing model graph\")\n # run kfolds\n experiment.run_kfold()\n" ]
[ [ "sklearn.cross_validation.StratifiedKFold", "numpy.random.seed", "pandas.DataFrame", "pandas.read_json", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
dhaase-de/dito
[ "c804d40fcf068a04b98737d9f39a639b2a6a0727" ]
[ "dito/draw.py" ]
[ "import cv2\nimport numpy as np\n\nimport dito.core\n\n\n# often-used constants\nsqrt_05 = np.sqrt(0.5)\n\n\ndef draw_circle(image, center, radius, color, thickness, line_type, start_angle=None, end_angle=None):\n \"\"\"\n TODO: fix round corners when using start_angle/end_angle and thickness != cv2.FILLED\n \"\"\"\n if (start_angle is None) and (end_angle is None):\n cv2.circle(img=image, center=dito.core.tir(center), radius=radius, color=color, thickness=thickness, lineType=line_type)\n else:\n if start_angle is None:\n start_angle = 0.0\n if end_angle is None:\n end_angle = 360.0\n cv2.ellipse(img=image, center=dito.core.tir(center), axes=(radius, radius), angle=0.0, startAngle=start_angle, endAngle=end_angle, color=color, thickness=thickness, lineType=line_type)\n\n\ndef draw_ring(image, center, radius1, radius2, color, thickness, line_type, start_angle=None, end_angle=None):\n if thickness == cv2.FILLED:\n # draw circle outline with thickness equal to the radius difference\n circle_radius = (radius1 + radius2) // 2\n circle_thickness = abs(radius1 - radius2)\n draw_circle(image=image, center=center, radius=circle_radius, color=color, thickness=circle_thickness, line_type=line_type, start_angle=start_angle, end_angle=end_angle)\n else:\n # draw two circles\n draw_circle(image=image, center=center, radius=radius1, color=color, thickness=thickness, line_type=line_type, start_angle=start_angle, end_angle=end_angle)\n draw_circle(image=image, center=center, radius=radius2, color=color, thickness=thickness, line_type=line_type, start_angle=start_angle, end_angle=end_angle)\n\n\ndef draw_polygon(image, points, color, thickness, line_type):\n points_int = np.round(np.array(points)).astype(np.int32)\n if thickness == cv2.FILLED:\n cv2.fillPoly(img=image, pts=[points_int], color=color, lineType=line_type)\n else:\n cv2.polylines(img=image, pts=[points_int], isClosed=True, color=color, thickness=thickness, lineType=line_type)\n\n\ndef draw_regular_polygon(image, point_count, position, radius, color, thickness, line_type, angle_offset=0.0):\n (x, y) = position\n points = []\n for angle in np.linspace(start=0.0, stop=2.0 * np.pi, num=point_count, endpoint=False):\n points.append([\n radius * np.cos(angle + angle_offset) + x,\n radius * np.sin(angle + angle_offset) + y,\n ])\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n\ndef draw_regular_star(image, point_count, position, radius_outer, radius_inner, color, thickness, line_type, angle_offset=0.0):\n (x, y) = position\n points = []\n for (n_point, angle) in enumerate(np.linspace(start=0.0, stop=2.0 * np.pi, num=2 * point_count, endpoint=False)):\n radius = radius_outer if (n_point % 2) == 0 else radius_inner\n points.append([\n radius * np.cos(angle + angle_offset) + x,\n radius * np.sin(angle + angle_offset) + y,\n ])\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n\ndef draw_regular_skeleton(image, point_count, position, radius, color, thickness, line_type, angle_offset=0.0):\n thickness = 1 if thickness == cv2.FILLED else thickness\n (x, y) = position\n for angle in np.linspace(start=0.0, stop=2.0 * np.pi, num=point_count, endpoint=False):\n cv2.line(img=image, pt1=dito.core.tir(x, y), pt2=dito.core.tir(radius * np.cos(angle + angle_offset) + x, radius * np.sin(angle + angle_offset) + y), color=color, thickness=thickness, lineType=line_type)\n\n\ndef draw_symbol(image, symbol, position, radius=4, color=None, thickness=1, 
line_type=cv2.LINE_AA):\n # handle arguments\n (x, y) = position\n if color is None:\n if dito.core.is_color(image=image):\n color = (0, 255, 0)\n else:\n color = (255,)\n\n if symbol in (\"circle\", \"o\"):\n cv2.circle(img=image, center=dito.core.tir(x, y), radius=radius, color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"cross\", \"x\"):\n thickness = 1 if thickness == cv2.FILLED else thickness\n sqrt_one_over_radius = sqrt_05 * radius\n cv2.line(img=image, pt1=dito.core.tir(x - sqrt_one_over_radius, y - sqrt_one_over_radius), pt2=dito.core.tir(x + sqrt_one_over_radius, y + sqrt_one_over_radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=dito.core.tir(x + sqrt_one_over_radius, y - sqrt_one_over_radius), pt2=dito.core.tir(x - sqrt_one_over_radius, y + sqrt_one_over_radius), color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"diamond\", \"D\"):\n points = [\n (x, y - radius),\n (x + radius, y),\n (x, y + radius),\n (x - radius, y),\n ]\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n elif symbol in (\"diamond_thin\", \"d\"):\n points = [\n (x, y - radius),\n (x + 0.67 * radius, y),\n (x, y + radius),\n (x - 0.67 * radius, y),\n ]\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n elif symbol in (\"hexagon\", \"6\"):\n draw_regular_polygon(image=image, point_count=6, position=position, radius=radius, color=color, thickness=thickness, line_type=line_type, angle_offset=1.5 * np.pi)\n\n elif symbol in (\"pentagon\", \"5\"):\n draw_regular_polygon(image=image, point_count=5, position=position, radius=radius, color=color, thickness=thickness, line_type=line_type, angle_offset=1.5 * np.pi)\n\n elif symbol in (\"plus\", \"+\"):\n thickness = 1 if thickness == cv2.FILLED else thickness\n cv2.line(img=image, pt1=dito.core.tir(x - radius, y), pt2=dito.core.tir(x + radius, y), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=dito.core.tir(x, y - radius), pt2=dito.core.tir(x, y + radius), color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"skeleton_5\",):\n draw_regular_skeleton(image=image, point_count=5, position=position, radius=radius, color=color, thickness=thickness, line_type=line_type, angle_offset=1.5 * np.pi)\n\n elif symbol in (\"skeleton_6\",):\n draw_regular_skeleton(image=image, point_count=6, position=position, radius=radius, color=color, thickness=thickness, line_type=line_type, angle_offset=0.5 * np.pi)\n\n elif symbol in (\"square\", \"4\"):\n cv2.rectangle(img=image, pt1=dito.core.tir(x - radius, y - radius), pt2=dito.core.tir(x + radius, y + radius), color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"star_4\",):\n draw_regular_star(image=image, point_count=4, position=position, radius_outer=radius, radius_inner=0.5 * radius, color=color, thickness=thickness, line_type=line_type, angle_offset=1.5 * np.pi)\n\n elif symbol in (\"star_5\", \"*\"):\n draw_regular_star(image=image, point_count=5, position=position, radius_outer=radius, radius_inner=0.5 * radius, color=color, thickness=thickness, line_type=line_type, angle_offset=1.5 * np.pi)\n\n elif symbol in (\"star_6\",):\n draw_regular_star(image=image, point_count=6, position=position, radius_outer=radius, radius_inner=0.5 * radius, color=color, thickness=thickness, line_type=line_type, angle_offset=0.5 * np.pi)\n\n elif symbol in (\"star_12\",):\n 
draw_regular_star(image=image, point_count=12, position=position, radius_outer=radius, radius_inner=0.5 * radius, color=color, thickness=thickness, line_type=line_type, angle_offset=0.5 * np.pi)\n\n elif symbol in (\"triangle_up\", \"^\"):\n points = [\n (x, y - radius),\n (x + radius, y + sqrt_05 * radius),\n (x - radius, y + sqrt_05 * radius),\n ]\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n elif symbol in (\"triangle_down\", \"v\"):\n points = [\n (x + radius, y - sqrt_05 * radius),\n (x - radius, y - sqrt_05 * radius),\n (x, y + radius),\n ]\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n elif symbol in (\"triangle_left\", \"<\"):\n points = [\n (x + sqrt_05 * radius, y - radius),\n (x - radius, y),\n (x + sqrt_05 * radius, y + radius),\n ]\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n elif symbol in (\"triangle_right\", \">\"):\n points = [\n (x - sqrt_05 * radius, y - radius),\n (x + radius, y),\n (x - sqrt_05 * radius, y + radius),\n ]\n draw_polygon(image=image, points=points, color=color, thickness=thickness, line_type=line_type)\n\n elif symbol in (\"y_up\",):\n thickness = 1 if thickness == cv2.FILLED else thickness\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x, y - radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x + sqrt_05 * radius, y + sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x - sqrt_05 * radius, y + sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"y_down\", \"Y\"):\n thickness = 1 if thickness == cv2.FILLED else thickness\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x + sqrt_05 * radius, y - sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x - sqrt_05 * radius, y - sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x, y + radius), color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"y_left\",):\n thickness = 1 if thickness == cv2.FILLED else thickness\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x - radius, y), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x + sqrt_05 * radius, y - sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x + sqrt_05 * radius, y + sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n\n elif symbol in (\"y_right\",):\n thickness = 1 if thickness == cv2.FILLED else thickness\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x - sqrt_05 * radius, y - sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x - sqrt_05 * radius, y + sqrt_05 * radius), color=color, thickness=thickness, lineType=line_type)\n cv2.line(img=image, pt1=(x, y), pt2=dito.core.tir(x + radius, y), color=color, thickness=thickness, lineType=line_type)\n\n else:\n raise ValueError(\"Unknown symbol '{}'\".format(symbol))\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wufan-tb/improved-LabelImg
[ "e8c08bdaf6a7cba307bad6b84c246e9bd75ab9bf" ]
[ "keras_retinanet/utils/gpu.py" ]
[ "\"\"\"\nCopyright 2017-2019 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom .tf_version import tf_version_ok\n\n\ndef setup_gpu(gpu_id):\n if tf_version_ok((2, 0, 0)):\n if gpu_id == 'cpu' or gpu_id == -1:\n tf.config.experimental.set_visible_devices([], 'GPU')\n return\n\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n # Restrict TensorFlow to only use the first GPU.\n try:\n # Currently, memory growth needs to be the same across GPUs.\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n # Use only the selcted gpu.\n tf.config.experimental.set_visible_devices(gpus[int(gpu_id)], 'GPU')\n except RuntimeError as e:\n # Visible devices must be set before GPUs have been initialized.\n print(e)\n\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n else:\n import os\n if gpu_id == 'cpu' or gpu_id == -1:\n os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n return\n\n os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.4\n config.gpu_options.allow_growth = True\n tf.keras.backend.set_session(tf.Session(config=config))\n" ]
[ [ "tensorflow.config.experimental.list_logical_devices", "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.config.experimental.set_visible_devices" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rakschahsa/networkx
[ "3f1fdcb7693ff152f17623ce549526ec272698b1" ]
[ "networkx/algorithms/centrality/katz.py" ]
[ "# coding=utf8\n# Copyright (C) 2004-2018 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n#\n# Authors: Aric Hagberg ([email protected])\n# Pieter Swart ([email protected])\n# Sasha Gutfraind ([email protected])\n# Vincent Gauthier ([email protected])\n\"\"\"Katz centrality.\"\"\"\nfrom math import sqrt\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['katz_centrality', 'katz_centrality_numpy']\n\n\n@not_implemented_for('multigraph')\ndef katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1.0e-6,\n nstart=None, normalized=True, weight=None):\n r\"\"\"Compute the Katz centrality for the nodes of the graph G.\n\n Katz centrality computes the centrality for a node based on the centrality\n of its neighbors. It is a generalization of the eigenvector centrality. The\n Katz centrality for node $i$ is\n\n .. math::\n\n x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,\n\n where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.\n\n The parameter $\\beta$ controls the initial centrality and\n\n .. math::\n\n \\alpha < \\frac{1}{\\lambda_{\\max}}.\n\n Katz centrality computes the relative influence of a node within a\n network by measuring the number of the immediate neighbors (first\n degree nodes) and also all other nodes in the network that connect\n to the node under consideration through these immediate neighbors.\n\n Extra weight can be provided to immediate neighbors through the\n parameter $\\beta$. Connections made with distant neighbors\n are, however, penalized by an attenuation factor $\\alpha$ which\n should be strictly less than the inverse largest eigenvalue of the\n adjacency matrix in order for the Katz centrality to be computed\n correctly. More information is provided in [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n alpha : float\n Attenuation factor\n\n beta : scalar or dictionary, optional (default=1.0)\n Weight attributed to the immediate neighborhood. If not a scalar, the\n dictionary must have an value for every node.\n\n max_iter : integer, optional (default=1000)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of Katz iteration for each node.\n\n normalized : bool, optional (default=True)\n If True normalize the resulting values.\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with Katz centrality as the value.\n\n Raises\n ------\n NetworkXError\n If the parameter `beta` is not a scalar but lacks a value for at least\n one node\n\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n Examples\n --------\n >>> import math\n >>> G = nx.path_graph(4)\n >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix\n >>> centrality = nx.katz_centrality(G, 1/phi - 0.01)\n >>> for n, c in sorted(centrality.items()):\n ... 
print(\"%d %0.2f\" % (n, c))\n 0 0.37\n 1 0.60\n 2 0.60\n 3 0.37\n\n See Also\n --------\n katz_centrality_numpy\n eigenvector_centrality\n eigenvector_centrality_numpy\n pagerank\n hits\n\n Notes\n -----\n Katz centrality was introduced by [2]_.\n\n This algorithm it uses the power method to find the eigenvector\n corresponding to the largest eigenvalue of the adjacency matrix of ``G``.\n The parameter ``alpha`` should be strictly less than the inverse of largest\n eigenvalue of the adjacency matrix for the algorithm to converge.\n You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest\n eigenvalue of the adjacency matrix.\n The iteration will stop after ``max_iter`` iterations or an error tolerance of\n ``number_of_nodes(G) * tol`` has been reached.\n\n When $\\alpha = 1/\\lambda_{\\max}$ and $\\beta=0$, Katz centrality is the same\n as eigenvector centrality.\n\n For directed graphs this finds \"left\" eigenvectors which corresponds\n to the in-edges in the graph. For out-edges Katz centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, p. 720.\n .. [2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39–43, 1953\n http://phya.snu.ac.kr/~dkim/PRL87278701.pdf\n \"\"\"\n if len(G) == 0:\n return {}\n\n nnodes = G.number_of_nodes()\n\n if nstart is None:\n # choose starting vector with entries of 0\n x = dict([(n, 0) for n in G])\n else:\n x = nstart\n\n try:\n b = dict.fromkeys(G, float(beta))\n except (TypeError, ValueError, AttributeError):\n b = beta\n if set(beta) != set(G):\n raise nx.NetworkXError('beta dictionary '\n 'must have a value for every node')\n\n # make up to max_iter iterations\n for i in range(max_iter):\n xlast = x\n x = dict.fromkeys(xlast, 0)\n # do the multiplication y^T = Alpha * x^T A - Beta\n for n in x:\n for nbr in G[n]:\n x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)\n for n in x:\n x[n] = alpha * x[n] + b[n]\n\n # check convergence\n err = sum([abs(x[n] - xlast[n]) for n in x])\n if err < nnodes * tol:\n if normalized:\n # normalize vector\n try:\n s = 1.0 / sqrt(sum(v**2 for v in x.values()))\n # this should never be zero?\n except ZeroDivisionError:\n s = 1.0\n else:\n s = 1\n for n in x:\n x[n] *= s\n return x\n raise nx.PowerIterationFailedConvergence(max_iter)\n\n\n@not_implemented_for('multigraph')\ndef katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True,\n weight=None):\n r\"\"\"Compute the Katz centrality for the graph G.\n\n Katz centrality computes the centrality for a node based on the centrality\n of its neighbors. It is a generalization of the eigenvector centrality. The\n Katz centrality for node $i$ is\n\n .. math::\n\n x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,\n\n where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.\n\n The parameter $\\beta$ controls the initial centrality and\n\n .. math::\n\n \\alpha < \\frac{1}{\\lambda_{\\max}}.\n\n Katz centrality computes the relative influence of a node within a\n network by measuring the number of the immediate neighbors (first\n degree nodes) and also all other nodes in the network that connect\n to the node under consideration through these immediate neighbors.\n\n Extra weight can be provided to immediate neighbors through the\n parameter $\\beta$. 
Connections made with distant neighbors\n are, however, penalized by an attenuation factor $\\alpha$ which\n should be strictly less than the inverse largest eigenvalue of the\n adjacency matrix in order for the Katz centrality to be computed\n correctly. More information is provided in [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n alpha : float\n Attenuation factor\n\n beta : scalar or dictionary, optional (default=1.0)\n Weight attributed to the immediate neighborhood. If not a scalar the\n dictionary must have an value for every node.\n\n normalized : bool\n If True normalize the resulting values.\n\n weight : None or string, optional\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with Katz centrality as the value.\n\n Raises\n ------\n NetworkXError\n If the parameter `beta` is not a scalar but lacks a value for at least\n one node\n\n Examples\n --------\n >>> import math\n >>> G = nx.path_graph(4)\n >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix\n >>> centrality = nx.katz_centrality_numpy(G, 1/phi)\n >>> for n, c in sorted(centrality.items()):\n ... print(\"%d %0.2f\" % (n, c))\n 0 0.37\n 1 0.60\n 2 0.60\n 3 0.37\n\n See Also\n --------\n katz_centrality\n eigenvector_centrality_numpy\n eigenvector_centrality\n pagerank\n hits\n\n Notes\n -----\n Katz centrality was introduced by [2]_.\n\n This algorithm uses a direct linear solver to solve the above equation.\n The parameter ``alpha`` should be strictly less than the inverse of largest\n eigenvalue of the adjacency matrix for there to be a solution.\n You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest\n eigenvalue of the adjacency matrix.\n\n When $\\alpha = 1/\\lambda_{\\max}$ and $\\beta=0$, Katz centrality is the same\n as eigenvector centrality.\n\n For directed graphs this finds \"left\" eigenvectors which corresponds\n to the in-edges in the graph. For out-edges Katz centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, p. 720.\n .. [2] Leo Katz:\n A New Status Index Derived from Sociometric Index.\n Psychometrika 18(1):39–43, 1953\n http://phya.snu.ac.kr/~dkim/PRL87278701.pdf\n \"\"\"\n try:\n import numpy as np\n except ImportError:\n raise ImportError('Requires NumPy: http://scipy.org/')\n if len(G) == 0:\n return {}\n try:\n nodelist = beta.keys()\n if set(nodelist) != set(G):\n raise nx.NetworkXError('beta dictionary '\n 'must have a value for every node')\n b = np.array(list(beta.values()), dtype=float)\n except AttributeError:\n nodelist = list(G)\n try:\n b = np.ones((len(nodelist), 1)) * float(beta)\n except (TypeError, ValueError, AttributeError):\n raise nx.NetworkXError('beta must be a number')\n\n A = nx.adj_matrix(G, nodelist=nodelist, weight=weight).todense().T\n n = A.shape[0]\n centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b)\n if normalized:\n norm = np.sign(sum(centrality)) * np.linalg.norm(centrality)\n else:\n norm = 1.0\n centrality = dict(zip(nodelist, map(float, centrality / norm)))\n return centrality\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n import scipy\n except:\n raise SkipTest(\"SciPy not available\")\n" ]
[ [ "numpy.eye", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
gugarosa/synthetic_rbms
[ "a86c323e165a893810ac5bf79213f603ed86a8b3" ]
[ "libraries/nalp/nalp/models/generators/lstm.py" ]
[ "from tensorflow.keras.layers import RNN, Dense, Embedding, LSTMCell\n\nimport nalp.utils.logging as l\nfrom nalp.core.model import Generator\n\nlogger = l.get_logger(__name__)\n\n\nclass LSTMGenerator(Generator):\n \"\"\"A LSTMGenerator class is the one in charge of Long Short-Term Memory implementation.\n\n References:\n S. Hochreiter, Jürgen Schmidhuber. Long short-term memory. Neural computation 9.8 (1997).\n\n \"\"\"\n\n def __init__(self, encoder=None, vocab_size=1, embedding_size=32, hidden_size=64):\n \"\"\"Initialization method.\n\n Args:\n encoder (IntegerEncoder): An index to vocabulary encoder.\n vocab_size (int): The size of the vocabulary.\n embedding_size (int): The size of the embedding layer.\n hidden_size (int): The amount of hidden neurons.\n\n \"\"\"\n\n logger.info('Overriding class: Generator -> LSTMGenerator.')\n\n # Overrides its parent class with any custom arguments if needed\n super(LSTMGenerator, self).__init__(name='G_lstm')\n\n # Creates a property for holding the used encoder\n self.encoder = encoder\n\n # Creates an embedding layer\n self.embedding = Embedding(vocab_size, embedding_size, name='embedding')\n\n # Creates a LSTM cell\n self.cell = LSTMCell(hidden_size, name='lstm_cell')\n\n # Creates the RNN loop itself\n self.rnn = RNN(self.cell, name='rnn_layer',\n return_sequences=True,\n stateful=True)\n\n # Creates the linear (Dense) layer\n self.linear = Dense(vocab_size, name='out')\n\n @property\n def encoder(self):\n \"\"\"obj: An encoder generic object.\n\n \"\"\"\n\n return self._encoder\n\n @encoder.setter\n def encoder(self, encoder):\n self._encoder = encoder\n\n def call(self, x):\n \"\"\"Method that holds vital information whenever this class is called.\n\n Args:\n x (tf.Tensor): A tensorflow's tensor holding input data.\n\n Returns:\n The same tensor after passing through each defined layer.\n\n \"\"\"\n\n # Firstly, we apply the embedding layer\n x = self.embedding(x)\n\n # We need to apply the input into the first recurrent layer\n x = self.rnn(x)\n\n # The input also suffers a linear combination to output correct shape\n x = self.linear(x)\n\n return x\n" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.LSTMCell", "tensorflow.keras.layers.RNN", "tensorflow.keras.layers.Embedding" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
mohakbhardwaj/differentiable-robot-model
[ "be5bd816cc81931ed2b7133bcd4fde7c92b2abd7" ]
[ "differentiable_robot_model/rigid_body/differentiable_rigid_body.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport torch\nfrom differentiable_robot_model.rigid_body import utils\nfrom differentiable_robot_model.rigid_body.coordinate_transform import (\n CoordinateTransform,\n z_rot,\n y_rot,\n x_rot,\n)\n\nimport hydra\n\n\nclass DifferentiableRigidBody(torch.nn.Module):\n \"\"\"\n Differentiable Representation of a link\n \"\"\"\n\n def __init__(self, rigid_body_params, device=\"cpu\"):\n\n super().__init__()\n\n self._device = device\n self.joint_id = rigid_body_params[\"joint_id\"]\n self.name = rigid_body_params[\"link_name\"]\n\n # dynamics parameters\n self.mass = rigid_body_params[\"mass\"]\n self.com = rigid_body_params[\"com\"]\n self.inertia_mat = rigid_body_params[\"inertia_mat\"]\n self.joint_damping = rigid_body_params[\"joint_damping\"]\n\n # kinematics parameters\n self.trans = rigid_body_params[\"trans\"]\n self.rot_angles = rigid_body_params[\"rot_angles\"]\n self.joint_limits = rigid_body_params[\"joint_limits\"]\n\n # local z axis (w.r.t. joint coordinate frame):\n self._z = torch.zeros((1, 3))\n self._z[:, 2] = 1.0\n\n self.joint_pose = CoordinateTransform()\n self.joint_pose.set_translation(torch.reshape(self.trans, (1, 3)))\n\n # local velocities and accelerations (w.r.t. joint coordinate frame):\n # in spatial vector terminology: linear velocity v\n self.joint_lin_vel = torch.zeros((1, 3)) # .to(self._device)\n # in spatial vector terminology: angular velocity w\n self.joint_ang_vel = torch.zeros((1, 3)) # .to(self._device)\n # in spatial vector terminology: linear acceleration vd\n self.joint_lin_acc = torch.zeros((1, 3)) # .to(self._device)\n # in spatial vector terminology: angular acceleration wd\n self.joint_ang_acc = torch.zeros((1, 3)) # .to(self._device)\n\n self.update_joint_state(torch.zeros(1, 1), torch.zeros(1, 1))\n self.update_joint_acc(torch.zeros(1, 1))\n\n self.pose = CoordinateTransform()\n\n # I have different vectors for angular/linear motion/force, but they usually always appear as a pair\n # meaning we usually always compute both angular/linear components.\n # Maybe worthwile thinking of a structure for this - in math notation we would use the notion of spatial vectors\n # drake uses some form of spatial vector implementation\n self.lin_vel = torch.zeros((1, 3)).to(self._device)\n self.ang_vel = torch.zeros((1, 3)).to(self._device)\n self.lin_acc = torch.zeros((1, 3)).to(self._device)\n self.ang_acc = torch.zeros((1, 3)).to(self._device)\n\n # in spatial vector terminology this is the \"linear force f\"\n self.lin_force = torch.zeros((1, 3)).to(self._device)\n # in spatial vector terminology this is the \"couple n\"\n self.ang_force = torch.zeros((1, 3)).to(self._device)\n\n return\n\n def update_joint_state(self, q, qd):\n batch_size = q.shape[0]\n\n self.joint_ang_vel = qd @ self._z\n\n roll = self.rot_angles[0]\n pitch = self.rot_angles[1]\n yaw = self.rot_angles[2]\n\n fixed_rotation = (z_rot(yaw) @ y_rot(pitch)) @ x_rot(roll)\n\n # when we update the joint angle, we also need to update the transformation\n self.joint_pose.set_translation(torch.reshape(self.trans, (1, 3)))\n self.joint_pose.set_rotation(fixed_rotation.repeat(batch_size, 1, 1) @ z_rot(q))\n return\n\n def update_joint_acc(self, qdd):\n # local z axis (w.r.t. 
joint coordinate frame):\n self.joint_ang_acc = qdd @ self._z\n return\n\n def multiply_inertia_with_motion_vec(self, lin, ang):\n\n mass, com, inertia_mat = self._get_dynamics_parameters_values()\n\n mcom = com * mass\n com_skew_symm_mat = utils.vector3_to_skew_symm_matrix(com)\n inertia = inertia_mat + mass * (\n com_skew_symm_mat @ com_skew_symm_mat.transpose(-2, -1)\n )\n\n batch_size = lin.shape[0]\n\n new_lin_force = mass * lin - utils.cross_product(\n mcom.repeat(batch_size, 1), ang\n )\n new_ang_force = (inertia.repeat(batch_size, 1, 1) @ ang.unsqueeze(2)).squeeze(\n 2\n ) + utils.cross_product(mcom.repeat(batch_size, 1), lin)\n\n return new_lin_force, new_ang_force\n\n def _get_dynamics_parameters_values(self):\n return self.mass, self.com, self.inertia_mat\n\n def get_joint_limits(self):\n return self.joint_limits\n\n def get_joint_damping_const(self):\n return self.joint_damping\n\n\nclass LearnableRigidBody(DifferentiableRigidBody):\n r\"\"\"\n\n Learnable Representation of a link\n\n \"\"\"\n\n def __init__(self, learnable_rigid_body_config, gt_rigid_body_params, device=\"cpu\"):\n\n super().__init__(rigid_body_params=gt_rigid_body_params, device=device)\n\n # we overwrite dynamics parameters\n if \"mass\" in learnable_rigid_body_config.learnable_dynamics_params:\n self.mass_fn = hydra.utils.instantiate(\n learnable_rigid_body_config.mass_parametrization, device=device\n )\n else:\n self.mass_fn = lambda: self.mass\n\n if \"com\" in learnable_rigid_body_config.learnable_dynamics_params:\n self.com_fn = hydra.utils.instantiate(\n learnable_rigid_body_config.com_parametrization, device=device\n )\n else:\n self.com_fn = lambda: self.com\n\n if \"inertia_mat\" in learnable_rigid_body_config.learnable_dynamics_params:\n self.inertia_mat_fn = hydra.utils.instantiate(learnable_rigid_body_config.inertia_parametrization)\n else:\n self.inertia_mat_fn = lambda: self.inertia_mat\n\n self.joint_damping = gt_rigid_body_params[\"joint_damping\"]\n\n # kinematics parameters\n if \"trans\" in learnable_rigid_body_config.learnable_kinematics_params:\n self.trans = torch.nn.Parameter(\n torch.rand_like(gt_rigid_body_params[\"trans\"])\n )\n self.joint_pose.set_translation(torch.reshape(self.trans, (1, 3)))\n\n if \"rot_angles\" in learnable_rigid_body_config.learnable_kinematics_params:\n self.rot_angles = torch.nn.Parameter(gt_rigid_body_params[\"rot_angles\"])\n\n return\n\n def _get_dynamics_parameters_values(self):\n return self.mass_fn(), self.com_fn(), self.inertia_mat_fn()\n" ]
[ [ "torch.reshape", "torch.nn.Parameter", "torch.rand_like", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abhishekkulkarni24/Machine-Learning
[ "8bf49a5c9c669ce078250f164376606546c9e81f" ]
[ "Numpy/vector_ranging_10_to_49.py" ]
[ "#Create a vector with values ranging from 10 to 49\n\n\nimport numpy as np\n\nvector = np.arange(10,50);\nprint(vector)\n\n'''\nOutput\n\n[10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33\n 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49]\n\n'''" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ianlevesque/ThreatExchange
[ "d00ca28b200eb2b9ade9db5299c083f1968c5a41" ]
[ "pytx3/benchmarks/benchmark_pdq_faiss_matchers.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport argparse\nimport binascii\nimport os\nimport time\nimport pickle\n\nimport numpy\nimport faiss\n\nfrom pytx3.hashing import (\n PDQFlatHashIndex,\n PDQMultiHashIndex,\n BITS_IN_PDQ,\n)\n\nparser = argparse.ArgumentParser(\n description=\"Run basic benchmarks comparing PDQHashIndex implementations using faiss\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n)\n\nparser.add_argument(\n \"--faiss-threads\",\n type=int,\n default=1,\n help=\"number of threads for faiss to use while searching\",\n)\nparser.add_argument(\n \"--dataset-size\",\n type=int,\n default=250000,\n help=\"number of hashes to generate for the dataset to search against\",\n)\nparser.add_argument(\n \"--num-queries\",\n type=int,\n default=1000,\n help=\"number of queries to generate for each search\",\n)\nparser.add_argument(\n \"--thresholds\",\n type=int,\n default=[0, 15, 31, 47],\n choices=range(256),\n nargs=\"+\",\n metavar=\"THRESHOLDS\",\n help=\"PDQ similarity threshold values to benchmark with\",\n)\nparser.add_argument(\"--seed\", type=int, help=\"seed for random number generator\")\nparser.add_argument(\n \"--use-custom-ids\",\n dest=\"use_custom_ids\",\n action=\"store_true\",\n help=\"whether to use custom ids in the index\",\n)\nparser.set_defaults(use_custom_ids=False)\n\nargs = parser.parse_args()\n\n######\n# Print Benchmark Settings\n######\n\nprint(\"Benchmark: PDQ Faiss Matcher Comparison\")\nprint(\"\")\nprint(\"Options:\")\nfor arg in vars(args):\n print(\"\\t\", arg, \": \", getattr(args, arg))\nprint(\"\")\n\n######\n# Set up environment and helpers\n######\n\nfaiss.omp_set_num_threads(args.faiss_threads)\nseed = args.seed if args.seed else time.time_ns()\nrng = numpy.random.default_rng(seed)\nif args.seed is None:\n print(\"using random seed of \", seed)\n print(\"use --seed \", seed, \" to rerun with same random values\")\n print(\"\")\n\n\ndef generate_random_hash():\n \"\"\"\n returns a random 256 bit PDQ hash as a hexstring of 64 characters\n \"\"\"\n hash_bytes = rng.bytes(BITS_IN_PDQ // 8)\n return binascii.hexlify(hash_bytes).decode()\n\n\ndef generate_random_distance_mask(hamming_distance):\n \"\"\"\n returns a random numpy array of uint8s that can be used as bitwise mask\n to generate a hash with the given hamming distance\n \"\"\"\n ones = numpy.ones(hamming_distance, dtype=numpy.uint8)\n bitmask = numpy.pad(\n ones, (0, BITS_IN_PDQ - hamming_distance), \"constant\", constant_values=0\n )\n return numpy.packbits(rng.permutation(bitmask))\n\n\ndef generate_random_hash_with_hamming_distance(original_hash, desired_hamming_distance):\n \"\"\"\n returns a random 256 bit PDQ hash as a hexstring of 64 characters that is the given\n hamming distance from the provided original hash\n \"\"\"\n original_hash_bytes = numpy.frombuffer(\n binascii.unhexlify(original_hash), dtype=numpy.uint8\n )\n mask = generate_random_distance_mask(desired_hamming_distance)\n new_hash_bytes = numpy.bitwise_xor(original_hash_bytes, mask).tobytes()\n return binascii.hexlify(new_hash_bytes).decode()\n\n\n######\n# Generate Random Dataset and Build Indexes\n######\n\ndataset = [generate_random_hash() for _ in range(args.dataset_size)]\n\nif args.use_custom_ids:\n custom_ids = [i + 100_000_000_000_000 for i in range(args.dataset_size)]\nelse:\n custom_ids = None\n\nstart_build_flat_hash_index = time.time()\nflat_index = PDQFlatHashIndex.create(dataset, custom_ids=custom_ids)\nserialized_flat_index = 
pickle.dumps(flat_index)\nend_build_flat_hash_index = time.time()\n\nstart_build_multi_hash_index = time.time()\nmulti_index = PDQMultiHashIndex.create(dataset, custom_ids=custom_ids)\nserialized_multi_index = pickle.dumps(multi_index)\nend_build_multi_hash_index = time.time()\n\nprint(\"Building Stats:\")\n\nprint(\n \"\\tPDQFlatHashIndex: time to build (s): \",\n end_build_flat_hash_index - start_build_flat_hash_index,\n)\nprint(\n f\"\\tPDQFlatHashIndex: approximate size: {len(serialized_flat_index) // 1024:,d}KB\"\n)\nprint(\n \"\\tPDQMultiHashIndex: time to build (s): \",\n end_build_multi_hash_index - start_build_multi_hash_index,\n)\nprint(\n f\"\\tPDQMultiHashIndex: approximate size: {len(serialized_multi_index) // 1024:,d}KB\"\n)\nprint(\"\")\n\n######\n# Run benchmarks for each requested search threshold\n######\nfor threshold in args.thresholds:\n print(\"Benchmarks for threshold: \", threshold)\n\n # Create queries with hamming distance of threshold compared to their search targets\n search_targets = rng.choice(dataset, size=args.num_queries)\n queries = [\n generate_random_hash_with_hamming_distance(target, threshold)\n for target in search_targets\n ]\n\n # Benchmark Searching Indexes\n start_flat_search = time.time()\n flat_results = flat_index.search(queries, threshold)\n end_flat_search = time.time()\n\n start_multi_search = time.time()\n multi_results = multi_index.search(queries, threshold)\n end_multi_search = time.time()\n\n def count_targets_found(targets, queries, results):\n \"\"\"\n Checks that each element of the provided search results list contains\n the associated target for that query, warning if it does not.\n\n Returns the number of targets that were found in their corresponding\n results\n \"\"\"\n found_targets = 0\n for target, query, result in zip(targets, queries, results):\n if target not in result:\n print(\n \"Query missed target in result: query=\",\n query,\n \"target=\",\n target,\n \"result=\",\n result,\n )\n else:\n found_targets += 1\n return found_targets\n\n flat_found_targets = count_targets_found(search_targets, queries, flat_results)\n multi_found_targets = count_targets_found(search_targets, queries, multi_results)\n\n print(\n \"\\tPDQFlatHashIndex - Total Time to search (s): \",\n end_flat_search - start_flat_search,\n )\n print(\n \"\\tPDQMultiHashIndex - Total Time to search (s): \",\n end_multi_search - start_multi_search,\n )\n print(\n \"\\tPDQFlatHashIndex - Precent of targets found: \",\n flat_found_targets / len(queries) * 100,\n )\n print(\n \"\\tPDQMultiHashIndex - Precent of targets found: \",\n multi_found_targets / len(queries) * 100,\n )\n\n print(\"\")\n" ]
[ [ "numpy.bitwise_xor", "numpy.pad", "numpy.random.default_rng", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
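The benchmark record above generates query hashes at an exact Hamming distance by building a bitmask with the desired number of one-bits, shuffling it, and XOR-ing it into the original hash. A minimal standalone numpy sketch of that trick follows; it is not part of the dataset row, and the 256-bit width and the verification via numpy.unpackbits are assumptions for illustration only.

import binascii
import numpy as np

BITS = 256  # PDQ hashes are 256 bits wide
rng = np.random.default_rng(0)
original = rng.bytes(BITS // 8)

distance = 15
bitmask = np.zeros(BITS, dtype=np.uint8)
bitmask[:distance] = 1                        # exactly `distance` one-bits
mask = np.packbits(rng.permutation(bitmask))  # scatter them over 256 positions

modified = np.bitwise_xor(np.frombuffer(original, dtype=np.uint8), mask)

# XOR flips exactly the masked bits, so the Hamming distance is `distance`.
diff = np.bitwise_xor(np.frombuffer(original, dtype=np.uint8), modified)
assert int(np.unpackbits(diff).sum()) == distance
print(binascii.hexlify(modified.tobytes()).decode())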
dmgav/test_doc
[ "2e3b8faf55364c5b3f7e2897b8d6bfe61f5b4664" ]
[ "pyxrf/model/setting.py" ]
[ "from __future__ import (absolute_import, division,\n print_function)\n\nimport numpy as np\nfrom collections import OrderedDict\nimport copy\nimport os\nimport re\n\nfrom atom.api import (Atom, Str, observe, Dict, List, Int, Bool)\n\nfrom skbeam.fluorescence import XrfElement as Element\nfrom skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE\nfrom skbeam.core.fitting.background import snip_method\n\nfrom .fileio import save_fitdata_to_hdf\n\nimport multiprocessing\n\nimport logging\nlogger = logging.getLogger()\n\n\nclass ROIModel(Atom):\n \"\"\"\n This class defines basic data structure for roi calculation.\n\n Attributes\n ----------\n prefix : str\n prefix name\n line_val : float\n emission energy of primary line\n left_val : float\n left boundary\n right_val : float\n right boundary\n default_left : float\n default_right : float\n step : float\n min step value to change\n show_plot : bool\n option to plot\n \"\"\"\n prefix = Str()\n line_val = Int()\n left_val = Int()\n right_val = Int()\n default_left = Int()\n default_right = Int()\n step = Int(1)\n show_plot = Bool(False)\n\n @observe('left_val')\n def _value_update(self, change):\n if change['type'] == 'create':\n return\n logger.debug('left value is changed {}'.format(change))\n\n @observe('show_plot')\n def _plot_opt(self, change):\n if change['type'] == 'create':\n return\n logger.debug('show plot is changed {}'.format(change))\n\n\nclass SettingModel(Atom):\n \"\"\"\n Control roi calculation according to given inputs.\n\n Parameters\n ----------\n parameters : Dict\n parameter values used for fitting\n data_dict : Dict\n dict of 3D data\n img_dict : Dict\n Reference to the respective field of the ``FileIOModel`` object\n element_for_roi : str\n inputs given by users\n element_list_roi : list\n list of elements after parsing\n roi_dict : dict\n dict of ROIModel object\n enable_roi_computation : Bool\n enables/disables GUI element that start ROI computation\n At least one element must be selected and all entry in the element\n list must be valid before ROI may be computed\n\n result_folder : Str\n directory which contains HDF5 file, in which results of processing are saved\n hdf_path : Str\n full path to the HDF5 file, in which results are saved\n hdf_name : Str\n name of the HDF file, in which results are saved\n\n data_title : str\n The title of the selected dataset (from ``fileio`` module)\n data_title_base : str\n The title changed for internal use (suffix is removed)\n data_title_adjusted : str\n The title changed for internal use (suffix 'sum' is removed if it exists)\n suffix_name_roi : str\n The suffix may have values 'sum', 'det1', 'det2' etc.\n \"\"\"\n parameters = Dict()\n data_sets = Dict()\n img_dict = Dict()\n\n element_for_roi = Str()\n element_list_roi = List()\n roi_dict = OrderedDict()\n enable_roi_computation = Bool(False)\n\n subtract_background = Bool(False)\n\n result_folder = Str()\n\n hdf_path = Str()\n hdf_name = Str()\n\n data_title = Str()\n data_title_base = Str()\n data_title_adjusted = Str()\n suffix_name_roi = Str()\n\n def filename_update(self, change):\n \"\"\"\n Observer function to be connected to the fileio model\n in the top-level gui.py startup\n\n Parameters\n ----------\n changed : dict\n This is the dictionary that gets passed to a function\n with the @observe decorator\n \"\"\"\n self.hdf_name = change['value']\n # output to .h5 file\n self.hdf_path = os.path.join(self.result_folder, self.hdf_name)\n\n def result_folder_changed(self, change):\n \"\"\"\n Observer function to 
be connected to the fileio model\n in the top-level gui.py startup\n\n Parameters\n ----------\n changed : dict\n This is the dictionary that gets passed to a function\n with the @observe decorator\n \"\"\"\n self.result_folder = change['value']\n\n def data_title_update(self, change):\n \"\"\"\n Observer function to be connected to the fileio model\n in the top-level gui.py startup\n\n Parameters\n ----------\n changed : dict\n This is the dictionary that gets passed to a function\n with the @observe decorator\n \"\"\"\n self.data_title = change['value']\n\n # It is assumed, that ``self.data_title`` was created in the ``fileio`` module\n # and has dataset label attached to the end of it.\n # The labels are ``sum``, ``det1``, ``det2`` etc. depending on the number\n # of detector channels.\n self.suffix_name_roi = self.data_title.split('_')[-1]\n\n self.data_title_base = '_'.join(self.data_title.split(\"_\")[:-1])\n\n if self.suffix_name_roi == \"sum\":\n # If suffix is 'sum', then remove the suffix\n self.data_title_adjusted = self.data_title_base\n else:\n # Else keep the original title\n self.data_title_adjusted = self.data_title\n\n def __init__(self, *args, **kwargs):\n self.parameters = kwargs['default_parameters']\n # Initialize with an empty string (no elements selected)\n self.element_for_roi = \"\"\n self.enable_roi_computation = False\n\n @observe('element_for_roi')\n def _update_element(self, change):\n \"\"\"\n Get element information as a string and parse it as a list.\n This element information means the ones for roi setup.\n \"\"\"\n self.element_for_roi = self.element_for_roi.strip(' ')\n # Remove leading and trailing ','\n self.element_for_roi = self.element_for_roi.strip(',')\n # Remove leading and trailing '.'\n self.element_for_roi = self.element_for_roi.strip('.')\n try:\n if len(self.element_for_roi) == 0:\n logger.debug('No elements entered.')\n self.remove_all_roi()\n self.element_list_roi = []\n self.enable_roi_computation = False\n return\n elif ',' in self.element_for_roi:\n element_list = [v.strip(' ') for v in self.element_for_roi.split(',')]\n else:\n element_list = [v for v in self.element_for_roi.split(' ')]\n\n # with self.suppress_notifications():\n # self.element_list_roi = element_list\n logger.debug('Current elements for ROI sum are: {}'.format(element_list))\n self.update_roi(element_list)\n self.element_list_roi = element_list\n self.enable_roi_computation = True\n except Exception as ex:\n logger.warning(f\"Incorrect specification of element lines for ROI computation: {ex}\")\n self.enable_roi_computation = False\n\n def data_sets_update(self, change):\n \"\"\"\n Observer function to be connected to the fileio model\n in the top-level gui.py startup\n\n Parameters\n ----------\n changed : dict\n This is the dictionary that gets passed to a function\n with the @observe decorator\n \"\"\"\n self.data_sets = change['value']\n\n def img_dict_update(self, change):\n \"\"\"\n Observer function to be connected to the fileio model\n in the top-level gui.py startup\n\n Parameters\n ----------\n change : dict\n This is the dictionary that gets passed to a function\n with the @observe decorator\n \"\"\"\n self.img_dict = change['value']\n\n def update_parameter(self, param):\n self.parameters = copy.deepcopy(param)\n\n def select_elements_from_list(self, element_list):\n self.element_for_roi = ', '.join(element_list)\n\n def use_all_elements(self):\n self.element_for_roi = ', '.join(K_LINE+L_LINE) # +M_LINE)\n\n def clear_selected_elements(self):\n 
self.element_for_roi = \"\"\n\n def remove_all_roi(self):\n self.roi_dict.clear()\n\n def update_roi(self, element_list, std_ratio=4):\n \"\"\"\n Update elements without touching old ones.\n\n Parameters\n ----------\n element_list : list\n list of elements for roi\n std_ratio : float, optional\n Define the range of roi for given element.\n\n Notes\n -----\n The unit of energy is in ev in this function. The reason is\n SpinBox in Enaml can only read integer as input. To be updated.\n \"\"\"\n\n eline_list = K_LINE + L_LINE + M_LINE\n\n for v in element_list:\n if v in self.roi_dict:\n continue\n\n if v not in eline_list:\n raise ValueError(f\"Emission line {v} is unknown\")\n\n if '_K' in v:\n temp = v.split('_')[0]\n e = Element(temp)\n val = int(e.emission_line['ka1']*1000)\n elif '_L' in v:\n temp = v.split('_')[0]\n e = Element(temp)\n val = int(e.emission_line['la1']*1000)\n elif '_M' in v:\n temp = v.split('_')[0]\n e = Element(temp)\n val = int(e.emission_line['ma1']*1000)\n\n delta_v = int(self.get_sigma(val/1000)*1000)\n\n roi = ROIModel(prefix=self.suffix_name_roi,\n line_val=val,\n left_val=val-delta_v*std_ratio,\n right_val=val+delta_v*std_ratio,\n default_left=val-delta_v*std_ratio,\n default_right=val+delta_v*std_ratio,\n step=1,\n show_plot=False)\n\n self.roi_dict.update({v: roi})\n\n # remove old items not included in element_list\n for k in self.roi_dict.copy().keys():\n if k not in element_list:\n del self.roi_dict[k]\n\n def get_sigma(self, energy, epsilon=2.96):\n \"\"\"\n Calculate the std at given energy.\n \"\"\"\n temp_val = 2 * np.sqrt(2 * np.log(2))\n return np.sqrt((self.parameters['fwhm_offset']['value']/temp_val)**2 +\n energy*epsilon*self.parameters['fwhm_fanoprime']['value'])\n\n def get_roi_sum(self):\n \"\"\"\n Save roi sum into a dict.\n\n Returns\n -------\n dict\n nested dict as output\n \"\"\"\n roi_result = {}\n\n datav = self.data_sets[self.data_title]\n\n logger.info(f\"Computing ROIs for dataset {self.data_title} ...\")\n\n temp = {}\n data_raw = np.asarray(datav.raw_data)\n\n if self.subtract_background:\n # TODO: Implementation of background subtraction is not memory efficient\n # and may cause problems when processing large scans. Eventually it\n # needs to be rewritten, so that the data is processed in batches and\n # no complete copy of raw data is created in memory. 
This does not affect\n # computation of ROI without background subtraction (which is typical).\n\n logger.info(f\"Subtracting background ...\")\n\n num_processors_to_use = multiprocessing.cpu_count()\n logger.info(f\"Cpu count: {num_processors_to_use}\")\n pool = multiprocessing.Pool(num_processors_to_use)\n\n result_pool = [pool.apply_async(\n _subtract_background_one_line,\n (data_raw[n, :, :],\n self.parameters['e_offset']['value'],\n self.parameters['e_linear']['value'],\n self.parameters['e_quadratic']['value'],\n self.parameters['non_fitting_values']['background_width']))\n for n in range(data_raw.shape[0])]\n\n data_roi = []\n for r in result_pool:\n data_roi.append(r.get())\n\n pool.terminate()\n pool.join()\n\n data_roi = np.array(data_roi)\n\n logger.info(f\"Background subtraction completed.\")\n\n else:\n data_roi = data_raw\n\n for k, v in self.roi_dict.items():\n leftv = v.left_val/1000\n rightv = v.right_val/1000\n sum2D = calculate_roi(data_roi,\n e_offset=self.parameters['e_offset']['value'],\n e_linear=self.parameters['e_linear']['value'],\n range_v=[leftv, rightv])\n sum2D = sum2D.astype(np.float64, copy=False) # Convert to 64-bit representation\n temp.update({k: sum2D})\n logger.debug(f\"Calculation is completed for {v.prefix}, {self.data_title}, {k}\")\n\n # Save ROI data to HDF5 file\n self.saveROImap_to_hdf(temp)\n\n # Add scalers to the ROI dataset, so that they can be selected from Image Wizard.\n # We don't want to save scalers to the file, since they are already in the file.\n # So we add scalers after data is saved.\n scaler_key = f\"{self.data_title_base}_scaler\"\n if scaler_key in self.img_dict:\n temp.update(self.img_dict[scaler_key])\n\n roi_result[f\"{self.data_title_adjusted}_roi\"] = temp\n\n logger.info(\"ROI is computed.\")\n return roi_result\n\n def saveROImap_to_hdf(self, data_dict_roi):\n\n # Generate the path to computed ROIs in the HDF5 file\n det_name = \"detsum\" # Assume that ROIs are computed using the sum of channels\n\n # Search for channel name in the data title. Channels are named\n # det1, det2, ... , i.e. 
'det' followed by integer number.\n # The channel name is always located at the end of the ``data_title``.\n # If the channel name is found, then build the path using this name.\n srch = re.search(r\"det\d+$\", self.data_title)\n if srch:\n det_name = srch.group(0)\n inner_path = f\"xrfmap/{det_name}\"\n\n try:\n save_fitdata_to_hdf(self.hdf_path, data_dict_roi, datapath=inner_path,\n data_saveas='xrf_roi', dataname_saveas='xrf_roi_name')\n except Exception as ex:\n logger.error(f\"Failed to save ROI data to file '{self.hdf_path}'\n\"\n f\" Exception: {ex}\")\n else:\n logger.info(f\"ROI data was successfully saved to file '{self.hdf_name}'\")\n\n\ndef calculate_roi(data3D, e_offset, e_linear, range_v):\n \"\"\"\n Calculate 2D map for given ROI.\n\n Parameters\n ----------\n data3D : 3D array\n spectral data with the energy axis as the last dimension\n e_offset : float\n offset - coefficient for polynomial approximation of energy axis\n e_linear : float\n linear coefficient of polynomial approximation of energy axis\n range_v : list\n energy range (ROI) used for the computation for the element\n\n Returns\n -------\n array\n 2D map\n \"\"\"\n data3D = np.asarray(data3D)\n range_v = np.asarray(range_v)\n range_v = (range_v - e_offset)/e_linear\n range_v = [int(round(v)) for v in range_v]\n # return np.sum(data3D[range_v[0]:range_v[1], :, :], axis=0)*e_linear\n return np.sum(data3D[:, :, range_v[0]:range_v[1]], axis=2) # * e_linear\n\n\ndef _subtract_background_one_line(data_line, e_off, e_lin, e_quad, width):\n \"\"\"\n Subtract background from spectra in a single line of the image\n\n Parameters\n ----------\n\n data_line : ndarray\n spectra for one line of an image, size NxM, N-the number of\n pixels in the line, M - the number of points in one spectrum (typically 4096)\n e_off : float\n offset - coefficient for polynomial approximation of energy axis\n e_lin : float\n linear coefficient of polynomial approximation of energy axis\n e_quad : float\n quadratic coefficient of polynomial approximation of energy axis\n width : float\n parameter of snip algorithm for background estimation\n\n Returns\n -------\n\n ndarray of the same shape as data_line. Contains spectra with subtracted background.\n \"\"\"\n\n data_line = np.copy(data_line)\n xx, _ = data_line.shape\n for n in range(xx):\n bg = snip_method(data_line[n, :],\n e_off=e_off,\n e_lin=e_lin,\n e_quad=e_quad,\n width=width)\n data_line[n, :] -= bg\n return data_line\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.asarray", "numpy.copy", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
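In the pyxrf record above, calculate_roi reduces a 3-D spectral cube to a 2-D map by converting an energy window to channel indices with the linear calibration E = e_offset + e_linear * channel and summing the spectral axis. A standalone numpy sketch of that index arithmetic follows; the calibration values and array shapes are made up for illustration.

import numpy as np

e_offset, e_linear = 0.0, 0.01         # assumed: 10 eV per channel, zero offset
data3D = np.random.rand(32, 32, 4096)  # (rows, cols, energy channels)

range_v = np.asarray([1.2, 1.5])       # ROI window in keV
range_v = (range_v - e_offset) / e_linear
lo, hi = [int(round(v)) for v in range_v]

roi_map = np.sum(data3D[:, :, lo:hi], axis=2)  # 2-D ROI map per pixel
print(roi_map.shape, (lo, hi))         # (32, 32) (120, 150)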
AustinXY/super-res-stylegan2
[ "b1dbfcd0a20ae2917240aeb0562dc242b2671587" ]
[ "train1.py" ]
[ "import argparse\nimport math\nimport random\nimport os\nimport copy\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nfrom numpy.core.fromnumeric import resize\nimport dnnlib\n\nimport numpy as np\nimport torch\nfrom torch import nn, autograd, optim\nfrom torch.nn import functional as F\nfrom torch.utils import data\nimport torch.distributed as dist\nfrom torchvision import transforms, utils\nfrom tqdm import tqdm\nfrom torch_utils import image_transforms\nfrom PIL import Image\n\nfrom model import Generator, G_NET, Encoder, UNet\nfrom finegan_config import finegan_config\n\n# try:\nimport wandb\n\n# except ImportError:\n# wandb = None\n\nfrom op import conv2d_gradfix\nfrom dataset import MultiResolutionDataset\n\nfrom distributed import (\n get_rank,\n synchronize,\n reduce_loss_dict,\n reduce_sum,\n get_world_size,\n)\n# from non_leaking import augment, AdaptiveAugment\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n# os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if isinstance(m, nn.Conv2d):\n nn.init.orthogonal_(m.weight.data, 1.0)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.Linear):\n nn.init.orthogonal_(m.weight.data, 1.0)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n elif classname == 'MnetConv':\n nn.init.constant_(m.mask_conv.weight.data, 1)\n\ndef data_sampler(dataset, shuffle, distributed):\n if distributed:\n return data.distributed.DistributedSampler(dataset, shuffle=shuffle)\n\n if shuffle:\n return data.RandomSampler(dataset)\n\n else:\n return data.SequentialSampler(dataset)\n\n\ndef requires_grad(model, flag=True):\n for p in model.parameters():\n p.requires_grad = flag\n\n\ndef accumulate(model1, model2, decay=0.999):\n par1 = dict(model1.named_parameters())\n par2 = dict(model2.named_parameters())\n\n for k in par1.keys():\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)\n\n\ndef sample_data(loader):\n while True:\n for batch in loader:\n yield batch\n\n\ndef d_logistic_loss(real_pred, fake_pred):\n real_loss = F.softplus(-real_pred)\n fake_loss = F.softplus(fake_pred)\n\n return real_loss.mean() + fake_loss.mean()\n\n\ndef d_r1_loss(real_pred, real_img):\n with conv2d_gradfix.no_weight_gradients():\n grad_real, = autograd.grad(\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\n )\n grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()\n\n return grad_penalty\n\n\ndef g_nonsaturating_loss(fake_pred):\n loss = F.softplus(-fake_pred).mean()\n return loss\n\n\ndef g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):\n noise = torch.randn_like(fake_img) / math.sqrt(\n fake_img.shape[2] * fake_img.shape[3]\n )\n grad, = autograd.grad(\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True\n )\n\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\n\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\n\n path_penalty = (path_lengths - path_mean).pow(2).mean()\n\n return path_penalty, path_mean.detach(), path_lengths\n\ndef set_grad_none(model, targets):\n for n, p in model.named_parameters():\n if n in targets:\n p.grad = None\n\ndef make_noise(batch, latent_dim, n_noise, device):\n if n_noise == 1:\n return torch.randn(batch, latent_dim, device=device)\n noises = torch.randn(n_noise, batch, latent_dim, device=device)\n return noises\n\n\ndef mixing_noise(batch, latent_dim, prob, device):\n prob = 
1 # the prob argument is overridden here: style mixing is forced on every call\n if prob > 0 and random.random() < prob:\n return make_noise(batch, latent_dim, 2, device)\n else:\n return [make_noise(batch, latent_dim, 1, device)]\n\ndef child_to_parent(c_code, c_dim, p_dim):\n ratio = c_dim / p_dim\n cid = torch.argmax(c_code, dim=1)\n pid = (cid / ratio).long()\n p_code = torch.zeros([c_code.size(0), p_dim], device=c_code.device)\n for i in range(c_code.size(0)):\n p_code[i][pid[i]] = 1\n return p_code\n\ndef sample_codes(batch, z_dim, b_dim, p_dim, c_dim, device):\n z = torch.randn(batch, z_dim, device=device)\n c = torch.zeros(batch, c_dim, device=device)\n cid = np.random.randint(c_dim, size=batch)\n for i in range(batch):\n c[i, cid[i]] = 1\n\n p = child_to_parent(c, c_dim, p_dim)\n b = c.clone()\n return z, b, p, c\n\n\ndef rand_sample_codes(prev_z=None, prev_b=None, prev_p=None, prev_c=None, rand_code=['b', 'p']):\n '''\n rand code default: keeping z and c\n '''\n\n if prev_z is not None:\n device = prev_z.device\n batch = prev_z.size(0)\n elif prev_b is not None:\n device = prev_b.device\n batch = prev_b.size(0)\n elif prev_p is not None:\n device = prev_p.device\n batch = prev_p.size(0)\n elif prev_c is not None:\n device = prev_c.device\n batch = prev_c.size(0)\n else:\n raise ValueError('at least one of prev_z, prev_b, prev_p, prev_c must be provided')\n\n if 'z' in rand_code:\n z = torch.randn(batch, prev_z.size(1), device=device)\n else:\n z = prev_z\n\n if 'b' in rand_code:\n b = torch.zeros(batch, prev_b.size(1), device=device)\n bid = np.random.randint(prev_b.size(1), size=batch)\n for i in range(batch):\n b[i, bid[i]] = 1\n else:\n b = prev_b\n\n if 'p' in rand_code:\n p = torch.zeros(batch, prev_p.size(1), device=device)\n pid = np.random.randint(prev_p.size(1), size=batch)\n for i in range(batch):\n p[i, pid[i]] = 1\n else:\n p = prev_p\n\n if 'c' in rand_code:\n c = torch.zeros(batch, prev_c.size(1), device=device)\n cid = np.random.randint(prev_c.size(1), size=batch)\n for i in range(batch):\n c[i, cid[i]] = 1\n else:\n c = prev_c\n\n return z, b, p, c\n\ndef binarization_loss(mask):\n return torch.min(1-mask, mask).mean()\n\ndef train(args, fine_generator, style_generator, mpnet, mknet, mp_optim, mk_optim, device):\n\n pbar = range(args.iter)\n\n if get_rank() == 0:\n pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)\n\n # cur_nimg = 0\n loss_dict = {}\n\n if args.distributed:\n mp_module = mpnet.module\n mk_module = mknet.module\n fine_g_module = fine_generator.module\n style_g_module = style_generator.module\n else:\n mp_module = mpnet\n mk_module = mknet\n fine_g_module = fine_generator\n style_g_module = style_generator\n\n\n # criterion_construct = nn.MSELoss()\n # criterion_reconstruct = nn.MSELoss().to(device)\n\n if args.trunc:\n truncation = 0.7\n trunc = style_generator.mean_latent(4096).detach()\n trunc.requires_grad_(False)\n else:\n truncation = 1\n trunc = None\n\n style_generator.eval()\n style_generator.requires_grad_(False)\n fine_generator.eval()\n fine_generator.requires_grad_(False)\n\n for idx in pbar:\n i = idx + args.start_iter\n\n if i > args.iter:\n print(\"Done!\")\n break\n\n mpnet.train()\n mknet.train()\n\n # ############# train mk network #############\n # mknet.requires_grad_(True)\n # # mpnet.requires_grad_(False)\n\n # z, b, p, c = sample_codes(args.batch, args.z_dim, args.b_dim, args.p_dim, args.c_dim, device)\n # if not args.tie_code:\n # z, b, p, c = rand_sample_codes(prev_z=z, prev_b=b, prev_p=p, prev_c=c, rand_code=['b', 'p'])\n\n # fine_img, mask = fine_generator(z, b, p, c, rtn_mk=True)\n # pred_mask = mknet(fine_img)\n\n # bin_loss = 
binarization_loss(pred_mask) * args.bin\n # mk_loss = F.mse_loss(pred_mask, mask) * args.mk\n\n # mknet_loss = mk_loss + bin_loss\n loss_dict[\"mk\"] = torch.zeros(1).to(device)\n # loss_dict[\"bin\"] = bin_loss / args.bin\n\n # mknet.zero_grad()\n # mknet_loss.backward()\n # mk_optim.step()\n\n ############# train mapping network #############\n mpnet.requires_grad_(True)\n\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\n\n style_img, latent = style_generator(noise, return_latents=True, randomize_noise=False)\n _style_img = F.interpolate(style_img, size=(128, 128), mode='area')\n\n #########\n with torch.no_grad():\n mask = mknet(_style_img)\n\n grad, = autograd.grad(\n outputs=(_style_img * noise).sum(), inputs=latent, create_graph=True\n )\n\n\n ##########\n\n\n wp_code = mpnet(_style_img)\n\n mp_loss = F.mse_loss(wp_code, torch.transpose(noise, 0, 1)) * args.mp\n loss_dict[\"mp\"] = mp_loss / args.mp\n\n mpnet.zero_grad()\n mp_loss.backward()\n mp_optim.step()\n\n ############# ############# #############\n loss_reduced = reduce_loss_dict(loss_dict)\n mp_loss_val = loss_reduced[\"mp\"].mean().item()\n mk_loss_val = loss_reduced[\"mk\"].mean().item()\n # bin_loss_val = loss_reduced[\"bin\"].mean().item()\n\n if get_rank() == 0:\n pbar.set_description(\n (\n f\"mp: {mp_loss_val:.4f}; mk: {mk_loss_val:.4f}\"\n )\n )\n\n if wandb and args.wandb:\n wandb.log(\n {\n \"MP\": mp_loss_val,\n \"MK\": mk_loss_val,\n }\n )\n\n # if i % 500 == 0:\n # with torch.no_grad():\n # mpnet.eval()\n # mknet.eval()\n\n # z, b, p, c = sample_codes(args.n_sample, args.z_dim, args.b_dim, args.p_dim, args.c_dim, device)\n # if not args.tie_code:\n # z, b, p, c = rand_sample_codes(prev_z=z, prev_b=b, prev_p=p, prev_c=c, rand_code=['b', 'p'])\n\n # fine_img, _ = fine_generator(z, b, p, c)\n # wp_code = mpnet(fine_img)\n # rec_fine, _ = style_generator([wp_code], input_is_latent=True)\n\n # _rec_fine = F.interpolate(rec_fine, size=(128, 128), mode='area')\n\n # rec_mk = mknet(_rec_fine)\n\n # noise = mixing_noise(args.n_sample, args.latent, args.mixing, device)\n # style_img, _ = style_generator(noise, return_latents=True)\n # _style_img = F.interpolate(style_img, size=(128, 128), mode='area')\n # wp_code = mpnet(_style_img)\n # rec_style, _ = style_generator([wp_code], input_is_latent=True)\n\n # utils.save_image(\n # fine_img,\n # f\"sample/{str(i).zfill(6)}_0.png\",\n # nrow=8,\n # normalize=True,\n # range=(-1, 1),\n # )\n\n # utils.save_image(\n # rec_fine,\n # f\"sample/{str(i).zfill(6)}_1.png\",\n # nrow=8,\n # normalize=True,\n # range=(-1, 1),\n # )\n\n # utils.save_image(\n # rec_mk,\n # f\"sample/{str(i).zfill(6)}_2.png\",\n # nrow=8,\n # normalize=True,\n # range=(0, 1),\n # )\n\n # utils.save_image(\n # style_img,\n # f\"sample/{str(i).zfill(6)}_3.png\",\n # nrow=8,\n # normalize=True,\n # range=(-1, 1),\n # )\n\n # utils.save_image(\n # rec_style,\n # f\"sample/{str(i).zfill(6)}_4.png\",\n # nrow=8,\n # normalize=True,\n # range=(-1, 1),\n # )\n # if wandb and args.wandb:\n # wandb.log(\n # {\n # \"fine image\": [wandb.Image(Image.open(f\"sample/{str(i).zfill(6)}_0.png\").convert(\"RGB\"))],\n # \"recon fine\": [wandb.Image(Image.open(f\"sample/{str(i).zfill(6)}_1.png\").convert(\"RGB\"))],\n # \"recon mask\": [wandb.Image(Image.open(f\"sample/{str(i).zfill(6)}_2.png\").convert(\"RGB\"))],\n # \"style image\": [wandb.Image(Image.open(f\"sample/{str(i).zfill(6)}_3.png\").convert(\"RGB\"))],\n # \"recon style\": 
[wandb.Image(Image.open(f\"sample/{str(i).zfill(6)}_4.png\").convert(\"RGB\"))],\n # }\n # )\n\n if i % 40000 == 0 and i != args.start_iter:\n torch.save(\n {\n \"style_g\": style_g_module.state_dict(),\n \"fine\": fine_g_module.state_dict(),\n \"mp\": mp_module.state_dict(),\n \"mk\": mk_module.state_dict(),\n \"mp_optim\": mp_optim.state_dict(),\n \"mk_optim\": mk_optim.state_dict(),\n \"args\": args,\n \"cur_iter\": i,\n },\n f\"checkpoint/{str(i).zfill(6)}_1_.pt\",\n )\n\n\nif __name__ == \"__main__\":\n device = \"cuda\"\n\n parser = argparse.ArgumentParser(description=\"mpnet trainer\")\n\n parser.add_argument('--arch', type=str, default='stylegan2', help='model architectures (stylegan2 | swagan)')\n parser.add_argument(\n \"--iter\", type=int, default=800000, help=\"total training iterations\"\n )\n parser.add_argument(\n \"--batch\", type=int, default=16, help=\"batch sizes for each gpus\"\n )\n parser.add_argument(\n \"--n_sample\",\n type=int,\n default=8,\n help=\"number of the samples generated during training\",\n )\n parser.add_argument(\n \"--size\", type=int, default=256, help=\"image sizes for the model\"\n )\n parser.add_argument(\n \"--mixing\", type=float, default=0.9, help=\"probability of latent code mixing\"\n )\n parser.add_argument(\n \"--ckpt\",\n type=str,\n default=None,\n help=\"path to previous trained checkpoint\",\n )\n parser.add_argument(\n \"--style_model\",\n type=str,\n default=None,\n help=\"path to stylegan\",\n )\n parser.add_argument(\n \"--fine_model\",\n type=str,\n default=None,\n help=\"path to finegan\",\n )\n parser.add_argument(\"--lr_mp\", type=float, default=2e-3, help=\"mapping network learning rate\")\n # parser.add_argument(\"--lr_d\", type=float, default=2e-5, help=\"discriminator learning rate\")\n parser.add_argument(\n \"--channel_multiplier\",\n type=int,\n default=2,\n help=\"channel multiplier factor for the model. 
config-f = 2, else = 1\",\n )\n parser.add_argument(\n \"--wandb\", action=\"store_true\", help=\"use weights and biases logging\"\n )\n parser.add_argument(\n \"--local_rank\", type=int, default=0, help=\"local rank for distributed training\"\n )\n parser.add_argument(\"--d_reg_every\", type=int, default=16)\n parser.add_argument(\"--r1\", type=float, default=10)\n parser.add_argument(\n \"--trunc\", action=\"store_true\", help=\"use truncation\"\n )\n parser.add_argument('--mp_arch', type=str, default='encoder',\n help='model architectures (vanilla | encoder)')\n parser.add_argument(\n \"--tie_code\", action=\"store_true\", help=\"use tied codes\"\n )\n parser.add_argument('--ds_name', type=str, default='STANFORDCAR',\n help='dataset used for training finegan (LSUNCAR | CUB | STANFORDCAR)')\n\n ## weights\n # parser.add_argument(\"--adv\", type=float, default=1, help=\"weight of the adv loss\")\n # parser.add_argument(\"--mse\", type=float, default=1e2, help=\"weight of the mse loss\")\n parser.add_argument(\"--mp\", type=float, default=1, help=\"weight of the latent recon loss\")\n parser.add_argument(\"--mk\", type=float, default=1e1, help=\"weight of mask recon\")\n parser.add_argument(\"--bin\", type=float, default=1, help=\"weight of mask recon\")\n\n args = parser.parse_args()\n\n n_gpu = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = n_gpu > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\n synchronize()\n\n args.latent = 512\n args.n_mlp = 8\n\n args.start_iter = 0\n\n args.z_dim = finegan_config[args.ds_name]['Z_DIM']\n args.b_dim = finegan_config[args.ds_name]['FINE_GRAINED_CATEGORIES']\n args.p_dim = finegan_config[args.ds_name]['SUPER_CATEGORIES']\n args.c_dim = finegan_config[args.ds_name]['FINE_GRAINED_CATEGORIES']\n\n if args.arch == 'stylegan2':\n from model import Generator, Discriminator\n\n elif args.arch == 'swagan':\n from swagan import Generator, Discriminator\n\n style_generator = Generator(\n size=args.size,\n style_dim=args.latent,\n n_mlp=args.n_mlp,\n channel_multiplier=args.channel_multiplier\n ).to(device)\n\n fine_generator = G_NET(ds_name=args.ds_name).to(device)\n\n if args.mp_arch == 'vanilla':\n mpnet = MappingNetwork(\n num_ws=style_generator.n_latent,\n w_dim=args.latent\n ).to(device)\n # https://github.com/bryandlee/stylegan2-encoder-pytorch\n elif args.mp_arch == 'encoder':\n mpnet = Encoder(\n size=128,\n num_ws=2,\n w_dim=args.latent\n ).to(device)\n\n mknet = UNet(\n n_channels = 3,\n n_classes = 1,\n bilinear = True,\n ).to(device)\n\n mp_optim = optim.Adam(\n mpnet.parameters(),\n lr=args.lr_mp,\n betas=(0, 0.99),\n )\n\n mk_optim = optim.Adam(\n mknet.parameters(),\n lr=args.lr_mp,\n betas=(0, 0.99),\n )\n\n if args.ckpt is not None:\n print(\"load model:\", args.ckpt)\n\n ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)\n # train_args = ckpt['args']\n # args.start_iter = ckpt['cur_iter']\n\n # mpnet.load_state_dict(ckpt[\"mp\"])\n # mknet.load_state_dict(ckpt[\"mk\"])\n\n if args.style_model is None:\n style_generator.load_state_dict(ckpt[\"style_g\"])\n # mp_optim.load_state_dict(ckpt[\"mp_optim\"])\n\n # if args.fine_model is None:\n # fine_generator.load_state_dict(ckpt[\"fine\"])\n # mk_optim.load_state_dict(ckpt[\"mk_optim\"])\n\n # if specify stylegan checkpoint, overwrite stylegan from ckpt\n if args.style_model is not None:\n print(\"load style model:\", 
args.style_model)\n style_dict = torch.load(args.style_model, map_location=lambda storage, loc: storage)\n style_generator.load_state_dict(style_dict[\"g_ema\"])\n # d_optim.load_state_dict(style_dict[\"d_optim\"])\n\n # if specify finegan checkpoint, overwrite finegan from ckpt\n if args.fine_model is not None:\n print(\"load fine model:\", args.fine_model)\n fine_dict = torch.load(args.fine_model, map_location=lambda storage, loc: storage)\n fine_generator.load_state_dict(fine_dict)\n\n if args.distributed:\n style_generator = nn.parallel.DistributedDataParallel(\n style_generator,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n broadcast_buffers=False,\n )\n\n fine_generator = nn.parallel.DistributedDataParallel(\n fine_generator,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n broadcast_buffers=False,\n )\n\n mpnet = nn.parallel.DistributedDataParallel(\n mpnet,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n broadcast_buffers=False,\n )\n\n mknet = nn.parallel.DistributedDataParallel(\n mknet,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n broadcast_buffers=False,\n )\n\n if get_rank() == 0 and wandb is not None and args.wandb:\n wandb.init(project=\"map net style distribute\")\n\n train(args, fine_generator, style_generator, mpnet, mknet, mp_optim, mk_optim, device)\n" ]
[ [ "torch.randn_like", "torch.transpose", "torch.zeros", "torch.load", "torch.no_grad", "torch.nn.functional.interpolate", "numpy.random.randint", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.randn", "torch.nn.functional.softplus", "torch.nn.init.constant_", "torch.min", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.set_device", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.init.orthogonal_", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
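The train1.py record above collapses a fine-grained one-hot code to its parent category (child_to_parent) by integer division of the class index. A standalone sketch of that mapping follows; the 8-child/4-parent split and the use of floor division are assumptions for illustration.

import torch

c_dim, p_dim = 8, 4               # assumed: 8 child classes grouped into 4 parents
ratio = c_dim // p_dim            # children per parent

c = torch.zeros(3, c_dim)
c[torch.arange(3), torch.tensor([0, 3, 7])] = 1  # children 0, 3 and 7

pid = torch.argmax(c, dim=1) // ratio            # parent ids 0, 1, 3
p = torch.zeros(3, p_dim)
p[torch.arange(3), pid] = 1       # parent one-hot codes
print(pid.tolist())               # [0, 1, 3]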
NaturalSolutions/NS.Bootstrap
[ "c2cc73717dbe769e064c3254a5b20cb16b37bda2" ]
[ "Back/ecoreleve_server/Views/station.py" ]
[ "from pyramid.view import view_config\nfrom ..Models import (\n DBSession,\n Station,\n StationType,\n Observation\n )\nfrom ecoreleve_server.GenericObjets.FrontModules import (FrontModule,ModuleField)\nfrom ecoreleve_server.GenericObjets import ListObjectWithDynProp\nimport transaction\nimport json\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import select, and_\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\n\n\n\nprefix = 'stations'\n\n\n# @view_config(route_name= prefix, renderer='json', request_method = 'PUT')\n# def updateListStations(request):\n# # TODO \n# # update a list of stations \n# return\n\n# @view_config(route_name= prefix, renderer='json', request_method = 'GET')\n# def getListStations(request):\n# # TODO \n# # return list of stations \n# # can search/filter\n# return\n\n@view_config(route_name= prefix+'/action', renderer='json', request_method = 'GET')\ndef actionOnStations(request):\n print ('\\n*********************** Action **********************\\n')\n dictActionFunc = {\n 'count' : count,\n 'forms' : getForms,\n '0' : getForms,\n 'fields': getFields\n }\n actionName = request.matchdict['action']\n return dictActionFunc[actionName](request)\n\ndef count (request) :\n# ## TODO count stations\n return\n\ndef getForms(request) :\n\n typeSta = request.params['ObjectType']\n print('***************** GET FORMS ***********************')\n ModuleName = 'StaForm'\n Conf = DBSession.query(FrontModule).filter(FrontModule.Name==ModuleName ).first()\n newSta = Station(FK_StationType = typeSta)\n newSta.init_on_load()\n schema = newSta.GetDTOWithSchema(Conf,'edit')\n del schema['schema']['creationDate']\n return schema\n\ndef getFields(request) :\n# ## TODO return fields Station\n return\n\n@view_config(route_name= prefix+'/id', renderer='json', request_method = 'GET',permission = NO_PERMISSION_REQUIRED)\ndef getStation(request):\n\n print('***************** GET STATION ***********************')\n id = request.matchdict['id']\n curSta = DBSession.query(Station).get(id)\n curSta.LoadNowValues()\n\n # if Form value exists in request --> return data with schema else return only data\n if 'FormName' in request.params :\n ModuleName = request.params['FormName']\n try :\n DisplayMode = request.params['DisplayMode']\n except : \n DisplayMode = 'display'\n\n Conf = DBSession.query(FrontModule).filter(FrontModule.Name=='StaForm' ).first()\n response = curSta.GetDTOWithSchema(Conf,DisplayMode)\n else : \n response = curSta.GetFlatObject()\n\n return response\n\n@view_config(route_name= prefix+'/id', renderer='json', request_method = 'PUT')\ndef updateStation(request):\n\n print('*********************** UPDATE Station *****************')\n data = request.json_body\n id = request.matchdict['id']\n curSta = DBSession.query(Station).get(id)\n curSta.LoadNowValues()\n curSta.UpdateFromJson(data)\n transaction.commit()\n return {}\n\n@view_config(route_name= prefix, renderer='json', request_method = 'POST')\ndef insertStation(request):\n\n data = request.POST.mixed()\n if 'data' not in data :\n print('_______INsert ROW *******')\n return insertOneNewStation(request)\n else :\n print (data['data'])\n print('_______INsert LIST')\n\n return insertListNewStations(request)\n\ndef insertOneNewStation (request) :\n\n data = {}\n for items , value in request.json_body.items() :\n if value != \"\" :\n data[items] = value\n\n newSta = Station(FK_StationType = data['FK_StationType'])\n newSta.StationType = 
DBSession.query(StationType).filter(StationType.ID==data['FK_StationType']).first()\n newSta.init_on_load()\n newSta.UpdateFromJson(data)\n DBSession.add(newSta)\n DBSession.flush()\n return {'id': newSta.ID}\n\ndef insertListNewStations(request):\n data = request.POST.mixed()\n data = data['data']\n DTO = json.loads(data)\n data_to_insert = []\n format_dt = '%Y-%m-%d %H:%M:%S'\n format_dtBis = '%Y-%d-%m %H:%M:%S'\n dateNow = datetime.now()\n\n ##### Rename field and convert date #####\n for row in DTO :\n newRow = {}\n newRow['LAT'] = row['latitude']\n newRow['LON'] = row['longitude']\n newRow['Name'] = row['name']\n newRow['fieldActivityId'] = 1\n newRow['precision'] = row['Precision']\n newRow['creationDate'] = dateNow\n newRow['creator'] = request.authenticated_userid\n newRow['id'] = row['id']\n\n try :\n newRow['StationDate'] = datetime.strptime(row['waypointTime'],format_dt)\n except :\n newRow['StationDate'] = datetime.strptime(row['waypointTime'],format_dtBis)\n data_to_insert.append(newRow)\n\n ##### Load date into pandas DataFrame then round LAT,LON into decimal(5) #####\n DF_to_check = pd.DataFrame(data_to_insert)\n DF_to_check['LAT'] = np.round(DF_to_check['LAT'],decimals = 5)\n DF_to_check['LON'] = np.round(DF_to_check['LON'],decimals = 5)\n \n ##### Get min/max Value to query potential duplicated stations #####\n maxDate = DF_to_check['StationDate'].max()\n minDate = DF_to_check['StationDate'].min()\n maxLon = DF_to_check['LON'].max()\n minLon = DF_to_check['LON'].min()\n maxLat = DF_to_check['LAT'].max()\n minLat = DF_to_check['LAT'].min()\n\n ##### Retrieve potential duplicated stations from Database #####\n query = select([Station]).where(\n and_(\n Station.StationDate.between(minDate,maxDate),\n Station.LAT.between(minLat,maxLat)\n ))\n result_to_check = DBSession.execute(query).fetchall()\n\n if result_to_check :\n ##### IF potential duplicated stations, load them into pandas DataFrame then join data to insert on LAT,LON,DATE #####\n result_to_check = pd.DataFrame(data=result_to_check, columns = Station.__table__.columns.keys())\n result_to_check['LAT'] = result_to_check['LAT'].astype(float)\n result_to_check['LON'] = result_to_check['LON'].astype(float)\n\n merge_check = pd.merge(DF_to_check,result_to_check , on =['LAT','LON','StationDate'])\n\n ##### Get only non existing data to insert #####\n DF_to_check = DF_to_check[~DF_to_check['id'].isin(merge_check['id'])]\n\n DF_to_check = DF_to_check.drop(columns=['id'])\n data_to_insert = json.loads(DF_to_check.to_json(orient='records',date_format='iso'))\n\n ##### Build block insert statement and returning ID of new created stations #####\n if len(data_to_insert) != 0 :\n stmt = Station.__table__.insert(returning=[Station.ID]).values(data_to_insert)\n res = DBSession.execute(stmt).fetchall()\n result = list(map(lambda y: y[0], res))\n else : \n result = []\n\n response = {'exist': len(DTO)-len(data_to_insert), 'new': len(data_to_insert)}\n \n return response \n\n@view_config(route_name= prefix, renderer='json', request_method = 'GET', permission = NO_PERMISSION_REQUIRED)\ndef searchStation(request):\n\n # Build the search/filter criteria from the request parameters,\n # then return the flat list of matching stations.\n searchInfo = request.params.mixed()\n listObj = ListObjectWithDynProp(DBSession,Station,searchInfo)\n response = listObj.GetFlatList()\n return response\n\n@view_config(route_name= prefix+'/id/protocols', renderer='json', request_method = 'GET', permission = NO_PERMISSION_REQUIRED)\ndef GetProtocolsofStation (request) :\n\n sta_id = 
request.matchdict['id']\n data = {}\n searchInfo = {}\n criteria = {'Column': 'FK_Station', 'Operator':'=','Value':sta_id}\n\n response = []\n\n try : \n if 'criteria' in request.params or request.params == {} :\n print (' ********************** criteria params ==> Search ****************** ')\n\n searchInfo = data\n searchInfo['criteria'] = []\n searchInfo['criteria'].append(criteria)\n listObj = ListObjectWithDynProp(DBSession,Observation,searchInfo)\n response = listObj.GetFlatList()\n except : \n pass\n\n try :\n if 'FormName' in request.params : \n print (' ********************** Forms in params ==> DATA + FORMS ****************** ')\n ModuleName = request.params['FormName']\n try :\n DisplayMode = request.params['DisplayMode']\n except : \n DisplayMode = 'display'\n\n listObs = DBSession.query(Observation).filter(Observation.FK_Station == sta_id)\n\n if listObs :\n listObsWithSchema = {}\n for obs in listObs : \n typeName = obs.GetType().Name\n Conf = DBSession.query(FrontModule).filter(FrontModule.Name==ModuleName ).first()\n obs.LoadNowValues()\n try :\n listObsWithSchema[typeName].append(obs.GetDTOWithSchema(Conf,DisplayMode))\n except :\n listObsWithSchema[typeName] = []\n listObsWithSchema[typeName].append(obs.GetDTOWithSchema(Conf,DisplayMode))\n\n response = listObsWithSchema\n except Exception as e :\n print (e)\n pass\n return response\n\n@view_config(route_name= prefix+'/id/protocols', renderer='json', request_method = 'POST')\ndef insertNewProtocol (request) :\n data = {}\n for items , value in request.json_body.items() :\n if value != \"\" :\n data[items] = value\n data['FK_Station'] = request.matchdict['id']\n\n newProto = Observation(FK_ProtocoleType = data['FK_ProtocoleType'])\n newProto.ProtocoleType = DBSession.query(ProtocoleType).filter(ProtocoleType.ID==data['FK_ProtocoleType']).first()\n newProto.init_on_load()\n newProto.UpdateFromJson(data)\n DBSession.add(newProto)\n DBSession.flush()\n return {'id': newProto.ID}\n\n@view_config(route_name= prefix+'/id/protocols/obs_id', renderer='json', request_method = 'PUT')\ndef updateObservation(request):\n\n print('*********************** UPDATE Observation *****************')\n data = request.json_body\n id_obs = request.matchdict['obs_id']\n curObs = DBSession.query(Observation).get(id_obs)\n curObs.LoadNowValues()\n curObs.UpdateFromJson(data)\n transaction.commit()\n return {}\n\n@view_config(route_name= prefix+'/id/protocols/obs_id', renderer='json', request_method = 'GET', permission = NO_PERMISSION_REQUIRED)\ndef GetObservation(request):\n\n print('*********************** GET Observation *****************')\n \n id_obs = request.matchdict['obs_id']\n id_sta = request.matchdict['id']\n \n try :\n curObs = DBSession.query(Observation).filter(and_(Observation.ID ==id_obs, Observation.FK_Station == id_sta )).one()\n curObs.LoadNowValues()\n # if Form value exists in request --> return data with schema else return only data\n if 'FormName' in request.params :\n ModuleName = request.params['FormName']\n try :\n DisplayMode = request.params['DisplayMode']\n except : \n DisplayMode = 'display'\n\n Conf = DBSession.query(FrontModule).filter(FrontModule.Name=='ObsForm' ).first()\n response = curObs.GetDTOWithSchema(Conf,DisplayMode)\n else : \n response = curObs.GetFlatObject()\n\n except Exception as e :\n print(e)\n response = {}\n\n return response\n\n\n" ]
[ [ "numpy.round", "pandas.merge", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
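The station endpoint above de-duplicates incoming waypoints by rounding coordinates to five decimals and merging against already-stored rows on the (LAT, LON, StationDate) key, keeping only the unmatched rows for insertion. A standalone pandas sketch of that check follows; the sample coordinates and timestamps are made up for illustration.

import numpy as np
import pandas as pd

incoming = pd.DataFrame({
    'id': [1, 2],
    'LAT': [48.8583701, 45.7640430],
    'LON': [2.2944813, 4.8356590],
    'StationDate': pd.to_datetime(['2021-05-01 10:00', '2021-05-01 11:00']),
})
incoming['LAT'] = np.round(incoming['LAT'], decimals=5)
incoming['LON'] = np.round(incoming['LON'], decimals=5)

# Pretend the first station already exists in the database.
existing = incoming.iloc[[0]].drop(columns=['id'])

matched = pd.merge(incoming, existing, on=['LAT', 'LON', 'StationDate'])
to_insert = incoming[~incoming['id'].isin(matched['id'])]
print(len(to_insert))  # 1 -> only the second waypoint is new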
Zed-Wu/ManiSkill-Learn
[ "8056fe327752cd0863f8730672fe62bd85a0ec12" ]
[ "mani_skill_learn/utils/data/converter.py" ]
[ "import numpy as np\nfrom collections.abc import Sequence, Iterable\nfrom numbers import Number\nfrom .type import str_to_dtype, is_arr, is_dict, is_seq_of, is_type, scalar_type, is_str\n\n\ndef astype(x, dtype):\n if dtype is None:\n return x\n assert is_arr(x), type(x)\n if is_type(dtype):\n # dtype may be passed as a type object instead of a string\n return dtype(x)\n assert is_str(dtype), type(dtype)\n if is_arr(x, 'np'):\n return x.astype(str_to_dtype(dtype, 'np'))\n elif is_arr(x, 'torch'):\n return x.to(str_to_dtype(dtype, 'torch'))\n else:\n raise NotImplementedError(f\"As type {type(x)}\")\n\n\ndef to_torch(x, dtype=None, device=None, non_blocking=False):\n import torch\n if x is None:\n return None\n elif is_dict(x):\n return {k: to_torch(x[k], dtype, device, non_blocking) for k in x}\n elif is_seq_of(x):\n return type(x)([to_torch(y, dtype, device, non_blocking) for y in x])\n\n if isinstance(x, torch.Tensor):\n ret = x.detach()\n elif isinstance(x, (Sequence, Number)):\n ret = torch.from_numpy(np.array(x))\n elif isinstance(x, np.ndarray):\n ret = torch.from_numpy(x)\n else:\n raise NotImplementedError(f\"{x} {dtype}\")\n if device is not None:\n ret = ret.to(device, non_blocking=non_blocking)\n if dtype is not None:\n ret = astype(ret, dtype)\n return ret\n\n\ndef to_np(x, dtype=None):\n if x is None:\n return None\n elif isinstance(x, str):\n return np.string_(x)\n elif is_dict(x):\n return {k: to_np(x[k], dtype) for k in x}\n elif is_seq_of(x):\n return type(x)([to_np(y, dtype) for y in x])\n elif isinstance(x, (Number, Sequence)):\n ret = np.array(x, dtype=scalar_type(x))\n elif isinstance(x, np.ndarray):\n ret = x\n else:\n import torch\n if isinstance(x, torch.Tensor):\n ret = x.cpu().detach().numpy()\n else:\n raise NotImplementedError(f\"{type(x)} {dtype}\")\n if dtype is not None:\n ret = astype(ret, dtype)\n return ret\n\n\ndef iter_cast(inputs, dst_type, return_type=None):\n \"\"\"Cast elements of an iterable object into some type.\n Args:\n inputs (Iterable): The input object.\n dst_type (type): Destination type.\n return_type (type, optional): If specified, the output object will be converted to this type,\n otherwise an iterator.\n Returns:\n iterator or specified type: The converted object.\n \"\"\"\n if not isinstance(inputs, Iterable):\n raise TypeError('inputs must be an iterable object')\n if not isinstance(dst_type, type):\n raise TypeError('\"dst_type\" must be a valid type')\n out_iterable = map(dst_type, inputs)\n if return_type is None:\n return out_iterable\n else:\n return return_type(out_iterable)\n\n\ndef list_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=list)\n\n\ndef tuple_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=tuple)\n\n\ndef dict_to_seq(inputs, num_output=2):\n keys = list(sorted(inputs.keys()))\n values = [inputs[k] for k in keys]\n if num_output == 2:\n return keys, values\n elif num_output == 1:\n return tuple(zip(keys, values))\n else:\n raise ValueError(f\"num_output is {num_output}, which is not 1 or 2\")\n\n\ndef seq_to_dict(*args):\n # args: (keys, values) or a single sequence of (key, value) pairs\n args = list(args)\n if len(args) == 2:\n assert len(args[0]) == len(args[1])\n return {args[0][i]: args[1][i] for i in range(len(args[0]))}\n elif len(args) == 1:\n ret = {}\n for item in args[0]:\n assert len(item) == 2\n ret[item[0]] = item[1]\n return ret\n else:\n raise ValueError(f\"len(args) is {len(args)}, which is not 1 or 2\")\n\n\ndef dict_to_str(inputs):\n ret = ''\n for key in inputs:\n if ret != '':\n ret += \" \"\n if isinstance(inputs[key], (float, np.float32, np.float64)):\n if np.abs(inputs[key]).min() < 1E-2:\n ret += f'{key}:{inputs[key]:.4e}'\n else:\n ret += f'{key}:{inputs[key]:.6f}'\n else:\n ret += f'{key}:{inputs[key]}'\n return ret\n\n\ndef number_to_str(x, num):\n if isinstance(x, str):\n return x\n elif np.isscalar(x):\n if np.isreal(x):\n return f'{x:.{num}f}'\n else:\n return str(x)\n else:\n raise TypeError(f\"Type of {x} ({type(x)}) is not a number\")\n" ]
[ [ "numpy.string_", "numpy.abs", "torch.from_numpy", "numpy.isscalar", "numpy.array", "numpy.isreal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
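to_np / to_torch in the converter record above walk nested containers recursively, convert the leaves, and preserve the container type on the way back up. A minimal standalone sketch of that pattern follows; to_np_sketch is a hypothetical stand-in written for illustration, not the library function itself.

import numpy as np

def to_np_sketch(x):
    # walk dicts and sequences, convert leaves, keep container types
    if isinstance(x, dict):
        return {k: to_np_sketch(v) for k, v in x.items()}
    if isinstance(x, (list, tuple)):
        return type(x)(to_np_sketch(v) for v in x)
    return np.asarray(x)

out = to_np_sketch({'a': [1, 2], 'b': (3.0, {'c': 4})})
print(out)  # {'a': [array(1), array(2)], 'b': (array(3.), {'c': array(4)})}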
Dj1312/EMpy
[ "4bf1b01acc683c2a5ffc8679fdd4a9790aacfed0", "4bf1b01acc683c2a5ffc8679fdd4a9790aacfed0" ]
[ "examples/ex_SRR.py", "EMpy/modesolvers/FD.py" ]
[ "\"\"\"Single ring resonator example.\"\"\"\n\nimport EMpy\nimport numpy\nimport pylab\n\nwls = numpy.linspace(1.53e-6, 1.56e-6, 1000)\nK1 = EMpy.devices.Coupler(wls, numpy.sqrt(0.08), 1.)\nK2 = EMpy.devices.Coupler(wls, numpy.sqrt(0.08), 1.)\nl1 = numpy.pi * 5e-6\nl2 = numpy.pi * 5e-6\nSWG = EMpy.devices.SWG(488, 220, 25).solve(wls)\nSRR = EMpy.devices.SRR(K1, K2, SWG.neff, l1, l2).solve()\n\npylab.plot(wls, numpy.absolute(SRR.THRU), 'r.-',\n wls, numpy.absolute(SRR.DROP), 'g.-')\npylab.axis('tight')\npylab.ylim([0, 1])\npylab.xlabel('wavelength /m')\npylab.ylabel('power')\npylab.legend(('THRU', 'DROP'))\npylab.show()\n\n", "# pylint: disable=line-too-long,too-many-locals,too-many-statements,too-many-branches\n# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import\n# pylint: disable=attribute-defined-outside-init,too-many-instance-attributes\n# pylint: disable=arguments-differ,too-many-arguments\n\"\"\"Finite Difference Modesolver.\n\n@see: Fallahkhair, \"Vector Finite Difference Modesolver for Anisotropic Dielectric Waveguides\",\n@see: JLT 2007 <http://www.photonics.umd.edu/wp-content/uploads/pubs/ja-20/Fallahkhair_JLT_26_1423_2008.pdf>}\n@see: DOI of above reference <http://doi.org/10.1109/JLT.2008.923643>\n@see: http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=12734&objectType=FILE\n\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import zip\nfrom builtins import str\nfrom builtins import range\n\nimport numpy\nimport scipy\nimport scipy.optimize\nimport EMpy.utils\nfrom EMpy.modesolvers.interface import *\n\n\nclass SVFDModeSolver(ModeSolver):\n\n \"\"\"\n This function calculates the modes of a dielectric waveguide\n using the semivectorial finite difference method.\n It is slightly faster than the full-vectorial VFDModeSolver,\n but it does not accept non-isotropic permittivity. For example,\n birefringent materials, which have\n different refractive indices along different dimensions cannot be used.\n It is adapted from the \"svmodes.m\" matlab code of Thomas Murphy and co-workers.\n https://www.mathworks.com/matlabcentral/fileexchange/12734-waveguide-mode-solver/content/svmodes.m\n\n Parameters\n ----------\n wl : float\n optical wavelength\n units are arbitrary, but must be self-consistent.\n I.e., just use micron for everything.\n x : 1D array of floats\n Array of x-values\n y : 1D array of floats\n Array of y-values\n epsfunc : function\n This is a function that provides the relative permittivity matrix\n (square of the refractive index) as a function of its x and y\n numpy.arrays (the function's input parameters). 
The function must be\n        of the form: ``myRelativePermittivity(x,y)``, where x and y are 2D\n        numpy \"meshgrid\" arrays that will be passed by this function.\n        The function returns a relative permittivity numpy.array of\n        shape( x.shape[0], y.shape[0] ) where each element of the array\n        is a single float, corresponding to an isotropic refractive index.\n        If an anisotropic refractive index is desired, the full-vectorial\n        VFDModeSolver function should be used.\n    boundary : str\n        This is a string that identifies the type of boundary conditions applied.\n        The following options are available:\n        'A' - Hx is antisymmetric, Hy is symmetric.\n        'S' - Hx is symmetric and Hy is antisymmetric.\n        '0' - Hx and Hy are zero immediately outside of the boundary.\n        The string identifies all four boundary conditions, in the order:\n        North, south, east, west.\n        For example, boundary='000A'\n\n    method : str\n        Must be 'Ex', 'Ey', or 'scalar';\n        this identifies the field that will be calculated.\n\n\n    Returns\n    -------\n    self : an instance of the SVFDModeSolver class\n        Typically self.solve() will be called in order to actually find the modes.\n\n    \"\"\"\n\n    def __init__(self, wl, x, y, epsfunc, boundary, method='Ex'):\n        self.wl = wl\n        self.x = x\n        self.y = y\n        self.epsfunc = epsfunc\n        self.boundary = boundary\n        self.method = method\n\n    def _get_eps(self, xc, yc):\n        eps = self.epsfunc(xc, yc)\n        eps = numpy.c_[eps[:, 0:1], eps, eps[:, -1:]]\n        eps = numpy.r_[eps[0:1, :], eps, eps[-1:, :]]\n        return eps\n\n    def build_matrix(self):\n\n        from scipy.sparse import coo_matrix\n\n        wl = self.wl\n        x = self.x\n        y = self.y\n        boundary = self.boundary\n        method = self.method\n\n        dx = numpy.diff(x)\n        dy = numpy.diff(y)\n\n        dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)\n        dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)\n\n        xc = (x[:-1] + x[1:]) / 2\n        yc = (y[:-1] + y[1:]) / 2\n        eps = self._get_eps(xc, yc)\n\n        nx = len(xc)\n        ny = len(yc)\n\n        self.nx = nx\n        self.ny = ny\n\n        k = 2 * numpy.pi / wl\n\n        ones_nx = numpy.ones((nx, 1))\n        ones_ny = numpy.ones((1, ny))\n\n        n = numpy.dot(ones_nx, 0.5 * (dy[:, 2:] + dy[:, 1:-1])).flatten()\n        s = numpy.dot(ones_nx, 0.5 * (dy[:, 0:-2] + dy[:, 1:-1])).flatten()\n        e = numpy.dot(0.5 * (dx[2:, :] + dx[1:-1, :]), ones_ny).flatten()\n        w = numpy.dot(0.5 * (dx[0:-2, :] + dx[1:-1, :]), ones_ny).flatten()\n        p = numpy.dot(dx[1:-1, :], ones_ny).flatten()\n        q = numpy.dot(ones_nx, dy[:, 1:-1]).flatten()\n\n        en = eps[1:-1, 2:].flatten()\n        es = eps[1:-1, 0:-2].flatten()\n        ee = eps[2:, 1:-1].flatten()\n        ew = eps[0:-2, 1:-1].flatten()\n        ep = eps[1:-1, 1:-1].flatten()\n\n        # three methods: Ex, Ey and scalar\n\n        if method == 'Ex':\n\n            # Ex\n\n            An = 2 / n / (n + s)\n            As = 2 / s / (n + s)\n            Ae = 8 * (p * (ep - ew) + 2 * w * ew) * ee / \\\n                ((p * (ep - ee) + 2 * e * ee) * (p ** 2 * (ep - ew) + 4 * w ** 2 * ew) +\n                 (p * (ep - ew) + 2 * w * ew) * (p ** 2 * (ep - ee) + 4 * e ** 2 * ee))\n            Aw = 8 * (p * (ep - ee) + 2 * e * ee) * ew / \\\n                ((p * (ep - ee) + 2 * e * ee) * (p ** 2 * (ep - ew) + 4 * w ** 2 * ew) +\n                 (p * (ep - ew) + 2 * w * ew) * (p ** 2 * (ep - ee) + 4 * e ** 2 * ee))\n            Ap = ep * k ** 2 - An - As - Ae * ep / ee - Aw * ep / ew\n\n        elif method == 'Ey':\n\n            # Ey\n\n            An = 8 * (q * (ep - es) + 2 * s * es) * en / \\\n                ((q * (ep - en) + 2 * n * en) * (q ** 2 * (ep - es) + 4 * s ** 2 * es) +\n                 (q * (ep - es) + 2 * s * es) * (q ** 2 * (ep - en) + 4 * n ** 2 * en))\n            As = 8 * (q * (ep - en) + 2 * n * en) * es / \\\n                ((q * (ep - en) + 2 * n * en) * (q ** 2 * (ep - es) + 4 * s ** 2 * es) +\n                 (q * (ep - es) + 2 * s * es) * 
(q ** 2 * (ep - en) + 4 * n ** 2 * en))\n            Ae = 2 / e / (e + w)\n            Aw = 2 / w / (e + w)\n            Ap = ep * k ** 2 - An * ep / en - As * ep / es - Ae - Aw\n\n        elif method == 'scalar':\n\n            # scalar\n\n            An = 2 / n / (n + s)\n            As = 2 / s / (n + s)\n            Ae = 2 / e / (e + w)\n            Aw = 2 / w / (e + w)\n            Ap = ep * k ** 2 - An - As - Ae - Aw\n\n        else:\n\n            raise ValueError('unknown method')\n\n        ii = numpy.arange(nx * ny).reshape(nx, ny)\n\n        # north boundary\n        ib = ii[:, -1]\n        if boundary[0] == 'S':\n            Ap[ib] += An[ib]\n        elif boundary[0] == 'A':\n            Ap[ib] -= An[ib]\n        # else:\n        #     raise ValueError('unknown boundary')\n\n        # south\n        ib = ii[:, 0]\n        if boundary[1] == 'S':\n            Ap[ib] += As[ib]\n        elif boundary[1] == 'A':\n            Ap[ib] -= As[ib]\n        # else:\n        #     raise ValueError('unknown boundary')\n\n        # east\n        ib = ii[-1, :]\n        if boundary[2] == 'S':\n            Ap[ib] += Ae[ib]\n        elif boundary[2] == 'A':\n            Ap[ib] -= Ae[ib]\n        # else:\n        #     raise ValueError('unknown boundary')\n\n        # west\n        ib = ii[0, :]\n        if boundary[3] == 'S':\n            Ap[ib] += Aw[ib]\n        elif boundary[3] == 'A':\n            Ap[ib] -= Aw[ib]\n        # else:\n        #     raise ValueError('unknown boundary')\n\n        iall = ii.flatten()\n        i_n = ii[:, 1:].flatten()\n        i_s = ii[:, :-1].flatten()\n        i_e = ii[1:, :].flatten()\n        i_w = ii[:-1, :].flatten()\n\n        I = numpy.r_[iall, i_w, i_e, i_s, i_n]\n        J = numpy.r_[iall, i_e, i_w, i_n, i_s]\n        V = numpy.r_[Ap[iall], Ae[i_w], Aw[i_e], An[i_s], As[i_n]]\n\n        A = coo_matrix((V, (I, J))).tocsr()\n\n        return A\n\n    def solve(self, neigs, tol):\n\n        from scipy.sparse.linalg import eigen\n\n        self.nmodes = neigs\n        self.tol = tol\n\n        A = self.build_matrix()\n\n        [eigvals, eigvecs] = eigen.eigs(A,\n                                        k=neigs,\n                                        which='LR',\n                                        tol=tol,\n                                        ncv=10 * neigs,\n                                        return_eigenvectors=True)\n\n        neff = self.wl * scipy.sqrt(eigvals) / (2 * numpy.pi)\n        phi = []\n        for ieig in range(neigs):\n            tmp = eigvecs[:, ieig].reshape(self.nx, self.ny)\n            phi.append(tmp)\n\n        # sort and save the modes\n        idx = numpy.flipud(numpy.argsort(neff))\n        self.neff = neff[idx]\n        tmp = []\n        for i in idx:\n            tmp.append(phi[i])\n\n        if self.method == 'scalar':\n            self.phi = tmp\n        elif self.method == 'Ex':\n            self.Ex = tmp\n        elif self.method == 'Ey':\n            self.Ey = tmp\n\n        return self\n\n    def __str__(self):\n        descr = (\n            'Semi-Vectorial Finite Difference Modesolver\\n\\tmethod: %s\\n' %\n            self.method)\n        return descr\n\n\nclass VFDModeSolver(ModeSolver):\n\n    \"\"\"\n    The VFDModeSolver class computes the electric and magnetic fields\n    for modes of a dielectric waveguide using the \"Vector Finite\n    Difference (VFD)\" method, as described in A. B. Fallahkhair,\n    K. S. Li and T. E. Murphy, \"Vector Finite Difference Modesolver\n    for Anisotropic Dielectric Waveguides\", J. Lightwave\n    Technol. 26(11), 1423-1431, (2008).\n\n\n    Parameters\n    ----------\n    wl : float\n        The wavelength of the optical radiation (units are arbitrary,\n        but must be self-consistent between all inputs. It is recommended to\n        just use microns for everything)\n    x : 1D array of floats\n        Array of x-values\n    y : 1D array of floats\n        Array of y-values\n    epsfunc : function\n        This is a function that provides the relative permittivity\n        matrix (square of the refractive index) as a function of its x\n        and y numpy.arrays (the function's input parameters). 
The\n        function must be of the form: ``myRelativePermittivity(x,y)``.\n        The function returns a relative permittivity numpy.array of\n        shape( x.shape[0], y.shape[0] ), where each element is a single\n        float, corresponding to an isotropic refractive index, or of\n        shape( x.shape[0], y.shape[0], 5 ), where the last dimension\n        describes the relative permittivity tensor in the form\n        (epsxx, epsxy, epsyx, epsyy, epszz).\n    boundary : str\n        This is a string that identifies the type of boundary\n        conditions applied.\n        The following options are available:\n        'A' - Hx is antisymmetric, Hy is symmetric.\n        'S' - Hx is symmetric and Hy is antisymmetric.\n        '0' - Hx and Hy are zero immediately outside of the boundary.\n        The string identifies all four boundary conditions, in the\n        order: North, south, east, west. For example, boundary='000A'\n\n    Returns\n    -------\n    self : an instance of the VFDModeSolver class\n        Typically self.solve() will be called in order to actually\n        find the modes.\n\n    \"\"\"\n\n    def __init__(self, wl, x, y, epsfunc, boundary):\n        self.wl = wl\n        self.x = x\n        self.y = y\n        self.epsfunc = epsfunc\n        self.boundary = boundary\n\n    def _get_eps(self, xc, yc):\n        tmp = self.epsfunc(xc, yc)\n\n        def _reshape(tmp):\n            \"\"\"\n            pads the array by duplicating edge values\n            \"\"\"\n            tmp = numpy.c_[tmp[:, 0:1], tmp, tmp[:, -1:]]\n            tmp = numpy.r_[tmp[0:1, :], tmp, tmp[-1:, :]]\n            return tmp\n\n        if tmp.ndim == 2:  # isotropic refractive index\n            tmp = _reshape(tmp)\n            epsxx = epsyy = epszz = tmp\n            epsxy = epsyx = numpy.zeros_like(epsxx)\n\n        elif tmp.ndim == 3:  # anisotropic refractive index\n            assert tmp.shape[2] == 5, 'eps must be NxMx5'\n            epsxx = _reshape(tmp[:, :, 0])\n            epsxy = _reshape(tmp[:, :, 1])\n            epsyx = _reshape(tmp[:, :, 2])\n            epsyy = _reshape(tmp[:, :, 3])\n            epszz = _reshape(tmp[:, :, 4])\n\n        else:\n            raise ValueError('Invalid eps')\n\n        return epsxx, epsxy, epsyx, epsyy, epszz\n\n    def build_matrix(self):\n\n        from scipy.sparse import coo_matrix\n\n        wl = self.wl\n        x = self.x\n        y = self.y\n        boundary = self.boundary\n\n        dx = numpy.diff(x)\n        dy = numpy.diff(y)\n\n        dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)\n        dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)\n\n        # Note: the permittivity is actually defined at the center of each\n        # region *between* the mesh points used for the H-field calculation.\n        # (See Fig. 
1 of Fallahkhair and Murphy)\n # In other words, eps is defined on (xc,yc) which is offset from\n # (x,y), the grid where H is calculated, by\n # \"half a pixel\" in the positive-x and positive-y directions.\n xc = (x[:-1] + x[1:]) / 2\n yc = (y[:-1] + y[1:]) / 2\n epsxx, epsxy, epsyx, epsyy, epszz = self._get_eps(xc, yc)\n\n nx = len(x)\n ny = len(y)\n\n self.nx = nx\n self.ny = ny\n\n k = 2 * numpy.pi / wl\n\n ones_nx = numpy.ones((nx, 1))\n ones_ny = numpy.ones((1, ny))\n\n # distance of mesh points to nearest neighbor mesh point:\n n = numpy.dot(ones_nx, dy[:, 1:]).flatten()\n s = numpy.dot(ones_nx, dy[:, :-1]).flatten()\n e = numpy.dot(dx[1:, :], ones_ny).flatten()\n w = numpy.dot(dx[:-1, :], ones_ny).flatten()\n\n # These define the permittivity (eps) tensor relative to each mesh point\n # using the following geometry:\n #\n # NW------N------NE\n # | | |\n # | 1 n 4 |\n # | | |\n # W---w---P---e---E\n # | | |\n # | 2 s 3 |\n # | | |\n # SW------S------SE\n\n exx1 = epsxx[:-1, 1:].flatten()\n exx2 = epsxx[:-1, :-1].flatten()\n exx3 = epsxx[1:, :-1].flatten()\n exx4 = epsxx[1:, 1:].flatten()\n\n eyy1 = epsyy[:-1, 1:].flatten()\n eyy2 = epsyy[:-1, :-1].flatten()\n eyy3 = epsyy[1:, :-1].flatten()\n eyy4 = epsyy[1:, 1:].flatten()\n\n exy1 = epsxy[:-1, 1:].flatten()\n exy2 = epsxy[:-1, :-1].flatten()\n exy3 = epsxy[1:, :-1].flatten()\n exy4 = epsxy[1:, 1:].flatten()\n\n eyx1 = epsyx[:-1, 1:].flatten()\n eyx2 = epsyx[:-1, :-1].flatten()\n eyx3 = epsyx[1:, :-1].flatten()\n eyx4 = epsyx[1:, 1:].flatten()\n\n ezz1 = epszz[:-1, 1:].flatten()\n ezz2 = epszz[:-1, :-1].flatten()\n ezz3 = epszz[1:, :-1].flatten()\n ezz4 = epszz[1:, 1:].flatten()\n\n ns21 = n * eyy2 + s * eyy1\n ns34 = n * eyy3 + s * eyy4\n ew14 = e * exx1 + w * exx4\n ew23 = e * exx2 + w * exx3\n\n # calculate the finite difference coefficients following\n # Fallahkhair and Murphy, Appendix Eqs 21 though 37\n\n axxn = ((2 * eyy4 * e - eyx4 * n) * (eyy3 / ezz4) / ns34 +\n (2 * eyy1 * w + eyx1 * n) * (eyy2 / ezz1) / ns21) / (n * (e + w))\n axxs = ((2 * eyy3 * e + eyx3 * s) * (eyy4 / ezz3) / ns34 +\n (2 * eyy2 * w - eyx2 * s) * (eyy1 / ezz2) / ns21) / (s * (e + w))\n ayye = (2 * n * exx4 - e * exy4) * exx1 / ezz4 / e / ew14 / \\\n (n + s) + (2 * s * exx3 + e * exy3) * \\\n exx2 / ezz3 / e / ew23 / (n + s)\n ayyw = (2 * exx1 * n + exy1 * w) * exx4 / ezz1 / w / ew14 / \\\n (n + s) + (2 * exx2 * s - exy2 * w) * \\\n exx3 / ezz2 / w / ew23 / (n + s)\n axxe = 2 / (e * (e + w)) + \\\n (eyy4 * eyx3 / ezz3 - eyy3 * eyx4 / ezz4) / (e + w) / ns34\n axxw = 2 / (w * (e + w)) + \\\n (eyy2 * eyx1 / ezz1 - eyy1 * eyx2 / ezz2) / (e + w) / ns21\n ayyn = 2 / (n * (n + s)) + \\\n (exx4 * exy1 / ezz1 - exx1 * exy4 / ezz4) / (n + s) / ew14\n ayys = 2 / (s * (n + s)) + \\\n (exx2 * exy3 / ezz3 - exx3 * exy2 / ezz2) / (n + s) / ew23\n\n axxne = +eyx4 * eyy3 / ezz4 / (e + w) / ns34\n axxse = -eyx3 * eyy4 / ezz3 / (e + w) / ns34\n axxnw = -eyx1 * eyy2 / ezz1 / (e + w) / ns21\n axxsw = +eyx2 * eyy1 / ezz2 / (e + w) / ns21\n\n ayyne = +exy4 * exx1 / ezz4 / (n + s) / ew14\n ayyse = -exy3 * exx2 / ezz3 / (n + s) / ew23\n ayynw = -exy1 * exx4 / ezz1 / (n + s) / ew14\n ayysw = +exy2 * exx3 / ezz2 / (n + s) / ew23\n\n axxp = -axxn - axxs - axxe - axxw - axxne - axxse - axxnw - axxsw + k ** 2 * \\\n (n + s) * \\\n (eyy4 * eyy3 * e / ns34 + eyy1 * eyy2 * w / ns21) / (e + w)\n ayyp = -ayyn - ayys - ayye - ayyw - ayyne - ayyse - ayynw - ayysw + k ** 2 * \\\n (e + w) * \\\n (exx1 * exx4 * n / ew14 + exx2 * exx3 * s / ew23) / (n + s)\n axyn = (eyy3 * eyy4 / ezz4 / ns34 - 
eyy2 * eyy1 / ezz1 /\n ns21 + s * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34) / (e + w)\n axys = (eyy1 * eyy2 / ezz2 / ns21 - eyy4 * eyy3 / ezz3 /\n ns34 + n * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34) / (e + w)\n ayxe = (exx1 * exx4 / ezz4 / ew14 - exx2 * exx3 / ezz3 /\n ew23 + w * (exx2 * exx4 - exx1 * exx3) / ew23 / ew14) / (n + s)\n ayxw = (exx3 * exx2 / ezz2 / ew23 - exx4 * exx1 / ezz1 /\n ew14 + e * (exx4 * exx2 - exx1 * exx3) / ew23 / ew14) / (n + s)\n\n axye = (eyy4 * (1 + eyy3 / ezz4) - eyy3 * (1 + eyy4 / ezz4)) / ns34 / (e + w) - \\\n (2 * eyx1 * eyy2 / ezz1 * n * w / ns21 +\n 2 * eyx2 * eyy1 / ezz2 * s * w / ns21 +\n 2 * eyx4 * eyy3 / ezz4 * n * e / ns34 +\n 2 * eyx3 * eyy4 / ezz3 * s * e / ns34 +\n 2 * eyy1 * eyy2 * (1. / ezz1 - 1. / ezz2) * w ** 2 / ns21) / e / (e + w) ** 2\n\n axyw = (eyy2 * (1 + eyy1 / ezz2) - eyy1 * (1 + eyy2 / ezz2)) / ns21 / (e + w) - \\\n (2 * eyx1 * eyy2 / ezz1 * n * e / ns21 +\n 2 * eyx2 * eyy1 / ezz2 * s * e / ns21 +\n 2 * eyx4 * eyy3 / ezz4 * n * w / ns34 +\n 2 * eyx3 * eyy4 / ezz3 * s * w / ns34 +\n 2 * eyy3 * eyy4 * (1. / ezz3 - 1. / ezz4) * e ** 2 / ns34) / w / (e + w) ** 2\n\n ayxn = (exx4 * (1 + exx1 / ezz4) - exx1 * (1 + exx4 / ezz4)) / ew14 / (n + s) - \\\n (2 * exy3 * exx2 / ezz3 * e * s / ew23 +\n 2 * exy2 * exx3 / ezz2 * w * n / ew23 +\n 2 * exy4 * exx1 / ezz4 * e * s / ew14 +\n 2 * exy1 * exx4 / ezz1 * w * n / ew14 +\n 2 * exx3 * exx2 * (1. / ezz3 - 1. / ezz2) * s ** 2 / ew23) / n / (n + s) ** 2\n\n ayxs = (exx2 * (1 + exx3 / ezz2) - exx3 * (1 + exx2 / ezz2)) / ew23 / (n + s) - \\\n (2 * exy3 * exx2 / ezz3 * e * n / ew23 +\n 2 * exy2 * exx3 / ezz2 * w * n / ew23 +\n 2 * exy4 * exx1 / ezz4 * e * s / ew14 +\n 2 * exy1 * exx4 / ezz1 * w * s / ew14 +\n 2 * exx1 * exx4 * (1. / ezz1 - 1. / ezz4) * n ** 2 / ew14) / s / (n + s) ** 2\n\n axyne = +eyy3 * (1 - eyy4 / ezz4) / (e + w) / ns34\n axyse = -eyy4 * (1 - eyy3 / ezz3) / (e + w) / ns34\n axynw = -eyy2 * (1 - eyy1 / ezz1) / (e + w) / ns21\n axysw = +eyy1 * (1 - eyy2 / ezz2) / (e + w) / ns21\n\n ayxne = +exx1 * (1 - exx4 / ezz4) / (n + s) / ew14\n ayxse = -exx2 * (1 - exx3 / ezz3) / (n + s) / ew23\n ayxnw = -exx4 * (1 - exx1 / ezz1) / (n + s) / ew14\n ayxsw = +exx3 * (1 - exx2 / ezz2) / (n + s) / ew23\n\n axyp = -(axyn + axys + axye + axyw + axyne + axyse + axynw + axysw) - k ** 2 * (w * (n * eyx1 *\n eyy2 + s * eyx2 * eyy1) / ns21 + e * (s * eyx3 * eyy4 + n * eyx4 * eyy3) / ns34) / (e + w)\n ayxp = -(ayxn + ayxs + ayxe + ayxw + ayxne + ayxse + ayxnw + ayxsw) - k ** 2 * (n * (w * exy1 *\n exx4 + e * exy4 * exx1) / ew14 + s * (w * exy2 * exx3 + e * exy3 * exx2) / ew23) / (n + s)\n\n ii = numpy.arange(nx * ny).reshape(nx, ny)\n\n # NORTH boundary\n\n ib = ii[:, -1]\n\n if boundary[0] == 'S':\n sign = 1\n elif boundary[0] == 'A':\n sign = -1\n elif boundary[0] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n axxs[ib] += sign * axxn[ib]\n axxse[ib] += sign * axxne[ib]\n axxsw[ib] += sign * axxnw[ib]\n ayxs[ib] += sign * ayxn[ib]\n ayxse[ib] += sign * ayxne[ib]\n ayxsw[ib] += sign * ayxnw[ib]\n ayys[ib] -= sign * ayyn[ib]\n ayyse[ib] -= sign * ayyne[ib]\n ayysw[ib] -= sign * ayynw[ib]\n axys[ib] -= sign * axyn[ib]\n axyse[ib] -= sign * axyne[ib]\n axysw[ib] -= sign * axynw[ib]\n\n # SOUTH boundary\n\n ib = ii[:, 0]\n\n if boundary[1] == 'S':\n sign = 1\n elif boundary[1] == 'A':\n sign = -1\n elif boundary[1] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n axxn[ib] += sign * axxs[ib]\n axxne[ib] += sign * axxse[ib]\n axxnw[ib] += sign * 
axxsw[ib]\n ayxn[ib] += sign * ayxs[ib]\n ayxne[ib] += sign * ayxse[ib]\n ayxnw[ib] += sign * ayxsw[ib]\n ayyn[ib] -= sign * ayys[ib]\n ayyne[ib] -= sign * ayyse[ib]\n ayynw[ib] -= sign * ayysw[ib]\n axyn[ib] -= sign * axys[ib]\n axyne[ib] -= sign * axyse[ib]\n axynw[ib] -= sign * axysw[ib]\n\n # EAST boundary\n\n ib = ii[-1, :]\n\n if boundary[2] == 'S':\n sign = 1\n elif boundary[2] == 'A':\n sign = -1\n elif boundary[2] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n axxw[ib] += sign * axxe[ib]\n axxnw[ib] += sign * axxne[ib]\n axxsw[ib] += sign * axxse[ib]\n ayxw[ib] += sign * ayxe[ib]\n ayxnw[ib] += sign * ayxne[ib]\n ayxsw[ib] += sign * ayxse[ib]\n ayyw[ib] -= sign * ayye[ib]\n ayynw[ib] -= sign * ayyne[ib]\n ayysw[ib] -= sign * ayyse[ib]\n axyw[ib] -= sign * axye[ib]\n axynw[ib] -= sign * axyne[ib]\n axysw[ib] -= sign * axyse[ib]\n\n # WEST boundary\n\n ib = ii[0, :]\n\n if boundary[3] == 'S':\n sign = 1\n elif boundary[3] == 'A':\n sign = -1\n elif boundary[3] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n axxe[ib] += sign * axxw[ib]\n axxne[ib] += sign * axxnw[ib]\n axxse[ib] += sign * axxsw[ib]\n ayxe[ib] += sign * ayxw[ib]\n ayxne[ib] += sign * ayxnw[ib]\n ayxse[ib] += sign * ayxsw[ib]\n ayye[ib] -= sign * ayyw[ib]\n ayyne[ib] -= sign * ayynw[ib]\n ayyse[ib] -= sign * ayysw[ib]\n axye[ib] -= sign * axyw[ib]\n axyne[ib] -= sign * axynw[ib]\n axyse[ib] -= sign * axysw[ib]\n\n # Assemble sparse matrix\n\n iall = ii.flatten()\n i_s = ii[:, :-1].flatten()\n i_n = ii[:, 1:].flatten()\n i_e = ii[1:, :].flatten()\n i_w = ii[:-1, :].flatten()\n i_ne = ii[1:, 1:].flatten()\n i_se = ii[1:, :-1].flatten()\n i_sw = ii[:-1, :-1].flatten()\n i_nw = ii[:-1, 1:].flatten()\n\n Ixx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]\n Jxx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]\n Vxx = numpy.r_[axxp[iall], axxe[i_w], axxw[i_e], axxn[i_s], axxs[\n i_n], axxsw[i_ne], axxnw[i_se], axxne[i_sw], axxse[i_nw]]\n\n Ixy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]\n Jxy = numpy.r_[\n iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny\n Vxy = numpy.r_[axyp[iall], axye[i_w], axyw[i_e], axyn[i_s], axys[\n i_n], axysw[i_ne], axynw[i_se], axyne[i_sw], axyse[i_nw]]\n\n Iyx = numpy.r_[\n iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny\n Jyx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]\n Vyx = numpy.r_[ayxp[iall], ayxe[i_w], ayxw[i_e], ayxn[i_s], ayxs[\n i_n], ayxsw[i_ne], ayxnw[i_se], ayxne[i_sw], ayxse[i_nw]]\n\n Iyy = numpy.r_[\n iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny\n Jyy = numpy.r_[\n iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny\n Vyy = numpy.r_[ayyp[iall], ayye[i_w], ayyw[i_e], ayyn[i_s], ayys[\n i_n], ayysw[i_ne], ayynw[i_se], ayyne[i_sw], ayyse[i_nw]]\n\n I = numpy.r_[Ixx, Ixy, Iyx, Iyy]\n J = numpy.r_[Jxx, Jxy, Jyx, Jyy]\n V = numpy.r_[Vxx, Vxy, Vyx, Vyy]\n A = coo_matrix((V, (I, J))).tocsr()\n\n return A\n\n def compute_other_fields(self, neffs, Hxs, Hys):\n\n from scipy.sparse import coo_matrix\n\n wl = self.wl\n x = self.x\n y = self.y\n boundary = self.boundary\n\n Hzs = []\n Exs = []\n Eys = []\n Ezs = []\n for neff, Hx, Hy in zip(neffs, Hxs, Hys):\n\n dx = numpy.diff(x)\n dy = numpy.diff(y)\n\n dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)\n dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)\n\n xc = (x[:-1] + x[1:]) / 2\n yc = (y[:-1] + y[1:]) / 2\n epsxx, epsxy, epsyx, epsyy, epszz = self._get_eps(xc, yc)\n\n nx = 
len(x)\n ny = len(y)\n\n k = 2 * numpy.pi / wl\n\n ones_nx = numpy.ones((nx, 1))\n ones_ny = numpy.ones((1, ny))\n\n n = numpy.dot(ones_nx, dy[:, 1:]).flatten()\n s = numpy.dot(ones_nx, dy[:, :-1]).flatten()\n e = numpy.dot(dx[1:, :], ones_ny).flatten()\n w = numpy.dot(dx[:-1, :], ones_ny).flatten()\n\n exx1 = epsxx[:-1, 1:].flatten()\n exx2 = epsxx[:-1, :-1].flatten()\n exx3 = epsxx[1:, :-1].flatten()\n exx4 = epsxx[1:, 1:].flatten()\n\n eyy1 = epsyy[:-1, 1:].flatten()\n eyy2 = epsyy[:-1, :-1].flatten()\n eyy3 = epsyy[1:, :-1].flatten()\n eyy4 = epsyy[1:, 1:].flatten()\n\n exy1 = epsxy[:-1, 1:].flatten()\n exy2 = epsxy[:-1, :-1].flatten()\n exy3 = epsxy[1:, :-1].flatten()\n exy4 = epsxy[1:, 1:].flatten()\n\n eyx1 = epsyx[:-1, 1:].flatten()\n eyx2 = epsyx[:-1, :-1].flatten()\n eyx3 = epsyx[1:, :-1].flatten()\n eyx4 = epsyx[1:, 1:].flatten()\n\n ezz1 = epszz[:-1, 1:].flatten()\n ezz2 = epszz[:-1, :-1].flatten()\n ezz3 = epszz[1:, :-1].flatten()\n ezz4 = epszz[1:, 1:].flatten()\n\n b = neff * k\n\n bzxne = (0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * eyx4 / ezz4 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy3 * eyy1 * w * eyy2 +\n 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (1 - exx4 / ezz4) / ezz3 / ezz2 / (w * exx3 + e * exx2) / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx1 * s) / b\n\n bzxse = (-0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * eyx3 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy1 * w * eyy2 +\n 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (1 - exx3 / ezz3) / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * n * exx1 * exx4) / b\n\n bzxnw = (-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * eyx1 / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy2 * e -\n 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (1 - exx1 / ezz1) / ezz3 / ezz2 / (w * exx3 + e * exx2) / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx4 * s) / b\n\n bzxsw = (0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * eyx2 / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * e -\n 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (1 - exx2 / ezz2) / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx3 * n * exx1 * exx4) / b\n\n bzxn = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * n * ezz1 * ezz2 / eyy1 * (2 * eyy1 / ezz1 / n ** 2 + eyx1 / ezz1 / n / w) + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * n * ezz4 * ezz3 / eyy4 * (2 * eyy4 / ezz4 / n ** 2 - eyx4 / ezz4 / n / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * ((1 - exx1 / ezz1) / n / w - exy1 / ezz1 *\n (2. / n ** 2 - 2 / n ** 2 * s / (n + s))) / exx1 * ezz1 * w + (ezz4 - ezz1) * s / n / (n + s) + 0.5 * ezz1 * (-(1 - exx4 / ezz4) / n / e - exy4 / ezz4 * (2. 
/ n ** 2 - 2 / n ** 2 * s / (n + s))) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (-ezz3 * exy2 / n / (n + s) / exx2 * w + (ezz3 - ezz2) * s / n / (n + s) - ezz2 * exy3 / n / (n + s) / exx3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzxs = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * s * ezz2 * ezz1 / eyy2 * (2 * eyy2 / ezz2 / s ** 2 - eyx2 / ezz2 / s / w) + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * s * ezz3 * ezz4 / eyy3 * (2 * eyy3 / ezz3 / s ** 2 + eyx3 / ezz3 / s / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (-ezz4 * exy1 / s / (n + s) / exx1 * w - (ezz4 - ezz1)\n * n / s / (n + s) - ezz1 * exy4 / s / (n + s) / exx4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-(1 - exx2 / ezz2) / s / w - exy2 / ezz2 * (2. / s ** 2 - 2 / s ** 2 * n / (n + s))) / exx2 * ezz2 * w - (ezz3 - ezz2) * n / s / (n + s) + 0.5 * ezz2 * ((1 - exx3 / ezz3) / s / e - exy3 / ezz3 * (2. / s ** 2 - 2 / s ** 2 * n / (n + s))) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzxe = ((n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (2. / e ** 2 - eyx4 / ezz4 / n / e) + 0.5 * s * ezz3 * ezz4 / eyy3 * (2. / e ** 2 + eyx3 / ezz3 / s / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e +\n (-0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz1 * (1 - exx4 / ezz4) / n / exx4 * ezz4 - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz2 * (1 - exx3 / ezz3) / s / exx3 * ezz3) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzxw = ((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (2. / w ** 2 + eyx1 / ezz1 / n / w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (2. / w ** 2 - eyx2 / ezz2 / s / w)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e +\n (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz4 * (1 - exx1 / ezz1) / n / exx1 * ezz1 + 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz3 * (1 - exx2 / ezz2) / s / exx2 * ezz2) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzxp = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (-2. / w ** 2 - 2 * eyy1 / ezz1 / n ** 2 + k ** 2 * eyy1 - eyx1 / ezz1 / n / w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (-2. / w ** 2 - 2 * eyy2 / ezz2 / s ** 2 + k ** 2 * eyy2 + eyx2 / ezz2 / s / w)) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-2. / e ** 2 - 2 * eyy4 / ezz4 / n ** 2 + k ** 2 * eyy4 + eyx4 / ezz4 / n / e) + 0.5 * s * ezz3 * ezz4 / eyy3 * (-2. / e ** 2 - 2 * eyy3 / ezz3 / s ** 2 + k ** 2 * eyy3 - eyx3 / ezz3 / s / e))) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (-k **\n 2 * exy1 - (1 - exx1 / ezz1) / n / w - exy1 / ezz1 * (-2. 
/ n ** 2 - 2 / n ** 2 * (n - s) / s)) / exx1 * ezz1 * w + (ezz4 - ezz1) * (n - s) / n / s + 0.5 * ezz1 * (-k ** 2 * exy4 + (1 - exx4 / ezz4) / n / e - exy4 / ezz4 * (-2. / n ** 2 - 2 / n ** 2 * (n - s) / s)) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-k ** 2 * exy2 + (1 - exx2 / ezz2) / s / w - exy2 / ezz2 * (-2. / s ** 2 + 2 / s ** 2 * (n - s) / n)) / exx2 * ezz2 * w + (ezz3 - ezz2) * (n - s) / n / s + 0.5 * ezz2 * (-k ** 2 * exy3 - (1 - exx3 / ezz3) / s / e - exy3 / ezz3 * (-2. / s ** 2 + 2 / s ** 2 * (n - s) / n)) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzyne = (0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (1 - eyy4 / ezz4) / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy3 * eyy1 * w *\n eyy2 + 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * exy4 / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx1 * s) / b\n\n bzyse = (-0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (1 - eyy3 / ezz3) / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy1 * w *\n eyy2 + 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * exy3 / ezz3 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * n * exx1 * exx4) / b\n\n bzynw = (-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (1 - eyy1 / ezz1) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 *\n eyy2 * e - 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * exy1 / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx4 * s) / b\n\n bzysw = (0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (1 - eyy2 / ezz2) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 *\n e - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * exy2 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx3 * n * exx1 * exx4) / b\n\n bzyn = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * ezz1 * ezz2 / eyy1 * (1 - eyy1 / ezz1) / w - 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * ezz4 * ezz3 / eyy4 * (1 - eyy4 / ezz4) / e) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w *\n eyy2 * e + (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (2. / n ** 2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w + 0.5 * ezz1 * (2. / n ** 2 - exy4 / ezz4 / n / e) / exx4 * ezz4 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzys = ((-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * ezz2 * ezz1 / eyy2 * (1 - eyy2 / ezz2) / w + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * ezz3 * ezz4 / eyy3 * (1 - eyy3 / ezz3) / e) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w *\n eyy2 * e - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (2. / s ** 2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w + 0.5 * ezz2 * (2. 
/ s ** 2 + exy3 / ezz3 / s / e) / exx3 * ezz3 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzye = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (-n * ezz2 / eyy1 * eyx1 / e / (e + w) + (ezz1 - ezz2) * w / e / (e + w) - s * ezz1 / eyy2 * eyx2 / e / (e + w)) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-(1 - eyy4 / ezz4) / n / e - eyx4 / ezz4 * (2. / e ** 2 - 2 / e ** 2 * w / (e + w))) + 0.5 * s * ezz3 * ezz4 / eyy3 * ((1 - eyy3 / ezz3) / s / e - eyx3 / ezz3 * (2. / e ** 2 - 2 / e ** 2 * w / (e + w))) + (ezz4 - ezz3) * w / e / (e + w))) / ezz4 /\n ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz1 * (2 * exx4 / ezz4 / e ** 2 - exy4 / ezz4 / n / e) / exx4 * ezz4 * e - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz2 * (2 * exx3 / ezz3 / e ** 2 + exy3 / ezz3 / s / e) / exx3 * ezz3 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzyw = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * ((1 - eyy1 / ezz1) / n / w - eyx1 / ezz1 * (2. / w ** 2 - 2 / w ** 2 * e / (e + w))) - (ezz1 - ezz2) * e / w / (e + w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (-(1 - eyy2 / ezz2) / s / w - eyx2 / ezz2 * (2. / w ** 2 - 2 / w ** 2 * e / (e + w)))) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (-n * ezz3 / eyy4 * eyx4 / w / (e + w) - s * ezz4 / eyy3 * eyx3 / w / (e + w) - (ezz4 - ezz3) * e / w / (e + w))) / ezz4 /\n ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz4 * (2 * exx1 / ezz1 / w ** 2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz3 * (2 * exx2 / ezz2 / w ** 2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n bzyp = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (-k ** 2 * eyx1 - (1 - eyy1 / ezz1) / n / w - eyx1 / ezz1 * (-2. / w ** 2 + 2 / w ** 2 * (e - w) / e)) + (ezz1 - ezz2) * (e - w) / e / w + 0.5 * s * ezz2 * ezz1 / eyy2 * (-k ** 2 * eyx2 + (1 - eyy2 / ezz2) / s / w - eyx2 / ezz2 * (-2. / w ** 2 + 2 / w ** 2 * (e - w) / e))) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-k ** 2 * eyx4 + (1 - eyy4 / ezz4) / n / e - eyx4 / ezz4 * (-2. / e ** 2 - 2 / e ** 2 * (e - w) / w)) + 0.5 * s * ezz3 * ezz4 / eyy3 * (-k ** 2 * eyx3 - (1 - eyy3 / ezz3) / s / e - eyx3 / ezz3 * (-2. / e ** 2 - 2 / e ** 2 * (e - w) / w)) + (ezz4 - ezz3) * (e - w) / e / w)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) /\n ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (-2. / n ** 2 - 2 * exx1 / ezz1 / w ** 2 + k ** 2 * exx1 - exy1 / ezz1 / n / w) / exx1 * ezz1 * w + 0.5 * ezz1 * (-2. / n ** 2 - 2 * exx4 / ezz4 / e ** 2 + k ** 2 * exx4 + exy4 / ezz4 / n / e) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-2. / s ** 2 - 2 * exx2 / ezz2 / w ** 2 + k ** 2 * exx2 + exy2 / ezz2 / s / w) / exx2 * ezz2 * w + 0.5 * ezz2 * (-2. 
/ s ** 2 - 2 * exx3 / ezz3 / e ** 2 + k ** 2 * exx3 - exy3 / ezz3 / s / e) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b\n\n ii = numpy.arange(nx * ny).reshape(nx, ny)\n\n # NORTH boundary\n\n ib = ii[:, -1]\n\n if boundary[0] == 'S':\n sign = 1\n elif boundary[0] == 'A':\n sign = -1\n elif boundary[0] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n bzxs[ib] += sign * bzxn[ib]\n bzxse[ib] += sign * bzxne[ib]\n bzxsw[ib] += sign * bzxnw[ib]\n bzys[ib] -= sign * bzyn[ib]\n bzyse[ib] -= sign * bzyne[ib]\n bzysw[ib] -= sign * bzynw[ib]\n\n # SOUTH boundary\n\n ib = ii[:, 0]\n\n if boundary[1] == 'S':\n sign = 1\n elif boundary[1] == 'A':\n sign = -1\n elif boundary[1] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n bzxn[ib] += sign * bzxs[ib]\n bzxne[ib] += sign * bzxse[ib]\n bzxnw[ib] += sign * bzxsw[ib]\n bzyn[ib] -= sign * bzys[ib]\n bzyne[ib] -= sign * bzyse[ib]\n bzynw[ib] -= sign * bzysw[ib]\n\n # EAST boundary\n\n ib = ii[-1, :]\n\n if boundary[2] == 'S':\n sign = 1\n elif boundary[2] == 'A':\n sign = -1\n elif boundary[2] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n bzxw[ib] += sign * bzxe[ib]\n bzxnw[ib] += sign * bzxne[ib]\n bzxsw[ib] += sign * bzxse[ib]\n bzyw[ib] -= sign * bzye[ib]\n bzynw[ib] -= sign * bzyne[ib]\n bzysw[ib] -= sign * bzyse[ib]\n\n # WEST boundary\n\n ib = ii[0, :]\n\n if boundary[3] == 'S':\n sign = 1\n elif boundary[3] == 'A':\n sign = -1\n elif boundary[3] == '0':\n sign = 0\n else:\n raise ValueError('unknown boundary conditions')\n\n bzxe[ib] += sign * bzxw[ib]\n bzxne[ib] += sign * bzxnw[ib]\n bzxse[ib] += sign * bzxsw[ib]\n bzye[ib] -= sign * bzyw[ib]\n bzyne[ib] -= sign * bzynw[ib]\n bzyse[ib] -= sign * bzysw[ib]\n\n # Assemble sparse matrix\n\n iall = ii.flatten()\n i_s = ii[:, :-1].flatten()\n i_n = ii[:, 1:].flatten()\n i_e = ii[1:, :].flatten()\n i_w = ii[:-1, :].flatten()\n i_ne = ii[1:, 1:].flatten()\n i_se = ii[1:, :-1].flatten()\n i_sw = ii[:-1, :-1].flatten()\n i_nw = ii[:-1, 1:].flatten()\n\n Izx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]\n Jzx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]\n Vzx = numpy.r_[bzxp[iall], bzxe[i_w], bzxw[i_e], bzxn[i_s], bzxs[\n i_n], bzxsw[i_ne], bzxnw[i_se], bzxne[i_sw], bzxse[i_nw]]\n\n Izy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]\n Jzy = numpy.r_[\n iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny\n Vzy = numpy.r_[bzyp[iall], bzye[i_w], bzyw[i_e], bzyn[i_s], bzys[\n i_n], bzysw[i_ne], bzynw[i_se], bzyne[i_sw], bzyse[i_nw]]\n\n I = numpy.r_[Izx, Izy]\n J = numpy.r_[Jzx, Jzy]\n V = numpy.r_[Vzx, Vzy]\n B = coo_matrix((V, (I, J))).tocsr()\n\n HxHy = numpy.r_[Hx, Hy]\n Hz = B * HxHy.ravel() / 1j\n Hz = Hz.reshape(Hx.shape)\n\n # in xc e yc\n exx = epsxx[1:-1, 1:-1]\n exy = epsxy[1:-1, 1:-1]\n eyx = epsyx[1:-1, 1:-1]\n eyy = epsyy[1:-1, 1:-1]\n ezz = epszz[1:-1, 1:-1]\n edet = (exx * eyy - exy * eyx)\n\n h = e.reshape(nx, ny)[:-1, :-1]\n v = n.reshape(nx, ny)[:-1, :-1]\n\n # in xc e yc\n Dx = neff * EMpy.utils.centered2d(Hy) + (\n Hz[:-1, 1:] + Hz[1:, 1:] - Hz[:-1, :-1] - Hz[1:, :-1]) / (2j * k * v)\n Dy = -neff * EMpy.utils.centered2d(Hx) - (\n Hz[1:, :-1] + Hz[1:, 1:] - Hz[:-1, 1:] - Hz[:-1, :-1]) / (2j * k * h)\n Dz = ((Hy[1:, :-1] + Hy[1:, 1:] - Hy[:-1, 1:] - Hy[:-1, :-1]) / (2 * h) -\n (Hx[:-1, 1:] + Hx[1:, 1:] - Hx[:-1, :-1] - Hx[1:, :-1]) / (2 * v)) 
/ (1j * k)\n\n            Ex = (eyy * Dx - exy * Dy) / edet\n            Ey = (exx * Dy - eyx * Dx) / edet\n            Ez = Dz / ezz\n\n            Hzs.append(Hz)\n            Exs.append(Ex)\n            Eys.append(Ey)\n            Ezs.append(Ez)\n\n        return (Hzs, Exs, Eys, Ezs)\n\n    def solve(self, neigs=4, tol=0, guess=None):\n        \"\"\"\n        This function finds the eigenmodes.\n\n        Parameters\n        ----------\n        neigs : int\n            number of eigenmodes to find\n        tol : float\n            Relative accuracy for eigenvalues.\n            The default value of 0 implies machine precision.\n        guess : float\n            A guess for the refractive index.\n            The modesolver will only find eigenvectors with an\n            effective refractive index higher than this value.\n\n        Returns\n        -------\n        self : an instance of the VFDModeSolver class\n            obtain the fields of interest for specific modes using, for example:\n            solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()\n            Ex = solver.modes[0].Ex\n            Ey = solver.modes[0].Ey\n            Ez = solver.modes[0].Ez\n        \"\"\"\n\n        from scipy.sparse.linalg import eigen\n\n        self.nmodes = neigs\n        self.tol = tol\n\n        A = self.build_matrix()\n\n        if guess is not None:\n            # calculate shift for eigs function\n            k = 2 * numpy.pi / self.wl\n            shift = (guess * k) ** 2\n        else:\n            shift = None\n\n        # Here is where the actual mode-solving takes place!\n        [eigvals, eigvecs] = eigen.eigs(A,\n                                        k=neigs,\n                                        which='LR',\n                                        tol=tol,\n                                        ncv=10*neigs,\n                                        return_eigenvectors=True,\n                                        sigma=shift)\n\n        neffs = self.wl * scipy.sqrt(eigvals) / (2 * numpy.pi)\n        Hxs = []\n        Hys = []\n        nx = self.nx\n        ny = self.ny\n        for ieig in range(neigs):\n            Hxs.append(eigvecs[:nx * ny, ieig].reshape(nx, ny))\n            Hys.append(eigvecs[nx * ny:, ieig].reshape(nx, ny))\n\n        # sort the modes\n        idx = numpy.flipud(numpy.argsort(neffs))\n        neffs = neffs[idx]\n        tmpx = []\n        tmpy = []\n        for i in idx:\n            tmpx.append(Hxs[i])\n            tmpy.append(Hys[i])\n        Hxs = tmpx\n        Hys = tmpy\n\n        [Hzs, Exs, Eys, Ezs] = self.compute_other_fields(neffs, Hxs, Hys)\n\n        self.modes = []\n        for (neff, Hx, Hy, Hz, Ex, Ey, Ez) in zip(neffs, Hxs, Hys, Hzs, Exs, Eys, Ezs):\n            self.modes.append(\n                FDMode(self.wl, self.x, self.y, neff, Ex, Ey, Ez, Hx, Hy, Hz).normalize())\n\n        return self\n\n    def save_modes_for_FDTD(self, x=None, y=None):\n        for im, m in enumerate(self.modes):\n            m.save_for_FDTD(str(im), x, y)\n\n    def __str__(self):\n        descr = 'Vectorial Finite Difference Modesolver\\n'\n        return descr\n\n\nclass FDMode(Mode):\n\n    def __init__(self, wl, x, y, neff, Ex, Ey, Ez, Hx, Hy, Hz):\n        self.wl = wl\n        self.x = x\n        self.y = y\n        self.neff = neff\n        self.Ex = Ex\n        self.Ey = Ey\n        self.Ez = Ez\n        self.Hx = Hx\n        self.Hy = Hy\n        self.Hz = Hz\n\n    def get_x(self, n=None):\n        if n is None:\n            return self.x\n        return numpy.linspace(self.x[0], self.x[-1], n)\n\n    def get_y(self, n=None):\n        if n is None:\n            return self.y\n        return numpy.linspace(self.y[0], self.y[-1], n)\n\n    def get_field(self, fname, x=None, y=None):\n\n        if fname == 'Ex':\n            f = self.Ex\n            centered = True\n        elif fname == 'Ey':\n            f = self.Ey\n            centered = True\n        elif fname == 'Ez':\n            f = self.Ez\n            centered = True\n        elif fname == 'Hx':\n            f = self.Hx\n            centered = False\n        elif fname == 'Hy':\n            f = self.Hy\n            centered = False\n        elif fname == 'Hz':\n            f = self.Hz\n            centered = False\n        else:\n            # guard against unknown field names, which previously fell\n            # through and raised an UnboundLocalError below\n            raise ValueError('unknown field: %s' % fname)\n\n        if (x is None) and (y is None):\n            return f\n\n        if not centered:\n            # magnetic fields are not centered\n            x0 = self.x\n            y0 = self.y\n        else:\n            # electric fields and intensity are centered\n            x0 = EMpy.utils.centered1d(self.x)\n            y0 = EMpy.utils.centered1d(self.y)\n\n        return EMpy.utils.interp2(x, y, x0, y0, f)\n\n    def intensityTETM(self, x=None, y=None):\n        I_TE = self.Ex * 
EMpy.utils.centered2d(numpy.conj(self.Hy)) / 2.\n        I_TM = -self.Ey * EMpy.utils.centered2d(numpy.conj(self.Hx)) / 2.\n        if x is None and y is None:\n            return (I_TE, I_TM)\n        else:\n            x0 = EMpy.utils.centered1d(self.x)\n            y0 = EMpy.utils.centered1d(self.y)\n            I_TE_ = EMpy.utils.interp2(x, y, x0, y0, I_TE)\n            I_TM_ = EMpy.utils.interp2(x, y, x0, y0, I_TM)\n            return (I_TE_, I_TM_)\n\n    def intensity(self, x=None, y=None):\n        I_TE, I_TM = self.intensityTETM(x, y)\n        return I_TE + I_TM\n\n    def TEfrac(self, x_=None, y_=None):\n        if x_ is None:\n            x = EMpy.utils.centered1d(self.x)\n        else:\n            x = x_\n        if y_ is None:\n            y = EMpy.utils.centered1d(self.y)\n        else:\n            y = y_\n        STE, STM = self.intensityTETM(x_, y_)\n        num = EMpy.utils.trapz2(numpy.abs(STE), x=x, y=y)\n        den = EMpy.utils.trapz2(numpy.abs(STE) + numpy.abs(STM), x=x, y=y)\n        return num / den\n\n    def norm(self):\n        x = EMpy.utils.centered1d(self.x)\n        y = EMpy.utils.centered1d(self.y)\n        return scipy.sqrt(EMpy.utils.trapz2(self.intensity(), x=x, y=y))\n\n    def normalize(self):\n        n = self.norm()\n        self.Ex /= n\n        self.Ey /= n\n        self.Ez /= n\n        self.Hx /= n\n        self.Hy /= n\n        self.Hz /= n\n\n        return self\n\n    def overlap(self, m, x=None, y=None):\n\n        x1 = EMpy.utils.centered1d(self.x)\n        y1 = EMpy.utils.centered1d(self.y)\n\n        x2 = EMpy.utils.centered1d(m.x)\n        y2 = EMpy.utils.centered1d(m.y)\n\n        if x is None:\n            x = x2\n\n        if y is None:\n            y = y2\n\n        # Interpolates m1 onto m2 grid:\n        Ex1 = EMpy.utils.interp2(x, y, x1, y1, self.Ex)\n        Ey1 = EMpy.utils.interp2(x, y, x1, y1, self.Ey)\n        Hx2 = EMpy.utils.interp2(x, y, x2, y2, m.Hx)\n        Hy2 = EMpy.utils.interp2(x, y, x2, y2, m.Hy)\n\n        intensity = (Ex1 * EMpy.utils.centered2d(numpy.conj(Hy2)) -\n                     Ey1 * EMpy.utils.centered2d(numpy.conj(Hx2))) / 2.\n\n        return EMpy.utils.trapz2(intensity, x=x, y=y)\n\n    def get_fields_for_FDTD(self, x=None, y=None):\n        \"\"\"Get the mode's fields on a staggered grid.\n\n        Note: ignores some fields on the boundaries.\n\n        \"\"\"\n\n        if x is None:\n            x = self.x\n        if y is None:\n            y = self.y\n\n        # Ex: ignores y = 0, max\n        x_Ex = EMpy.utils.centered1d(self.x)\n        y_Ex = EMpy.utils.centered1d(self.y)\n        x_Ex_FDTD = EMpy.utils.centered1d(x)\n        y_Ex_FDTD = y[1:-1]\n        Ex_FDTD = EMpy.utils.interp2(x_Ex_FDTD, y_Ex_FDTD, x_Ex, y_Ex, self.Ex)\n        # Ey: ignores x = 0, max\n        x_Ey = EMpy.utils.centered1d(self.x)\n        y_Ey = EMpy.utils.centered1d(self.y)\n        x_Ey_FDTD = x[1:-1]\n        y_Ey_FDTD = EMpy.utils.centered1d(y)\n        Ey_FDTD = EMpy.utils.interp2(x_Ey_FDTD, y_Ey_FDTD, x_Ey, y_Ey, self.Ey)\n        # Ez: ignores x, y = 0, max\n        x_Ez = EMpy.utils.centered1d(self.x)\n        y_Ez = EMpy.utils.centered1d(self.y)\n        x_Ez_FDTD = x[1:-1]\n        y_Ez_FDTD = y[1:-1]\n        Ez_FDTD = EMpy.utils.interp2(x_Ez_FDTD, y_Ez_FDTD, x_Ez, y_Ez, self.Ez)\n        # Hx: ignores x = 0, max, /120pi, reverse direction\n        x_Hx = self.x\n        y_Hx = self.y\n        x_Hx_FDTD = x[1:-1]\n        y_Hx_FDTD = EMpy.utils.centered1d(y)\n        Hx_FDTD = EMpy.utils.interp2(\n            x_Hx_FDTD, y_Hx_FDTD, x_Hx, y_Hx, self.Hx) / (-120. * numpy.pi)\n        # Hy: ignores y = 0, max, /120pi, reverse direction\n        x_Hy = self.x\n        y_Hy = self.y\n        x_Hy_FDTD = EMpy.utils.centered1d(x)\n        y_Hy_FDTD = y[1:-1]\n        Hy_FDTD = EMpy.utils.interp2(\n            x_Hy_FDTD, y_Hy_FDTD, x_Hy, y_Hy, self.Hy) / (-120. * numpy.pi)\n        # Hz: /120pi, reverse direction\n        x_Hz = self.x\n        y_Hz = self.y\n        x_Hz_FDTD = EMpy.utils.centered1d(x)\n        y_Hz_FDTD = EMpy.utils.centered1d(y)\n        Hz_FDTD = EMpy.utils.interp2(\n            x_Hz_FDTD, y_Hz_FDTD, x_Hz, y_Hz, self.Hz) / (-120. 
* numpy.pi)\n\n        return (Ex_FDTD, Ey_FDTD, Ez_FDTD, Hx_FDTD, Hy_FDTD, Hz_FDTD)\n\n    @staticmethod\n    def plot_field(x, y, field):\n        try:\n            import pylab\n        except ImportError:\n            print('no pylab installed')\n            return\n        pylab.hot()\n        pylab.contour(x, y, numpy.abs(field.T), 16)\n        pylab.axis('image')\n\n    def plot_Ex(self, x=None, y=None):\n        if x is None:\n            x = EMpy.utils.centered1d(self.x)\n        if y is None:\n            y = EMpy.utils.centered1d(self.y)\n        Ex = self.get_field('Ex', x, y)\n        self.plot_field(x, y, Ex)\n\n    def plot_Ey(self, x=None, y=None):\n        if x is None:\n            x = EMpy.utils.centered1d(self.x)\n        if y is None:\n            y = EMpy.utils.centered1d(self.y)\n        Ey = self.get_field('Ey', x, y)\n        self.plot_field(x, y, Ey)\n\n    def plot_Ez(self, x=None, y=None):\n        if x is None:\n            x = EMpy.utils.centered1d(self.x)\n        if y is None:\n            y = EMpy.utils.centered1d(self.y)\n        Ez = self.get_field('Ez', x, y)\n        self.plot_field(x, y, Ez)\n\n    def plot_Hx(self, x=None, y=None):\n        if x is None:\n            x = self.x\n        if y is None:\n            y = self.y\n        Hx = self.get_field('Hx', x, y)\n        self.plot_field(x, y, Hx)\n\n    def plot_Hy(self, x=None, y=None):\n        if x is None:\n            x = self.x\n        if y is None:\n            y = self.y\n        Hy = self.get_field('Hy', x, y)\n        self.plot_field(x, y, Hy)\n\n    def plot_Hz(self, x=None, y=None):\n        if x is None:\n            x = self.x\n        if y is None:\n            y = self.y\n        Hz = self.get_field('Hz', x, y)\n        self.plot_field(x, y, Hz)\n\n    def plot_intensity(self):\n        x = EMpy.utils.centered1d(self.x)\n        y = EMpy.utils.centered1d(self.y)\n        I = self.intensity(x, y)\n        self.plot_field(x, y, I)\n\n    def plot(self):\n        \"\"\"Plot the mode's fields.\"\"\"\n        try:\n            import pylab\n        except ImportError:\n            print('no pylab installed')\n            return\n        pylab.figure()\n        pylab.subplot(2, 3, 1)\n        self.plot_Ex()\n        pylab.title('Ex')\n        pylab.subplot(2, 3, 2)\n        self.plot_Ey()\n        pylab.title('Ey')\n        pylab.subplot(2, 3, 3)\n        self.plot_Ez()\n        pylab.title('Ez')\n        pylab.subplot(2, 3, 4)\n        self.plot_Hx()\n        pylab.title('Hx')\n        pylab.subplot(2, 3, 5)\n        self.plot_Hy()\n        pylab.title('Hy')\n        pylab.subplot(2, 3, 6)\n        self.plot_Hz()\n        pylab.title('Hz')\n\n\ndef stretchmesh(x, y, nlayers, factor, method='PPPP'):\n\n    # WATCH OUT: check me!\n\n    # This function can be used to continuously stretch the grid\n    # spacing at the edges of the computation window for\n    # finite-difference calculations. This is useful when you would\n    # like to increase the size of the computation window without\n    # increasing the total number of points in the computational\n    # domain. The program implements four different expansion\n    # methods: uniform, linear, parabolic (the default) and\n    # geometric. 
The first three methods also allow for complex\n    # coordinate stretching, which is useful for creating\n    # perfectly-matched non-reflective boundaries.\n    #\n    # USAGE:\n    #\n    # (x, y, xc, yc, dx, dy) = stretchmesh(x, y, nlayers, factor)\n    # (x, y, xc, yc, dx, dy) = stretchmesh(x, y, nlayers, factor, method)\n    #\n    # INPUT:\n    #\n    # x, y - vectors that specify the vertices of the original\n    #   grid, which are usually linearly spaced.\n    # nlayers - vector that specifies how many layers of the grid\n    #   you would like to expand:\n    #   nlayers[0] = # of layers on the north boundary to stretch\n    #   nlayers[1] = # of layers on the south boundary to stretch\n    #   nlayers[2] = # of layers on the east boundary to stretch\n    #   nlayers[3] = # of layers on the west boundary to stretch\n    # factor - cumulative factor by which the layers are to be\n    #   expanded. As with nlayers, this can be a 4-vector.\n    # method - 4-letter string specifying the method of\n    #   stretching for each of the four boundaries. Four different\n    #   methods are supported: uniform, linear, parabolic (default)\n    #   and geometric. For example, method = 'LLLG' will use linear\n    #   expansion for the north, south and east boundaries and\n    #   geometric expansion for the west boundary.\n    #\n    # OUTPUT:\n    #\n    # x, y - the vertices of the new stretched grid\n    # xc, yc - the center cell coordinates of the stretched grid\n    # dx, dy - the grid spacing (dx = diff(x))\n\n    xx = x.astype(complex)\n    yy = y.astype(complex)\n\n    nlayers *= numpy.ones(4, dtype=int)\n    factor *= numpy.ones(4)\n\n    for idx, (n, f, m) in enumerate(zip(nlayers, factor, method.upper())):\n\n        if n > 0 and f != 1:\n\n            if idx == 0:\n                # north boundary\n                kv = numpy.arange(len(y) - 1 - n, len(y))\n                z = yy\n                q1 = z[-1 - n]\n                q2 = z[-1]\n            elif idx == 1:\n                # south boundary\n                kv = numpy.arange(0, n)\n                z = yy\n                q1 = z[n]\n                q2 = z[0]\n            elif idx == 2:\n                # east boundary\n                kv = numpy.arange(len(x) - 1 - n, len(x))\n                z = xx\n                q1 = z[-1 - n]\n                q2 = z[-1]\n            elif idx == 3:\n                # west boundary\n                kv = numpy.arange(0, n)\n                z = xx\n                q1 = z[n]\n                q2 = z[0]\n\n            kv = kv.astype(int)\n\n            if m == 'U':\n                c = numpy.polyfit([q1, q2], [q1, q1 + f * (q2 - q1)], 1)\n                z[kv] = numpy.polyval(c, z[kv])\n            elif m == 'L':\n                c = (f - 1) / (q2 - q1)\n                b = 1 - 2 * c * q1\n                a = q1 - b * q1 - c * q1 ** 2\n                z[kv] = a + b * z[kv] + c * z[kv] ** 2\n            elif m == 'P':\n                z[kv] = z[kv] + (f - 1) * (z[kv] - q1) ** 3 / (q2 - q1) ** 2\n            elif m == 'G':\n                b = scipy.optimize.newton(\n                    lambda s: numpy.exp(s) - 1 - f * s, f)\n                a = (q2 - q1) / b\n                z[kv] = q1 + a * (numpy.exp((z[kv] - q1) / a) - 1)\n\n    xx = xx.real + 1j * numpy.abs(xx.imag)\n    yy = yy.real + 1j * numpy.abs(yy.imag)\n\n    xc = (xx[:-1] + xx[1:]) / 2.\n    yc = (yy[:-1] + yy[1:]) / 2.\n\n    dx = numpy.diff(xx)\n    dy = numpy.diff(yy)\n\n    return (xx, yy, xc, yc, dx, dy)\n" ]
[ [ "numpy.absolute", "numpy.sqrt", "numpy.linspace" ], [ "scipy.sparse.linalg.eigen.eigs", "numpy.dot", "scipy.sparse.coo_matrix", "numpy.polyfit", "numpy.abs", "numpy.linspace", "numpy.conj", "numpy.arange", "numpy.ones", "scipy.sqrt", "numpy.diff", "numpy.zeros_like", "numpy.argsort", "numpy.exp", "numpy.polyval" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GouldGroup/MFBERT
[ "743587b1ea76b00f4adf46d616fc1e28dd001749" ]
[ "fine_tune_freesolv.py" ]
[ "import os\nos.environ['CUDA_VISIBLE_DEVICES']='0'\n\nimport pandas as pd\nimport torch\nfrom transformers import RobertaForMaskedLM\nfrom torch.utils.data import Dataset, DataLoader\nimport pickle\nfrom tqdm import tqdm\nfrom Tokenizer.MFBERT_Tokenizer import MFBERTTokenizer\nimport numpy as np\n\nassert torch.cuda.device_count() == 1\n\n\nMAX_LEN = 514\nTRAIN_BATCH_SIZE = 16\nVALID_BATCH_SIZE = 8\nEPOCHS = 10\nLEARNING_RATE = 1e-05\nTOKENIZER_DIR = 'Tokenizer/'\n\n\nclass FreeSolvDataset(Dataset):\n    def __init__(self):\n        examples = []\n\n        with open('Datasets/data_splits/FreeSolv/train.pkl', 'rb') as f:\n            traindata = pickle.load(f)\n        for k,v in traindata.items():\n            examples.append((k,v))\n\n        self.data = examples\n        self.tokenizer = MFBERTTokenizer.from_pretrained(TOKENIZER_DIR+'Model/',\n                                                         dict_file = TOKENIZER_DIR+'Model/dict.txt')\n        self.max_len = 514\n\n    def __getitem__(self, idx):\n        example = self.data[idx]\n        smiles = example[0]\n        target = example[1]\n        inputs = self.tokenizer.encode_plus(\n            smiles,\n            None,\n            add_special_tokens=True,\n            max_length=self.max_len,\n            pad_to_max_length=True,\n            return_token_type_ids=True,\n            truncation=True\n        )\n        ids = inputs['input_ids']\n        mask = inputs['attention_mask']\n\n        # FreeSolv targets are continuous hydration free energies, so they\n        # must stay floats; casting them to long would truncate the values.\n        return {'input_ids':torch.tensor(ids, dtype=torch.long),\n                'attention_mask':torch.tensor(mask, dtype=torch.long),\n                'label':torch.tensor(target, dtype=torch.float)}\n\n    def __len__(self):\n        return len(self.data)\n\nclass MFBERTForFreeSolv(torch.nn.Module):\n    def __init__(self):\n        super(MFBERTForFreeSolv, self).__init__()\n        self.l1 = list(RobertaForMaskedLM.from_pretrained('Model/weights').children())[0]\n        self.l2 = torch.nn.Dropout(0.2)\n        self.l3 = torch.nn.Linear(768, 1)\n\n    def forward(self, ids, mask):\n        output_1 = self.l1(ids, mask)\n        output_2 = self.l2(torch.mean(output_1[0], dim=1))\n        output = self.l3(output_2)\n        return output\n\ntrainds = FreeSolvDataset()\n\nmodel = MFBERTForFreeSolv().cuda()\n\ntrain_params = {'batch_size': TRAIN_BATCH_SIZE,\n                'shuffle': True,\n                'num_workers': 0\n                }\n\n\ntraining_loader = DataLoader(trainds, **train_params)\n\n# Creating the loss function and optimizer\nloss_function = torch.nn.MSELoss()\noptimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)\n\n# best (lowest) batch loss seen so far\ncurminloss = 1\n\ndef train(epoch):\n    model.train()\n\n    for _ , data in tqdm(enumerate(training_loader, 0), desc='ITERATION', total=len(training_loader)):\n        ids = data['input_ids'].cuda()\n        mask = data['attention_mask'].cuda()\n        targets = data['label'].float().cuda()\n        global curminloss\n        outputs = model(ids, mask).squeeze()\n        optimizer.zero_grad()\n\n        loss = loss_function(outputs, targets)\n\n        if _%100==0:\n            print(f'Epoch: {epoch}, Loss: {loss.item()}')\n            # save best model\n            if loss.item()<curminloss:\n                torch.save(model, f'fine-tuned/FreeSolv_model_best_{loss.item()}.bin')\n                curminloss = loss.item()\n                print('saving best...')\n\n        loss.backward()\n        optimizer.step()\n\nfor epoch in tqdm(range(EPOCHS), desc='EPOCHS'):\n    train(epoch)\n\n\ntorch.save(model, 'fine-tuned/Freesolv_model_last.bin')\n" ]
[ [ "torch.mean", "torch.nn.Dropout", "torch.utils.data.DataLoader", "torch.tensor", "torch.nn.Linear", "torch.cuda.device_count", "torch.nn.MSELoss", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abhinine4/ivy
[ "79f8037cb0e0815d21cf46a906e66a45b2f27688" ]
[ "ivy/functional/backends/torch/old/general.py" ]
[ "\"\"\"\nCollection of PyTorch general functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport ivy\nimport numpy as np\ntorch_scatter = None\nimport math as _math\nimport torch as torch\nfrom operator import mul\nfrom torch.types import Number\nfrom functools import reduce as _reduce\nfrom typing import List, Dict, Optional, Union\n\n\n# local\nfrom ivy.functional.ivy import default_dtype\nfrom ivy.functional.ivy.device import default_device\nfrom ivy.functional.backends.torch.device import dev_from_str, _callable_dev\n\n\n# API #\n# ----#\n\n\n\ndef dtype_bits(dtype_in):\n dtype_str = dtype_to_str(dtype_in)\n if 'bool' in dtype_str:\n return 1\n return int(dtype_str.replace('torch.', '').replace('uint', '').replace('int', '').replace('bfloat', '').replace(\n 'float', ''))\n\n\ndef shape(x, as_tensor=False) -> Union[torch.Tensor, List[int]]:\n return torch.tensor(x.shape) if as_tensor else x.shape\n\n\ndef get_num_dims(x, as_tensor=False) -> Union[torch.Tensor, int]:\n return torch.tensor(len(x.shape)) if as_tensor else len(x.shape)\n\n\ndef minimum(x, y):\n x_val = torch.tensor(x) if (isinstance(x, int) or isinstance(x, float)) else x\n y_val = torch.tensor(y) if (isinstance(y, int) or isinstance(y, float)) else y\n return torch.min(x_val, y_val)\n\n\ndef maximum(x, y):\n x_val = torch.tensor(x) if (isinstance(x, int) or isinstance(x, float)) else x\n y_val = torch.tensor(y) if (isinstance(y, int) or isinstance(y, float)) else y\n return torch.max(x_val, y_val)\n\n\ndef clip(x, x_min, x_max):\n return torch.clamp(x, x_min, x_max)\n\n\n# noinspection PyShadowingBuiltins\n# noinspection PyShadowingBuiltins\ndef abs(x):\n return torch.abs(x)\n\n\ndef cast(x, dtype_in: str):\n dtype_val = dtype_from_str(dtype_in)\n return x.type(dtype_val)\n\n\nastype = cast\n\n\n# noinspection PyShadowingNames\ndef arange(stop: Number, start: Number = 0, step: Number = 1, dtype: Optional[str] = None,\n dev: Optional[str] = None):\n dev = default_device(dev)\n if dtype is not None:\n return torch.arange(start, stop, step=step, dtype=dtype_from_str(dtype), device=dev_from_str(dev))\n else:\n return torch.arange(start, stop, step=step, device=dev_from_str(dev))\n\n\n\n\n\ndef concatenate(xs: List[torch.Tensor], axis: int = -1):\n if xs[0].shape == ():\n return torch.cat([x.unsqueeze(0) for x in xs], axis)\n return torch.cat(xs, axis)\n\n\ndef stack(xs: List[torch.Tensor], axis: int = 0):\n return torch.stack(xs, axis)\n\n\n\n\n\n\n\n\n\ndef transpose(x, axes: List[int]):\n if axes is None:\n num_dims = len(x.shape)\n axes = list(range(num_dims))\n axes.reverse()\n return x.permute(axes)\n\n\ndef where(condition, x1, x2):\n return torch.where(condition.type(torch.bool), x1, x2)\n\n\n\n\ndef reshape(x, newshape: List[int]):\n if isinstance(newshape, int):\n newshape = [newshape]\n return torch.reshape(x, newshape)\n\n\ndef broadcast_to(x, new_shape):\n return x.expand(new_shape)\n\n\ndef squeeze(x, axis: Optional[int] = None):\n if axis is None:\n return torch.squeeze(x)\n return torch.squeeze(x, axis)\n\n\n\n\n# noinspection PyShadowingNames\ndef zeros_like(x, dtype: Optional[str] = None, dev: Optional[str] = None):\n if dev is None:\n dev = _callable_dev(x)\n if dtype is not None:\n type_dict: Dict[str, torch.dtype] = {'int8': torch.int8,\n 'int16': torch.int16,\n 'int32': torch.int32,\n 'int64': torch.int64,\n 'uint8': torch.uint8,\n 'bfloat16': torch.bfloat16,\n 'float16': torch.float16,\n 'float32': torch.float32,\n 'float64': torch.float64,\n 'bool': torch.bool}\n return 
torch.zeros_like(x, dtype=type_dict[dtype], device=dev_from_str(dev))\n return torch.zeros_like(x, device=dev_from_str(dev))\n\n\ndef full(shape, fill_value, dtype=None, device=None):\n return torch.full(\n ivy.shape_to_tuple(shape), fill_value, dtype=dtype_from_str(default_dtype(dtype, fill_value)),\n device=default_device(device))\n\n\n\n\n\ndef cross(x1, x2):\n return torch.cross(x1, x2)\n\n\n\n# noinspection PyShadowingNames\ndef identity(n: int, dtype: ivy.Dtype = 'float32', batch_shape: Optional[List[int]] = None,\n dev: Optional[str] = None):\n dev = default_device(dev)\n type_dict: Dict[str, torch.dtype] = {'int8': torch.int8,\n 'int16': torch.int16,\n 'int32': torch.int32,\n 'int64': torch.int64,\n 'uint8': torch.uint8,\n 'bfloat16': torch.bfloat16,\n 'float16': torch.float16,\n 'float32': torch.float32,\n 'float64': torch.float64,\n 'bool': torch.bool}\n dtype_val: torch.dtype = type_dict[dtype]\n mat = torch.eye(n, n, dtype=dtype_val, device=dev_from_str(dev))\n if batch_shape is None:\n return mat\n else:\n reshape_dims = [1] * len(batch_shape) + [n, n]\n tile_dims = list(batch_shape) + [1, 1]\n res = torch.reshape(mat, reshape_dims).repeat(tile_dims)\n return res\n\n\ndef meshgrid(*xs, indexing='ij'):\n ret = torch.meshgrid(*xs)\n if indexing == 'xy':\n # ToDo: verify if this is correct\n return tuple([torch.transpose(x, 1, 0) for x in ret])\n return ret\n\n\n\n\ndef dtype(x, as_str=False):\n dt = x.dtype\n if as_str:\n return dtype_to_str(dt)\n return dt\n\n\ndef dtype_to_str(dtype_in):\n if isinstance(dtype_in, str):\n return dtype_in\n return {torch.int8: 'int8',\n torch.int16: 'int16',\n torch.int32: 'int32',\n torch.int64: 'int64',\n torch.uint8: 'uint8',\n torch.bfloat16: 'bfloat16',\n torch.float16: 'float16',\n torch.float32: 'float32',\n torch.float64: 'float64',\n torch.bool: 'bool'}[dtype_in]\n\n\ndef dtype_from_str(dtype_in: str) -> torch.dtype:\n if not isinstance(dtype_in, str):\n return dtype_in\n return {'int8': torch.int8,\n 'int16': torch.int16,\n 'int32': torch.int32,\n 'int64': torch.int64,\n 'uint8': torch.uint8,\n 'bfloat16': torch.bfloat16,\n 'float16': torch.float16,\n 'float32': torch.float32,\n 'float64': torch.float64,\n 'bool': torch.bool}[dtype_in]\n\n\ndef compile(fn, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None):\n if dynamic:\n return torch.jit.script(fn)\n return torch.jit.trace(fn, example_inputs)\n\n\ndef current_framework_str():\n return 'torch'\n\n\n\n\n" ]
[ [ "torch.jit.script", "torch.abs", "torch.transpose", "torch.jit.trace", "torch.max", "torch.cat", "torch.min", "torch.reshape", "torch.tensor", "torch.stack", "torch.clamp", "torch.meshgrid", "torch.squeeze", "torch.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
benlansdell/obs-rl-i2a
[ "47e274a371c3d0eec418b85dc4ed732e36885c53" ]
[ "o2a/i2a.py" ]
[ "import os\nimport gym\nimport time\nimport logging\nimport numpy as np\nimport tensorflow as tf\nfrom common.multiprocessing_env import SubprocVecEnv\nfrom tqdm import tqdm\n\nfrom env_model_minigrid import create_env_model\nfrom a2c import get_actor_critic, CnnPolicy\nfrom common.minigrid_util import num_pixels, mode_rewards, pix_to_target, rewards_to_target, mode_rewards, target_to_pix\n\n# Hyperparameter of how far ahead in the future the agent \"imagines\"\n# Currently this is specifying one frame in the future.\nNUM_ROLLOUTS = 1\n\n# Hidden size in RNN imagination encoder.\nHIDDEN_SIZE = 128\nN_STEPS = 5\n\n# This can be anything from \"regular\" \"avoid\" \"hunt\" \"ambush\" \"rush\" each\n# resulting in a different reward function giving the agent different behavior.\nREWARD_MODE = 'regular'\n\n# Replace this with the name of the weights you want to load to train I2A\nA2C_MODEL_PATH = 'weights/a2c_200000.ckpt'\nENV_MODEL_PATH = 'weights/env_model.ckpt'\n\n# Softmax function for numpy taken from\n# https://nolanbconaway.github.io/blog/2017/softmax-numpy\ndef softmax(X, theta = 1.0, axis = None):\n \"\"\"\n Compute the softmax of each element along an axis of X.\n\n Parameters\n ----------\n X: ND-Array. Probably should be floats.\n theta (optional): float parameter, used as a multiplier\n prior to exponentiation. Default = 1.0\n axis (optional): axis to compute values along. Default is the\n first non-singleton axis.\n\n Returns an array the same size as X. The result will sum to 1\n along the specified axis.\n \"\"\"\n\n # make X at least 2d\n y = np.atleast_2d(X)\n\n # find axis\n if axis is None:\n axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)\n\n # multiply y against the theta parameter,\n y = y * float(theta)\n\n # subtract the max for numerical stability\n y = y - np.expand_dims(np.max(y, axis = axis), axis)\n\n # exponentiate y\n y = np.exp(y)\n\n # take the sum along the specified axis\n ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)\n\n # finally: divide elementwise\n p = y / ax_sum\n\n # flatten if X was 1D\n if len(X.shape) == 1: p = p.flatten()\n\n return p\n\n\ndef convert_target_to_real(batch_size, nw, nh, nc, imagined_state, imagined_reward):\n imagined_state = softmax(imagined_state, axis=1)\n imagined_state = np.argmax(imagined_state, axis=1)\n imagined_state = target_to_pix(imagined_state)\n imagined_state = imagined_state.reshape((batch_size, nw, nh, nc))\n\n imagined_reward = softmax(imagined_reward, axis=1)\n imagined_reward = np.argmax(imagined_reward, axis=1)\n\n return imagined_state, imagined_reward\n\n\"\"\"\nUsed to generate rollouts of imagined states.\n\"\"\"\nclass ImaginationCore(object):\n def __init__(self, num_rollouts, num_actions, num_rewards,\n ob_space, actor_critic, env_model):\n\n self.num_rollouts = num_rollouts\n self.num_actions = num_actions\n self.num_rewards = num_rewards\n self.ob_space = ob_space\n self.actor_critic = actor_critic\n self.env_model = env_model\n\n\n def imagine(self, state, sess):\n nw, nh, nc = self.ob_space\n\n batch_size = state.shape[0]\n\n state = np.tile(state, [self.num_actions, 1, 1, 1, 1])\n state = state.reshape(-1, nw, nh, nc)\n\n action = np.array([[[i] for i in range(self.num_actions)] for j in\n range(batch_size)])\n\n action = action.reshape((-1,))\n\n rollout_batch_size = batch_size * self.num_actions\n\n rollout_states = []\n rollout_rewards = []\n\n for step in range(self.num_rollouts):\n onehot_action = np.zeros((rollout_batch_size, self.num_actions, nw, nh))\n 
onehot_action[range(rollout_batch_size), action] = 1\n onehot_action = np.transpose(onehot_action, (0, 2, 3, 1))\n\n imagined_state, imagined_reward = sess.run(\n [self.env_model.imag_state, self.env_model.imag_reward],\n feed_dict={\n self.env_model.input_states: state,\n self.env_model.input_actions: onehot_action,\n })\n\n imagined_state, imagined_reward = convert_target_to_real(rollout_batch_size, nw, nh, nc, imagined_state, imagined_reward)\n\n onehot_reward = np.zeros((rollout_batch_size, self.num_rewards))\n onehot_reward[range(rollout_batch_size), imagined_reward] = 1\n\n rollout_states.append(imagined_state)\n rollout_rewards.append(onehot_reward)\n\n state = imagined_state\n action, _, _ = self.actor_critic.act(state)\n\n return np.array(rollout_states), np.array(rollout_rewards)\n\n# So the model is not loaded twice.\ng_actor_critic = None\ndef get_cache_loaded_a2c(sess, nenvs, nsteps, ob_space, ac_space):\n global g_actor_critic\n if g_actor_critic is None:\n with tf.variable_scope('actor'):\n g_actor_critic = get_actor_critic(sess, nenvs, nsteps, ob_space,\n ac_space, CnnPolicy, should_summary=False)\n g_actor_critic.load(A2C_MODEL_PATH)\n\n print('Actor restored!')\n return g_actor_critic\n\n\n# So the model is not loaded twice.\ng_env_model = None\ndef get_cache_loaded_env_model(sess, nenvs, ob_space, num_actions):\n global g_env_model\n if g_env_model is None:\n with tf.variable_scope('env_model'):\n g_env_model = create_env_model(ob_space, num_actions, num_pixels,\n len(mode_rewards[REWARD_MODE]), should_summary=False)\n\n save_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='env_model')\n loader = tf.train.Saver(var_list=save_vars)\n loader.restore(sess, ENV_MODEL_PATH)\n\n print('Env model restored!')\n\n return g_env_model\n\n\nclass I2aPolicy(object):\n def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False):\n num_rewards = len(mode_rewards[REWARD_MODE])\n num_actions = ac_space.n\n width, height, depth = ob_space\n\n actor_critic = get_cache_loaded_a2c(sess, nbatch, N_STEPS, ob_space, ac_space)\n env_model = get_cache_loaded_env_model(sess, nbatch, ob_space, num_actions)\n\n self.imagination = ImaginationCore(NUM_ROLLOUTS, num_actions, num_rewards,\n ob_space, actor_critic, env_model)\n\n with tf.variable_scope('model', reuse=reuse):\n # Model based path.\n self.imagined_state = tf.placeholder(tf.float32, [None, None, width, height, depth])\n self.imagined_reward = tf.placeholder(tf.float32, [None, None, num_rewards])\n\n num_steps = tf.shape(self.imagined_state)[0]\n batch_size = tf.shape(self.imagined_state)[1]\n\n hidden_state = self.get_encoder(self.imagined_state, self.imagined_reward,\n num_steps, batch_size, width, height, depth, HIDDEN_SIZE)\n\n # Model free path.\n self.state = tf.placeholder(tf.float32, [None, width, height,\n depth])\n\n state_batch_size = tf.shape(self.state)[0]\n\n c1 = tf.layers.conv2d(self.state, 16, kernel_size=3,\n strides=1, padding='valid', activation=tf.nn.relu)\n c2 = tf.layers.conv2d(c1, 16, kernel_size=3,\n strides=2, padding='valid', activation=tf.nn.relu)\n\n #Do all these numbers have to change?? 
What is 6 and 8?\n features = tf.reshape(c2, [state_batch_size, 6 * 6 * 16])\n\n self.features = features\n\n hidden_state = tf.reshape(hidden_state, [state_batch_size, 80 * HIDDEN_SIZE\n // 16])\n\n # Combine both paths\n x = tf.concat([features, hidden_state], axis=1)\n x = tf.layers.dense(x, 256, activation=tf.nn.relu)\n\n self.pi = tf.layers.dense(x, num_actions)\n self.vf = tf.layers.dense(x, 1)[:, 0]\n\n # Sample action. `pi` is like the logits\n u = tf.random_uniform(tf.shape(self.pi))\n self.a0 = tf.argmax(self.pi - tf.log(-tf.log(u)), axis=-1)\n\n # Get the negative log likelihood\n one_hot_actions = tf.one_hot(self.a0, self.pi.get_shape().as_list()[-1])\n self.neglogp0 = tf.nn.softmax_cross_entropy_with_logits(\n logits=self.pi,\n labels=one_hot_actions)\n\n\n def get_encoder(self, state, reward, num_steps, batch_size, width, height, depth, hidden_size):\n state = tf.reshape(state, [num_steps * batch_size, width, height,\n depth])\n\n c1 = tf.layers.conv2d(state, 16, kernel_size=3, strides=1,\n padding='valid', activation=tf.nn.relu)\n features = tf.layers.conv2d(c1, 16, kernel_size=3, strides=2,\n padding='valid', activation=tf.nn.relu)\n\n #Is this size hard coded?\n features = tf.reshape(features, [num_steps, batch_size, 6 * 6 * 16])\n\n rnn_input = tf.concat([features, reward], 2)\n\n cell = tf.contrib.rnn.GRUCell(hidden_size)\n _, internal_state = tf.nn.dynamic_rnn(cell, rnn_input, time_major=True, dtype=tf.float32)\n\n return internal_state\n\n\n def step(self, sess, ob):\n imagined_state, imagined_reward, ob = self.transform_input(ob, sess)\n\n a, v, neglogp = sess.run([\n self.a0,\n self.vf,\n self.neglogp0\n ],\n {\n self.imagined_state: imagined_state,\n self.imagined_reward: imagined_reward,\n self.state: ob\n })\n return a, v, neglogp\n\n\n def value(self, sess, ob):\n imagined_state, imagined_reward, ob = self.transform_input(ob, sess)\n\n v = sess.run(self.vf, {\n self.imagined_state: imagined_state,\n self.imagined_reward: imagined_reward,\n self.state: ob\n })\n return v\n\n # Add the imagined states to the default input.\n def get_inputs(self):\n return [self.imagined_state, self.imagined_reward, self.state]\n\n def transform_input(self, X, sess):\n imagined_state, imagined_reward = self.imagination.imagine(X, sess)\n return [imagined_state, imagined_reward, X]\n\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.contrib.rnn.GRUCell", "numpy.max", "numpy.exp", "tensorflow.get_collection", "tensorflow.layers.dense", "numpy.argmax", "tensorflow.train.Saver", "numpy.zeros", "tensorflow.layers.conv2d", "tensorflow.shape", "tensorflow.placeholder", "numpy.atleast_2d", "numpy.transpose", "numpy.array", "numpy.sum", "tensorflow.reshape", "numpy.tile", "tensorflow.log", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jcgeo9/ML-For-Fish-Recognition
[ "0b5faba77d0b2c5452950637f047882c80fa6fb7" ]
[ "Python-Files/model_conversion/convert_to_tflite.py" ]
[ "# =============================================================================\n# Created By : Giannis Kostas Georgiou\n# Project : Machine Learning for Fish Recognition (Individual Project)\n# =============================================================================\n# Description : File in order to convert saved models to .tflite instances.\n# To be used after the desired model are trained and saved\n# How to use : Replace variables in CAPS according to needs of the dataset \n# =============================================================================\n\nimport tensorflow as tf\n\nmodel_path='PATH TO SAVED MODEL'\ntflite_model_name='NAME OF THE NEWLY CREATED TFLITE MODEL'\n\n#convert the model by loading the saved model to the converter\nconverter = tf.lite.TFLiteConverter.from_saved_model(model_path)\ntflite_model = converter.convert()\n\n#save the tflite model\nwith open(tflite_model_name+'.tflite', 'wb') as f:\n f.write(tflite_model)\n" ]
[ [ "tensorflow.lite.TFLiteConverter.from_saved_model" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dhimanray/WEMRR
[ "aab019f1d1bb4d6db6dea36f9444167591129322" ]
[ "examples/NAMD/template_milestone/westpa_scripts/convert_first_milestone.py" ]
[ "#THIS CODE IS ONLY FOR FIRST MILSTONE. build.py WILL REPLACE convert.py WITH THIS FILE FOR FIRST MILESTONE\n\nimport numpy as np\n\nendpoint = ENDPOINT\n\nl = np.loadtxt('parent.dat')\n\nr = l[len(l)-1]\n\n#print(\"{:.2f}\".format(r)) #needed for OpenMM (does not include zero frame by default)\n\nl = []\n\nl = np.loadtxt('distance.dat')\n\nfor i in range(len(l)):\n if r < endpoint:\n r = l[i]\n print(\"{:.2f}\".format(r))\n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marisuki/LearnCRR
[ "0eb4c26be7b4e9d816f275ef45b848c99a489c57" ]
[ "core/sklearn_cnd.py" ]
[ "from sklearn.linear_model import BayesianRidge\nfrom sklearn.linear_model import LinearRegression\n\nimport numpy as np\nfrom copy import copy\nimport random\nfrom sklearn.metrics import mean_squared_error\nmse = mean_squared_error\n\n\n#@DeprecationWarning(\"not_essay\")\ndef train_baye(X_train, y_train):\n reg = BayesianRidge(tol=1e-6, fit_intercept=False, compute_score=True)\n reg.fit(X_train, y_train)\n return reg\n\ndef generate(attrs, tb, IC, A, params):\n attrs_excA = list(filter(lambda x: x != A, attrs))\n x_train, y_train = [], []\n X_train = []\n if params['func_name'] == \"linear\":\n # use all attrs\n x_train = np.array([[tb[x][i] for i in attrs_excA] for x in IC])\n y_train = np.array([tb[x][A] for x in IC])\n return [x_train], y_train, [x_train]\n elif params['func_name'] == \"bayesian\":\n # select best src as indep\n y_train = np.array([tb[x][A] for x in IC])\n for src in attrs_excA:\n tmp = [tb[x][src] for x in IC]\n X_train.append(np.vander(tmp, params[\"n_order\"] + 1, increasing=True))\n x_train.append(tmp)\n return X_train, y_train, x_train\n\ndef init(func_name, y_train, params=None):\n if func_name == \"bayesian\":\n if params and 'tol' in params and 'init' in params:\n reg = BayesianRidge(tol=params['tol'], fit_intercept=params['fit_intercept'], compute_score=params['compute_score'])\n reg.set_params(alpha_init=params['init'][0], lambda_init=params['init'][1])\n else:\n init = [1 / np.var(y_train), 1.]\n reg = BayesianRidge(tol=random.random()*1e-5, fit_intercept=False, compute_score=True)\n reg.set_params(alpha_init=init[0], lambda_init=init[1])\n return reg\n elif func_name == \"linear\":\n return LinearRegression()\n\n" ]
[ [ "numpy.vander", "sklearn.linear_model.LinearRegression", "sklearn.linear_model.BayesianRidge", "numpy.var", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doc-E-brown/embc_2020
[ "ddf1f8f684b34e491b04787a0300bc7995ba336d" ]
[ "src/models/train_model.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n.. currentmodule:: \n\n\"\"\"\n__author__ = 'Ben Johnston'\n\nimport tensorflow as tf\n\nfrom src.models.model1 import * \nfrom src.data.muct import load_tensors\nfrom src.data import augment_data \n\nfrom src.models.losses import dice_loss\n\nimport numpy as np \n\nx, y = load_tensors(seed=0)\n\nmodel = model5()\n\ncallbacks = [\n tf.keras.callbacks.TerminateOnNaN(),\n tf.keras.callbacks.ModelCheckpoint('saved_model.hdf5',\n save_weights_only=False,\n save_best_only=True,\n verbose=1\n ),\n tf.keras.callbacks.EarlyStopping(patience=10),\n tf.keras.callbacks.ReduceLROnPlateau(patience=5, verbose=1, min_lr=0.0001),\n]\n\nmodel.fit(\n x=x,\n y=y,\n batch_size=16,\n epochs=1000,\n shuffle=True,\n validation_split=0.3,\n callbacks=callbacks,\n)\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.callbacks.TerminateOnNaN", "tensorflow.keras.callbacks.ReduceLROnPlateau" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
vladbataev/nv-wavenet
[ "2e16155cef2a460bb7862df674a1b8fa074a5cab" ]
[ "pytorch/mel2samp_onehot.py" ]
[ "# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met: \n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# *****************************************************************************\n\"\"\"\nGenerating pairs of mel-spectrograms and original audio\n\"\"\"\nimport random\nimport torch\nimport torch.utils.data\nimport numpy as np\nimport tensorflow as tf\n\nfrom pprint import pprint\n\nfrom audio_tf import AudioProcessor\nfrom audio_lws import LwsAudioProcessor\n\nconfig = tf.ConfigProto(device_count={'GPU': 0})\ntf.enable_eager_execution(config=config)\n\nimport utils\n\n\nclass Mel2SampOnehot(torch.utils.data.Dataset):\n \"\"\"\n This is the main class that calculates the spectrogram and returns the\n spectrogram, audio pair.\n \"\"\"\n def __init__(self, audio_files, mu_quantization, no_chunks, audio_config, segment_length,\n use_tf=False, use_lws=True, load_mel=False, verbose=False):\n\n audio_files = utils.files_to_list(audio_files)\n self.audio_files = audio_files\n random.seed(1234)\n random.shuffle(self.audio_files)\n\n if not load_mel:\n if use_tf:\n audio_processor_cls = AudioProcessor\n elif use_lws:\n audio_processor_cls = LwsAudioProcessor\n else:\n raise ValueError(\"Mel spectrum can be calculated only with tf or lws!\")\n self.audio_processor = audio_processor_cls(audio_config)\n\n self.mu_quantization = mu_quantization\n self.segment_length = segment_length\n\n audio_params = AudioProcessor._load_params(audio_config)\n if verbose:\n print(\"Audio params:\")\n pprint(audio_params)\n self.audio_params = audio_params\n self.window_size = audio_params[\"window_size\"]\n self.preemphasis_coeff = audio_params[\"preemphasis_coef\"]\n self.apply_preemphasis = audio_params[\"apply_preemphasis\"]\n self.window_step = audio_params[\"window_step\"]\n self.sample_rate = audio_params[\"sample_rate\"]\n self.mel_segment_length = int(np.ceil(\n (segment_length - self.window_size) / self.window_step)\n )\n self.num_mels = audio_params[\"num_mel_bins\"]\n self.use_tf = use_tf\n self.load_mel = 
load_mel\n self.no_chunks = no_chunks\n self.use_lws = use_lws\n\n def get_mel(self, audio):\n \"\"\"\n :param audio:\n :return: return mel array [F, T] e.g. [80, T]\n \"\"\"\n mel = self.audio_processor.compute_spectrum(audio)\n if self.use_tf:\n mel = mel.numpy()\n return mel\n\n def preemphasis(self, audio):\n padded_audio = np.pad(audio[:-1], (1, 0), 'constant')\n return -padded_audio * self.preemphasis_coeff + audio\n\n def __getitem__(self, index):\n # Read audio\n audio_filename, mel_filename = self.audio_files[index]\n\n audio, sample_rate = utils.load_wav(audio_filename)\n pad_size = self.window_size - self.window_step\n left_pad = pad_size\n right_pad = pad_size + self.window_step - len(audio) % self.window_step\n audio = np.pad(audio, (left_pad, right_pad), mode=\"constant\", constant_values=0)\n audio /= np.abs(audio).max()\n\n if self.apply_preemphasis:\n audio = self.preemphasis(audio)\n audio /= np.abs(audio).max()\n\n if sample_rate != self.sample_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sample_rate, self.sample_rate))\n if self.no_chunks:\n if self.load_mel:\n mel = np.load(mel_filename).T\n else:\n # as by default lws always pad from left and right\n mel = self.get_mel(audio[left_pad:-right_pad])\n else:\n if mel_filename != \"\" and self.load_mel:\n if self.segment_length % self.window_step != 0:\n raise ValueError(\"Hop length should be a divider of segment length\")\n mel = np.load(mel_filename)\n mel = np.clip(mel, -self.audio_params[\"max_abs_value\"], self.audio_params[\"max_abs_value\"])\n # Take segment\n if mel.shape[0] >= self.mel_segment_length:\n max_mel_start = mel.shape[0] - self.mel_segment_length\n mel_start = random.randint(0, max_mel_start)\n mel = mel[mel_start: mel_start + self.mel_segment_length]\n assert mel.shape[0] == self.mel_segment_length\n audio_start = mel_start * self.window_step\n audio = audio[audio_start: audio_start + self.segment_length]\n assert audio.shape[0] == self.segment_length\n else:\n audio = np.pad(audio, (0, self.segment_length - audio.shape[0]), 'constant')\n mel = np.pad(mel, (0, 0, 0, self.mel_segment_length - mel.shape[0]), 'constant')\n else:\n if audio.shape[0] >= self.segment_length:\n max_audio_start = audio.shape[0] - self.segment_length\n audio_start = random.randint(0, max_audio_start)\n audio = audio[audio_start:audio_start + self.segment_length]\n else:\n audio = np.pad(audio, (0, self.segment_length - audio.shape[0]), 'constant')\n mel = self.get_mel(audio)\n\n mel_length = min(mel.shape[1], len(audio) // self.window_step)\n mel = mel[:, :mel_length]\n audio = audio[:mel_length * self.window_step]\n # as we want to apply transpose convolution\n assert len(audio) // self.window_step == mel.shape[1]\n mel = torch.FloatTensor(mel)\n audio = torch.FloatTensor(audio)\n audio = utils.mu_law_encode(audio, self.mu_quantization)\n return mel, audio\n\n def __len__(self):\n return len(self.audio_files)\n" ]
[ [ "tensorflow.enable_eager_execution", "numpy.pad", "numpy.abs", "numpy.clip", "tensorflow.ConfigProto", "numpy.ceil", "torch.FloatTensor", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IvanBongiorni/RNN-GAN_Timeseries-imputation
[ "4af0a7077d145f7726e42c4df623dfb0cd227d1c" ]
[ "model.py" ]
[ "\"\"\"\nAuthor: Ivan Bongiorni, https://github.com/IvanBongiorni\n2020-03-19\n\nModels implementation.\n\"\"\"\nimport tensorflow as tf\n\n\ndef build_vanilla_seq2seq(params):\n \"\"\"\n Implements a seq2seq RNN with Convolutional self attention. It keeps a canonical\n Encoder-Decoder structure: an Embedding layers receives the sequence of chars and\n learns a representation. This series is received by two different layers at the same time.\n First, an LSTM Encoder layer, whose output is repeated and sent to the Decoder. Second, a\n block of 1D Conv layers. Their kernel filters work as multi-head self attention layers.\n All their scores are pushed through a TanH gate that scales each score in the [-1,1] range.\n Both LSTM and Conv outputs are concatenated and sent to an LSTM Decoder, that processes\n the signal and sents it to Dense layers, performing the prediction for each step of the\n output series.\n\n Args: params dict\n \"\"\"\n from tensorflow.keras.models import Model\n from tensorflow.keras.layers import (\n Input, LSTM, RepeatVector, Conv1D, BatchNormalization,\n Concatenate, TimeDistributed, Dense\n )\n\n ## ENCODER\n encoder_input = Input((params['len_input'], 17))\n\n # LSTM block\n encoder_lstm = LSTM(units = params['encoder_lstm_units'])(encoder_input)\n output_lstm = RepeatVector(params['len_input'])(encoder_lstm)\n\n # Conv block\n conv_1 = Conv1D(\n filters = params['conv_filters'],\n kernel_size = params['kernel_size'],\n activation = params['conv_activation'],\n kernel_initializer = params['conv_initializer'],\n padding = 'same')(encoder_input)\n if params['use_batchnorm']:\n conv_1 = BatchNormalization()(conv_1)\n conv_2 = Conv1D(\n filters = params['conv_filters'],\n kernel_size = params['kernel_size'],\n activation = params['conv_activation'],\n kernel_initializer = params['conv_initializer'],\n padding = 'same')(conv_1)\n if params['use_batchnorm']:\n conv_2 = BatchNormalization()(conv_2)\n conv_3 = Conv1D(\n filters = params['conv_filters'],\n kernel_size = params['kernel_size'],\n activation = params['conv_activation'],\n kernel_initializer = params['conv_initializer'],\n padding = 'same')(conv_2)\n if params['use_batchnorm']:\n conv_3 = BatchNormalization()(conv_3)\n conv_4 = Conv1D(\n filters = params['conv_filters'],\n kernel_size = params['kernel_size'],\n activation = params['conv_activation'],\n kernel_initializer = params['conv_initializer'],\n padding = 'same')(conv_3)\n if params['use_batchnorm']:\n conv_4 = BatchNormalization()(conv_4)\n\n\n # Concatenate LSTM and Conv Encoder outputs for Decoder LSTM layer\n encoder_output = Concatenate(axis = -1)([output_lstm, conv_2])\n\n decoder_lstm = LSTM(params['decoder_dense_units'], return_sequences = True)(encoder_output)\n\n decoder_output = TimeDistributed(\n Dense(units = 1,\n activation = params['decoder_output_activation'],\n kernel_initializer = params['decoder_dense_initializer']))(decoder_lstm)\n\n seq2seq = Model(inputs = [encoder_input], outputs = [decoder_output])\n\n return seq2seq\n\n\ndef build_discriminator(params):\n '''\n Discriminator is based on the Vanilla seq2seq Encoder. 
The Decoder is removed\n and a Dense layer is left instead to perform binary classification.\n '''\n from tensorflow.keras.models import Model\n from tensorflow.keras.layers import (\n Input, LSTM, RepeatVector, Conv1D, BatchNormalization,\n Concatenate, Flatten, TimeDistributed, Dense\n )\n\n ## ENCODER\n encoder_input = Input((None, 17))\n\n # LSTM block\n encoder_lstm = LSTM(units = params['encoder_lstm_units'])(encoder_input)\n output_lstm = RepeatVector(params['len_input'])(encoder_lstm)\n\n # Conv block\n conv_1 = Conv1D(\n filters = params['conv_filters'],\n kernel_size = params['kernel_size'],\n activation = params['conv_activation'],\n kernel_initializer = params['conv_initializer'],\n padding = 'same')(encoder_input)\n if params['use_batchnorm']:\n conv_1 = BatchNormalization()(conv_1)\n conv_2 = Conv1D(\n filters = params['conv_filters'],\n kernel_size = params['kernel_size'],\n activation = params['conv_activation'],\n kernel_initializer = params['conv_initializer'],\n padding = 'same')(conv_1)\n if params['use_batchnorm']:\n conv_2 = BatchNormalization()(conv_2)\n\n # Concatenate LSTM and Conv Encoder outputs and Flatten for Decoder LSTM layer\n encoder_output = Concatenate(axis = -1)([output_lstm, conv_2])\n encoder_output = Flatten()(encoder_output)\n\n # Final layer for binary classification (real/fake)\n discriminator_output = Dense(\n units = 1,\n activation = 'sigmoid',\n kernel_initializer = params['decoder_dense_initializer'])(encoder_output)\n\n Discriminator = Model(inputs = [encoder_input], outputs = [discriminator_output])\n\n return Discriminator\n\n\ndef build_GAN(params):\n '''\n This is just a wrapper in case the model is trained as a GAN. It calls the vanilla\n seq2seq Generator, and build_discriminator() for the Discriminator model.\n '''\n generator = build_vanilla_seq2seq(params)\n discriminator = build_discriminator(params)\n return generator, discriminator\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.RepeatVector", "tensorflow.keras.layers.LSTM", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
doutriaux1/ocgis
[ "989573258e6fcbeeb8b92d66bf5f6a43a34c2662" ]
[ "src/ocgis/test/fragments/hostetler/test_hostetler.py" ]
[ "import unittest\nimport os.path\nimport netCDF4 as nc\nfrom ocg.meta.interface.interface import GlobalInterface\nfrom ocg.api.interp.iocg.dataset import OcgDataset\nfrom ocg.conv.csv_ import CsvConverter\nimport numpy as np\nfrom ocg.conv.shp import ShpConverter\nfrom ocg.conv.shpidx import ShpIdxConverter\nfrom ocg.conv.numpy_ import NumpyConverter\nfrom ocg.util.helpers import ShpIterator, get_shp_as_multi\nfrom collections import OrderedDict\nimport datetime\nfrom ocg.test.misc import gen_descriptor_classes\nfrom ocg.api.interp.interpreter import Interpreter\n\n\nDIR = ('/home/local/WX/ben.koziol/Dropbox/nesii/project/ocg/python/'\n 'OpenClimateGIS/bin/climate_data/hostetler')\nNCS = [\n ['RegCM3_Daily_srm_GFDL.ncml.nc','TG'],\n ['RegCM3_Daily_srm_NCEP.ncml.nc','RNFS']\n ]\nSHP = ('/home/local/WX/ben.koziol/Dropbox/nesii/project/'\n 'ocg/python/OpenClimateGIS/bin/shp/hostetler_aoi.shp')\n\ndef get_meta():\n meta = []\n for n in NCS:\n path = os.path.join(DIR,n[0])\n meta.append({'uri':path,'variable':n[1]})\n return([meta])\n\ndef get_geom():\n y = [None]\n y.append(get_shp_as_multi(SHP,'id'))\n return(y)\n \nOPTS = OrderedDict({\n 'meta':get_meta(),\n 'mode':['ocg'],\n 'time_range':[[datetime.datetime(1968,1,17),\n datetime.datetime(1968,1,17)]],\n 'level_range':[None],\n 'geom':get_geom(),\n 'output_format':['shp'],\n 'output_grouping':[None],\n 'spatial_operation':['intersects','clip'],\n 'aggregate':[True,False],\n 'calc_raw':[None],\n 'calc_grouping':[None],\n 'calc':[None]})\n\n\nclass TestHostetler(unittest.TestCase):\n \n def gen_nc(self):\n for n in NCS:\n yield(os.path.join(DIR,n[0]),n[1])\n \n def pass_test_subset(self):\n for uri,var in self.gen_nc():\n ods = OcgDataset(uri)\n time_range = [ods.i.temporal.time.value[0],\n ods.i.temporal.time.value[0]]\n coll = ods.subset(var,time_range=time_range)\n coll['vid'] = np.array(1)\n coll['value'] = {var:coll['value']}\n conv = NumpyConverter(coll,None,ods,base_name='ocg',wd=None,\n cengine=None,write_attr=False,write_agg=False)\n ret = conv.write()\n import ipdb;ipdb.set_trace()\n \n def test_interpreter(self):\n for desc in gen_descriptor_classes(niter=float('inf'),opts=OPTS):\n interp = Interpreter.get_interpreter(desc)\n if desc['aggregate'] is False and desc['geom'] is not None:\n ret = interp.execute()\n import ipdb;ipdb.set_trace()\n \n# def test_interface(self):\n# for n in self.gen_nc():\n# ds = nc.Dataset(n,'r')\n# ii = GlobalInterface(ds)\n# import ipdb;ipdb.set_trace()\n \n \nif __name__ == \"__main__\":\n# import sys;sys.argv = ['', 'TestSimpleMultiCalc01.test']\n unittest.main()" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HelenR6/imagenet-r
[ "3131cacee97c407bd2ecea15846a67817a64f924" ]
[ "utils.py" ]
[ "import numpy as np\nimport sklearn.metrics as sk\nimport sklearn.neighbors\nimport sklearn.ensemble\nimport time\n\nrecall_level_default = 0.95\n\n\n\ndef calib_err(confidence, correct, p='2', beta=100):\n # beta is target bin size\n idxs = np.argsort(confidence)\n confidence = confidence[idxs]\n correct = correct[idxs]\n bins = [[i * beta, (i + 1) * beta] for i in range(len(confidence) // beta)]\n bins[-1] = [bins[-1][0], len(confidence)]\n\n cerr = 0\n total_examples = len(confidence)\n for i in range(len(bins) - 1):\n bin_confidence = confidence[bins[i][0]:bins[i][1]]\n bin_correct = correct[bins[i][0]:bins[i][1]]\n num_examples_in_bin = len(bin_confidence)\n\n if num_examples_in_bin > 0:\n difference = np.abs(np.nanmean(bin_confidence) - np.nanmean(bin_correct))\n\n if p == '2':\n cerr += num_examples_in_bin / total_examples * np.square(difference)\n elif p == '1':\n cerr += num_examples_in_bin / total_examples * difference\n elif p == 'infty' or p == 'infinity' or p == 'max':\n cerr = np.maximum(cerr, difference)\n else:\n assert False, \"p must be '1', '2', or 'infty'\"\n\n if p == '2':\n cerr = np.sqrt(cerr)\n\n return cerr\n\n\ndef aurra(confidence, correct):\n conf_ranks = np.argsort(confidence)[::-1] # indices from greatest to least confidence\n rra_curve = np.cumsum(np.asarray(correct)[conf_ranks])\n rra_curve = rra_curve / np.arange(1, len(rra_curve) + 1) # accuracy at each response rate\n return np.mean(rra_curve)\n\n\ndef soft_f1(confidence, correct):\n wrong = 1 - correct\n\n # # the incorrectly classified samples are our interest\n # # so they make the positive class\n # tp_soft = np.sum((1 - confidence) * wrong)\n # fp_soft = np.sum((1 - confidence) * correct)\n # fn_soft = np.sum(confidence * wrong)\n\n # return 2 * tp_soft / (2 * tp_soft + fn_soft + fp_soft)\n return 2 * ((1 - confidence) * wrong).sum()/(1 - confidence + wrong).sum()\n\n\ndef tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):\n logits = np.array(logits)\n\n if binary_search:\n import torch\n import torch.nn.functional as F\n\n logits = torch.FloatTensor(logits)\n labels = torch.LongTensor(labels)\n t_guess = torch.FloatTensor([0.5*(lower + upper)]).requires_grad_()\n\n while upper - lower > eps:\n if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:\n upper = 0.5 * (lower + upper)\n else:\n lower = 0.5 * (lower + upper)\n t_guess = t_guess * 0 + 0.5 * (lower + upper)\n\n t = min([lower, 0.5 * (lower + upper), upper], key=lambda x: float(F.cross_entropy(logits / x, labels)))\n else:\n import cvxpy as cx\n\n set_size = np.array(logits).shape[0]\n\n t = cx.Variable()\n\n expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)\n for i in range(set_size)))\n p = cx.Problem(expr, [lower <= t, t <= upper])\n\n p.solve() # p.solve(solver=cx.SCS)\n t = 1 / t.value\n\n return t\n\n\ndef get_measures(confidence, correct):\n rms = calib_err(confidence, correct, p='2')\n aurra_metric = aurra(confidence, correct)\n mad = calib_err(confidence, correct, p='1') # secondary metric\n sf1 = soft_f1(confidence, correct) # secondary metric\n\n return rms, aurra_metric, mad, sf1\n\n\ndef print_measures(rms, aurra_metric, mad, sf1, method_name='Baseline'):\n print('\\t\\t\\t\\t\\t\\t\\t' + method_name)\n print('RMS Calib Error (%): \\t\\t{:.2f}'.format(100 * rms))\n print('AURRA (%): \\t\\t\\t{:.2f}'.format(100 * aurra))\n # print('MAD Calib Error (%): \\t\\t{:.2f}'.format(100 * mad))\n # print('Soft F1 Score (%): 
\\t\\t{:.2f}'.format(100 * sf1))\n\n\ndef show_calibration_results(confidence, correct):\n\n print('RMS Calib Error (%): \\t{:.2f}'.format(\n 100 * calib_err(confidence, correct, p='2')))\n\n print('AURRA (%): \\t\\t{:.2f}'.format(\n 100 * aurra(confidence, correct)))\n\n # print('MAD Calib Error (%): \\t\\t{:.2f}'.format(\n # 100 * calib_err(confidence, correct, p='1')))\n\n # print('Soft F1-Score (%): \\t\\t{:.2f}'.format(\n # 100 * soft_f1(confidence, correct))\n\n\n\ndef stable_cumsum(arr, rtol=1e-05, atol=1e-08):\n \"\"\"Use high precision for cumsum and check that final value matches sum\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat\n rtol : float\n Relative tolerance, see ``np.allclose``\n atol : float\n Absolute tolerance, see ``np.allclose``\n \"\"\"\n out = np.cumsum(arr, dtype=np.float64)\n expected = np.sum(arr, dtype=np.float64)\n if not np.allclose(out[-1], expected, rtol=rtol, atol=atol):\n raise RuntimeError('cumsum was found to be unstable: '\n 'its last element does not correspond to sum')\n return out\n\ndef fpr_and_fdr_at_recall(y_true, y_score, recall_level=recall_level_default, pos_label=None):\n classes = np.unique(y_true)\n if (pos_label is None and\n not (np.array_equal(classes, [0, 1]) or\n np.array_equal(classes, [-1, 1]) or\n np.array_equal(classes, [0]) or\n np.array_equal(classes, [-1]) or\n np.array_equal(classes, [1]))):\n raise ValueError(\"Data is not binary and pos_label is not specified\")\n elif pos_label is None:\n pos_label = 1.\n\n # make y_true a boolean vector\n y_true = (y_true == pos_label)\n\n # sort scores and corresponding truth values\n desc_score_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_score = y_score[desc_score_indices]\n y_true = y_true[desc_score_indices]\n\n # y_score typically has many tied values. Here we extract\n # the indices associated with the distinct values. 
We also\n # concatenate a value for the end of the curve.\n distinct_value_indices = np.where(np.diff(y_score))[0]\n threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]\n\n # accumulate the true positives with decreasing threshold\n tps = stable_cumsum(y_true)[threshold_idxs]\n fps = 1 + threshold_idxs - tps # add one because of zero-based indexing\n\n thresholds = y_score[threshold_idxs]\n\n recall = tps / tps[-1]\n\n last_ind = tps.searchsorted(tps[-1])\n sl = slice(last_ind, None, -1) # [last_ind::-1]\n recall, fps, tps, thresholds = np.r_[recall[sl], 1], np.r_[fps[sl], 0], np.r_[tps[sl], 0], thresholds[sl]\n\n cutoff = np.argmin(np.abs(recall - recall_level))\n\n return fps[cutoff] / (np.sum(np.logical_not(y_true))) # , fps[cutoff]/(fps[cutoff] + tps[cutoff])\n\ndef get_measures(_pos, _neg, recall_level=recall_level_default):\n pos = np.array(_pos[:]).reshape((-1, 1))\n neg = np.array(_neg[:]).reshape((-1, 1))\n examples = np.squeeze(np.vstack((pos, neg)))\n labels = np.zeros(len(examples), dtype=np.int32)\n labels[:len(pos)] += 1\n\n auroc = sk.roc_auc_score(labels, examples)\n aupr = sk.average_precision_score(labels, examples)\n fpr = fpr_and_fdr_at_recall(labels, examples, recall_level)\n\n return auroc, aupr, fpr\n\n\ndef print_measures_old(auroc, aupr, fpr, method_name='Ours', recall_level=recall_level_default):\n print('\\t\\t\\t' + method_name)\n print('FPR{:d}:\\t{:.2f}'.format(int(100 * recall_level), 100 * fpr))\n print('AUROC: \\t{:.2f}'.format(100 * auroc))\n print('AUPR: \\t{:.2f}'.format(100 * aupr))\n\n\ndef print_measures_with_std(aurocs, auprs, fprs, method_name='Ours', recall_level=recall_level_default):\n print('\\t\\t\\t' + method_name)\n print('FPR{:d}:\\t{:.2f}\\t+/- {:.2f}'.format(int(100 * recall_level), 100 * np.mean(fprs), 100 * np.std(fprs)))\n print('AUROC: \\t{:.2f}\\t+/- {:.2f}'.format(100 * np.mean(aurocs), 100 * np.std(aurocs)))\n print('AUPR: \\t{:.2f}\\t+/- {:.2f}'.format(100 * np.mean(auprs), 100 * np.std(auprs)))\n\n\ndef get_and_print_results(out_score, in_score, num_to_avg=1):\n\n aurocs, auprs, fprs = [], [], []\n #for _ in range(num_to_avg):\n # out_score = get_ood_scores(ood_loader)\n measures = get_measures(out_score, in_score)\n aurocs.append(measures[0]); auprs.append(measures[1]); fprs.append(measures[2])\n\n auroc = np.mean(aurocs); aupr = np.mean(auprs); fpr = np.mean(fprs)\n #auroc_list.append(auroc); aupr_list.append(aupr); fpr_list.append(fpr)\n\n #if num_to_avg >= 5:\n # print_measures_with_std(aurocs, auprs, fprs, method_name='Ours')\n #else:\n # print_measures(auroc, aupr, fpr, method_name='Ours')\n return auroc, aupr, fpr\n" ]
[ [ "sklearn.metrics.roc_auc_score", "numpy.sqrt", "numpy.asarray", "numpy.cumsum", "numpy.mean", "torch.FloatTensor", "numpy.nanmean", "numpy.square", "numpy.allclose", "numpy.unique", "numpy.std", "numpy.diff", "numpy.logical_not", "torch.LongTensor", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.abs", "numpy.array_equal", "torch.nn.functional.cross_entropy", "sklearn.metrics.average_precision_score", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hedibejaoui/spark-timeseries
[ "9112dcbbba4e095b5eb46c568e1c72e13e1f251a" ]
[ "python3/sparkts/test/test_datetimeindex.py" ]
[ "from .test_utils import PySparkTestCase\nfrom sparkts.datetimeindex import *\nimport pandas as pd\n\nclass DateTimeIndexTestCase(PySparkTestCase):\n def test_frequencies(self):\n bd = BusinessDayFrequency(1, 1, self.sc)\n self.assertEqual(bd.days(), 1)\n \n hf = HourFrequency(4, self.sc)\n self.assertEqual(hf.hours(), 4)\n \n def test_uniform(self):\n freq = DayFrequency(3, self.sc)\n self.assertEqual(freq.days(), 3)\n start = '2015-04-10'\n index = uniform(start, periods=5, freq=freq, sc=self.sc)\n index2 = uniform(start, end='2015-04-22', freq=freq, sc=self.sc)\n self.assertEqual(index, index2)\n \n self.assertEqual(len(index), 5)\n self.assertEqual(index.first(), pd.to_datetime('2015-04-10'))\n self.assertEqual(index.last(), pd.to_datetime('2015-04-22'))\n subbydate = index[pd.to_datetime('2015-04-13'):pd.to_datetime('2015-04-19')]\n subbyloc = index.islice(1, 4)\n self.assertEqual(subbydate, subbyloc)\n self.assertEqual(subbydate.first(), pd.to_datetime('2015-04-13'))\n self.assertEqual(subbydate.last(), pd.to_datetime('2015-04-19'))\n self.assertEqual(subbydate.datetime_at_loc(0), pd.to_datetime('2015-04-13'))\n self.assertEqual(subbydate[pd.to_datetime('2015-04-13')], 0)\n\n def test_irregular(self):\n pd_index = pd.date_range('2015-04-10', periods=5, freq='3D')\n dt_index = irregular(pd_index, self.sc)\n\n self.assertEqual(len(dt_index), 5)\n self.assertEqual(dt_index.first(), pd.to_datetime('2015-04-10'))\n self.assertEqual(dt_index.last(), pd.to_datetime('2015-04-22'))\n subbydate = dt_index[pd.to_datetime('2015-04-13'):pd.to_datetime('2015-04-19')]\n subbyloc = dt_index.islice(1, 4)\n self.assertEqual(subbydate, subbyloc)\n self.assertEqual(subbydate.first(), pd.to_datetime('2015-04-13'))\n self.assertEqual(subbydate.last(), pd.to_datetime('2015-04-19'))\n self.assertEqual(subbydate.datetime_at_loc(0), pd.to_datetime('2015-04-13'))\n self.assertEqual(subbydate[pd.to_datetime('2015-04-13')], 0)\n\n pd_index2 = dt_index.to_pandas_index()\n self.assertTrue(pd_index.equals(pd_index2), str(pd_index) + \"!=\" + str(pd_index2))\n\n" ]
[ [ "pandas.to_datetime", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
yingxinff-source/DeepRecSys
[ "bf781e112492563631e89fb1fba3431601f1331c" ]
[ "deep_recommend/other/attention/attention.py" ]
[ "'''\n@Description: \n@version: \n@License: MIT\n@Author: Wang Yao\n@Date: 2020-04-02 14:19:14\n@LastEditors: Wang Yao\n@LastEditTime: 2020-04-02 14:20:01\n'''\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.layers import Layer\n\n\nclass Attention(Layer):\n\n def __init__(self, **kwargs):\n super(Attention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.attetion = self.add_weight(\n shape=(input_shape[1],),\n initializer='glorot_uniform',\n trainable=True,\n name='attetion')\n super(Attention, self).build(input_shape)\n\n def call(self, inputs):\n attetion = K.softmax(self.attetion)\n outputs = K.dot(attetion, inputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[-1])\n" ]
[ [ "tensorflow.keras.backend.dot", "tensorflow.keras.backend.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
wanglikuan/PFL-Non-IID-baselines
[ "0d73765847cfcbd94ad065ce51957b11c215d657" ]
[ "generate_niid_class.py" ]
[ "from tqdm import trange\nimport numpy as np\nimport random\nimport json\nimport os\nimport argparse\nfrom torchvision.datasets import CIFAR10\nimport torch\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n# from data.Mnist.multi_mnist_loader import MNIST\n\nrandom.seed(42)\nnp.random.seed(42)\n\ndef rearrange_data_by_class(data, targets, n_class):\n new_data = []\n for i in trange(n_class):\n # idx = targets[0] == i\n idx = targets == i\n new_data.append(data[idx])\n return new_data\n\ndef get_dataset(mode='train'):\n # transform = transforms.Compose(\n # [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n\n transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) #CIFAR10\n\n dataset = CIFAR10(root='./data', train=True if mode=='train' else False, download=True, transform=transform)\n # dataset = MNIST(root='.', train=True, download=True, transform=transform, multi=True)\n n_sample = len(dataset.data)\n # print('n_sample:', n_sample)\n # SRC_N_CLASS = 10\n SRC_N_CLASS = len(dataset.classes)\n # full batch\n trainloader = DataLoader(dataset, batch_size=n_sample, shuffle=False)\n\n print(\"Loading data from storage ...\")\n for xy in trainloader:\n dataset.data, dataset.targets = xy\n # mdata, target, targets = xy\n\n # # dataset.targets = [original_targets, target_0, ..., target_9]\n # target = target[0].cpu().detach().numpy()\n # for t_idx in range(len(targets)):\n # targets[t_idx] = targets[t_idx].cpu().detach().numpy()\n # # for t_idx in range(len(multi_targets[1])):\n # # multi_targets[1][t_idx] = multi_targets[1][t_idx].cpu().detach().numpy()\n #\n # multi_targets = [target, targets]\n\n # print(\"Rearrange data by class...\")\n # data_by_class = rearrange_data_by_class(\n # mdata.cpu().detach().numpy(),\n # multi_targets,\n # SRC_N_CLASS\n # )\n print(\"Rearrange data by class...\")\n data_by_class = rearrange_data_by_class(\n dataset.data.cpu().detach().numpy(),\n dataset.targets.cpu().detach().numpy(),\n SRC_N_CLASS\n )\n print(f\"{mode.upper()} SET:\\n Total #samples: {n_sample}. 
sample shape: {dataset.data[0].shape}\")\n print(\" #samples per class:\\n\", [len(v) for v in data_by_class])\n\n return data_by_class, n_sample, SRC_N_CLASS\n\ndef sample_class(SRC_N_CLASS, NUM_LABELS, user_id, label_random=False):\n assert NUM_LABELS <= SRC_N_CLASS\n if label_random:\n source_classes = [n for n in range(SRC_N_CLASS)]\n random.shuffle(source_classes)\n return source_classes[:NUM_LABELS]\n else:\n return [(user_id + j) % SRC_N_CLASS for j in range(NUM_LABELS)]\n\n\n# each client contains two class\ndef divide_train_data(data, n_sample, SRC_CLASSES, NUM_USERS, min_sample, class_per_client=2):\n min_sample = 10#len(SRC_CLASSES) * min_sample\n min_size = 0 # track minimal samples per user\n ###### Determine Sampling #######\n while min_size < min_sample:\n # print(\"Try to find valid data separation\")\n idx_batch=[{} for _ in range(NUM_USERS)]\n for u in range(NUM_USERS):\n for l in range(len(SRC_CLASSES)):\n idx_batch[u][l] = []\n samples_per_user = [0 for _ in range(NUM_USERS)]\n max_samples_per_user = n_sample / NUM_USERS # 60000/20 = 3000\n class_num_client = [class_per_client for _ in range(NUM_USERS)]\n for l in range(len(SRC_CLASSES)):\n selected_clients = [] #\n for client in range(NUM_USERS):\n if class_num_client[client] > 0:\n selected_clients.append(client)\n selected_clients = selected_clients[:int(NUM_USERS / len(SRC_CLASSES) * class_per_client)]\n\n num_all = len(data[l])\n num_clients_ = int(NUM_USERS/len(SRC_CLASSES)*class_per_client)\n num_per = num_all / num_clients_\n num_samples = np.random.randint(max(num_per/10, 16), num_per, num_clients_-1).tolist()\n num_samples.append(num_all - sum(num_samples))\n\n if True:\n # each client is not sure to have all the labels\n selected_clients = list(np.random.choice(selected_clients, num_clients_, replace=False))\n\n idx = 0\n # get indices for all that label\n idx_l = [i for i in range(len(data[l]))]\n np.random.shuffle(idx_l)\n for client, num_sample in zip(selected_clients, num_samples):\n idx_batch[client][l] = np.random.choice(idx_l, num_sample)\n samples_per_user[client] += len(idx_batch[client][l])\n idx += num_sample\n class_num_client[client] -= 1\n\n # samples_for_l = int(np.random.randint(max_samples_per_user, int(len(data[l]))))\n # idx_l = idx_l[:samples_for_l]\n # # participate data of that label\n # # for u, new_idx in enumerate(np.split(idx_l, proportions)):\n # # # add new idex to the user\n # # idx_batch[u][l] = new_idx.tolist()\n # # samples_per_user[u] += len(idx_batch[u][l])\n min_size = min(samples_per_user)\n\n ###### CREATE USER DATA SPLIT #######\n X = [[] for _ in range(NUM_USERS)]\n y = [[] for _ in range(NUM_USERS)]\n Labels=[set() for _ in range(NUM_USERS)]\n print(\"processing users...\")\n for u, user_idx_batch in enumerate(idx_batch):\n for l, indices in user_idx_batch.items():\n if len(indices) == 0: continue\n X[u] += data[l][indices].tolist()\n y[u] += (l * np.ones(len(indices))).tolist()\n Labels[u].add(l)\n\n return X, y, Labels, idx_batch, samples_per_user\n\ndef divide_test_data(NUM_USERS, SRC_CLASSES, test_data, Labels, unknown_test):\n # Create TEST data for each user.\n test_X = [[] for _ in range(NUM_USERS)]\n test_y = [[] for _ in range(NUM_USERS)]\n idx = {l: 0 for l in SRC_CLASSES}\n for user in trange(NUM_USERS):\n if unknown_test: # use all available labels\n user_sampled_labels = SRC_CLASSES\n else:\n user_sampled_labels = list(Labels[user])\n for l in user_sampled_labels:\n num_samples = int(len(test_data[l]) / NUM_USERS )\n assert num_samples + idx[l] <= 
len(test_data[l])\n test_X[user] += test_data[l][idx[l]:idx[l] + num_samples].tolist()\n test_y[user] += (l * np.ones(num_samples)).tolist()\n assert len(test_X[user]) == len(test_y[user]), f\"{len(test_X[user])} == {len(test_y[user])}\"\n idx[l] += num_samples\n return test_X, test_y\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--format\", \"-f\", type=str, default=\"pt\", help=\"Format of saving: pt (torch.save), json\", choices=[\"pt\", \"json\"])\n parser.add_argument(\"--n_class\", type=int, default=10, help=\"number of classification labels\")\n parser.add_argument(\"--class_per_client\", type=int, default=2, help=\"number of classes for each client\")\n parser.add_argument(\"--min_sample\", type=int, default=10, help=\"Min number of samples per user.\")\n # parser.add_argument(\"--sampling_ratio\", type=float, default=0.1, help=\"Ratio for sampling training samples.\")\n parser.add_argument(\"--unknown_test\", type=int, default=0, help=\"Whether allow test label unseen for each user.\")\n # parser.add_argument(\"--alpha\", type=float, default=0.1, help=\"alpha in Dirichelt distribution (smaller means larger heterogeneity)\")\n parser.add_argument(\"--n_user\", type=int, default=20,\n help=\"number of local clients, should be muitiple of 10.\")\n args = parser.parse_args()\n print()\n print(\"Number of users: {}\".format(args.n_user))\n print(\"Number of classes: {}\".format(args.n_class))\n print(\"Min # of samples per uesr: {}\".format(args.min_sample))\n # print(\"Alpha for Dirichlet Distribution: {}\".format(args.alpha))\n # print(\"Ratio for Sampling Training Data: {}\".format(args.sampling_ratio))\n NUM_USERS = args.n_user\n\n # Setup directory for train/test data\n path_prefix = f'u{args.n_user}c{args.n_class}-class{args.class_per_client}'\n\n def process_user_data(mode, data, n_sample, SRC_CLASSES, Labels=None, unknown_test=0):\n if mode == 'train':\n X, y, Labels, idx_batch, samples_per_user = divide_train_data(\n data, n_sample, SRC_CLASSES, NUM_USERS, args.min_sample, args.class_per_client)\n if mode == 'test':\n assert Labels != None or unknown_test\n X, y = divide_test_data(NUM_USERS, SRC_CLASSES, data, Labels, unknown_test)\n dataset={'users': [], 'user_data': {}, 'num_samples': []}\n for i in range(NUM_USERS):\n uname='f_{0:05d}'.format(i)\n dataset['users'].append(uname)\n\n # # re-label\n # length = len(y[i])\n # for label_idx in range(len(y[i])):\n # for class_idx in range(args.n_class):\n # if class_idx == y[i][label_idx]:\n # locals()['multi_label_' + str(class_idx)][label_idx] = 1\n # else:\n # locals()['multi_label_' + str(class_idx)][label_idx] = 0\n # multi_y = []\n # # multi_y.append(locals()['multi_label_' + str(class_idx)] for class_idx in range(args.n_class))\n # multi_y.append(multi_label_0)\n # multi_y.append(multi_label_1)\n # multi_y.append(multi_label_2)\n # multi_y.append(multi_label_3)\n # multi_y.append(multi_label_4)\n # multi_y.append(multi_label_5)\n # multi_y.append(multi_label_6)\n # multi_y.append(multi_label_7)\n # multi_y.append(multi_label_8)\n # multi_y.append(multi_label_9)\n # dataset['user_data'][uname]={\n # 'x': torch.tensor(X[i], dtype=torch.float32),\n # 'y': torch.tensor(multi_y, dtype=torch.int64)}\n dataset['user_data'][uname] = {\n 'x': torch.tensor(X[i], dtype=torch.float32),\n 'y': torch.tensor(y[i], dtype=torch.int64)}\n dataset['num_samples'].append(len(X[i]))\n\n print(\"{} #sample by user:\".format(mode.upper()), dataset['num_samples'])\n\n data_path=f'./{path_prefix}/{mode}'\n if not 
os.path.exists(data_path):\n os.makedirs(data_path)\n\n data_path=os.path.join(data_path, \"{}.\".format(mode) + args.format)\n if args.format == \"json\":\n raise NotImplementedError(\n \"json is not supported because the train_data/test_data uses the tensor instead of list and tensor cannot be saved into json.\")\n with open(data_path, 'w') as outfile:\n print(f\"Dumping train data => {data_path}\")\n json.dump(dataset, outfile)\n elif args.format == \"pt\":\n with open(data_path, 'wb') as outfile:\n print(f\"Dumping train data => {data_path}\")\n torch.save(dataset, outfile)\n if mode == 'train':\n for u in range(NUM_USERS):\n print(\"{} samples in total\".format(samples_per_user[u]))\n train_info = ''\n # train_idx_batch, train_samples_per_user\n n_samples_for_u = 0\n for l in sorted(list(Labels[u])):\n n_samples_for_l = len(idx_batch[u][l])\n n_samples_for_u += n_samples_for_l\n train_info += \"c={},n={}| \".format(l, n_samples_for_l)\n print(train_info)\n print(\"{} Labels/ {} Number of training samples for user [{}]:\".format(len(Labels[u]), n_samples_for_u, u))\n return Labels, idx_batch, samples_per_user\n\n\n print(f\"Reading source dataset.\")\n train_data, n_train_sample, SRC_N_CLASS = get_dataset(mode='train')\n test_data, n_test_sample, SRC_N_CLASS = get_dataset(mode='test')\n SRC_CLASSES=[l for l in range(SRC_N_CLASS)]\n # random.shuffle(SRC_CLASSES)\n print(\"{} labels in total.\".format(len(SRC_CLASSES)))\n Labels, idx_batch, samples_per_user = process_user_data('train', train_data, n_train_sample, SRC_CLASSES)\n process_user_data('test', test_data, n_test_sample, SRC_CLASSES, Labels=Labels, unknown_test=args.unknown_test)\n print(\"Finish Generating User samples\")\n\n for client in range(NUM_USERS):\n print(f\"Client {client}\\t Size of data: {samples_per_user[client]}\\t Labels: \", np.unique(Labels[client]))\n print(f\"\\t\\t Samples of labels: \", [len(idx_batch[client][i]) for i in idx_batch[client]])\n print(\"-\" * 50)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.random.seed", "numpy.random.choice", "numpy.unique", "torch.utils.data.DataLoader", "numpy.random.shuffle", "torch.tensor", "numpy.ones", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HaoranLv/Shelf-product-identification
[ "7302e6d535843dc7f9c1de81422ad3e68bb5e5c8" ]
[ "object_detector_retinanet/keras_retinanet/utils/transform.py" ]
[ "import numpy as np\n\nDEFAULT_PRNG = np.random\n\n\ndef colvec(*args):\n \"\"\" Create a numpy array representing a column vector. \"\"\"\n return np.array([args]).T\n\n\ndef transform_aabb(transform, aabb):\n \"\"\" Apply a transformation to an axis aligned bounding box.\n\n The result is a new AABB in the same coordinate system as the original AABB.\n The new AABB contains all corner points of the original AABB after applying the given transformation.\n\n Args\n transform: The transformation to apply.\n x1: The minimum x value of the AABB.\n y1: The minimum y value of the AABB.\n x2: The maximum x value of the AABB.\n y2: The maximum y value of the AABB.\n Returns\n The new AABB as tuple (x1, y1, x2, y2)\n \"\"\"\n x1, y1, x2, y2 = aabb\n # Transform all 4 corners of the AABB.\n points = transform.dot([\n [x1, x2, x1, x2],\n [y1, y2, y2, y1],\n [1, 1, 1, 1 ],\n ])\n\n # Extract the min and max corners again.\n min_corner = points.min(axis=1)\n max_corner = points.max(axis=1)\n\n return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]\n\n\ndef _random_vector(min, max, prng=DEFAULT_PRNG):\n \"\"\" Construct a random vector between min and max.\n Args\n min: the minimum value for each component\n max: the maximum value for each component\n \"\"\"\n min = np.array(min)\n max = np.array(max)\n assert min.shape == max.shape\n assert len(min.shape) == 1\n return prng.uniform(min, max)\n\n\ndef rotation(angle):\n \"\"\" Construct a homogeneous 2D rotation matrix.\n Args\n angle: the angle in radians\n Returns\n the rotation matrix as 3 by 3 numpy array\n \"\"\"\n return np.array([\n [np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]\n ])\n\n\ndef random_rotation(min, max, prng=DEFAULT_PRNG):\n \"\"\" Construct a random rotation between -max and max.\n Args\n min: a scalar for the minimum absolute angle in radians\n max: a scalar for the maximum absolute angle in radians\n prng: the pseudo-random number generator to use.\n Returns\n a homogeneous 3 by 3 rotation matrix\n \"\"\"\n return rotation(prng.uniform(min, max))\n\n\ndef translation(translation):\n \"\"\" Construct a homogeneous 2D translation matrix.\n # Arguments\n translation: the translation 2D vector\n # Returns\n the translation matrix as 3 by 3 numpy array\n \"\"\"\n return np.array([\n [1, 0, translation[0]],\n [0, 1, translation[1]],\n [0, 0, 1]\n ])\n\n\ndef random_translation(min, max, prng=DEFAULT_PRNG):\n \"\"\" Construct a random 2D translation between min and max.\n Args\n min: a 2D vector with the minimum translation for each dimension\n max: a 2D vector with the maximum translation for each dimension\n prng: the pseudo-random number generator to use.\n Returns\n a homogeneous 3 by 3 translation matrix\n \"\"\"\n return translation(_random_vector(min, max, prng))\n\n\ndef shear(angle):\n \"\"\" Construct a homogeneous 2D shear matrix.\n Args\n angle: the shear angle in radians\n Returns\n the shear matrix as 3 by 3 numpy array\n \"\"\"\n return np.array([\n [1, -np.sin(angle), 0],\n [0, np.cos(angle), 0],\n [0, 0, 1]\n ])\n\n\ndef random_shear(min, max, prng=DEFAULT_PRNG):\n \"\"\" Construct a random 2D shear matrix with shear angle between -max and max.\n Args\n min: the minimum shear angle in radians.\n max: the maximum shear angle in radians.\n prng: the pseudo-random number generator to use.\n Returns\n a homogeneous 3 by 3 shear matrix\n \"\"\"\n return shear(prng.uniform(min, max))\n\n\ndef scaling(factor):\n \"\"\" Construct a homogeneous 2D scaling matrix.\n Args\n 
factor: a 2D vector for X and Y scaling\n Returns\n the zoom matrix as 3 by 3 numpy array\n \"\"\"\n return np.array([\n [factor[0], 0, 0],\n [0, factor[1], 0],\n [0, 0, 1]\n ])\n\n\ndef random_scaling(min, max, prng=DEFAULT_PRNG):\n \"\"\" Construct a random 2D scale matrix between -max and max.\n Args\n min: a 2D vector containing the minimum scaling factor for X and Y.\n min: a 2D vector containing The maximum scaling factor for X and Y.\n prng: the pseudo-random number generator to use.\n Returns\n a homogeneous 3 by 3 scaling matrix\n \"\"\"\n return scaling(_random_vector(min, max, prng))\n\n\ndef random_flip(flip_x_chance, flip_y_chance, prng=DEFAULT_PRNG):\n \"\"\" Construct a transformation randomly containing X/Y flips (or not).\n Args\n flip_x_chance: The chance that the result will contain a flip along the X axis.\n flip_y_chance: The chance that the result will contain a flip along the Y axis.\n prng: The pseudo-random number generator to use.\n Returns\n a homogeneous 3 by 3 transformation matrix\n \"\"\"\n flip_x = prng.uniform(0, 1) < flip_x_chance\n flip_y = prng.uniform(0, 1) < flip_y_chance\n # 1 - 2 * bool gives 1 for False and -1 for True.\n return scaling((1 - 2 * flip_x, 1 - 2 * flip_y))\n\n\ndef change_transform_origin(transform, center):\n \"\"\" Create a new transform representing the same transformation,\n only with the origin of the linear part changed.\n Args\n transform: the transformation matrix\n center: the new origin of the transformation\n Returns\n translate(center) * transform * translate(-center)\n \"\"\"\n center = np.array(center)\n return np.linalg.multi_dot([translation(center), transform, translation(-center)])\n\n\ndef random_transform(\n min_rotation=0,\n max_rotation=0,\n min_translation=(0, 0),\n max_translation=(0, 0),\n min_shear=0,\n max_shear=0,\n min_scaling=(1, 1),\n max_scaling=(1, 1),\n flip_x_chance=0,\n flip_y_chance=0,\n prng=DEFAULT_PRNG\n):\n \"\"\" Create a random transformation.\n\n The transformation consists of the following operations in this order (from left to right):\n * rotation\n * translation\n * shear\n * scaling\n * flip x (if applied)\n * flip y (if applied)\n\n Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation\n as factor of the image size. 
So an X translation of 0.1 would translate the image by 10% of it's width.\n Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret\n the translation directly as pixel distances instead.\n\n Args\n min_rotation: The minimum rotation in radians for the transform as scalar.\n max_rotation: The maximum rotation in radians for the transform as scalar.\n min_translation: The minimum translation for the transform as 2D column vector.\n max_translation: The maximum translation for the transform as 2D column vector.\n min_shear: The minimum shear angle for the transform in radians.\n max_shear: The maximum shear angle for the transform in radians.\n min_scaling: The minimum scaling for the transform as 2D column vector.\n max_scaling: The maximum scaling for the transform as 2D column vector.\n flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.\n flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.\n prng: The pseudo-random number generator to use.\n \"\"\"\n return np.linalg.multi_dot([\n random_rotation(min_rotation, max_rotation, prng),\n random_translation(min_translation, max_translation, prng),\n random_shear(min_shear, max_shear, prng),\n random_scaling(min_scaling, max_scaling, prng),\n random_flip(flip_x_chance, flip_y_chance, prng)\n ])\n\n\ndef random_transform_generator(prng=None, **kwargs):\n \"\"\" Create a random transform generator.\n\n Uses a dedicated, newly created, properly seeded PRNG by default instead of the global DEFAULT_PRNG.\n\n The transformation consists of the following operations in this order (from left to right):\n * rotation\n * translation\n * shear\n * scaling\n * flip x (if applied)\n * flip y (if applied)\n\n Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation\n as factor of the image size. So an X translation of 0.1 would translate the image by 10% of it's width.\n Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret\n the translation directly as pixel distances instead.\n\n Args\n min_rotation: The minimum rotation in radians for the transform as scalar.\n max_rotation: The maximum rotation in radians for the transform as scalar.\n min_translation: The minimum translation for the transform as 2D column vector.\n max_translation: The maximum translation for the transform as 2D column vector.\n min_shear: The minimum shear angle for the transform in radians.\n max_shear: The maximum shear angle for the transform in radians.\n min_scaling: The minimum scaling for the transform as 2D column vector.\n max_scaling: The maximum scaling for the transform as 2D column vector.\n flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.\n flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.\n prng: The pseudo-random number generator to use.\n \"\"\"\n\n if prng is None:\n # RandomState automatically seeds using the best available method.\n prng = np.random.RandomState()\n\n while True:\n yield random_transform(prng=prng, **kwargs)\n" ]
[ [ "numpy.array", "numpy.cos", "numpy.random.RandomState", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ProfLeao/RMG-Py
[ "47c4e0b16ee274919ec24aa4160ac5e83cc9f0a9" ]
[ "rmgpy/solver/surfaceTest.py" ]
[ "#!/usr/bin/env python3\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2021 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\nimport unittest\nimport time\n\nimport numpy as np\n\nimport rmgpy.constants as constants\nfrom rmgpy.kinetics import SurfaceArrhenius, StickingCoefficient\nfrom rmgpy.molecule import Molecule\nfrom rmgpy.reaction import Reaction\nfrom rmgpy.solver.surface import SurfaceReactor\nfrom rmgpy.species import Species\nfrom rmgpy.thermo import ThermoData, NASA, NASAPolynomial\n\n\n################################################################################\n\n\nclass SurfaceReactorCheck(unittest.TestCase):\n def test_solve_h2(self):\n \"\"\"\n Test the surface batch reactor with a dissociative adsorption of H2\n\n Here we choose a kinetic model consisting of the dissociative adsorption reaction\n H2 + 2X <=> 2 HX\n We use a SurfaceArrhenius for the rate expression.\n \"\"\"\n h2 = Species(\n molecule=[Molecule().from_smiles(\"[H][H]\")],\n thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([6.955, 6.955, 6.956, 6.961, 7.003, 7.103, 7.502], \"cal/(mol*K)\"),\n H298=(0, \"kcal/mol\"),\n S298=(31.129, \"cal/(mol*K)\")))\n x = Species(\n molecule=[Molecule().from_adjacency_list(\"1 X u0 p0\")],\n thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([0., 0., 0., 0., 0., 0., 0.], \"cal/(mol*K)\"),\n H298=(0.0, \"kcal/mol\"),\n S298=(0.0, \"cal/(mol*K)\")))\n hx = Species(\n molecule=[Molecule().from_adjacency_list(\"1 H u0 p0 {2,S} \\n 2 X u0 p0 {1,S}\")],\n thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([1.50, 2.58, 3.40, 4.00, 4.73, 5.13, 5.57], \"cal/(mol*K)\"),\n H298=(-11.26, \"kcal/mol\"),\n S298=(0.44, \"cal/(mol*K)\")))\n\n rxn1 = Reaction(reactants=[h2, x, x],\n products=[hx, hx],\n kinetics=SurfaceArrhenius(A=(9.05e18, 'cm^5/(mol^2*s)'),\n n=0.5,\n Ea=(5.0, 'kJ/mol'),\n T0=(1.0, 'K')))\n\n core_species = [h2, x, hx]\n edge_species = []\n core_reactions = [rxn1]\n edge_reactions = []\n\n T = 600\n P_initial = 1.0e5\n rxn_system = SurfaceReactor(\n T, P_initial,\n n_sims=1,\n initial_gas_mole_fractions={h2: 
1.0},\n initial_surface_coverages={x: 1.0},\n surface_volume_ratio=(1e1, 'm^-1'),\n surface_site_density=(2.72e-9, 'mol/cm^2'),\n termination=[])\n\n rxn_system.initialize_model(core_species, core_reactions, edge_species, edge_reactions)\n\n tlist = np.logspace(-13, -5, 81, dtype=np.float64)\n\n # Integrate to get the solution at each time point\n t = []\n y = []\n reaction_rates = []\n species_rates = []\n for t1 in tlist:\n rxn_system.advance(t1)\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y.append(rxn_system.y.copy())\n reaction_rates.append(rxn_system.core_reaction_rates.copy())\n species_rates.append(rxn_system.core_species_rates.copy())\n\n # Convert the solution vectors to np arrays\n t = np.array(t, np.float64)\n y = np.array(y, np.float64)\n reaction_rates = np.array(reaction_rates, np.float64)\n species_rates = np.array(species_rates, np.float64)\n total_sites = y[0,1]\n\n # Check that we're computing the species fluxes correctly\n for i in range(t.shape[0]):\n self.assertAlmostEqual(reaction_rates[i, 0], -1.0 * species_rates[i, 0],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], -0.5 * species_rates[i, 1],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], 0.5 * species_rates[i, 2],\n delta=1e-6 * reaction_rates[i, 0])\n\n # Check that we've reached equilibrium\n self.assertAlmostEqual(reaction_rates[-1, 0], 0.0, delta=1e-2)\n\n # # Visualize the simulation results\n # import pylab\n # fig = pylab.figure(figsize=(6, 6))\n # pylab.subplot(2, 1, 1)\n # pylab.semilogx(t, y[:, 2] / total_sites)\n # pylab.ylabel('Surface coverage')\n # pylab.legend(['HX'], loc=4)\n # pylab.subplot(2, 1, 2)\n # pylab.semilogx(t, species_rates)\n # pylab.legend(['H2', 'X', 'HX'], loc=4)\n # pylab.xlabel('Time (s)')\n # pylab.ylabel('Rate (mol/m$^\\\\mathdefault{3 or 2}$*s)')\n # # fig.subplots_adjust(left=0.21, bottom=0.10, right=0.95, top=0.95, wspace=0.20, hspace=0.35)\n # pylab.tight_layout()\n # # pylab.show()\n # pylab.savefig('surfaceTestH2.pdf')\n # return\n\n def test_solve_ch3(self):\n \"\"\"\n Test the surface batch reactor with a nondissociative adsorption of CH3\n\n Here we choose a kinetic model consisting of the adsorption reaction\n CH3 + X <=> CH3X\n We use a sticking coefficient for the rate expression.\n \"\"\"\n\n ch3 = Species(\n molecule=[Molecule().from_smiles(\"[CH3]\")],\n thermo=NASA(\n polynomials=[\n NASAPolynomial(\n coeffs=[3.91547, 0.00184155, 3.48741e-06, -3.32746e-09, 8.49953e-13, 16285.6, 0.351743],\n Tmin=(100, 'K'), Tmax=(1337.63, 'K')),\n NASAPolynomial(\n coeffs=[3.54146, 0.00476786, -1.82148e-06, 3.28876e-10, -2.22545e-14, 16224, 1.66032],\n Tmin=(1337.63, 'K'), Tmax=(5000, 'K'))],\n Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(135.382, 'kJ/mol'),\n comment=\"\"\"Thermo library: primaryThermoLibrary + radical(CH3)\"\"\"\n ),\n molecular_weight=(15.0345, 'amu'),\n )\n\n x = Species(\n molecule=[Molecule().from_adjacency_list(\"1 X u0 p0\")],\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0, 0, 0, 0, 0, 0, 0], Tmin=(298, 'K'), Tmax=(1000, 'K')),\n NASAPolynomial(coeffs=[0, 0, 0, 0, 0, 0, 0], Tmin=(1000, 'K'), Tmax=(2000, 'K'))],\n Tmin=(298, 'K'), Tmax=(2000, 'K'), E0=(-6.19426, 'kJ/mol'),\n comment=\"\"\"Thermo library: surfaceThermo\"\"\")\n )\n\n ch3x = Species(\n molecule=[Molecule().from_adjacency_list(\"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 H u0 p0 c0 {1,S}\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n5 X u0 p0 c0 
{1,S}\"\"\")],\n thermo=NASA(\n polynomials=[\n NASAPolynomial(\n coeffs=[-0.552219, 0.026442, -3.55617e-05, 2.60044e-08, -7.52707e-12, -4433.47, 0.692144],\n Tmin=(298, 'K'), Tmax=(1000, 'K')),\n NASAPolynomial(\n coeffs=[3.62557, 0.00739512, -2.43797e-06, 1.86159e-10, 3.6485e-14, -5187.22, -18.9668],\n Tmin=(1000, 'K'), Tmax=(2000, 'K'))],\n Tmin=(298, 'K'), Tmax=(2000, 'K'), E0=(-39.1285, 'kJ/mol'),\n comment=\"\"\"Thermo library: surfaceThermoNi111\"\"\")\n )\n\n rxn1 = Reaction(reactants=[ch3, x],\n products=[ch3x],\n kinetics=StickingCoefficient(\n A=0.1, n=0, Ea=(0, 'kcal/mol'), T0=(1, 'K'), Tmin=(200, 'K'), Tmax=(3000, 'K'),\n comment=\"\"\"Exact match found for rate rule (Adsorbate;VacantSite)\"\"\"\n )\n # kinetics=SurfaceArrhenius(A=(2.7e10, 'cm^3/(mol*s)'),\n # n=0.5,\n # Ea=(5.0, 'kJ/mol'),\n # T0=(1.0, 'K'))\n )\n core_species = [ch3, x, ch3x]\n edge_species = []\n core_reactions = [rxn1]\n edge_reactions = []\n\n T = 800.\n P_initial = 1.0e5\n rxn_system = SurfaceReactor(\n T, P_initial,\n n_sims=1,\n initial_gas_mole_fractions={ch3: 1.0},\n initial_surface_coverages={x: 1.0},\n surface_volume_ratio=(1., 'm^-1'),\n surface_site_density=(2.72e-9, 'mol/cm^2'),\n termination=[])\n # in chemkin, the sites are mostly occupied in about 1e-8 seconds.\n\n rxn_system.initialize_model(core_species, core_reactions, edge_species, edge_reactions)\n\n tlist = np.logspace(-13, -5, 81, dtype=np.float64)\n\n print(\"Surface site density:\", rxn_system.surface_site_density.value_si)\n\n print(\"rxn1 rate coefficient\",\n rxn1.get_surface_rate_coefficient(rxn_system.T.value_si, rxn_system.surface_site_density.value_si))\n\n # Integrate to get the solution at each time point\n t = []\n y = []\n reaction_rates = []\n species_rates = []\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y.append(rxn_system.y.copy())\n reaction_rates.append(rxn_system.core_reaction_rates.copy())\n species_rates.append(rxn_system.core_species_rates.copy())\n print(\"time: \", t)\n print(\"moles:\", y)\n print(\"reaction rates:\", reaction_rates)\n print(\"species rates:\", species_rates)\n for t1 in tlist:\n rxn_system.advance(t1)\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y.append(rxn_system.y.copy())\n reaction_rates.append(rxn_system.core_reaction_rates.copy())\n species_rates.append(rxn_system.core_species_rates.copy())\n\n # Convert the solution vectors to np arrays\n t = np.array(t, np.float64)\n y = np.array(y, np.float64)\n reaction_rates = np.array(reaction_rates, np.float64)\n species_rates = np.array(species_rates, np.float64)\n V = constants.R * rxn_system.T.value_si * np.sum(y) / rxn_system.P_initial.value_si\n\n # Check that we're computing the species fluxes correctly\n for i in range(t.shape[0]):\n self.assertAlmostEqual(reaction_rates[i, 0], -species_rates[i, 0],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], -species_rates[i, 1],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], species_rates[i, 2],\n delta=1e-6 * reaction_rates[i, 0])\n\n # Check that we've reached equilibrium by the end\n self.assertAlmostEqual(reaction_rates[-1, 0], 0.0, delta=1e-2)\n\n def test_solve_h2_coverage_dependence(self):\n \"\"\"\n Test the surface batch reactor can properly apply coverage dependent parameters\n with the dissociative adsorption of H2.\n\n Here we choose a kinetic model consisting of 
the dissociative adsorption reaction\n H2 + 2X <=> 2 HX\n We use a SurfaceArrhenius for the rate expression.\n \"\"\"\n h2 = Species(\n molecule=[Molecule().from_smiles(\"[H][H]\")],\n thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([6.955, 6.955, 6.956, 6.961, 7.003, 7.103, 7.502], \"cal/(mol*K)\"),\n H298=(0, \"kcal/mol\"),\n S298=(31.129, \"cal/(mol*K)\")))\n x = Species(\n molecule=[Molecule().from_adjacency_list(\"1 X u0 p0\")],\n thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([0., 0., 0., 0., 0., 0., 0.], \"cal/(mol*K)\"),\n H298=(0.0, \"kcal/mol\"),\n S298=(0.0, \"cal/(mol*K)\")))\n hx = Species(\n molecule=[Molecule().from_adjacency_list(\"1 H u0 p0 {2,S} \\n 2 X u0 p0 {1,S}\")],\n thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], \"K\"),\n Cpdata=([1.50, 2.58, 3.40, 4.00, 4.73, 5.13, 5.57], \"cal/(mol*K)\"),\n H298=(-11.26, \"kcal/mol\"),\n S298=(0.44, \"cal/(mol*K)\")))\n\n rxn1 = Reaction(reactants=[h2, x, x],\n products=[hx, hx],\n kinetics=SurfaceArrhenius(A=(9.05e18, 'cm^5/(mol^2*s)'),\n n=0.5,\n Ea=(5.0, 'kJ/mol'),\n T0=(1.0, 'K'),\n coverage_dependence={x: {'a': 0.0, 'm': -1.0, 'E': (0.0, 'J/mol')}}))\n\n rxn2 = Reaction(reactants=[h2, x, x],\n products=[hx, hx],\n kinetics=SurfaceArrhenius(A=(9.05e-18, 'cm^5/(mol^2*s)'), # 1e36 times slower\n n=0.5,\n Ea=(5.0, 'kJ/mol'),\n T0=(1.0, 'K'),\n coverage_dependence={x: {'a': 0.0, 'm': -1.0, 'E': (10.0, 'J/mol')}}\n ))\n\n core_species = [h2, x, hx]\n edge_species = []\n core_reactions = [rxn1]\n edge_reactions = []\n\n # make it slower, for benchmarking\n for j in range(200):\n core_species.append(hx.copy())\n for j in range(1000):\n core_reactions.append(rxn2)\n\n T = 600\n P_initial = 1.0e5\n rxn_system = SurfaceReactor(\n T, P_initial,\n n_sims=1,\n initial_gas_mole_fractions={h2: 1.0},\n initial_surface_coverages={x: 1.0},\n surface_volume_ratio=(1e1, 'm^-1'),\n surface_site_density=(2.72e-9, 'mol/cm^2'),\n coverage_dependence=True,\n termination=[])\n\n rxn_system.initialize_model(core_species, core_reactions, edge_species, edge_reactions)\n\n tlist = np.logspace(-13, -5, 81, dtype=np.float64)\n\n self.assertIsInstance(rxn1.kinetics.coverage_dependence, dict) # check to make sure coverage_dependence is still the correct type\n for species, parameters in rxn1.kinetics.coverage_dependence.items():\n self.assertIsInstance(species, Species) # species should be a Species\n self.assertIsInstance(parameters, dict)\n self.assertIsNotNone(parameters['a'])\n self.assertIsNotNone(parameters['m'])\n self.assertIsNotNone(parameters['E'])\n\n # Integrate to get the solution at each time point\n t = []\n y = []\n reaction_rates = []\n species_rates = []\n start_time = time.time()\n for t1 in tlist:\n rxn_system.advance(t1)\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y.append(rxn_system.y.copy())\n reaction_rates.append(rxn_system.core_reaction_rates.copy())\n species_rates.append(rxn_system.core_species_rates.copy())\n run_time = time.time() - start_time\n print(f\"Simulation took {run_time:.3e} seconds in {self.id()}\")\n\n # Convert the solution vectors to np arrays\n t = np.array(t, np.float64)\n y = np.array(y, np.float64)\n reaction_rates = np.array(reaction_rates, np.float64)\n species_rates = np.array(species_rates, np.float64)\n total_sites = y[0,1]\n\n # Check that we're computing the species fluxes correctly\n for i in range(t.shape[0]):\n 
self.assertAlmostEqual(reaction_rates[i, 0], -1.0 * species_rates[i, 0],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], -0.5 * species_rates[i, 1],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], 0.5 * species_rates[i, 2],\n delta=1e-6 * reaction_rates[i, 0])\n\n # Check that we've reached equilibrium\n self.assertAlmostEqual(reaction_rates[-1, 0], 0.0, delta=1e-2)\n\n def test_solve_ch3_coverage_dependence(self):\n \"\"\"\n Test the surface batch reactor can properly apply coverage dependent parameters\n with the nondissociative adsorption of CH3\n\n Here we choose a kinetic model consisting of the adsorption reaction\n CH3 + X <=> CH3X\n We use a sticking coefficient for the rate expression.\n \"\"\"\n\n ch3 = Species(\n molecule=[Molecule().from_smiles(\"[CH3]\")],\n thermo=NASA(\n polynomials=[\n NASAPolynomial(\n coeffs=[3.91547, 0.00184155, 3.48741e-06, -3.32746e-09, 8.49953e-13, 16285.6, 0.351743],\n Tmin=(100, 'K'), Tmax=(1337.63, 'K')),\n NASAPolynomial(\n coeffs=[3.54146, 0.00476786, -1.82148e-06, 3.28876e-10, -2.22545e-14, 16224, 1.66032],\n Tmin=(1337.63, 'K'), Tmax=(5000, 'K'))],\n Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(135.382, 'kJ/mol'),\n comment=\"\"\"Thermo library: primaryThermoLibrary + radical(CH3)\"\"\"\n ),\n molecular_weight=(15.0345, 'amu'),\n )\n\n x = Species(\n molecule=[Molecule().from_adjacency_list(\"1 X u0 p0\")],\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0, 0, 0, 0, 0, 0, 0], Tmin=(298, 'K'), Tmax=(1000, 'K')),\n NASAPolynomial(coeffs=[0, 0, 0, 0, 0, 0, 0], Tmin=(1000, 'K'), Tmax=(2000, 'K'))],\n Tmin=(298, 'K'), Tmax=(2000, 'K'), E0=(-6.19426, 'kJ/mol'),\n comment=\"\"\"Thermo library: surfaceThermo\"\"\")\n )\n\n ch3x = Species(\n molecule=[Molecule().from_adjacency_list(\"\"\"1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 H u0 p0 c0 {1,S}\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n5 X u0 p0 c0 {1,S}\"\"\")],\n thermo=NASA(\n polynomials=[\n NASAPolynomial(\n coeffs=[-0.552219, 0.026442, -3.55617e-05, 2.60044e-08, -7.52707e-12, -4433.47, 0.692144],\n Tmin=(298, 'K'), Tmax=(1000, 'K')),\n NASAPolynomial(\n coeffs=[3.62557, 0.00739512, -2.43797e-06, 1.86159e-10, 3.6485e-14, -5187.22, -18.9668],\n Tmin=(1000, 'K'), Tmax=(2000, 'K'))],\n Tmin=(298, 'K'), Tmax=(2000, 'K'), E0=(-39.1285, 'kJ/mol'),\n comment=\"\"\"Thermo library: surfaceThermoNi111\"\"\")\n )\n\n rxn1 = Reaction(reactants=[ch3, x],\n products=[ch3x],\n kinetics=StickingCoefficient(\n A=0.1, n=0, Ea=(0, 'kcal/mol'), T0=(1, 'K'), Tmin=(200, 'K'), Tmax=(3000, 'K'),\n coverage_dependence={x: {'a': 0.0, 'm': -1.0, 'E': (0.0, 'J/mol')}},\n comment=\"\"\"Exact match found for rate rule (Adsorbate;VacantSite)\"\"\"\n )\n )\n core_species = [ch3, x, ch3x]\n edge_species = []\n core_reactions = [rxn1]\n edge_reactions = []\n\n T = 800.\n P_initial = 1.0e5\n rxn_system = SurfaceReactor(\n T, P_initial,\n n_sims=1,\n initial_gas_mole_fractions={ch3: 1.0},\n initial_surface_coverages={x: 1.0},\n surface_volume_ratio=(1., 'm^-1'),\n surface_site_density=(2.72e-9, 'mol/cm^2'),\n coverage_dependence=True,\n termination=[])\n # in chemkin, the sites are mostly occupied in about 1e-8 seconds.\n\n rxn_system.initialize_model(core_species, core_reactions, edge_species, edge_reactions)\n\n tlist = np.logspace(-13, -5, 81, dtype=np.float64)\n\n print(\"Surface site density:\", rxn_system.surface_site_density.value_si)\n\n print(\"rxn1 rate coefficient\",\n rxn1.get_surface_rate_coefficient(rxn_system.T.value_si, 
rxn_system.surface_site_density.value_si))\n\n self.assertIsInstance(rxn1.kinetics.coverage_dependence, dict) # check to make sure coverage_dependence is still the correct type\n for species, parameters in rxn1.kinetics.coverage_dependence.items():\n self.assertIsInstance(species, Species) # species should be a Species\n self.assertIsInstance(parameters, dict)\n self.assertIsNotNone(parameters['a'])\n self.assertIsNotNone(parameters['m'])\n self.assertIsNotNone(parameters['E'])\n\n # Integrate to get the solution at each time point\n t = []\n y = []\n reaction_rates = []\n species_rates = []\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y.append(rxn_system.y.copy())\n reaction_rates.append(rxn_system.core_reaction_rates.copy())\n species_rates.append(rxn_system.core_species_rates.copy())\n print(\"time: \", t)\n print(\"moles:\", y)\n print(\"reaction rates:\", reaction_rates)\n print(\"species rates:\", species_rates)\n start_time = time.time()\n for t1 in tlist:\n rxn_system.advance(t1)\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y.append(rxn_system.y.copy())\n reaction_rates.append(rxn_system.core_reaction_rates.copy())\n species_rates.append(rxn_system.core_species_rates.copy())\n run_time = time.time() - start_time\n print(f\"Simulation took {run_time:.3e} seconds in {self.id()}\")\n\n # Convert the solution vectors to np arrays\n t = np.array(t, np.float64)\n y = np.array(y, np.float64)\n reaction_rates = np.array(reaction_rates, np.float64)\n species_rates = np.array(species_rates, np.float64)\n V = constants.R * rxn_system.T.value_si * np.sum(y) / rxn_system.P_initial.value_si\n\n # Check that we're computing the species fluxes correctly\n for i in range(t.shape[0]):\n self.assertAlmostEqual(reaction_rates[i, 0], -species_rates[i, 0],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], -species_rates[i, 1],\n delta=1e-6 * reaction_rates[i, 0])\n self.assertAlmostEqual(reaction_rates[i, 0], species_rates[i, 2],\n delta=1e-6 * reaction_rates[i, 0])\n\n # Check that we've reached equilibrium by the end\n self.assertAlmostEqual(reaction_rates[-1, 0], 0.0, delta=1e-2)\n\n # Run model with Covdep off so we can test that it is actually being implemented\n rxn_system = SurfaceReactor(\n T, P_initial,\n n_sims=1,\n initial_gas_mole_fractions={ch3: 1.0},\n initial_surface_coverages={x: 1.0},\n surface_volume_ratio=(1., 'm^-1'),\n surface_site_density=(2.72e-9, 'mol/cm^2'),\n termination=[])\n\n rxn_system.initialize_model(core_species, core_reactions, edge_species, edge_reactions)\n\n tlist = np.logspace(-13, -5, 81, dtype=np.float64)\n\n # Integrate to get the solution at each time point\n t = []\n y_off = []\n species_rates_off = []\n t.append(rxn_system.t)\n\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y_off.append(rxn_system.y.copy())\n species_rates_off.append(rxn_system.core_species_rates.copy())\n for t1 in tlist:\n rxn_system.advance(t1)\n t.append(rxn_system.t)\n # You must make a copy of y because it is overwritten by DASSL at\n # each call to advance()\n y_off.append(rxn_system.y.copy())\n species_rates_off.append(rxn_system.core_species_rates.copy())\n run_time = time.time() - start_time\n print(f\"Simulation took {run_time:.3e} seconds in {self.id()}\")\n\n # Convert the solution vectors to np arrays\n t = np.array(t, np.float64)\n y_off = 
np.array(y_off, np.float64)\n species_rates_off = np.array(species_rates_off, np.float64)\n\n # Check that we've reached equilibrium\n self.assertAlmostEqual(species_rates_off[-1, 0], 0.0, delta=1e-2)\n\n # Check that coverages are different\n self.assertFalse(np.allclose(y,y_off))\n self.assertFalse(np.allclose(species_rates, species_rates_off))\n" ]
[ [ "numpy.logspace", "numpy.array", "numpy.sum", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
greatofdream/pmtTest
[ "d8f1fc8e94c3999516b6eef01e228311e6135053" ]
[ "waveana/triggerana.py" ]
[ "import numpy as np\nfrom .waveana import Waveana, interpolate\n'''\nuse trigger channel to extract information from other numpy waveforms\n'''\nclass Triggerana(Waveana):\n def __init__(self, wave=[], eid=0) -> None:\n super().__init__(wave, eid)\n def setTriggerWave(self, triggerWave, uprising=True):\n self.triggerWave = triggerWave\n self.triggerTime = self.getTriggerTime(50, 0.1, uprising)\n # print(self.triggerTime)\n def getTriggerTime(self, begin=50, threshold=0.1, uprising=True):\n # TODO: 初始的baseline长度不应该硬编码\n baseline = np.average(self.triggerWave[0:50])\n baseline2 = np.average(self.triggerWave[-150:-50])\n thresholdLevel = baseline2*threshold+baseline*(1-threshold)\n if uprising:\n for i in range(begin, self.triggerWave.shape[0]):\n #print(self.triggerWave[i])\n if self.triggerWave[i]>(thresholdLevel):\n return i-1+interpolate(thresholdLevel,self.triggerWave[i-1],self.triggerWave[i])\n print('Warning:{} cannot find trigger'.format(self.eid))\n return 175\n else:\n for i in range(begin, self.triggerWave.shape[0]):\n if self.triggerWave[i]<(baseline-threshold):\n return i\n def integrateWave(self):\n baseline = self.minPeakBaseline\n self.allCharge = np.sum(baseline-self.wave[int(self.triggerTime):])\n return self.allCharge\n" ]
[ [ "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KevinVolkSTScI/wfss_overlap
[ "6648ec520587c5d7a52f0241e08c7d129bab8223" ]
[ "fits_image_display.py" ]
[ "#! /usr/bin/env python\n#\n\"\"\"\nThis code uses matplotlib and numpy to produce a window within which a FITS\nimage can be displayed. The reason for having this and not using the usual\npackages already in existence is that I will want specific functions on the\nimage for data reduction.\n\nUsage:\n\nfits_image_display.py imagename.fits\n\nor just\n\nfits_image_display.py\n\nIn the first case the image name given is loaded (if possible) and displayed.\n\nIn the second case the widget comes up and one can read in an image.\n\nNote that if the image is of dimension larger than 2 then the first \"plane\"\nis used. There is no mechanism here for using other planes.\n\n\"\"\"\nimport math\nimport sys\nimport tkinter as Tk\nimport tkinter.ttk\nimport tkinter.filedialog\nimport tkinter.simpledialog\nimport tkinter.messagebox\nimport numpy\nfrom astropy.io import fits\n# import matplotlib\n# import matplotlib.lines as mlines\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\n# from matplotlib.colors import LogNorm\nimport matplotlib.pyplot as pyplot\nimport general_utilities\nimport mpfitexpr\n\nclass ImageGUI(Tk.Frame):\n \"\"\"\n This class brings up a separate image display window.\n\n Parameters\n ----------\n\n Tk.Frame: The base class of the object, matching a Tkinter root or\n Toplevel variable\n\n Returns\n -------\n\n The class variable is returned, effectively.\n \"\"\"\n # The following section of code concerns the image display functionality.\n #\n\n def __init__(self, parent=None, **args):\n self.image = None\n self.imagefilename = None\n self.zscale_flag = False\n self.root = None\n self.indpi = 100\n self.zoom = [1, 0, 0]\n self.xposition = None\n self.yposition = None\n self.angle = None\n self.colourBarVariable = None\n self.showImageAxes = None\n self.imagePosLabel = None\n self.imagePosLabelText = None\n self.mplfig1 = None\n self.mplsubplot1 = None\n self.canvas1 = None\n self.plotFrame = None\n self.imagename = None\n self.imagexpos = None\n self.imageypos = None\n self.transvalues = None\n self.p1 = None\n self.p2 = None\n self.p3 = None\n self.yscaleType = None\n self.imageHistogramLabel = None\n self.imageHistogramLabelText = None\n self.rangeType = None\n self.scaleType = None\n self.minField = None\n self.maxField = None\n self.zsminField = None\n self.zsmaxField = None\n self.bin_field = None\n self.colourScheme = None\n self.colourLabels = None\n self.barLabel = None\n self.colourBar = None\n self.colouBarVariable = None\n if parent is not None:\n # initialize the window and make the plot area.\n Tk.Frame.__init__(self, parent, args)\n self.root = parent\n\n\n def make_image_window(self):\n \"\"\"\n Make the main image display window.\n\n Returns\n -------\n None.\n\n \"\"\"\n # make the window\n BGCOL = '#F8F8FF'\n if self.root is not None:\n imagewindow = self.root\n else:\n imagewindow = Tk.Toplevel()\n imagewindow.config(bg=BGCOL)\n self.showImageAxes = True\n imageLabelFrame = Tk.Frame(imagewindow)\n imageLabelFrame.pack(side=Tk.TOP)\n self.imagePosLabelText = Tk.StringVar()\n self.imagePosLabel = Tk.Label(imageLabelFrame,\n textvariable=self.imagePosLabelText,\n anchor=Tk.N, width=70)\n self.imagePosLabel.pack(side=Tk.LEFT)\n self.imagePosLabelText.set(\"Position: Value:\")\n controlFrame = Tk.Frame(imagewindow)\n controlFrame.pack(side=Tk.LEFT, fill=Tk.Y, expand=1)\n self.plotFrame = Tk.Frame(imagewindow)\n self.plotFrame.pack()\n self.mplfig1 = Figure(figsize=(6, 6), dpi=self.indpi)\n self.mplsubplot1 = 
self.mplfig1.add_subplot(1, 1, 1)\n self.canvas1 = FigureCanvasTkAgg(self.mplfig1, master=self.plotFrame)\n self.canvas1.draw()\n self.canvas1.get_tk_widget().pack(side=Tk.LEFT, fill=Tk.BOTH,\n expand=Tk.YES)\n self.canvas1.mpl_connect(\"motion_notify_event\", self.setPlotPosition)\n self.canvas1.mpl_connect(\"button_press_event\", self.buttonPress)\n self.canvas1.mpl_connect(\"button_release_event\", self.buttonRelease)\n self.canvas1.mpl_connect(\"key_press_event\", self.keyPress)\n newframe = Tk.Frame(controlFrame)\n newframe.pack(side=Tk.TOP)\n lb = Tk.Label(newframe, text='Colour Scheme')\n lb.pack(side=Tk.TOP)\n self.colourScheme = tkinter.ttk.Combobox(newframe, width=15)\n self.colourLabels = ['jet', 'rainbow', 'gist_ncar', 'viridis',\n 'gnuplot', 'gist_gray', 'nipy_spectral']\n self.colourScheme['values'] = self.colourLabels\n self.colourScheme.pack()\n self.colourScheme.current(0)\n #\n lb = Tk.Label(newframe, text='Show Colour Bar')\n lb.pack()\n selectFrame = Tk.Frame(newframe)\n selectFrame.pack()\n self.colourBar = Tk.IntVar()\n t1 = Tk.Radiobutton(selectFrame, text='vertical',\n variable=self.colourBar, value=0,\n command=self.displayImage)\n t1.pack(side=Tk.LEFT)\n t2 = Tk.Radiobutton(selectFrame, text='horizontal',\n variable=self.colourBar, value=1,\n command=self.displayImage)\n t2.pack(side=Tk.LEFT)\n t3 = Tk.Radiobutton(selectFrame, text='none', variable=self.colourBar,\n value=2, command=self.displayImage)\n t3.pack(side=Tk.LEFT)\n self.colourBar.set(2)\n lb = Tk.Label(newframe, text='Colour Bar Label')\n lb.pack()\n self.barLabel = Tk.Entry(newframe, width=30)\n self.barLabel.pack()\n rangeframe = Tk.Frame(newframe)\n rangeframe.pack()\n fr1 = Tk.Frame(rangeframe)\n fr1.pack(side=Tk.LEFT)\n lb = Tk.Label(fr1, text='Display Minimum')\n lb.pack(side=Tk.TOP)\n self.minField = Tk.Entry(fr1, width=10)\n self.minField.pack()\n fr1 = Tk.Frame(rangeframe)\n fr1.pack(side=Tk.LEFT)\n Tk.Label(fr1, text=' ').pack()\n fr1 = Tk.Frame(rangeframe)\n fr1.pack(side=Tk.LEFT)\n lb = Tk.Label(fr1, text='Display Maximum')\n lb.pack(side=Tk.TOP)\n self.maxField = Tk.Entry(fr1, width=10)\n self.maxField.pack()\n zmin = numpy.min(self.image)\n zmax = numpy.max(self.image)\n general_utilities.put_value(zmin, self.minField)\n general_utilities.put_value(zmax, self.maxField)\n rangeframe = Tk.Frame(newframe)\n rangeframe.pack()\n fr1 = Tk.Frame(rangeframe)\n fr1.pack(side=Tk.LEFT)\n lb = Tk.Label(fr1, text='Zscale Minimum')\n lb.pack(side=Tk.TOP)\n self.zsminField = Tk.Entry(fr1, width=10)\n self.zsminField.pack()\n fr1 = Tk.Frame(rangeframe)\n fr1.pack(side=Tk.LEFT)\n Tk.Label(fr1, text=' ').pack()\n fr1 = Tk.Frame(rangeframe)\n fr1.pack(side=Tk.LEFT)\n lb = Tk.Label(fr1, text='Zscale Maximum')\n lb.pack(side=Tk.TOP)\n self.zsmaxField = Tk.Entry(fr1, width=10)\n self.zsmaxField.pack()\n try:\n zmin1, zmax1 = self.get_limits(self.image)\n ratio = abs(zmax1/zmin1)\n if ratio < 1.2:\n if zmin1 < 0.:\n zmax1 = zmin1\n zmin1 = 3.*zmin1\n else:\n zmax1 = 3.*zmin1\n except:\n zmin1 = 0.\n zmax1 = 1.\n general_utilities.put_value(zmin1, self.zsminField)\n general_utilities.put_value(zmax1, self.zsmaxField)\n lb = Tk.Label(newframe, text='Image Scaling')\n lb.pack()\n selectFrame = Tk.Frame(newframe)\n selectFrame.pack()\n self.scaleType = Tk.IntVar()\n t1 = Tk.Radiobutton(selectFrame, text='linear',\n variable=self.scaleType, value=0,\n command=self.displayImage)\n t1.pack(side=Tk.LEFT)\n t2 = Tk.Radiobutton(selectFrame, text='log', variable=self.scaleType,\n value=1, command=self.displayImage)\n 
t2.pack(side=Tk.LEFT)\n t3 = Tk.Radiobutton(selectFrame, text='sqrt',\n variable=self.scaleType, value=2,\n command=self.displayImage)\n t3.pack(side=Tk.LEFT)\n self.scaleType.set(0)\n lb = Tk.Label(newframe, text='Image Range')\n lb.pack()\n selectFrame = Tk.Frame(newframe)\n selectFrame.pack()\n self.rangeType = Tk.IntVar()\n t1 = Tk.Radiobutton(\n selectFrame, text='full', variable=self.rangeType,\n value=0, command=self.toggle_zscale)\n t1.pack(side=Tk.LEFT)\n t2 = Tk.Radiobutton(\n selectFrame, text='zscale', variable=self.rangeType,\n value=1, command=self.toggle_zscale)\n t2.pack(side=Tk.LEFT)\n self.rangeType.set(0)\n buttonFrame = Tk.Frame(controlFrame)\n buttonFrame.pack(side=Tk.TOP)\n subFrame = Tk.Frame(buttonFrame)\n subFrame.pack(side=Tk.TOP)\n side1 = Tk.Frame(subFrame)\n side1.pack(side=Tk.LEFT)\n b1 = Tk.Button(side1, text='Toggle Axes',\n command=self.toggleAxes)\n b1.pack(side=Tk.TOP)\n b1 = Tk.Button(side1, text='Auto Scale',\n command=self.imageAutoscale)\n b1.pack(side=Tk.TOP)\n side2 = Tk.Frame(subFrame)\n side2.pack(side=Tk.LEFT)\n b1 = Tk.Button(side2, text='Image Histogram',\n command=self.imageHistogram)\n b1.pack(side=Tk.TOP)\n b1 = Tk.Button(side2, text='Set Zoom',\n command=self.set_zoom)\n b1.pack(side=Tk.TOP)\n bin_frame = Tk.Frame(buttonFrame)\n bin_frame.pack(side=Tk.TOP)\n label = Tk.Label(bin_frame, text='bin size/number')\n label.grid(row=0, column=0)\n self.bin_field = Tk.Entry(bin_frame, width=10)\n self.bin_field.grid(row=0, column=1)\n self.bin_field.insert(0, '100')\n label = Tk.Label(\n bin_frame, text='Positive for bin number, negative for \\nbin size')\n label.grid(row=1, column=0, columnspan=2)\n label = Tk.Label(buttonFrame, text='Histogram y scaling:')\n label.pack()\n yscaleFrame = Tk.Frame(buttonFrame)\n yscaleFrame.pack(side=Tk.TOP)\n self.yscaleType = Tk.IntVar()\n t1 = Tk.Radiobutton(\n yscaleFrame, text='linear', variable=self.yscaleType,\n value=0)\n t1.pack(side=Tk.LEFT)\n t2 = Tk.Radiobutton(\n yscaleFrame, text='hybrid log', variable=self.yscaleType,\n value=1)\n t2.pack(side=Tk.LEFT)\n self.rangeType.set(0)\n b1 = Tk.Button(buttonFrame, text='Save Image as FITS',\n command=lambda: general_utilities.save_fits(self.image))\n b1.pack(side=Tk.TOP)\n b1 = Tk.Button(buttonFrame, text='Save as PNG',\n command=lambda: general_utilities.save_png_figure(\n self.mplfig1))\n b1.pack(side=Tk.TOP)\n b1 = Tk.Button(buttonFrame, text='Save as PS',\n command=lambda: general_utilities.save_ps_figure(\n self.mplfig1))\n b1.pack(side=Tk.TOP)\n b1 = Tk.Button(buttonFrame, text='Redisplay',\n command=self.displayImage)\n b1.pack(side=Tk.TOP)\n# b1 = Tk.Button(buttonFrame, text='Close',\n# command=lambda: self.imageExit(imagewindow))\n# b1.pack(side=Tk.TOP)\n self.displayImage()\n\n def zoom_corner(self, sh1, zoom, x1, y1):\n \"\"\"\n Given the zoom parameters find the array lower left corner.\n\n Parameters\n ----------\n\n sh1: A two-element list of the shape of the input image, values being\n integers\n\n zoom: A positive integer zoom function to be applied to the image\n\n x1: The x pixel value for the centre of the field to display\n (float or integer)\n\n y1: The y pixel value for the centre of the field to display\n (float or integer)\n\n Returns\n -------\n\n xmin: An integer value for the lower left corner x pixel index\n\n ymin: An integer value for the lower left corner y pixel index\n\n\n \"\"\"\n nxpixel = sh1[1] // zoom\n nypixel = sh1[0] // zoom\n xmin = x1 - nxpixel/2.\n ymin = y1 - nypixel/2.\n xmin = int(xmin)\n ymin = int(ymin)\n if 
xmin < 0:\n            xmin = 0\n        if ymin < 0:\n            ymin = 0\n        xmax = xmin + nxpixel\n        ymax = ymin + nypixel\n        if ymax > sh1[0]:\n            ymax = sh1[0]\n            ymin = ymax - nypixel\n        if xmax > sh1[1]:\n            xmax = sh1[1]\n            xmin = xmax - nxpixel\n        return xmin, ymin\n\n    def set_zoom(self):\n        \"\"\"\n        Bring up a window to set the zoom parameter.\n\n        No values are passed to this routine or returned from it. The\n        self.zoom variable is changed by the routine.\n        \"\"\"\n        sh1 = self.image.shape\n        npixel = min(sh1[0], sh1[1])\n        zoommax = int(npixel/64.)\n        if zoommax <= 1:\n            tkinter.messagebox.showinfo(\n                \"Error\",\n                \"Zoom is disabled for minimum image size < 128 pixels.\")\n            return\n        if self.xposition is None:\n            x1 = sh1[1]/2.\n            y1 = sh1[0]/2.\n        else:\n            x1 = self.xposition\n            y1 = self.yposition\n        zoom = tkinter.simpledialog.askinteger(\n            'Input',\n            'Set the integer zoom value (1 to %d)' % (zoommax))\n        if zoom is None:\n            return\n        else:\n            xmin, ymin = self.zoom_corner(sh1, zoom, x1, y1)\n            self.zoom[0] = zoom\n            self.zoom[1] = int(xmin)\n            self.zoom[2] = int(ymin)\n        self.displayImage()\n\n    def toggle_zscale(self):\n        \"\"\"\n        Toggle the zscale option in the image display\n\n        This routine is called in response to the \"Image Range\" radio button.\n        It turns the zscale display option on or off via the self.zscale_flag\n        boolean variable.\n\n        No values are passed to this routine or returned from the routine.\n        \"\"\"\n        ind = self.rangeType.get()\n        if ind == 1:\n            self.zscale_flag = True\n        else:\n            self.zscale_flag = False\n        self.displayImage()\n\n    def readNewImage(self):\n        \"\"\"\n        Read a FITS image from a file and display it.\n\n        Routine to read a FITS file and extract a two-dimensional image if\n        possible. The image is then displayed. This routine will only work\n        if the image display window exists.\n\n        No parameters are passed to this routine or returned from this routine.\n        \"\"\"\n        try:\n            filename = tkinter.filedialog.askopenfilename(\n                filetypes=[('FITS', '*.fits')])\n            if filename is not None:\n                self.imagefilename = filename\n                self.image = self.get_image()\n                if self.image is None:\n                    self.imagefilename = None\n                    return\n                sh1 = self.image.shape\n                self.xposition = sh1[1] // 2\n                self.yposition = sh1[0] // 2\n                print('centre position: ', self.xposition, self.yposition)\n                self.displayImage()\n                self.canvas1.draw()\n        except Exception:\n            pass\n\n    def get_limits(self, values, nsamples=1000, contrast=0.25, max_reject=0.5,\n                   min_npixels=5, krej=2.5, max_iterations=5):\n        \"\"\"\n        Find the IRAF-like \"zscale\" signal limits for an image.\n\n        This routine is copied from astropy.visualization.\n\n        Aside from a change to the passing of the arguments the code has\n        not been changed. The original code is part of ZScaleInterval.\n        It is a recoding of the IRAF zscale algorithm in python.\n\n        All parameters except the input image array are optional.\n\n        Parameters\n        ----------\n        values :   a two-dimensional numpy array for which the zscale limit\n                   values are to be calculated.
Can be float or integer values.\n\n        nsamples :  the number of pixels to use to estimate the median and the\n                    range (integer).\n\n        contrast :  The contrast parameter from IRAF imexam which controls the\n                    range of values considered to estimate the minimum and\n                    maximum values to use in the display, a real value between\n                    0.0 and 1.0.\n\n        max_reject :  Parameter for the maximum fraction of rejected pixels,\n                      a real value between 0.0 and 1.0; if more than this\n                      fraction of pixels are rejected then the full range\n                      of the data values is returned.\n\n        min_npixels :  An integer value for the minimum number of pixels that\n                       are rejected by the iterative algorithm; if less than\n                       this number of pixels is rejected the full data range is\n                       returned.\n\n        krej :  A float value, the number of standard deviations used for\n                rejection. It must be positive.\n\n        max_iterations :  An integer value giving the maximum number of\n                          rejection iterations to use.\n\n        Returns\n        -------\n        vmin :  the minimum value for the zscale range, a real number\n\n        vmax :  the maximum value for the zscale range, a real number\n\n        \"\"\"\n        # Sample the image\n        values = numpy.asarray(values)\n        values = values[numpy.isfinite(values)]\n        stride = int(max(1.0, values.size / nsamples))\n        samples = values[::stride][:nsamples]\n        samples.sort()\n\n        npix = len(samples)\n        vmin = samples[0]\n        vmax = samples[-1]\n\n        # Fit a line to the sorted array of samples\n        minpix = max(min_npixels, int(npix * max_reject))\n        xvalues = numpy.arange(npix)\n        ngoodpix = npix\n        last_ngoodpix = npix + 1\n\n        # Bad pixels mask used in k-sigma clipping\n        badpix = numpy.zeros(npix, dtype=bool)\n\n        # Kernel used to dilate the bad pixels mask\n        ngrow = max(1, int(npix * 0.01))\n        kernel = numpy.ones(ngrow, dtype=bool)\n\n        for niter in range(max_iterations):\n            if ngoodpix >= last_ngoodpix or ngoodpix < minpix:\n                break\n\n            fit = numpy.polyfit(xvalues, samples, deg=1,\n                                w=(~badpix).astype(int))\n            fitted = numpy.poly1d(fit)(xvalues)\n\n            # Subtract fitted line from the data array\n            flat = samples - fitted\n\n            # Compute the k-sigma rejection threshold\n            threshold = krej * flat[~badpix].std()\n\n            # Detect and reject pixels further than k*sigma from the\n            # fitted line\n            badpix[(flat < - threshold) | (flat > threshold)] = True\n\n            # Convolve with a kernel of length ngrow\n            badpix = numpy.convolve(badpix, kernel, mode='same')\n\n            last_ngoodpix = ngoodpix\n            ngoodpix = numpy.sum(~badpix)\n\n        slope, intercept = fit\n\n        if ngoodpix >= minpix:\n            if contrast > 0:\n                slope = slope / contrast\n            center_pixel = (npix - 1) // 2\n            median = numpy.median(samples)\n            vmin = max(vmin, median - (center_pixel - 1) * slope)\n            vmax = min(vmax, median + (npix - center_pixel) * slope)\n\n        return vmin, vmax\n\n    def get_image(self):\n        \"\"\"\n        Read a FITS image from the 0th or 1st extension.\n\n        This routine tries to read a FITS file and returns the image, or None\n        if there is an issue.\n\n        Parameters\n        ----------\n        None\n\n        Returns\n        -------\n        image :  a numpy two-dimensional array of image values, or None\n                 if there is an issue.\n\n        \"\"\"\n        try:\n            image = fits.getdata(self.imagefilename)\n        except IndexError:\n            image = fits.getdata(self.imagefilename, ext=1)\n        sh1 = image.shape\n        if len(sh1) < 2:\n            print('Bad image dimensions in file %s.'
%\n (self.imagefilename))\n return None\n if len(sh1) == 3:\n image = numpy.squeeze(image[0, :, :])\n if len(sh1) == 4:\n image = numpy.squeeze(image[0, 0, :, :])\n if len(sh1) == 5:\n image = numpy.squeeze(image[0, 0, 0, :, :])\n if len(sh1) == 6:\n image = numpy.squeeze(image[0, 0, 0, 0, :, :])\n zmin = numpy.min(image)\n zmax = numpy.max(image)\n general_utilities.put_value(zmin, self.minField)\n general_utilities.put_value(zmax, self.maxField)\n return image\n\n def imageHistogram(self):\n \"\"\"\n Plot an IRAF-like image histogram for the current image.\n\n This routine plots a histogram of the image pixel values in\n a new window. No values are passed to this routine or returned from\n this routine.\n \"\"\"\n if self.image is None:\n return\n BGCOL = '#F8F8FF'\n try:\n histogramwindow = Tk.Toplevel()\n histogramwindow.config(bg=BGCOL)\n if self.zscale_flag:\n xmin = float(self.zsminField.get())\n xmax = float(self.zsmaxField.get())\n else:\n xmin = float(self.minField.get())\n xmax = float(self.maxField.get())\n yscale_option = self.yscaleType.get()\n try:\n value = float(self.bin_field.get())\n if value == 0:\n nbins = 100\n if value < 0.:\n xstep = abs(value)\n xmin = xmin - xstep\n xmax = xmax + 2.0*xstep\n nbins = int((xmax - xmin)/xstep)\n xmax = xmin + nbins*xstep\n else:\n nbins = int(value)\n nbins = max(nbins, 10)\n except ValueError:\n nbins = 100\n xstep = (xmax - xmin)/nbins\n xmin = xmin - xstep\n xmax = xmax + 2.0*xstep\n nbins = int((xmax - xmin)/xstep)\n xmax = xmin + nbins*xstep\n self.imageHistogramLabelText = Tk.StringVar()\n self.imageHistogramLabel = Tk.Label(\n histogramwindow, textvariable=self.imageHistogramLabelText,\n anchor=Tk.N, width=70)\n self.imageHistogramLabel.pack()\n self.imageHistogramLabelText.set(\"Value:\")\n self.p3 = Figure(figsize=(6, 6), dpi=100)\n sp1 = self.p3.add_subplot(1, 1, 1)\n c1 = FigureCanvasTkAgg(self.p3, master=histogramwindow)\n c1.mpl_connect(\"motion_notify_event\", self.imageHistogramPosition)\n histogramy, hxedges = numpy.histogram(\n self.image.flatten(), nbins, range=[xmin, xmax])\n histogramx = (hxedges[1:]+hxedges[0:-1])/2.\n if yscale_option == 1:\n newyvalues = general_utilities.hybrid_transform(histogramy)\n sp1.plot(histogramx, newyvalues, color='blue')\n else:\n sp1.plot(histogramx, histogramy, color='blue')\n sp1.set_xlabel('Signal')\n sp1.set_ylabel('Number of points per bin')\n if yscale_option == 1:\n tickmarks, ticklabels = general_utilities.hybrid_labels(\n newyvalues)\n sp1.set_yticks(tickmarks)\n sp1.set_yticklabels(ticklabels)\n label = 'Bin size: %.5g\\nNumber of Bins: %d' % (xstep, nbins)\n xpos = xmin + 0.01*(xmax - xmin)\n ymin, ymax = sp1.get_ybound()\n ypos = ymax + (ymax - ymin)*0.02\n if self.imagefilename is None:\n outstring = None\n else:\n outstring = '# Histogram from file ' + self.imagefilename\n sp1.text(xpos, ypos, label)\n c1.draw()\n c1.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=Tk.YES)\n h1 = Tk.Frame(histogramwindow)\n h1.pack(side=Tk.TOP)\n h1.config(bg=BGCOL)\n button = Tk.Button(\n h1, text=\"Save values\",\n command=lambda: general_utilities.save_data_set_values(\n histogramx, histogramy, outstring))\n button.pack(side=Tk.LEFT)\n button.config(bg=BGCOL)\n button = Tk.Button(\n h1, text=\"Save as PS\",\n command=lambda: general_utilities.save_ps_figure(self.p3))\n button.pack(side=Tk.LEFT)\n button.config(bg=BGCOL)\n button = Tk.Button(\n h1, text=\"Save as PNG\",\n command=lambda: general_utilities.save_png_figure(self.p3))\n button.pack(side=Tk.LEFT)\n 
button.config(bg=BGCOL)\n            button = Tk.Button(h1, text=\"Close\",\n                               command=histogramwindow.destroy)\n            button.pack()\n            button.config(bg=BGCOL)\n        except Exception:\n            pass\n\n    def imageHistogramPosition(self, event):\n        \"\"\"\n        Post mouse position on image to the status line.\n\n        When a normal histogram plot exists, this routine takes the mouse\n        position events and updates the position values at the top of the\n        window.\n\n        Parameters\n        ----------\n        event    a standard Tkinter event variable.\n\n        Returns\n        -------\n        No values are returned by this routine.\n\n        \"\"\"\n        try:\n            xpos = float(event.xdata)\n            ypos = float(event.ydata)\n            if self.yscaleType.get() == 1:\n                ypos = general_utilities.inverse_hybrid_transform(ypos)\n            s1 = 'Value: [%g, %g]' % (xpos, ypos)\n            self.imageHistogramLabelText.set(s1)\n        except Exception:\n            pass\n\n    def put_value(self, value, field):\n        \"\"\"\n        Place a value in a widget text field.\n\n        Any current contents of the field are deleted.\n\n        Parameters\n        ----------\n        value :  the string value to be placed in the text field\n\n        field :  the tkinter text field variable where the string is to\n                 be put\n\n        No values are returned from this routine.\n\n        \"\"\"\n        try:\n            s1 = field.get()\n            field.delete(0, last=len(s1))\n            field.insert(0, str(value))\n        except Exception:\n            pass\n\n    def toggleAxes(self):\n        \"\"\"\n        Toggle the axis display variable.\n\n        Each call to this routine toggles the logical variable determining\n        whether the axes are plotted with the image. No values are passed\n        to this routine or returned from it.\n        \"\"\"\n        self.showImageAxes = not self.showImageAxes\n        self.displayImage()\n\n    def imageAutoscale(self):\n        \"\"\"\n        Autoscale the image display.\n\n        This routine resets the minimum and maximum image display values to\n        the full range of the current image.\n\n        No values are passed to this routine or returned from this routine.\n        \"\"\"\n        zmin = numpy.min(self.image)\n        zmax = numpy.max(self.image)\n        general_utilities.put_value(zmin, self.minField)\n        general_utilities.put_value(zmax, self.maxField)\n        zmin1, zmax1 = self.get_limits(self.image)\n        general_utilities.put_value(zmin1, self.zsminField)\n        general_utilities.put_value(zmax1, self.zsmaxField)\n        self.displayImage()\n\n    def imageExit(self, window):\n        \"\"\"\n        Close a Tkinter window.\n\n        This routine closes the window for the image display (or\n        whichever top level window variable is passed into the routine).\n\n        Parameters\n        ----------\n        window :  A tkinter Toplevel variable (or equivalent), the window\n                  to be closed.\n\n        No values are returned by this routine.\n\n        \"\"\"\n        window.destroy()\n\n    def keyPress(self, event):\n        \"\"\"\n        Routine for applying imaging key press events.\n\n        Currently the routine sets the image center at the event position.\n        This does nothing if the zoom is not applied.\n        \"\"\"\n        if (event.xdata is None) or (event.ydata is None):\n            return\n        xpixel = int(self.zoom[1]+event.xdata+0.5)\n        ypixel = int(self.zoom[2]+event.ydata+0.5)\n        if (xpixel is None) or (ypixel is None):\n            return\n        imshape = self.image.shape\n        if event.key == 'l':\n            yvalues = numpy.squeeze(self.image[ypixel, :])\n            xvalues = numpy.arange(imshape[1])+1\n            self.plotxy(xvalues, yvalues, symbol='-', colour='blue',\n                        xlabel='Column (Pixels)', ylabel='Pixel Value',\n                        title='Line %d' % (ypixel))\n        if event.key == 'c':\n            yvalues = numpy.squeeze(self.image[:, xpixel])\n            xvalues = numpy.arange(imshape[0])+1\n            self.plotxy(xvalues, yvalues, symbol='-', colour='blue',\n                        xlabel='Line (Pixels)', ylabel='Pixel Value',\n                        title='Column %d' % (xpixel))\n        if event.key == 'j':\n            x0 = xpixel-10\n            x0 = max(x0, 0)\n            x1 = x0 + 22\n            if x1 > imshape[1]:\n                x1 = imshape[1]\n                x0 = x1 - 22\n            y0 = ypixel-2\n            y0 = max(y0, 0)\n            y1 = y0 + 5\n            if y1 > imshape[0]:\n                y1 = imshape[0]\n                y0 = y1 - 5\n            subim = numpy.copy(self.image[y0:y1, x0:x1])\n            vector = numpy.mean(subim, axis=0)\n            xvalues = numpy.arange(len(vector))+x0\n            ind = numpy.argmax(vector)\n            mind = numpy.argmin(vector)\n            start = numpy.asarray(\n                [xvalues[ind], vector[ind], 1., vector[mind]])\n            params, yfit = mpfitexpr.mpfitexpr(\n                \"p[3]+p[1]/numpy.exp((x-p[0])*(x-p[0])/(2.*p[2]*p[2]))\",\n                xvalues, vector, vector*0.+1., start)\n            try:\n                str1 = 'Centre: %.3f\\nPeak: %.2f\\nSigma: %.2f\\nBaseline: %.2f' % (\n                    params[0], params[1], params[2], params[3])\n                print(str1)\n            except:\n                pass\n            tstring = 'Mean of lines (y) %d:%d' % (y0, y1)\n            self.plotxy(xvalues, vector, symbol='-', colour='blue',\n                        xlabel='x pixel position', ylabel='Signal (ADU/s)',\n                        title=tstring, ymodel=yfit, fitparams=params)\n            return\n        if event.key == 'k':\n            y0 = ypixel-10\n            if y0 < 0:\n                y0 = 0\n            y1 = y0 + 22\n            if y1 > imshape[0]:\n                y1 = imshape[0]\n                y0 = y1 - 22\n            x0 = xpixel-2\n            if x0 < 0:\n                x0 = 0\n            x1 = x0 + 5\n            if x1 > imshape[1]:\n                x1 = imshape[1]\n                x0 = x1 - 5\n            subim = numpy.copy(self.image[y0:y1, x0:x1])\n            vector = numpy.mean(subim, axis=1)\n            xvalues = numpy.arange(len(vector))+y0\n            ind = numpy.argmax(vector)\n            mind = numpy.argmin(vector)\n            start = numpy.asarray(\n                [xvalues[ind], vector[ind], 1., vector[mind]])\n            params, yfit = mpfitexpr.mpfitexpr(\n                \"p[3]+p[1]/numpy.exp((x-p[0])*(x-p[0])/(2.*p[2]*p[2]))\",\n                xvalues, vector, vector*0.+1., start)\n            try:\n                str1 = 'Centre: %.3f\\nPeak: %.2f\\nSigma: %.2f\\nBaseline: %.2f' % (\n                    params[0], params[1], params[2], params[3])\n                print(str1)\n            except:\n                pass\n            tstring = 'Mean of columns (x) %d:%d' % (x0, x1)\n            self.plotxy(xvalues, vector, symbol='-', colour='blue',\n                        xlabel='y pixel position', ylabel='Signal (ADU/s)',\n                        title=tstring, ymodel=yfit, fitparams=params)\n            return\n        self.xposition = self.zoom[1]+event.xdata\n        self.yposition = self.zoom[2]+event.ydata\n        sh1 = self.image.shape\n        xmin, ymin = self.zoom_corner(sh1, self.zoom[0], self.xposition,\n                                      self.yposition)\n        self.zoom[1] = xmin\n        self.zoom[2] = ymin\n        self.displayImage()\n        return\n\n    def buttonPress(self, event):\n        \"\"\"\n        Routine for applying imaging button press events.\n\n        Holder routine for button press events in the image window.\n        Not currently active.\n        \"\"\"\n        return\n\n    def buttonRelease(self, event):\n        \"\"\"\n        Routine for applying imaging button release events.\n\n        Holder routine for button release events in the image window.\n\n        \"\"\"\n        if (event.xdata is None) or (event.ydata is None):\n            return\n        sh1 = self.image.shape\n        xpixel = int(self.zoom[1]+event.xdata+0.5)\n        ypixel = int(self.zoom[2]+event.ydata+0.5)\n        if (xpixel is None) or (ypixel is None):\n            return\n        self.xposition = self.zoom[1]+event.xdata\n        self.yposition = self.zoom[2]+event.ydata\n        xmin, ymin = self.zoom_corner(sh1, self.zoom[0], self.xposition,\n                                      self.yposition)\n        self.zoom[1] = xmin\n        self.zoom[2] = ymin\n        self.displayImage()\n        return\n\n    def setPlotPosition(self, event):\n        \"\"\"\n        Post the image position to the information line on the image display.\n\n        Routine to post the image position and the image value (if possible)\n        to the text area above the image display.\n\n        Parameters\n        ----------\n        event :  a motion-notify event from the image display window\n\n        Returns\n        -------\n        No values are returned by this routine.\n\n        \"\"\"\n        try:\n            event.canvas.get_tk_widget().focus_set()\n            x1 = int(self.zoom[1]+event.xdata+0.5)\n            y1 = int(self.zoom[2]+event.ydata+0.5)\n            try:\n                value = '%.6g' % (self.image[y1, x1])\n            except (IndexError, ValueError):\n                value = ' '\n            s1 = \"Position: x = %.2f y = %.2f Value: %s\" % (x1, y1, value)\n            self.imagePosLabelText.set(s1)\n            self.imagexpos = event.xdata\n            self.imageypos = event.ydata\n        except Exception:\n            pass\n\n    def displayImage(self, getrange=False, angle=None):\n        \"\"\"\n        Display the current image in the display area.\n\n        Parameters\n        ----------\n\n        getrange: An optional boolean variable, if True the code resets\n                  the display range, default is False.\n        \"\"\"\n        if self.image is not None:\n            self.mplsubplot1.clear()\n            if getrange:\n                self.zoom = [1, 0, 0]\n                zmin = numpy.min(self.image)\n                general_utilities.put_value(zmin, self.minField)\n                zmax = numpy.max(self.image)\n                general_utilities.put_value(zmax, self.maxField)\n                try:\n                    zmin1, zmax1 = self.get_limits(self.image)\n                except:\n                    zmin1 = 0.\n                    zmax1 = 1.\n                general_utilities.put_value(zmin1, self.zsminField)\n                general_utilities.put_value(zmax1, self.zsmaxField)\n            zmin = float(self.minField.get())\n            zmax = float(self.maxField.get())\n            zsmin = float(self.zsminField.get())\n            zsmax = float(self.zsmaxField.get())\n            cind = self.colourScheme.current()\n            scaleOption = self.scaleType.get()\n            try:\n                # if the colourBarVariable exists, remove it\n                self.colourBarVariable.remove()\n            except Exception:\n                pass\n            startimage = general_utilities.get_subimage(self.image, self.zoom)\n            if self.zscale_flag:\n                zmin, zmax = self.get_limits(startimage)\n                scaleOption = 0\n                self.scaleType.set(0)\n                general_utilities.put_value(zmin, self.zsminField)\n                general_utilities.put_value(zmax, self.zsmaxField)\n            else:\n                s1 = self.minField.get()\n                zmin = float(s1)\n                s1 = self.maxField.get()\n                zmax = float(s1)\n            if (scaleOption == 0) or self.zscale_flag:\n                newimage = numpy.copy(startimage)\n                im1 = self.mplsubplot1.imshow(\n                    newimage, cmap=self.colourLabels[cind],\n                    origin='lower', vmin=zmin, vmax=zmax)\n            elif scaleOption == 1:\n                newimage = self.logTransform(startimage, zmin, zmax)\n                zmin1 = numpy.min(newimage)\n                zmax1 = numpy.max(newimage)\n                im1 = self.mplsubplot1.imshow(\n                    newimage, cmap=self.colourLabels[cind], origin='lower',\n                    vmin=zmin1, vmax=zmax1)\n            else:\n                newimage = self.sqrtTransform(startimage, zmin, zmax)\n                zmin1 = numpy.min(newimage)\n                zmax1 = numpy.max(newimage)\n                im1 = self.mplsubplot1.imshow(\n                    newimage, cmap=self.colourLabels[cind], origin='lower',\n                    vmin=zmin1, vmax=zmax1)\n            if angle is not None:\n                try:\n                    value = float(angle)\n                    self.angle = value\n                    self.mplsubplot1.set_title(\n                        'Rotation angle = %.3f' % (value))\n                except:\n                    pass\n            else:\n                if not self.angle is None:\n                    self.mplsubplot1.set_title(\n                        'Rotation angle = %.3f' % (self.angle))\n            self.mplsubplot1.get_xaxis().set_visible(self.showImageAxes)\n            self.mplsubplot1.get_yaxis().set_visible(self.showImageAxes)\n            if self.showImageAxes:\n                self.mplsubplot1.set_xlabel('x Pixel Position')\n                self.mplsubplot1.set_ylabel('y Pixel Position')\n            cbflag = self.colourBar.get()\n            cblabel = self.barLabel.get()\n            if scaleOption == 1:\n                ticklist = numpy.zeros((11), dtype=numpy.float32)\n                label1 = numpy.zeros((11), dtype=numpy.float32)\n                for lv in range(11):\n                    ticklist[lv] = 0.3*lv\n                    vout = self.invLogTransform(ticklist[lv], zmin, zmax)\n                    label1[lv] = '%.3g' % (vout)\n            if scaleOption == 2:\n                ticklist = numpy.zeros((11), dtype=numpy.float32)\n                label1 = numpy.zeros((11), dtype=numpy.float32)\n                for lv in range(11):\n                    zrange = numpy.max(newimage) - numpy.min(newimage)\n                    ticklist[lv] = numpy.min(newimage) + (zrange * lv / 10.)\n                    vout = self.invSqrtTransform(ticklist[lv], zmin, zmax)\n                    label1[lv] = '%.3g' % (vout)\n            if cbflag == 0:\n                if scaleOption > 0:\n                    self.colourBarVariable = self.mplfig1.colorbar(\n                        im1, cmap=self.colourLabels[cind],\n                        orientation='vertical', ticks=ticklist)\n                    self.colourBarVariable.ax.set_yticklabels(label1)\n                else:\n                    self.colourBarVariable = self.mplfig1.colorbar(\n                        im1, cmap=self.colourLabels[cind],\n                        orientation='vertical')\n                self.colourBarVariable.ax.get_yaxis().labelpad = 15\n                self.colourBarVariable.ax.set_ylabel(cblabel, rotation=90)\n            if cbflag == 1:\n                if scaleOption > 0:\n                    self.colourBarVariable = self.mplfig1.colorbar(\n                        im1, cmap=self.colourLabels[cind],\n                        orientation='horizontal', ticks=ticklist)\n                    self.colourBarVariable.ax.set_xticklabels(label1)\n                else:\n                    self.colourBarVariable = self.mplfig1.colorbar(\n                        im1, cmap=self.colourLabels[cind],\n                        orientation='horizontal')\n                self.colourBarVariable.ax.set_xlabel(cblabel, rotation=0)\n            sh1 = self.image.shape\n            if self.zoom[0] == 1:\n                if sh1[0] == 3631:\n                    xline1 = numpy.asarray([655, 2977, 2977, 655, 655])\n                    yline1 = numpy.asarray([655, 655, 2977, 2977, 655])\n                    xline2 = numpy.asarray([792, 2840, 2840, 792, 792])\n                    yline2 = numpy.asarray([792, 792, 2840, 2840, 792])\n                    self.mplsubplot1.plot(xline1, yline1, color='white',\n                                          linestyle='dashed', linewidth=1.0)\n                    self.mplsubplot1.plot(xline2, yline2, color='white',\n                                          linestyle='dotted', linewidth=1.0)\n\n                elif sh1[0] == 2322:\n                    xline1 = numpy.asarray([137, 2185, 2185, 137, 137])\n                    yline1 = numpy.asarray([137, 137, 2185, 2185, 137])\n                    self.mplsubplot1.plot(xline1, yline1, color='white',\n                                          linestyle='dotted', linewidth=1.0)\n            self.canvas1.draw()\n\n    def invLogTransform(self, value, zmin, zmax):\n        \"\"\"\n        Transform a log value back to the original value in an image.\n\n        This routine returns the original value corresponding to a given\n        logarithmic display value in the range from 1 to 1000.\n\n        Parameters\n        ----------\n        value :  a real value, by assumption, in the range from 1 to 1000\n\n        zmin :   a real value, the signal minimum for the logarithmic\n                 mapping\n\n        zmax :   a real value, the signal maximum for the logarithmic\n                 mapping\n\n        Returns\n        -------\n        vout     The original image value corresponding to the new image\n                 value, a real number\n\n        \"\"\"\n        newvalue = value*1\n        if value < 0.:\n            newvalue = 0.\n        if value > 3.:\n            newvalue = 3.\n        v1 = math.pow(10., newvalue)\n        v1 = max(v1, 1.0)\n        v1 = min(v1, 1000.0)\n        v2 = (v1 - 1.)/999.\n        vout = zmin + (zmax - zmin) * v2\n        return vout\n\n    def logTransform(self, image, zmin, zmax):\n        \"\"\"\n        Apply an IRAF-style logarithmic transformation to an image.\n\n        This routine applies an iraf-style logarithmic transform to an\n        image; the requested range is mapped to logarithmic values from\n        0.0 to 3.0. The range can be negative since the mapping is for the\n        signal values with respect to the defined range.\n\n        Parameters\n        ----------\n        image :  a numpy array of (assumed) floating point or integer\n                 values\n\n        zmin :   a real value, the minimum of the range for the\n                 transformation\n\n        zmax :   a real value, the maximum of the range for the\n                 transformation\n\n        Returns\n        -------\n        newimage :  a numpy image of the same dimensions as the input\n                    image, with the transformation applied; floating\n                    point values in the range between 0.0 and 3.0 are\n                    contained in the new image\n        \"\"\"\n        newimage = numpy.copy(image)\n        newimage[newimage < zmin] = zmin\n        newimage[newimage > zmax] = zmax\n        zrange = zmax - zmin\n        newimage = 1. + 999.*(newimage - zmin)/zrange\n        newimage = numpy.log10(newimage)\n        self.transvalues = [zmin, zmax]\n        return newimage\n\n    def invSqrtTransform(self, value, zmin, zmax):\n        \"\"\"\n        Transform a square-root scaled image value back to the original value.\n\n        Routine to map the data values from the sqrt transform back to the\n        original range of values.\n\n        Parameters\n        ----------\n        value :  a real value, by assumption\n\n        zmin :   a real value, the signal minimum for the sqrt mapping\n                 (not used, present for uniformity with the log call)\n\n        zmax :   a real value, the signal maximum for the sqrt mapping\n                 (not used, present for uniformity with the log call)\n\n        Returns\n        -------\n        v1 :  The original image value corresponding to the new image\n              value, a real number\n\n        \"\"\"\n        newvalue = abs(value)\n        v1 = newvalue * newvalue\n        if value < 0.:\n            v1 = -v1\n        return v1\n\n    def sqrtTransform(self, image, zmin, zmax):\n        \"\"\"\n        Apply a square-root scaling to an image.\n\n        Given an image, this routine applies a square-root scaling of the\n        absolute value, preserving the original sign, and returns the\n        transformed image.\n\n        Parameters\n        ----------\n        image :  a numpy array of (assumed) floating point or integer\n                 values\n\n        zmin :   a real value, the minimum of the range for the\n                 transformation (not currently used, present so the\n                 form of the call matches the other transformations)\n\n        zmax :   a real value, the maximum of the range for the\n                 transformation (not currently used)\n\n        Returns\n        -------\n        newimage    a numpy image of the same dimensions as the input\n                    image, with the transformation applied; all values\n                    in the image are replaced by the square-root of the\n                    absolute value times the original sign\n        \"\"\"\n        newimage = numpy.sqrt(numpy.abs(image))\n        newimage[image < 0.] = -1. * newimage[image < 0.]\n        self.transvalues = [1.]\n        return newimage\n\n\n    def plotxy(self, xvalues, yvalues, **parameters):\n        \"\"\"\n        A basic plot routine, for quick use without having to keep looking up the\n        plot commands; parameters can include \"symbol\", \"title\", \"xlabel\", and\n        \"ylabel\".\n        \"\"\"\n        pyplot.figure(1)\n        pyplot.subplot(111)\n        colour = parameters.get(\"colour\")\n        sym = parameters.get(\"symbol\")\n        markersize = parameters.get(\"markersize\")\n        ymodel = parameters.get(\"ymodel\")\n        params = parameters.get(\"fitparams\")\n        if sym is None:\n            sym = '-'\n        if colour is None:\n            colour = 'black'\n        if markersize is None:\n            markersize = 2.0\n        pyplot.plot(xvalues, yvalues, sym, color=colour, markersize=markersize)\n        if parameters.get(\"title\") is not None:\n            pyplot.title(parameters.get(\"title\"))\n        if not ymodel is None:\n            pyplot.plot(xvalues, ymodel, ':', color='red')\n        if not params is None:\n            str1 = 'Fit: Centre %.3f Peak %.2f Sigma %.2f Baseline %.2f' % (\n                params[0], params[1], params[2], params[3])\n            pyplot.suptitle(str1)\n        if parameters.get(\"xlabel\") is not None:\n            pyplot.xlabel(parameters.get(\"xlabel\"))\n        if parameters.get(\"ylabel\") is not None:\n            pyplot.ylabel(parameters.get(\"ylabel\"))\n        pyplot.show()\n\n\nif __name__ == \"__main__\":\n    # create the window\n    root = Tk.Tk()\n    root.title('Image Display Widget')\n    imdisp = ImageGUI(root)\n    if '.fits' in sys.argv[-1]:\n        imdisp.imagefilename = sys.argv[-1]\n        imdisp.image = imdisp.get_image()\n    imdisp.make_image_window()\n    root.mainloop()\n" ]
[ [ "numpy.poly1d", "numpy.asarray", "numpy.squeeze", "matplotlib.pyplot.plot", "numpy.max", "numpy.mean", "numpy.argmin", "numpy.arange", "numpy.copy", "matplotlib.pyplot.subplot", "numpy.argmax", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.min", "numpy.median", "numpy.log10", "matplotlib.pyplot.show", "matplotlib.pyplot.suptitle", "numpy.sum", "numpy.convolve", "numpy.abs", "numpy.isfinite", "matplotlib.figure.Figure", "numpy.ones", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
climate-and-health-datasci-Unicamp/permapy
[ "23be83f0a8c31fe79611dc63bcaef5b0efebf5ad" ]
[ "utils/utils.py" ]
[ "import os\nimport numpy as np\n\nclass Utils:\n def __init__(self):\n pass\n \n def retrieve_data_from_np_array(self,path):\n \"\"\" Read a numpy array\"\"\"\n with open(path, 'rb') as f:\n np_array = np.load(f)\n return np_array\n\n def create_folder_structure(self,folder):\n \"\"\" Create the comple folder structure if it does not exists \"\"\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n def save_nparray_to_folder(self,np_array,folder_path,filename):\n \"\"\" Save numpy array to the specified folder path \"\"\"\n complete_path = os.path.join(folder_path,filename+'.npy')\n with open(complete_path, 'wb') as f:\n print(f\"{filename} Shape: \",np_array.shape)\n np.save(f, np_array)\n " ]
[ [ "numpy.load", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FilatovArtm/RAdam-Tensorflow
[ "29328c3ddf07b62585c29fb1bc1b8ebf33a71c8b" ]
[ "RAdam.py" ]
[ "import tensorflow as tf\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.training import optimizer\n\nclass RAdamOptimizer(optimizer.Optimizer):\n\n \"\"\"\n RAdam optimizer : On The Variance Of The Adaptive Learning Rate And Beyond\n https://arxiv.org/abs/1908.03265\n \"\"\"\n\n def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n weight_decay=0.,\n use_locking=False,\n name=\"RAdam\"):\n\n super(RAdamOptimizer, self).__init__(use_locking, name)\n self._lr = learning_rate\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n self._weight_decay = weight_decay\n\n self._lr_t = None\n self._step_t = None\n self._beta1_t = None\n self._beta2_t = None\n self._epsilon_t = None\n self._weight_decay_t = None\n\n def _get_beta_accumulators(self):\n with ops.init_scope():\n if context.executing_eagerly():\n graph = None\n else:\n graph = ops.get_default_graph()\n return (self._get_non_slot_variable(\"step\", graph=graph),\n self._get_non_slot_variable(\"beta1_power\", graph=graph),\n self._get_non_slot_variable(\"beta2_power\", graph=graph))\n\n def _create_slots(self, var_list):\n first_var = min(var_list, key=lambda x: x.name)\n self._create_non_slot_variable(initial_value=1.0, name=\"step\", colocate_with=first_var)\n self._create_non_slot_variable(initial_value=self._beta1, name=\"beta1_power\", colocate_with=first_var)\n self._create_non_slot_variable(initial_value=self._beta2, name=\"beta2_power\", colocate_with=first_var)\n\n for v in var_list:\n self._zeros_slot(v, \"m\", self._name)\n self._zeros_slot(v, \"v\", self._name)\n\n def _prepare(self):\n lr = self._call_if_callable(self._lr)\n beta1 = self._call_if_callable(self._beta1)\n beta2 = self._call_if_callable(self._beta2)\n epsilon = self._call_if_callable(self._epsilon)\n weight_decay = self._call_if_callable(self._weight_decay)\n\n self._lr_t = ops.convert_to_tensor(lr, name=\"learning_rate\")\n self._beta1_t = ops.convert_to_tensor(beta1, name=\"beta1\")\n self._beta2_t = ops.convert_to_tensor(beta2, name=\"beta2\")\n self._epsilon_t = ops.convert_to_tensor(epsilon, name=\"epsilon\")\n self._weight_decay_t = ops.convert_to_tensor(weight_decay, name=\"weight_decay\")\n\n def _apply_dense(self, grad, var):\n return self._resource_apply_dense(grad, var)\n\n def _resource_apply_dense(self, grad, var):\n step, beta1_power, beta2_power = self._get_beta_accumulators()\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\n beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\n\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\n beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\n\n sma_inf = 2.0 / (1.0 - beta2_t) - 1.0\n sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)\n\n m = self.get_slot(var, \"m\")\n m_t = state_ops.assign(m, beta1_t * m + (1.0 - beta1_t) * grad, use_locking=self._use_locking)\n mhat_t = m_t / (1.0 - beta1_power)\n\n v = self.get_slot(var, \"v\")\n v_t = state_ops.assign(v, beta2_t * v + (1.0 - beta2_t) * math_ops.square(grad), use_locking=self._use_locking)\n vhat_t = math_ops.sqrt(v_t / ((1.0 - beta2_power) + 
epsilon_t))\n\n r_t = math_ops.sqrt( ((sma_t - 4.0) * (sma_t - 2.0) * sma_inf) / ((sma_inf - 4.0) * (sma_inf - 2.0) * sma_t) )\n\n var_t = tf.cond(sma_t >= 5.0, lambda : r_t * mhat_t / vhat_t, lambda : mhat_t)\n\n if self._weight_decay > 0.0:\n var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var\n\n var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)\n\n updates = [var_update, m_t, v_t]\n\n return control_flow_ops.group(*updates)\n\n def _apply_sparse_shared(self, grad, var, indices, scatter_add):\n step, beta1_power, beta2_power = self._get_beta_accumulators()\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\n beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\n\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\n beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\n\n sma_inf = 2.0 / (1.0 - beta2_t) - 1.0\n sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)\n\n m = self.get_slot(var, \"m\")\n m_scaled_g_values = grad * (1 - beta1_t)\n m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)\n\n with ops.control_dependencies([m_t]):\n m_t = scatter_add(m, indices, m_scaled_g_values)\n\n mhat_t = m_t / (1.0 - beta1_power)\n\n v = self.get_slot(var, \"v\")\n v_scaled_g_values = (grad * grad) * (1 - beta2_t)\n v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)\n\n with ops.control_dependencies([v_t]):\n v_t = scatter_add(v, indices, v_scaled_g_values)\n\n vhat_t = math_ops.sqrt(v_t / (1.0 - beta2_power) + epsilon_t)\n\n r_t = math_ops.sqrt( ((sma_t - 4.0) * (sma_t - 2.0) * sma_inf) / ((sma_inf - 4.0) * (sma_inf - 2.0) * sma_t) )\n\n var_t = tf.cond(sma_t >= 5.0, lambda : r_t * mhat_t / vhat_t, lambda : mhat_t)\n\n if self._weight_decay > 0.0:\n var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var\n\n var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)\n\n updates = [var_update, m_t, v_t]\n\n return control_flow_ops.group(*updates)\n\n def _apply_sparse(self, grad, var):\n return self._apply_sparse_shared(\n grad.values,\n var,\n grad.indices,\n lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))\n\n def _resource_scatter_add(self, x, i, v):\n with ops.control_dependencies([resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\n return x.value()\n\n def _resource_apply_sparse(self, grad, var, indices):\n return self._apply_sparse_shared(grad, var, indices, self._resource_scatter_add)\n\n def _finish(self, update_ops, name_scope):\n with ops.control_dependencies(update_ops):\n step, beta1_power, beta2_power = self._get_beta_accumulators()\n with ops.colocate_with(beta1_power):\n update_step = step.assign(step + 1.0, use_locking=self._use_locking)\n update_beta1 = beta1_power.assign(beta1_power * self._beta1_t, use_locking=self._use_locking)\n update_beta2 = beta2_power.assign(beta2_power * self._beta2_t, use_locking=self._use_locking)\n return control_flow_ops.group(*update_ops + [update_step, update_beta1, update_beta2], name=name_scope)\n" ]
[ [ "tensorflow.cond", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.math_ops.square", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.resource_variable_ops.resource_scatter_add", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.state_ops.assign_sub", "tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.state_ops.scatter_add", "tensorflow.python.ops.math_ops.cast" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] } ]
deekshaarya4/gymexperiments
[ "2d503ba14fcfba41339de25dd78d649bd12693e6" ]
[ "irmodel.py" ]
[ "import numpy as np\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\n\nclass TimeBuffer:\n def __init__(self, max_timesteps, max_episodes, observation_shape, action_shape):\n self.max_timesteps = max_timesteps\n self.max_episodes = max_episodes\n self.observation_shape = observation_shape\n self.action_shape = action_shape\n\n self.preobs = np.empty((self.max_timesteps, self.max_episodes) + observation_shape)\n self.actions = np.empty((self.max_timesteps, self.max_episodes) + action_shape)\n self.rewards = np.empty((self.max_timesteps, self.max_episodes))\n self.postobs = np.empty((self.max_timesteps, self.max_episodes) + observation_shape)\n self.terminals = np.empty((self.max_timesteps, self.max_episodes), dtype = np.bool)\n self.lengths = np.zeros(self.max_episodes, np.uint)\n \n self.num_episodes = 0\n self.episode = 0\n self.timestep = 0\n\n def add(self, preobs, action, reward, postobs, terminal):\n assert preobs.shape == self.observation_shape\n assert action.shape == self.action_shape\n assert postobs.shape == self.observation_shape\n self.preobs[self.timestep, self.episode] = preobs\n self.actions[self.timestep, self.episode] = action\n self.rewards[self.timestep, self.episode] = reward\n self.postobs[self.timestep, self.episode] = postobs\n self.terminals[self.timestep, self.episode] = terminal\n self.timestep += 1\n \n def sample(self, batch_size, max_timestep):\n episodes = []\n timesteps = []\n for i in xrange(batch_size):\n episode = np.random.choice(self.num_episodes)\n timestep = np.random.choice(min(self.lengths[episode], max_timestep))\n episodes.append(episode)\n timesteps.append(timestep)\n #return self.preobs[indexes], self.actions[indexes], self.rewards[indexes], self.postobs[indexes], timesteps\n return self.postobs[timesteps, episodes], np.array(timesteps)\n\n def new_episode(self):\n self.lengths[self.episode] = self.timestep\n self.episode += 1\n self.timestep = 0\n self.num_episodes = self.episode\n\n def reset(self):\n self.num_episodes = 0\n self.episode = 0\n self.timestep = 0\n self.lengths *= 0\n\n def is_full(self):\n return self.num_episodes == self.max_episodes\n\nclass IRModel:\n def __init__(self, max_timesteps):\n self.max_timesteps = max_timesteps\n\n self.obsmodels = []\n self.obscovs = []\n self.rewmodels = []\n self.termmodels = []\n\n def fit(self, preobs, actions, rewards, postobs, terminals, lengths):\n self.obsmodels = []\n self.obscovs = []\n self.rewmodels = []\n self.termmodels = []\n for t in xrange(self.max_timesteps):\n episodes = lengths > t\n if sum(episodes) < 2:\n break\n \n # fit observation/state model\n X = np.concatenate([preobs[t, episodes], actions[t, episodes]], axis=1)\n Y = postobs[t, episodes]\n obsmodel = LinearRegression().fit(X, Y)\n self.obsmodels.append(obsmodel)\n Yhat = obsmodel.predict(X)\n obscov = np.cov(Y - Yhat, rowvar=0)\n self.obscovs.append(obscov)\n\n # fit reward model\n Y = rewards[t, episodes]\n rewmodel = LinearRegression().fit(X, Y)\n self.rewmodels.append(rewmodel)\n\n # fit terminal model\n #Y = terminals[t, episodes]\n #termmodel = LogisticRegression().fit(X, Y)\n #self.termmodels.append(termmodel)\n\n def predict(self, preobs, actions, timesteps):\n postobs = []\n rewards = []\n terminals = []\n for preob, action, timestep in zip(preobs, actions, timesteps):\n # predict next observation\n X = np.concatenate((preob, action), axis=0)\n obsmodel = self.obsmodels[timestep]\n obsmeans = obsmodel.predict(X)[0]\n obscov = self.obscovs[timestep]\n postob = 
np.random.multivariate_normal(obsmeans, obscov)\n postobs.append(postob)\n\n # predict reward\n rewmodel = self.rewmodels[timestep]\n reward = rewmodel.predict(X)[0]\n rewards.append(reward)\n\n # predict terminal\n #termmodel = self.termmodels[timestep]\n #terminal = termmodel.predict(X)[0]\n #terminals.append(terminal)\n terminals.append(False)\n\n return np.stack(postobs), np.stack(rewards), np.stack(terminals)\n\n def supported_timesteps(self):\n return len(self.obsmodels)\n\nif __name__ == \"__main__\":\n obs_shape = (3,)\n act_shape = (2,)\n buf = TimeBuffer(10, 4, obs_shape, act_shape)\n for i in xrange(7):\n buf.add(i*np.ones(obs_shape), np.ones(act_shape), 1.0, (i+1)*np.ones(obs_shape), False)\n assert buf.timestep == 7\n buf.add(7*np.ones(obs_shape), np.ones(act_shape), 1.0, 8*np.ones(obs_shape), True)\n assert buf.num_episodes == 1\n assert buf.timestep == 0\n assert buf.lengths[0] == 8\n\n for i in xrange(5):\n buf.add((i+1)*np.ones(obs_shape), np.ones(act_shape), 1.0, (i+2)*np.ones(obs_shape), False)\n assert buf.timestep == 5\n buf.add(6*np.ones(obs_shape), np.ones(act_shape), 1.0, 7*np.ones(obs_shape), True)\n assert buf.num_episodes == 2\n assert buf.timestep == 0\n assert buf.lengths[1] == 6\n\n preobs, timesteps = buf.sample(3, 10)  # sample() also requires max_timestep\n assert len(preobs) == 3\n assert len(timesteps) == 3\n assert preobs[0].shape == obs_shape\n\n mdl = IRModel(10)\n mdl.fit(buf.preobs, buf.actions, buf.rewards, buf.postobs, buf.terminals, buf.lengths)\n assert len(mdl.obsmodels) == 6\n\n timesteps = [0,1,2]\n postobs, rewards, terminals = mdl.predict(preobs, np.ones((3,2)), timesteps)\n assert np.all(postobs - (preobs + 1) < 0.00001)\n" ]
[ [ "numpy.random.choice", "numpy.random.multivariate_normal", "numpy.stack", "numpy.ones", "numpy.all", "numpy.concatenate", "numpy.cov", "sklearn.linear_model.LinearRegression", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Granero0011/lambdata
[ "c31afa997f825997ecc70a57f46805122a42fd5e" ]
[ "lambdata_granero0011/__init__.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n\nlambdata- a collection of Data Science helper functions\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom . import example_module\n\nY= example_module.increment(example_module.x)\nTEST = pd.DataFrame(np.ones(10))\n\n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sallysyw/ClassyVision
[ "b6202d6323431203997039a6768762811cb7215f" ]
[ "classy_vision/models/classy_model.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nimport types\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom classy_vision.heads.classy_head import ClassyHead\n\nfrom .classy_block import ClassyBlock\n\n\nclass ClassyModelEvaluationMode(Enum):\n DEFAULT = 0\n VIDEO_CLIP_AVERAGING = 1\n\n\nclass _ClassyModelMeta(type):\n \"\"\"Metaclass to return a ClassyModel instance wrapped by a ClassyModelWrapper.\"\"\"\n\n def __call__(cls, *args, **kwargs):\n \"\"\"Override the __call__ function for the metaclass.\n\n This is called when a new instance of a class with this class as its metaclass\n is initialized. For example -\n\n .. code-block:: python\n class MyClass(metaclass=_ClassyModelMeta):\n wrapper_cls = MyWrapper\n\n my_class_instance = MyClass() # returned instance will be a MyWrapper\n \"\"\"\n classy_model = super().__call__(*args, **kwargs)\n\n wrapper_cls = cls.wrapper_cls\n if wrapper_cls is not None:\n # wrap the ClassyModel instance with a wrapper class and return that instead\n classy_model = wrapper_cls(classy_model)\n return classy_model\n\n\nclass _ClassyModelMethod:\n \"\"\"Class to override ClassyModel method calls to ensure the wrapper is returned.\n\n This helps override calls like model.cuda() which return self, to return the\n wrapper instead of the underlying classy_model.\n \"\"\"\n\n def __init__(self, wrapper, classy_method):\n self.wrapper = wrapper\n self.classy_method = classy_method\n\n def __call__(self, *args, **kwargs):\n ret_val = self.classy_method(*args, **kwargs)\n if ret_val is self.wrapper.classy_model:\n # if the method is returning the classy_model, return the wrapper instead\n ret_val = self.wrapper\n return ret_val\n\n\nclass ClassyModelWrapper:\n \"\"\"Base ClassyModel wrapper class.\n\n This class acts as a thin pass through wrapper which lets users modify the behavior\n of ClassyModels, such as changing the return output of the forward() call.\n This wrapper acts as a ClassyModel by itself and the underlying model can be\n accessed by the `classy_model` attribute.\n \"\"\"\n\n # TODO: Make this torchscriptable by inheriting from nn.Module / ClassyModel\n\n def __init__(self, classy_model):\n self.classy_model = classy_model\n\n def __getattr__(self, name):\n if name != \"classy_model\" and hasattr(self, \"classy_model\"):\n attr = getattr(self.classy_model, name)\n if isinstance(attr, types.MethodType):\n attr = _ClassyModelMethod(self, attr)\n return attr\n else:\n return super().__getattr__(name)\n\n def __setattr__(self, name, value):\n # __setattr__ works differently from __getattr__ and is called even when the\n # attribute is a method, like forward.\n if name not in [\"classy_model\", \"forward\"] and hasattr(self, \"classy_model\"):\n setattr(self.classy_model, name, value)\n else:\n super().__setattr__(name, value)\n\n def forward(self, *args, **kwargs):\n return self.classy_model(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def __repr__(self):\n return f\"Classy {type(self.classy_model)}:\\n{self.classy_model.__repr__()}\"\n\n @property\n def __class__(self):\n return self.classy_model.__class__\n\n\nclass ClassyModelHeadExecutorWrapper(ClassyModelWrapper):\n \"\"\"Wrapper which changes the forward to also execute and return head 
output.\"\"\"\n\n def forward(self, *args, **kwargs):\n out = self.classy_model(*args, **kwargs)\n\n if len(self._heads) == 0:\n return out\n\n # heads have been attached to the model, return their output instead\n head_outputs = self.execute_heads()\n if len(head_outputs) == 1:\n return list(head_outputs.values())[0]\n else:\n return head_outputs\n\n\nclass ClassyModel(nn.Module, metaclass=_ClassyModelMeta):\n \"\"\"Base class for models in classy vision.\n\n A model refers either to a specific architecture (e.g. ResNet50) or a\n family of architectures (e.g. ResNet). Models can take arguments in the\n constructor in order to configure different behavior (e.g.\n hyperparameters). Classy Models must implement :func:`from_config` in\n order to allow instantiation from a configuration file. Like regular\n PyTorch models, Classy Models must also implement :func:`forward`, where\n the bulk of the inference logic lives.\n\n Classy Models also have some advanced functionality for production\n fine-tuning systems. For example, we allow users to train a trunk\n model and then attach heads to the model via the attachable\n blocks. Making your model support the trunk-heads paradigm is\n completely optional.\n\n NOTE: Advanced users can modify the behavior of their implemented models by\n specifying the `wrapper_cls` class attribute, which should be a class\n derived from :class:`ClassyModelWrapper` (see the documentation for that class\n for more information). Users can set it to `None` to skip wrapping their model\n and to make their model torchscriptable. This is set to\n :class:`ClassyModelHeadExecutorWrapper` by default.\n \"\"\"\n\n wrapper_cls = ClassyModelHeadExecutorWrapper\n\n _attachable_block_names: List[str]\n\n def __init__(self):\n \"\"\"Constructor for ClassyModel.\"\"\"\n super().__init__()\n self._attachable_blocks = {}\n self._attachable_block_names = []\n self._heads = nn.ModuleDict()\n self._head_outputs = {}\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"ClassyModel\":\n \"\"\"Instantiates a ClassyModel from a configuration.\n\n Args:\n config: A configuration for the ClassyModel.\n\n Returns:\n A ClassyModel instance.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def from_model(\n cls,\n model: nn.Module,\n input_shape: Optional[Tuple] = None,\n output_shape: Optional[Tuple] = None,\n model_depth: Optional[int] = None,\n ):\n \"\"\"Converts an :class:`nn.Module` to a `ClassyModel`.\n\n Args:\n model: The model to convert\n For the remaining args, look at the corresponding properties of ClassyModel\n\n Returns:\n A ClassyModel instance.\n \"\"\"\n return _ClassyModelAdapter(\n model,\n input_shape=input_shape,\n output_shape=output_shape,\n model_depth=model_depth,\n )\n\n @classmethod\n def from_checkpoint(cls, checkpoint):\n from . import build_model\n\n model = build_model(checkpoint[\"input_args\"][\"config\"][\"model\"])\n model.set_classy_state(checkpoint[\"classy_state_dict\"][\"base_model\"])\n return model\n\n def get_classy_state(self, deep_copy=False):\n \"\"\"Get the state of the ClassyModel.\n\n The returned state is used for checkpointing.\n\n NOTE: For advanced users, the structure of the returned dict is -\n `{\"model\": {\"trunk\": trunk_state, \"heads\": heads_state}}`.\n The trunk state is the state of the model when no heads are attached.\n\n Args:\n deep_copy: If True, creates a deep copy of the state Dict. 
Otherwise, the\n returned Dict's state will be tied to the object's.\n\n Returns:\n A state dictionary containing the state of the model.\n \"\"\"\n attached_heads = self.get_heads()\n # clear heads to get the state of the model without any heads, which we refer to\n # as the trunk state. If the model doesn't have heads attached, all of the\n # model's state lives in the trunk.\n self.clear_heads()\n trunk_state_dict = self.state_dict()\n self.set_heads(attached_heads)\n\n head_state_dict = {}\n for block, heads in attached_heads.items():\n head_state_dict[block] = {\n head.unique_id: head.state_dict() for head in heads\n }\n model_state_dict = {\n \"model\": {\"trunk\": trunk_state_dict, \"heads\": head_state_dict}\n }\n if deep_copy:\n model_state_dict = copy.deepcopy(model_state_dict)\n return model_state_dict\n\n def load_head_states(self, state):\n \"\"\"Load only the state (weights) of the heads.\n\n For a trunk-heads model, this function allows the user to\n only update the head state of the model. Useful for attaching\n fine-tuned heads to a pre-trained trunk.\n\n Args:\n state (Dict): Contains the classy model state under key \"model\"\n\n \"\"\"\n for block_name, head_states in state[\"model\"][\"heads\"].items():\n for head_name, head_state in head_states.items():\n self._heads[block_name][head_name].load_state_dict(head_state)\n\n def set_classy_state(self, state):\n \"\"\"Set the state of the ClassyModel.\n\n Args:\n state_dict: The state dictionary. Must be the output of a call to\n :func:`get_classy_state`.\n\n This is used to load the state of the model from a checkpoint.\n \"\"\"\n # load the state for heads\n self.load_head_states(state)\n\n # clear the heads to set the trunk's state. This is done because when heads are\n # attached to modules, we wrap them by ClassyBlocks, thereby changing the\n # structure of the model and its state dict. 
So, the trunk state is always\n # fetched / set when there are no blocks attached.\n attached_heads = self.get_heads()\n self.clear_heads()\n self.load_state_dict(state[\"model\"][\"trunk\"])\n\n # set the heads back again\n self.set_heads(attached_heads)\n\n def forward(self, x):\n \"\"\"\n Perform computation of blocks in the order define in get_blocks.\n \"\"\"\n raise NotImplementedError\n\n def extract_features(self, x):\n \"\"\"\n Extract features from the model.\n\n Derived classes can implement this method to extract the features before\n applying the final fully connected layer.\n \"\"\"\n return self.forward(x)\n\n def _build_attachable_block(self, name, module):\n \"\"\"\n Add a wrapper to the module to allow to attach heads to the module.\n \"\"\"\n if name in self._attachable_blocks:\n raise ValueError(\"Found duplicated block name {}\".format(name))\n block = ClassyBlock(name, module)\n self._attachable_blocks[name] = block\n self._attachable_block_names.append(name)\n return block\n\n @property\n def attachable_block_names(self):\n \"\"\"\n Return names of all attachable blocks.\n \"\"\"\n return self._attachable_block_names\n\n def clear_heads(self):\n # clear all existing heads\n self._heads.clear()\n self._head_outputs.clear()\n self._strip_classy_blocks(self)\n self._attachable_blocks = {}\n self._attachable_block_names = []\n\n def _strip_classy_blocks(self, module):\n for name, child_module in module.named_children():\n if isinstance(child_module, ClassyBlock):\n module.add_module(name, child_module.wrapped_module())\n self._strip_classy_blocks(child_module)\n\n def _make_module_attachable(self, module, module_name):\n found = False\n for name, child_module in module.named_children():\n if name == module_name:\n module.add_module(\n name, self._build_attachable_block(name, child_module)\n )\n found = True\n # do not exit - we will check all possible modules and raise an\n # exception if there are duplicates\n found_in_child = self._make_module_attachable(child_module, module_name)\n found = found or found_in_child\n return found\n\n def set_heads(self, heads: Dict[str, List[ClassyHead]]):\n \"\"\"Attach all the heads to corresponding blocks.\n\n A head is expected to be a ClassyHead object. For more\n details, see :class:`classy_vision.heads.ClassyHead`.\n\n Args:\n heads (Dict): a mapping between attachable block name\n and a list of heads attached to that block. For\n example, if you have two different teams that want to\n attach two different heads for downstream classifiers to\n the 15th block, then they would use:\n\n .. 
code-block:: python\n\n heads = {\"block15\":\n [classifier_head1, classifier_head2]\n }\n \"\"\"\n self.clear_heads()\n\n head_ids = set()\n for block_name, block_heads in heads.items():\n if not self._make_module_attachable(self, block_name):\n raise KeyError(f\"{block_name} not found in the model\")\n for head in block_heads:\n if head.unique_id in head_ids:\n raise ValueError(\"head id {} already exists\".format(head.unique_id))\n head_ids.add(head.unique_id)\n self._heads[block_name] = nn.ModuleDict(\n {head.unique_id: head for head in block_heads}\n )\n\n def get_heads(self):\n \"\"\"Returns the heads on the model\n\n Function returns the heads a dictionary of block names to\n `nn.Modules <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_\n attached to that block.\n\n \"\"\"\n return {\n block_name: list(heads.values())\n for block_name, heads in self._heads.items()\n }\n\n @property\n def head_outputs(self):\n \"\"\"Return outputs of all heads in the format of Dict[head_id, output]\n\n Head outputs are cached during a forward pass.\n \"\"\"\n return self._head_outputs.copy()\n\n def get_block_outputs(self) -> Dict[str, torch.Tensor]:\n outputs = {}\n for name, block in self._attachable_blocks.items():\n outputs[name] = block.output\n return outputs\n\n def execute_heads(self) -> Dict[str, torch.Tensor]:\n block_outs = self.get_block_outputs()\n outputs = {}\n for block_name, heads in self._heads.items():\n for head in heads.values():\n outputs[head.unique_id] = head(block_outs[block_name])\n self._head_outputs = outputs\n return outputs\n\n def get_optimizer_params(self, bn_weight_decay=False):\n \"\"\"Returns param groups for optimizer.\n\n Function to return dict of params with \"keys\" from\n {\"regularized_params\", \"unregularized_params\"}\n to \"values\" a list of `pytorch Params <https://pytorch.org/docs/\n stable/nn.html#torch.nn.Parameter>`_.\n\n \"weight_decay\" provided as part of optimizer is only used\n for \"regularized_params\". For \"unregularized_params\", weight_decay is set\n to 0.0\n\n This implementation sets `BatchNorm's <https://pytorch.org/docs/\n stable/nn.html#normalization-layers>`_ all trainable params to be\n unregularized_params if ``bn_weight_decay`` is False.\n\n Override this function for any custom behavior.\n\n Args:\n bn_weight_decay (bool): Apply weight decay to bn params if true\n \"\"\"\n unregularized_params = []\n regularized_params = []\n for module in self.modules():\n # If module has children (i.e. 
internal node of constructed DAG) then\n # only add direct parameters() to the list of params, else go over\n # children node to find if they are BatchNorm or have \"bias\".\n if list(module.children()) != []:\n for params in module.parameters(recurse=False):\n if params.requires_grad:\n regularized_params.append(params)\n elif not bn_weight_decay and isinstance(\n module, nn.modules.batchnorm._BatchNorm\n ):\n for params in module.parameters():\n if params.requires_grad:\n unregularized_params.append(params)\n else:\n for params in module.parameters():\n if params.requires_grad:\n regularized_params.append(params)\n return {\n \"regularized_params\": regularized_params,\n \"unregularized_params\": unregularized_params,\n }\n\n @property\n def input_shape(self):\n \"\"\"If implemented, returns expected input tensor shape\n \"\"\"\n raise NotImplementedError\n\n @property\n def output_shape(self):\n \"\"\"If implemented, returns expected output tensor shape\n \"\"\"\n raise NotImplementedError\n\n @property\n def model_depth(self):\n \"\"\"If implemented, returns number of layers in model\n \"\"\"\n raise NotImplementedError\n\n @property\n def evaluation_mode(self):\n \"\"\"Used by video models for averaging over contiguous clips.\n \"\"\"\n # TODO: Remove this once we have a video task, this logic should\n # live in a video specific task\n return ClassyModelEvaluationMode.DEFAULT\n\n\nclass _ClassyModelAdapter(ClassyModel):\n \"\"\"\n Class which adapts an `nn.Module <https://pytorch.org/docs/stable/\n nn.html#torch.nn.Module>`_ to a ClassyModel by wrapping the model.\n\n The only required argument is the model, the additional args are needed\n to get some additional capabilities from Classy Vision to work.\n \"\"\"\n\n def __init__(\n self,\n model: nn.Module,\n input_shape: Optional[Tuple] = None,\n output_shape: Optional[Tuple] = None,\n model_depth: Optional[int] = None,\n ):\n super().__init__()\n self.model = model\n self._input_shape = input_shape\n self._output_shape = output_shape\n self._model_depth = model_depth\n\n def forward(self, x):\n return self.model(x)\n\n def extract_features(self, x):\n if hasattr(self.model, \"extract_features\"):\n return self.model.extract_features(x)\n return super().extract_features(x)\n\n @property\n def input_shape(self):\n if self._input_shape is not None:\n return self._input_shape\n return super().input_shape\n\n @property\n def output_shape(self):\n if self._output_shape is not None:\n return self._output_shape\n return super().output_shape\n\n @property\n def model_depth(self):\n if self._model_depth is not None:\n return self._model_depth\n return super().model_depth\n" ]
[ [ "torch.nn.ModuleDict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
awacha/credolib
[ "11c0be3eea7257d3d6e13697d3e76ce538f2f1b2" ]
[ "credolib/plotting.py" ]
[ "__all__=['plotsascurve','guinierplot','kratkyplot']\nfrom .io import getsascurve\nimport matplotlib.pyplot as plt\nfrom sastool.libconfig import qunit, dunit\n\ndef plotsascurve(samplename, *args, **kwargs):\n if 'dist' not in kwargs:\n kwargs['dist'] = None\n data1d, dist = getsascurve(samplename, kwargs['dist'])\n del kwargs['dist']\n if 'factor' in kwargs:\n factor=kwargs['factor']\n del kwargs['factor']\n else:\n factor=1\n if 'label' not in kwargs:\n if isinstance(dist, str):\n kwargs['label'] = samplename + ' ' + dist\n else:\n kwargs['label'] = samplename + ' %g mm' % dist\n if 'errorbar' in kwargs:\n errorbars = bool(kwargs['errorbar'])\n del kwargs['errorbar']\n else:\n errorbars = False\n if errorbars:\n ret = (data1d*factor).errorbar(*args, **kwargs)\n plt.xscale('log')\n plt.yscale('log')\n else:\n ret = (data1d*factor).loglog(*args, **kwargs)\n plt.xlabel('q (' + qunit() + ')')\n plt.ylabel('$d\\\\Sigma/d\\\\Omega$ (cm$^{-1}$ sr$^{-1}$)')\n plt.legend(loc='best')\n plt.grid(True, which='both')\n plt.axis('tight')\n return ret\n\n\ndef guinierplot(*args, **kwargs):\n \"\"\"Make a Guinier plot. This is simply a wrapper around plotsascurve().\"\"\"\n ret=plotsascurve(*args, **kwargs)\n plt.xscale('power',exponent=2)\n plt.yscale('log')\n return ret\n\n\ndef kratkyplot(samplename, *args, **kwargs):\n if 'dist' not in kwargs:\n kwargs['dist'] = None\n data1d, dist = getsascurve(samplename, kwargs['dist'])\n del kwargs['dist']\n if 'factor' in kwargs:\n factor=kwargs['factor']\n del kwargs['factor']\n else:\n factor=1\n if 'label' not in kwargs:\n if isinstance(dist, str):\n kwargs['label'] = samplename + ' ' + dist\n else:\n kwargs['label'] = samplename + ' %g mm' % dist\n if 'errorbar' in kwargs:\n errorbars = bool(kwargs['errorbar'])\n del kwargs['errorbar']\n else:\n errorbars = False\n data1dscaled=data1d*factor\n if errorbars:\n if hasattr(data1dscaled, 'dx'):\n dx=data1dscaled.qError\n dy=(data1dscaled.Error ** 2 * data1dscaled.q ** 4 +\n data1dscaled.Intensity ** 2 * data1dscaled.qError ** 2\n * data1dscaled.q ** 2 * 4) ** 0.5\n else:\n dx=None\n dy=data1dscaled.Error\n ret = plt.errorbar(data1dscaled.q,\n data1dscaled.q ** 2 * data1dscaled.Intensity,\n dy, dx, *args, **kwargs)\n else:\n ret = plt.plot(data1dscaled.q,\n data1dscaled.Intensity * data1dscaled.q ** 2,\n *args, **kwargs)\n plt.xlabel('q (' + dunit() + ')')\n plt.ylabel('$q^2 d\\\\Sigma/d\\\\Omega$ (' +\n dunit() +\n '$^{-2}$ cm$^{-1}$ sr$^{-1}$)')\n plt.legend(loc='best')\n plt.grid(True, which='both')\n plt.axis('tight')\n return ret\n\ndef porodplot(samplename, *args, **kwargs):\n if 'dist' not in kwargs:\n kwargs['dist'] = None\n data1d, dist = getsascurve(samplename, kwargs['dist'])\n del kwargs['dist']\n if 'factor' in kwargs:\n factor=kwargs['factor']\n del kwargs['factor']\n else:\n factor=1\n if 'label' not in kwargs:\n if isinstance(dist, str):\n kwargs['label'] = samplename + ' ' + dist\n else:\n kwargs['label'] = samplename + ' %g mm' % dist\n if 'errorbar' in kwargs:\n errorbars = bool(kwargs['errorbar'])\n del kwargs['errorbar']\n else:\n errorbars = False\n data1dscaled=data1d*factor\n if errorbars:\n if hasattr(data1dscaled, 'dx'):\n dx=data1dscaled.qError\n dy=(data1dscaled.Error ** 2 * data1dscaled.q ** 8 +\n data1dscaled.Intensity ** 2 * data1dscaled.qError ** 2\n * data1dscaled.q ** 6 * 14) ** 0.5\n else:\n dx=None\n dy=data1dscaled.Error\n ret = plt.errorbar(data1dscaled.q,\n data1dscaled.q ** 4 * data1dscaled.Intensity,\n dy, dx, *args, **kwargs)\n else:\n ret = plt.plot(data1dscaled.q,\n 
data1dscaled.Intensity * data1dscaled.q ** 2,\n *args, **kwargs)\n plt.xlabel('q (' + dunit() + ')')\n plt.ylabel('$q^4 d\\\\Sigma/d\\\\Omega$ (' +\n dunit() +\n '$^{-4}$ cm$^{-1}$ sr$^{-1}$)')\n plt.legend(loc='best')\n plt.xscale('power',exponent=4)\n plt.yscale('linear')\n plt.grid(True, which='both')\n plt.axis('tight')\n return ret\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.yscale", "matplotlib.pyplot.plot", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "matplotlib.pyplot.xscale", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ankitaduttagupta/ga-learner-dsmp-repo
[ "8e1bed56ba8c86964857fa52396c8ecd60976888" ]
[ "NLP/code.py" ]
[ "# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nimport re\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score ,confusion_matrix\n\n\n# Code starts here\nnews=pd.read_csv(path)\n# load data\nnews=news[['TITLE','CATEGORY']]\ndist=news.CATEGORY.value_counts()\nprint(dist)\nprint(news.head())\n# subset data\n\n# distribution of classes\n\n\n# display class distribution\n\n\n# display data\n\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# stopwords \nstop=set(stopwords.words('english'))\n# retain only alphabets\nnews['TITLE']=news['TITLE'].apply(lambda x:re.sub(\"[^a-zA-Z]\", \" \",x),)\n\n# convert to lowercase and tokenize\nnews['TITLE']=news['TITLE'].apply(lambda x:x.lower().split(),)\n\n# remove stopwords\nnews['TITLE']=news['TITLE'].apply(lambda x:[i for i in x if i not in stop],)\n\n# join list elements\nnews['TITLE']=news['TITLE'].apply(lambda x: ' '.join(x),)\nprint(news['TITLE'])\n# split into training and test sets\nX_train,X_test,Y_train,Y_test=train_test_split(news[\"TITLE\"], news[\"CATEGORY\"], test_size = 0.2,random_state=3)\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize count vectorizer\ncount_vectorizer=CountVectorizer()\ntfidf_vectorizer=TfidfVectorizer(ngram_range=(1,3))\n\n# initialize tfidf vectorizer\nX_train_count=count_vectorizer.fit_transform(X_train)\nX_test_count=count_vectorizer.transform(X_test)\n# fit and transform with count vectorizer\nX_train_tfidf=tfidf_vectorizer.fit_transform(X_train)\nX_test_tfidf=tfidf_vectorizer.transform(X_test)\nprint(X_train_count)\nprint('=======')\nprint(X_test_count)\nprint('=======')\n\nprint(X_train_tfidf)\nprint('=======')\n\nprint(X_test_tfidf)\n\n# fit and transform with tfidf vectorizer\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize multinomial naive bayes\nnb_1=MultinomialNB()\nnb_2=MultinomialNB()\n\n\n# fit on count vectorizer training data\nnb_1.fit(X_train_count,Y_train)\n# fit on tfidf vectorizer training data\nnb_2.fit(X_train_tfidf,Y_train)\n\n# accuracy with count vectorizer\nacc_count_nb=accuracy_score(nb_1.predict(X_test_count), Y_test)\n\n# accuracy with tfidf vectorizer\nacc_tfidf_nb=accuracy_score(nb_2.predict(X_test_tfidf), Y_test)\n\n# display accuracies\nprint(acc_count_nb,acc_tfidf_nb)\n\n# Code ends here\n\n\n# --------------\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# initialize logistic regression\nlogreg_1 =OneVsRestClassifier(LogisticRegression(random_state=10))\nlogreg_2 =OneVsRestClassifier(LogisticRegression(random_state=10))\n\n# fit on count vectorizer training data\nlogreg_1.fit(X_train_count ,Y_train)\nlogreg_2.fit(X_train_tfidf ,Y_train)\n# fit on tfidf vectorizer training data\n\n\n# accuracy with count vectorizer\nacc_count_logreg=accuracy_score(logreg_1.predict(X_test_count), Y_test)\n# accuracy with tfidf vectorizer\nacc_tfidf_logreg=accuracy_score(logreg_2.predict(X_test_tfidf), Y_test)\n\n# display accuracies\n\nprint(acc_count_logreg,acc_tfidf_logreg)\n# Code ends here\n\n\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.naive_bayes.MultinomialNB", "sklearn.model_selection.train_test_split", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
vivekbarsagadey/medicom-services
[ "319321aff3f38e68765291cf5e0b84252a3414b5" ]
[ "src/com/medicom/health/diabetes/domain/user.py" ]
[ "import json\n\nimport pandas as pd\n\nfrom com.medicom.health.diabetes.services.db_handler import DBHandler\n\n\nclass User:\n def __init__(self ,org={}):\n self.firstName = org['firstName']\n self.lastName = org['lastName']\n self.pregnancy = org['pregnancy']\n self.glucose = org['glucose']\n self.bloodpressure = org['bloodpressure']\n self.skinThickness = org['skinThickness']\n self.insulin = org['insulin']\n self.bmi = org['bmi']\n self.diabetesPedigreeFunction = org['diabetesPedigreeFunction']\n self.age = org['age']\n self.outcome=0\n\n def __str__(self):\n return str(self.__dict__)\n\n def save(self):\n user_String = json.dumps(self.__dict__)\n print(\"User json is \")\n print(user_String)\n DBHandler().getUserDataSource().insert_one(json.loads(user_String))\n\n\n\n def getFrame(self ):\n\n return pd.DataFrame(\n {'Pregnancies': self.pregnancy, 'Glucose': self.glucose, 'BloodPressure': self.bloodpressure, 'SkinThickness': self.skinThickness,\n 'Insulin': self.insulin, 'BMI': self.bmi,\n 'DiabetesPedigreeFunction': self.diabetesPedigreeFunction, 'Age': self.age}, index=[0])\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mustafabozkaya/Advanced-Deep-Learning-with-Keras
[ "81b9da2fb7d18e9bf08a0716fa1e7627045002e8" ]
[ "chapter1-keras-quick-tour/plot-linear-1.1.1.py" ]
[ "'''Utility for plotting a linear function\nwith and without noise\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nwant_noise = True\n# grayscale plot, comment if color is wanted\nplt.figure(figureSize=(18,9))\nplt.style.use('grayscale')\n\n# generate data bet -1,1 interval of 0.2\nx = np.arange(-1,1,0.2)\ny = 2*x + 3\nplt.xlabel('x')\nplt.ylabel('y=f(x)')\nplt.plot(x, y, 'o-', label=\"y\")\n\nif want_noise:\n # generate data with uniform distribution\n noise = np.random.uniform(-0.2, 0.2, x.shape)\n xn = x + noise\n\n plt.ylabel('y=f(x)')\n plt.plot(xn, y, 's-', label=\"y with noised x\")\n\nplt.legend(loc=0)\nplt.grid(b=True)\nplt.savefig(\"linear_regression.png\")\nplt.show()\nplt.close('all')\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.random.uniform", "matplotlib.pyplot.grid", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vrroom/pacPlanning
[ "cd149af0332c626521dabe95c3d2cfc706737272" ]
[ "LUCB.py" ]
[ "from constants import *\nimport math\nimport numpy as np\nimport sys\nimport time\nfrom util import bestTwoActions\n\nverbose = 0\n\ndef LUCBStopping(mdp, start_state=0, epsilon=4, delta=0.1, fileprint=1):\n\tglobal MAX_ITERATION_LIMIT, c\n\titeration = 0\n\tit=0\n\tinitial_iterations = 1*mdp.numStates*mdp.numActions\n\trewards_s_a_sprime = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))\n\tsampled_frequency_s_a = np.zeros((mdp.numStates,mdp.numActions))\n\tN_s_a_sprime = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))\n\tP = np.zeros((mdp.numStates,mdp.numActions,mdp.numStates))\n\tVlower = np.zeros((mdp.numStates))\n\tVstar = (mdp.Vmax/2)*np.ones((mdp.numStates))\n\tVupper = mdp.Vmax*np.ones((mdp.numStates))\n\tQlower = np.zeros((mdp.numStates,mdp.numActions))\n\tQstar = (mdp.Vmax/2)*np.ones((mdp.numStates,mdp.numActions))\n\tQupper = mdp.Vmax*np.ones((mdp.numStates,mdp.numActions))\n\tfinal_policy = (-1)*np.ones((mdp.numStates), dtype=np.int)\n\tstates_to_sample = range(mdp.numStates)\n\tcolliding_values = np.zeros((mdp.numStates))\n\tconverge_iterations = 10000\n\tepsilon_convergence = 1e-4\n\tis_converged = 0\n\tprint(\"Vmax\", mdp.Vmax)\n\n\t### Initial sampling for all state action pairs\n\twhile it < initial_iterations:\n\t\tfor state in range(mdp.numStates):\n\t\t\tfor act in range(mdp.numActions):\n\t\t\t\tit+=1\n\t\t\t\ts_prime, r = mdp.simulate(state, act)\n\t\t\t\trewards_s_a_sprime[state][act][s_prime] += r\n\t\t\t\tsampled_frequency_s_a[state][act] += 1\n\t\t\t\tN_s_a_sprime[state][act][s_prime] += 1\n\n\t### Calculating V, Q estimates thus far\n\tfor internal in range(converge_iterations):\n\t\toldQlower = np.copy(Qlower[start_state])\n\t\tfor state in range(mdp.numStates):\n\t\t\tfor act in range(mdp.numActions):\n\t\t\t\t# Calculations for Qupper and Qlower\n\t\t\t\tfirstterm = np.sum(rewards_s_a_sprime[state][act])/sampled_frequency_s_a[state][act]\n\t\t\t\tsecondterm = mdp.discountFactor*np.sum(Vupper*(N_s_a_sprime[state][act]/sampled_frequency_s_a[state][act]))\n\t\t\t\t#secondterm = mdp.discountFactor*sum(Vupper[ss]*N_s_a_sprime[state][act][ss]/sampled_frequency_s_a[state][act] for ss in range(mdp.numStates)) \n\t\t\t\tlower_secondterm = mdp.discountFactor*np.sum(Vlower*(N_s_a_sprime[state][act]/sampled_frequency_s_a[state][act]))\n\t\t\t\tstar_secondterm = mdp.discountFactor*np.sum(Vstar*(N_s_a_sprime[state][act]/sampled_frequency_s_a[state][act]))\n\t\t\t\t#lower_secondterm = mdp.discountFactor*sum(Vlower[ss]*N_s_a_sprime[state][act][ss]/sampled_frequency_s_a[state][act] for ss in range(mdp.numStates)) \n\t\t\t\tthirdterm = mdp.Vmax*math.sqrt((math.log(c*mdp.numStates*mdp.numActions)-math.log(delta))/sampled_frequency_s_a[state][act])\n\t\t\t\t#Qupper[state][act] = (float)(sum(rewards_s_a_sprime[state][act][ss] for ss in range(mdp.numStates))/sampled_frequency_s_a[state][act]) + secondterm + thirdterm\n\t\t\t\tQupper[state][act] = firstterm + secondterm + thirdterm\n\t\t\t\tQlower[state][act] = firstterm + lower_secondterm - thirdterm\n\t\t\t\tQstar[state][act] = firstterm + star_secondterm\n\t\t\t\t# Calculation for Vstar\n\t\t\t\t# t = (float)N_s_a_sprime[state][act][stateprime]/sampled_frequency_s_a[state][act]\n\t\t\t\t# val = t*(rewards_s_a[state][act][stateprime]+mdp.discountFactor*Vstar[stateprime])\n\n\t\t\t\t# if(state==start_state and abs(Vupper[state]-Quppermax)<epsilon_convergence):\n\t\t\t\t# \tVupper[state] = Quppermax\n\t\t\t\t# \tprint \"Stopping with \", internal, \"initial internal iterations\"\n\t\t\t\t# \tis_converged = 
1\n\t\t\t\t# \tbreak\n\t\t\tVupper[state] = np.amax(Qupper[state])\n\t\t\tVlower[state] = np.amax(Qlower[state])\n\t\t\tVstar[state] = np.amax(Qstar[state])\n\t\tif(np.linalg.norm(oldQlower-Qlower[start_state])<=epsilon_convergence):\n\t\t\tprint(\"Stopping with \", internal, \"initial internal iterations\")\n\t\t\tbreak\n\n\tif internal == converge_iterations:\n\t\t\tprint(\"Used all iterations\")\n\t\n\tprint(\"Initial estimate of Qupper found! Now sampling\")\n\n\tif(verbose==0):\n\t\toutp = open(mdp.filename+'-lucb.txt', 'wb')\n\t# sys.stdout = open(mdp.filename+'-lucb.txt', 'w+')\n\tff = open(mdp.filename+'-lucb-samples.txt', 'w+')\n\n\twhile iteration<MAX_ITERATION_LIMIT:\n\t\tmax_collision_state = [sorted(states_to_sample,key=lambda x: colliding_values[x], reverse=True)[0]]\n\t\t# print \"Sampling state \", max_collision_state[0]\n\t\t# print colliding_values\n\t\tfor state1 in max_collision_state:\n\t\t\t# print \"Sampling \", state1, \"for this round\"\n\t\t\tactionsList = bestTwoActions(mdp, state1, Qlower, Qupper, Qstar)\n\t\t\tfor act1 in actionsList:\n\t\t\t\titeration += 1\n\t\t\t\tsampled_frequency_s_a[state1][act1] += 1\n\t\t\t\t\n\t\t\t\t# Simluate the MDP with this state,action and update counts\n\t\t\t\t#### TRying 10 continuous simulations \n\t\t\t\tfor t in range(1):\n\t\t\t\t\ts_prime, r = mdp.simulate(state1, act1)\n\t\t\t\t\trewards_s_a_sprime[state1][act1][s_prime] += r\n\t\t\t\t\tN_s_a_sprime[state1][act1][s_prime] += 1\n\t\t\t\t\n\t\t\t\t# Calculations for Qupper and Qlower\n\t\t\t\t#### This involved a two for-loop and iterating convergence\n\t\t\t\tfor internal in range(converge_iterations):\n\t\t\t\t\toldQlower = np.copy(Qlower[start_state])\n\t\t\t\t\tfor state in range(mdp.numStates):\n\t\t\t\t\t\tfor act in range(mdp.numActions):\n\t\t\t\t\t\t# Calculations for Qupper and Qlower\n\t\t\t\t\t\t\t# Calculations for Qupper and Qlower\n\t\t\t\t\t\t\tfirstterm = np.sum(rewards_s_a_sprime[state][act])/sampled_frequency_s_a[state][act]\n\t\t\t\t\t\t\tsecondterm = mdp.discountFactor*np.sum(Vupper*(N_s_a_sprime[state][act]/sampled_frequency_s_a[state][act]))\n\t\t\t\t\t\t\t#secondterm = mdp.discountFactor*sum(Vupper[ss]*N_s_a_sprime[state][act][ss]/sampled_frequency_s_a[state][act] for ss in range(mdp.numStates)) \n\t\t\t\t\t\t\tlower_secondterm = mdp.discountFactor*np.sum(Vlower*(N_s_a_sprime[state][act]/sampled_frequency_s_a[state][act]))\n\t\t\t\t\t\t\t#lower_secondterm = mdp.discountFactor*sum(Vlower[ss]*N_s_a_sprime[state][act][ss]/sampled_frequency_s_a[state][act] for ss in range(mdp.numStates)) \n\t\t\t\t\t\t\tthirdterm = mdp.Vmax*math.sqrt((math.log(c*(iteration**2)*mdp.numStates*mdp.numActions)-math.log(delta))/sampled_frequency_s_a[state][act])\n\t\t\t\t\t\t\t#Qupper[state][act] = (float)(sum(rewards_s_a_sprime[state][act][ss] for ss in range(mdp.numStates))/sampled_frequency_s_a[state][act]) + secondterm + thirdterm\n\t\t\t\t\t\t\tQupper[state][act] = firstterm + secondterm + thirdterm\n\t\t\t\t\t\t\tQlower[state][act] = firstterm + lower_secondterm - thirdterm\n\n\t\t\t\t\t\t\t# Calculation for Vstar\n\t\t\t\t\t\t\t# t = (float)N_s_a_sprime[state][act][stateprime]/sampled_frequency_s_a[state][act]\n\t\t\t\t\t\t\t# val = t*(rewards_s_a[state][act][stateprime]+mdp.discountFactor*Vstar[stateprime])\n\t\t\t\t\t\tVupper[state] = np.amax(Qupper[state])\n\t\t\t\t\t\tVlower[state] = np.amax(Qlower[state])\n\t\t\t\t\tif(np.linalg.norm(oldQlower-Qlower[start_state])<=epsilon_convergence):\n\t\t\t\t\t\t# print \"Stopping with \", internal, 
\"iterations\"\n\t\t\t\t\t\tbreak\n\n\t\tcount = 0\n\t\tif(iteration%100==0):\n\t\t\tif (verbose==0):\n\t\t\t\toutp.write(str(iteration))\n\t\t\t\toutp.write('\\t')\n\t\t\t\toutp.write(str(Qupper[start_state][acList[1]]-Qlower[start_state][acList[0]]))#-epsilon*(1-mdp.discountFactor)/2 \n\t\t\t\toutp.write('\\n')\n\t\t\telse:\n\t\t\t\tprint(Qupper[start_state], Qlower[start_state])\n\t\t\t\t# print d_h_policy_s[0][start_state]-2/(1-mdp.discountFactor)\n\t\t\t\t# print samples, (QupperMBAE[start_state][acList[1]]-QlowerMBAE[start_state][acList[0]])-epsilon*(1-mdp.discountFactor)/2\n\t\t\tnp.savetxt(ff, sampled_frequency_s_a, delimiter=',')\n\t\t\tff.write('\\n')\n\t\t\t# print iteration, (Qupper[start_state][acList[1]]-Qlower[start_state][acList[0]])/epsilon \n\t\t\t# print Qupper\n\t\t\t# print iteration\n\t\t\n\t\t#### Check epsilon condition for all the states\n\t\t# for st in range(mdp.numStates):\n\t\t# \tacList = bestTwoActions(mdp, st, Qstar, Qupper)\n\t\t# \t# print \"Comparing \",Qupper[st][acList[1]], Qlower[st][acList[0]]\n\t\t\t \n\t\t# \tif(Qupper[st][acList[1]]-Qlower[st][acList[0]]<=epsilon):\n\t\t# \t\t# print \"Setting action \", acList[0], \"for state \", st\n\t\t# \t\tfinal_policy[st]=acList[0]\n\t\t# \t\tcount+=1\n\n\t\t##### Updating the list of coliliding states\n\t\tstates_to_sample = []\n\t\tfor st in range(mdp.numStates):\n\t\t\tacList = bestTwoActions(mdp, st, Qlower, Qupper, Qstar)\n\t\t\t# colliding_values[st] = Qupper[st][acList[1]]-Qlower[st][acList[0]]-epsilon\n\t\t\t##### Changing stopping condition to epsilon*(1-gamma)/2\n\t\t\tcolliding_values[st] = Qupper[st][acList[1]]-Qlower[st][acList[0]]-epsilon*(1-mdp.discountFactor)/2\n\t\t\t# print colliding_values[st]\n\t\t\tif(colliding_values[st]>0):\n\t\t\t\t### this state is still colliding, add to sample states\n\t\t\t\tstates_to_sample.append(st)\n\n\t\t#### Check epsilon condition for only starting state\n\t\tif(not (start_state in states_to_sample)):\n\t\t# if(count==mdp.numStates):\n\t\t\tacList = bestTwoActions(mdp, start_state, Qlower, Qupper, Qstar)\n\t\t\tprint(\"Setting final_policy of \", start_state, \" to\", acList[0])\n\t\t\tfinal_policy[start_state] = acList[0]\n\t\t\tprint(\"Iterations taken : \", iteration)\n\t\t\tprint(\"Returning the policy :\", final_policy)\n\t\t\tfor i in range(mdp.numStates):\n\t\t\t\tif(final_policy[i]==-1):\n\t\t\t\t\tfinal_policy[i] = bestTwoActions(mdp,i,Qlower,Qupper, Qstar)[0]\n\t\t\treturn final_policy\n\n\n\tfor i in range(mdp.numStates):\n\t\tif(final_policy[i]==-1):\n\t\t\tfinal_policy[i] = bestTwoActions(mdp,i,Qlower,Qupper, Qstar)[0]\n\treturn final_policy" ]
[ [ "numpy.amax", "numpy.linalg.norm", "numpy.ones", "numpy.copy", "numpy.savetxt", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
quachconghoang/SemanticFusion
[ "68dc24c732c0ea9754038eda4d3f1c0396d22844" ]
[ "GTSAM/keyframe_factor.py" ]
[ "import os.path\nfrom os import path\nimport glob\nimport numpy as np\nimport open3d as o3d\nfrom functools import partial\nimport cv2 as cv\nimport sys, copy\nsys.path.append('../')\n\nfrom SlamUtils.transformation import pos_quats2SEs, pos_quats2SE_matrices, pose2motion, SEs2ses, line2mat, tartan2kitti\nfrom SlamUtils.visualization import getVisualizationBB, getKeyframe\nfrom SlamUtils.utils import dataset_intrinsics\nfrom SlamUtils.Loader.TartanAir import getRootDir, getDataSequences, getDataLists\n\nfrom GTSAM.keyframe import Keyframe\n\n#import gtsam\n#from gtsam import Cal3_S2, Point3, Pose3\n\nfrom Semantics.dnn_engine import DnnEngine\n\nengine = DnnEngine()\n\ndef saveView(viz):\n viz.capture_screen_image(path)\n ctr = viz.get_view_control()\n param = ctr.convert_to_pinhole_camera_parameters()\n o3d.io.write_pinhole_camera_parameters(\"NED-View.json\", param)\n print(\"Saved\")\n\n\nrootDIR = getRootDir()\npath = getDataSequences(root=rootDIR, scenario='office', level='Easy', seq_num=4)\nfiles_rgb_left, files_rgb_right, files_depth_left, poselist = getDataLists(dir=path, skip=10)\n\n############ GTSAM ###################\nfocalx, focaly, centerx, centery = dataset_intrinsics(dataset='tartanair')\n# K = Cal3_S2(focalx, focaly, 0.0, centerx, centery)\n# Add a prior on pose x1. This indirectly specifies where the origin is.\n# 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z\n# pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))\n\n\nassert (poselist.shape[1] == 7) # x-y-z qx-qy-qz-qw\nposes_mat34 = pos_quats2SEs(poselist) # [R|t - array 12]\nposes_mat34_kitty, poses_mat44_kitty = tartan2kitti(poselist)\nposes_mat44 = pos_quats2SE_matrices(poselist)\nmotions_mat = pose2motion(poses_mat34) # [R|t]\nmotions_quat = SEs2ses(motions_mat).astype(np.float32) # x-y-z qx-qy-qz-qw\n\nline_set = getVisualizationBB()\npointSet = o3d.geometry.PointCloud()\nblob3D = o3d.geometry.PointCloud()\n\nvis = o3d.visualization.VisualizerWithKeyCallback()\nvis.create_window(window_name='Trajectory', width=1024, height=768)\nvis.register_key_callback(ord(\"S\"), partial(saveView))\n\naxis_pcd = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])\nvis.add_geometry(axis_pcd)\n\n\npose_axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.25, origin=[0, 0, 0])\nvis.add_geometry(line_set)\nvis.add_geometry(pointSet)\nvis.add_geometry(blob3D)\n\nparam = o3d.io.read_pinhole_camera_parameters(rootDIR + 'NED-View.json')\nvis.get_view_control().convert_from_pinhole_camera_parameters(param, allow_arbitrary=True)\nvis.poll_events(); vis.update_renderer()\n\nnum_keyframes = len(files_rgb_left)\nkeyframes = []\n# num_keyframes = 30\n\nfor id in range(num_keyframes):\n pose = poselist[id]\n pointSet.points.append([pose[0], pose[1], pose[2]])\n k = Keyframe(rgb=cv.imread(files_rgb_left[id]), depth=np.load(files_depth_left[id]), transform=poses_mat44[id],_processing=True)\n k.kp2D = engine.getSuperPoint(k.gray_raw)\n k.loadKeyCloud()\n blob3D += k.keyCloud\n vis.update_geometry(blob3D)\n\n camFrame = getKeyframe(transform=poses_mat44[id])\n\n vis.add_geometry(camFrame)\n vis.update_geometry(pointSet)\n vis.update_geometry(camFrame)\n vis.poll_events()\n vis.update_renderer()\n\n if id < num_keyframes - 1:\n print(motions_quat[id][0:3])\n\n # rgb = cv.imread(files_rgb_left[id])\n # depth = np.load(files_depth_left[id])\n cv.imshow('rgb', k.color_raw)\n cv.imshow('depth', k.depth_raw/10)\n k = cv.waitKey(30)\n if k == 27:\n cv.destroyAllWindows()\n 
break\n\ncv.destroyAllWindows()\nvis.poll_events();vis.update_renderer();vis.run()\n# ctr = vis.get_view_control()\n# param = ctr.convert_to_pinhole_camera_parameters()\n# o3d.io.write_pinhole_camera_parameters(\"NED-View.json\", param)\nvis.destroy_window()\n# visFPV.poll_events();visFPV.update_renderer();visFPV.run()\n\n\n\n### STUPID - STEPS - to Fuck C++:\n# I. Initialize local group (Local BA)\n# 0 - Define Keyframe specification:\n# + Point - Lines - Semantics Descriptor\n# + Raw rgb - depth - calibration\n# + GLOBAL - pose\n# 1 - Extract features & matching (Adopt deeplearning matching)\n# 2 - Local SFM with points - lines constraints: init point-cloud & 3D-to-2D projection with GTSAM (Factor Graph with GTSAM)\n# I'm considering other non-linear solver such as: G2O, Ceres and SE-Sync; but it is not a good choice at all.\n# (Pure Python research tools are trending)\n# II. Incremental local BA (Forget) -> Global BA: go straight with iSAM2\n# 1 - KDTree (Octree) keyframes (to generate smart - landmarks)\n# For example: https://stackoverflow.com/questions/65003877/understanding-leafsize-in-scipy-spatial-kdtree\n# 2 - Insert procedure: matching score / entropy score / minimum distance / timestamp\n# - Terminate procedure?\n# 3 - Develop some interesting Smart Factor from Semantic Graph: Points & Line segments\n# Examples: 3D Bounding-Box, Dual-Quadratic.\n# 4 - iSAM2 Smart-Factors (SEMANTIC_Factor)\n" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
duongntbk/FashionMNIST
[ "982f31ac7d857b5deadfde37f979bc6a047fa007" ]
[ "graph_utils.py" ]
[ "# -*- coding: utf-8 -*-\n\n'''\nMiscellaneous methods to draw graphs and images.\n'''\n\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.axes._subplots import SubplotBase\n\ndef set_graph_layout(nrows, ncols, figsize):\n    '''\n    Set the size and layout of the graph.\n    '''\n\n    return plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)\n\ndef show_graph():\n    '''\n    Display the graph using matplotlib.\n    '''\n\n    plt.show()\n\ndef draw_history_from_path(history_path, attr, show=True, drw_obj=plt, disp_obj=plt):\n    '''\n    Read a pickled Keras model history file from storage and\n    draw the training/validation loss or accuracy vs. epoch graph.\n    '''\n\n    with open(history_path, 'rb') as f:\n        history = pickle.load(f)\n    draw_history(history, attr, show, drw_obj, disp_obj)\n\ndef draw_history(history, attr, show=True, drw_obj=plt, disp_obj=plt):\n    '''\n    Draw the training/validation loss or accuracy vs. epoch graph,\n    based on the history object of a Keras model.\n    '''\n\n    history_dict = history.history\n    values = history_dict['{0}'.format(attr)]\n    val_values = history_dict['val_{0}'.format(attr)]\n\n    epochs = range(1, len(history_dict['accuracy']) + 1)\n\n    drw_obj.plot(epochs, values, 'bo', label='Training {0}'.format(attr))\n    drw_obj.plot(epochs, val_values, 'b', label='Validation {0}'.format(attr))\n    drw_obj.legend()\n\n    if isinstance(drw_obj, SubplotBase):\n        drw_obj.set_title('Training and validation {0}'.format(attr))\n        drw_obj.set_xlabel('Epochs')\n        drw_obj.set_ylabel(attr.title())\n    else:\n        drw_obj.title('Training and validation {0}'.format(attr))\n        drw_obj.xlabel('Epochs')\n        drw_obj.ylabel(attr.title())\n\n    if show:\n        disp_obj.show()\n\ndef display_image(data, height, width):\n    '''\n    Read image data from a numpy array and display it as a grayscale image.\n    '''\n\n    image = np.array(data, dtype='float')\n    pixels = image.reshape((height, width))\n    plt.imshow(pixels, cmap='gray')\n    plt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
17IT089/Music-Recommendation-System
[ "880f1f9289ec8e1b930cbd6c795b65b9b006fb17" ]
[ "music_recommendation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Music Recommendation.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n    https://colab.research.google.com/drive/1RW9ABbhb1cBSQEbBdIPdE6wtG1roLF30\n\"\"\"\n\nimport pandas as pd\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\npath = \"/content/drive/My Drive/Music Recommendation/song_data.csv\"\n\nsong_data = pd.read_csv(path,usecols=['user_id','song_id','listen_count','title','artist','song'],dtype={'user_id':'str','song_id':'str','listen_count':'int32','title':'str','artist':'str','song':'str'})\nsong_data.head(10)\n\nsong_data = song_data[['song','listen_count']]\nsong_data\n\nsong_data = song_data.groupby('song').sum()\nsong_data.sort_values(by=['listen_count'], inplace=True, ascending=False)\nsong_data = song_data.reset_index()\nsong_data.head(10)\n\n# function to recommend new songs\ndef Music_Recommendation(song_name, k):\n\n    # convert the 'song' column into a plain Python list\n    song_list = (song_data['song']).to_list()\n\n    # check whether the song selected by the user is available\n    if song_name in song_list:\n\n        print(\"Song \"+\"'\"+song_name+\"'\"+\" is available\")\n        song_index = (song_list.index(song_name))\n\n        # total listen count of the song selected by the user\n        song_listen_count = (song_data['listen_count'].iloc[song_index])\n        new_distance_list = []\n\n        for i in range(len(song_data)):\n            if i != song_index:\n\n                # absolute difference of listen counts (1-D Euclidean distance)\n                abs_distance = abs(song_listen_count - song_data['listen_count'][i])\n                new_distance_list.append([abs_distance,song_data['song'][i]])\n\n        # sort the distance list in ascending order\n        new_distance_list = sorted(new_distance_list)\n\n        print(\"Top \" + str(k) + \" recommendations for you.......\")\n        for i in range(k):\n            print(i+1,\")\",new_distance_list[i][1])\n\n\n    else:\n        print(\"Song \"+\"'\"+song_name+\"'\"+\" is not available\")\n        print(\"Top \" + str(k) + \" recommendations for you........\")\n        recommend_song = song_data[['song']].head(k)\n        for i in range(k):\n            print(i+1,\")\",recommend_song['song'][i])\n\nk = 10\nMusic_Recommendation('Trahison By Vitalic', k)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
krzysztofoporowski/Ichimoku_trading
[ "0fd5031d03c413925b69131022b0e0aeaa1e556e" ]
[ "ichimoku_trading.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 10 19:48:17 2019\n\n@author: krzysztof.oporowski\n\"\"\"\n\nfrom datetime import timedelta, datetime\nimport warnings\nimport pandas as pd\nfrom talib import ATR\nfrom simtradesim import Budget, Transaction, define_gl\nfrom wsedatareader import get_date_only, get_data_from_bossa, create_directory\nfrom misctradingtools import get_prev_workday_datestring, plot_ichimoku\n\ndef algo_1_1(row):\n '''\n Tenkan Sen Kijun Sen cross - strong version (algo 1_1)\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n if row['kumo'] == 1:\n condition = (row['tenkan_sen'] > row['senkou_span_a'])\n elif row['kumo'] == -1:\n condition = (row['tenkan_sen'] > row['senkou_span_b'])\n else:\n condition = False\n if (condition and (row['cross'] == 1) and (row['cross_2'] == -1) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['open']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if ((row['cross'] == -1) and (row['cross_2'] == 1) or\n (row['c_1'] < TRANS.stop_loss)):\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef algo_1_2(row):\n '''\n Tenkan Sen Kijun Sen cross - strong version (algo 1_1)\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n if row['kumo'] == 1:\n condition = ((row['tenkan_sen'] > row['senkou_span_b']) and\n (row['tenkan_sen'] < row['senkou_span_a']))\n elif row['kumo'] == -1:\n condition = ((row['tenkan_sen'] < row['senkou_span_b']) and\n (row['tenkan_sen'] > row['senkou_span_a']))\n else:\n condition = False\n if (condition and (row['cross'] == 1) and (row['cross_2'] == -1) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number,\n row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['open']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if ((row['cross'] == -1) and (row['cross_2'] == 1) or\n (row['c_1'] < TRANS.stop_loss)):\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia 
{}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef algo_1_3(row):\n '''\n Tenkan Sen Kijun Sen cross - strong version (algo 1_1)\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n if row['kumo'] == 1:\n condition = (row['tenkan_sen'] < row['senkou_span_b'])\n elif row['kumo'] == -1:\n condition = (row['tenkan_sen'] < row['senkou_span_a'])\n else:\n condition = False\n if (condition and (row['cross'] == 1) and (row['cross_2'] == -1) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['open']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if ((row['cross'] == -1) and (row['cross_2'] == 1) or\n (row['c_1'] < TRANS.stop_loss)):\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef algo_2_1(row):\n '''\n Tenkan Sen Kijun Sen cross - strong version (algo 1_1)\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n if row['kumo'] == 1:\n condition = (row['c_1'] > row['senkou_span_a'])\n elif row['kumo'] == -1:\n condition = (row['c_1'] > row['senkou_span_b'])\n else:\n condition = False\n if (condition and (row['c_1'] > row['kijun_sen']) and\n (row['c_3'] < row['kijun_sen']) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['kijun_sen']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n if TRANS.current_value > (TRANS.open_value + (3 * TRANS.risk)):\n new_sl = (TRANS.current_value - TRANS.risk)/TRANS.stocks_number\n TRANS.set_sl(sl_type='fixed',\n sl_factor=new_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n else:\n TRANS.set_sl(sl_type='fixed',\n 
sl_factor=(row['kijun_sen']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\n\ndef algo_2_0(row):\n '''\n Tenkan Sen Kijun Sen cross - strong version 2 all in one\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if (condition and (row['c_1'] > row['kijun_sen']) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['kijun_sen']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n if TRANS.current_value > (TRANS.open_value + (3 * TRANS.risk)):\n new_sl = (TRANS.current_value - TRANS.risk)/TRANS.stocks_number\n TRANS.set_sl(sl_type='fixed',\n sl_factor=new_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n else:\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['kijun_sen']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef kumo_brekaut_01(row):\n '''\n Kumo breakout\n 1.\n total: 42122.05, percent: 38.64%, years: 10, percent/year: 3.86%, nr -: 59,\n sum -: -18925.83, number +: 50, sum +: 61047.88, ATR_RAIO: 0.5\n 2.\n total: 47832.74, percent: 43.88%, years: 10, prct/year: 4.39%, nr -: 58,\n sum -: -17366.41, number +: 51, sum +: 65199.15, ATR_RAIO: 1\n 3.\n total: 91883.32, prct: 153.14%, years: 10, prct/year: 15.31%, nr -: 36,\n sum -: -18271.12, nr +: 24, sum +: 110154.43, ATR_RAIO: 33\n 4.\n total: 114699.25, prct: 327.71%, years: 10, prct/year: 32.77%, nr -: 14,\n sum -: -9509.71, nr +: 21, sum +: 124208.96, ATR_RAIO: 50\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if ((row['kumo'] == 1) and (row['c_1'] > row['senkou_span_a']) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n 
BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n if TRANS.current_value > (TRANS.open_value + (3 * TRANS.risk)):\n new_sl = (TRANS.current_value - TRANS.risk)/TRANS.stocks_number\n TRANS.set_sl(sl_type='fixed',\n sl_factor=new_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n else:\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\n\ndef kumo_brekaut_01_01(row):\n '''\n Kumo breakout, clear stop loss \n 1. slabe\n total: 42600.90, prct: 39.08%, years: 10, prct/year: 3.91%, nr -: 57,\n sum -: -18267.29, nr +: 52, sum +: 60868.18, ATR_RAIO: 0.5\n 2.\n total: -9525.87, prct: -63.51%, years: 10, prct/year: -6.35%, nr -: 14,\n sum -: -9677.75, nr +: 1, sum +: 151.89, ATR_RAIO: 50\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if ((row['kumo'] == 1) and (row['c_1'] > row['senkou_span_a']) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\n\ndef kumo_brekaut_02(row):\n '''\n Kumo breakout modified, removed c_1 > c_26\n 1.\n total: 34600.39, percent: 31.74%, years: 10, percent/year: 3.17%, \n number -: 60, sum -: -20757.86, number +: 49, sum +: 55358.25, ATR_RAIO: 0.5\n \n 2.\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if ((row['kumo'] == 1) and (row['c_1'] > row['senkou_span_a']) and\n (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n 
BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n if TRANS.current_value > (TRANS.open_value + (3 * TRANS.risk)):\n new_sl = (TRANS.current_value - TRANS.risk)/TRANS.stocks_number\n TRANS.set_sl(sl_type='fixed',\n sl_factor=new_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n else:\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef kumo_brekaut_02_01(row):\n '''\n Kumo breakout modified, removed c_1 > c_26\n 1. SŁABE\n \n 2.\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if ((row['kumo'] == 1) and (row['c_1'] > row['senkou_span_a']) and\n (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_a'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef kumo_brekaut_01_02(row):\n '''\n To jest dopiero prawdziwy Kumo breakout, clear stop loss \n 1.\n total: 45383.67, prct: 41.64%, years: 10, prct/year: 4.16%, nr -: 55,\n sum -: -18812.82, nr +: 54, sum +: 64196.49, ATR_RAIO: 0.5\n 2. 
\n\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if ((row['kumo'] == 1) and (row['c_1'] > row['senkou_span_a']) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_b'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_b'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\ndef kumo_brekaut_01_03(row):\n '''\n To jest dopiero prawdziwy Kumo breakout, clear stop loss. Dodałem \n rzeczywiste test wyjścia z chmury \n 1. SLABE\n total: 8702.63, prct: 8.62%, years: 10, prct/year: 0.86%, \n nr -: 61, sum -: -9491.73, nr +: 40, sum +: 18194.36, ATR_RAIO: 0.5\n 2. \n\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n condition = True\n if ((row['kumo'] == 1) and (row['c_1'] > row['senkou_span_a']) and\n (row['c_3'] < row['senkou_span_a']) and\n (row['c_1'] > row['c26']) and (row['kumo26'] == 1)):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_b'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['c_1'] < TRANS.stop_loss:\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n else:\n TRANS.curr_value(row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n temp_sl = row['senkou_span_b'] - row['atr_sl']\n TRANS.set_sl(sl_type='fixed',\n sl_factor=temp_sl,\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.register_transaction(verbose=BE_VERBOSE)\n\n\ndef tenkan_sen_kijun_sen_cross_f_sl(row):\n '''\n Tenkan Sen Kijun Sen cross - only stron version (algo 1_1)\n Result files\n - fixed senkou_span_b - 2020-7-23_10_6_24\n - senkou_span_b o _a - 2020-7-23_10_14_10\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global 
STOCK\n global data\n global BE_VERBOSE\n if not TRANS.in_transaction:\n if ((row['tenkan_sen'] > row['kijun_sen']) and\n (row['tenkan_sen'] > row['senkou_span_a']) and\n (row['tenkan_sen'] > row['senkou_span_b']) and\n (row['c_mean'] > row['c_mean_26'])):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n if row['senkou_span_a'] > row['senkou_span_b']:\n TRANS.set_sl(sl_type='fixed',\n sl_factor=row['senkou_span_b'],\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n else:\n TRANS.set_sl(sl_type='fixed',\n sl_factor=row['senkou_span_a'],\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n if row['low'] < TRANS.stop_loss:\n # closing trade when SL is hit\n TRANS.close_transaction(TRANS.stop_loss,\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n if row['tenkan_sen'] < row['kijun_sen']:\n # closing trade when close signal appears\n if row['open'] != 0:\n TRANS.close_transaction(row['close'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n\ndef videos_strategy(row):\n '''\n Tenkan Sen Kijun Sen cross - strong version (algo 1_1)\n '''\n global BUDZET\n global TRANS_ID\n global TRANS\n global COUNT\n global STOCK\n global data\n global BE_VERBOSE\n global MULT_RISK\n if not TRANS.in_transaction:\n if (((row['c_1']) > row['senkou_span_a'] + row['atr_sl']) and\n ((row['c_1']) > row['senkou_span_b'] + row['atr_sl']) and\n (row['tenkan_sen'] > row['kijun_sen']) and\n (row['c_mean'] > row['c_mean_26'])):# and\n #(row['s_s_a_m26'] > row['s_s_b_m26'])):\n if row['open'] != 0:\n if BUDZET.equity > 4:\n stock_number = TRANS.how_many_stocks(row['open'],\n BUDZET.equity)\n if stock_number > 0:\n TRANS.open_transaction(stock_number, row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.set_sl(sl_type='fixed',\n sl_factor=row['senkou_span_b'] - row['atr_sl'],\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n TRANS.define_risk(verbose=BE_VERBOSE)\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(-TRANS.open_total)\n return 1\n else:\n TRANS.curr_value(price=row['close'],\n date=get_date_only(row),\n be_verbose=BE_VERBOSE)\n if row['tenkan_sen'] < row['kijun_sen']:\n pass\n \"\"\"\n if row['open'] != 0:\n TRANS.close_transaction(row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n #print('zamykam, bo przeciecie tankan i kijun')\n return -1\n else:\n print('cena zamkniecia {}'.format(row['open']))\n \"\"\"\n if row['c_1'] < TRANS.stop_loss:\n TRANS.close_transaction(row['open'],\n get_date_only(row),\n be_verbose=BE_VERBOSE)\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n #print('zamykam, bo stop loss')\n return -1\n if TRANS.current_value > TRANS.open_total + MULT_RISK * TRANS.risk:\n if row['tenkan_sen'] - row['atr_sl'] > TRANS.stop_loss:\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['senkou_span_a']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n if TRANS.current_value < TRANS.open_total + MULT_RISK * TRANS.risk:\n if row['kijun_sen'] - 
row['atr_sl'] > TRANS.stop_loss:\n TRANS.set_sl(sl_type='fixed',\n sl_factor=(row['senkou_span_b']-row['atr_sl']),\n date_sl=get_date_only(row),\n be_verbose=BE_VERBOSE)\n\n\n\n#****************************************************************************\n\nWIG20 = ['ALIOR', 'CCC', 'CDPROJEKT', 'CYFRPLSAT', 'DINOPL', 'JSW', 'KGHM',\n 'LPP', 'LOTOS', 'MBANK', 'ORANGEPL', 'PEKAO', 'PGE', 'PGNIG',\n 'PKNORLEN', 'PKOBP', 'PLAY', 'PZU', 'SANPL', 'TAURONPE']\nETF = ['ETFSP500', 'ETFDAX', 'ETFW20L']\nMWIG40 = ['11BIT', 'ASSECOPOL', 'AMICA', 'GRUPAAZOTY', 'BUDIMEX', 'BENEFIT',\n 'HANDLOWY', 'BORYSZEW', 'INTERCARS', 'CIECH', 'CIGAMES',\n 'CLNPHARMA', 'COMARCH',\n 'AMREST', 'FORTE',\n 'ECHO', 'ENEA',\n 'ENERGA', 'EUROCASH', 'FAMUR', 'GPW', 'GTC', 'GETIN',\n 'INGBSK', 'KERNEL', 'KRUK', 'KETY', 'LIVECHAT', 'BOGDANKA', 'MABION',\n 'BNPPPL', 'DEVELIA', 'VRG',\n 'MILLENNIUM', 'ORBIS', 'PKPCARGO', 'PLAYWAY', 'STALPROD', 'TSGAMES',\n 'WIRTUALNA', 'MWIG40']\nSWIG80 = ['ATAL', 'ABPL', 'ASSECOBS', 'ACAUTOGAZ', 'AGORA', 'ALTUSTFI',\n 'AMBRA', 'ALUMETAL', 'AUTOPARTN', 'APATOR', 'ARCHICOM', 'ASBIS',\n 'ASSECOSEE', 'ASTARTA', 'ATMGRUPA', 'BAHOLDING', 'BIOTON', 'PBKM',\n 'BOS', 'BSCDRUK', 'COMP', 'COGNOR', 'CPGROUP', 'CORMAY',\n 'DEBICA', 'DOMDEV', 'EKOEXPORT', 'ELBUDOWA', 'ELEMENTAL', 'ENTER',\n 'FERRO', 'IDEABANK', 'IMCOMPANY', 'INSTALKRK', 'KOGENERA',\n 'DATAWALK',\n 'KRUSZWICA', 'LENTEX', 'MCI', 'MEDICALG', 'MANGATA', 'MLPGROUP',\n 'MENNICA', 'MONNARI', 'NETIA', 'NEUCA', 'NEWAG', 'OAT', 'OPONEO.PL',\n 'OVOSTAR', 'WIELTON']\nSTOCKS = WIG20 + MWIG40 + SWIG80\n# STOCKS = ['WIELTON']\nEXCEPTIONS = ['PEKAO', 'PGE', 'PKOBP', 'CLNPHARMA', 'BOS', 'MEDICALG',\n 'OAT']\nEXCEPTIONS = ['BAHOLDING'] # 'BAHOLDING' has duplicated index\n#EXCEPTIONS = []\nDATA_PATH = \"C:/Users/krzysztof.oporowski.HWS01/Documents/Python_projects/Data/\"\nHISTORY = []\nYEARS = 1\nSAMPLES = 240 * YEARS\nALL_TRADES = pd.DataFrame()\nBE_VERBOSE = False\nSHOW_GRAPH = False\nSAVE_SINGLE_STOCK = True\nSAVE_RESULTS = True\nALGO = [kumo_brekaut_01_02]\nMULT_RISK = 2\nATR_RATIO = 0.50 # 33 dla 10 lat\nwarnings.filterwarnings('ignore')\n# Creating directory to store results\ncreate_directory('Results')\n\nfor STOCK in STOCKS:\n if STOCK not in EXCEPTIONS:\n #print(STOCK)\n temp = []\n INITIAL_BUDGET = 1000\n BUDZET = Budget(amount=INITIAL_BUDGET)\n GeneralLedger = []\n TRANS_ID = 0\n COUNT = 0\n TRANS = Transaction(TRANS_ID, GeneralLedger)\n DATA = get_data_from_bossa(stooq_name=STOCK, path_to_data=DATA_PATH)\n DATA['c_1'] = DATA.close.shift(1)\n DATA['c_3'] = DATA.close.shift(2)\n DATA['o_1'] = DATA.open.shift(1)\n DATA['l_1'] = DATA.low.shift(1)\n DATA['h_1'] = DATA.high.shift(1)\n high_9 = DATA['h_1'].rolling(window=9).max()\n low_9 = DATA['l_1'].rolling(window=9).min()\n DATA['tenkan_sen'] = (high_9 + low_9) /2\n high_26 = DATA['h_1'].rolling(window=26).max()\n low_26 = DATA['l_1'].rolling(window=26).min()\n DATA['kijun_sen'] = (high_26 + low_26) /2\n last_index = DATA.iloc[-1:].index[0]\n last_date = DATA.iloc[-1:].index[0].date()\n for i in range(26):\n DATA.loc[last_index+timedelta(days=1) +\n timedelta(days=i),\n 'date'] = last_date + timedelta(days=i)\n DATA['senkou_span_a'] = ((DATA['tenkan_sen'] +\n DATA['kijun_sen']) / 2).shift(26)\n high_52 = DATA['h_1'].rolling(window=52).max()\n low_52 = DATA['l_1'].rolling(window=52).min()\n DATA['senkou_span_b'] = ((high_52 + low_52) /2).shift(26)\n DATA['chikou_span'] = DATA['c_1'].shift(-26) # sometimes -22\n DATA.loc[DATA.tenkan_sen > DATA.kijun_sen, 'cross'] = 1\n DATA.loc[DATA.tenkan_sen < 
DATA.kijun_sen, 'cross'] = -1\n DATA.loc[DATA.tenkan_sen == DATA.kijun_sen, 'cross'] = 0\n DATA['cross_2'] = DATA.cross.shift(2)\n DATA['sygnal'] = 0\n DATA.loc[DATA.senkou_span_a > DATA.senkou_span_b, 'kumo'] = 1\n DATA.loc[DATA.senkou_span_a < DATA.senkou_span_b, 'kumo'] = -1\n DATA.loc[DATA.senkou_span_a == DATA.senkou_span_b, 'kumo'] = 0\n DATA['kumo26'] = DATA.kumo.shift(-26)\n DATA['c26'] = DATA.c_1.shift(26)\n DATA['atr26'] = ATR(DATA.c_1, DATA.l_1, DATA.h_1, timeperiod=26)\n DATA['atr_sl'] = DATA.atr26 * ATR_RATIO\n data = DATA.tail(SAMPLES)\n '''\n kolumny = ['close', 'kijun_sen', 'tenkan_sen', 'senkou_span_a',\n 'senkou_span_b', 'chikou_span']#, 'sygnal']\n '''\n data['signal'] = data.apply(ALGO[0], axis=1)\n man_close_date = get_prev_workday_datestring()\n if TRANS.in_transaction:\n try:\n TRANS.close_transaction(data['close'].loc[man_close_date],\n date_close=man_close_date,\n be_verbose=BE_VERBOSE)\n except KeyError:\n print('przy tej spolce {} KeyError'.format(STOCK))\n tr_r = round((TRANS.trans_result / TRANS.open_total)*100, 2)\n if BE_VERBOSE:\n print('SELL signal on date {}, trade result: {:.2f}, \\\n percent: {}%'.format(man_close_date,\n TRANS.trans_result,\n tr_r))\n BUDZET.manage_amount(TRANS.close_total)\n TRANS.reset_values()\n buy = data[data.signal == 1]\n sell = data[data.signal == -1]\n trades = define_gl(GeneralLedger)\n trades['stock'] = STOCK\n if SAVE_SINGLE_STOCK:\n FILE = 'Results/' + STOCK + '-' + str(ALGO[0]).split(' ')[1]\n FILE = FILE + '-atr-' + str(ATR_RATIO) + '.csv'\n trades.to_csv(FILE)\n # print('{} trades sum: {:.2f}'.format(STOCK,\n # trades['trans_result'].sum()))\n temp = [STOCK, trades.trans_result.sum()]\n HISTORY.append(temp)\n if buy.signal.sum() > 0 or sell.signal.sum() < 0:\n if SHOW_GRAPH:\n plot_ichimoku(data, STOCK, buy, sell)\n ALL_TRADES = pd.concat([trades, ALL_TRADES])\nHIST = pd.DataFrame(HISTORY, columns=['stock', 'result'])\nA_T = HIST.result.sum()\nT_PERC = 100 * HIST.result.sum()\nT_PERC = T_PERC/(HIST[HIST.result != 0].stock.count()*INITIAL_BUDGET)\nPERC_Y = 100 * HIST.result.sum()\nPERC_Y = PERC_Y/YEARS\nPERC_Y = PERC_Y/(HIST[HIST.result != 0].stock.count()*INITIAL_BUDGET)\n\"\"\"\nprint('all trades: {:.2f}, \\\n percent: {:.2f}%, \\\n years: {}, \\\n percent/year: {:.2f}%,\\\n multiplier of risk: {},\\\n muliplier do ATR ratio: {}'.format(A_T, T_PERC, YEARS, PERC_Y, MULT_RISK,\n ATR_RATIO))\n\"\"\"\nHIST.sort_values(by=['result'], ascending=True, inplace=True)\nNOW = datetime.now()\nprint('total: {:.2f}, \\\n prct: {:.2f}%, \\\n years: {}, \\\n prct/year: {:.2f}%,\\\n nr -: {},\\\n sum -: {:.2f},\\\n nr +: {},\\\n sum +: {:.2f},\\\n ATR_RAIO: {}'.format(A_T, T_PERC, YEARS, PERC_Y,\n HIST['result'].loc[HIST.result < 0].count(),\n HIST['result'].loc[HIST.result < 0].sum(),\n HIST['result'].loc[HIST.result > 0].count(),\n HIST['result'].loc[HIST.result > 0].sum(),\n ATR_RATIO))\nif SAVE_RESULTS:\n try:\n F_N = 'Results/' + str(ALGO[0]).split(' ')[1] + '-'\n F_N = F_N + str(NOW.year) + '-' + str(NOW.month) + '-' + str(NOW.day)\n F_N = F_N + '_' + str(NOW.hour) + '_' + str(NOW.minute) + '_'\n F_N = F_N + str(NOW.second)\n ALL_TRANS_DETAILS = F_N + '-d-atr-' + str(ATR_RATIO) + '-' \\\n + str(YEARS) + '-years.xlsx'\n ALL_TRANS_SUMMARY = F_N + '-s-atr-' + str(ATR_RATIO) + '-' \\\n + str(YEARS) + '-years.xlsx'\n ALL_TRADES.to_excel(ALL_TRANS_DETAILS, sheet_name='Original')\n HIST.to_excel(ALL_TRANS_SUMMARY, sheet_name='Original')\n except:\n print('Note! results not saved due to error. Save manually!')\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
IT-gMA/tabular-dl
[ "d0fb7086fa8851370e35ebfaf4c119181f037f10" ]
[ "bin/read_npydata.py" ]
[ "# Inspect a saved .npy index file: print its full contents and shape.\nimport numpy as np\nimport sys\n\nFILE_PATH = '../data/california_housing/idx_test.npy'\n# Raise the print threshold so the whole array is shown without truncation.\nnp.set_printoptions(threshold=sys.maxsize)\ndata = np.load(FILE_PATH)\nprint(data)\nprint(data.shape)\n" ]
[ [ "numpy.load", "numpy.set_printoptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Bruce-Dudu/zvt
[ "a261a2b688b2c75b7b637b85eabaffdc27fbd80f" ]
[ "zvt/__init__.py" ]
[ "# -*- coding: utf-8 -*-\nimport enum\nimport json\nimport logging\nimport os\nfrom logging.handlers import RotatingFileHandler\n\nimport pandas as pd\nfrom pkg_resources import get_distribution, DistributionNotFound\n\nfrom zvt.settings import DATA_SAMPLE_ZIP_PATH, ZVT_TEST_HOME, ZVT_HOME, ZVT_TEST_DATA_PATH, ZVT_TEST_ZIP_DATA_PATH\n\ntry:\n    dist_name = __name__\n    __version__ = get_distribution(dist_name).version\nexcept DistributionNotFound:\n    __version__ = 'unknown'\nfinally:\n    del get_distribution, DistributionNotFound\n\n\n# common class\nclass IntervalLevel(enum.Enum):\n    LEVEL_TICK = 'tick'\n    LEVEL_1MIN = '1m'\n    LEVEL_5MIN = '5m'\n    LEVEL_15MIN = '15m'\n    LEVEL_30MIN = '30m'\n    LEVEL_1HOUR = '1h'\n    LEVEL_4HOUR = '4h'\n    LEVEL_1DAY = '1d'\n    LEVEL_1WEEK = '1wk'\n    LEVEL_1MON = '1mon'\n\n    def to_pd_freq(self):\n        if self == IntervalLevel.LEVEL_1MIN:\n            return '1min'\n        if self == IntervalLevel.LEVEL_5MIN:\n            return '5min'\n        if self == IntervalLevel.LEVEL_15MIN:\n            return '15min'\n        if self == IntervalLevel.LEVEL_30MIN:\n            return '30min'\n        if self == IntervalLevel.LEVEL_1HOUR:\n            return '1H'\n        if self == IntervalLevel.LEVEL_4HOUR:\n            return '4H'\n        if self >= IntervalLevel.LEVEL_1DAY:\n            return '1D'\n\n    def floor_timestamp(self, pd_timestamp):\n        if self == IntervalLevel.LEVEL_1MIN:\n            return pd_timestamp.floor('1min')\n        if self == IntervalLevel.LEVEL_5MIN:\n            return pd_timestamp.floor('5min')\n        if self == IntervalLevel.LEVEL_15MIN:\n            return pd_timestamp.floor('15min')\n        if self == IntervalLevel.LEVEL_30MIN:\n            return pd_timestamp.floor('30min')\n        if self == IntervalLevel.LEVEL_1HOUR:\n            return pd_timestamp.floor('1h')\n        if self == IntervalLevel.LEVEL_4HOUR:\n            return pd_timestamp.floor('4h')\n        if self == IntervalLevel.LEVEL_1DAY:\n            return pd_timestamp.floor('1d')\n\n    def to_minute(self):\n        return int(self.to_second() / 60)\n\n    def to_second(self):\n        return int(self.to_ms() / 1000)\n\n    def to_ms(self):\n        # we treat the tick interval as 5s; you could change it\n        if self == IntervalLevel.LEVEL_TICK:\n            return 5 * 1000\n        if self == IntervalLevel.LEVEL_1MIN:\n            return 60 * 1000\n        if self == IntervalLevel.LEVEL_5MIN:\n            return 5 * 60 * 1000\n        if self == IntervalLevel.LEVEL_15MIN:\n            return 15 * 60 * 1000\n        if self == IntervalLevel.LEVEL_30MIN:\n            return 30 * 60 * 1000\n        if self == IntervalLevel.LEVEL_1HOUR:\n            return 60 * 60 * 1000\n        if self == IntervalLevel.LEVEL_4HOUR:\n            return 4 * 60 * 60 * 1000\n        if self == IntervalLevel.LEVEL_1DAY:\n            return 24 * 60 * 60 * 1000\n        if self == IntervalLevel.LEVEL_1WEEK:\n            return 7 * 24 * 60 * 60 * 1000\n        if self == IntervalLevel.LEVEL_1MON:\n            # approximate one month as 31 days\n            return 31 * 24 * 60 * 60 * 1000\n\n    def __ge__(self, other):\n        if self.__class__ is other.__class__:\n            return self.to_ms() >= other.to_ms()\n        return NotImplemented\n\n    def __gt__(self, other):\n        if self.__class__ is other.__class__:\n            return self.to_ms() > other.to_ms()\n        return NotImplemented\n\n    def __le__(self, other):\n        if self.__class__ is other.__class__:\n            return self.to_ms() <= other.to_ms()\n        return NotImplemented\n\n    def __lt__(self, other):\n        if self.__class__ is other.__class__:\n            return self.to_ms() < other.to_ms()\n        return NotImplemented\n\n\nclass AdjustType(enum.Enum):\n    # pinyin is used here because the English terms are not intuitive (split-adjusted? wtf?)\n    # no price adjustment\n    bfq = 'bfq'\n    # forward-adjusted\n    qfq = 'qfq'\n    # backward-adjusted\n    hfq = 'hfq'\n\n\ndef init_log(file_name='zvt.log', log_dir=None, simple_formatter=True):\n    if not log_dir:\n        log_dir = zvt_env['log_path']\n\n    root_logger = logging.getLogger()\n\n    # reset the handlers\n    root_logger.handlers = []\n\n    root_logger.setLevel(logging.INFO)\n\n
file_name = os.path.join(log_dir, file_name)\n\n fh = RotatingFileHandler(file_name, maxBytes=524288000, backupCount=10)\n\n fh.setLevel(logging.INFO)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n # create formatter and add it to the handlers\n if simple_formatter:\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s %(threadName)s %(message)s\")\n else:\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s %(threadName)s %(name)s:%(filename)s:%(lineno)s %(funcName)s %(message)s\")\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n # add the handlers to the logger\n root_logger.addHandler(fh)\n root_logger.addHandler(ch)\n\n\npd.set_option('expand_frame_repr', False)\npd.set_option('mode.chained_assignment', 'raise')\n\nzvt_env = {}\n\n\ndef init_env(zvt_home: str) -> None:\n \"\"\"\n\n :param zvt_home: home path for zvt\n \"\"\"\n data_path = os.path.join(zvt_home, 'data')\n tmp_path = os.path.join(zvt_home, 'tmp')\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n if not os.path.exists(tmp_path):\n os.makedirs(tmp_path)\n\n zvt_env['zvt_home'] = zvt_home\n zvt_env['data_path'] = data_path\n zvt_env['tmp_path'] = tmp_path\n\n # path for storing ui results\n zvt_env['ui_path'] = os.path.join(zvt_home, 'ui')\n if not os.path.exists(zvt_env['ui_path']):\n os.makedirs(zvt_env['ui_path'])\n\n # path for storing logs\n zvt_env['log_path'] = os.path.join(zvt_home, 'logs')\n if not os.path.exists(zvt_env['log_path']):\n os.makedirs(zvt_env['log_path'])\n\n # create default config.json if not exist\n config_path = os.path.join(zvt_home, 'config.json')\n if not os.path.exists(config_path):\n from shutil import copyfile\n copyfile(os.path.abspath(os.path.join(os.path.dirname(__file__), 'samples', 'config.json')), config_path)\n\n with open(config_path) as f:\n config_json = json.load(f)\n for k in config_json:\n zvt_env[k] = config_json[k]\n\n init_log()\n\n import pprint\n pprint.pprint(zvt_env)\n\n\nif os.getenv('TESTING_ZVT'):\n init_env(zvt_home=ZVT_TEST_HOME)\n\n # init the sample data if need\n same = False\n if os.path.exists(ZVT_TEST_ZIP_DATA_PATH):\n import filecmp\n\n same = filecmp.cmp(ZVT_TEST_ZIP_DATA_PATH, DATA_SAMPLE_ZIP_PATH)\n\n if not same:\n from shutil import copyfile\n from zvt.utils.zip_utils import unzip\n\n copyfile(DATA_SAMPLE_ZIP_PATH, ZVT_TEST_ZIP_DATA_PATH)\n unzip(ZVT_TEST_ZIP_DATA_PATH, ZVT_TEST_DATA_PATH)\n\nelse:\n init_env(zvt_home=ZVT_HOME)\n\n# import the recorders for register them to the domain\nimport zvt.recorders as zvt_recorders\n\n__all__ = ['zvt_env', 'init_log', 'init_env', 'IntervalLevel', '__version__', 'AdjustType']\n" ]
[ [ "pandas.set_option" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
singhalok641/Machine-Learning-Engineer-Nanodegree
[ "d0ae8bddb85f2d62d4d35e2f2fb80100f055e19c" ]
[ "SentimentAnalysisIMDB/train/train.py" ]
[ "import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\ndef model_fn(model_dir):\n    \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n    print(\"Loading model.\")\n\n    # First, load the parameters used to create the model.\n    model_info = {}\n    model_info_path = os.path.join(model_dir, 'model_info.pth')\n    with open(model_info_path, 'rb') as f:\n        model_info = torch.load(f)\n\n    print(\"model_info: {}\".format(model_info))\n\n    # Determine the device and construct the model.\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n    # Load the stored model parameters.\n    model_path = os.path.join(model_dir, 'model.pth')\n    with open(model_path, 'rb') as f:\n        model.load_state_dict(torch.load(f))\n\n    # Load the saved word_dict.\n    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n    with open(word_dict_path, 'rb') as f:\n        model.word_dict = pickle.load(f)\n\n    model.to(device).eval()\n\n    print(\"Done loading model.\")\n    return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n    print(\"Get train data loader.\")\n\n    train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n    train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n    train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n    train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n    return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n    \"\"\"\n    This is the training method that is called by the PyTorch training script. The parameters\n    passed are as follows:\n    model - The PyTorch model that we wish to train.\n    train_loader - The PyTorch DataLoader that should be used during training.\n    epochs - The total number of epochs to train for.\n    optimizer - The optimizer to use during training.\n    loss_fn - The loss function used for training.\n    device - Where the model and data should be loaded (gpu or cpu).\n    \"\"\"\n\n    # TODO: Paste the train() method developed in the notebook here.\n    for epoch in range(1, epochs + 1):\n        model.train()\n        total_loss = 0\n        for batch in train_loader:\n            batch_X, batch_y = batch\n\n            batch_X = batch_X.to(device)\n            batch_y = batch_y.to(device)\n\n            # TODO: Complete this train method to train the model provided.\n            out = model(batch_X)\n            loss = loss_fn(out, batch_y)\n            loss.backward()\n            optimizer.step()\n            optimizer.zero_grad()\n            total_loss += loss.data.item()\n        print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n\nif __name__ == '__main__':\n    # All of the model parameters and training parameters are sent as arguments when the script\n    # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n\t# Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n\t# Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n" ]
[ [ "torch.load", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.nn.BCELoss", "torch.cuda.is_available", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sscardapane/jax
[ "21e5eb13dd879f92b6ff94e18bf33a24ed8cc2a7" ]
[ "tests/linalg_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the LAPAX linear algebra module.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom functools import partial\nimport itertools\n\nimport numpy as onp\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nfrom jax import jvp\nfrom jax import numpy as np\nfrom jax import scipy\nfrom jax import test_util as jtu\nfrom jax.lib import xla_bridge\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\nT = lambda x: onp.swapaxes(x, -1, -2)\n\n\ndef float_types():\n return set(onp.dtype(xla_bridge.canonicalize_dtype(dtype))\n for dtype in [onp.float32, onp.float64])\n\n\nclass NumpyLinalgTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype, \"rng\": rng}\n for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (1000, 0, 0)]\n for dtype in float_types()\n for rng in [jtu.rand_default()]))\n def testCholesky(self, shape, dtype, rng):\n def args_maker():\n a = rng(shape, dtype)\n return [onp.matmul(a, T(a))]\n\n self._CheckAgainstNumpy(onp.linalg.cholesky, np.linalg.cholesky, args_maker,\n check_dtypes=True, tol=1e-3)\n self._CompileAndCheck(np.linalg.cholesky, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_shape={}_fullmatrices={}\".format(\n jtu.format_shape_dtype_string(shape, dtype), full_matrices),\n \"shape\": shape, \"dtype\": dtype, \"full_matrices\": full_matrices,\n \"rng\": rng}\n for shape in [(1, 1), (3, 4), (2, 10, 5), (2, 200, 100)]\n for dtype in float_types()\n for full_matrices in [False, True]\n for rng in [jtu.rand_default()]))\n def testQr(self, shape, dtype, full_matrices, rng):\n m, n = shape[-2:]\n\n if full_matrices:\n mode, k = \"complete\", m\n else:\n mode, k = \"reduced\", min(m, n)\n\n a = rng(shape, dtype)\n lq, lr = np.linalg.qr(a, mode=mode)\n\n # onp.linalg.qr doesn't support broadcasting. 
But it seems like an\n # inevitable extension so we support it in our version.\n nq = onp.zeros(shape[:-2] + (m, k), dtype)\n nr = onp.zeros(shape[:-2] + (k, n), dtype)\n for index in onp.ndindex(*shape[:-2]):\n nq[index], nr[index] = onp.linalg.qr(a[index], mode=mode)\n\n max_rank = max(m, n)\n\n # Norm, adjusted for dimension and type.\n def norm(x):\n n = onp.linalg.norm(x, axis=(-2, -1))\n return n / (max_rank * onp.finfo(dtype).eps)\n\n def compare_orthogonal(q1, q2):\n # Q is unique up to sign, so normalize the sign first.\n sum_of_ratios = onp.sum(onp.divide(q1, q2), axis=-2, keepdims=True)\n phases = onp.divide(sum_of_ratios, onp.abs(sum_of_ratios))\n q1 *= phases\n self.assertTrue(onp.all(norm(q1 - q2) < 30))\n\n # Check a ~= qr\n self.assertTrue(onp.all(norm(a - onp.matmul(lq, lr)) < 30))\n\n # Compare the first 'k' vectors of Q; the remainder form an arbitrary\n # orthonormal basis for the null space.\n compare_orthogonal(nq[..., :k], lq[..., :k])\n\n # Check that q is close to unitary.\n self.assertTrue(onp.all(norm(onp.eye(k) - onp.matmul(T(lq), lq)) < 5))\n\n if not full_matrices and m >= n:\n jtu.check_jvp(np.linalg.qr, partial(jvp, np.linalg.qr), (a,))\n\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_shape={}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype, \"rng\": rng}\n for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (5, 5, 5)]\n for dtype in float_types()\n for rng in [jtu.rand_default()]))\n def testInv(self, shape, dtype, rng):\n def args_maker():\n invertible = False\n while not invertible:\n a = rng(shape, dtype)\n try:\n onp.linalg.inv(a)\n invertible = True\n except onp.linalg.LinAlgError:\n pass\n return [a]\n\n self._CheckAgainstNumpy(onp.linalg.inv, np.linalg.inv, args_maker,\n check_dtypes=True, tol=1e-3)\n self._CompileAndCheck(np.linalg.inv, args_maker, check_dtypes=True)\n\n\nclass ScipyLinalgTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs={}_rhs={}_lower={}_transposea={}\".format(\n jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n lower, transpose_a),\n \"lower\": lower, \"transpose_a\": transpose_a,\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"rng\": rng}\n for lower, transpose_a in itertools.product([False, True], repeat=2)\n for lhs_shape, rhs_shape in [\n ((4, 4), (4,)),\n ((4, 4), (4, 3)),\n ((2, 8, 8), (2, 8, 10)),\n ]\n for dtype in float_types()\n for rng in [jtu.rand_default()]))\n def testSolveTriangularBlocked(self, lower, transpose_a, lhs_shape,\n rhs_shape, dtype, rng):\n k = rng(lhs_shape, dtype)\n l = onp.linalg.cholesky(onp.matmul(k, T(k))\n + lhs_shape[-1] * onp.eye(lhs_shape[-1]))\n l = l.astype(k.dtype)\n b = rng(rhs_shape, dtype)\n\n a = l if lower else T(l)\n inv = onp.linalg.inv(T(a) if transpose_a else a).astype(a.dtype)\n if len(lhs_shape) == len(rhs_shape):\n onp_ans = onp.matmul(inv, b)\n else:\n onp_ans = onp.einsum(\"...ij,...j->...i\", inv, b)\n\n # The standard scipy.linalg.solve_triangular doesn't support broadcasting.\n # But it seems like an inevitable extension so we support it.\n ans = scipy.linalg.solve_triangular(\n l if lower else T(l), b, trans=1 if transpose_a else 0, lower=lower)\n\n self.assertAllClose(onp_ans, ans, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\":\n \"_lhs={}_rhs={}_lower={}_transposea={}\".format(\n 
jtu.format_shape_dtype_string(lhs_shape, dtype),\n jtu.format_shape_dtype_string(rhs_shape, dtype),\n lower, transpose_a),\n \"lower\": lower, \"transpose_a\": transpose_a,\n \"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n \"rng\": rng}\n for lower, transpose_a in itertools.product([False, True], repeat=2)\n for lhs_shape, rhs_shape in [\n ((4, 4), (4,)),\n ((4, 4), (4, 3)),\n ((2, 8, 8), (2, 8, 10)),\n ]\n for dtype in float_types()\n for rng in [jtu.rand_default()]))\n def testSolveTriangularBlockedGrad(self, lower, transpose_a, lhs_shape,\n rhs_shape, dtype, rng):\n # TODO(frostig): change ensemble to support a bigger rtol\n A = np.tril(rng(lhs_shape, dtype) + 5 * onp.eye(lhs_shape[-1], dtype=dtype))\n A = A if lower else T(A)\n B = rng(rhs_shape, dtype)\n f = partial(scipy.linalg.solve_triangular, lower=lower,\n trans=1 if transpose_a else 0)\n jtu.check_grads(f, (A, B), 2, rtol=1e-3)\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.swapaxes", "numpy.abs", "numpy.einsum", "numpy.linalg.inv", "numpy.eye", "numpy.matmul", "numpy.linalg.norm", "numpy.finfo", "numpy.linalg.qr", "numpy.ndindex", "numpy.zeros", "numpy.divide" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarekWadinger/semestral_project
[ "62ff869e8315426ce0af8be044bcd94d911e3028" ]
[ "src/anomaly.py" ]
[ "import pandas as pd\nimport time\n\nfrom pyod.models.iforest import IForest\nfrom matplotlib import pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef get_sampling_frequency(s, ratio):\n    \"\"\"Get the sampling frequency of time-series data.\n\n    Returns the most common sampling interval, chosen so that it covers\n    at least `ratio` of the observed time deltas in the series.\n\n    Parameters\n    ----------\n    s:\n        DataFrame with a datetime column named 'ds'.\n    ratio:\n        Fraction of the time deltas the chosen frequency must cover.\n\n    Returns\n    -------\n    The sampling frequency as a pandas offset string, e.g. '60S' or '1D'.\n    \"\"\"\n    tf = s['ds'].dropna().diff().value_counts()\n    counter = 0\n\n    for i in range(len(tf)):\n        counter += tf[i]\n        if counter / sum(tf[:]) >= ratio:\n            break\n    if tf.index[i].days == 0:\n        freq = str(tf.index[i].seconds) + \"S\"\n    else:\n        freq = str(tf.index[i].days) + \"D\"\n    return freq\n\n\ndef resample_data(s: pd.DataFrame, ratio=0.9):\n    \"\"\"Resample time-series data to its dominant sampling frequency.\n\n    Parameters\n    ----------\n    s:\n        DataFrame with a datetime column named 'ds'.\n    ratio:\n        Fraction of the time deltas the inferred frequency must cover.\n\n    Returns\n    -------\n    The resampled DataFrame and the inferred frequency string.\n    \"\"\"\n\n    freq = get_sampling_frequency(s, ratio)\n\n    s = s.set_index('ds').resample(freq).first().reset_index()\n    s = s.dropna()\n\n    return s, freq\n\n\ndef anomaly_rate(model, validation_df, freq, plot=False):\n    if freq[:-1].isnumeric() and (freq[-1] == 'S' or freq[-1] == 'D'):\n        last_history = (model.start + model.t_scale).round(freq)\n    else:\n        raise ValueError(\"Unsupported frequency format. \"\n                         \"Provide any valid frequency for pd.date_range, as a multiple of 'D' or 'S'.\")\n\n    first_validation = validation_df['ds'].iloc[0]\n    last_validation = validation_df['ds'].iloc[-1]\n\n    if last_validation > last_history:\n        if first_validation <= last_history:\n            validation_df = validation_df.loc[validation_df['ds'] > last_history].dropna()[['ds', 'y']]\n\n        start_timer = time.time()\n        future = validation_df['ds'].to_frame(name='ds')\n        prediction_data = model.predict(future)[['ds', 'yhat']]  # TOO SLOW!\n        print(\"--- Prediction: %s seconds ---\" % (time.time() - start_timer))\n\n        df = pd.DataFrame({'y': validation_df['y'].values, 'yhat': prediction_data['yhat'].values})\n        scaler = MinMaxScaler(feature_range=(0, 1))\n        df[['y', 'yhat']] = scaler.fit_transform(df[['y', 'yhat']])\n\n        clf_name = 'iForest'\n        clf = IForest()\n        clf.fit(df)\n\n        # get the prediction labels and outlier scores of the training data\n        y_train_pred = clf.labels_  # binary labels (0: inliers, 1: outliers)\n        y_train_scores = clf.decision_scores_  # raw outlier scores\n\n        if plot:\n            # fig = plt.figure(facecolor='w', figsize=(10, 6))\n            # ax = fig.add_subplot(111)\n            # ax.plot(prediction_data['ds'].dt.to_pydatetime(), deviation, 'k.')\n            # ax.plot(prediction_data['ds'][y_train_pred == 1].dt.to_pydatetime(), deviation[y_train_pred == 1], 'r.')\n            # fig.show()\n\n            fig1 = plt.figure(facecolor='w', figsize=(10, 6))\n            ax = fig1.add_subplot(111)\n            ax.plot(prediction_data['ds'].dt.to_pydatetime(), y_train_scores)\n            ax.plot(prediction_data['ds'][y_train_pred == 1].dt.to_pydatetime(), y_train_scores[y_train_pred == 1], 'r.')\n            fig1.show()\n\n            fig2 = plt.figure(facecolor='w', figsize=(10, 6))\n            ax = fig2.add_subplot(111)\n            ax.plot(validation_df['ds'].dt.to_pydatetime(), validation_df['y'].values)\n            ax.plot(prediction_data['ds'].dt.to_pydatetime(), prediction_data['yhat'].values)\n            ax.vlines(prediction_data['ds'][y_train_pred == 1].dt.to_pydatetime(), min(validation_df['y'].values), max(validation_df['y'].values), 'r')\n            fig2.show()\n\n        return sum(y_train_pred) / len(y_train_pred)\n\n    else:\n        raise ValueError(\"Validation dataset has no data point after the last timestamp of the historical \"\n                         \"data that the model was trained on. Please use a validation dataset whose time \"\n                         \"series extends past %s.\" % last_history)\n" ]
[ [ "matplotlib.pyplot.figure", "pandas.DataFrame", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
n778509775/NWCQ
[ "72851d26f470465e9e13b219a12d52daa5e1ceed" ]
[ "MI/crossValidation-complex.py" ]
[ "import numpy as np\nimport pandas as pd\nimport random\nimport math\nimport operator\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_auc_score\nimport argparse\n#from collections import Counter\n\nimport torch\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom sklearn.preprocessing import scale, minmax_scale, Imputer\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom network import Discriminator\n\nnum_epochs = 10\nbatch_size = 128 # batch size for each cluster\nbase_lr = 1e-3\nlr_step = 100 # step decay of learning rates\nl2_decay = 5e-5\n\ndef read_csv_faster(filename):\n\tdata_df = pd.read_csv(filename,index_col=1)\n\tdataset = {}\n\tdataset['labels'] = data_df.iloc[:,0].tolist()\n\t#dataset['board'] = data_df.iloc[:,1].tolist()\n\tdataset['mz_exp'] = np.transpose(np.array(data_df.iloc[:,1:]))\n\tdataset['feature'] = data_df.columns.values.tolist()[1:]\n\treturn dataset\n\ndef plot_clas_loss(loss_classifier_list, save_path):\n fig, ax = plt.subplots(figsize=(8, 4))\n ax.plot(range(len(loss_classifier_list)), loss_classifier_list, \"b--\",linewidth=1)\n ax.legend(['loss_classification'], loc=\"upper right\")\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Loss\")\n fig.savefig(save_path, dpi=300)\n plt.close(fig)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_folder', type=str, default='data/')\nparser.add_argument('--train_file', type=str, default='3.csv')\nconfig = parser.parse_args()\n\ntrain_file = config.data_folder + config.train_file\n#dataset = read_csv_faster('./data/1.csv')\ndataset = read_csv_faster(train_file)\nFinalData = dataset['mz_exp'].transpose()\nAllLabel = dataset['labels']\n\nnum_inputs = FinalData.shape[1]\ndiscriminator = Discriminator(num_inputs=num_inputs)\n\ndef matric(cluster, labels):\n TP, TN, FP, FN = 0, 0, 0, 0\n n = len(labels)\n for i in range(n):\n if cluster[i]:\n if labels[i]:\n TP += 1\n else:\n FP += 1\n elif labels[i]:\n FN += 1\n else:\n TN += 1\n return TP, TN, FP, FN\n\nscoreA=[]\ncriterion = torch.nn.CrossEntropyLoss()\nmodels=[]\nzzz=np.arange(len(AllLabel)*2).reshape((len(AllLabel), 2))\nX=FinalData\ny=np.array(AllLabel)\nskf = StratifiedKFold(n_splits=5,shuffle=True, random_state=random.randint(0,99))\nfor train_index,test_index in skf.split(zzz,y):\n\tX_train, X_test = X[train_index], X[test_index]\n\ty_train, y_test = y[train_index], y[test_index]\n\ttorch_dataset = torch.utils.data.TensorDataset(torch.FloatTensor(X_train), torch.LongTensor(y_train))\n\tdata_loader = torch.utils.data.DataLoader(torch_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n\tloss_classifier_list = []\n\tfor epoch in range(1, num_epochs + 1):\n\t\tlearning_rate = base_lr * math.pow(0.9, epoch / lr_step)\n\t\tgamma_rate = 2 / (1 + math.exp(-10 * (epoch) / num_epochs)) - 1\n\t\toptimizer = torch.optim.Adam([{'params': discriminator.parameters()},], lr=learning_rate, weight_decay=l2_decay)\n\n\t\tdiscriminator.train()\n\t\titer_data = iter(data_loader)\n\t\tnum_iter = len(data_loader)\n\t\t#print(num_iter)\n\t\ttotal_clas_loss = 0\n\t\tnum_batches = 0\n\t\tfor it in range(0, num_iter):\n\t\t\tdata, label = iter_data.next()\n\t\t\tif it % len(data_loader) == 0:\n\t\t\t\titer_data = iter(data_loader)\n\t\t\tdata = Variable(torch.FloatTensor(data))\n\t\t\tlabel = Variable(torch.LongTensor(label))\n\t\t\tDisc_a = 
discriminator(data)\n\n\t\t\toptimizer.zero_grad()\n\t\t\tloss_classification = torch.FloatTensor([0])\n\t\t\tfor cls in range(len(label)):\n\t\t\t\tloss_classification += F.binary_cross_entropy(torch.squeeze(Disc_a)[cls], label[cls].float())\n\t\t\t#loss_classification = criterion(Disc_a, label)\n\t\t\tloss = loss_classification\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tnum_batches += 1\n\t\t\ttotal_clas_loss += loss_classification.data.item()\n\t\tavg_clas_loss = total_clas_loss / num_batches\n\t\tloss_classifier_list.append(avg_clas_loss)\n\tplot_clas_loss(loss_classifier_list, 'clas_loss.png')\n\tdiscriminator.eval()\n\tmodels.append(discriminator.state_dict())\n\n\tDisc_b = discriminator(torch.from_numpy(X_test).float())\n\tpred_b = torch.from_numpy(np.array([1 if i > 0.5 else 0 for i in Disc_b]))\n\t#pred_b = torch.max(F.softmax(Disc_b), 1)[1]\n\ttest_label = torch.from_numpy(y_test)\n\tnum_correct_b = 0\n\tnum_correct_b += torch.eq(pred_b, test_label).sum().float().item()\n\tAcc_b = num_correct_b/len(test_label)\n\tscoreA.append(Acc_b)\n\nprint(np.mean(scoreA))\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "pandas.read_csv", "torch.LongTensor", "torch.eq", "torch.utils.data.DataLoader", "matplotlib.pyplot.subplots", "torch.from_numpy", "numpy.mean", "torch.FloatTensor", "matplotlib.pyplot.close", "numpy.array", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Vedantsahai18/k-yantra
[ "e515bcec21455d1882f64a21906795766b5d8b16" ]
[ "app.py" ]
[ "import os\nimport sys\nimport json\n\n# Flask\nfrom flask import Flask, redirect, url_for, request, render_template, Response, jsonify, redirect\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_hub as hub\n\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions\nfrom tensorflow.keras.models import load_model, model_from_json\nfrom tensorflow.keras.preprocessing import image\n\n# Some utilites\nimport numpy as np\nfrom datetime import date\nimport owncloud\nimport requests\nimport urllib\nimport cv2\n\n# Declare a flask app\napp = Flask(__name__)\n\nwith open('credentials.txt','r') as f:\n f = f.read()\n username, password = f.split(' ')\n\ndef setup():\n today = date.today()\n oc = owncloud.Client('http://localhost/owncloud')\n oc.login(username, password)\n file = f'{today.day}-{today.month}/image.jpg'\n link_info = oc.share_file_with_link(f'{file}')\n url = link_info.get_link() + \"/download\"\n link = link_info.get_link()[-15:]\n link = f'http://localhost/owncloud/index.php/apps/files_sharing/ajax/publicpreview.php?x=1400&y=688&a=true&file=image.jpg&t={link}&scalingup=0'\n return link\n\n\nMODEL_PATH = 'models/tomato_model.h5'\nMODEL_JSON = 'models/tomato_model.json'\nmodel = load_model(MODEL_PATH, custom_objects={'KerasLayer':hub.KerasLayer})\nmodel.summary()\nclasses = ['Tomato___Bacterial_spot' , 'Tomato___Septoria_leaf_spot',\n 'Tomato___Early_blight', 'Tomato___Spider_mites_Two-spotted_spider_mite',\n 'Tomato___healthy', 'Tomato___Target_Spot',\n 'Tomato___Late_blight', 'Tomato___Tomato_mosaic_virus',\n 'Tomato___Leaf_Mold', 'Tomato___Tomato_Yellow_Leaf_Curl_Virus']\n\ndef model_predict(img, model):\n img = cv2.resize(img,(224,224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x, mode='tf')\n preds = model.predict(x)\n \n return preds\n\[email protected]('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\[email protected]('/generic', methods=['GET'])\ndef predict():\n today = date.today()\n link = setup()\n if not os.path.exists(f'{today.day}-{today.month}'):\n os.mkdir(f'{today.day}-{today.month}')\n\n # image = requests.get(link)\n # with iopen(f'{today.day}-{today.month}/image.jpg', \"wb\") as file:\n # file.write(image.content)\n\n # img_path = os.path.join(f'{today.day}-{today.month}', 'image.jpg')\n #with open(f'{today.day}-{today.month}/image.jpg', \"rb\") as file:\n\n image = urllib.request.urlopen(link)\n image = np.asarray(bytearray(image.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n print(image.shape)\n preds = model_predict(image, model) \n pred_class = classes[np.argmax(preds)] \n result = pred_class.replace('_', ' ').capitalize()\n return render_template('generic.html',user_image= link,response=json.dumps(result))\n\n\nif __name__ == '__main__':\n http_server = WSGIServer(('0.0.0.0', 5000), app)\n http_server.serve_forever()\n index()\n predict()\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.expand_dims", "numpy.argmax", "tensorflow.keras.applications.imagenet_utils.preprocess_input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
saltedpotato/custom-data-with-mcrnn
[ "59dc46447b207f6f6dd3c228874cbc8c6e2911d9" ]
[ "mrcnn/model.py" ]
[ "\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(), array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\", \"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a',\n strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2,\n block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256],\n stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3,\n block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3,\n block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3,\n block='c', train_bn=train_bn)\n C3 = x = identity_block(\n x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4,\n block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024],\n stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5,\n block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048],\n stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(\n x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(\n self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count -\n tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(\n roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(\n r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn=lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn=lambda: tf.cast(tf.constant([]), tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(\n class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(\n x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(\n x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(\n x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from 
different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Computer loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. 
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
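Each positive ROI gets a target delta\n    # only in the slot of its own class; all other class slots stay zero.\n    # Sketch of the refinement math (mirrors utils.box_refinement, which is\n    # called below):\n    #   dy = (gt_center_y - roi_center_y) / roi_h\n    #   dx = (gt_center_x - roi_center_x) / roi_w\n    #   dh = log(gt_h / roi_h),  dw = log(gt_w / roi_w)\n    # Per-class-slot format: 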
[y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
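A GT box ends up unmatched when\n    # no anchor reaches 0.3 IoU with it, which would leave that object with\n    # no positive anchors at all. 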
Instead,\n    # match it to the closest anchor (even if its max IoU is < 0.3).\n    #\n    # 1. Set negative anchors first. They get overwritten below if a GT box is\n    # matched to them. Skip boxes in crowd areas.\n    anchor_iou_argmax = np.argmax(overlaps, axis=1)\n    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n    # 2. Set an anchor for each GT box (regardless of IoU value).\n    # If multiple anchors have the same IoU match all of them\n    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]\n    rpn_match[gt_iou_argmax] = 1\n    # 3. Set anchors with high overlap as positive.\n    rpn_match[anchor_iou_max >= 0.7] = 1\n\n    # Subsample to balance positive and negative anchors\n    # Don't let positives be more than half the anchors\n    ids = np.where(rpn_match == 1)[0]\n    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n    if extra > 0:\n        # Reset the extra ones to neutral\n        ids = np.random.choice(ids, extra, replace=False)\n        rpn_match[ids] = 0\n    # Same for negative proposals\n    ids = np.where(rpn_match == -1)[0]\n    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n                        np.sum(rpn_match == 1))\n    if extra > 0:\n        # Reset the extra ones to neutral\n        ids = np.random.choice(ids, extra, replace=False)\n        rpn_match[ids] = 0\n\n    # For positive anchors, compute shift and scale needed to transform them\n    # to match the corresponding GT boxes.\n    ids = np.where(rpn_match == 1)[0]\n    ix = 0  # index into rpn_bbox\n    # TODO: use box_refinement() rather than duplicating the code here\n    for i, a in zip(ids, anchors[ids]):\n        # Closest gt box (it might have IoU < 0.7)\n        gt = gt_boxes[anchor_iou_argmax[i]]\n\n        # Convert coordinates to center plus width/height.\n        # GT Box\n        gt_h = gt[2] - gt[0]\n        gt_w = gt[3] - gt[1]\n        gt_center_y = gt[0] + 0.5 * gt_h\n        gt_center_x = gt[1] + 0.5 * gt_w\n        # Anchor\n        a_h = a[2] - a[0]\n        a_w = a[3] - a[1]\n        a_center_y = a[0] + 0.5 * a_h\n        a_center_x = a[1] + 0.5 * a_w\n\n        # Compute the bbox refinement that the RPN should predict.\n        rpn_bbox[ix] = [\n            (gt_center_y - a_center_y) / a_h,\n            (gt_center_x - a_center_x) / a_w,\n            np.log(gt_h / a_h),\n            np.log(gt_w / a_w),\n        ]\n        # Normalize\n        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n        ix += 1\n\n    return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n    \"\"\"Generates ROI proposals similar to what a region proposal network\n    would generate.\n\n    image_shape: [Height, Width, Depth]\n    count: Number of ROIs to generate\n    gt_class_ids: [N] Integer ground truth class IDs\n    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n    \"\"\"\n    # placeholder\n    rois = np.zeros((count, 4), dtype=np.int32)\n\n    # Generate random ROIs around GT boxes (90% of count)\n    rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n    for i in range(gt_boxes.shape[0]):\n        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n        h = gt_y2 - gt_y1\n        w = gt_x2 - gt_x1\n        # random boundaries\n        r_y1 = max(gt_y1 - h, 0)\n        r_y2 = min(gt_y2 + h, image_shape[0])\n        r_x1 = max(gt_x1 - w, 0)\n        r_x2 = min(gt_x2 + w, image_shape[1])\n\n        # To avoid generating boxes with zero area, we generate double what\n        # we need and filter out the extra. 
If we get fewer valid boxes\n        # than we need, we loop and try again.\n        while True:\n            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n            # Filter out zero area boxes\n            threshold = 1\n            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n                        threshold][:rois_per_box]\n            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n                        threshold][:rois_per_box]\n            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n                break\n\n        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n        # into y1, x1, y2, x2 order\n        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n        box_rois = np.hstack([y1, x1, y2, x2])\n        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n    # Generate random ROIs anywhere in the image (10% of count)\n    remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n    # To avoid generating boxes with zero area, we generate double what\n    # we need and filter out the extra. If we get fewer valid boxes\n    # than we need, we loop and try again.\n    while True:\n        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n        # Filter out zero area boxes\n        threshold = 1\n        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n                    threshold][:remaining_count]\n        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n                    threshold][:remaining_count]\n        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n            break\n\n    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n    # into y1, x1, y2, x2 order\n    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n    global_rois = np.hstack([y1, x1, y2, x2])\n    rois[-remaining_count:] = global_rois\n    return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n                   random_rois=0, batch_size=1, detection_targets=False,\n                   no_augmentation_sources=None):\n    \"\"\"A generator that returns images and corresponding target class ids,\n    bounding box deltas, and masks.\n\n    dataset: The Dataset object to pick data from\n    config: The model config object\n    shuffle: If True, shuffles the samples before every epoch\n    augment: (deprecated. Use augmentation instead). If true, apply random\n        image augmentation. Currently, only horizontal flipping is offered.\n    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n        For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n        right/left 50% of the time.\n    random_rois: If > 0 then generate proposals to be used to train the\n        network classifier and mask heads. Useful if training\n        the Mask RCNN part without the RPN.\n    batch_size: How many images to return in each call\n    detection_targets: If True, generate detection targets (class IDs, bbox\n        deltas, and masks). Typically for debugging or visualizations because\n        in training detection targets are generated by DetectionTargetLayer.\n    no_augmentation_sources: Optional. List of sources to exclude for\n        augmentation. A source is a string that identifies a dataset and is\n        defined in the Dataset class.\n\n    Returns a Python generator. Upon calling next() on it, the\n    generator returns two lists, inputs and outputs. The contents\n    of the lists differ depending on the received arguments:\n    inputs list:\n    - images: [batch, H, W, C]\n    - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
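# Repeated failures usually point at a dataset problem rather than\n                # a transient error, so stop and surface the exception.\n                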
raise\n\n\n############################################################\n#  MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n    \"\"\"Encapsulates the Mask RCNN model functionality.\n\n    The actual Keras model is in the keras_model property.\n    \"\"\"\n\n    def save(self, save_path):\n        self.keras_model.save(save_path)\n\n    def __init__(self, mode, config, model_dir):\n        \"\"\"\n        mode: Either \"training\" or \"inference\"\n        config: A subclass of the Config class\n        model_dir: Directory to save training logs and trained weights\n        \"\"\"\n        assert mode in ['training', 'inference']\n        self.mode = mode\n        self.config = config\n        self.model_dir = model_dir\n        self.set_log_dir()\n        self.keras_model = self.build(mode=mode, config=config)\n\n    def build(self, mode, config):\n        \"\"\"Build Mask R-CNN architecture.\n            input_shape: The shape of the input image.\n            mode: Either \"training\" or \"inference\". The inputs and\n                outputs of the model differ accordingly.\n        \"\"\"\n        assert mode in ['training', 'inference']\n\n        # Image size must be divisible by 2 multiple times\n        h, w = config.IMAGE_SHAPE[:2]\n        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n            raise Exception(\"Image size must be divisible by 2 at least 6 times \"\n                            \"to avoid fractions when downscaling and upscaling. \"\n                            \"For example, use 256, 320, 384, 448, 512, ... etc.\")\n\n        # Inputs\n        input_image = KL.Input(\n            shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n                                    name=\"input_image_meta\")\n        if mode == \"training\":\n            # RPN GT\n            input_rpn_match = KL.Input(\n                shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n            input_rpn_bbox = KL.Input(\n                shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n            # Detection GT (class IDs, bounding boxes, and masks)\n            # 1. GT Class IDs (zero padded)\n            input_gt_class_ids = KL.Input(\n                shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n            # 2. GT Boxes in pixels (zero padded)\n            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n            input_gt_boxes = KL.Input(\n                shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n            # Normalize coordinates\n            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n                x, K.shape(input_image)[1:3]))(input_gt_boxes)\n            # 3. 
GT Masks (zero padded)\n            # [batch, height, width, MAX_GT_INSTANCES]\n            if config.USE_MINI_MASK:\n                input_gt_masks = KL.Input(\n                    shape=[config.MINI_MASK_SHAPE[0],\n                           config.MINI_MASK_SHAPE[1], None],\n                    name=\"input_gt_masks\", dtype=bool)\n            else:\n                input_gt_masks = KL.Input(\n                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n                    name=\"input_gt_masks\", dtype=bool)\n        elif mode == \"inference\":\n            # Anchors in normalized coordinates\n            input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n        # Build the shared convolutional layers.\n        # Bottom-up Layers\n        # Returns a list of the last layers of each stage, 5 in total.\n        # Don't create the head (stage 5), so we pick the 4th item in the list.\n        if callable(config.BACKBONE):\n            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n                                                train_bn=config.TRAIN_BN)\n        else:\n            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n                                             stage5=True, train_bn=config.TRAIN_BN)\n        # Top-down Layers\n        # TODO: add assert to verify feature map sizes match what's in config\n        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE,\n                       (1, 1), name='fpn_c5p5')(C5)\n        P4 = KL.Add(name=\"fpn_p4add\")([\n            KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n        P3 = KL.Add(name=\"fpn_p3add\")([\n            KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n        P2 = KL.Add(name=\"fpn_p2add\")([\n            KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n        # Attach 3x3 conv to all P layers to get the final feature maps.\n        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),\n                       padding=\"SAME\", name=\"fpn_p2\")(P2)\n        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),\n                       padding=\"SAME\", name=\"fpn_p3\")(P3)\n        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),\n                       padding=\"SAME\", name=\"fpn_p4\")(P4)\n        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),\n                       padding=\"SAME\", name=\"fpn_p5\")(P5)\n        # P6 is used for the 5th anchor scale in RPN. Generated by\n        # subsampling from P5 with stride of 2.\n        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n        # Note that P6 is used in RPN, but not in the classifier heads.\n        rpn_feature_maps = [P2, P3, P4, P5, P6]\n        mrcnn_feature_maps = [P2, P3, P4, P5]\n\n        # Anchors\n        if mode == \"training\":\n            anchors = self.get_anchors(config.IMAGE_SHAPE)\n            # Duplicate across the batch dimension because Keras requires it\n            # TODO: can this be optimized to avoid duplicating the anchors?\n            anchors = np.broadcast_to(\n                anchors, (config.BATCH_SIZE,) + anchors.shape)\n            # A hack to get around Keras's bad support for constants\n            anchors = KL.Lambda(lambda x: tf.Variable(\n                anchors), name=\"anchors\")(input_image)\n        else:\n            anchors = input_anchors\n\n        # RPN Model\n        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n        # Loop through pyramid layers\n        layer_outputs = []  # list of lists\n        for p in rpn_feature_maps:\n            layer_outputs.append(rpn([p]))\n        # Concatenate layer outputs\n        # Convert from list of lists of level outputs to list of lists\n        # of outputs across levels.\n        # e.g. 
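in plain Python, zip(*x) performs this transpose:\n        #   list(zip(*[[1, 2, 3], [4, 5, 6]])) == [(1, 4), (2, 5), (3, 6)]\n        # i.e. 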
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
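# In inference the classifier head runs on the raw proposals, while the\n            # mask head runs later on the refined detection boxes (see below).\n            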
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
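ParallelModel keeps the original\n        # model (and its weights) in an `inner_model` attribute.\n        # 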
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(\n w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n        # of the inner model because they have the weights.\n        layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n            else keras_model.layers\n\n        for layer in layers:\n            # Is the layer a model?\n            if layer.__class__.__name__ == 'Model':\n                print(\"In model: \", layer.name)\n                self.set_trainable(\n                    layer_regex, keras_model=layer, indent=indent + 4)\n                continue\n\n            if not layer.weights:\n                continue\n            # Is it trainable?\n            trainable = bool(re.fullmatch(layer_regex, layer.name))\n            # Update layer. If layer is a container, update inner layer.\n            if layer.__class__.__name__ == 'TimeDistributed':\n                layer.layer.trainable = trainable\n            else:\n                layer.trainable = trainable\n            # Print trainable layer names\n            if trainable and verbose > 0:\n                log(\"{}{:20}   ({})\".format(\" \" * indent, layer.name,\n                                            layer.__class__.__name__))\n\n    def set_log_dir(self, model_path=None):\n        \"\"\"Sets the model log directory and epoch counter.\n\n        model_path: If None, or a format different from what this code uses\n            then set a new log directory and start epochs from 0. Otherwise,\n            extract the log directory and the epoch counter from the file\n            name.\n        \"\"\"\n        # Set date and epoch counter as if starting a new model\n        self.epoch = 0\n        now = datetime.datetime.now()\n\n        # If we have a model path with date and epochs use them\n        if model_path:\n            # Continue from where we left off. Get epoch and date from the file name\n            # A sample model path might look like:\n            # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n            regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n            m = re.match(regex, model_path)\n            if m:\n                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n                                        int(m.group(4)), int(m.group(5)))\n                # Epoch number in file is 1-based, and in Keras code it's 0-based.\n                # So, adjust for that then increment by one to start from the next epoch\n                self.epoch = int(m.group(6)) - 1 + 1\n                print('Re-starting from epoch %d' % self.epoch)\n\n        # Directory for training logs\n        self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n            self.config.NAME.lower(), now))\n\n        # Path to save after each epoch. Include placeholders that get filled by Keras.\n        self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n            self.config.NAME.lower()))\n        self.checkpoint_path = self.checkpoint_path.replace(\n            \"*epoch*\", \"{epoch:04d}\")\n\n    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n              augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n        \"\"\"Train the model.\n        train_dataset, val_dataset: Training and validation Dataset objects.\n        learning_rate: The learning rate to train with\n        epochs: Number of training epochs. Note that previous training epochs\n            are considered to be done already, so this actually determines\n            the epochs to train in total rather than in this particular\n            call.\n        layers: Allows selecting which layers to train. It can be:\n            - A regular expression to match layer names to train\n            - One of these predefined values:\n              heads: The RPN, classifier and mask heads of the network\n              all: All the layers\n              3+: Train Resnet stage 3 and up\n              4+: Train Resnet stage 4 and up\n              5+: Train Resnet stage 5 and up\n        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n            flips images right/left 50% of the time. 
You can pass complex\n            augmentations as well. This augmentation applies 50% of the\n            time, and when it does it flips images right/left half the time\n            and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n                augmentation = imgaug.augmenters.Sometimes(0.5, [\n                    imgaug.augmenters.Fliplr(0.5),\n                    imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n                ])\n        custom_callbacks: Optional. Add custom callbacks to be called\n            with the keras fit_generator method. Must be a list of type keras.callbacks.\n        no_augmentation_sources: Optional. List of sources to exclude for\n            augmentation. A source is a string that identifies a dataset and is\n            defined in the Dataset class.\n        \"\"\"\n        assert self.mode == \"training\", \"Create model in training mode.\"\n\n        sess = K.get_session()\n\n        # Pre-defined layer regular expressions\n        layer_regex = {\n            # all layers but the backbone\n            \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n            # From a specific Resnet stage and up\n            \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n            \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n            \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n            # All layers\n            \"all\": \".*\",\n        }\n        if layers in layer_regex.keys():\n            layers = layer_regex[layers]\n\n        # Data generators\n        train_generator = data_generator(train_dataset, self.config, shuffle=True,\n                                         augmentation=augmentation,\n                                         batch_size=self.config.BATCH_SIZE,\n                                         no_augmentation_sources=no_augmentation_sources)\n        val_generator = data_generator(val_dataset, self.config, shuffle=True,\n                                       batch_size=self.config.BATCH_SIZE)\n\n        # Create log_dir if it does not exist\n        if not os.path.exists(self.log_dir):\n            os.makedirs(self.log_dir)\n\n        # Callbacks\n        callbacks = [\n            keras.callbacks.TensorBoard(log_dir=self.log_dir,\n                                        histogram_freq=0, write_graph=True, write_images=False),\n            keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n                                            verbose=0, save_weights_only=True),\n        ]\n\n        # Add custom callbacks to the list\n        if custom_callbacks:\n            callbacks += custom_callbacks\n\n        # Train\n        log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n        log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n        self.set_trainable(layers)\n        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n        # Work-around for Windows: Keras fails on Windows when using\n        # multiprocessing workers. See discussion here:\n        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n        if os.name == 'nt':\n            workers = 0\n        else:\n            workers = multiprocessing.cpu_count()\n\n        self.keras_model.fit_generator(\n            train_generator,\n            initial_epoch=self.epoch,\n            epochs=epochs,\n            steps_per_epoch=self.config.STEPS_PER_EPOCH,\n            callbacks=callbacks,\n            validation_data=val_generator,\n            validation_steps=self.config.VALIDATION_STEPS,\n            max_queue_size=100,\n            workers=workers,\n            use_multiprocessing=True,\n        )\n        self.epoch = max(self.epoch, epochs)\n\n        # To save as .pb\n        frozen_graph = self.freeze_session(\n            sess, output_names=[out.op.name for out in self.keras_model.outputs])\n        tf.train.write_graph(frozen_graph, self.log_dir,\n                             \"doorway_model.pb\", as_text=False)\n\n    def freeze_session(self, session, keep_var_names=None, output_names=None, clear_devices=True):\n        \"\"\"\n        Freezes the state of a session into a pruned computation graph.\n        Creates a new computation graph where variable nodes are replaced by\n        constants taking their current value in the session. 
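\n\n        A usage sketch (hedged; this mirrors the call made in train() above):\n\n            frozen = self.freeze_session(\n                K.get_session(),\n                output_names=[out.op.name for out in self.keras_model.outputs])\n            tf.train.write_graph(frozen, self.log_dir,\n                                 \"doorway_model.pb\", as_text=False)\n\n        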
The new graph will be\n pruned so subgraphs that are not necessary to compute the requested\n outputs are removed.\n @param session The TensorFlow session to be frozen.\n @param keep_var_names A list of variable names that should not be frozen,\n or None to freeze all the variables in the graph.\n\n @param output_names Names of the relevant graph outputs.\n @param clear_devices Remove the device directives from the graph for better portability.\n @return The frozen graph definition.\n \"\"\"\n graph = session.graph\n with session.as_default():\n freeze_var_names = list(set(v.op.name for v in\n tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = tf.graph_util.convert_variables_to_constants(\n session, input_graph_def, output_names, freeze_var_names)\n return frozen_graph\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. 
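Real detections never use class 0\n        # (background) and DetectionLayer zero-pads the unused tail, so the\n        # first class_id == 0 marks the end of the valid rows. 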
Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(\n masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(\n anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict(\n [molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(\n anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict(\n [molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n 
self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(\n a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. 
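\n\n        A hedged example; \"ROI\" is the ProposalLayer name defined in build():\n\n            outputs = model.run_graph([image], [\n                (\"proposals\", model.keras_model.get_layer(\"ROI\").output),\n            ])\n            proposals = outputs[\"proposals\"]\n\n        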
Keys are the names received in the\n        input and values are Numpy arrays.\n        \"\"\"\n        model = self.keras_model\n\n        # Organize desired outputs into an ordered dict\n        outputs = OrderedDict(outputs)\n        for o in outputs.values():\n            assert o is not None\n\n        # Build a Keras function to run parts of the computation graph\n        inputs = model.inputs\n        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n            inputs += [K.learning_phase()]\n        kf = K.function(model.inputs, list(outputs.values()))\n\n        # Prepare inputs\n        if image_metas is None:\n            molded_images, image_metas, _ = self.mold_inputs(images)\n        else:\n            molded_images = images\n        image_shape = molded_images[0].shape\n        # Anchors\n        anchors = self.get_anchors(image_shape)\n        # Duplicate across the batch dimension because Keras requires it\n        # TODO: can this be optimized to avoid duplicating the anchors?\n        anchors = np.broadcast_to(\n            anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n        model_in = [molded_images, image_metas, anchors]\n\n        # Run inference\n        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n            model_in.append(0.)\n        outputs_np = kf(model_in)\n\n        # Pack the generated Numpy arrays into a dict and log the results.\n        outputs_np = OrderedDict([(k, v)\n                                  for k, v in zip(outputs.keys(), outputs_np)])\n        for k, v in outputs_np.items():\n            log(k, v)\n        return outputs_np\n\n\n############################################################\n#  Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n                       window, scale, active_class_ids):\n    \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n    image_id: An int ID of the image. Useful for debugging.\n    original_image_shape: [H, W, C] before resizing or padding.\n    image_shape: [H, W, C] after resizing and padding\n    window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n        image is (excluding the padding)\n    scale: The scaling factor applied to the original image (float32)\n    active_class_ids: List of class_ids available in the dataset from which\n        the image came. Useful if training on images from multiple datasets\n        where not all classes are present in all datasets.\n
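\n    For example (illustrative sizes): with NUM_CLASSES=81 the meta vector has\n    1 + 3 + 3 + 4 + 1 + 81 = 93 elements, laid out as\n\n        [image_id | orig H,W,C | H,W,C | y1,x1,y2,x2 | scale | active_class_ids]\n\n    which matches the slices used in parse_image_meta() below.\n    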
Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n # size=4 (y1, x1, y2, x2) in image cooredinates\n list(window) +\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. 
But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
[ [ "numpy.amax", "numpy.expand_dims", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.cast", "tensorflow.image.non_max_suppression", "tensorflow.equal", "tensorflow.image.crop_and_resize", "numpy.concatenate", "numpy.max", "tensorflow.abs", "tensorflow.map_fn", "tensorflow.global_variables", "numpy.any", "tensorflow.pad", "tensorflow.where", "tensorflow.random_shuffle", "numpy.where", "tensorflow.add_n", "numpy.divide", "numpy.random.randint", "tensorflow.boolean_mask", "numpy.hstack", "tensorflow.graph_util.convert_variables_to_constants", "tensorflow.Variable", "numpy.reshape", "numpy.fliplr", "numpy.arange", "tensorflow.squeeze", "numpy.stack", "tensorflow.divide", "tensorflow.stop_gradient", "tensorflow.gather", "numpy.copy", "numpy.argmax", "tensorflow.nn.top_k", "tensorflow.argmax", "numpy.zeros", "tensorflow.train.write_graph", "numpy.log", "tensorflow.gather_nd", "tensorflow.unique", "tensorflow.shape", "numpy.random.choice", "tensorflow.identity", "tensorflow.exp", "tensorflow.sparse_tensor_to_dense", "numpy.delete", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.split", "tensorflow.round", "numpy.array", "numpy.sum", "tensorflow.size", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "numpy.abs", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.sort", "numpy.ones", "numpy.random.shuffle", "tensorflow.log", "numpy.broadcast_to", "tensorflow.sqrt", "numpy.empty", "tensorflow.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Pradhy729/BertSum
[ "6864f3dadb51a1e680623a3d018f46d714d4b62c" ]
[ "src/models/model_builder.py" ]
[ "\nimport torch\nimport torch.nn as nn\nfrom transformers import BertModel, BertConfig\nfrom torch.nn.init import xavier_uniform_\n\nfrom models.encoder import TransformerInterEncoder, Classifier, RNNEncoder\nfrom models.optimizers import Optimizer\n\n\ndef build_optim(args, model, checkpoint):\n \"\"\" Build optimizer \"\"\"\n saved_optimizer_state_dict = None\n\n if args.train_from != '':\n optim = checkpoint['optim']\n saved_optimizer_state_dict = optim.optimizer.state_dict()\n else:\n optim = Optimizer(\n args.optim, args.lr, args.max_grad_norm,\n beta1=args.beta1, beta2=args.beta2,\n decay_method=args.decay_method,\n warmup_steps=args.warmup_steps)\n\n optim.set_parameters(list(model.named_parameters()))\n\n if args.train_from != '':\n optim.optimizer.load_state_dict(saved_optimizer_state_dict)\n if args.visible_gpus != '-1':\n for state in optim.optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()\n\n if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):\n raise RuntimeError(\n \"Error: loaded Adam optimizer from existing model\" +\n \" but optimizer state is empty\")\n\n return optim\n\n\nclass Bert(nn.Module):\n def __init__(self, temp_dir, load_pretrained_bert, bert_config):\n super(Bert, self).__init__()\n if(load_pretrained_bert):\n self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir, output_hidden_states=True)\n else:\n self.model = BertModel(bert_config)\n\n def forward(self, x, segs, mask):\n outputs = self.model(x, token_type_ids=segs, attention_mask =mask)\n top_vec = outputs[0]\n return top_vec\n\n\n\nclass Summarizer(nn.Module):\n def __init__(self, args, device, load_pretrained_bert = False, bert_config = None):\n super(Summarizer, self).__init__()\n self.args = args\n self.device = device\n self.bert = Bert(args.temp_dir, load_pretrained_bert, bert_config)\n if (args.encoder == 'classifier'):\n self.encoder = Classifier(self.bert.model.config.hidden_size)\n elif(args.encoder=='transformer'):\n self.encoder = TransformerInterEncoder(self.bert.model.config.hidden_size, args.ff_size, args.heads,\n args.dropout, args.inter_layers)\n elif(args.encoder=='rnn'):\n self.encoder = RNNEncoder(bidirectional=True, num_layers=1,\n input_size=self.bert.model.config.hidden_size, hidden_size=args.rnn_size,\n dropout=args.dropout)\n elif (args.encoder == 'baseline'):\n bert_config = BertConfig(self.bert.model.config.vocab_size, hidden_size=args.hidden_size,\n num_hidden_layers=6, num_attention_heads=8, intermediate_size=args.ff_size)\n self.bert.model = BertModel(bert_config)\n self.encoder = Classifier(self.bert.model.config.hidden_size)\n\n if args.param_init != 0.0:\n for p in self.encoder.parameters():\n p.data.uniform_(-args.param_init, args.param_init)\n if args.param_init_glorot:\n for p in self.encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n self.to(device)\n def load_cp(self, pt):\n self.load_state_dict(pt['model'], strict=True)\n\n def forward(self, x, segs, clss, mask, mask_cls, sentence_range=None):\n #print(f'The shape of x is {x.shape}, and clss is {clss.shape}')\n #print(f'The shape of segs is {segs.shape}, the shape of mask is {mask.shape}')\n top_vec = self.bert(x, segs, mask)\n #print(f'The shape of top_vec is {top_vec.shape}')\n sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]\n sents_vec = sents_vec * mask_cls[:, :, None].float()\n sent_scores = self.encoder(sents_vec, mask_cls).squeeze(-1)\n return sent_scores, mask_cls\n" ]
[ [ "torch.is_tensor", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tranbahien/bae_prior
[ "6bb385ed765ae1dd8c961e597f1ed91e036ae470" ]
[ "learnable_priors/distributions/gaussian.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom learnable_priors.distributions import PriorDistribution\nfrom utils import inv_softplus\n\n\nclass GaussianDistribution(PriorDistribution):\n\n def __init__(self, *shape, mean_init=0., log_var_init=-6,\n mean_trainable=True, std_trainable=False):\n super(GaussianDistribution, self).__init__()\n self.shape = shape \n self.mean = nn.Parameter(\n torch.ones(*shape) * mean_init,\n requires_grad=True)\n\n self.log_var = nn.Parameter(\n torch.ones(*shape) * log_var_init,\n requires_grad=True)\n\n @property\n def std(self):\n return torch.sqrt(self.var)\n \n\n @property\n def var(self):\n return torch.exp(self.log_var)\n\n def sample(self, *shape):\n eps = torch.randn(*shape, *self.shape, device=self.mean.device,\n requires_grad=False)\n\n samples = self.mean + self.std * eps\n return samples\n\n def log_prob(self, inputs):\n return torch.sum(-0.5 * ((self.mean - inputs) ** 2 / self.var))\n\n\nclass FactorizedGaussianDistribution(nn.Module):\n def __init__(self, *shape):\n super(FactorizedGaussianDistribution, self).__init__()\n self.shape = shape\n self.mean = nn.Parameter(torch.zeros(*shape), requires_grad=False)\n self.logvars = nn.Parameter(torch.zeros(*shape), requires_grad=True)\n\n def sample(self, *shape):\n epsilon_for_samples = torch.randn(*shape, *self.shape,\n device=self.mean.device,\n requires_grad=False)\n\n samples = self.mean + (self.logvars/2).exp() * epsilon_for_samples\n return samples\n\n def log_prob(self, inputs):\n return torch.sum(-0.5 * (np.log(2 * np.pi) + self.logvars +\\\n (self.mean - inputs) ** 2 / self.logvars.exp()))" ]
[ [ "numpy.log", "torch.ones", "torch.zeros", "torch.sqrt", "torch.randn", "torch.sum", "torch.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GustavoHBDuarte/fraud-detection
[ "89090ff1071b056a58a28710fadb8af2fa73836a" ]
[ "web_app/Fraud_class/Data_prep.py" ]
[ "import pickle\nimport bz2\nimport _pickle as cPickle\nfrom math import ceil\nfrom math import floor\nimport pandas as pd\nimport numpy as np\nimport json\n\n\n\nclass Data_prep(object):\n \n def __init__(self): \n self.amount_scaler = pickle.load(open('encoders/amount_scaler.pkl', 'rb')) # Opening scaler \n self.day_of_month_scaler = pickle.load(open('encoders/day_of_month_scaler.pkl', 'rb')) # Opening scaler \n self.newbalanceDest_scaler = pickle.load(open('encoders/newbalanceDest_scaler.pkl', 'rb')) # Opening scaler \n \n self.newbalanceOrig_scaler = pickle.load(open('encoders/newbalanceOrig_scaler.pkl', 'rb')) # Opening scaler \n self.oldbalanceDest_scaler = pickle.load(open('encoders/oldbalanceDest_scaler.pkl', 'rb')) # Opening scaler \n self.oldbalanceOrg_scaler = pickle.load(open('encoders/oldbalanceOrg_scaler.pkl', 'rb')) # Opening scaler \n self.step_scaler = pickle.load(open('encoders/step_scaler.pkl', 'rb')) # Opening scaler \n self.type_encoder = pickle.load(open('encoders/type_encoder.pkl', 'rb')) # Opening encoder \n self.week_of_month_scaler = pickle.load(open('encoders/week_of_month_scaler.pkl', 'rb')) # Opening scaler\n \n \n \n \n \n def feature_engineering(self, df1):\n \n # Day of month\n\n df1['day_of_month'] = df1['step'].apply(lambda x: ceil(x/24))\n \n # Week of month\n\n df1['week_of_month'] = df1['step'].apply(lambda x: ceil(x/168))\n \n # Creating aux column 'min_step_of_day'\n\n for i in df1['day_of_month'].unique():\n df1.loc[df1['day_of_month']==i,'min_step_of_day'] = df1.loc[df1['day_of_month']==i, 'step'].min()\n \n \n # Creating 'hour_of_day' column\n\n df1['hour_of_day'] = df1['step'] - df1['min_step_of_day']\n \n \n # Creating aux column 'min_day_of_week'\n\n for i in df1['week_of_month'].unique():\n df1.loc[df1['week_of_month']==i, 'min_day_of_week'] = df1.loc[df1['week_of_month']==i, 'day_of_month'].min()\n\n\n # Creating 'day_of_week' column\n \n df1['day_of_week'] = ((df1['day_of_month'] - df1['min_day_of_week'])+1)\n \n \n # Creating 'is_weekend' column\n\n df1['is_weekend'] = df1['day_of_week'].apply(lambda x: 'weekend' if x == 1 or x == 7 else 'weekdays')\n \n \n # Creating 'time_of_day' column\n\n df1['time_of_day'] = df1['hour_of_day'].apply(lambda x: 'AM' if x <= 12 else 'PM')\n \n \n # Creating 'period_of_day' column\n\n df1['period_of_day'] = ['Morning' if i<=12 else 'Afternoon' if i>12 and i<=18 else 'Evening' for i in df1['hour_of_day']]\n \n \n # Creating 'orig_type' column\n\n df1['orig_type'] = ['Merchant' if i[0]== 'M' else 'Costumer' for i in df1['nameOrig']]\n \n \n # Creating 'dest_type' column\n\n df1['dest_type'] = ['Merchant' if i[0]== 'M' else 'Costumer' for i in df1['nameDest']]\n \n \n # Creating 'oldbalanceOrg_status'\n\n df1['oldbalanceOrg_status'] = ['zero' if i==0 else 'non-zero' for i in df1['oldbalanceOrg']]\n \n \n # Creating 'newbalanceOrig_status'\n\n df1['newbalanceOrig_status'] = ['zero' if i==0 else 'non-zero' for i in df1['newbalanceOrig']]\n \n \n # Creating 'oldbalanceDest_status'\n\n df1['oldbalanceDest_status'] = ['zero' if i==0 else 'non-zero' for i in df1['oldbalanceDest']]\n \n \n # Creating 'newbalanceDest_status'\n\n df1['newbalanceDest_status'] = ['zero' if i==0 else 'non-zero' for i in df1['newbalanceDest']]\n \n \n # Creating the column 'is_orig_equal_dest'\n\n df1['is_orig_equal_dest'] = ['yes' if i==it else 'no' for i, it in zip(df1['nameOrig'], df1['nameDest'])]\n \n \n # Creating the column 'is_oldbalanceOrg_higherthan_newbalanceOrig'\n\n df1['is_oldbalanceOrg_higherthan_newbalanceOrig'] = ['yes'if 
i>it else 'no' for i, it in zip(df1['oldbalanceOrg'], df1['newbalanceOrig'])]\n \n \n # Creating the column 'is_oldbalanceDest_higherthan_newbalanceDest'\n\n df1['is_oldbalanceDest_higherthan_newbalanceDest'] = ['yes'if i>it else 'no' for i, it in zip(df1['oldbalanceDest'], df1['newbalanceDest'])]\n \n \n # Creating the column 'transaction_direction'\n\n df1['transaction_direction'] = [i[0]+'2'+it[0] for i, it in zip(df1['orig_type'], df1['dest_type'])]\n \n \n # Converting 'isFlaggedFraud' column to categorical (to further corr calculation)\n\n df1['isFlaggedFraud'] = ['no' if i==0 else 'yes' for i in df1['isFlaggedFraud']]\n \n \n # Droping aux column 'min_step_of_day'\n\n df1 = df1.drop(columns=['min_step_of_day', 'min_day_of_week'])\n \n \n # Converting 'hour_of_day' and 'day_of_week' columns to int\n\n df1[['hour_of_day','day_of_week']] = df1[['hour_of_day','day_of_week']].astype(int)\n \n \n \n \n #========================================================\n # Data filtering\n #========================================================\n \n df1 = df1.drop(columns=['orig_type','is_orig_equal_dest'])\n \n df1 = df1.drop(columns=['nameOrig'])\n \n return df1\n \n \n def data_preparation(self, df2):\n \n X_train = df2\n \n \n # Scaling: Numeric attributes\n \n \n # Numeric non-cyclic attributes - Robust Scaler (each of them has extreme values)\n \n # 'amount'\n \n X_train['amount'] = self.amount_scaler.transform(X_train[['amount']].values)\n \n # 'oldbalanceOrg'\n \n X_train['oldbalanceOrg'] = self.oldbalanceOrg_scaler.transform(X_train[['oldbalanceOrg']].values)\n \n # 'newbalanceOrig'\n \n X_train['newbalanceOrig'] = self.newbalanceOrig_scaler.transform(X_train[['newbalanceOrig']].values)\n \n # 'oldbalanceDest'\n \n X_train['oldbalanceDest'] = self.oldbalanceDest_scaler.transform(X_train[['oldbalanceDest']].values)\n \n # 'newbalanceDest'\n \n X_train['newbalanceDest'] = self.newbalanceDest_scaler.transform(X_train[['newbalanceDest']].values)\n \n # 'day_of_month'\n \n X_train['day_of_month'] = self.day_of_month_scaler.transform(X_train[['day_of_month']].values)\n \n # 'week_of_month'\n \n X_train['week_of_month'] = self.week_of_month_scaler.transform(X_train[['week_of_month']].values)\n \n # 'step'\n \n X_train['step'] = self.step_scaler.transform(X_train[['step']].values)\n \n \n \n # Encoding: Categorical attributes\n \n \n # Dummies\n \n # Features to encode via Dummies:\n \n # 'isFlaggedFraud',\n # 'is_weekend',\n # 'time_of_day',\n # 'dest_type',\n # 'oldbalanceOrg_status',\n # 'newbalanceOrig_status',\n # 'oldbalanceDest_status',\n # 'newbalanceDest_status',\n # 'is_oldbalanceOrg_higherthan_newbalanceOrig',\n # 'is_oldbalanceDest_higherthan_newbalanceDest',\n # 'transaction_direction'\n \n \n # Getting Dummie variables \n \n \n X_train['isFlaggedFraud_no'] = [1 if i=='no' else 0 for i in X_train['isFlaggedFraud']]\n \n X_train['isFlaggedFraud_yes'] = [1 if i=='yes' else 0 for i in X_train['isFlaggedFraud']]\n \n X_train['is_weekend_weekdays'] = [1 if i=='weekdays' else 0 for i in X_train['is_weekend']]\n \n X_train['is_weekend_weekend'] = [1 if i=='weekend' else 0 for i in X_train['is_weekend']]\n \n X_train['time_of_day_AM'] = [1 if i=='AM' else 0 for i in X_train['time_of_day']]\n \n X_train['time_of_day_PM'] = [1 if i=='PM' else 0 for i in X_train['time_of_day']]\n \n X_train['dest_type_Costumer'] = [1 if i=='Costumer' else 0 for i in X_train['dest_type']]\n \n X_train['dest_type_Merchant'] = [1 if i=='Merchant' else 0 for i in X_train['dest_type']]\n \n 
X_train['oldbalanceOrg_status_non-zero'] = [1 if i=='non-zero' else 0 for i in X_train['oldbalanceOrg_status']]\n \n X_train['oldbalanceOrg_status_zero'] = [1 if i=='zero' else 0 for i in X_train['oldbalanceOrg_status']]\n \n X_train['newbalanceOrig_status_non-zero'] = [1 if i=='non-zero' else 0 for i in X_train['newbalanceOrig_status']]\n \n X_train['newbalanceOrig_status_zero'] = [1 if i=='zero' else 0 for i in X_train['newbalanceOrig_status']]\n \n X_train['oldbalanceDest_status_non-zero'] = [1 if i=='non-zero' else 0 for i in X_train['oldbalanceDest_status']]\n \n X_train['oldbalanceDest_status_zero'] = [1 if i=='zero' else 0 for i in X_train['oldbalanceDest_status']]\n \n X_train['newbalanceDest_status_non-zero'] = [1 if i=='non-zero' else 0 for i in X_train['newbalanceDest_status']]\n \n X_train['newbalanceDest_status_zero'] = [1 if i=='zero' else 0 for i in X_train['newbalanceDest_status']]\n \n X_train['is_oldbalanceOrg_higherthan_newbalanceOrig_no'] = [1 if i=='no' else 0 for i in X_train['is_oldbalanceOrg_higherthan_newbalanceOrig']]\n \n X_train['is_oldbalanceOrg_higherthan_newbalanceOrig_yes'] = [1 if i=='yes' else 0 for i in X_train['is_oldbalanceOrg_higherthan_newbalanceOrig']]\n \n X_train['is_oldbalanceDest_higherthan_newbalanceDest_no'] = [1 if i=='no' else 0 for i in X_train['is_oldbalanceDest_higherthan_newbalanceDest']]\n \n X_train['is_oldbalanceDest_higherthan_newbalanceDest_yes'] = [1 if i=='yes' else 0 for i in X_train['is_oldbalanceDest_higherthan_newbalanceDest']]\n \n X_train['transaction_direction_C2C'] = [1 if i=='C2C' else 0 for i in X_train['transaction_direction']]\n \n X_train['transaction_direction_C2M'] = [1 if i=='C2M' else 0 for i in X_train['transaction_direction']]\n \n X_train = X_train.drop(columns=['isFlaggedFraud', 'is_weekend', 'time_of_day', 'dest_type', 'oldbalanceOrg_status', 'newbalanceOrig_status', 'oldbalanceDest_status', 'newbalanceDest_status', 'is_oldbalanceOrg_higherthan_newbalanceOrig', 'is_oldbalanceDest_higherthan_newbalanceDest', 'transaction_direction'])\n \n \n\n \n \n # Label encoding - 'type'\n \n X_train['type'] = self.type_encoder.transform(X_train['type'])\n \n \n \n \n # Cyclic transform\n \n \n # Cyclic variables:\n \n # hour_of_day\n # day_of_week\n # period_of_day\n \n \n # First let's just encode 'period_of_day' from string to numbers\n \n # period_of_day (Label encoder)\n \n period_of_day_encoding_map = {'Morning': 1, 'Afternoon':2, 'Evening':3}\n \n X_train['period_of_day'] = X_train['period_of_day'].map(period_of_day_encoding_map)\n \n \n # Transforming the attributes\n \n # hour_of_day\n \n X_train['hour_of_day_sin'] = X_train['hour_of_day'].apply(lambda x: np.sin(x*(2.*np.pi/24)))\n X_train['hour_of_day_cos'] = X_train['hour_of_day'].apply(lambda x: np.cos(x*(2.*np.pi/24)))\n \n # day_of_week\n \n X_train['day_of_week_sin'] = X_train['day_of_week'].apply(lambda x: np.sin(x*(2.*np.pi/7)))\n X_train['day_of_week_cos'] = X_train['day_of_week'].apply(lambda x: np.cos(x*(2.*np.pi/7)))\n \n # period_of_day\n \n X_train['period_of_day_sin'] = X_train['period_of_day'].apply(lambda x: np.sin(x*(2.*np.pi/3)))\n X_train['period_of_day_cos'] = X_train['period_of_day'].apply(lambda x: np.cos(x*(2.*np.pi/3)))\n \n \n \n # Dropping the precursor attributes\n \n X_train = X_train.drop(columns=['hour_of_day','day_of_week', 'period_of_day'])\n \n \n # Columns selected by Boruta \n \n cols_selected_boruta = ['step',\n 'type',\n 'amount',\n 'oldbalanceOrg',\n 'oldbalanceDest',\n 'newbalanceDest',\n 'day_of_month',\n 
'oldbalanceDest_status_non-zero',\n 'newbalanceDest_status_non-zero',\n 'newbalanceDest_status_zero',\n 'hour_of_day_sin',\n 'hour_of_day_cos']\n \n return X_train[cols_selected_boruta]\n\n\n def get_predictions(self, model, test_raw, df_3):\n \n # prediction\n predictions = model.predict(df_3)\n \n # join predictions into the original data\n test_raw.loc[df_3.index, 'Predictions'] = predictions\n \n # return desired columns\n test_raw = test_raw[['step', 'type', 'amount', 'nameOrig', 'oldbalanceOrg', 'newbalanceOrig', 'nameDest', 'oldbalanceDest', 'newbalanceDest', 'isFlaggedFraud', 'Predictions']]\n \n \n return test_raw.to_json(orient='records', date_format='iso')\n\n\n" ]
[ [ "numpy.cos", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CriticalLink/ArrowCMR
[ "d7e45421b8762421aeff660108d1166be83e0d89" ]
[ "MotorControlGUI/Python/libs/adiSerial.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 17 10:23:20 2020\n\n@brief: Serial class for communication over COM port.\n@description: Serial connection manager with convenience\nmethods for connecting to serial, discovering ports\nand sending data as a stream of bytes.\n\n@author: Tom Sharkey\n@last-modified: 2020-11-09\n\"\"\"\n\nimport logging\nimport queue\nimport struct\nimport threading\nimport time\nimport traceback\n\nimport numpy as np\nimport pandas as pd\nimport serial\nimport serial.tools.list_ports\n\n# constants\nSTART_KEY = 170\nSTART_BYTE = 255\nTRANSMIT_STATE = 65\n\n# Create or get the logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nfile_handler = logging.FileHandler('logfile.log')\nformatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s :%(funcName)20s(): %(message)s')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n\nclass SerialQueue(threading.Thread):\n \"\"\"\n @brief: rx threaded queue for continuous serial communication\n @description: A variety of serial connector objects that uses a queue and thread\n structure to continuously receive data, and place data in a formatted queue, dataQ.\n This queue can be read from to plot data.\n \"\"\"\n\n def __init__(self, parent, dataQ, data_ready_event):\n self.connected = False\n self.comPort = None\n self.baudrate = parent.config['baudrate']\n self.serial_conn = None\n self.parent = parent\n self.data_ready_event = data_ready_event # set when data is available in Q\n\n # queueing commands out and raw data in\n self.enabled = False\n self.first = True\n self.serial_rx_queue = queue.Queue()\n self.raw = list()\n self.dataQ = dataQ # formatted rx data for plotting\n\n self.TOTAL = parent.config['total']\n self.type = \"float\"\n\n threading.Thread.__init__(self, daemon=True)\n\n def __str__(self):\n return f\"serialQueue object on {self.comPort}@{self.baudrate}\"\n\n # serial connection & start continuous rx\n def connect(self, comPort):\n logger.info('connect')\n print(\"Connected to serialQ\")\n self.comPort = comPort\n try:\n self.serial_conn = serial.Serial(\n self.comPort,\n baudrate=self.baudrate\n )\n except serial.SerialException:\n traceback.print_exc()\n self.disable()\n else:\n self.connected = True\n\n def disconnect(self):\n logger.info(\"disconnect\")\n if self.connected:\n self.disable()\n self.serial_conn.close()\n self.connected = False\n self.comPort = ''\n\n def enable(self): \n self.serial_conn.flushInput()\n self.raw.clear()\n self.dataQ.queue.clear() \n self.sendAsBytes([START_BYTE, TRANSMIT_STATE, 1]) \n self.enabled = True\n \n def disable(self):\n self.enabled = False \n self.sendAsBytes([START_BYTE, TRANSMIT_STATE, 0]) # stop sending data message \n\n def getRaw(self):\n \"\"\"Look at raw incoming data, useful for debugging\"\"\"\n print(f\"Raw: {len(self.raw)}, {self.raw[-10:]}\")\n\n def getByte(self):\n if not self.serial_rx_queue.empty():\n rx_byte = self.serial_rx_queue.get()\n return rx_byte\n else:\n print(\"Queue is empty\")\n\n def updateTotal(self, total):\n self.TOTAL = total\n\n def sendAsBytes(self, data):\n try:\n assert (self.serial_conn is not None)\n assert (self.connected)\n # print(f\"Sending data: {data}\")\n databytes = bytearray(data)\n self.serial_conn.write(databytes)\n logger.debug(f\"Sending command over serial: raw:{data}, bytes:{databytes}\")\n except AssertionError:\n print(\"You must connect to a serial port before configuring the motor\")\n\n @staticmethod\n def discoverPorts():\n 
\"\"\"Automatically discovers available comports when SerialConnector\n object is created. Returns names as list.\n Current implementation is Windows only\"\"\"\n ports = serial.tools.list_ports.comports(include_links=False)\n available_ports = []\n for port in sorted(ports):\n available_ports.append(port.device)\n return available_ports\n\n def run(self):\n while True:\n time.sleep(0.3)\n if self.enabled:\n # Check for bytes to receive from serial\n try:\n while self.serial_conn.inWaiting() > 0:\n rx_buffer_inwaiting = self.serial_conn.inWaiting()\n self.serial_rx_queue.put(self.serial_conn.read(rx_buffer_inwaiting))\n except serial.SerialException:\n traceback.print_exc()\n self.disable()\n\n # Place stream of rx bytes in raw\n try:\n if not self.serial_rx_queue.empty():\n while not self.serial_rx_queue.empty():\n for i, byte in enumerate(list(self.getByte())):\n self.raw.append(byte)\n\n if START_KEY in self.raw:\n index = self.raw.index(START_KEY)\n # check for full dataframe after index (0xAA)\n if len(self.raw[index:]) >= self.TOTAL:\n dataframe = self.raw[index + 1:self.TOTAL + 1] # grab data excluding index\n formatted_data = bytesToNum(dataframe, self.type) # one var or many\n self.raw = self.raw[index + self.TOTAL + 1:] # remove the dataframe from raw\n with threading.Lock():\n self.dataQ.put(formatted_data)\n self.data_ready_event.set()\n logger.debug(f\"dataQ: {self.dataQ.qsize()}\")\n\n message = [START_BYTE, TRANSMIT_STATE, 1] # request more data\n self.sendAsBytes(message)\n else:\n print(f\"Could not find start key ({START_KEY}) in raw: {self.raw[0:5]}\")\n except:\n traceback.print_exc()\n\n\nclass DataPlotter(threading.Thread):\n def __init__(self, parent, dataQ, canvas_frame, motor, data_ready_event):\n self.enabled = False\n self.dataQ = dataQ\n self.logging = False\n self.canvas_frame = canvas_frame\n self.motor = motor\n self.first_frame = True\n self.parent = parent\n self.data_ready_event = data_ready_event\n threading.Thread.__init__(self, daemon=True)\n\n def __str__(self):\n return f\"DataPlotter Object with:\\n -DataQ: {self.dataQ}\\n \"\n\n def run(self):\n # i = 0\n # start_time = time.time()\n while True:\n self.data_ready_event.wait() # wait for serial thread to set data ready event\n self.data_ready_event.clear()\n if self.enabled:\n try:\n if not self.dataQ.empty():\n # print(self.dataQ.qsize())\n y = self.dataQ.get()\n if y is None:\n print(\"Skipping plot\")\n continue\n if self.logging:\n data = {\"Phase U\": y[::2],\n \"Phase V\": y[1::2]}\n log = pd.DataFrame(data)\n log.index = log.index + 1 # to start index at 1\n log.to_csv('out.csv')\n x = self.motor.getX()\n time_sec = False\n # if max(x) > 0.5:\n # print(f\"Showing last x {x[-1]}, and max {max(x)}\")\n # time_sec = True\n\n # # print(f\"T value: {x}\")\n\n self.canvas_frame.update_data(\"Phase U\", x, y[::2]) # even members of dataQ frame\n self.canvas_frame.update_data(\"Phase V\", x, y[1::2]) # odd members of dataQ frame\n\n # i += 1\n # if i == 30:\n # uptime = time.time() - start_time\n # print(f\"FPS: {(i / uptime)}\")\n except:\n traceback.print_exc()\n\n def start_logging(self):\n self.logging = True\n\n def stop_logging(self):\n self.logging = False\n\n def enable(self):\n self.enabled = True\n\n def disable(self):\n self.enabled = False # ends tx rx thread\n\n\nclass DataReader(threading.Thread):\n \"\"\"Simple Queue reader for use with CLI\"\"\"\n\n def __init__(self, parent, dataQ):\n self.logging = False\n self.enabled = False\n self.dataQ = dataQ\n self.y = []\n\n 
threading.Thread.__init__(self, daemon=True)\n\n raw_size = 1600\n BUFF_SIZE = raw_size // 8\n DOWN_SAMP_FACTOR = 1\n step = DOWN_SAMP_FACTOR * 0.0001\n self.t = np.arange(0.0001, (BUFF_SIZE + 1 / 4) * step, step)\n\n def run(self):\n while True:\n time.sleep(0.3)\n if self.enabled:\n try:\n if not self.dataQ.empty():\n self.y = self.dataQ.get()\n except:\n traceback.print_exc()\n\n def start_logging(self):\n self.logging = True\n\n def stop_logging(self):\n self.logging = False\n\n def enable(self):\n print(f\"Enabling dataReader {self}\")\n self.enabled = True\n\n def disable(self):\n self.enabled = False # ends tx rx thread\n\n def latestData(self):\n \"\"\"Prints latest data frame - used for debug\"\"\"\n print(f\"Data reader data: {self}\")\n print(self.y)\n\n\ndef bytesToNum(data, type):\n output = None\n if type == \"int\":\n output = bytesToInt(data)\n elif type == \"float\":\n output = bytesToFloat(data)\n return output\n\n\ndef bytesToInt(data, signed=False):\n output = []\n out_bytes = []\n step = 2\n for j in range(0, len(data), step):\n output.append((int.from_bytes(data[j:j + step], byteorder='little', signed=signed)))\n out_bytes.append(data[j:j + step])\n return output\n\n\ndef bytesToFloat(data):\n try:\n output = []\n out_bytes = []\n step = 4\n for j in range(0, len(data), step):\n [x] = struct.unpack('f', bytearray(data[j:j + step]))\n output.append(x)\n out_bytes.append(data[j:j + step])\n return output\n except:\n pass\n" ]
[ [ "numpy.arange", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
frankfengdi/lidarMTL
[ "70c6181149e84c638b0435738282ae93faab8d1b" ]
[ "pcdet/models/detectors/detector3d_template.py" ]
[ "import os\n\nimport torch\nimport torch.nn as nn\n\nfrom ...ops.iou3d_nms import iou3d_nms_utils\nfrom .. import backbones_2d, backbones_3d, dense_heads, roi_heads\nfrom ..backbones_2d import map_to_bev\nfrom ..backbones_3d import pfe, vfe\nfrom ..model_utils import model_nms_utils\n\nfrom visual_utils import debug_utils as V\nfrom eval_utils import eval_utils_point as E\n\nclass Detector3DTemplate(nn.Module):\n def __init__(self, model_cfg, num_class, dataset):\n super().__init__()\n self.model_cfg = model_cfg\n self.num_class = num_class\n self.dataset = dataset\n self.class_names = dataset.class_names\n self.register_buffer('global_step', torch.LongTensor(1).zero_())\n\n self.module_topology = [\n 'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',\n 'backbone_2d', 'dense_head', 'point_head', 'roi_head'\n ]\n\n @property\n def mode(self):\n return 'TRAIN' if self.training else 'TEST'\n\n def update_global_step(self):\n self.global_step += 1\n\n def build_networks(self):\n model_info_dict = {\n 'module_list': [],\n 'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,\n 'num_point_features': self.dataset.point_feature_encoder.num_point_features,\n 'grid_size': self.dataset.grid_size,\n 'point_cloud_range': self.dataset.point_cloud_range,\n 'voxel_size': self.dataset.voxel_size\n }\n for module_name in self.module_topology:\n module, model_info_dict = getattr(self, 'build_%s' % module_name)(\n model_info_dict=model_info_dict\n )\n self.add_module(module_name, module)\n return model_info_dict['module_list']\n\n def build_vfe(self, model_info_dict):\n if self.model_cfg.get('VFE', None) is None:\n return None, model_info_dict\n\n vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](\n model_cfg=self.model_cfg.VFE,\n num_point_features=model_info_dict['num_rawpoint_features'],\n point_cloud_range=model_info_dict['point_cloud_range'],\n voxel_size=model_info_dict['voxel_size']\n )\n model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()\n model_info_dict['module_list'].append(vfe_module)\n return vfe_module, model_info_dict\n\n def build_backbone_3d(self, model_info_dict):\n if self.model_cfg.get('BACKBONE_3D', None) is None:\n return None, model_info_dict\n\n backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](\n model_cfg=self.model_cfg.BACKBONE_3D,\n input_channels=model_info_dict['num_point_features'],\n grid_size=model_info_dict['grid_size'],\n voxel_size=model_info_dict['voxel_size'],\n point_cloud_range=model_info_dict['point_cloud_range']\n )\n model_info_dict['module_list'].append(backbone_3d_module)\n model_info_dict['num_point_features'] = backbone_3d_module.num_point_features\n return backbone_3d_module, model_info_dict\n\n def build_map_to_bev_module(self, model_info_dict):\n if self.model_cfg.get('MAP_TO_BEV', None) is None:\n return None, model_info_dict\n\n map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](\n model_cfg=self.model_cfg.MAP_TO_BEV,\n grid_size=model_info_dict['grid_size']\n )\n model_info_dict['module_list'].append(map_to_bev_module)\n model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features\n return map_to_bev_module, model_info_dict\n\n def build_backbone_2d(self, model_info_dict):\n if self.model_cfg.get('BACKBONE_2D', None) is None:\n return None, model_info_dict\n\n backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](\n model_cfg=self.model_cfg.BACKBONE_2D,\n input_channels=model_info_dict['num_bev_features']\n )\n 
model_info_dict['module_list'].append(backbone_2d_module)\n model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features\n return backbone_2d_module, model_info_dict\n\n def build_pfe(self, model_info_dict):\n if self.model_cfg.get('PFE', None) is None:\n return None, model_info_dict\n\n pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](\n model_cfg=self.model_cfg.PFE,\n voxel_size=model_info_dict['voxel_size'],\n point_cloud_range=model_info_dict['point_cloud_range'],\n num_bev_features=model_info_dict['num_bev_features'],\n num_rawpoint_features=model_info_dict['num_rawpoint_features']\n )\n model_info_dict['module_list'].append(pfe_module)\n model_info_dict['num_point_features'] = pfe_module.num_point_features\n model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion\n return pfe_module, model_info_dict\n\n def build_dense_head(self, model_info_dict):\n if self.model_cfg.get('DENSE_HEAD', None) is None:\n return None, model_info_dict\n dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](\n model_cfg=self.model_cfg.DENSE_HEAD,\n input_channels=model_info_dict['num_bev_features'],\n num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,\n class_names=self.class_names,\n grid_size=model_info_dict['grid_size'],\n point_cloud_range=model_info_dict['point_cloud_range'],\n predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)\n )\n model_info_dict['module_list'].append(dense_head_module)\n return dense_head_module, model_info_dict\n\n def build_point_head(self, model_info_dict):\n if self.model_cfg.get('POINT_HEAD', None) is None:\n return None, model_info_dict\n\n if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):\n num_point_features = model_info_dict['num_point_features_before_fusion']\n else:\n num_point_features = model_info_dict['num_point_features']\n\n point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](\n model_cfg=self.model_cfg.POINT_HEAD,\n input_channels=num_point_features,\n num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,\n predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)\n )\n\n model_info_dict['module_list'].append(point_head_module)\n return point_head_module, model_info_dict\n\n def build_roi_head(self, model_info_dict):\n if self.model_cfg.get('ROI_HEAD', None) is None:\n return None, model_info_dict\n point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](\n model_cfg=self.model_cfg.ROI_HEAD,\n input_channels=model_info_dict['num_point_features'],\n num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,\n )\n\n model_info_dict['module_list'].append(point_head_module)\n return point_head_module, model_info_dict\n\n def forward(self, **kwargs):\n raise NotImplementedError\n\n def post_processing(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n batch_size:\n batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)\n or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]\n multihead_label_mapping: [(num_class1), (num_class2), ...]\n batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)\n cls_preds_normalized: indicate whether batch_cls_preds is normalized\n batch_index: optional (N1+N2+...)\n has_class_labels: True/False\n roi_labels: (B, num_rois) 1 .. 
num_classes\n batch_pred_labels: (B, num_boxes, 1)\n Returns:\n\n \"\"\"\n post_process_cfg = self.model_cfg.POST_PROCESSING\n\n if post_process_cfg.VISUALIZATION_POINT_PREDICTION:\n V.visualize_voxel_prediction(batch_dict)\n if post_process_cfg.SAVE_POINT_PREDICTION:\n E.save_prediction_point_argoverse(batch_dict, save_gt=False)\n\n batch_size = batch_dict['batch_size']\n recall_dict = {}\n pred_dicts = []\n for index in range(batch_size):\n if batch_dict.get('batch_index', None) is not None:\n assert batch_dict['batch_box_preds'].shape.__len__() == 2\n batch_mask = (batch_dict['batch_index'] == index)\n else:\n assert batch_dict['batch_box_preds'].shape.__len__() == 3\n batch_mask = index\n\n box_preds = batch_dict['batch_box_preds'][batch_mask]\n src_box_preds = box_preds\n\n if not isinstance(batch_dict['batch_cls_preds'], list):\n cls_preds = batch_dict['batch_cls_preds'][batch_mask]\n\n src_cls_preds = cls_preds\n assert cls_preds.shape[1] in [1, self.num_class]\n\n if not batch_dict['cls_preds_normalized']:\n cls_preds = torch.sigmoid(cls_preds)\n else:\n cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]\n src_cls_preds = cls_preds\n if not batch_dict['cls_preds_normalized']:\n cls_preds = [torch.sigmoid(x) for x in cls_preds]\n\n if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:\n if not isinstance(cls_preds, list):\n cls_preds = [cls_preds]\n multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]\n else:\n multihead_label_mapping = batch_dict['multihead_label_mapping']\n\n cur_start_idx = 0\n pred_scores, pred_labels, pred_boxes = [], [], []\n for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):\n assert cur_cls_preds.shape[1] == len(cur_label_mapping)\n cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]\n cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(\n cls_scores=cur_cls_preds, box_preds=cur_box_preds,\n nms_config=post_process_cfg.NMS_CONFIG,\n score_thresh=post_process_cfg.SCORE_THRESH\n )\n cur_pred_labels = cur_label_mapping[cur_pred_labels]\n pred_scores.append(cur_pred_scores)\n pred_labels.append(cur_pred_labels)\n pred_boxes.append(cur_pred_boxes)\n cur_start_idx += cur_cls_preds.shape[0]\n\n final_scores = torch.cat(pred_scores, dim=0)\n final_labels = torch.cat(pred_labels, dim=0)\n final_boxes = torch.cat(pred_boxes, dim=0)\n else:\n cls_preds, label_preds = torch.max(cls_preds, dim=-1)\n if batch_dict.get('has_class_labels', False):\n label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'\n label_preds = batch_dict[label_key][index]\n else:\n label_preds = label_preds + 1\n selected, selected_scores = model_nms_utils.class_agnostic_nms(\n box_scores=cls_preds, box_preds=box_preds,\n nms_config=post_process_cfg.NMS_CONFIG,\n score_thresh=post_process_cfg.SCORE_THRESH\n )\n\n if post_process_cfg.OUTPUT_RAW_SCORE:\n max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)\n selected_scores = max_cls_preds[selected]\n\n final_scores = selected_scores\n final_labels = label_preds[selected]\n final_boxes = box_preds[selected]\n\n recall_dict = self.generate_recall_record(\n box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,\n recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,\n thresh_list=post_process_cfg.RECALL_THRESH_LIST\n )\n\n record_dict = {\n 'pred_boxes': final_boxes,\n 'pred_scores': final_scores,\n 'pred_labels': final_labels\n }\n 
pred_dicts.append(record_dict)\n\n return pred_dicts, recall_dict\n\n @staticmethod\n def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):\n if 'gt_boxes' not in data_dict:\n return recall_dict\n\n rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None\n gt_boxes = data_dict['gt_boxes'][batch_index]\n\n if recall_dict.__len__() == 0:\n recall_dict = {'gt': 0}\n for cur_thresh in thresh_list:\n recall_dict['roi_%s' % (str(cur_thresh))] = 0\n recall_dict['rcnn_%s' % (str(cur_thresh))] = 0\n\n cur_gt = gt_boxes\n k = cur_gt.__len__() - 1\n while k > 0 and cur_gt[k].sum() == 0:\n k -= 1\n cur_gt = cur_gt[:k + 1]\n\n if cur_gt.shape[0] > 0:\n if box_preds.shape[0] > 0:\n iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])\n else:\n iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))\n\n if rois is not None:\n iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])\n\n for cur_thresh in thresh_list:\n if iou3d_rcnn.shape[0] == 0:\n recall_dict['rcnn_%s' % str(cur_thresh)] += 0\n else:\n rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()\n recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled\n if rois is not None:\n roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()\n recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled\n\n recall_dict['gt'] += cur_gt.shape[0]\n else:\n gt_iou = box_preds.new_zeros(box_preds.shape[0])\n return recall_dict\n\n def load_params_from_file(self, filename, logger, to_cpu=False):\n if not os.path.isfile(filename):\n raise FileNotFoundError\n\n logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))\n loc_type = torch.device('cpu') if to_cpu else None\n checkpoint = torch.load(filename, map_location=loc_type)\n model_state_disk = checkpoint['model_state']\n\n if 'version' in checkpoint:\n logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])\n\n update_model_state = {}\n for key, val in model_state_disk.items():\n if key in self.state_dict() and self.state_dict()[key].shape == model_state_disk[key].shape:\n update_model_state[key] = val\n # logger.info('Update weight %s: %s' % (key, str(val.shape)))\n\n state_dict = self.state_dict()\n state_dict.update(update_model_state)\n self.load_state_dict(state_dict)\n\n for key in state_dict:\n if key not in update_model_state:\n logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))\n\n logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))\n\n def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):\n if not os.path.isfile(filename):\n raise FileNotFoundError\n\n logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))\n loc_type = torch.device('cpu') if to_cpu else None\n checkpoint = torch.load(filename, map_location=loc_type)\n epoch = checkpoint.get('epoch', -1)\n it = checkpoint.get('it', 0.0)\n\n self.load_state_dict(checkpoint['model_state'])\n\n if optimizer is not None:\n if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:\n logger.info('==> Loading optimizer parameters from checkpoint %s to %s'\n % (filename, 'CPU' if to_cpu else 'GPU'))\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n else:\n assert filename[-4] == '.', filename\n src_file, ext = filename[:-4], filename[-3:]\n optimizer_filename = '%s_optim.%s' % (src_file, 
ext)\n if os.path.exists(optimizer_filename):\n optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)\n optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])\n\n if 'version' in checkpoint:\n print('==> Checkpoint trained from version: %s' % checkpoint['version'])\n logger.info('==> Done')\n\n return it, epoch\n" ]
[ [ "torch.LongTensor", "torch.sigmoid", "torch.max", "torch.zeros", "torch.cat", "torch.load", "torch.arange", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lewington-pitsos/lanenet-lane-detection
[ "12009ebc0cd7b418b1e07a646b4e1b7285466790" ]
[ "semantic_segmentation_zoo/cnn_basenet.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-9-18 下午3:59\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection\n# @File : cnn_basenet.py\n# @IDE: PyCharm Community Edition\n\"\"\"\nThe base convolution neural networks mainly implement some useful cnn functions\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\n\nclass CNNBaseModel(object):\n \"\"\"\n Base model for other specific cnn ctpn_models\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def conv2d(inputdata, out_channel, kernel_size, padding='SAME',\n stride=1, w_init=None, b_init=None,\n split=1, use_bias=True, data_format='NHWC', name=None):\n \"\"\"\n Packing the tensorflow conv2d function.\n :param name: op name\n :param inputdata: A 4D tensorflow tensor which ust have known number of channels, but can have other\n unknown dimensions.\n :param out_channel: number of output channel.\n :param kernel_size: int so only support square kernel convolution\n :param padding: 'VALID' or 'SAME'\n :param stride: int so only support square stride\n :param w_init: initializer for convolution weights\n :param b_init: initializer for bias\n :param split: split channels as used in Alexnet mainly group for GPU memory save.\n :param use_bias: whether to use bias.\n :param data_format: default set to NHWC according tensorflow\n :return: tf.Tensor named ``output``\n \"\"\"\n with tf.compat.v1.variable_scope (name):\n in_shape = inputdata.get_shape().as_list()\n channel_axis = 3 if data_format == 'NHWC' else 1\n in_channel = in_shape[channel_axis]\n assert in_channel is not None, \"[Conv2D] Input cannot have unknown channel!\"\n assert in_channel % split == 0\n assert out_channel % split == 0\n\n padding = padding.upper()\n\n if isinstance(kernel_size, list):\n filter_shape = [int(kernel_size[0]), int(kernel_size[1])] + [int(in_channel / split), int(out_channel)]\n else:\n filter_shape = [int(kernel_size), int(kernel_size)] + [int(in_channel / split), int(out_channel)]\n\n if isinstance(stride, list):\n strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \\\n else [1, 1, stride[0], stride[1]]\n else:\n strides = [1, stride, stride, 1] if data_format == 'NHWC' \\\n else [1, 1, stride, stride]\n\n if w_init is None:\n w_init = tf.keras.initializers.VarianceScaling()\n if b_init is None:\n b_init = tf.constant_initializer()\n\n w = tf.compat.v1.get_variable('W', filter_shape, initializer=w_init)\n b = None\n\n if use_bias:\n b = tf.compat.v1.get_variable('b', [out_channel], initializer=b_init)\n\n if split == 1:\n conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)\n else:\n inputs = tf.split(inputdata, split, channel_axis)\n kernels = tf.split(w, split, 3)\n outputs = [tf.nn.conv2d(i, k, strides, padding, data_format=data_format)\n for i, k in zip(inputs, kernels)]\n conv = tf.concat(outputs, channel_axis)\n\n ret = tf.identity(tf.nn.bias_add(conv, b, data_format=data_format)\n if use_bias else conv, name=name)\n\n return ret\n\n @staticmethod\n def depthwise_conv(input_tensor, kernel_size, name, depth_multiplier=1,\n padding='SAME', stride=1):\n \"\"\"\n\n :param input_tensor:\n :param kernel_size:\n :param name:\n :param depth_multiplier:\n :param padding:\n :param stride:\n :return:\n \"\"\"\n with tf.compat.v1.variable_scope (name_or_scope=name):\n in_shape = input_tensor.get_shape().as_list()\n in_channel = in_shape[3]\n padding = padding.upper()\n\n depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, 
depth_multiplier]\n w_init = tf.keras.initializers.VarianceScaling()\n\n depthwise_filter = tf.compat.v1.get_variable(\n name='depthwise_filter_w', shape=depthwise_filter_shape,\n initializer=w_init\n )\n\n result = tf.nn.depthwise_conv2d(\n input=input_tensor,\n filter=depthwise_filter,\n strides=[1, stride, stride, 1],\n padding=padding,\n name='depthwise_conv_output'\n )\n return result\n\n @staticmethod\n def relu(inputdata, name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :return:\n \"\"\"\n return tf.nn.relu(features=inputdata, name=name)\n\n @staticmethod\n def sigmoid(inputdata, name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :return:\n \"\"\"\n return tf.nn.sigmoid(x=inputdata, name=name)\n\n @staticmethod\n def maxpooling(inputdata, kernel_size, stride=None, padding='VALID',\n data_format='NHWC', name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :param kernel_size:\n :param stride:\n :param padding:\n :param data_format:\n :return:\n \"\"\"\n padding = padding.upper()\n\n if stride is None:\n stride = kernel_size\n\n if isinstance(kernel_size, list):\n kernel = [1, kernel_size[0], kernel_size[1], 1] if data_format == 'NHWC' else \\\n [1, 1, kernel_size[0], kernel_size[1]]\n else:\n kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \\\n else [1, 1, kernel_size, kernel_size]\n\n if isinstance(stride, list):\n strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \\\n else [1, 1, stride[0], stride[1]]\n else:\n strides = [1, stride, stride, 1] if data_format == 'NHWC' \\\n else [1, 1, stride, stride]\n\n return tf.nn.max_pool(input=inputdata, ksize=kernel, strides=strides, padding=padding,\n data_format=data_format, name=name)\n\n @staticmethod\n def avgpooling(inputdata, kernel_size, stride=None, padding='VALID',\n data_format='NHWC', name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :param kernel_size:\n :param stride:\n :param padding:\n :param data_format:\n :return:\n \"\"\"\n if stride is None:\n stride = kernel_size\n\n kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \\\n else [1, 1, kernel_size, kernel_size]\n\n strides = [1, stride, stride, 1] if data_format == 'NHWC' else [1, 1, stride, stride]\n\n return tf.nn.avg_pool(input=inputdata, ksize=kernel, strides=strides, padding=padding,\n data_format=data_format, name=name)\n\n @staticmethod\n def globalavgpooling(inputdata, data_format='NHWC', name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :param data_format:\n :return:\n \"\"\"\n assert inputdata.shape.ndims == 4\n assert data_format in ['NHWC', 'NCHW']\n\n axis = [1, 2] if data_format == 'NHWC' else [2, 3]\n\n return tf.reduce_mean(input_tensor=inputdata, axis=axis, name=name)\n\n @staticmethod\n def layernorm(inputdata, epsilon=1e-5, use_bias=True, use_scale=True,\n data_format='NHWC', name=None):\n \"\"\"\n :param name:\n :param inputdata:\n :param epsilon: epsilon to avoid divide-by-zero.\n :param use_bias: whether to use the extra affine transformation or not.\n :param use_scale: whether to use the extra affine transformation or not.\n :param data_format:\n :return:\n \"\"\"\n shape = inputdata.get_shape().as_list()\n ndims = len(shape)\n assert ndims in [2, 4]\n\n mean, var = tf.nn.moments(inputdata, list(range(1, len(shape))), keep_dims=True)\n\n if data_format == 'NCHW':\n channnel = shape[1]\n new_shape = [1, channnel, 1, 1]\n else:\n channnel = shape[-1]\n new_shape = [1, 1, 1, channnel]\n if ndims == 2:\n new_shape = [1, channnel]\n\n if use_bias:\n beta = 
tf.compat.v1.get_variable('beta', [channnel], initializer=tf.constant_initializer())\n beta = tf.reshape(beta, new_shape)\n else:\n beta = tf.zeros([1] * ndims, name='beta')\n if use_scale:\n gamma = tf.compat.v1.get_variable('gamma', [channnel], initializer=tf.constant_initializer(1.0))\n gamma = tf.reshape(gamma, new_shape)\n else:\n gamma = tf.ones([1] * ndims, name='gamma')\n\n return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)\n\n @staticmethod\n def instancenorm(inputdata, epsilon=1e-5, data_format='NHWC', use_affine=True, name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :param epsilon:\n :param data_format:\n :param use_affine:\n :return:\n \"\"\"\n shape = inputdata.get_shape().as_list()\n if len(shape) != 4:\n raise ValueError(\"Input data of instancebn layer has to be 4D tensor\")\n\n if data_format == 'NHWC':\n axis = [1, 2]\n ch = shape[3]\n new_shape = [1, 1, 1, ch]\n else:\n axis = [2, 3]\n ch = shape[1]\n new_shape = [1, ch, 1, 1]\n if ch is None:\n raise ValueError(\"Input of instancebn require known channel!\")\n\n mean, var = tf.nn.moments(inputdata, axis, keep_dims=True)\n\n if not use_affine:\n return tf.divide(inputdata - mean, tf.sqrt(var + epsilon), name='output')\n\n beta = tf.compat.v1.get_variable('beta', [ch], initializer=tf.constant_initializer())\n beta = tf.reshape(beta, new_shape)\n gamma = tf.compat.v1.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))\n gamma = tf.reshape(gamma, new_shape)\n return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name)\n\n @staticmethod\n def dropout(inputdata, keep_prob, noise_shape=None, name=None):\n \"\"\"\n\n :param name:\n :param inputdata:\n :param keep_prob:\n :param noise_shape:\n :return:\n \"\"\"\n return tf.nn.dropout(inputdata, keep_prob=keep_prob, noise_shape=noise_shape, name=name)\n\n @staticmethod\n def fullyconnect(inputdata, out_dim, w_init=None, b_init=None,\n use_bias=True, name=None):\n \"\"\"\n Fully-Connected layer, takes a N>1D tensor and returns a 2D tensor.\n It is an equivalent of `tf.layers.dense` except for naming conventions.\n\n :param inputdata: a tensor to be flattened except for the first dimension.\n :param out_dim: output dimension\n :param w_init: initializer for w. Defaults to `variance_scaling_initializer`.\n :param b_init: initializer for b. 
Defaults to zero\n :param use_bias: whether to use bias.\n :param name:\n :return: tf.Tensor: a NC tensor named ``output`` with attribute `variables`.\n \"\"\"\n shape = inputdata.get_shape().as_list()[1:]\n if None not in shape:\n inputdata = tf.reshape(inputdata, [-1, int(np.prod(shape))])\n else:\n inputdata = tf.reshape(inputdata, tf.stack([tf.shape(inputdata)[0], -1]))\n\n if w_init is None:\n w_init = tf.keras.initializers.VarianceScaling()\n if b_init is None:\n b_init = tf.constant_initializer()\n\n ret = tf.layers.dense(inputs=inputdata, activation=lambda x: tf.identity(x, name='output'),\n use_bias=use_bias, name=name,\n kernel_initializer=w_init, bias_initializer=b_init,\n trainable=True, units=out_dim)\n return ret\n\n @staticmethod\n def layerbn(inputdata, is_training, name, scale=True):\n \"\"\"\n\n :param inputdata:\n :param is_training:\n :param name:\n :param scale:\n :return:\n \"\"\"\n\n return tf.compat.v1.layers.batch_normalization(inputs=inputdata, training=is_training, name=name, scale=scale)\n\n @staticmethod\n def layergn(inputdata, name, group_size=32, esp=1e-5):\n \"\"\"\n\n :param inputdata:\n :param name:\n :param group_size:\n :param esp:\n :return:\n \"\"\"\n with tf.compat.v1.variable_scope (name):\n inputdata = tf.transpose(inputdata, [0, 3, 1, 2])\n n, c, h, w = inputdata.get_shape().as_list()\n group_size = min(group_size, c)\n inputdata = tf.reshape(inputdata, [-1, group_size, c // group_size, h, w])\n mean, var = tf.nn.moments(inputdata, [2, 3, 4], keep_dims=True)\n inputdata = (inputdata - mean) / tf.sqrt(var + esp)\n\n # 每个通道的gamma和beta\n gamma = tf.Variable(tf.constant(1.0, shape=[c]), dtype=tf.float32, name='gamma')\n beta = tf.Variable(tf.constant(0.0, shape=[c]), dtype=tf.float32, name='beta')\n gamma = tf.reshape(gamma, [1, c, 1, 1])\n beta = tf.reshape(beta, [1, c, 1, 1])\n\n # 根据论文进行转换 [n, c, h, w, c] 到 [n, h, w, c]\n output = tf.reshape(inputdata, [-1, c, h, w])\n output = output * gamma + beta\n output = tf.transpose(output, [0, 2, 3, 1])\n\n return output\n\n @staticmethod\n def squeeze(inputdata, axis=None, name=None):\n \"\"\"\n\n :param inputdata:\n :param axis:\n :param name:\n :return:\n \"\"\"\n return tf.squeeze(input=inputdata, axis=axis, name=name)\n\n @staticmethod\n def deconv2d(inputdata, out_channel, kernel_size, padding='SAME',\n stride=1, w_init=None, b_init=None,\n use_bias=True, activation=None, data_format='channels_last',\n trainable=True, name=None):\n \"\"\"\n Packing the tensorflow conv2d function.\n :param name: op name\n :param inputdata: A 4D tensorflow tensor which ust have known number of channels, but can have other\n unknown dimensions.\n :param out_channel: number of output channel.\n :param kernel_size: int so only support square kernel convolution\n :param padding: 'VALID' or 'SAME'\n :param stride: int so only support square stride\n :param w_init: initializer for convolution weights\n :param b_init: initializer for bias\n :param activation: whether to apply a activation func to deconv result\n :param use_bias: whether to use bias.\n :param data_format: default set to NHWC according tensorflow\n :return: tf.Tensor named ``output``\n \"\"\"\n with tf.compat.v1.variable_scope (name):\n in_shape = inputdata.get_shape().as_list()\n channel_axis = 3 if data_format == 'channels_last' else 1\n in_channel = in_shape[channel_axis]\n assert in_channel is not None, \"[Deconv2D] Input cannot have unknown channel!\"\n\n padding = padding.upper()\n\n if w_init is None:\n w_init = tf.keras.initializers.VarianceScaling()\n 
if b_init is None:\n b_init = tf.constant_initializer()\n\n ret = tf.layers.conv2d_transpose(inputs=inputdata, filters=out_channel,\n kernel_size=kernel_size,\n strides=stride, padding=padding,\n data_format=data_format,\n activation=activation, use_bias=use_bias,\n kernel_initializer=w_init,\n bias_initializer=b_init, trainable=trainable,\n name=name)\n return ret\n\n @staticmethod\n def dilation_conv(input_tensor, k_size, out_dims, rate, padding='SAME',\n w_init=None, b_init=None, use_bias=False, name=None):\n \"\"\"\n\n :param input_tensor:\n :param k_size:\n :param out_dims:\n :param rate:\n :param padding:\n :param w_init:\n :param b_init:\n :param use_bias:\n :param name:\n :return:\n \"\"\"\n with tf.compat.v1.variable_scope(name):\n in_shape = input_tensor.get_shape().as_list()\n in_channel = in_shape[3]\n assert in_channel is not None, \"[Conv2D] Input cannot have unknown channel!\"\n\n padding = padding.upper()\n\n if isinstance(k_size, list):\n filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims]\n else:\n filter_shape = [k_size, k_size] + [in_channel, out_dims]\n\n if w_init is None:\n w_init = tf.keras.initializers.VarianceScaling()\n if b_init is None:\n b_init = tf.constant_initializer()\n\n w = tf.compat.v1.get_variable('W', filter_shape, initializer=w_init)\n b = None\n\n if use_bias:\n b = tf.compat.v1.get_variable('b', [out_dims], initializer=b_init)\n\n conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,\n padding=padding, name='dilation_conv')\n\n if use_bias:\n ret = tf.add(conv, b)\n else:\n ret = conv\n\n return ret\n\n @staticmethod\n def spatial_dropout(input_tensor, keep_prob, is_training, name, seed=1234):\n \"\"\"\n Spatial dropout implementation\n :param input_tensor:\n :param keep_prob:\n :param is_training:\n :param name:\n :param seed:\n :return:\n \"\"\"\n\n def f1():\n input_shape = input_tensor.get_shape().as_list()\n noise_shape = tf.constant(value=[input_shape[0], 1, 1, input_shape[3]])\n return tf.nn.dropout(input_tensor, keep_prob, noise_shape, seed=seed, name=\"spatial_dropout\")\n\n def f2():\n return input_tensor\n\n with tf.compat.v1.variable_scope(name_or_scope=name):\n\n output = tf.cond(is_training, f1, f2)\n\n return output\n\n @staticmethod\n def lrelu(inputdata, name, alpha=0.2):\n \"\"\"\n\n :param inputdata:\n :param alpha:\n :param name:\n :return:\n \"\"\"\n with tf.compat.v1.variable_scope(name):\n return tf.nn.relu(inputdata) - alpha * tf.nn.relu(-inputdata)\n" ]
[ [ "tensorflow.cond", "tensorflow.concat", "tensorflow.zeros", "tensorflow.nn.max_pool", "tensorflow.layers.conv2d_transpose", "tensorflow.keras.initializers.VarianceScaling", "tensorflow.nn.depthwise_conv2d", "tensorflow.nn.atrous_conv2d", "tensorflow.nn.conv2d", "tensorflow.nn.moments", "tensorflow.squeeze", "tensorflow.add", "tensorflow.compat.v1.layers.batch_normalization", "tensorflow.compat.v1.variable_scope", "tensorflow.nn.dropout", "tensorflow.nn.batch_normalization", "tensorflow.nn.sigmoid", "tensorflow.shape", "tensorflow.compat.v1.get_variable", "tensorflow.identity", "tensorflow.nn.avg_pool", "tensorflow.split", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.transpose", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.ones", "tensorflow.constant_initializer", "numpy.prod", "tensorflow.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
AlbertiPot/darts
[ "ad6d458ac5a556ab3ce876bcc46ab4dfcf73ed38" ]
[ "cnn/architect.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\n\nclass Architect(object):\n\n def __init__(self, model, args):\n self.network_momentum = args.momentum\n self.network_weight_decay = args.weight_decay\n self.model = model\n self.optimizer = torch.optim.Adam(self.model.arch_parameters(), #### 指定优化结构参数\n lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)\n\n \n # target:以一阶还是二阶估计计算结构参数在验证集上的梯度\n def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):\n self.optimizer.zero_grad()\n if unrolled:\n self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer)\n else:\n self._backward_step(input_valid, target_valid) \n \n self.optimizer.step() # 上述反传后,更新结构参数的梯度\n\n # target:一阶估计 → 当sigma=0时,为一阶估计梯度,即用w估计w*的梯度,计算Lval(w,a)对a的梯度\n def _backward_step(self, input_valid, target_valid):\n loss = self.model._loss(input_valid, target_valid) # model._loss在model_search.py的Network类中定义,用于计算网络的loss,_loss函数包含 1)logits = model(inputs);2)loss = criterion(logits,targets)\n loss.backward()\n\n # target:二阶梯度估计 → 更新各个参数的梯度\n def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):\n unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer) # 在训练集计算w` = w - xigma*Ltrain(w,a),以w`创建新模型\n unrolled_loss = unrolled_model._loss(input_valid, target_valid)\n\n unrolled_loss.backward() # 以w`创建的模型计算一次前向\n dalpha = [v.grad for v in unrolled_model.arch_parameters()] # 提取结构参数的梯度\n vector = [v.grad.data for v in unrolled_model.parameters()] # 提取权重在val上的梯度 ##### 注意这里unrolled模型的使命到此为止,其模型仅仅是为了计算w`,提取出参数到vector和dalpha后放弃\n implicit_grads = self._hessian_vector_product(vector, input_train, target_train)\n\n for g, ig in zip(dalpha, implicit_grads): # w* 的梯度减去二阶梯度估计后,得到新的梯度\n g.data.sub_(other = ig.data, alpha = eta)\n\n for v, g in zip(self.model.arch_parameters(), dalpha): # 将上一个循环得到新的梯度赋值给现在的梯度,类似于backward()\n if v.grad is None:\n v.grad = Variable(g.data)\n else:\n v.grad.data.copy_(g.data)\n \n # target:手动计算一次w在train上的梯度,计算w` = w - xigma*Ltrain(w,a)\n # return:一个以w`创建的新模型,a不变\n def _compute_unrolled_model(self, input, target, eta, network_optimizer): \n loss = self.model._loss(input, target) # 计算train上的loss\n theta = _concat(self.model.parameters()).data # 将模型的权重拼接为1行后,用.data取出tensor本体数据,舍弃grad, grad_fn等额外的反向图计算过程需要的信息\n try:\n # optimizer.state 是一个字典,key是网络的权重,value是上一步的动量,本行是更新动量\n moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(self.network_momentum) # 从optimizer的缓存中提取上一步的动量,乘上动量系数构成这一步的动量\n except:\n moment = torch.zeros_like(theta)\n\n # torch.autograd.grad是计算形参第一个变量对第二个变量的梯度和\n dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay*theta # 总梯度:weight_decay×权重 + loss对w的梯度 \n # 手动更新好算子的权重参数后,创建一个保留结构参数的新的模型\n unrolled_model = self._construct_model_from_theta(theta.sub(other=moment+dtheta, alpha = eta)) # sub(eta, moment+dtheta)更改为sub(other,*,alpha) → w` = w -signma(moment+J_train(loss对w的梯度) + weight_decay*w)\n return unrolled_model\n\n # target: 创建一个结构不变的模型,其权重是w` = w-sita*L_train(w,a),用这个模型前向一次得到梯度后不在使用\n def _construct_model_from_theta(self, theta):\n model_new = self.model.new() # 创建一个保留结构参数的新模型\n model_dict = self.model.state_dict()\n\n params, offset = {}, 0\n for k, v in 
self.model.named_parameters():\n v_length = np.prod(v.size()) # 计算一层参数的长度\n params[k] = theta[offset: offset+v_length].view(v.size())\n offset += v_length\n\n assert offset == len(theta)\n model_dict.update(params) # 将截取的参数放入字典中\n model_new.load_state_dict(model_dict) # 将前向一次的参数赋给新的模型\n return model_new.cuda()\n\n # target: 计算前向和后向w±模型对结构参数的梯度\n def _hessian_vector_product(self, vector, input, target, r=1e-2):\n R = r / _concat(vector).norm() # vector是权重参数的梯度的2范数\n \n # 计算train 上 w+对a的梯度\n for p, v in zip(self.model.parameters(), vector):\n p.data.add_(other = v, alpha = R) # p.data in_place加更新模型的参数,即w+ = w+R*v\n loss = self.model._loss(input, target)\n grads_p = torch.autograd.grad(loss, self.model.arch_parameters()) # p = positive前向一步\n\n \n for p, v in zip(self.model.parameters(), vector):\n p.data.sub_(other = v, alpha = 2*R) # w- = w-2R*v /v 是w`对a的梯度\n loss = self.model._loss(input, target)\n grads_n = torch.autograd.grad(loss, self.model.arch_parameters()) # n = negative,后向一步\n\n for p, v in zip(self.model.parameters(), vector): # 计算完后恢复梯度(w- + v*R → w)\n p.data.add_(other = v, alpha = R)\n\n return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]\n\n" ]
[ [ "torch.zeros_like", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ubsuny/data-analysis-final20
[ "96bd050527f31aee49eee08de4c8e27d02558ec2" ]
[ "GPS/utility/tests/least_squares_test.py" ]
[ "# So we can access the utility files:\nimport sys\nsys.path.append('../')\n\nimport numpy as np\n\n# The main library:\nfrom least_squares import least_squares, chi2, rmse\n\n# The test:\ndef test_least_squares():\n # First, we can test in the base case where it's a polynomial of the form\n x = np.linspace(0, 10, 1000)\n y = x**2\n\n # Due to all of the multiplication going on, the constants might not \n # properly zero out. This is ok because we know that they should be zero.\n coefficients = np.round(least_squares(y, x), decimals = 10)\n \n # Coefficients are ordered by increasing power\n assert(coefficients[0] == 0)\n assert(coefficients[1] == 0)\n assert(coefficients[2] == 1)\n\n # Then, chi2. This should be less than 1e-10 since we rounded by that decimal.\n\n f = coefficients[0] + coefficients[1]*x + coefficients[2]*x**2\n\n # We introduce neglible STD so that we know that our results are consistent. \n assert(chi2(y, f, 1) < 1e-10)\n\n # RMSE as well. But this should be sqrt(1e-10/1000) \n assert(rmse(y, f, 1) < np.sqrt(1e-10/1000))\n\n # We can also do the linear case for the equation y = 5x + 10\n y = 5*x + 10\n\n coefficients = np.round(least_squares(y, x), decimals = 10)\n \n # This test is good because it shows that if you overshoot with the degree\n # least-squares will still return the lesser polynomial.\n # Coefficients are ordered by increasing power\n assert(coefficients[0] == 10)\n assert(coefficients[1] == 5)\n assert(coefficients[2] == 0)\n\n # And if we perturb the data a bit we can show that the error increases.\n x_p = x + np.random.choice(np.linspace(-0.1, 0.1, 1000))\n\n coefficients_p = np.round(least_squares(y, x_p), decimals = 10)\n \n f = coefficients[0] + coefficients[1]*x + coefficients[2]*x**2\n f_p = coefficients_p[0] + coefficients_p[1]*x + coefficients_p[2]*x**2\n\n assert(chi2(y, f, 1) < chi2(y, f_p, 1))\n\n # We don't have to worry about the case where \n # x_0 = x_1 = x_2 = ... x_P because \n # Vandermonde matrices force away singularity. \n" ]
[ [ "numpy.sqrt", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhoulei-biubiu/pytorch-lightning
[ "09669028d5b56914c4f7381862d9151d09947a98" ]
[ "tests/utilities/test_apply_func.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numbers\nfrom collections import namedtuple, OrderedDict\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection, apply_to_collections\n\n\ndef test_recursive_application_to_collection():\n ntc = namedtuple('Foo', ['bar'])\n\n to_reduce = {\n 'a': torch.tensor([1.]), # Tensor\n 'b': [torch.tensor([2.])], # list\n 'c': (torch.tensor([100.]), ), # tuple\n 'd': ntc(bar=5.), # named tuple\n 'e': np.array([10.]), # numpy array\n 'f': 'this_is_a_dummy_str', # string\n 'g': 12., # number\n }\n\n expected_result = {\n 'a': torch.tensor([2.]),\n 'b': [torch.tensor([4.])],\n 'c': (torch.tensor([200.]), ),\n 'd': ntc(bar=torch.tensor([10.])),\n 'e': np.array([20.]),\n 'f': 'this_is_a_dummy_str',\n 'g': 24.,\n }\n\n reduced = apply_to_collection(to_reduce, (torch.Tensor, numbers.Number, np.ndarray), lambda x: x * 2)\n\n assert isinstance(reduced, dict), ' Type Consistency of dict not preserved'\n assert all([x in reduced for x in to_reduce.keys()]), 'Not all entries of the dict were preserved'\n assert all([isinstance(reduced[k], type(expected_result[k])) for k in to_reduce.keys()]), \\\n 'At least one type was not correctly preserved'\n\n assert isinstance(reduced['a'], torch.Tensor), 'Reduction Result of a Tensor should be a Tensor'\n assert torch.allclose(expected_result['a'], reduced['a']), \\\n 'Reduction of a tensor does not yield the expected value'\n\n assert isinstance(reduced['b'], list), 'Reduction Result of a list should be a list'\n assert all([torch.allclose(x, y) for x, y in zip(reduced['b'], expected_result['b'])]), \\\n 'At least one value of list reduction did not come out as expected'\n\n assert isinstance(reduced['c'], tuple), 'Reduction Result of a tuple should be a tuple'\n assert all([torch.allclose(x, y) for x, y in zip(reduced['c'], expected_result['c'])]), \\\n 'At least one value of tuple reduction did not come out as expected'\n\n assert isinstance(reduced['d'], ntc), 'Type Consistency for named tuple not given'\n assert isinstance(reduced['d'].bar, numbers.Number), \\\n 'Failure in type promotion while reducing fields of named tuples'\n assert reduced['d'].bar == expected_result['d'].bar\n\n assert isinstance(reduced['e'], np.ndarray), 'Type Promotion in reduction of numpy arrays failed'\n assert reduced['e'] == expected_result['e'], \\\n 'Reduction of numpy array did not yield the expected result'\n\n assert isinstance(reduced['f'], str), 'A string should not be reduced'\n assert reduced['f'] == expected_result['f'], 'String not preserved during reduction'\n\n assert isinstance(reduced['g'], numbers.Number), 'Reduction of a number should result in a number'\n assert reduced['g'] == expected_result['g'], 'Reduction of a number did not yield the desired result'\n\n # mapping support\n reduced = apply_to_collection({'a': 1, 'b': 2}, int, lambda x: str(x))\n assert reduced == {'a': '1', 'b': '2'}\n reduced = 
apply_to_collection(OrderedDict([('b', 2), ('a', 1)]), int, lambda x: str(x))\n assert reduced == OrderedDict([('b', '2'), ('a', '1')])\n\n\ndef test_apply_to_collection_include_none():\n to_reduce = [1, 2, 3.4, 5.6, 7]\n\n def fn(x):\n if isinstance(x, float):\n return x\n\n reduced = apply_to_collection(to_reduce, (int, float), fn)\n assert reduced == [None, None, 3.4, 5.6, None]\n\n reduced = apply_to_collection(to_reduce, (int, float), fn, include_none=False)\n assert reduced == [3.4, 5.6]\n\n\ndef test_apply_to_collections():\n to_reduce_1 = {'a': {'b': [1, 2]}, 'c': 5}\n to_reduce_2 = {'a': {'b': [3, 4]}, 'c': 6}\n\n def fn(a, b):\n return a + b\n\n # basic test\n reduced = apply_to_collections(to_reduce_1, to_reduce_2, int, fn)\n assert reduced == {'a': {'b': [4, 6]}, 'c': 11}\n\n with pytest.raises(KeyError):\n # strict mode - if a key does not exist in both we fail\n apply_to_collections({**to_reduce_2, 'd': 'foo'}, to_reduce_1, float, fn)\n\n # multiple dtypes\n reduced = apply_to_collections(to_reduce_1, to_reduce_2, (list, int), fn)\n assert reduced == {'a': {'b': [1, 2, 3, 4]}, 'c': 11}\n\n # wrong dtype\n reduced = apply_to_collections(to_reduce_1, to_reduce_2, (list, int), fn, wrong_dtype=int)\n assert reduced == {'a': {'b': [1, 2, 3, 4]}, 'c': 5}\n\n # list takes precedence because it is the type of data1\n reduced = apply_to_collections([1, 2, 3], [4], (int, list), fn)\n assert reduced == [1, 2, 3, 4]\n\n # different sizes\n with pytest.raises(AssertionError, match='Sequence collections have different sizes'):\n apply_to_collections([[1, 2], [3]], [4], int, fn)\n\n def fn(a, b):\n return a.keys() | b.keys()\n\n # base case\n reduced = apply_to_collections(to_reduce_1, to_reduce_2, dict, fn)\n assert reduced == {'a', 'c'}\n\n # type conversion\n to_reduce = [(1, 2), (3, 4)]\n reduced = apply_to_collections(to_reduce, to_reduce, int, lambda *x: sum(x))\n assert reduced == [(2, 4), (6, 8)]\n\n # named tuple\n foo = namedtuple('Foo', ['bar'])\n to_reduce = [foo(1), foo(2), foo(3)]\n reduced = apply_to_collections(to_reduce, to_reduce, int, lambda *x: sum(x))\n assert reduced == [foo(2), foo(4), foo(6)]\n\n # passing none\n reduced1 = apply_to_collections([1, 2, 3], None, int, lambda x: x * x)\n reduced2 = apply_to_collections(None, [1, 2, 3], int, lambda x: x * x)\n assert reduced1 == reduced2 == [1, 4, 9]\n reduced = apply_to_collections(None, None, int, lambda x: x * x)\n assert reduced is None\n" ]
[ [ "torch.allclose", "numpy.array", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PengFCB/Traffic_Anomaly_Detection_System_UCAS
[ "30675b3fcb5ed65bf64daa782e271f45f27d40e2" ]
[ "main.py" ]
[ "import cal_distance\nimport cal_f1\nimport csv_dealer\nimport get_gps\nimport get_Sit\nimport K_Means\nimport nmf_sklearn\nimport probability\nimport numpy as np\nimport os\n\n\ndef check_dic():\n path_list = ['final_test_cut', 'test_csv_cut', 'training_csv_cut', 'result', 'result1']\n\n for path in path_list:\n if not os.path.exists('./data/'+path):\n os.mkdir('./data/'+path)\n\n\ndef total():\n check_dic()\n\n csv_dealer.csv_deal_to_file(\"./data/training_csv/\",no_total=1,to_addr=\"./data/training_csv_cut\")\n csv_dealer.csv_deal_to_file('./data/test_csv/',no_total=1,to_addr='./data/test_csv_cut')\n csv_dealer.csv_deal_to_file('./data/final_test/',no_total=1,to_addr='./data/final_test_cut')\n\n mat_list = nmf_sklearn.generate_new_mat_list('./data/training_csv')\n total_mat = csv_dealer.mat_list_to_total(mat_list)\n np.savetxt(\"./data/total.csv\", total_mat, delimiter=',', fmt=\"%f\")\n w, h = nmf_sklearn.nmf_sklearn(3,total_mat)\n K_Means.k_means(w,250,new_file_addr='./data/class_list.txt')\n\n probability.run()\n get_Sit.run()\n cal_f1.run()\n\n\nif __name__=='__main__':\n total()\n\n\n\n\n\n" ]
[ [ "numpy.savetxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wilsonify/DSIRP
[ "72747f177956952369df287ad9cc833458e0e205" ]
[ "timing.py" ]
[ "\n\nimport os\n\ndef etime():\n \"\"\"Measures user and system time this process has used.\n\n Returns the sum of user and system time.\"\"\"\n user, sys, chuser, chsys, real = os.times()\n return user+sys\n\ndef time_func(func, n):\n \"\"\"Run a function and return the elapsed time.\n\n func: function\n n: problem size, passed as an argument to func\n\n returns: user+sys time in seconds\n \"\"\"\n start = etime()\n func(n)\n end = etime()\n elapsed = end - start\n return elapsed\n\ndef run_timing_test(func, start_at=10, max_time=1):\n \"\"\"Tests the given function with a range of values for n.\n\n func: function object\n\n returns: list of ns and a list of run times.\n \"\"\"\n ns = []\n ts = []\n for i in range(start_at, 28):\n n = 2**i\n t = time_func(func, n)\n print(n, t)\n if t > 0:\n ns.append(n)\n ts.append(t)\n if t > max_time:\n break\n\n return ns, ts\n\ndef fit(ns, ts, exp=1.0, index=-1):\n \"\"\"Fits a curve with the given exponent.\n\n ns: sequence of problem sizes\n ts: sequence of times\n exp: exponent of the fitted curve\n index: index of the element the fitted line should go through\n\n returns: sequence of fitted times\n\n\n \"\"\"\n # Use the element with the given index as a reference point,\n # and scale all other points accordingly.\n nref = ns[index]\n tref = ts[index]\n\n tfit = []\n for n in ns:\n ratio = n / nref\n t = ratio**exp * tref\n tfit.append(t)\n\n return tfit\n\nimport matplotlib.pyplot as plt\n\ndef plot_timing_test(ns, ts, label='', color='C0', exp=1.0, scale='log'):\n \"\"\"Plots data and a fitted curve.\n\n ns: sequence of n (problem size)\n ts: sequence of t (run time)\n label: string label for the data curve\n color: string color for the data curve\n exp: exponent (slope) for the fitted curve\n scale: string passed to xscale and yscale\n \"\"\"\n ts_fit = fit(ns, ts, exp)\n fit_label = 'exp = %d' % exp\n plt.plot(ns, ts_fit, label=fit_label, color='0.7', linestyle='dashed')\n plt.plot(ns, ts, 'o-', label=label, color=color, alpha=0.7)\n plt.xlabel('Problem size (n)')\n plt.ylabel('Runtime (seconds)')\n plt.xscale(scale)\n plt.yscale(scale)\n plt.legend()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.xscale", "matplotlib.pyplot.yscale", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Chiraagkv/Driver-distraction-detection
[ "633115c3e3a861ad267d8fd5db5878666d6f4d4b" ]
[ "backend.py" ]
[ "import tensorflow as tf\r\nimport tensorflow_hub as hub\r\n# import streamlit as st\r\n# import matplotlib \r\n# import matplotlib.pyplot as plt\r\nimport os\r\nimport pandas\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps\r\n\r\n# What I have to do:\r\n\r\n# 1. Get create_batches\r\n# 2. Get unbatchify\r\n# 3. download the model.h5 thing\r\n# 4. Load it\r\n# 5. Predict\r\n# 6. Plot\r\n# 7. Streamlit stuff\r\n\r\n\r\nbreednames = np.array([\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\",\r\n \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"del\", \"nothing\", \"space\"])\r\nIMAGE_SIZE = 200\r\ndef bgr_to_rgb(input, name=None):\r\n \"\"\"\r\n Convert a BGR image to RGB.\r\n Args:\r\n input: A 3-D (`[H, W, 3]`) or 4-D (`[N, H, W, 3]`) Tensor.\r\n name: A name for the operation (optional).\r\n Returns:\r\n A 3-D (`[H, W, 3]`) or 4-D (`[N, H, W, 3]`) Tensor.\r\n \"\"\"\r\n bgr = tf.unstack(input, axis=-1)\r\n b, g, r = bgr[0], bgr[1], bgr[2]\r\n return tf.stack([r, g, b], axis=-1)\r\n\r\n\r\ndef process_image(img):\r\n\tdata = np.ndarray(shape=(1, 200, 200, 3), dtype=np.float32)\r\n\t# image = tf.constant(img, dtype=\"float32\")\r\n\r\n\timage = tf.image.resize(img, [200, 200])\r\n\timage = bgr_to_rgb(image)\r\n\r\n\timage_array = np.asarray(image)\r\n\tnormalized_image_array = (image_array.astype(np.float32) / 127.0) -1\r\n\r\n\tdata[0] = normalized_image_array\r\n\treturn data\r\n\r\ndef preds_to_text(prediction_proba):\r\n return breednames[np.argmax(prediction_proba)]\r\n\r\n\r\ndef load_model(model_path):\r\n print(f'Loading model from: {model_path}...')\r\n model = tf.keras.models.load_model(model_path, \r\n custom_objects={\"KerasLayer\": hub.KerasLayer})\r\n return model\r\n\r\ndef predict_custom(image, model):\r\n\t\r\n\tcustom_data = process_image(image)\r\n\tcustom_preds = model.predict(custom_data)\r\n\tconf = f'{np.max(custom_preds[0])* 100:.2f}%'\r\n\tcustom_preds_labels = preds_to_text(custom_preds)\r\n\treturn custom_preds_labels, conf\r\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.unstack", "numpy.asarray", "tensorflow.stack", "numpy.ndarray", "numpy.max", "tensorflow.image.resize", "numpy.argmax", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YoussefSaied/ML2
[ "db3215fe2f1e2400e39a85f1550733447af9955c" ]
[ "MainY23.py" ]
[ "# %% Global parameters\n#Our variables:\nYoussefPathModel= '/home/youssef/EPFL/MA1/Machine learning/MLProject2/ML2/youssefServer4.modeldict' # Path of the weights of the model\nYoussefdatapath = '/home/youssef/EPFL/MA1/Machine learning/MLProject2/Data' # Path of data\nYoussefServerPathModel= '/home/saied/ML/ML2/youssefServer23.modeldict' # Path of weights of the Model\n#Server 5 is init(Batchnorm), not balanced, 128 auc=0.7 after 10 epochs \n#Server 6 is init(Batchnorm), balanced, 128 auc=0.7/0.64 after 2/10 epochs\n#Server 7 is init(Batchnorm), balanced, 8 auc=0.74/0.7 after 1/5 epochs\n#Server 9 is init(Batchnorm), balanced, 4 auc=0.74 after 1/5 epochs \n#Server 8 is init(Batchnorm), balanced, 128, weightdecay =0.0001 auc =0.64 after 4 epochs \n#Server 10 is SIMPLE is init(Batchnorm), not balanced, 8 auc=0.65/0.7 after 5/15 epochs (redo)\n#Server 11 is init(Batchnorm), not balanced, 8 auc= 0.72 after 1 epochs\n#Server 12 is Data augmented, init(Batchnorm), balanced, 8, weightdecay =0.0001 auc=0.825/?? after 10/?? epochs (best) (redo decrease weight decay) (increase lr) (parallelization)\n#Server 16 is Data augmented, init(Batchnorm), balanced, 8, weightdecay =0 auc=??/?? after ??/?? epochs \n#Server 13 is Data augmented, init(Batchnorm), balanced, 128 auc=??/?? after ??/?? epochs (best?)\n#Server 14 is Data augmented, SIMPLE, init(Batchnorm), balanced, 128 auc=??/?? after ??/?? epochs\n#Server 15 is Data augmented, SIMPLE, init(Batchnorm), balanced, 8 auc=??/?? after ??/?? epochs\n#Server 20 is TRANSFERLEARNING, init(Batchnorm), balanced, 128, auc=??/?? after ??/?? epochs\nYoussefServerdatapath = '/data/mgeiger/gg2/data' # Path of data\nYoussefServerPicklingPath = '/home/saied/ML/ML2/' # Path for pickling \nYoussefPicklingPath = '/home/youssef/EPFL/MA1/Machine learning/MLProject2/ML2/Predictions/' # Path for pickling \nYoussefPathDataset= '/home/youssef/EPFL/MA1/Machine learning/MLProject2/traintestsets.pckl' # Path of training and test dataset\nYoussefServerPathDataset= '/home/saied/ML/ML2/traintestsets.pckl' # Path of training and test dataset\n\n#Global variables (booleans):\ntransfer_learning=0\ninit_batchnormv =1\nuse_parallelization=1\nsimple =0\ndata_augmentation =1\nuse_saved_model =1\nsave_trained_model=1\ntrain_or_not =1\nepochs =5\nOnServer =1\nif OnServer:\n PicklingPath=YoussefServerPicklingPath\n PathModel= YoussefServerPathModel\n PathDataset =YoussefServerPathDataset\n datapath = YoussefServerdatapath\nelse:\n PicklingPath=YoussefPicklingPath\n PathModel= YoussefPathModel\n PathDataset =YoussefPathDataset\n datapath = Youssefdatapath\nproportion_traindata = 0.8 # the proportion of the full dataset used for training\nprintevery = 1000\n\nprint(\"Server23\")\n\n# %% Import Dataset and create trainloader \nimport datasetY as dataset\nimport torch\nimport importlib\nfrom datasetY import BalancedBatchSampler, BalancedBatchSampler2, random_splitY, accuracy, load_GG2_imagesTransfer, load_GG2_images2\nimport itertools\nimport numpy as np\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n#importlib.reload(module)\n\n\n\n\n# Pickling datasets\n\nif transfer_learning:\n transform=load_GG2_imagesTransfer\nelse:\n transform=load_GG2_images2\n\nimport os\nif os.path.isfile(PathDataset):\n if os.stat(PathDataset).st_size > 0:\n import pickle\n with open(PathDataset, 'rb') as pickle_file:\n [full_dataset,trainset,testset] = pickle.load(pickle_file)\n full_dataset.transform=transform\n trainset.transform=transform\n testset.transform=transform\n 
print(\"Loading datasets...\")\nelse: \n full_dataset = dataset.GG2(datapath,data_augmentation=False,transform=transform)\n\n # To split the full_dataset\n train_size = int(proportion_traindata * len(full_dataset))\n test_size = len(full_dataset) - train_size\n indices, sets = random_splitY(full_dataset, [train_size, test_size])\n [trainset, testset]=sets\n\n import pickle\n with open(PathDataset, 'wb') as pickle_file:\n pickle.dump([full_dataset,trainset,testset],pickle_file)\n print(\"Creating and pickling datasets...\")\n\n# Data augmentation\n\nif data_augmentation:\n full_dataset.data_augmentation=True\n trainset.data_augmentation=True\n testset.data_augmentation=True\n\nprint(len(trainset))\n\n# Dataloaders\n\nbatch_sizev=24\ntest_batch_size = 1\n\nsamplerv= BalancedBatchSampler2(trainset)\nsamplertest = BalancedBatchSampler2(testset)\n\ntrainloader = torch.utils.data.DataLoader(trainset, sampler=samplerv, shuffle=False, batch_size= batch_sizev)\ntestloader = torch.utils.data.DataLoader(testset, sampler=None, shuffle =True, batch_size= test_batch_size)\nROCloader = torch.utils.data.DataLoader(testset,batch_size=1)\n# %% Import Neural network\n\nif simple:\n net = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'tf_mobilenetv3_small_minimal_100',\n pretrained=False)\n\n # Change First and Last Layer\n if not transfer_learning:\n net.conv_stem = torch.nn.Conv2d(4,16,kernel_size=(2,2),bias=False)\n net.classifier = torch.nn.Linear(1024, 1)\nelse: \n net = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'efficientnet_b0',\n pretrained=True)\n\n # Change First and Last Layer\n if not transfer_learning:\n net.conv_stem = torch.nn.Conv2d(4, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n net.classifier = torch.nn.Linear(1280, 1)\n\n\n\n\n\n\n\nif not torch.cuda.is_available() : #ie if NOT on the server\n print(net)\n\n# Replace all batch normalization layers by Instance\ndef convert_batch_to_instance(model):\n import torch.nn as nn\n for child_name, child in model.named_children():\n if isinstance(child, nn.BatchNorm2d):\n num_features= child.num_features\n setattr(model, child_name, nn.InstanceNorm2d(num_features=num_features))\n else:\n convert_batch_to_instance(child)\n\n\ndef init_batchnorm(model): # For initializing the batch normalization layers\n import torch.nn as nn\n for child_name, child in model.named_children():\n if isinstance(child, nn.BatchNorm2d):\n num_features= child.num_features\n setattr(model, child_name, nn.BatchNorm2d(num_features=num_features))\n else:\n convert_batch_to_instance(child)\n\n#convert_batch_to_instance(net)\n\nnet.to(device)\nif not transfer_learning and init_batchnormv:\n init_batchnorm(net)\n\n\n#Option to parallelize\nprint(\"There are\", torch.cuda.device_count(), \"GPUs!\")\nif torch.cuda.device_count() > 1 and use_parallelization:\n import torch.nn as nn\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n net = nn.DataParallel(net)\nnet.to(device)\n\nif not torch.cuda.is_available() : #ie if NOT on the server\n print(net)\n# %% Train Neural network\n\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nmomentumv=0.90\nlrv=10**-2\n\nprint(\"Learning rate= \"+str(lrv))\n\n\n# To calculate accuracy\nfrom sampler import accuracy\n\ndef train_accuracy(net):\n return accuracy(net, loader= trainloader,device=device)\n\ndef test_accuracy(net):\n return accuracy(net, loader= testloader,device=device)\n\ndef ROC_accuracy(net):\n return accuracy(net, loader= 
ROCloader,device=device)\n\n\n#Option to use a saved model parameters\nif use_saved_model:\n import os\n if os.path.isfile(PathModel):\n if os.stat(PathModel).st_size > 0:\n net.load_state_dict(torch.load(PathModel,map_location=torch.device(device )))\n #convert_batch_to_instance(net)\n print(\"Loading model...\")\n else: \n print(\"Empty file...\")\n print(\"Using saved model...\")\n\n\n#Training starts\n\ncriterion = nn.SoftMarginLoss()\n\nif not transfer_learning:\n optimizer = optim.SGD(net.parameters(), lr=lrv, momentum=momentumv, weight_decay=0.0001)\nelse:\n optimizer = optim.SGD(net.classifier.parameters(), lr=lrv, momentum=momentumv)\n for param in net.parameters():\n param.requires_grad = False\n for param in net.classifier.parameters():\n param.requires_grad = True\n \n \n# Decay LR by a factor of 0.1 every 7 epochs\nfrom torch.optim import lr_scheduler\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n\nnet.train()\n\nif train_or_not:\n print(\"Starting training...\")\n train_auc_list = np.array([0])\n test_auc_list = []\n train_accuracy_list = np.array([0]) # initialize: referenced by the accuracy check below\n for epoch in range(epochs): # loop over the dataset multiple times\n exp_lr_scheduler.step()\n print(\"Starting epoch %d\"%(epoch+1))\n print(\"Learning rate= \"+str(lrv))\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data[0].to(device), data[1].to(device)\n labels = torch.unsqueeze(labels, dim =1)\n labels = labels.float()\n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % printevery == printevery-1: # print every n mini-batches\n print('[%5d, %5d] loss: %.6f ' %\n (epoch+1, i + 1, running_loss/printevery) )\n running_loss = 0.0\n\n # fixing batch normalization statistics\n #print(\"Fixing batch normalization statistics...\")\n\n \n # save predictions and labels for ROC curve calculation\n print(\"Saving predictions and calculating accuracies...\")\n if False:\n #net.eval()\n predictions = []\n labels = []\n for k, testset_partial in enumerate(testloader):\n with torch.no_grad():\n if k <100000:\n testset_partial_I , testset_partial_labels = testset_partial[0].to(device), testset_partial[1].to(device)\n predictions += [net(image[None]).item() for image in testset_partial_I ]\n labels += testset_partial_labels.tolist()\n else:\n break\n net.eval()\n \n file_name= PicklingPath+\"PredictionsAndLabelsTrial1Epoch\"+str(epoch)\n import os\n if os.path.exists(file_name): # checking if there is a file with this name\n os.remove(file_name) # deleting the file\n import pickle\n with open(file_name, 'wb') as pickle_file:\n pickle.dump([predictions,labels],pickle_file)\n pickle_file.close()\n print(\"Pickling done...\")\n\n # calculate and save accuracy and stop if test accuracy increases \n if epoch%2 ==0:\n net.eval()\n test_accuracyv = ROC_accuracy(net)\n print(\"Test accuracy: %5f\"%test_accuracyv)\n if test_accuracyv< np.min(train_accuracy_list) and False:\n break\n train_accuracy_list = np.concatenate((train_accuracy_list, np.array([test_accuracyv])))\n net.train()\n\n\n # AUC for ROC curve\n \n net.eval()\n from sklearn import metrics\n predictions = []\n labels = []\n with torch.no_grad():\n if True:\n for k, testset_partial in enumerate(testloader):\n if k <1000:\n testset_partial_I , testset_partial_labels = testset_partial[0].to(device), 
testset_partial[1].to(device)\n predictions += [p.item() for p in net(testset_partial_I) ]\n labels += testset_partial_labels.tolist()\n else: break\n\n auc = metrics.roc_auc_score(labels, predictions)\n test_auc_list = np.concatenate((test_auc_list, np.array([auc]))) # accumulate the test AUC of each epoch\n if auc < np.max(test_auc_list)-0.04 and False:\n break\n print(\"Test auc: %5f\"%auc)\n\n #train_accuracy_list = np.concatenate((train_accuracy_list, np.array([auc])))\n if False:\n for k, trainset_partial in enumerate(trainloader):\n if k <100:\n trainset_partial_I , trainset_partial_labels = testset_partial[0].to(device), testset_partial[1].to(device)\n predictions += [p.item() for p in net(testset_partial_I) ]\n labels += testset_partial_labels.tolist()\n\n auc = metrics.roc_auc_score(labels, predictions)\n print(\"Train auc: %5f\"%auc)\n net.train()\n \n import os\n print(\"Pickling accuracies...\")\n file_name= PicklingPath+\"accuracies\"\n if os.path.exists(file_name): # checking if there is a file with this name\n os.remove(file_name) # deleting the file\n import pickle\n with open(file_name, 'wb') as pickle_file:\n pickle.dump(test_auc_list,pickle_file)\n pickle_file.close()\n \n print('Finished Training')\n if save_trained_model:\n import os\n if os.path.exists(PathModel): # checking if there is a file with this name\n os.remove(PathModel) # deleting the file\n torch.save(net.state_dict(), PathModel)\n print(\"Saving model...\")\n\nif torch.cuda.is_available() : #ie if on the server\n net.eval()\n test_accuracyv = test_accuracy(net)\n print(\"Test accuracy: %5f\"%test_accuracyv)\n train_accuracyv = train_accuracy(net)\n print(\"Train accuracy: %5f\"%train_accuracyv)\n import sys\n sys.exit()\n\n# %% Metrics\n\n# Testing mode for net\n#net.eval()\nif False:\n test_accuracyv = test_accuracy(net)\n print(\"Test accuracy: %5f\"%test_accuracyv)\n\nfrom sklearn import metrics\n\n# ROC curve calculation\n\n# testset_partial= iter(testloader).next()\n# testset_partial_I , testset_partial_labels = testset_partial[0], testset_partial[1] \n# predictions = [net(image[None]).item() for image in testset_partial_I ]\n\npredictions = []\nlabels = []\nwith torch.no_grad():\n if True:\n net.train() # remove\n for k, testset_partial in enumerate(testloader):\n if k <10:\n testset_partial_I , testset_partial_labels = testset_partial[0].to(device), testset_partial[1].to(device)\n predictions += [p.item() for p in net(testset_partial_I) ]\n labels += testset_partial_labels.tolist()\n else: break\n if k%100==0:\n print(k)\n\n fpr, tpr, thresholds = metrics.roc_curve(labels, predictions)\n\n # importing the required module \n import matplotlib.pyplot as plt \n \n # x axis and y axis values \n x ,y = fpr, tpr\n\n # plotting the points \n plt.plot(x, y,marker='x') \n plt.plot(x, x,marker='x')\n \n # naming the x axis \n plt.xlabel('False Positive Rate') \n # naming the y axis \n plt.ylabel('True Positive Rate') \n \n # giving a title to my graph \n plt.title('Receiver operating characteristic curve') \n \n # function to show the plot \n plt.show()\n\n # plot all ROC curves from pickle \n print(\"Pickling accuracies...\")\n\n for epoch in range(epochs): \n file_name= PicklingPath+\"PredictionsAndLabelsTrial1Epoch\"+str(epoch)\n import pickle\n with open(file_name, 'rb') as pickle_file:\n [predictions,labels] = pickle.load(pickle_file)\n pickle_file.close()\n \n fpr, tpr, thresholds = metrics.roc_curve(labels, predictions)\n\n # importing the required module \n import matplotlib.pyplot as plt \n # x axis and y axis values \n x ,y = fpr, 
tpr\n\n # plotting the points \n plt.plot(x, y,marker='x') \n plt.plot(x, x,marker='x')\n \n # naming the x axis \n plt.xlabel('False Positive Rate') \n # naming the y axis \n plt.ylabel('True Positive Rate') \n \n # giving a title to my graph \n plt.title('Receiver operating characteristic curve epoch '+str(epoch)) \n \n # function to show the plot \n plt.show()\n\n" ]
[ [ "sklearn.metrics.roc_auc_score", "torch.utils.data.DataLoader", "matplotlib.pyplot.plot", "numpy.max", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.nn.SoftMarginLoss", "torch.optim.lr_scheduler.StepLR", "matplotlib.pyplot.title", "numpy.min", "torch.nn.Conv2d", "sklearn.metrics.roc_curve", "torch.unsqueeze", "torch.nn.Linear", "torch.nn.InstanceNorm2d", "torch.nn.BatchNorm2d", "torch.cuda.device_count", "numpy.array", "torch.hub.load", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BaselLaserMouse/rt_model_orsolic
[ "19217bedfb3d973d74972b8b361628e0f366709e" ]
[ "src/gp_fit.py" ]
[ "#!/usr/bin/env python3\n\nimport json\nimport time\nfrom pathlib import Path\nfrom functools import partial\n\nimport defopt\nimport numpy as np\nimport scipy.io as io\nimport pandas as pd\nimport tensorflow as tf\nimport gpflow\nfrom sklearn.model_selection import train_test_split\n\nfrom strenum import strenum\nfrom gp_model import prepare_Xy, build_model, build_model_ard\n\n\ndef load_data(filename):\n \"\"\"load reaction time dataset\"\"\"\n\n # convert matlab data into dataframe\n mat = io.loadmat(filename)\n dset = pd.DataFrame({\n 'rt': mat['rt'].ravel() - 1,\n 'sig': mat['sig'].ravel(),\n 'sig_avg': mat['sig_avg'].ravel(),\n 'sig_std': mat['sig_std'].ravel(),\n 'session': mat['session'].ravel(),\n 'hazard': np.vstack(mat['hazard'].ravel()).ravel(),\n 'outcome': mat['outcome'].ravel(),\n 'noiseless': mat['noiseless'].ravel() != 0,\n 'ys': [y.ravel() for y in mat['ys'].flat],\n 'change': mat['change'].ravel() - 1\n })\n\n # add reaction-time from change point\n # TODO move to matlab code\n dset['rt_change'] = dset.rt - dset.change\n\n # add filename as mouse name\n # TODO move to matlab code\n dset['mouse'] = Path(filename).stem\n\n # misc. cleaning\n # TODO move to matlab code\n dset = dset.groupby('sig').filter(lambda x: len(x) > 200)\n dset = dset[~dset.noiseless] # remove noiseless sessions\n dset = dset[dset.outcome != 'abort'] # remove movement aborted trials\n\n return dset\n\n\ndef split_data(dset, fractions, seed):\n \"\"\"split dataset in train/val/test folds according to fractions\"\"\"\n\n idx_train, idx_test = train_test_split(\n dset.index, test_size=fractions[0], random_state=seed,\n stratify=dset[['sig', 'mouse', 'hazard']]\n )\n\n # test split and validation split are the same if no validation fraction\n if len(fractions) == 1:\n idx_val = idx_test\n\n else:\n val_fraction = fractions[1] / (1 - fractions[0])\n idx_train, idx_val = train_test_split(\n idx_train, test_size=val_fraction, random_state=seed,\n stratify=dset.loc[idx_train, ['sig', 'mouse', 'hazard']]\n )\n\n dset = dset.copy()\n dset['train'], dset['val'], dset['test'] = False, False, False\n dset.loc[idx_train, 'train'] = True\n dset.loc[idx_val, 'val'] = True\n dset.loc[idx_test, 'test'] = True\n\n return dset\n\n\nclass StopOptimization(Exception):\n pass\n\n\nclass Logger:\n\n def __init__(self, name, model, dset, batch_size, patience=np.inf):\n self.name = name\n self.model = model\n self.batch_size = batch_size\n self.patience = patience\n\n X, y = prepare_Xy(dset, model.n_lags, model.max_nt)\n self.X, self.y = np.vstack(X), np.vstack(y)\n\n self.logp = []\n self.best_logp = -np.inf\n self.max_iter = patience\n self.previous_time = time.time()\n\n def __call__(self, cnt):\n logp = 0\n for i in range(0, len(self.X), self.batch_size):\n j = min(i + self.batch_size, len(self.X))\n logp += self.model.predict_density(self.X[i:j], self.y[i:j]).sum()\n self.logp.append(logp)\n\n current_time = time.time()\n elapsed_time = current_time - self.previous_time\n self.previous_time = current_time\n\n elapsed = time.strftime('%H:%M:%S', time.gmtime(elapsed_time))\n current = time.strftime(\n '%Y-%m-%d %H:%M:%S', time.localtime(current_time)\n )\n print(\n '{} :: elapsed {} :: {} :: {} (stop {}) :: current {} (best {})'\n .format(current, elapsed, self.name, cnt, self.max_iter,\n self.logp[-1], self.best_logp)\n )\n\n has_improved = False\n if self.logp[-1] > self.best_logp:\n self.best_logp = self.logp[-1]\n self.max_iter = cnt + self.patience\n has_improved = True\n\n if cnt > self.max_iter:\n raise 
StopOptimization()\n\n return has_improved\n\n\n# enumeration types used to define GP model and kernel options\nHazard = strenum('Hazard', 'early late split nonsplit all')\nMeanType = strenum('MeanType', 'zero constant linear')\nHierarchy = strenum('Hierarchy', 'mouse hzrd')\nCombination = strenum('Combination', 'add mul')\nKernelInput = strenum(\n 'KernelInput', 'full time logtime wtime stim proj expproj hzrd'\n)\nKernelType = strenum(\n 'KernelType', 'RBF Linear Matern12 Matern32 Matern52 White Constant'\n)\n\n\ndef main(result_dir, *dset_filename, hazard=Hazard.nonsplit,\n mean_type=MeanType.zero, kernels_type=(KernelType.RBF,),\n kernels_input=(KernelInput.full,),\n hierarchy=(), combination=Combination.add,\n sigma=1e-1, nproj=5, ntanh=5, nz=100, batch_size=50000, nlags=50,\n learning_rate=1e-3, max_iter=1000000, patience=10000,\n max_duration=np.inf, fractions=(0.2, 0.2), threads=0,\n logger_batch_size=100000, save_train=False, save_test=False,\n load_params=None, use_ard=False):\n \"\"\"Fit a Gaussian process model to reaction time data\n\n :param str result_dir: directory for results files\n :param str dset_filename: reaction time dataset file\n :param Hazard hazard: hazard rate block type\n :param MeanType mean_type: Gaussian process mean function\n :param list[KernelType] kernels_type: kernels type\n :param list[KernelInput] kernels_input: kernels input\n :param list[Hierarchy] hierarchy: kernel hierarchical structure, if any\n :param Combination combination: kernels combination\n :param float sigma: standard deviation of Laplacian prior for projected\n kernels\n :param int nproj: number of projections in projected kernels\n :param int ntanh: number of tanh functions in warped kernels\n :param int nz: number of inducing points per mouse\n :param int batch_size: size of mini-batches\n :param int nlags: number of past stimulus to include for each observation\n :param float learning_rate: Adam learning rate\n :param int max_iter: maximum number of iterations for optimization\n :param int patience: patience parameter for early stopping\n :param int max_duration: maximum time allowed for model fit in minutes\n :param list[float] fractions: validation and test fold fractions (same sets\n if only one value provided)\n :param float threads: limit number of threads for tensorflow-cpu\n (0: no limit)\n :param int logger_batch_size: batch size for Logger objects\n :param bool save_train: save training set score\n :param bool save_test: save test set score\n :param str load_params: file used to initialize the GP model parameters\n :param bool use_ard: use ARD prior for projected kernels\n\n \"\"\"\n\n # record all inputs\n main_inputs = locals().copy()\n\n # fix seed for reproducibility\n seed = 12345\n np.random.seed(seed)\n\n # load datasets and create splits for training\n dset = pd.concat([load_data(fname) for fname in dset_filename])\n dset['mouse_code'] = dset.mouse.astype('category').cat.codes\n dset['hazard_code'] = dset.hazard.astype('category').cat.codes\n\n dset = dset[dset.hazard != 'experimental']\n if hazard == Hazard.split:\n dset = dset[dset.hazard != 'nonsplit']\n elif hazard != Hazard.all:\n dset = dset[dset.hazard == hazard]\n\n dset = split_data(dset, fractions, seed)\n\n # limit multithreading in tensorflow-cpu\n if threads > 0:\n session_conf = tf.ConfigProto(\n intra_op_parallelism_threads=threads,\n inter_op_parallelism_threads=threads\n )\n tf.Session(config=session_conf)\n\n # build the model\n model_opts = {\n 'kernels_type': kernels_type,\n 'kernels_input': 
kernels_input,\n 'hierarchy': hierarchy,\n 'combination': combination,\n 'sigma': sigma,\n 'n_proj': nproj,\n 'n_tanh': ntanh,\n 'n_z': nz,\n 'batch_size': batch_size,\n 'n_lags': nlags,\n 'max_nt': dset.ys.map(len).max(),\n 'mean_type': mean_type,\n 'hazard': hazard\n }\n if ('proj' in kernels_input) and use_ard:\n model = build_model_ard(dset[dset.train], **model_opts)\n else:\n model = build_model(dset[dset.train], **model_opts)\n\n if load_params:\n model_params = dict(np.load(load_params))\n model.assign(model_params)\n\n # save options\n result_path = Path(result_dir)\n result_path.mkdir(parents=True, exist_ok=True)\n with (result_path / 'arguments.json').open('w') as fd:\n json.dump(main_inputs, fd, indent=4, sort_keys=True)\n dset.to_pickle(result_path / 'dataset.pickle')\n np.savez(result_path / 'model_options.npz', **model_opts)\n\n # prepare logging objects\n logger = partial(Logger, model=model, batch_size=logger_batch_size)\n logger_train = logger('train', dset=dset[dset.train])\n logger_val = logger('val', dset=dset[dset.val], patience=patience)\n logger_test = logger('test', dset=dset[dset.test])\n\n n_iter_per_epoch = int(np.ceil(model.Y.shape[0] / model.Y.batch_size))\n max_time = time.time() + max_duration * 60\n best_model_path = result_path / 'model_params_best.npz'\n\n def callback(x):\n if time.time() > max_time:\n raise StopOptimization()\n if (x % n_iter_per_epoch) != 0:\n return\n if logger_val(x):\n session = model.enquire_session()\n np.savez(best_model_path, **model.read_values(session))\n if save_train:\n logger_train(x)\n if save_test:\n logger_test(x)\n\n # fit the model\n optimizer = gpflow.train.AdamOptimizer(learning_rate=learning_rate)\n try:\n optimizer.minimize(model, maxiter=max_iter, step_callback=callback)\n except StopOptimization:\n model.anchor(model.enquire_session())\n\n # save final params and log\n np.savez(result_path / 'model_params_last.npz',\n **model.read_values())\n np.savez(result_path / 'logger.npz', logp_train=logger_train.logp,\n logp_val=logger_val.logp, logp_test=logger_test.logp)\n\n\nif __name__ == \"__main__\":\n defopt.run(main, short={})\n" ]
[ [ "numpy.savez", "numpy.random.seed", "scipy.io.loadmat", "sklearn.model_selection.train_test_split", "tensorflow.ConfigProto", "numpy.ceil", "tensorflow.Session", "numpy.load", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
FeherBalazs/text-analytics-service
[ "36292f835419c9cd6266f4813731b084b2a6cf24" ]
[ "flask_deployment/sentiment_service.py" ]
[ "# -*- coding: utf-8 -*-\n\n'''\nsentiment_service.py\n~~~~~~~~~~~~~~~~~~~~\n\nApp implements a sentiment analysis pipeline. \n\n'''\nimport cPickle\nimport os\n\nfrom flask import Flask, request, jsonify\nimport pandas as pd\nimport requests\nimport sklearn\n\nresp = requests.get(\"https://raw.githubusercontent.com/crawles/gpdb_sentiment_analysis_twitter_model/master/twitter_sentiment_model.pkl\")\nresp.raise_for_status()\ncl = cPickle.loads(resp.content)\n\ndef regex_preprocess(raw_tweets):\n pp_text = pd.Series(raw_tweets)\n \n user_pat = '(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9]+)'\n http_pat = '(https?:\\/\\/(?:www\\.|(?!www))[^\\s\\.]+\\.[^\\s]{2,}|www\\.[^\\s]+\\.[^\\s]{2,})'\n repeat_pat, repeat_repl = \"(.)\\\\1\\\\1+\",'\\\\1\\\\1'\n\n pp_text = pp_text.str.replace(pat = user_pat, repl = 'USERNAME')\n pp_text = pp_text.str.replace(pat = http_pat, repl = 'URL')\n pp_text.str.replace(pat = repeat_pat, repl = repeat_repl)\n return pp_text\n\napp = Flask(__name__)\n\[email protected]('/polarity_compute', methods=['POST'])\ndef sentiment_compute():\n req = request.get_json(force=True)\n X = regex_preprocess(req['data'])\n prediction = cl.predict_proba(X)[:][:,1]\n return(jsonify({\"polarity\" : prediction.tolist()}))\n\nif __name__ == \"__main__\":\n if os.environ.get('VCAP_SERVICES') is None: # running locally\n PORT = 8080\n DEBUG = True\n else: # running on CF\n PORT = int(os.getenv(\"PORT\"))\n DEBUG = False\n\n app.run(host='0.0.0.0', port=PORT, debug=DEBUG)\n" ]
[ [ "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hsnee/LSSTC-DSFP-Sessions
[ "5d90992179c80efbd63e9ecc95fe0fef7a0d83c1" ]
[ "Session4/Day4/triangle_linear.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n'''\n This hacked version of triangle, called triangle_linear, is an adaption of the following authors open source code, their credentials below. triangle_linear is an adaption by Megan Shabram and Dan Gettings, made in 2013. Some of the original functionality may still be there but are not being used in this version. This code takes longer to run. It is calculating a 2d Gaussian kernel density estimate of 2d marginal posteriors in order to report posterior summarys. It also reports 95 % equal tailed credible intervals by importing python code written by Megan Shabram and Dan Gettings called credible_interval.py\n'''\n\n\nfrom __future__ import print_function, absolute_import, unicode_literals\n\n__all__ = [\"corner\", \"hist2d\", \"error_ellipse\"]\n__version__ = \"0.0.5\"\n__author__ = \"Dan Foreman-Mackey ([email protected])\"\n__copyright__ = \"Copyright 2013 Daniel Foreman-Mackey\"\n__contributors__ = [ # Alphabetical by first name.\n \"Ekta Patel @ekta1224\",\n \"Geoff Ryan @geoffryan\",\n \"Phil Marshall @drphilmarshall\",\n \"Pierre Gratier @pirg\",\n ]\n\nimport numpy as np\nimport matplotlib.pyplot as pl\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.patches import Ellipse\nimport matplotlib.cm as cm\nimport matplotlib.gridspec as gridspec\nimport scipy.ndimage as ndimage\nimport scipy.stats as stats\nimport kdestats\nimport credible_interval\n\ndef corner(xs, labels=None,labelsy=None, extents=None, truths=None, truth_color='black',\n scale_hist=False, quantiles=[], **kwargs):\n \"\"\"\n Make a *sick* corner plot showing the projections of a set of samples\n drawn in a multi-dimensional space.\n\n :param xs: ``(nsamples, ndim)``\n The samples. This should be a 1- or 2-dimensional array. For a 1-D\n array this results in a simple histogram. For a 2-D array, the zeroth\n axis is the list of samples and the next axis are the dimensions of\n the space.\n\n :param labels: ``ndim`` (optional)\n A list of names for the dimensions.\n\n :param truths: ``ndim`` (optional)\n A list of reference values to indicate on the plots.\n\n :param truth_color: (optional)\n A ``matplotlib`` style color for the ``truths`` makers.\n\n :param quantiles: (optional)\n A list of fractional quantiles to show on the 1-D histograms as\n vertical dashed lines.\n\n :param scale_hist: (optional)\n Should the 1-D histograms be scaled in such a way that the zero line\n is visible?\n\n \"\"\"\n\n # Deal with 1D sample lists.\n xs = np.atleast_1d(xs)\n if len(xs.shape) == 1:\n xs = np.atleast_2d(xs)\n else:\n assert len(xs.shape) == 2, \"The input sample array must be 1- or 2-D.\"\n xs = xs.T\n assert xs.shape[0] <= xs.shape[1], \"I don't believe that you want more \" \\\n \"dimensions than samples!\"\n\n K = len(xs)\n factor = 2.0 # size of one side of one panel\n lbdim = 0.5 * factor # size of left/bottom margin\n trdim = 0.05 * factor # size of top/right margin\n whspace = 0.05 # w/hspace size\n plotdim = factor * K + factor * (K - 1.) 
* whspace\n dim = lbdim + plotdim + trdim\n fig = pl.figure(figsize=(dim, dim))\n lb = lbdim / dim\n tr = (lbdim + plotdim) / dim\n fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,\n wspace=whspace, hspace=whspace)\n\n if extents is None:\n # LINEAR-SCALE\n extents = [[x.min(), x.max()] for x in xs]\n ##log##\n #extents = [[np.log10(x.min()), np.log10(x.max())] for x in xs]\n\n for i, x in enumerate(xs):\n # Plot the histograms.\n ax = fig.add_subplot(K, K, i * (K + 1) + 1)\n n, b, p = ax.hist(x, bins=kwargs.get(\"bins\", 50), range=extents[i],\n histtype=\"step\", color=kwargs.get(\"color\", \"red\"), linewidth=2.0)\n #### log10###\n # n, b, p = ax.hist(np.log10(x), bins=kwargs.get(\"bins\", 50), range=extents[i],\n # histtype=\"step\", color=kwargs.get(\"color\", \"black\"), linewidth=2.0)\n ####============================================================================\n values = credible_interval.from_histogram(n, b,[0.6827, 0.9545])\n #values = credible_interval.from_histogram(n, b,[0.3173, 0.0455])\n peak_location = b[np.argmax(n)]\n nearest_index1 = np.argmin(np.abs(b-values[0][0])) #from left\n nearest_index2 = np.argmin(np.abs(b-values[0][1])) #from right\n nearest_index3 = np.argmin(np.abs(b-peak_location))\n ax.plot([(values[0][0]+((b[1]-b[0])/2.)),(values[0][0]+((b[1]-b[0])/2.))],[0,n[nearest_index1]],color='r',linestyle='--')\n ax.plot([(values[0][1]+((b[1]-b[0])/2.)),(values[0][1]+((b[1]-b[0])/2.))],[0,n[nearest_index2]],color='r',linestyle='--')\n ax.plot([(peak_location+((b[1]-b[0])/2.)),(peak_location+((b[1]-b[0])/2.))],[0,n[nearest_index3]],color='r',linestyle='-')\n ####============================================================================\n #ax.plot([np.log10(0.9),np.log10(0.9)],[0,n[nearest_index3]],color='red',linestyle='--')\n #ax.plot([np.log10(1.0),np.log10(1.0)],[0,n[nearest_index3]],color='green',linestyle='-')\n \n if truths is not None:\n ax.axvline(truths[i], color='black',zorder=102)\n #ax.axvline(truths[i], color=truth_color)\n\n # Plot quantiles if wanted.\n if len(quantiles) > 0:\n xsorted = sorted(x)\n for q in quantiles:\n ax.axvline(xsorted[int(q * len(xsorted))], ls=\"dashed\",\n color=kwargs.get(\"color\", \"k\"))\n\n # Set up the axes.\n ax.set_xlim(extents[i])\n if scale_hist:\n maxn = np.max(n)\n ax.set_ylim(-0.1 * maxn, 1.1 * maxn)\n else:\n ax.set_ylim(0, 1.1 * np.max(n))\n ax.set_yticklabels([])\n ax.xaxis.set_major_locator(MaxNLocator(5))\n\n # Not so DRY.\n if i < K - 1:\n ax.set_xticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_xticklabels()]\n if labels is not None:\n #print(\"\\n=====================\\n xlabel: \",labels[i],\"\\n===================\\n\")\n ax.set_xlabel(labels[i], fontsize=24)\n ax.xaxis.set_label_coords(0.5, -0.3)\n\n for j, y in enumerate(xs[:i]):\n # ````````````````````````````````````````````````````````````````````````````````````````````````````````````\n # if labels is not None: print(\"--------------------------\\n xlabel: \",labels[j],\"\\n---------------------------\")\n # if labelsy is not None: print(\"--------------------------\\n ylabel: \",labelsy[i],\"\\n---------------------------\")\n # ````````````````````````````````````````````````````````````````````````````````````````````````````````````\n ax = fig.add_subplot(K, K, (i * K + j) + 1)\n #hist2d(y, x, ax=ax, extent=[extents[j], extents[i]], **kwargs)\n # ```````````````````````````````````````````````````````````````\n try: \n hist2d(y, x, ax=ax, extent=[extents[j], extents[i]], **kwargs)\n except:\n # Attempting to catch 
the following error:\n # \"RuntimeError: Failed to converge after 100 iterations.\"\n # If so, leaving this subplot blank\n print(\"This one failed!\")\n # ```````````````````````````````````````````````````````````````\n\n if truths is not None:\n ax.plot(truths[j], truths[i], \"o\", color='black', zorder=102)\n ax.axvline(truths[j], color='black', zorder=102)\n ax.axhline(truths[i], color='black',zorder=102)\n\n ax.xaxis.set_major_locator(MaxNLocator(5))\n ax.yaxis.set_major_locator(MaxNLocator(5))\n\n if i < K - 1:\n ax.set_xticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_xticklabels()]\n if labels is not None:\n ax.set_xlabel(labels[j], fontsize=24)\n ax.xaxis.set_label_coords(0.5, -0.3)\n\n\n if j > 0:\n ax.set_yticklabels([])\n else:\n [l.set_rotation(45) for l in ax.get_yticklabels()]\n if labelsy is not None:\n ax.set_ylabel(labelsy[i], fontsize=24)\n ax.yaxis.set_label_coords(-0.3, 0.5)\n\n return fig\n\n\ndef error_ellipse(mu, cov, ax=None, factor=1.0, **kwargs):\n \"\"\"\n Plot the error ellipse at a point given it's covariance matrix.\n\n \"\"\"\n # some sane defaults\n facecolor = kwargs.pop('facecolor', 'none')\n edgecolor = kwargs.pop('edgecolor', 'k')\n\n x, y = mu\n U, S, V = np.linalg.svd(cov)\n theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n ellipsePlot = Ellipse(xy=[x, y],\n width=2 * np.sqrt(S[0]) * factor,\n height=2 * np.sqrt(S[1]) * factor,\n angle=theta,\n facecolor=facecolor, edgecolor=edgecolor, **kwargs)\n\n if ax is None:\n ax = pl.gca()\n ax.add_patch(ellipsePlot)\n\n\ndef hist2d(x, y, *args, **kwargs):\n \"\"\"\n Plot a 2-D histogram of samples.\n\n \"\"\"\n ax = kwargs.pop(\"ax\", pl.gca())\n\n extent = kwargs.pop(\"extent\", [[x.min(), x.max()], [y.min(), y.max()]])\n bins = kwargs.pop(\"bins\", 50)\n color = kwargs.pop(\"color\", \"k\")\n plot_datapoints = kwargs.get(\"plot_datapoints\", True)\n\n cmap = cm.get_cmap(\"gray\")\n cmap._init()\n cmap._lut[:-3, :-1] = 0.\n cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)\n\n #-------------------------------------------------------------------------------------\n N_levels = 2\n\n # -------------------------------\n # Creating the interpolation grid\n # -------------------------------\n # Number of grid points\n Npts = 100j\n \n # X Grid\n # ----------------------\n # Find grid limits\n xminx2 = x.min()\n xmaxx2 = x.max()\n yminx2 = y.min()\n ymaxx2 = y.max()\n # Create the Grid\n #Xx, Yx = np.mgrid[xminx:xmaxx:0.001, yminx:ymaxx:0.001] # 2D Version\n Xx2, Yx2 = np.mgrid[xminx2:xmaxx2:Npts, yminx2:ymaxx2:Npts] # 2D Version\n positionsx2 = np.vstack([Xx2.ravel(), Yx2.ravel()]) # 2xN Version\n \n # Make Log-Space KDE\n # ------------------\n # Data Point Values\n # LINEAR-SCALE DATA\n valuesx2_lin = np.vstack( [x,y] )\n #### log 10 ###\n #valuesx2_log = np.vstack( [np.log10(x),y] )\n # Make the X kernel from the X values array\n #kernelx2_lin = stats.gaussian_kde(valuesx2_lin) # this is the Gaussian KDE\n try:\n kernelx2_lin = stats.gaussian_kde(valuesx2_lin) # this is the Gaussian KDE\n except:\n pass\n \n # Sample KDE For Contours\n # -----------------------\n # LINEAR-SPACED X,Y VALUES\n Xx2_lin_vals = np.linspace(xminx2, xmaxx2, num=np.real(Npts*(-1j)), endpoint=True)\n # Log-spaced x,y values\n ###log10###\n #Xx2_log_vals = np.linspace(np.log10(xminx2), np.log10(xmaxx2), num=np.real(Npts*(-1j)), endpoint=True)\n # LINEAR-SPACED X,Y VALUES\n Yx2_lin_vals = np.linspace(yminx2, ymaxx2, num=np.real(Npts*(-1j)), endpoint=True)\n # Make into meshgrid arrays\n Xx2_lin, Yx2_lin = 
np.meshgrid(Xx2_lin_vals, Yx2_lin_vals)\n # print('\\n````````````````````````````````')\n # print('Xx2_lin.min(), Xx2_lin.max()')\n # print(Xx2_lin.min(), Xx2_lin.max())\n # print('log10(Xx2_lin.min()), log10(Xx2_lin.max())')\n # print(np.log10(Xx2_lin.min()), np.log10(Xx2_lin.max()))\n\n # print('\\nYx2_lin.min(), Yx2_lin.max()')\n # print(Yx2_lin.min(), Yx2_lin.max())\n # print('````````````````````````````````\\n')\n\n # Format that Gaussian KDE can understand\n positionsx2_lin = np.vstack([Xx2_lin.ravel(), Yx2_lin.ravel()]) # 2xN Version\n\n # Get Samples of KDE for Contours\n try:\n Zkernelx2_lin = np.reshape( kernelx2_lin(positionsx2_lin).T, Xx2_lin.shape)\n except:\n pass\n\n #sigma_levels_X2_lin = ( kdestats.confmap(Zkernelx2_lin, 0.68), )\n try:\n #sigma_levels_X2_lin = ( kdestats.confmap(Zkernelx2_lin, 0.68), )\n sigma_levels_X2_lin = ( kdestats.confmap(Zkernelx2_lin, 0.95), )\n except:\n pass\n\n # Plot Contours, Points\n # ---------------------\n # Extent Array\n # LINEAR-SCALED EXTENTS\n extentx2_lin = [xminx2, xmaxx2, yminx2, ymaxx2]\n ###log10###\n #extentx2_log = [np.log10(xminx2), np.log10(xmaxx2), yminx2, ymaxx2]\n # Contours, Points\n \n ax.locator_params(axis='x', nbins=4)\n \n # ------------------\n # Plotting\n # ------------------\n \n # X Plot\n # --------------\n #x_points_color = '#444444'\n #x_points_color = 'red'\n x_points_color = '#1E90FF'\n x_contours_color = 'red'\n #x_contours_color = 'black'\n #x_points_color = 'blue'\n #x_contours_color = 'navy'\n \n \n #-------------------------------------------------------------------------------------\n# X = np.linspace(extent[0][0], extent[0][1], bins + 1)\n# Y = np.linspace(extent[1][0], extent[1][1], bins + 1)\n# H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y))\n#\n# V = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)\n# Hflat = H.flatten()\n# inds = np.argsort(Hflat)[::-1]\n# Hflat = Hflat[inds]\n# sm = np.cumsum(Hflat)\n# sm /= sm[-1]\n#\n# for i, v0 in enumerate(V):\n# try:\n# V[i] = Hflat[sm <= v0][-1]\n# except:\n# V[i] = Hflat[0]\n#\n# X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])\n# X, Y = X[:-1], Y[:-1]\n\n if plot_datapoints:\n # Kernel Density Estimate IMAGE\n # Kernel Density Estimate CONTOURS\n try:\n ax.contour(Xx2_lin, Yx2_lin, Zkernelx2_lin, levels=sigma_levels_X2_lin, linewidths=2, alpha=1, colors=x_contours_color, zorder=101, extent=extentx2_lin )\n except:\n pass\n#n_skip=int(len(x)/5000)\n # LINEAR-SCALE DATA\n### plot points, or not, using this line below #######\n ax.plot(x, y, lw=0, marker='s',markersize=2,mew=0.2,mec=x_points_color, mfc='none',zorder=99,alpha=1)\n ## Thin out the points plotted:\n#ax.plot(x[::n_skip], y[::n_skip], lw=0, marker='s',markersize=2,mew=0.2,mec=x_points_color, mfc='none',zorder=100,alpha=1)\n\n\n ### use log10 scaleing below ###\n #ax.plot(np.log10(x), y, lw=0, marker='s',markersize=2,mew=0.2,mec=x_points_color, mfc='none',zorder=100,alpha=1)\n #ax.plot(esigma_lowx,esigma_hix, lw=0, marker='+',markersize=3,mew=0.5,mec=x_points_color, mfc='none',zorder=1,alpha=1) ## NEW\n # CENTER\n #ax1.plot(max_coords_X[0],max_coords_X[1], marker='.', markersize=5, mew=2, mec='navy', mfc='none', zorder=200)\n \n\n# ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)\n# ax.contour(X1, Y1, H.T, V, colors=color)\n\n data = np.vstack([x, y])\n mu = np.mean(data, axis=1)\n cov = np.cov(data)\n if kwargs.pop(\"plot_ellipse\", False):\n error_ellipse(mu, cov, ax=ax, edgecolor=\"r\", ls=\"dashed\")\n \n # Scaling\n ax.set_xscale('linear')\n ax.set_yscale('linear')\n# 
ax.set_xlim(extent[0])\n# ax.set_ylim(extent[1])\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.arctan2", "numpy.max", "scipy.stats.gaussian_kde", "numpy.mean", "matplotlib.pyplot.gca", "numpy.linalg.svd", "numpy.atleast_1d", "numpy.real", "numpy.argmax", "matplotlib.pyplot.figure", "numpy.atleast_2d", "numpy.cov", "numpy.meshgrid", "numpy.abs", "matplotlib.ticker.MaxNLocator", "matplotlib.cm.get_cmap", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
gait-analyzer/.github
[ "2064375ddc36bf38f3ff65f09e776328b8b4612a" ]
[ "scripts/gaitdb/stride_time.py" ]
[ "import np # main library for numeric calculations\nimport pandas as pd # main library for data analysis\nimport matplotlib.pyplot as plt # main library for data plotting\nfrom glob import glob # check files\nfrom math import *\nfrom .logReg import *\n\ngaitdb = []\ngaitDBnames = []\nfor i in sorted(glob(\"gaitdb/*.txt\")):\n gaitDBnames.append(i.split(\"\\\\\")[-1])\n gaitdb.append(pd.read_csv(i, sep=\"\\t\", names=[\"time\", \"stride_time\"]))\n\n\nvariances = []\nfor i in range(15):\n var = gaitdb[i].var()[\"stride_time\"]\n print(gaitDBnames[i][:-5]+\":\", var)\n variances.append(var)\n\nX = np.array([[1]*15, variances])\nY = np.array([0]*5 + [1]*5 + [0]*5)[np.newaxis].T\ntheta = np.array([0, 1])\nm = 15\n\nlogReg(5000000) # tried twice\nlogReg(5000000)\n\ntheta0, theta1 = tuple(theta)\nfor i in range(15):\n print(gaitDBnames[i][:-5]+\":\", sigmoid(theta0 + theta1*variances[i]))\n\nh = sigmoid(theta @ X)\nnp.round(h)\n\nfig = plt.figure()\nax = fig.add_axes([0,0,1,1])\nax.set_ylabel(\"Probability of FoG\")\nax.set_title(\"Bar Graph of FoG per patient\")\nax.bar([i[:-4] for i in gaitDBnames], h)\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
landsito/pysatNASA
[ "8ebbad6e8447ed656641a4bbeb29e6a41f06bcc8" ]
[ "pysatNASA/instruments/cnofs_plp.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Supports the Planar Langmuir Probe (PLP) onboard the Communication\nand Navigation Outage Forecasting System (C/NOFS) satellite. Downloads\ndata from the NASA Coordinated Data Analysis Web (CDAWeb).\n\nDescription from CDAWeb:\n\nThe Planar Langmuir Probe on C/NOFS is a suite of 2 current measuring sensors\nmounted on the ram facing surface of the spacecraft. The primary sensor is an\nIon Trap (conceptually similar to RPAs flown on many other spacecraft) capable\nof measuring ion densities as low as 1 cm-3 with a 12 bit log electrometer.\nThe secondary senor is a swept bias planar Langmuir probe (Surface Probe)\ncapable of measuring Ne, Te, and spacecraft potential.\n\nThe ion number density is the one second average of the ion density sampled at\neither 32, 256, 512, or 1024 Hz (depending on the mode).\n\nThe ion density standard deviation is the standard deviation of the samples\nused to produce the one second average number density.\n\nDeltaN/N is the detrened ion number density 1 second standard deviation divided\nby the mean 1 sec density.\n\nThe electron density, electron temperature, and spacecraft potential are all\nderived from a least squares fit to the current-bias curve from the Surface\nProbe.\n\nThe data is PRELIMINARY, and as such, is intended for BROWSE PURPOSES ONLY.\n\nReferences\n----------\nA brief discussion of the C/NOFS mission and instruments can be found at\nde La Beaujardière, O., et al. (2004), C/NOFS: A mission to forecast\nscintillations, J. Atmos. Sol. Terr. Phys., 66, 1573–1591,\ndoi:10.1016/j.jastp.2004.07.030.\n\nProperties\n----------\nplatform\n 'cnofs'\nname\n 'plp'\ntag\n None supported\ninst_id\n None supported\n\n\nWarnings\n--------\n- The data are PRELIMINARY, and as such, are intended for BROWSE PURPOSES ONLY.\n- Currently no cleaning routine.\n- Module not written by PLP team.\n\n\"\"\"\n\nimport datetime as dt\nimport functools\nimport numpy as np\n\nfrom pysat import logger\nfrom pysat.instruments.methods import general as mm_gen\n\nfrom pysatNASA.instruments.methods import cnofs as mm_cnofs\nfrom pysatNASA.instruments.methods import cdaweb as cdw\n\n# ----------------------------------------------------------------------------\n# Instrument attributes\n\nplatform = 'cnofs'\nname = 'plp'\ntags = {'': ''}\ninst_ids = {'': ['']}\n\n# ----------------------------------------------------------------------------\n# Instrument test attributes\n\n_test_dates = {'': {'': dt.datetime(2009, 1, 1)}}\n\n# ----------------------------------------------------------------------------\n# Instrument methods\n\n\ndef init(self):\n \"\"\"Initializes the Instrument object with instrument specific values.\n\n Runs once upon instantiation.\n\n \"\"\"\n logger.info(mm_cnofs.ackn_str)\n self.acknowledgements = mm_cnofs.ackn_str\n self.references = '\\n'.join((mm_cnofs.refs['mission'],\n mm_cnofs.refs['plp']))\n\n return\n\n\ndef clean(self):\n \"\"\"Routine to return C/NOFS PLP data cleaned to the specified level\n\n Note\n ----\n Basic cleaning to find valid Epoch values\n\n \"\"\"\n\n for key in self.data.columns:\n if key != 'Epoch':\n fill = self.meta[key, self.meta.labels.fill_val][0]\n idx, = np.where(self[key] == fill)\n self[idx, key] = np.nan\n return\n\n\n# ----------------------------------------------------------------------------\n# Instrument functions\n#\n# Use the default CDAWeb and pysat methods\n\n# Set the list_files routine\nfname = ''.join(('cnofs_plp_plasma_1sec_{year:04d}{month:02d}{day:02d}',\n 
'_v{version:02d}.cdf'))\nsupported_tags = {'': {'': fname}}\nlist_files = functools.partial(mm_gen.list_files,\n supported_tags=supported_tags)\n\n# Set the load routine\nload = cdw.load\n\n# Set the download routine\nbasic_tag = {'remote_dir': '/pub/data/cnofs/plp/plasma_1sec/{year:4d}/',\n 'fname': fname}\ndownload_tags = {'': {'': basic_tag}}\ndownload = functools.partial(cdw.download, supported_tags=download_tags)\n\n# Set the list_remote_files routine\nlist_remote_files = functools.partial(cdw.list_remote_files,\n supported_tags=download_tags)\n" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YufeiHU-fr/obow_ssl
[ "011b8b0def3d8eda1a7671ea088dace8e479e67a" ]
[ "test.py" ]
[ "import glob\nimport os\nimport pathlib\nimport datetime\nimport logging\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed\n\n\nfrom collections import defaultdict\n\n\ndef setup_printing(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not torch.distributed.is_available():\n return False\n if not torch.distributed.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return torch.distributed.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return torch.distributed.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\nclass setup_dist_logger(object): # very hacky...\n def __init__(self, logger):\n self.logger = logger\n self.is_main_process = is_main_process()\n\n def info(self, msg, *args, **kwargs):\n if self.is_main_process:\n self.logger.info(msg, *args, **kwargs)\n\n\ndef setup_logger(dst_dir, name):\n logger = logging.getLogger(name)\n\n strHandler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)-8s - %(levelname)-6s - %(message)s')\n strHandler.setFormatter(formatter)\n logger.addHandler(strHandler)\n logger.setLevel(logging.INFO)\n\n log_dir = dst_dir / \"logs\"\n os.makedirs(log_dir, exist_ok=True)\n now_str = datetime.datetime.now().__str__().replace(' ','_')\n now_str = now_str.replace(' ','_').replace('-','').replace(':','')\n logger.addHandler(logging.FileHandler(log_dir / f'LOG_INFO_{now_str}.txt'))\n\n return logger\n\n\nlogger = setup_dist_logger(logging.getLogger(__name__))\n\n\[email protected]_grad()\ndef reduce_all(tensor):\n if get_world_size() > 1:\n torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)\n return tensor\n\n\[email protected]_grad()\ndef concat_all_gather(tensor):\n if get_world_size() > 1:\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n return torch.cat(tensors_gather, dim=0)\n else:\n return tensor\n\n\[email protected]_grad()\ndef top1accuracy(output, target):\n pred = output.max(dim=1)[1]\n pred = pred.view(-1)\n target = target.view(-1)\n accuracy = 100 * pred.eq(target).float().mean()\n return accuracy\n\n\[email protected]_grad()\ndef sanity_check_for_distributed_training(model, buffers_only_bow_extr=True):\n \"\"\" Verifies that all nodes have the same copy of params & bow buffers. 
\"\"\"\n if get_world_size() > 1:\n world_size = get_world_size()\n rank = get_rank()\n is_close_all = True\n list_of_failed_states = []\n torch.distributed.barrier()\n for name, state in model.named_parameters():\n state = state.data.detach()\n state_src = state.clone()\n torch.distributed.barrier()\n torch.distributed.broadcast(state_src, src=0)\n torch.distributed.barrier()\n is_close = torch.allclose(state, state_src, rtol=1e-05, atol=1e-08)\n is_close_tensor = torch.tensor(\n [is_close], dtype=torch.float64, device='cuda')\n torch.distributed.barrier()\n is_close_all_nodes = concat_all_gather(is_close_tensor)\n is_close_all_nodes = [v >= 0.5 for v in is_close_all_nodes.tolist()]\n is_close_all_nodes_reduce = all(is_close_all_nodes)\n is_close_all &= is_close_all_nodes_reduce\n\n status = \"PASSED\" if is_close_all_nodes_reduce else \"FAILED\"\n\n logger.info(f\"====> Check {name}: [{status}]\")\n if not is_close_all_nodes_reduce:\n logger.info(f\"======> Failed nodes: [{is_close_all_nodes}]\")\n list_of_failed_states.append(name)\n\n for name, state in model.named_buffers():\n if buffers_only_bow_extr and name.find(\"module.bow_extractor\") == -1:\n continue\n state = state.data.detach().float()\n state_src = state.clone()\n torch.distributed.barrier()\n torch.distributed.broadcast(state_src, src=0)\n torch.distributed.barrier()\n is_close = torch.allclose(state, state_src, rtol=1e-05, atol=1e-08)\n is_close_tensor = torch.tensor(\n [is_close], dtype=torch.float64, device='cuda')\n torch.distributed.barrier()\n is_close_all_nodes = concat_all_gather(is_close_tensor)\n is_close_all_nodes = [v >= 0.5 for v in is_close_all_nodes.tolist()]\n is_close_all_nodes_reduce = all(is_close_all_nodes)\n is_close_all &= is_close_all_nodes_reduce\n\n status = \"PASSED\" if is_close_all_nodes_reduce else \"FAILED\"\n\n logger.info(f\"====> Check {name}: [{status}]\")\n if not is_close_all_nodes_reduce:\n logger.info(f\"======> Failed nodes: [{is_close_all_nodes}]\")\n list_of_failed_states.append(name)\n\n status = \"ALL PASSED\" if is_close_all else \"FAILED\"\n logger.info(f\"==> Sanity checked [{status}]\")\n if not is_close_all:\n logger.info(f\"====> List of failed states:\\n{list_of_failed_states}\")\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, fmt=':.4f', out_val=False):\n self.fmt = fmt\n self.out_val = out_val\n self.reset()\n\n def reset(self):\n self.val = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n\n @property\n def avg(self):\n if self.count > 0:\n return self.sum / self.count\n else:\n return 0\n\n def synchronize_between_processes(self):\n if not is_dist_avail_and_initialized():\n return\n values = torch.tensor(\n [self.count, self.sum], dtype=torch.float64, device='cuda')\n torch.distributed.barrier()\n torch.distributed.all_reduce(values)\n values = values.tolist()\n self.count = int(values[0])\n self.sum = values[1]\n\n def __str__(self):\n if 
self.out_val:\n fmtstr = '{avg' + self.fmt + '} ({val' + self.fmt + '})'\n return fmtstr.format(avg=self.avg, val=self.val)\n else:\n fmtstr = '{avg' + self.fmt + '}'\n return fmtstr.format(avg=self.avg)\n\n\nclass MetricLogger(object):\n def __init__(self, delimiter=\"\\t\", prefix=\"\"):\n self.meters = defaultdict(AverageMeter)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getitem__(self, attr):\n if not (attr in self.meters):\n self.meters[attr] = AverageMeter()\n return self.meters[attr]\n\n def __str__(self):\n meters_str = []\n for key, meter in self.meters.items():\n meters_str.append(\"{}: {}\".format(key, str(meter)))\n return self.delimiter.join(meters_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None, sync=True):\n i = 0\n if not header:\n header = ''\n start_time = time.time()\n end = time.time()\n iter_time = AverageMeter(out_val=True)\n data_time = AverageMeter(out_val=True)\n space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n\n log_msg_fmt = self.delimiter.join([\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}'])\n if torch.cuda.is_available():\n log_msg_cuda_fmt = 'max mem: {memory:.0f}'\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0:\n eta_seconds = iter_time.avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n log_msg = log_msg_fmt.format(\n i, len(iterable),\n eta=eta_string,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time))\n if torch.cuda.is_available():\n log_msg_cuda = log_msg_cuda_fmt.format(\n memory=torch.cuda.max_memory_allocated() / MB)\n log_msg = self.delimiter.join([log_msg, log_msg_cuda])\n logger.info(log_msg)\n\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logger.info(f'{header} Total time: {total_time_str}')\n\n\n\ndef global_pooling(x, type):\n assert x.dim() == 4\n if type == 'max':\n return F.max_pool2d(x, (x.size(2), x.size(3)))\n elif type == 'avg':\n return F.avg_pool2d(x, (x.size(2), x.size(3)))\n else:\n raise ValueError(\n f\"Unknown pooling type '{type}'. 
Supported types: ('avg', 'max').\")\n\n\nclass GlobalPooling(nn.Module):\n def __init__(self, type):\n super(GlobalPooling, self).__init__()\n assert type in (\"avg\", \"max\")\n self.type = type\n\n def forward(self, x):\n return global_pooling(x, self.type)\n\n def extra_repr(self):\n s = f'type={self.type}'\n return s\n\n\nclass L2Normalize(nn.Module):\n def __init__(self, dim):\n super(L2Normalize, self).__init__()\n self.dim = dim\n\n def forward(self, x):\n return F.normalize(x, p=2, dim=self.dim)\n\n\ndef convert_from_5d_to_4d(tensor_5d):\n _, _, channels, height, width = tensor_5d.size()\n return tensor_5d.view(-1, channels, height, width)\n\n\ndef add_dimension(tensor, dim_size):\n assert((tensor.size(0) % dim_size) == 0)\n return tensor.view(\n [dim_size, tensor.size(0) // dim_size,] + list(tensor.size()[1:]))\n\n\ndef find_last_epoch(search_pattern):\n print(f\"Search the last checkpoint with pattern {str(search_pattern)}\")\n\n search_pattern = search_pattern.format(epoch=\"*\")\n\n all_files = glob.glob(search_pattern)\n if len(all_files) == 0:\n raise ValueError(f\"{search_pattern}: no such file.\")\n\n substrings = search_pattern.split(\"*\")\n assert(len(substrings) == 2)\n start, end = substrings\n all_epochs = [fname.replace(start,\"\").replace(end,\"\") for fname in all_files]\n all_epochs = [int(epoch) for epoch in all_epochs if epoch.isdigit()]\n assert(len(all_epochs) > 0)\n all_epochs = sorted(all_epochs)\n last_epoch = int(all_epochs[-1])\n\n checkpoint_filename = search_pattern.replace(\"*\", str(last_epoch))\n print(f\"Last epoch: {str(last_epoch)} ({checkpoint_filename})\")\n\n checkpoint_filename = pathlib.Path(checkpoint_filename)\n assert checkpoint_filename.is_file()\n\n return last_epoch, checkpoint_filename\n\n\ndef load_network_params(network, filename, strict=True):\n if isinstance(filename, str):\n filename = pathlib.Path(filename)\n\n print(f\"[Rank {get_rank()}: load network params from: {filename}\")\n assert filename.is_file()\n checkpoint = torch.load(filename, map_location=\"cpu\")\n return network.load_state_dict(checkpoint[\"network\"], strict=strict)\n\nif __name__ == '__main__':\n epoch = 1\n metric_logger = MetricLogger(delimiter=\" \")\n metric_logger.add_meter(\"iter/s\", AverageMeter(\":.2f\", out_val=True))\n header = f\"Epoch: [{epoch + 1}]\"\n loader_train = []\n for i in range(89):\n loader_train.append(torch.randn((2,3)))\n # print(len(loader_train))\n # print(loader_train[1].size())\n # for _iter, mini_batch in enumerate(\n # metric_logger.log_every(loader_train, 250, header)):\n # print( metric_logger.log_every(loader_train, 250, header))\n # print(mini_batch)\n # print('loader_train,iter,mini_batch',len(loader_train),_iter,len(mini_batch))\n # exit()\n # print(len(mini_batch))\n a = torch.IntTensor([[1,2],[4,5]])\n b = torch.IntTensor([[7,8,9],[44,55,66]])\n c = []\n c.append(a)\n c.append(b)\n print(a)\n print(b)\n print(c)\n # print(torch.stack([a,b],dim=0))\n # print(torch.stack([a,b],dim=1))\n # print(torch.stack([a,b],dim=2))" ]
[ [ "torch.distributed.broadcast", "torch.load", "torch.cat", "torch.no_grad", "torch.cuda.is_available", "torch.allclose", "torch.distributed.get_rank", "torch.randn", "torch.distributed.barrier", "torch.tensor", "torch.ones_like", "torch.distributed.is_initialized", "torch.distributed.is_available", "torch.distributed.get_world_size", "torch.nn.functional.normalize", "torch.distributed.all_gather", "torch.cuda.max_memory_allocated", "torch.IntTensor", "torch.distributed.all_reduce" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Chenfeng1271/JSPNet-Learning-Joint-Semantic-Instance-Segmentation-of-Point-Clouds-via-Similarity-and-Probabili
[ "1d50417431b5af1ad76b96749e841969d4dc1359" ]
[ "utils/provider.py" ]
[ "import numpy as np\nimport h5py\n\n# BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n# sys.path.append(BASE_DIR)\n#\n# # Download dataset for point cloud classification\n# DATA_DIR = os.path.join(BASE_DIR, 'data')\n# if not os.path.exists(DATA_DIR):\n# os.mkdir(DATA_DIR)\n# if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):\n# www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'\n# zipfile = os.path.basename(www)\n# os.system('wget %s; unzip %s' % (www, zipfile))\n# os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))\n# os.system('rm %s' % (zipfile))\n\n\ndef shuffle_data(data, labels, ins_label=None):\n \"\"\" Shuffle data and labels.\n Input:\n data: B,N,... numpy array\n label: B,... numpy array\n Return:\n shuffled data, label and shuffle indices\n \"\"\"\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n if ins_label is None:\n return data[idx, ...], labels[idx], idx\n else:\n return data[idx, ...], labels[idx], ins_label[idx]\n\n\ndef rotate_point_cloud(batch_data):\n \"\"\" Randomly rotate the point clouds to augument the dataset\n rotation is per shape based along up direction\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data\n\n\ndef rotate_point_cloud_3z(batch_data):\n \"\"\" randomly rotate the point clouds to augument the dataset\n rotation is per shape based along z direction\n :param batch_data: B x N x 3 array, original batch of point clouds\n :return: B x N x 3 array, rotated batch of point clouds\n \"\"\"\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cv = np.cos(rotation_angle)\n sv = np.sin(rotation_angle)\n rotation_matrix = np.array([[cv, -sv, 0],\n [sv, cv, 0],\n [ 0, 0, 1]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n\n return rotated_data\n\n\ndef rotate_point_cloud_by_angle(batch_data, rotation_angle):\n \"\"\" Rotate the point cloud along up direction with certain angle.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, rotated batch of point clouds\n \"\"\"\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n # rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data\n\n\ndef jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):\n \"\"\" Randomly jitter points. 
jittering is per point.\n Input:\n BxNx3 array, original batch of point clouds\n Return:\n BxNx3 array, jittered batch of point clouds\n \"\"\"\n B, N, C = batch_data.shape\n assert (clip > 0)\n jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)\n jittered_data += batch_data\n return jittered_data\n\n\ndef save_h5_output(h5_filename, seg, segrefine, group, grouppred, label_dtype='uint8'):\n print(h5_filename)\n h5_fout = h5py.File(h5_filename)\n h5_fout.create_dataset(\n 'seglabel', data=seg,\n compression='gzip', compression_opts=1,\n dtype=label_dtype)\n h5_fout.create_dataset(\n 'segrefine', data=segrefine,\n compression='gzip', compression_opts=1,\n dtype=label_dtype)\n h5_fout.create_dataset(\n 'pid', data=group,\n compression='gzip', compression_opts=1,\n dtype=label_dtype)\n h5_fout.create_dataset(\n 'predpid', data=grouppred,\n compression='gzip', compression_opts=1,\n dtype=label_dtype)\n h5_fout.close()\n\n\ndef getDataFiles(list_filename):\n return [line.rstrip() for line in open(list_filename)]\n\n\ndef load_h5(h5_filename):\n f = h5py.File(h5_filename)\n data = f['data'][:]\n label = f['label'][:]\n return (data, label)\n\n\ndef loadDataFile(filename):\n return load_h5(filename)\n\n\ndef load_h5_data_label_seg(h5_filename):\n f = h5py.File(h5_filename)\n data = f['data'][:]\n label = f['label'][:]\n seg = f['pid'][:]\n return data, label, seg\n\n\ndef loadDataFile_with_seg(filename):\n return load_h5_data_label_seg(filename)\n\n\ndef loadDataFile_with_grouplabel(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n # label = f['label'][:]\n group = f['pid'][:] # Nx1\n if 'groupcategory' in f:\n cate = f['groupcategory'][:] # Gx1\n else:\n cate = 0\n return data, group, cate\n\n\ndef loadDataFile_with_groupseglabel(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n # label = f['label'][:]\n group = f['pid'][:] # Nx1\n if 'groupcategory' in f:\n cate = f['groupcategory'][:] # Gx1\n else:\n cate = 0\n seg = -1 * np.ones_like(group)\n for i in range(group.shape[0]):\n for j in range(group.shape[1]):\n if group[i, j, 0] != -1 and cate[i, group[i, j, 0], 0] != -1:\n seg[i, j, 0] = cate[i, group[i, j, 0], 0]\n return data, group, cate, seg\n\n\ndef loadDataFile_with_groupseglabel_sunrgbd(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n group = f['pid'][:] # NxG\n if 'groupcategory' in f:\n cate = f['groupcategory'][:] # Gx1\n else:\n cate = 0\n if 'seglabel' in f:\n seg = f['seglabel'][:]\n else:\n seg = f['seglabels'][:]\n return (data, group, cate, seg)\n\n\ndef loadDataFile_with_groupseglabel_scannet(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n # label = f['label'][:]\n group = f['pid'][:] # NxG\n if 'groupcategory' in f:\n cate = f['groupcategory'][:] # Gx1\n else:\n cate = 0\n if 'seglabel' in f:\n seg = f['seglabel'][:]\n else:\n seg = f['seglabels'][:]\n return (data, group, cate, seg)\n\n\ndef loadDataFile_with_groupseglabel_nuyv2(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n group = f['pid'][:] # NxG\n if 'groupcategory' in f:\n cate = f['groupcategory'][:] # Gx1\n else:\n cate = 0\n if 'seglabel' in f:\n seg = f['seglabel'][:]\n else:\n seg = f['seglabels'][:]\n boxes = f['bbox'][:]\n return data, group, cate, seg, boxes\n\n\ndef loadDataFile_with_groupseglabel_stanfordindoor(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n group = f['pid'][:].astype(np.int32) # NxG\n if 'label' in f:\n label = f['label'][:].astype(np.int32)\n else:\n label = []\n if 'seglabel' in f:\n seg = 
f['seglabel'][:].astype(np.int32)\n else:\n seg = f['seglabels'][:].astype(np.int32)\n return (data, group, label, seg)\n\n\ndef loadDataFile_with_img(filename):\n f = h5py.File(filename)\n data = f['data'][:]\n group = f['pid'][:] # NxG\n seg = f['seglabel'][:]\n img = f['img'][:].transpose([2, 1, 0])\n return data, group, seg, img\n" ]
[ [ "numpy.ones_like", "numpy.cos", "numpy.random.shuffle", "numpy.sin", "numpy.random.randn", "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VegaSera/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "0668c3d57424e6d4dc5af659dce3becd52ff32cf" ]
[ "module2-sql-for-analysis/migrate_rpg.py" ]
[ "import sqlite3\nimport pandas as pd\nimport os\nfrom dotenv import load_dotenv\nimport psycopg2\nfrom sqlalchemy import create_engine\n\nfilepath = os.path.join(os.path.dirname(__file__),\"..\",\"module1-introduction-to-sql\",\"\")\n#filepath = 'C:/Users/Vega/Documents/DS-Unit-3-Sprint-2-SQL-and-Databases/module1-introduction-to-sql/' # Original Filepath\nconn = sqlite3.connect(filepath + 'rpg_db.sqlite3')\n\nrpg_tables = ['armory_item', 'armory_weapon','charactercreator_character','charactercreator_character_inventory',\n 'charactercreator_cleric','charactercreator_fighter','charactercreator_mage','charactercreator_necromancer',\n 'charactercreator_thief']\n\ndfs = []\nfor table in rpg_tables:\n dfs.append(pd.read_sql(f\"SELECT * FROM {table};\", conn))\n\n\nload_dotenv()\nURL = os.getenv('URL')\nengine = create_engine(URL)\n\n#Didn't work, Google says to use sqlalchemy\n#psyc_conn = psycopg2.connect(dbname=dbname, user=dbuser, host=dbhost, password=dbpass)\n\nfor i in range(len(rpg_tables)):\n dfs[i].to_sql(rpg_tables[i], engine,\n if_exists='replace', method='multi')" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jcmgray/quijy
[ "3440fa382ec1bc87bdb6ee880f983160c08e47e6" ]
[ "quimb/tensor/tensor_arbgeom_tebd.py" ]
[ "import random\nimport itertools\nimport collections\n\nfrom autoray import do, to_numpy, dag\n\nfrom ..core import eye, kron, qarray\nfrom ..utils import ensure_dict\nfrom ..utils import progbar as Progbar\nfrom .tensor_core import Tensor\nfrom .drawing import get_colors, get_positions\n\n\nclass LocalHamGen:\n \"\"\"Representation of a local hamiltonian defined on a general graph. This\n combines all two site and one site terms into a single interaction per\n lattice pair, and caches operations on the terms such as getting their\n exponential. The sites (nodes) should be hashable and comparable.\n\n Parameters\n ----------\n H2 : dict[tuple[node], array_like]\n The interaction terms, with each key being an tuple of nodes defining\n an edge and each value the local hamilotonian term for those two nodes.\n H1 : array_like or dict[node, array_like], optional\n The one site term(s). If a single array is given, assume to be the\n default onsite term for all terms. If a dict is supplied,\n the keys should represent specific coordinates like\n ``(i, j)`` with the values the array representing the local term for\n that site. A default term for all remaining sites can still be supplied\n with the key ``None``.\n\n Attributes\n ----------\n terms : dict[tuple, array_like]\n The total effective local term for each interaction (with single site\n terms appropriately absorbed). Each key is a pair of coordinates\n ``site_a, site_b`` with ``site_a < site_b``.\n \"\"\"\n\n def __init__(self, H2, H1=None):\n # caches for not repeating operations / duplicating tensors\n self._op_cache = collections.defaultdict(dict)\n\n self.terms = dict(H2)\n\n # convert qarrays (mostly useful for working with jax)\n for key, X in self.terms.items():\n if isinstance(X, qarray):\n self.terms[key] = self._convert_from_qarray_cached(X)\n\n self.sites = tuple(\n sorted(set(itertools.chain.from_iterable(self.terms)))\n )\n\n # first combine terms to ensure coo1 < coo2\n for where in tuple(filter(bool, self.terms)):\n coo1, coo2 = where\n if coo1 < coo2:\n continue\n\n # pop and flip the term\n X12 = self._flip_cached(self.terms.pop(where))\n\n # add to, or create, term with flipped coos\n new_where = coo2, coo1\n if new_where in self.terms:\n self.terms[new_where] = self._add_cached(\n self.terms[new_where], X12\n )\n else:\n self.terms[new_where] = X12\n\n # make a directory of which single sites are covered by which terms\n # - to merge them into later\n self._sites_to_covering_terms = collections.defaultdict(list)\n for where in self.terms:\n site_a, site_b = where\n self._sites_to_covering_terms[site_a].append(where)\n self._sites_to_covering_terms[site_b].append(where)\n\n # parse one site terms\n if H1 is None:\n H1s = dict()\n elif hasattr(H1, \"shape\"):\n # set a default site term\n H1s = {None: H1}\n else:\n H1s = dict(H1)\n\n # convert qarrays (mostly useful for working with jax)\n for key, X in H1s.items():\n if isinstance(X, qarray):\n H1s[key] = self._convert_from_qarray_cached(X)\n\n # possibly set the default single site term\n default_H1 = H1s.pop(None, None)\n if default_H1 is not None:\n for site in self.sites:\n H1s.setdefault(site, default_H1)\n\n # now absorb the single site terms evenly into the two site terms\n for site, H in H1s.items():\n\n # get interacting terms which cover the site\n pairs = self._sites_to_covering_terms[site]\n num_pairs = len(pairs)\n if num_pairs == 0:\n raise ValueError(\n f\"There are no two site terms to add this single site \"\n f\"term to - site {site} is not coupled to 
anything.\"\n )\n\n # merge the single site term in equal parts into all covering pairs\n H_tensoreds = (self._op_id_cached(H), self._id_op_cached(H))\n for pair in pairs:\n H_tensored = H_tensoreds[pair.index(site)]\n self.terms[pair] = self._add_cached(\n self.terms[pair], self._div_cached(H_tensored, num_pairs)\n )\n\n @property\n def nsites(self):\n \"\"\"The number of sites in the system.\n \"\"\"\n return len(self.sites)\n\n def items(self):\n \"\"\"Iterate over all terms in the hamiltonian. This is mostly for\n convenient compatibility with ``compute_local_expectation``.\n \"\"\"\n return self.terms.items()\n\n def _convert_from_qarray_cached(self, x):\n cache = self._op_cache[\"convert_from_qarray\"]\n key = id(x)\n if key not in cache:\n cache[key] = x.A\n return cache[key]\n\n def _flip_cached(self, x):\n cache = self._op_cache[\"flip\"]\n key = id(x)\n if key not in cache:\n d = int(x.size ** (1 / 4))\n xf = do(\"reshape\", x, (d, d, d, d))\n xf = do(\"transpose\", xf, (1, 0, 3, 2))\n xf = do(\"reshape\", xf, (d * d, d * d))\n cache[key] = xf\n return cache[key]\n\n def _add_cached(self, x, y):\n cache = self._op_cache[\"add\"]\n key = (id(x), id(y))\n if key not in cache:\n cache[key] = x + y\n return cache[key]\n\n def _div_cached(self, x, y):\n cache = self._op_cache[\"div\"]\n key = (id(x), y)\n if key not in cache:\n cache[key] = x / y\n return cache[key]\n\n def _op_id_cached(self, x):\n cache = self._op_cache[\"op_id\"]\n key = id(x)\n if key not in cache:\n xn = to_numpy(x)\n d = int(xn.size ** 0.5)\n Id = eye(d, dtype=xn.dtype)\n XI = do(\"array\", kron(xn, Id), like=x)\n cache[key] = XI\n return cache[key]\n\n def _id_op_cached(self, x):\n cache = self._op_cache[\"id_op\"]\n key = id(x)\n if key not in cache:\n xn = to_numpy(x)\n d = int(xn.size ** 0.5)\n Id = eye(d, dtype=xn.dtype)\n IX = do(\"array\", kron(Id, xn), like=x)\n cache[key] = IX\n return cache[key]\n\n def _expm_cached(self, x, y):\n cache = self._op_cache[\"expm\"]\n key = (id(x), y)\n if key not in cache:\n el, ev = do(\"linalg.eigh\", x)\n cache[key] = ev @ do(\"diag\", do(\"exp\", el * y)) @ dag(ev)\n return cache[key]\n\n def get_gate(self, where):\n \"\"\"Get the local term for pair ``where``, cached.\n \"\"\"\n return self.terms[tuple(sorted(where))]\n\n def get_gate_expm(self, where, x):\n \"\"\"Get the local term for pair ``where``, matrix exponentiated by\n ``x``, and cached.\n \"\"\"\n return self._expm_cached(self.get_gate(where), x)\n\n def apply_to_arrays(self, fn):\n \"\"\"Apply the function ``fn`` to all the arrays representing terms.\n \"\"\"\n for k, x in self.terms.items():\n self.terms[k] = fn(x)\n\n def _nx_color_ordering(self, strategy=\"smallest_first\", interchange=True):\n \"\"\"Generate a term ordering based on a coloring on the line graph.\n \"\"\"\n import networkx as nx\n\n G = nx.Graph(tuple(self.terms))\n\n coloring = list(\n nx.coloring.greedy_color(\n nx.line_graph(G), strategy, interchange=interchange\n ).items()\n )\n\n # sort into color groups\n coloring.sort(key=lambda coo_color: coo_color[1])\n\n return [\n # networkx doesn't preserve node order of edge spec\n tuple(sorted(coo)) for\n coo, _ in coloring\n ]\n\n def get_auto_ordering(self, order=\"sort\", **kwargs):\n \"\"\"Get an ordering of the terms to use with TEBD, for example. 
The\n default is to sort the coordinates then greedily group them into\n commuting sets.\n\n Parameters\n ----------\n order : {'sort', None, 'random', str}\n How to order the terms *before* greedily grouping them into\n commuting (non-coordinate overlapping) sets:\n\n - ``'sort'`` will sort the coordinate pairs first.\n - ``None`` will use the current order of terms which should\n match the order they were supplied to this ``LocalHam2D``\n instance.\n - ``'random'`` will randomly shuffle the coordinate pairs\n before grouping them - *not* the same as returning a\n completely random order.\n - ``'random-ungrouped'`` will randomly shuffle the coordinate\n pairs but *not* group them at all with respect to\n commutation.\n\n Any other option will be passed as a strategy to\n ``networkx.coloring.greedy_color`` to generate the ordering.\n\n Returns\n -------\n list[tuple[node]]\n Sequence of coordinate pairs.\n \"\"\"\n if order is None:\n pairs = self.terms\n elif order == \"sort\":\n pairs = sorted(self.terms)\n elif order == \"random\":\n pairs = list(self.terms)\n random.shuffle(pairs)\n elif order == \"random-ungrouped\":\n pairs = list(self.terms)\n random.shuffle(pairs)\n return pairs\n else:\n return self._nx_color_ordering(order, **kwargs)\n\n pairs = {x: None for x in pairs}\n\n cover = set()\n ordering = list()\n while pairs:\n for pair in tuple(pairs):\n ij1, ij2 = pair\n if (ij1 not in cover) and (ij2 not in cover):\n ordering.append(pair)\n pairs.pop(pair)\n cover.add(ij1)\n cover.add(ij2)\n cover.clear()\n\n return ordering\n\n def __repr__(self):\n s = \"<LocalHamGen(nsites={}, num_terms={})>\"\n return s.format(self.nsites, len(self.terms))\n\n def draw(\n self,\n ordering=\"sort\",\n show_norm=True,\n figsize=None,\n fontsize=8,\n legend=True,\n ax=None,\n return_fig=False,\n **kwargs,\n ):\n \"\"\"Plot this Hamiltonian as a network.\n\n Parameters\n ----------\n ordering : {'sort', None, 'random'}, optional\n An ordering of the termns, or an argument to be supplied to\n :meth:`quimb.tensor.tensor_gen_tebd.LocalHamGen.get_auto_ordering`\n to generate this automatically.\n show_norm : bool, optional\n Show the norm of each term as edge labels.\n figsize : None or tuple[int], optional\n Size of the figure, defaults to size of Hamiltonian.\n fontsize : int, optional\n Font size for norm labels.\n legend : bool, optional\n Whether to show the legend of which terms are in which group.\n ax : None or matplotlib.Axes, optional\n Add to a existing set of axes.\n return_fig : bool, optional\n Whether to return any newly created figure.\n \"\"\"\n import networkx as nx\n import matplotlib.pyplot as plt\n\n if figsize is None:\n L = self.nsites ** 0.5 + 1\n figsize = (L, L)\n\n ax_supplied = ax is not None\n if not ax_supplied:\n fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n ax.axis(\"off\")\n ax.set_aspect(\"equal\")\n\n if ordering is None or isinstance(ordering, str):\n ordering = self.get_auto_ordering(ordering, **kwargs)\n\n G = nx.Graph()\n seen = set()\n n = 0\n edge_labels = dict()\n for where in ordering:\n site_a, site_b = where\n if (site_a in seen) or (site_b in seen):\n # start a new group\n seen = {site_a, site_b}\n n += 1\n else:\n seen.add(site_a)\n seen.add(site_b)\n\n nrm = do(\"linalg.norm\", self.terms[where])\n edge_labels[where] = f\"{nrm:.2f}\"\n G.add_edge(site_a, site_b, norm=nrm, group=n)\n\n num_groups = n + 1\n colors = get_colors(range(num_groups))\n\n pos = get_positions(None, G)\n\n # do the plotting\n nx.draw_networkx_edges(\n G,\n 
pos=pos,\n width=tuple(2 * x[2][\"norm\"] ** 0.5 for x in G.edges(data=True)),\n edge_color=tuple(\n colors[x[2][\"group\"]] for x in G.edges(data=True)\n ),\n alpha=0.8,\n ax=ax,\n )\n nx.draw_networkx_edge_labels(\n G,\n pos=pos,\n edge_labels=edge_labels,\n font_size=fontsize,\n font_color=(0.5, 0.5, 0.5),\n bbox=dict(alpha=0),\n ax=ax,\n )\n nx.draw_networkx_labels(\n G,\n pos,\n font_color=(0.2, 0.2, 0.2),\n font_size=fontsize,\n font_weight=\"bold\",\n ax=ax,\n )\n\n # create legend\n if legend:\n handles = []\n for color in colors.values():\n handles += [\n plt.Line2D(\n [0],\n [0],\n marker=\"o\",\n color=color,\n linestyle=\"\",\n markersize=10,\n )\n ]\n\n lbls = [f\"Group {i + 1}\" for i in range(num_groups)]\n\n ax.legend(\n handles,\n lbls,\n ncol=max(round(len(handles) / 20), 1),\n loc=\"center left\",\n bbox_to_anchor=(1, 0.5),\n )\n\n if ax_supplied:\n return\n\n if return_fig:\n return fig\n\n plt.show()\n\n graph = draw\n\n\nclass TEBDGen:\n \"\"\"Generic class for performing time evolving block decimation on an\n arbitrary graph, i.e. applying the exponential of a Hamiltonian using\n a product formula that involves applying local exponentiated gates only.\n \"\"\"\n\n def __init__(\n self,\n psi0,\n ham,\n tau=0.01,\n D=None,\n imag=True,\n gate_opts=None,\n ordering=None,\n second_order_reflect=False,\n compute_energy_every=None,\n compute_energy_final=True,\n compute_energy_opts=None,\n compute_energy_fn=None,\n compute_energy_per_site=False,\n callback=None,\n keep_best=False,\n progbar=True,\n ):\n self.imag = imag\n if not imag:\n raise NotImplementedError(\"Real time evolution not tested yet.\")\n\n self.state = psi0\n self.ham = ham\n self.progbar = progbar\n self.callback = callback\n\n # default time step to use\n self.tau = tau\n\n # parse gate application options\n if D is None:\n D = self._psi.max_bond()\n self.gate_opts = ensure_dict(gate_opts)\n self.gate_opts['max_bond'] = D\n self.gate_opts.setdefault('cutoff', 0.0)\n self.gate_opts.setdefault('contract', 'reduce-split')\n\n # parse energy computation options\n self.compute_energy_opts = ensure_dict(compute_energy_opts)\n\n self.compute_energy_every = compute_energy_every\n self.compute_energy_final = compute_energy_final\n self.compute_energy_fn = compute_energy_fn\n self.compute_energy_per_site = bool(compute_energy_per_site)\n\n if ordering is None:\n\n def dynamic_random():\n return self.ham.get_auto_ordering('random_sequential')\n\n self.ordering = dynamic_random\n elif isinstance(ordering, str):\n self.ordering = self.ham.get_auto_ordering(ordering)\n elif callable(ordering):\n self.ordering = ordering\n else:\n self.ordering = tuple(ordering)\n\n self.second_order_reflect = second_order_reflect\n\n # storage\n self._n = 0\n self.its = []\n self.taus = []\n self.energies = []\n\n self.keep_best = bool(keep_best)\n self.best = dict(energy=float('inf'), state=None, it=None)\n\n def sweep(self, tau):\n r\"\"\"Perform a full sweep of gates at every pair.\n\n .. 
math::\n\n \\psi \\rightarrow \\prod_{\\{ij\\}} \\exp(-\\tau H_{ij}) \\psi\n\n \"\"\"\n if callable(self.ordering):\n ordering = self.ordering()\n else:\n ordering = self.ordering\n\n if self.second_order_reflect:\n ordering = tuple(ordering) + tuple(reversed(ordering))\n factor = 2.0\n else:\n factor = 1.0\n\n for where in ordering:\n\n if callable(tau):\n U = self.ham.get_gate_expm(where, -tau(where) / factor)\n else:\n U = self.ham.get_gate_expm(where, -tau / factor)\n\n self.gate(U, where)\n\n def _update_progbar(self, pbar):\n desc = f\"n={self._n}, tau={self.tau}, energy~{float(self.energy):.6f}\"\n pbar.set_description(desc)\n\n def evolve(self, steps, tau=None, progbar=None):\n \"\"\"Evolve the state with the local Hamiltonian for ``steps`` steps with\n time step ``tau``.\n \"\"\"\n if tau is not None:\n self.tau = tau\n\n if progbar is None:\n progbar = self.progbar\n\n pbar = Progbar(total=steps, disable=self.progbar is not True)\n\n try:\n for i in range(steps):\n # anything required by both energy and sweep\n self.presweep(i)\n\n # possibly compute the energy\n should_compute_energy = (\n bool(self.compute_energy_every) and\n (i % self.compute_energy_every == 0))\n if should_compute_energy:\n self._check_energy()\n self._update_progbar(pbar)\n\n # actually perform the gates\n self.sweep(self.tau)\n self._n += 1\n pbar.update()\n\n if self.callback is not None:\n if self.callback(self):\n break\n\n # possibly compute the energy\n if self.compute_energy_final:\n self._check_energy()\n self._update_progbar(pbar)\n\n except KeyboardInterrupt:\n # allow the user to interupt early\n pass\n finally:\n pbar.close()\n\n @property\n def state(self):\n \"\"\"Return a copy of the current state.\n \"\"\"\n return self.get_state()\n\n @state.setter\n def state(self, psi):\n self.set_state(psi)\n\n @property\n def n(self):\n \"\"\"The number of sweeps performed.\n \"\"\"\n return self._n\n\n @property\n def D(self):\n \"\"\"The maximum bond dimension.\n \"\"\"\n return self.gate_opts['max_bond']\n\n @D.setter\n def D(self, value):\n \"\"\"The maximum bond dimension.\n \"\"\"\n self.gate_opts['max_bond'] = round(value)\n\n def _check_energy(self):\n \"\"\"Logic for maybe computing the energy if needed.\n \"\"\"\n if self.its and (self._n == self.its[-1]):\n # only compute if haven't already\n return self.energies[-1]\n\n if self.compute_energy_fn is not None:\n en = self.compute_energy_fn(self)\n else:\n en = self.compute_energy()\n\n if self.compute_energy_per_site:\n en = en / self.ham.nsites\n\n self.energies.append(float(en))\n self.taus.append(float(self.tau))\n self.its.append(self._n)\n\n if self.keep_best and en < self.best['energy']:\n self.best['energy'] = en\n self.best['state'] = self.state\n self.best['it'] = self._n\n\n return self.energies[-1]\n\n @property\n def energy(self):\n \"\"\"Return the energy of current state, computing it only if necessary.\n \"\"\"\n return self._check_energy()\n\n # ------- abstract methods that subclasses might want to override ------- #\n\n def get_state(self):\n \"\"\"The default method for retrieving the current state - simply a copy.\n Subclasses can override this to perform additional transformations.\n \"\"\"\n return self._psi.copy()\n\n def set_state(self, psi):\n \"\"\"The default method for setting the current state - simply a copy.\n Subclasses can override this to perform additional transformations.\n \"\"\"\n self._psi = psi.copy()\n\n def presweep(self, i):\n \"\"\"Perform any computations required before the sweep (and energy\n 
computation). For the basic TEBD this is nothing.\n \"\"\"\n pass\n\n def gate(self, U, where):\n \"\"\"Perform single gate ``U`` at coordinate pair ``where``. This is the\n the most common method to override.\n \"\"\"\n self._psi.gate_(U, where, **self.gate_opts)\n\n def compute_energy(self):\n \"\"\"Compute and return the energy of the current state. Subclasses can\n override this with a custom method to compute the energy.\n \"\"\"\n return self._psi.compute_local_expectation_simple(\n terms=self.ham.terms,\n **self.compute_energy_opts\n )\n\n def __repr__(self):\n s = \"<{}(n={}, tau={}, D={})>\"\n return s.format(\n self.__class__.__name__, self.n, self.tau, self.D)\n\n\nclass SimpleUpdateGen(TEBDGen):\n\n def gate(self, U, where):\n self._psi.gate_simple_(\n U, where, gauges=self.gauges, **self.gate_opts\n )\n\n def compute_energy(self):\n return self._psi.compute_local_expectation_simple(\n terms=self.ham.terms,\n gauges=self.gauges,\n **self.compute_energy_opts,\n )\n\n def get_state(self, absorb_gauges=True):\n psi = self._psi.copy()\n\n if absorb_gauges:\n psi.gauge_simple_insert(self.gauges)\n else:\n for ix, g in self.gauges.items():\n psi |= Tensor(g, inds=[ix])\n\n return psi\n\n def set_state(self, psi):\n self._psi = psi.copy()\n self.gauges = {}\n self._psi.gauge_all_simple_(gauges=self.gauges)\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.Line2D", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ViToSVK/dt-linear
[ "6de413a125bce73dc3b5d1df860f6353a9729fda" ]
[ "src/split/auc.py" ]
[ "# Area under the curve splitting criterion\n\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression #LogisticRegression\nfrom sklearn.metrics import accuracy_score, roc_auc_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nimport sys\nsys.path.insert(0, '../dt')\n\nfrom node_types import Predicate\n\n\nclass Split_auc:\n def __init__(self, use_svm=False):\n self.b_score = -1.\n self.b_pos = -1\n self.b_val = -1\n self.b_eq = None\n self.feature_mask = []\n self.use_svm = use_svm\n self.EPSILON = 0.00000001\n self.pos_uses = []\n\n\n def best(self, data, node):\n self.b_score = 0.\n self.b_pos = -1\n self.b_val = -1\n self.b_eq = None\n\n # Create a mask to only include features that\n # do not contain the same value in all samples\n self.feature_mask = []\n self.pos_uses = []\n for i, dom in enumerate(data.Xdomains):\n assert(len(dom) >= 1)\n self.feature_mask.append(len(dom) > 1)\n self.pos_uses.append(0)\n\n cur = node.parent\n while (cur is not None):\n assert(cur.is_predicate())\n assert(cur.predicate.fpos >= 0 and cur.predicate.fpos < len(self.pos_uses))\n self.pos_uses[cur.predicate.fpos] += 1\n cur = cur.parent\n\n # Compute for each predicate\n for i, dom in enumerate(data.Xdomains):\n assert(len(dom) > 0)\n if (len(dom) == 2):\n # {0,1} --> =1\n # {0,8} --> =8\n self.split_score(data, i, max(dom), True, dom)\n elif (len(dom) > 2):\n # {0,1,2} --> =0 =1 =2 (<1 IS =0; <2 IS =!2)\n # {0,3,4,6} --> =0 =3 =4 <4 (<3 IS =0; <6 IS =!6)\n for idx, val in enumerate(sorted(dom)):\n assert(idx == 0 or val != min(dom))\n also_ineq = (i not in data.Xineqforbidden and\n idx >= 2 and val != max(dom))\n if (also_ineq):\n self.split_score(data, i, val, False, dom)\n self.split_score(data, i, val, True, dom)\n\n # Done; return the best predicate\n assert(self.b_eq is not None)\n assert(self.b_pos >= 0)\n assert(self.b_pos < data.Xnames.size)\n\n numname = None\n if (data.Xnames[self.b_pos] == 'module'):\n assert(self.b_eq)\n assert(self.b_val in data.ModuleIDtoName)\n numname = data.ModuleIDtoName[self.b_val]\n elif (data.Xnames[self.b_pos] == 'action'):\n assert(self.b_eq)\n assert(self.b_val in data.ActionIDtoName)\n numname = data.ActionIDtoName[self.b_val]\n\n return Predicate(fname=data.Xnames[self.b_pos], fpos=self.b_pos,\n equality=self.b_eq, number=self.b_val, numberName=numname)\n\n\n def split_score(self, data, pos, value, equality, domain):\n pred = Predicate(fname='', fpos=pos, equality=equality, number=value)\n mask = (pred.evaluate_matrix(data.X))\n sat_Y = data.Y[mask]\n if (sat_Y.size == 0 or sat_Y.size == data.Y.size):\n return\n sat_X = data.X[mask]\n sat_X = sat_X[:,self.feature_mask] # Apply feature mask\n unsat_X = data.X[~mask]\n unsat_X = unsat_X[:,self.feature_mask] # Apply feature mask\n unsat_Y = data.Y[~mask]\n\n reg = LinearRegression()\n #reg = LogisticRegression(dual=False, tol=0.0001, C=100.0, solver='liblinear',\n #fit_intercept=True, random_state=31337)\n clf = None if not self.use_svm else \\\n LinearSVC(penalty='l1', tol=0.000001, C=10000.0,\n dual=False, fit_intercept=True, random_state=42)\n area = {'sat': 0., 'unsat': 0.}\n\n sides_done = set()\n sides_done_clean = 0\n for (X, Y, name) in [(sat_X, sat_Y, 'sat'), (unsat_X, unsat_Y, 'unsat')]:\n assert(X.shape[0] > 0)\n Ys_present = set()\n for y in Y:\n Ys_present.add(y)\n assert(len(Ys_present) > 0)\n if (len(Ys_present) == 1): # only one class present\n area[name] = 1.\n sides_done.add(name)\n sides_done_clean += 1\n elif (len(Ys_present) == 2):\n 
X_tr = StandardScaler().fit_transform(X)\n if (self.use_svm):\n # SVM classifier\n clf.fit(X_tr, Y)\n ac = accuracy_score(normalize=False, y_true=Y,\n y_pred=clf.predict(X_tr))\n if (ac == Y.size):\n area[name] = 1.\n sides_done.add(name)\n else:\n reg.fit(X_tr, Y)\n area[name] = roc_auc_score(y_true=Y, y_score=reg.predict(X_tr))\n else:\n # Linear regressor\n reg.fit(X_tr, Y)\n area[name] = roc_auc_score(y_true=Y, y_score=reg.predict(X_tr))\n assert(area[name] < 1. + self.EPSILON)\n\n assert(len(sides_done) <= 2)\n if (len(sides_done) == 2):\n # Making sure solving split wins\n assert(sides_done_clean <= 2)\n if (sides_done_clean == 2):\n # Two clean partitions - best\n area['sat'], area['unsat'] = 4., 4.\n elif (sides_done_clean == 1):\n # One clean one LC partition - second best\n area['sat'], area['unsat'] = 3., 3.\n else:\n # Two LC partitions - third best\n area['sat'], area['unsat'] = 2., 2.\n else:\n # Punishment of obviously unfavourable split\n if (pos not in data.Xineqforbidden):\n assert(pos >= 0 and pos < len(self.pos_uses))\n area['sat'] /= float(1 + self.pos_uses[pos])\n area['unsat'] /= float(1 + self.pos_uses[pos])\n #\n if (equality and (pos not in data.Xineqforbidden) and\n value != min(domain) and value != max(domain)):\n area['sat'] /= 2\n area['unsat'] /= 2\n #\n if (len(domain) > 2):\n sat_dom = 0\n for domv in domain:\n if (equality):\n if (domv == value):\n sat_dom += 1\n else:\n if (domv < value):\n sat_dom += 1\n unsat_dom = len(domain) - sat_dom\n assert(sat_dom >= 1 and unsat_dom >= 1)\n assert((not equality) or sat_dom == 1)\n for name, sz, dm in [('sat', sat_Y.size, sat_dom),\n ('unsat', unsat_Y.size, unsat_dom)]:\n if (dm > 1 and (name not in sides_done) and\n sz / float(data.Y.size) > 0.99):\n area['sat'] /= float(dm)\n area['unsat'] /= float(dm)\n\n if (area['sat'] + area['unsat'] > self.b_score + self.EPSILON):\n self.b_score = area['sat'] + area['unsat']\n self.b_pos = pos\n self.b_val = value\n self.b_eq = equality\n\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "sklearn.linear_model.LinearRegression", "sklearn.svm.LinearSVC" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KrisNguyen135/Advanced-Python-Programming-Second-Edition
[ "e5d473e3efc5f6590028cb3f318e1f4aeb0aadd1" ]
[ "Chapter08/test_tensorflow_matmul.py" ]
[ "import tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport time\nimport numpy as np\nN = 5000\n\nA_data = np.random.rand(N, N)\nB_data = np.random.rand(N, N)\n\n# Creates a graph.\n\nwith tf.device('/gpu:0'):\n A = tf.placeholder('float32')\n B = tf.placeholder('float32')\n\n C = tf.matmul(A, B)\n\nwith tf.Session() as sess:\n start = time.time()\n sess.run(C, {A: A_data, B: B_data})\n print('Matrix multiply ({}) took: {}'.format(N, time.time() - start))\n" ]
[ [ "tensorflow.compat.v1.device", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.placeholder", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elicharlese/pyextremes
[ "efcf342df0116e68b88cd1ad0c0f0dd07f9e30ae" ]
[ "tests/extremes/test_return_periods.py" ]
[ "import numpy as np\nimport pytest\n\nfrom pyextremes.extremes import get_return_periods\n\n\ndef test_get_return_periods_errors(battery_wl_preprocessed, extremes_bm_high):\n # Test bad block_size type\n with pytest.raises(TypeError, match=r\"invalid type.*block_size.*argument\"):\n get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BM\",\n extremes_type=\"high\",\n block_size=1,\n return_period_size=\"365.2425D\",\n plotting_position=\"weibull\",\n )\n\n # Test block_size for POT\n with pytest.raises(ValueError, match=r\"block_size.*only if 'extremes_method'\"):\n get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"POT\",\n extremes_type=\"high\",\n block_size=\"365.2425D\",\n return_period_size=\"365.2425D\",\n plotting_position=\"weibull\",\n )\n\n # Test bad return_period_size type\n with pytest.raises(TypeError, match=r\"invalid type.*return_period_size\"):\n get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BM\",\n extremes_type=\"high\",\n block_size=\"365.2425D\",\n return_period_size=1,\n plotting_position=\"weibull\",\n )\n\n # Test bad extremes_method\n with pytest.raises(ValueError, match=r\"invalid value.*extremes_method\"):\n get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BAD EXTREMES_METHOD VALUE\",\n extremes_type=\"high\",\n block_size=None,\n return_period_size=\"365.2425D\",\n plotting_position=\"weibull\",\n )\n\n # Test bad extremes_type\n with pytest.raises(ValueError, match=r\"invalid value.*extremes_type\"):\n get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BM\",\n extremes_type=\"BAD EXTREMES_TYPE VALUE\",\n block_size=\"365.2425D\",\n return_period_size=\"365.2425D\",\n plotting_position=\"weibull\",\n )\n\n # Test bad plotting_position\n with pytest.raises(ValueError, match=r\"invalid value.*plotting_position\"):\n get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BM\",\n extremes_type=\"high\",\n block_size=\"365.2425D\",\n return_period_size=\"365.2425D\",\n plotting_position=\"BAD PLOTTING_POSITION VALUE\",\n )\n\n # Test automatic block_size type\n return_periods_automatic = get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BM\",\n extremes_type=\"high\",\n block_size=None,\n return_period_size=\"365.2425D\",\n plotting_position=\"weibull\",\n )\n return_periods = get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes_bm_high,\n extremes_method=\"BM\",\n extremes_type=\"high\",\n block_size=\"365.2425D\",\n return_period_size=\"365.2425D\",\n plotting_position=\"weibull\",\n )\n assert (\n np.abs(\n return_periods_automatic.loc[:, \"return period\"].values.max()\n - return_periods.loc[:, \"return period\"].values.max()\n )\n <= 1\n )\n\n\[email protected](\"extremes_type\", [\"high\", \"low\"])\ndef test_extremes_method_bm(\n battery_wl_preprocessed, extremes_bm_high, extremes_bm_low, extremes_type\n):\n if extremes_type == \"high\":\n extremes = extremes_bm_high\n else:\n extremes = extremes_bm_low\n\n for plotting_position in [\n \"ecdf\",\n \"hazen\",\n \"weibull\",\n \"tukey\",\n \"blom\",\n \"median\",\n \"cunnane\",\n \"gringorten\",\n \"beard\",\n ]:\n return_periods = get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes,\n extremes_method=\"BM\",\n extremes_type=extremes_type,\n 
block_size=\"365.2425D\",\n return_period_size=\"365.2425D\",\n plotting_position=plotting_position,\n )\n if extremes_type == \"high\":\n assert np.argmax(return_periods.loc[:, extremes.name].values) == np.argmax(\n return_periods.loc[:, \"return period\"].values\n )\n else:\n assert np.argmin(return_periods.loc[:, extremes.name].values) == np.argmax(\n return_periods.loc[:, \"return period\"].values\n )\n\n\[email protected](\"extremes_type\", [\"high\", \"low\"])\ndef test_extremes_method_pot(\n battery_wl_preprocessed, extremes_pot_high, extremes_pot_low, extremes_type\n):\n if extremes_type == \"high\":\n extremes = extremes_pot_high\n else:\n extremes = extremes_pot_low\n\n for plotting_position in [\n \"ecdf\",\n \"hazen\",\n \"weibull\",\n \"tukey\",\n \"blom\",\n \"median\",\n \"cunnane\",\n \"gringorten\",\n \"beard\",\n ]:\n return_periods = get_return_periods(\n ts=battery_wl_preprocessed,\n extremes=extremes,\n extremes_method=\"POT\",\n extremes_type=extremes_type,\n block_size=None,\n return_period_size=\"365.2425D\",\n plotting_position=plotting_position,\n )\n if extremes_type == \"high\":\n assert np.argmax(return_periods.loc[:, extremes.name].values) == np.argmax(\n return_periods.loc[:, \"return period\"].values\n )\n else:\n assert np.argmin(return_periods.loc[:, extremes.name].values) == np.argmax(\n return_periods.loc[:, \"return period\"].values\n )\n" ]
[ [ "numpy.argmax", "numpy.argmin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lordk911/incubator-linkis
[ "f27699aa6be01d7ee2ae3e544c8fefcef2830b0a" ]
[ "linkis-engineconn-plugins/engineconn-plugins/spark/src/main/resources/python/mix_pyspark.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys, getopt, traceback, json, re\nimport os\nos.environ['PYSPARK_ALLOW_INSECURE_GATEWAY']='1'\nimport matplotlib\nmatplotlib.use('Agg')\nzipPaths = sys.argv[4]\npaths = zipPaths.split(':')\nfor i in range(len(paths)):\n sys.path.insert(0, paths[i])\n\nfrom py4j.protocol import Py4JJavaError, Py4JNetworkError\nfrom py4j.java_gateway import java_import, JavaGateway, GatewayClient, GatewayParameters\nfrom pyspark.conf import SparkConf\nfrom pyspark.context import SparkContext\nfrom pyspark.sql.session import SparkSession\nfrom pyspark.rdd import RDD\nfrom pyspark.files import SparkFiles\nfrom pyspark.storagelevel import StorageLevel\nfrom pyspark.accumulators import Accumulator, AccumulatorParam\nfrom pyspark.broadcast import Broadcast\nfrom pyspark.serializers import MarshalSerializer, PickleSerializer\nimport base64\nfrom io import BytesIO\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\n# for back compatibility\nfrom pyspark.sql import SQLContext, HiveContext, Row\n\nclass Logger(object):\n def __init__(self):\n self.out = \"\"\n\n def write(self, message):\n intp.appendOutput(message)\n\n def reset(self):\n self.out = \"\"\n\n def flush(self):\n pass\n\nclass ErrorLogger(object):\n def __init__(self):\n self.out = \"\"\n\n def write(self, message):\n intp.appendErrorOutput(message)\n\n def reset(self):\n self.out = \"\"\n\n def flush(self):\n pass\n\nclass SparkVersion(object):\n SPARK_1_4_0 = 140\n SPARK_1_3_0 = 130\n\n def __init__(self, versionNumber):\n self.version = versionNumber\n\n def isAutoConvertEnabled(self):\n return self.version >= self.SPARK_1_4_0\n\n def isImportAllPackageUnderSparkSql(self):\n return self.version >= self.SPARK_1_3_0\n\noutput = Logger()\nerrorOutput = ErrorLogger()\nsys.stdout = output\nsys.stderr = errorOutput\n\ntry:\n client = GatewayClient(port=int(sys.argv[1]),\n gateway_parameters=GatewayParameters(port = int(sys.argv[1]), auto_convert = True, auth_token = sys.argv[3]))\nexcept:\n client = GatewayClient(port=int(sys.argv[1]))\n\nsparkVersion = SparkVersion(int(sys.argv[2]))\n\nif sparkVersion.isAutoConvertEnabled():\n try:\n gateway = JavaGateway(client, auto_field = True, auto_convert = True,\n gateway_parameters=GatewayParameters(port = int(sys.argv[1]), auto_convert = True, auth_token = sys.argv[3]))\n except:\n gateway = JavaGateway(client, auto_convert = True)\nelse:\n gateway = JavaGateway(client)\n\njava_import(gateway.jvm, \"org.apache.spark.SparkEnv\")\njava_import(gateway.jvm, \"org.apache.spark.SparkConf\")\njava_import(gateway.jvm, \"org.apache.spark.api.java.*\")\njava_import(gateway.jvm, \"org.apache.spark.api.python.*\")\njava_import(gateway.jvm, \"org.apache.spark.mllib.api.python.*\")\n\nintp = 
gateway.entry_point\n\nif sparkVersion.isImportAllPackageUnderSparkSql():\n java_import(gateway.jvm, \"org.apache.spark.sql.*\")\n java_import(gateway.jvm, \"org.apache.spark.sql.hive.*\")\nelse:\n java_import(gateway.jvm, \"org.apache.spark.sql.SQLContext\")\n java_import(gateway.jvm, \"org.apache.spark.sql.hive.HiveContext\")\n java_import(gateway.jvm, \"org.apache.spark.sql.hive.LocalHiveContext\")\n java_import(gateway.jvm, \"org.apache.spark.sql.hive.TestHiveContext\")\n\njobGroup = \"\"\n\ndef show(obj):\n from pyspark.sql import DataFrame\n if isinstance(obj, DataFrame):\n # print(intp.showDF(jobGroup, obj._jdf))\n intp.showDF(jobGroup, obj._jdf)\n else:\n print((str(obj)))\ndef printlog(obj):\n try:\n intp.printLog(obj)\n except Exception as e:\n print(\"send log failed\")\n\n\ndef showAlias(obj,alias):\n from pyspark.sql import DataFrame\n if isinstance(obj, DataFrame):\n # print(intp.showDF(jobGroup, obj._jdf))\n intp.showAliasDF(jobGroup, obj._jdf,alias)\n else:\n print((str(obj)))\n\ndef show_matplotlib(p, fmt=\"png\", width=\"auto\", height=\"auto\", **kwargs):\n \"\"\"Matplotlib show function\n \"\"\"\n if fmt == \"png\":\n img = BytesIO()\n p.savefig(img, format=fmt)\n img_str = b\"data:image/png;base64,\"\n img_str += base64.b64encode(img.getvalue().strip())\n img_tag = \"<img src={img} style='width={width};height:{height}'>\"\n # Decoding is necessary for Python 3 compability\n img_str = img_str.decode(\"utf-8\")\n img_str = img_tag.format(img=img_str, width=width, height=height)\n elif fmt == \"svg\":\n img = StringIO()\n p.savefig(img, format=fmt)\n img_str = img.getvalue()\n else:\n raise ValueError(\"fmt must be 'png' or 'svg'\")\n\n html = \"<div style='width:{width};height:{height}'>{img}<div>\"\n intp.showHTML(jobGroup,html.format(width=width, height=height, img=img_str))\n img.close()\n\n\ndef saveDFToCsv(df, path, hasheader=True,isOverwrite=False,option={}):\n from pyspark.sql import DataFrame\n from py4j.java_collections import MapConverter\n if isinstance(df, DataFrame):\n intp.saveDFToCsv(df._jdf, path, hasheader, isOverwrite, MapConverter().convert(option,gateway._gateway_client))\n else:\n print(str(df))\n\njava_import(gateway.jvm, \"scala.Tuple2\")\n\njsc = intp.getJavaSparkContext()\njconf = intp.getSparkConf()\nconf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)\nsc = SparkContext(jsc=jsc, gateway=gateway, conf=conf)\nsqlc = HiveContext(sc, intp.sqlContext())\nsqlContext = sqlc\nspark = SparkSession(sc, intp.getSparkSession())\n\n##add pyfiles\ntry:\n pyfile = sys.argv[5]\n pyfiles = pyfile.split(',')\n for i in range(len(pyfiles)):\n if \"\"!=pyfiles[i]:\n sc.addPyFile(pyfiles[i])\nexcept Exception as e:\n print(\"add pyfile error: \" + pyfile)\n\nclass UDF(object):\n def __init__(self, intp, sqlc):\n self.intp = intp\n self.sqlc = sqlc\n def register(self, udfName, udf):\n self.sqlc.registerFunction(udfName, udf)\n def listUDFs(self):\n self.intp.listUDFs()\n def existsUDF(self, name):\n self.intp.existsUDF(name)\nudf = UDF(intp, sqlc)\nintp.onPythonScriptInitialized(os.getpid())\n\nwhile True :\n req = intp.getStatements()\n try:\n stmts = req.statements().split(\"\\n\")\n jobGroup = req.jobGroup()\n final_code = None\n\n for bdp_dwc_s in stmts:\n if bdp_dwc_s == None:\n continue\n\n # skip comment\n s_stripped = bdp_dwc_s.strip()\n if len(s_stripped) == 0 or s_stripped.startswith(\"#\"):\n continue\n\n if final_code:\n final_code += \"\\n\" + bdp_dwc_s\n else:\n final_code = bdp_dwc_s\n\n if final_code:\n compiledCode = compile(final_code, 
\"<string>\", \"exec\")\n sc.setJobGroup(jobGroup, final_code)\n eval(compiledCode)\n\n intp.setStatementsFinished(\"\", False)\n except Py4JJavaError:\n excInnerError = traceback.format_exc() # format_tb() does not return the inner exception\n innerErrorStart = excInnerError.find(\"Py4JJavaError:\")\n if innerErrorStart > -1:\n excInnerError = excInnerError[innerErrorStart:]\n intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)\n except Py4JNetworkError:\n # lost connection from gateway server. exit\n intp.setStatementsFinished(msg, True)\n sys.exit(1)\n except:\n msg = traceback.format_exc()\n intp.setStatementsFinished(msg, True)\n\n output.reset()" ]
[ [ "matplotlib.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]