repo_name (string, 6-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
Zerwer/Chemistry | [
"7f8f9618553bbad458b193e030e6c18e981c7d66"
] | [
"graphs/acid_similarity.py"
] | [
"# Graph to visualize predicted versus actual acid pKa for similarity model\n# Splits the same way as the model training\nimport matplotlib.pyplot as plt\nfrom rdkit import Chem\nfrom rdkit.Chem import rdMolDescriptors\nfrom chemical_models import AcidSimilarity\nfrom sklearn.model_selection import train_test_split\n\n# Load model\nacid_model = AcidSimilarity('acid_sim')\n\nacid_data = open('data/pKa/formatted_acidic.txt', 'r')\nacids = []\n\n# Read file to gather reference acids\nfor line in acid_data.readlines():\n split = line.split(' ')\n mol = Chem.MolFromSmiles(split[0])\n fingerprint = rdMolDescriptors.GetHashedAtomPairFingerprintAsBitVect(mol)\n acids.append([split[0], float(split[1][:-1]), fingerprint])\n\n# Split data into reference set that will be used to get similarity and\n# test set which will be used to train and validate the model\nreference, test = train_test_split(acids, test_size=0.5, random_state=1)\n\nX = []\ny = []\n\n# Set x to predicted values and y to actual\nfor acid in test:\n X.append(acid_model.run(acid[0], acids))\n y.append(acid[1])\n\n# Split data into training and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=1)\n\n# Plot the data used to train the model as red and validation data as blue\n# If overfit the red data will fit the line significantly more frequently\n# than blue\nplt.scatter(X_train, y_train, s=1, color='red')\nplt.scatter(X_test, y_test, s=1, color='blue')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dineshkumar4u/monitor-ml-models-efficiently-across-fleets-of-devices-aim325 | [
"ad330f3d1c37081284ffa979f4fb99630903688a"
] | [
"lab/app/simulator.py"
] | [
"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: MIT-0\nimport time\nimport pandas as pd\nimport subprocess\nimport ipywidgets as widgets\nimport logging\nimport numpy as np\nimport os\nimport signal\nfrom turbine import WindTurbine\n\nclass WindTurbineFarmSimulator(object):\n def __init__(self, n_turbines=5):\n self.n_turbines = n_turbines\n \n # read the raw data. This data was captured from real sensors installed in the mini Wind Turbine\n self.raw_data = pd.read_csv('data/dataset_wind.csv.gz', compression=\"gzip\", sep=',', low_memory=False).values\n \n # now create the virtual wind turbines\n self.turbines = [WindTurbine(i, self.raw_data) for i in range(n_turbines)]\n self.data_buffer = [[] for i in range(n_turbines)]\n self.running = False\n self.agents = None\n self.halted = False \n \n self.feature_ids = np.array([8,9,10,7, 22, 5, 6]) # qX,qy,qz,qw ,wind_seed_rps, rps, voltage \n self.feature_names = np.array(['qx', 'qy', 'qz', 'qw', 'wind speed rps', 'rps', 'voltage'])\n self.colors = np.array([ 'r', 'g', 'y', 'b', 'r', 'g', 'b'])\n \n self.max_buffer_size = 500\n for idx in range(n_turbines):\n for j in range(self.max_buffer_size):\n self.__read_next_turbine_sample__(idx)\n\n self.dashboard = widgets.Textarea(value='\\n' * self.n_turbines, disabled=True,\n layout={'border': '1px solid black', 'width': '850px', 'height': '90px'})\n\n def __del__(self):\n self.halt()\n \n def __launch_agent__(self, agent_id):\n \"\"\"\n Launches Linux processes for each Edge Agent. \n They will run in background and listen to a unix socket\n \"\"\"\n # remove channel\n subprocess.Popen([\"rm\", \"-f\", \"/tmp/agent%d\" % agent_id])\n # launch main process\n cmd = \"./agent/bin/sagemaker_edge_agent_binary -c agent/conf/config_edge_device_%d.json -a /tmp/agent%d\" % (agent_id, agent_id)\n logs = open(\"agent/logs/agent%d.log\" % agent_id, \"+w\")\n # we need to return the process in order to terminate it later\n return subprocess.Popen(cmd.split(' '), stdout=logs)\n\n def __prep_turbine_sample__(self, turbine_id, data):\n vib_noise,rot_noise,vol_noise = self.is_noise_enabled(turbine_id)\n #np.array([8,9,10,7, 22, 5, 6]) # qX,qy,qz,qw ,wind_seed_rps, rps, voltage \n if vib_noise: data[self.feature_ids[0:4]] = np.random.rand(4) * 100 # out of the radians range\n if rot_noise: data[self.feature_ids[5]] = np.random.rand(1) * 100 # out of the normalized wind range\n if vol_noise: data[self.feature_ids[6]] = int(np.random.rand(1)[0] * 10000) # out of the normalized voltage range\n\n self.data_buffer[turbine_id].append(data)\n if len(self.data_buffer[turbine_id]) > self.max_buffer_size:\n del self.data_buffer[turbine_id][0]\n \n def __read_next_turbine_sample__(self, turbine_id):\n self.__prep_turbine_sample__(turbine_id, self.turbines[turbine_id].read_next_sample() )\n \n def is_turbine_running(self, turbine_id):\n return self.turbines[turbine_id].is_running()\n \n def show(self): \n return widgets.VBox([\n widgets.HBox([t.show() for t in self.turbines]),\n self.dashboard\n ])\n\n def update_dashboard(self, turbine_id, data):\n if not self.turbines[turbine_id].is_running(): return\n lines = self.dashboard.value.split('\\n') \n features = np.mean(data[-50:,self.feature_ids], axis=0)\n tokens = [\"%s: %0.3f\" % (self.feature_names[i], features[i]) for i in range(len(features))]\n lines[turbine_id] = ' '.join([\"Turbine: %d\" % turbine_id] + tokens) \n self.dashboard.value = '\\n'.join(lines)\n\n def start(self):\n \"\"\"\n Run the main application by creating 
the Edge Agents, loading the model and\n kicking-off the anomaly detector program\n \"\"\"\n if not self.running and not self.halted:\n self.running = True\n logging.info(\"Launching Edge Manager Agents...\")\n self.agents = [self.__launch_agent__(i) for i in range(self.n_turbines)]\n logging.info(\"Agents launched! (waiting 5 secs)\")\n time.sleep(5) # give some time for the agents to launch\n \n def halt(self):\n if self.running:\n self.running = False\n self.halted = True\n # halt all the turbines\n for i in self.turbines: i.halt() \n # kill the agents\n for i in self.agents: \n #os.kill(i.pid, signal.SIGINT)\n i.kill()\n i.wait()\n\n def get_num_turbines(self):\n return self.n_turbines\n \n def get_raw_data(self, turbine_id): \n assert(turbine_id >= 0 and turbine_id < len(self.data_buffer))\n self.__read_next_turbine_sample__(turbine_id)\n return self.data_buffer[turbine_id]\n \n def detected_anomalies(self, turbine_id, values, anomalies):\n assert(turbine_id >= 0 and turbine_id < len(self.data_buffer))\n self.turbines[turbine_id].detected_anomalies(values, anomalies)\n\n def update_label(self, turbine_id, value ):\n self.turbines[turbine_id].update_label(value)\n\n def is_noise_enabled(self, turbine_id):\n return [self.turbines[turbine_id].is_noise_enabled('Vib'),\n self.turbines[turbine_id].is_noise_enabled('Rot'),\n self.turbines[turbine_id].is_noise_enabled('Vol')]\n\n"
] | [
[
"numpy.random.rand",
"numpy.array",
"numpy.mean",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
abiswas3/NAS | [
"e6da66f84e305e603d21df331f90b354b0b353bc"
] | [
"reinforce.py"
] | [
"import tensorflow as tf\nimport random\nimport numpy as np\n\nclass Reinforce():\n def __init__(self,\n sess,\n optimizer,\n policy_network,\n max_layers,\n global_step,\n division_rate= 100.0,\n reg_param=0.001,\n discount_factor=0.99,\n exploration=0.3):\n\n '''\n Notation:\n policy network : used describe model that predicts hyperparameters\n learned network : learned network with hyper params as recommended\n\n Args:\n sess: tensorflow session\n optimizer : type of optimization algorithm used for minimization\n policy network : final tensorflow output state of the policy network\n max_layers: the maximum number of layers for the learned neural network\n global_step : number of cycles of learning of policy network (i,e gradient updates)\n reg_param : lambda for l2 regularizaion of loss of policy network\n discoun_factor : as stated\n exploration : not used for anything right now (but meant for random exploration)\n '''\n \n self.sess = sess\n self.optimizer = optimizer\n self.policy_network = policy_network \n self.division_rate = division_rate\n self.reg_param = reg_param\n self.discount_factor=discount_factor\n self.max_layers = max_layers\n self.global_step = global_step\n\n self.reward_buffer = []\n self.state_buffer = []\n\n self.create_variables()\n var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n self.sess.run(tf.variables_initializer(var_lists))\n\n def get_action(self, state):\n '''Given the state of the neural network (Rewards so far are stored\n interanally as member variables) get new state.\n '''\n return self.sess.run(self.predicted_action, {self.states: state})\n\n def create_variables(self):\n\n with tf.name_scope(\"model_inputs\"):\n # raw state representation\n self.states = tf.placeholder(tf.float32, [None, self.max_layers*4], name=\"states\")\n\n with tf.name_scope(\"predict_actions\"):\n \n # initialize policy network\n with tf.variable_scope(\"policy_network\"):\n\n # In this case this is just the final state of the RNN\n self.policy_outputs = self.policy_network(self.states,\n self.max_layers)\n\n # Identity is used to remember the last policy_output how\n # tf.identity works isn't completely clear to me but for\n # now I'll trust that this works: it's basically deep copy\n self.action_scores = tf.identity(self.policy_outputs,\n name=\"action_scores\")\n\n # Scale them and cast them into int:\n # Note this doesn't depend on the reward\n # All that matters is the hidden weights of my policy controller\n # The reward is used to update those weights\n self.predicted_action = tf.cast(tf.scalar_mul(self.division_rate, self.action_scores),\n tf.int32,\n name=\"predicted_action\")\n\n\n # regularization loss\n policy_network_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"policy_network\")\n\n # compute loss and gradients\n with tf.name_scope(\"compute_gradients\"):\n # gradients for selecting action from policy network\n self.discounted_rewards = tf.placeholder(tf.float32, (None,), name=\"discounted_rewards\")\n\n with tf.variable_scope(\"policy_network\", reuse=True):\n self.logprobs = self.policy_network(self.states,\n self.max_layers)\n \n print(\"self.logprobs\", self.logprobs)\n\n # compute policy loss and regularization loss\n self.cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logprobs[:, -1, :],\n labels=self.states)\n \n self.pg_loss = tf.reduce_mean(self.cross_entropy_loss)\n self.reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in policy_network_variables]) # L2 by the look of 
itRegularization\n self.loss = self.pg_loss + self.reg_param * self.reg_loss\n\n #compute gradients\n self.gradients = self.optimizer.compute_gradients(self.loss)\n \n # compute policy gradients\n for i, (grad, var) in enumerate(self.gradients):\n if grad is not None:\n self.gradients[i] = (grad * self.discounted_rewards, var)\n\n # training update\n with tf.name_scope(\"train_policy_network\"):\n # apply gradients to update policy network\n self.train_op = self.optimizer.apply_gradients(self.gradients,\n global_step=self.global_step)\n\n def storeRollout(self, state, reward):\n '''Caching for the win: for long running programs this is a shite\n solution\n '''\n self.reward_buffer.append(reward)\n self.state_buffer.append(state[0])\n\n \n def train_step(self, steps_count):\n '''\n This is where policy gradientx happens \n but to understand this also understand create_variable function\n \n steps_count: how many previous states to consider\n '''\n\n # take the last steps_count number of states\n states = np.array(self.state_buffer[-steps_count:])/self.division_rate\n\n # rewards are never discounted\n rewars = self.reward_buffer[-steps_count:]\n \n _, ls = self.sess.run([self.train_op, self.loss],\n {self.states: states,\n self.discounted_rewards: rewars})\n \n return ls\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.scalar_mul",
"tensorflow.reduce_mean",
"tensorflow.get_collection",
"tensorflow.identity",
"tensorflow.variables_initializer",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.variable_scope",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
bristy1588/EvadeML-Zoo | [
"7d4acf19cc9e8a213f7541448b8638096d507ad8"
] | [
"datasets/imagenet.py"
] | [
"import sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport numpy as np\nimport os\n# from multiprocessing import Pool\nfrom keras.preprocessing import image\n\nfrom models.keras_models import keras_resnet50_imagenet_model\nfrom models.keras_models import keras_vgg19_imagenet_model\nfrom models.keras_models import keras_inceptionv3_imagenet_model\nfrom models.mobilenets_model import mobilenet_imagenet_model\n\n# pool = Pool()\n\ndef load_single_image(img_path, img_size=224):\n size = (img_size,img_size)\n img = image.load_img(img_path, target_size=size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n # Embeded preprocessing in the model.\n # x = preprocess_input(x)\n return x\n\n\ndef _load_single_image(args):\n img_path, img_size = args\n return load_single_image(img_path, img_size)\n\n\ndef data_imagenet(img_folder, img_size, label_style = 'caffe', label_size = 1000, selected_idx = None):\n fnames = os.listdir(img_folder)\n fnames = sorted(fnames, key = lambda x: int(x.split('.')[1]))\n \n if isinstance(selected_idx, list):\n selected_fnames = [fnames[i] for i in selected_idx]\n elif isinstance(selected_idx, int):\n selected_fnames = fnames[:selected_idx]\n else:\n selected_fnames = fnames\n\n labels = map(lambda x: int(x.split('.')[0]), selected_fnames)\n img_path_list = map(lambda x: [os.path.join(img_folder, x), img_size], selected_fnames)\n X = map(_load_single_image, img_path_list)\n X = np.concatenate(X, axis=0)\n Y = np.eye(1000)[labels]\n return X, Y\n\n\nclass ImageNetDataset:\n def __init__(self):\n self.dataset_name = \"ImageNet\"\n #self.image_size = 224\n self.num_channels = 3\n self.num_classes = 1000\n self.img_folder = \"/home/bristy/ILSVRC2012_img_val_labeled_caffe\"\n\n if not os.path.isdir:\n raise Exception(\"Please prepare the ImageNet dataset first: EvadeML-Zoo/datasets/imagenet_dataset/label_as_filename.py.\")\n\n def get_test_dataset(self, img_size=224, num_images=100):\n self.image_size = img_size\n X, Y = data_imagenet(self.img_folder, self.image_size, selected_idx=num_images)\n X /= 255\n return X, Y\n\n def get_test_data(self, img_size, idx_begin, idx_end):\n # Return part of the dataset.\n self.image_size = img_size\n X, Y = data_imagenet(self.img_folder, self.image_size, selected_idx=range(idx_begin, idx_end))\n X /= 255\n return X, Y\n\n def load_model_by_name(self, model_name, logits=False, input_range_type=1, input_tensor=None, pre_filter=lambda x:x):\n \"\"\"\n :params logits: no softmax layer if True.\n :params scaling: expect [-0.5,0.5] input range if True, otherwise [0, 1]\n \"\"\"\n if model_name == 'resnet50':\n model = keras_resnet50_imagenet_model(logits=logits, input_range_type=input_range_type)\n elif model_name == 'vgg19':\n model = keras_vgg19_imagenet_model(logits=logits, input_range_type=input_range_type)\n elif model_name == 'inceptionv3':\n model = keras_inceptionv3_imagenet_model(logits=logits, input_range_type=input_range_type)\n elif model_name == 'mobilenet':\n model = mobilenet_imagenet_model(logits=logits, input_range_type=input_range_type, pre_filter=pre_filter)\n else:\n raise Exception(\"Unsupported model: [%s]\" % model_name)\n\n return model\n\nif __name__ == '__main__':\n # label_style = 'caffe'\n # # img_folder = \"/mnt/nfs/taichi/imagenet_data/data_val_labeled_%s\" % label_style\n # img_folder = \"/tmp/ILSVRC2012_img_val_labeled_caffe\"\n # X, Y = data_imagenet(img_folder, selected_idx=10)\n # print (X.shape)\n # print (np.argmax(Y, axis=1))\n\n dataset = 
ImageNetDataset()\n\n X, Y = dataset.get_test_dataset()\n model = dataset.load_model_by_name('ResNet50')\n\n\n \n\n"
] | [
[
"numpy.concatenate",
"numpy.eye",
"numpy.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
igor-93/ml_utils | [
"898dbeec1ebd54443a83d95eadfb37fb1db2943c"
] | [
"common/sparse_mat.py"
] | [
"import numpy as np\nfrom scipy.sparse import isspmatrix_coo, coo_matrix\n\n\ndef drop_data(mat, threshold):\n \"\"\"Removes common from the matrix that is smaller then threshold.\n\n Parameters\n ----------\n mat : coo_matrix\n matrix\n threshold : float\n value below which we want to drop common\n\n Returns\n -------\n mat : coo\n matrix in the same format\n\n \"\"\"\n if not isspmatrix_coo(mat):\n raise ValueError('Given matrix is not in COO format')\n\n mat.data[mat.data < threshold] = 0\n mat.eliminate_zeros()\n return mat\n\n\ndef drop_cols(mat, idx_to_drop):\n \"\"\"Removes columns from the matrix.\n\n Parameters\n ----------\n mat : coo_matrix\n matrix\n idx_to_drop : array-like\n indices of columns we want to drop\n\n Returns\n -------\n mat: coo_matrix\n matrix without dropped columns\n \"\"\"\n if not isspmatrix_coo(mat):\n raise ValueError('Given matrix is not in COO format')\n if np.max(idx_to_drop) >= mat.shape[1]:\n raise ValueError('Column indices are bigger then shape of the matrix.')\n\n idx_to_drop = np.unique(idx_to_drop)\n keep = ~np.in1d(mat.col, idx_to_drop)\n mat.data, mat.row, mat.col = mat.data[keep], mat.row[keep], mat.col[keep]\n mat.col -= idx_to_drop.searchsorted(mat.col) # decrement column indices\n mat._shape = (mat.shape[0], mat.shape[1] - len(idx_to_drop))\n return coo_matrix(mat)\n\n\ndef drop_rows(mat, idx_to_drop):\n \"\"\"Removes rows from the matrix.\n\n Parameters\n ----------\n mat : coo_matrix\n matrix\n idx_to_drop : array-like\n indices of rows we want to drop\n Returns\n -------\n mat: coo_matrix\n matrix without dropped rows\n \"\"\"\n if not isspmatrix_coo(mat):\n raise ValueError('Given matrix is not in COO format')\n if not np.max(idx_to_drop) < mat.shape[0]:\n raise ValueError('Row indices are bigger then shape of the matrix.')\n\n idx_to_drop = np.unique(idx_to_drop)\n keep = ~np.in1d(mat.row, idx_to_drop)\n mat.data, mat.row, mat.col = mat.data[keep], mat.row[keep], mat.col[keep]\n mat.row -= idx_to_drop.searchsorted(mat.row) # decrement row indices\n mat._shape = (mat.shape[0] - len(idx_to_drop), mat.shape[1])\n return coo_matrix(mat)\n\n\ndef make_zero_cols(mat, columns):\n \"\"\"Annihilate entries in the given columns\n\n Parameters\n ----------\n mat : coo_matrix\n matrix\n columns : array-like\n indices of columns to set to 0\n\n Returns\n -------\n mat: coo_matrix\n matrix with given columns set to 0\n \"\"\"\n if not isspmatrix_coo(mat):\n raise ValueError('Given matrix is not in COO format')\n if not np.max(columns) < mat.shape[1]:\n raise ValueError('Column indices are bigger then shape of the matrix.')\n\n columns = np.unique(columns)\n make_zero = np.in1d(mat.col, columns)\n mat.data[make_zero] = 0\n mat.eliminate_zeros()\n return mat.tocsr()\n"
] | [
[
"scipy.sparse.coo_matrix",
"scipy.sparse.isspmatrix_coo",
"numpy.unique",
"numpy.in1d",
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mrgloom/pystruct | [
"08277d5a102e813958c9d76dd3fcc413ddb4889f"
] | [
"pystruct/learners/frankwolfe_ssvm.py"
] | [
"######################\n# Authors:\n# Xianghang Liu <[email protected]>\n# Andreas Mueller <[email protected]>\n#\n# License: BSD 3-clause\n#\n# Implements structured SVM as described in Joachims et. al.\n# Cutting-Plane Training of Structural SVMs\n\nimport warnings\nfrom time import time\nimport numpy as np\nfrom sklearn.utils import check_random_state\n\nfrom pystruct.learners.ssvm import BaseSSVM\nfrom pystruct.utils import find_constraint\n\n\nclass FrankWolfeSSVM(BaseSSVM):\n \"\"\"Structured SVM solver using Block-coordinate Frank-Wolfe.\n\n This implementation is somewhat experimental. Use with care.\n\n This implementation follows the paper:\n Lacoste-Julien, Jaggi, Schmidt, Pletscher JMLR 2013\n Block-Coordinage Frank-Wolfe Optimization for Structural SVMs\n\n With batch_mode=False, this implements the online (block-coordinate)\n version of the algorithm (BCFW)\n BCFW is an attractive alternative to subgradient methods, as no\n learning rate is needed and a duality gap guarantee is given.\n\n Parameters\n ----------\n model : StructuredModel\n Object containing the model structure. Has to implement\n `loss`, `inference` and `loss_augmented_inference`.\n\n max_iter : int, default=1000\n Maximum number of passes over dataset to find constraints.\n\n C : float, default=1\n Regularization parameter. Corresponds to 1 / (lambda * n_samples).\n\n verbose : int\n Verbosity.\n\n n_jobs : int, default=1\n Number of parallel processes. Currently only n_jobs=1 is supported.\n\n show_loss_every : int, default=0\n How often the training set loss should be computed.\n Zero corresponds to never.\n\n tol : float, default=1e-3\n Convergence tolerance on the duality gap.\n\n logger : logger object, default=None\n Pystruct logger for storing the model or extracting additional\n information.\n\n batch_mode : boolean, default=False\n Whether to use batch updates. Will slow down learning enormously.\n\n line_search : boolean, default=True\n Whether to compute the optimum step size in each step.\n The line-search is done in closed form and cheap.\n There is usually no reason to turn this off.\n\n check_dual_every : int, default=10\n How often the stopping criterion should be checked. 
Computing\n the stopping criterion is as costly as doing one pass over the dataset,\n so check_dual_every=1 will make learning twice as slow.\n\n do_averaging : bool, default=True\n Whether to use weight averaging as described in the reference paper.\n Currently this is only supported in the block-coordinate version.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n\n Attributes\n ----------\n w : nd-array, shape=(model.size_psi,)\n The learned weights of the SVM.\n\n ``loss_curve_`` : list of float\n List of loss values if show_loss_every > 0.\n\n ``objective_curve_`` : list of float\n Cutting plane objective after each pass through the dataset.\n\n ``primal_objective_curve_`` : list of float\n Primal objective after each pass through the dataset.\n\n ``timestamps_`` : list of int\n Total training time stored before each iteration.\n \"\"\"\n def __init__(self, model, max_iter=1000, C=1.0, verbose=0, n_jobs=1,\n show_loss_every=0, logger=None, batch_mode=False,\n line_search=True, check_dual_every=10, tol=.001,\n do_averaging=True, sample_method='perm', random_state=None):\n\n if n_jobs != 1:\n warnings.warn(\"FrankWolfeSSVM does not support multiprocessing\"\n \" yet. Ignoring n_jobs != 1.\")\n\n if sample_method not in ['perm', 'rnd', 'seq']:\n raise ValueError(\"sample_method can only be perm, rnd, or seq\")\n\n BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,\n n_jobs=n_jobs, show_loss_every=show_loss_every,\n logger=logger)\n self.tol = tol\n self.batch_mode = batch_mode\n self.line_search = line_search\n self.check_dual_every = check_dual_every\n self.do_averaging = do_averaging\n self.sample_method = sample_method\n self.random_state = random_state\n\n def _calc_dual_gap(self, X, Y, l):\n n_samples = len(X)\n psi_gt = self.model.batch_psi(X, Y, Y) # FIXME don't calculate this again\n Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w,\n relaxed=True)\n dpsi = psi_gt - self.model.batch_psi(X, Y_hat)\n ls = np.sum(self.model.batch_loss(Y, Y_hat))\n ws = dpsi * self.C\n l = l * n_samples * self.C\n\n dual_val = -0.5 * np.sum(self.w ** 2) + l\n w_diff = self.w - ws\n dual_gap = w_diff.T.dot(self.w) - l + ls * self.C\n primal_val = dual_val + dual_gap\n self.primal_objective_curve_.append(primal_val)\n self.objective_curve_.append(dual_val)\n self.timestamps_.append(time() - self.timestamps_[0])\n return dual_val, dual_gap, primal_val\n\n def _frank_wolfe_batch(self, X, Y):\n \"\"\"Batch Frank-Wolfe learning.\n\n This is basically included for reference / comparision only,\n as the block-coordinate version is much faster.\n\n Compare Algorithm 2 in the reference paper.\n \"\"\"\n l = 0.0\n n_samples = float(len(X))\n psi_gt = self.model.batch_psi(X, Y, Y)\n\n for k in xrange(self.max_iter):\n Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w,\n relaxed=True)\n dpsi = psi_gt - self.model.batch_psi(X, Y_hat)\n ls = np.mean(self.model.batch_loss(Y, Y_hat))\n ws = dpsi * self.C\n\n w_diff = self.w - ws\n dual_gap = 1.0 / (self.C * n_samples) * w_diff.T.dot(self.w) - l + ls\n\n # line search for gamma\n if self.line_search:\n eps = 1e-15\n gamma = dual_gap / (np.sum(w_diff ** 2) / (self.C * n_samples) + eps)\n gamma = max(0.0, min(1.0, gamma))\n else:\n gamma = 2.0 / (k + 2.0)\n\n dual_val = 
-0.5 * np.sum(self.w ** 2) + l * (n_samples * self.C)\n dual_gap_display = dual_gap * n_samples * self.C\n primal_val = dual_val + dual_gap_display\n\n self.primal_objective_curve_.append(primal_val)\n self.objective_curve_.append(dual_val)\n self.timestamps_.append(time() - self.timestamps_[0])\n if self.verbose > 0:\n print(\"k = %d, dual: %f, dual_gap: %f, primal: %f, gamma: %f\"\n % (k, dual_val, dual_gap_display, primal_val, gamma))\n\n # update w and l\n self.w = (1.0 - gamma) * self.w + gamma * ws\n l = (1.0 - gamma) * l + gamma * ls\n\n if self.logger is not None:\n self.logger(self, k)\n\n if dual_gap < self.tol:\n return\n\n def _frank_wolfe_bc(self, X, Y):\n \"\"\"Block-Coordinate Frank-Wolfe learning.\n\n Compare Algorithm 3 in the reference paper.\n \"\"\"\n n_samples = len(X)\n w = self.w.copy()\n w_mat = np.zeros((n_samples, self.model.size_psi))\n l_mat = np.zeros(n_samples)\n l_avg = 0.0\n l = 0.0\n k = 0\n\n rng = check_random_state(self.random_state)\n for p in xrange(self.max_iter):\n if self.verbose > 0:\n print(\"Iteration %d\" % p)\n\n perm = np.arange(n_samples)\n if self.sample_method == 'perm':\n rng.shuffle(perm)\n elif self.sample_method == 'rnd':\n perm = rng.randint(low=0, high=n_samples, size=n_samples)\n\n for j in range(n_samples):\n i = perm[j]\n x, y = X[i], Y[i]\n y_hat, delta_psi, slack, loss = find_constraint(self.model, x, y, w)\n # ws and ls\n ws = delta_psi * self.C\n ls = loss / n_samples\n\n # line search\n if self.line_search:\n eps = 1e-15\n w_diff = w_mat[i] - ws\n gamma = (w_diff.T.dot(w) - (self.C * n_samples)*(l_mat[i] - ls)) / (np.sum(w_diff ** 2) + eps)\n gamma = max(0.0, min(1.0, gamma))\n else:\n gamma = 2.0 * n_samples / (k + 2.0 * n_samples)\n\n w -= w_mat[i]\n w_mat[i] = (1.0 - gamma) * w_mat[i] + gamma * ws\n w += w_mat[i]\n\n l -= l_mat[i]\n l_mat[i] = (1.0 - gamma) * l_mat[i] + gamma * ls\n l += l_mat[i]\n\n if self.do_averaging:\n rho = 2.0 / (k + 2.0)\n self.w = (1.0 - rho) * self.w + rho * w\n l_avg = (1.0 - rho) * l_avg + rho * l\n else:\n self.w = w\n k += 1\n\n if self.logger is not None:\n self.logger(self, p)\n if (self.check_dual_every != 0) and (p % self.check_dual_every == 0):\n dual_val, dual_gap, primal_val = self._calc_dual_gap(X, Y, l)\n self.primal_objective_curve_.append(primal_val)\n self.objective_curve_.append(dual_val)\n self.timestamps_.append(time() - self.timestamps_[0])\n if self.verbose > 0:\n print(\"dual: %f, dual_gap: %f, primal: %f\"\n % (dual_val, dual_gap, primal_val))\n if dual_gap < self.tol:\n return\n\n def fit(self, X, Y, constraints=None, initialize=True):\n \"\"\"Learn parameters using (block-coordinate) Frank-Wolfe learning.\n\n Parameters\n ----------\n X : iterable\n Traing instances. Contains the structured input objects.\n No requirement on the particular form of entries of X is made.\n\n Y : iterable\n Training labels. 
Contains the strctured labels for inputs in X.\n Needs to have the same length as X.\n\n contraints : ignored\n\n initialize : boolean, default=True\n Whether to initialize the model for the data.\n Leave this true except if you really know what you are doing.\n \"\"\"\n if initialize:\n self.model.initialize(X, Y)\n self.objective_curve_, self.primal_objective_curve_ = [], []\n self.timestamps_ = [time()]\n self.w = getattr(self, \"w\", np.zeros(self.model.size_psi))\n try:\n if self.batch_mode:\n self._frank_wolfe_batch(X, Y)\n else:\n self._frank_wolfe_bc(X, Y)\n except KeyboardInterrupt:\n pass\n if self.verbose:\n print(\"Calculating final objective.\")\n self.timestamps_.append(time() - self.timestamps_[0])\n self.primal_objective_curve_.append(self._objective(X, Y))\n self.objective_curve_.append(self.objective_curve_[-1])\n if self.logger is not None:\n self.logger(self, 'final')\n\n return self\n"
] | [
[
"numpy.sum",
"numpy.arange",
"numpy.zeros",
"sklearn.utils.check_random_state"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iiduka-researches/201811-kaz | [
"52d7b101dec1ce404296d69e1af36bce54e2a91d"
] | [
"tests/test_projections.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom unittest import TestCase\nfrom qcopt.projections import *\n\n\nclass TestBox(TestCase):\n def test_behavior(self):\n p = box(np.array([-1., -2.]), np.array([3., 4.]))\n np.testing.assert_equal(\n p(np.array([0., 0.])), np.array([0., 0.]))\n np.testing.assert_equal(\n p(np.array([-1., -2.])), np.array([-1., -2.]))\n np.testing.assert_equal(\n p(np.array([3., 4.])), np.array([3., 4.]))\n np.testing.assert_equal(\n p(np.array([-3., -4.])), np.array([-1., -2.]))\n np.testing.assert_equal(\n p(np.array([5., 6.])), np.array([3., 4.]))\n\n def test_lb_only(self):\n p = box(lb=np.ones(2))\n np.testing.assert_equal(\n p(np.zeros(2)), np.ones(2))\n np.testing.assert_equal(\n p(np.ones(2)), np.ones(2))\n np.testing.assert_equal(\n p(np.full(2, 2)), np.full(2, 2))\n\n def test_ub_only(self):\n p = box(ub=np.ones(2))\n np.testing.assert_equal(\n p(np.zeros(2)), np.zeros(2))\n np.testing.assert_equal(\n p(np.ones(2)), np.ones(2))\n np.testing.assert_equal(\n p(np.full(2, 2)), np.ones(2))\n\n def test_scalar(self):\n p = box(0, 1)\n np.testing.assert_equal(\n p(np.full(10, -1)), np.full(10, 0))\n np.testing.assert_equal(\n p(np.full(10, 0)), np.full(10, 0))\n np.testing.assert_equal(\n p(np.full(10, 1)), np.full(10, 1))\n np.testing.assert_equal(\n p(np.full(10, 2)), np.full(10, 1))\n\n def test_reallocation(self):\n p = box(np.zeros(2), np.ones(2))\n x = np.array([0.5, 0.5])\n self.assertIsNot(p(x), x)\n\n\nclass TestHalfSpace(TestCase):\n def test_behavior(self):\n p = half_space(np.array([1., 2.]), 3.)\n np.testing.assert_array_equal(\n p(np.array([0., 0.])), np.array([0., 0.]))\n np.testing.assert_array_equal(\n p(np.array([0., 1.5])), np.array([0., 1.5]))\n np.testing.assert_array_equal(\n p(np.array([3., 0.])), np.array([3., 0.]))\n np.testing.assert_array_equal(\n p(np.array([3., 0.])), np.array([3., 0.]))\n np.testing.assert_array_almost_equal(\n p(np.array([4., 2.])), np.array([3., 0.]))\n\n def test_behavior_negative(self):\n p = half_space(np.array([-1., -1.]), -1.)\n np.testing.assert_array_almost_equal(\n p(np.array([0., 0.])), np.array([0.5, 0.5]))\n np.testing.assert_array_equal(\n p(np.array([0.5, 0.5])), np.array([0.5, 0.5]))\n np.testing.assert_array_equal(\n p(np.array([1., 1.])), np.array([1., 1.]))\n np.testing.assert_array_equal(\n p(np.array([0., 1.])), np.array([0., 1.]))\n\n def test_w_changed(self):\n w = np.array([1., 1.])\n p = half_space(w, 3.)\n w[0] = 3.\n np.testing.assert_equal(\n p(np.array([3., 0.])), np.array([3., 0.]))\n\n def test_reallocation(self):\n p = half_space(np.array([1., 1.]), 3.)\n x = np.array([1., 1.])\n self.assertIsNot(p(x), x)\n x = np.array([3., 3.])\n self.assertIsNot(p(x), x)\n\n\nclass TestBall(TestCase):\n def test_behavior(self):\n p = ball(np.array([2., -3.]), 1.)\n np.testing.assert_equal(\n p(np.array([2., -3.])), np.array([2., -3.]))\n np.testing.assert_equal(\n p(np.array([1., -3.])), np.array([1., -3.]))\n np.testing.assert_equal(\n p(np.array([3., -3.])), np.array([3., -3.]))\n np.testing.assert_equal(\n p(np.array([2., -4.])), np.array([2., -4.]))\n np.testing.assert_equal(\n p(np.array([2., -2.])), np.array([2., -2.]))\n np.testing.assert_almost_equal(\n p(np.array([2., -1.])), np.array([2., -2.]))\n np.testing.assert_almost_equal(\n p(np.array([5., -3.])), np.array([3., -3.]))\n np.testing.assert_almost_equal(\n p(np.array([3., -2.])), np.array([2., -3.]) + 2 ** -0.5)\n\n def test_c_changed(self):\n c = np.array([1., 1.])\n p = ball(c, 1.)\n c[0] = 3.\n 
np.testing.assert_equal(\n p(np.array([1., 1.])), np.array([1., 1.]))\n\n def test_reallocation(self):\n p = ball(np.array([2., -3.]), 1.)\n x = np.array([2., -3.])\n self.assertIsNot(p(x), x)\n x = np.array([3., -2.])\n self.assertIsNot(p(x), x)\n\n\nclass TestSumUp(TestCase):\n def test_behavior(self):\n def u(x: np.ndarray) -> np.ndarray:\n return x + 1.\n def v(x: np.ndarray) -> np.ndarray:\n return x + 2.\n w = sum_up(u, v)\n np.testing.assert_almost_equal(\n w(np.array([0., 0.])), np.array([3., 3.]))\n\n\nclass TestAverage(TestCase):\n def test_behavior(self):\n def u(x: np.ndarray) -> np.ndarray:\n return x + 1.\n def v(x: np.ndarray) -> np.ndarray:\n return x + 2.\n w = average(u, v)\n np.testing.assert_almost_equal(\n w(np.array([0., 0.])), np.array([1.5, 1.5]))\n\n\nclass TestCompose(TestCase):\n def test_behavior(self):\n def u(x: np.ndarray) -> np.ndarray:\n return x + 1.\n def v(x: np.ndarray) -> np.ndarray:\n return x + 2.\n w = compose(u, v)\n np.testing.assert_almost_equal(\n w(np.array([0., 0.])), np.array([3., 3.]))\n\n def test_order(self):\n def u(x: np.ndarray) -> np.ndarray:\n return x - 1.\n def v(x: np.ndarray) -> np.ndarray:\n return np.fmax(0., x)\n w = compose(v, u)\n np.testing.assert_equal(\n w(np.array([0., 0.])), np.array([0., 0.]))\n\n\nclass TestFirmUp(TestCase):\n def test_behavior(self):\n def u(x: np.ndarray) -> np.ndarray:\n return x / 2.\n v = firm_up(u)\n np.testing.assert_almost_equal(\n v(np.array([1., 2.])), np.array([0.75, 1.5]))\n\n def test_behavior_with_alpha(self):\n def u(x: np.ndarray) -> np.ndarray:\n return x / 2.\n v = firm_up(u, 0.25)\n np.testing.assert_array_almost_equal(\n v(np.array([1., 2.])), np.array([0.875, 1.75]))\n"
] | [
[
"numpy.full",
"numpy.ones",
"numpy.fmax",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
naisuu/Rxitect | [
"d39d3f00a537f49e6fa5a43570f93a50618064af"
] | [
"src/rxitect/data/chembl_corpus.py"
] | [
"from pathlib import Path\nfrom typing import Optional\n\nimport pandas as pd\nimport pytorch_lightning as pl\nfrom pytorch_lightning.utilities.types import (EVAL_DATALOADERS,\n TRAIN_DATALOADERS)\nfrom torch.utils.data.dataloader import DataLoader\nfrom globals import root_path\n\nfrom rxitect.structs.vocabulary import Vocabulary\nfrom rxitect.tensor_utils import random_split_frac\n\n\nclass ChemblCorpus(pl.LightningDataModule):\n def __init__(\n self,\n vocabulary: Vocabulary,\n data_dir: Path = root_path / \"data/processed\",\n use_smiles: bool = False,\n batch_size: int = 512,\n n_workers: int = 1,\n dev_run: bool = False,\n ):\n super().__init__()\n self.vocabulary = vocabulary\n self.data_dir = data_dir\n self.use_smiles = use_smiles\n self.batch_size = batch_size\n self.n_workers = n_workers\n self.dev_run = dev_run\n\n self.chembl_train = None\n self.chembl_test = None\n self.chembl_val = None\n\n def setup(self, stage: Optional[str] = None):\n corpus_filename = \"smiles_chembl_corpus.txt\" if self.use_smiles else \"selfies_chembl_corpus.csv\"\n\n chembl_full = pd.read_csv(self.data_dir / corpus_filename, nrows=100_000 if self.dev_run else None)[\"token\"]\n\n if stage == \"test\" or stage is None:\n chembl_test = chembl_full.sample(frac=0.2, random_state=42)\n chembl_full = chembl_full.drop(\n chembl_test.index\n ) # Make sure the test set is excluded\n self.chembl_test = self.vocabulary.encode(\n [seq.split(\" \") for seq in chembl_test]\n )\n\n if stage == \"fit\" or stage is None:\n chembl_train, chembl_val = random_split_frac(dataset=chembl_full)\n self.chembl_train = self.vocabulary.encode(\n [seq.split(\" \") for seq in chembl_train]\n )\n self.chembl_val = self.vocabulary.encode(\n [seq.split(\" \") for seq in chembl_val]\n )\n\n def train_dataloader(self) -> TRAIN_DATALOADERS:\n return DataLoader(\n self.chembl_train,\n batch_size=self.batch_size,\n shuffle=True,\n drop_last=True,\n pin_memory=False,\n num_workers=self.n_workers,\n )\n\n def val_dataloader(self) -> EVAL_DATALOADERS:\n return DataLoader(\n self.chembl_val,\n batch_size=self.batch_size,\n drop_last=True,\n pin_memory=False,\n num_workers=self.n_workers,\n )\n\n def test_dataloader(self) -> EVAL_DATALOADERS:\n return DataLoader(\n self.chembl_test,\n batch_size=self.batch_size,\n drop_last=True,\n pin_memory=False,\n num_workers=self.n_workers,\n )\n"
] | [
[
"torch.utils.data.dataloader.DataLoader",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BerkeleyAutomation/rlqp_benchmarks | [
"5c79e870c4bd697383f66f5dff26aea29dc1ebfa"
] | [
"solvers/mosek.py"
] | [
"import mosek\nimport numpy as np\nimport scipy.sparse as spa\nfrom . import statuses as s\nfrom .results import Results\nfrom utils.general import is_qp_solution_optimal\n\n\nclass MOSEKSolver(object):\n\n # Map of Mosek status to mathprogbasepy status.\n STATUS_MAP = {mosek.solsta.optimal: s.OPTIMAL,\n mosek.solsta.integer_optimal: s.OPTIMAL,\n mosek.solsta.prim_feas: s.OPTIMAL_INACCURATE, # for integer problems\n mosek.solsta.prim_infeas_cer: s.PRIMAL_INFEASIBLE,\n mosek.solsta.dual_infeas_cer: s.DUAL_INFEASIBLE,\n mosek.solsta.unknown: s.SOLVER_ERROR}\n\n def __init__(self, settings={}):\n '''\n Initialize solver object by setting require settings\n '''\n self._settings = settings\n\n @property\n def settings(self):\n \"\"\"Solver settings\"\"\"\n return self._settings\n\n def solve(self, example):\n '''\n Solve problem\n\n Args:\n example: example object\n\n Returns:\n Results structure\n '''\n p = example.qp_problem\n\n # Get problem dimensions\n n = p['P'].shape[0]\n m = p['A'].shape[0]\n\n '''\n Load problem\n '''\n # Create environment\n env = mosek.Env()\n\n # Create optimization task\n task = env.Task()\n\n if 'verbose' in self._settings: # if verbose is null, suppress it\n if self._settings['verbose']:\n # Define a stream printer to grab output from MOSEK\n def streamprinter(text):\n import sys\n sys.stdout.write(text)\n sys.stdout.flush()\n env.set_Stream(mosek.streamtype.log, streamprinter)\n task.set_Stream(mosek.streamtype.log, streamprinter)\n\n # Load problem into task object\n\n # Append 'm' empty constraints.\n # The constraints will initially have no bounds.\n task.appendcons(m)\n\n # Append 'n' variables.\n # The variables will initially be fixed at zero (x=0).\n task.appendvars(n)\n\n # Add linear cost by iterating over all variables\n for j in range(n):\n task.putcj(j, p['q'][j])\n task.putvarbound(j, mosek.boundkey.fr, -np.inf, np.inf)\n\n # Add constraints\n if p['A'] is not None:\n row_A, col_A, el_A = spa.find(p['A'])\n task.putaijlist(row_A, col_A, el_A)\n\n for j in range(m):\n # Get bounds and keys\n u_temp = p['u'][j] if p['u'][j] < 1e20 else np.inf\n l_temp = p['l'][j] if p['l'][j] > -1e20 else -np.inf\n\n # Divide 5 cases\n if (np.abs(l_temp - u_temp) < 1e-08):\n bound_key = mosek.boundkey.fx\n elif l_temp == -np.inf and u_temp == np.inf:\n bound_key = mosek.boundkey.fr\n elif l_temp != -np.inf and u_temp == np.inf:\n bound_key = mosek.boundkey.lo\n elif l_temp != -np.inf and u_temp != np.inf:\n bound_key = mosek.boundkey.ra\n elif l_temp == -np.inf and u_temp != np.inf:\n bound_key = mosek.boundkey.up\n\n # Add bound\n task.putconbound(j, bound_key, l_temp, u_temp)\n\n # Add quadratic cost\n if p['P'].count_nonzero(): # If there are any nonzero elms in P\n P = spa.tril(p['P'], format='coo')\n task.putqobj(P.row, P.col, P.data)\n\n # Set problem minimization\n task.putobjsense(mosek.objsense.minimize)\n\n '''\n Set parameters\n '''\n for param, value in self._settings.items():\n if param == 'verbose':\n if value is False:\n self._handle_str_param(task, 'MSK_IPAR_LOG'.strip(), 0)\n elif param != 'time_limit' and param != 'high_accuracy':\n if isinstance(param, str):\n self._handle_str_param(task, param.strip(), value)\n else:\n self._handle_enum_param(task, param, value)\n\n '''\n Solve problem\n '''\n try:\n # Optimization and check termination code\n termination_code = task.optimize()\n except:\n if self._settings['verbose']:\n print(\"Error in MOSEK solution\\n\")\n return Results(s.SOLVER_ERROR, None, None, None,\n None, None)\n\n if 'verbose' in 
self._settings: # if verbose is null, suppress it\n if self._settings['verbose']:\n task.solutionsummary(mosek.streamtype.msg)\n\n '''\n Extract results\n '''\n\n # Get solution type and status\n # soltype, solsta = self.choose_solution(task)\n solsta = task.getsolsta(mosek.soltype.itr)\n soltype = mosek.soltype.itr\n\n # Map status using statusmap\n status = self.STATUS_MAP.get(solsta, s.SOLVER_ERROR)\n\n # Check if the return code is max time and change status\n # MOSEK does not return max time as solution status\n # but as a solver status\n if termination_code == mosek.rescode.trm_max_time:\n status = s.TIME_LIMIT\n\n # Get statistics\n cputime = task.getdouinf(mosek.dinfitem.optimizer_time)\n total_iter = task.getintinf(mosek.iinfitem.intpnt_iter)\n\n if status in s.SOLUTION_PRESENT:\n # get primal variables values\n x = np.zeros(task.getnumvar())\n task.getxx(soltype, x)\n # get obj value\n objval = task.getprimalobj(soltype)\n # get dual\n y = np.zeros(task.getnumcon())\n task.gety(soltype, y)\n # it appears signs are inverted\n y = -y\n\n if not is_qp_solution_optimal(p, x, y,\n high_accuracy=self._settings.get('high_accuracy')):\n status = s.SOLVER_ERROR\n\n # Validate execution time (do not trust commercial solvers)\n if 'time_limit' in self._settings:\n if cputime > self._settings['time_limit']:\n status = s.TIME_LIMIT\n\n return Results(status, objval, x, y,\n cputime, total_iter)\n else:\n return Results(status, None, None, None,\n cputime, None)\n\n # def choose_solution(self, task):\n # \"\"\"Chooses between the basic, interior point solution or integer solution\n # Parameters\n # N.B. From CVXPY\n # ----------\n # task : mosek.Task\n # The solver status interface.\n # Returns\n # -------\n # soltype\n # The preferred solution (mosek.soltype.*)\n # solsta\n # The status of the preferred solution (mosek.solsta.*)\n # \"\"\"\n # import mosek\n\n # def rank(status):\n # # Rank solutions\n # # optimal > near_optimal > anything else > None\n # if status == mosek.solsta.optimal:\n # return 3\n # elif status == mosek.solsta.near_optimal:\n # return 2\n # elif status is not None:\n # return 1\n # else:\n # return 0\n\n # solsta_bas, solsta_itr = None, None\n\n # # Integer solution\n # if task.solutiondef(mosek.soltype.itg):\n # solsta_itg = task.getsolsta(mosek.soltype.itg)\n # return mosek.soltype.itg, solsta_itg\n\n # # Continuous solution\n # if task.solutiondef(mosek.soltype.bas):\n # solsta_bas = task.getsolsta(mosek.soltype.bas)\n\n # if task.solutiondef(mosek.soltype.itr):\n # solsta_itr = task.getsolsta(mosek.soltype.itr)\n\n # # As long as interior solution is not worse, take it\n # # (for backward compatibility)\n # if rank(solsta_itr) >= rank(solsta_bas):\n # return mosek.soltype.itr, solsta_itr\n # else:\n # return mosek.soltype.bas, solsta_bas\n\n @staticmethod\n def _handle_str_param(task, param, value):\n if param.startswith(\"MSK_DPAR_\"):\n task.putnadouparam(param, value)\n elif param.startswith(\"MSK_IPAR_\"):\n task.putnaintparam(param, value)\n elif param.startswith(\"MSK_SPAR_\"):\n task.putnastrparam(param, value)\n else:\n raise ValueError(\"Invalid MOSEK parameter '%s'.\" % param)\n\n @staticmethod\n def _handle_enum_param(task, param, value):\n if isinstance(param, mosek.dparam):\n task.putdouparam(param, value)\n elif isinstance(param, mosek.iparam):\n task.putintparam(param, value)\n elif isinstance(param, mosek.sparam):\n task.putstrparam(param, value)\n else:\n raise ValueError(\"Invalid MOSEK parameter '%s'.\" % param)\n"
] | [
[
"numpy.abs",
"scipy.sparse.tril",
"scipy.sparse.find"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
iariav/EDSR-PyTorch | [
"c709b3d43adb6c2457cf87c37c1f34a7bcfc48bb"
] | [
"src/data/dsm.py"
] | [
"import os\nfrom data import srdata\nimport numpy as np\n\nclass dsm(srdata.SRData):\n def __init__(self, args, name='depth', train=True, benchmark=False):\n\n if train:\n train_file = os.path.join(args.dir_data, args.data_train[0], 'train.txt')\n self.data_indices = np.loadtxt(train_file,dtype=np.int).flatten()\n else:\n test_file = os.path.join(args.dir_data, args.data_train[0], 'test.txt')\n self.data_indices = np.loadtxt(test_file,dtype=np.int).flatten()\n super(dsm, self).__init__(\n args, name=name, train=train, benchmark=benchmark\n )\n\n def _scan(self):\n names_hr, names_lr = super(dsm, self)._scan()\n names_hr = [names_hr[i-1] for i in self.data_indices]\n for n in range(len(names_lr)):\n names_lr[n] = [names_lr[n][i-1] for i in self.data_indices]\n\n return names_hr, names_lr\n\n def _set_filesystem(self, dir_data):\n super(dsm, self)._set_filesystem(dir_data)\n self.dir_hr = os.path.join(self.apath, 'dsm_train_HR')\n self.dir_lr = os.path.join(self.apath, 'dsm_train_LR_bicubic')\n if self.input_large: self.dir_lr += 'L'\n\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adrn/BarChaos | [
"1f6619763efb7d4425df8633e6425b18232efbb0"
] | [
"scripts/test_mpi.py"
] | [
"from os import path\nimport sys\n\n# Third-party\nimport astropy.units as u\nimport gala.dynamics as gd\nimport numpy as np\nimport h5py\nimport schwimmbad\n\n# Package\nfrom barchaos.experiments.freqmap import FreqMap\nfrom barchaos.log import logger\n\nlogger.setLevel(1)\n\ndef cache_file():\n fn = path.join('/tmp/cache.hdf5')\n\n w0 = gd.PhaseSpacePosition(pos=np.random.random((3,16))*u.kpc,\n vel=np.random.random((3,16))*u.m/u.s)\n with h5py.File(fn, 'w') as f:\n g = f.create_group('w0')\n w0.to_hdf5(g)\n\n return fn\n\ndef test_freqmap_mpi(cache_file, pool):\n\n with FreqMap(cache_file) as exp:\n pool.map(exp, list(range(16)), callback=exp.callback)\n\n exp.status()\n\nif __name__ == '__main__':\n pool = schwimmbad.MPIPool()\n if not pool.is_master():\n pool.wait()\n sys.exit(0)\n\n test_freqmap_mpi(cache_file(), pool)\n\n pool.close()\n"
] | [
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zy-han/Data-X | [
"1f2fd241d7af8cf926ccefc7308e5b7941739b21"
] | [
"API/Request_Uniswap_try.py"
] | [
"import requests\r\nimport json\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndef auto_query_uniswap(token_id_0, token_id_1, time_left, time_right, skip=5, first=100):\r\n pair_prefix = \"\"\"{\r\n pairs(where: {token0: \"\"\"\r\n pair_midfix = \"\"\", token1: \"\"\"\r\n pair_tailfix = \"\"\"}) {\r\n id\r\n token0 {\r\n id\r\n name\r\n symbol\r\n totalSupply\r\n totalLiquidity\r\n }\r\n token1 {\r\n id\r\n name\r\n symbol\r\n totalSupply\r\n totalLiquidity\r\n }\r\n token0Price\r\n volumeToken0\r\n token1Price\r\n volumeToken1\r\n volumeUSD\r\n txCount\r\n }\r\n }\"\"\"\r\n pair_query = pair_prefix + \"\\\"\" +token_id_0 + \"\\\"\" + pair_midfix + \"\\\"\" + token_id_1 + \"\\\"\" + pair_tailfix\r\n url = 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2'\r\n r = requests.post(url, json={'query':pair_query})\r\n if r:\r\n print('Success_pair')\r\n else:\r\n print('Fail_pair.')\r\n raw_data = r.json()\r\n pair_id = raw_data['data']['pairs'][0]['id']\r\n volume_token_0 = raw_data['data']['pairs'][0]['volumeToken0']\r\n volume_token_1 = raw_data['data']['pairs'][0]['volumeToken1']\r\n volume_USD = raw_data['data']['pairs'][0]['volumeUSD']\r\n volume_all = {'volume_token_0': volume_token_0, 'volume_token_1': volume_token_1, 'volume_USD': volume_USD}\r\n\r\n #find the actual transaction records\r\n tran_prefix = \"{ swaps (where: {pair: \"\r\n tran_midfix_0 = \", timestamp_gte: \"\r\n tran_midfix_1 = \", timestamp_lt: \"\r\n tran_skip = \"}, skip: \"\r\n tran_first = \", first: \"\r\n tran_tail = \"\"\") {\r\n id\r\n timestamp\r\n pair {\r\n token0 {\r\n symbol\r\n }\r\n token1 {\r\n symbol\r\n }\r\n }\r\n sender\r\n to\r\n amount0In\r\n amount0Out\r\n amount1In\r\n amount1Out\r\n amountUSD\r\n }\r\n }\"\"\"\r\n tran_query = tran_prefix + \"\\\"\" + pair_id + \"\\\"\" + tran_midfix_0 + str(time_left) + tran_midfix_1 + str(time_right) \\\r\n + tran_skip + str(skip) + tran_first + str(first) + tran_tail\r\n tran_r = requests.post(url, json={'query': tran_query})\r\n if tran_r:\r\n print('Success_transaction')\r\n else:\r\n print('Fail_transaction')\r\n return volume_all, tran_r.json()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # sample code to request transaction records from uniswap\r\n token_id_0 = \"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48\" #USDC\r\n token_id_1 = \"0xdac17f958d2ee523a2206206994597c13d831ec7\" #USDT\r\n time_left = 1613523600 #2021.2.17 - 1am\r\n time_right = 1614387600 #2021.2.27 - 1am\r\n skip = 1\r\n first = 200\r\n volume, raw_data = auto_query_uniswap(token_id_0, token_id_1, time_left, time_right, skip, first)\r\n if len(raw_data['data']['swaps']) >= 1:\r\n df_data = pd.DataFrame(raw_data['data']['swaps'])\r\n df_data.to_csv('sample_uniswap_record.csv', index = False)\r\n else:\r\n print('No record')\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Duton/yolov4-tf2 | [
"cdd49b5e0bd69a609c28071555da7ff5191d8781"
] | [
"utils/dataloader.py"
] | [
"import math\r\nfrom random import sample, shuffle\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom tensorflow import keras\r\n\r\nfrom utils.utils import cvtColor, preprocess_input\r\n\r\nclass YoloDatasets(keras.utils.Sequence):\r\n def __init__(self, annotation_lines, input_shape, anchors, batch_size, num_classes, anchors_mask, epoch_now, epoch_length, mosaic, train, mosaic_ratio = 0.7):\r\n self.annotation_lines = annotation_lines\r\n self.length = len(self.annotation_lines)\r\n \r\n self.input_shape = input_shape\r\n self.anchors = anchors\r\n self.batch_size = batch_size\r\n self.num_classes = num_classes\r\n self.anchors_mask = anchors_mask\r\n self.epoch_now = epoch_now - 1\r\n self.epoch_length = epoch_length\r\n self.mosaic = mosaic\r\n self.train = train\r\n self.mosaic_ratio = mosaic_ratio\r\n\r\n self.threshold = 4\r\n\r\n def __len__(self):\r\n return math.ceil(len(self.annotation_lines) / float(self.batch_size))\r\n\r\n def __getitem__(self, index):\r\n image_data = []\r\n box_data = []\r\n for i in range(index * self.batch_size, (index + 1) * self.batch_size): \r\n i = i % self.length\r\n #---------------------------------------------------#\r\n # 训练时进行数据的随机增强\r\n # 验证时不进行数据的随机增强\r\n #---------------------------------------------------#\r\n if self.mosaic:\r\n if self.rand() < 0.5 and self.epoch_now < self.epoch_length * self.mosaic_ratio:\r\n lines = sample(self.annotation_lines, 3)\r\n lines.append(self.annotation_lines[i])\r\n shuffle(lines)\r\n image, box = self.get_random_data_with_Mosaic(lines, self.input_shape)\r\n else:\r\n image, box = self.get_random_data(self.annotation_lines[i], self.input_shape, random = self.train)\r\n else:\r\n image, box = self.get_random_data(self.annotation_lines[i], self.input_shape, random = self.train)\r\n image_data.append(preprocess_input(np.array(image, np.float32)))\r\n box_data.append(box)\r\n\r\n image_data = np.array(image_data)\r\n box_data = np.array(box_data)\r\n y_true = self.preprocess_true_boxes(box_data, self.input_shape, self.anchors, self.num_classes)\r\n return [image_data, *y_true], np.zeros(self.batch_size)\r\n\r\n def generate(self):\r\n i = 0\r\n while True:\r\n image_data = []\r\n box_data = []\r\n for b in range(self.batch_size):\r\n if i==0:\r\n np.random.shuffle(self.annotation_lines)\r\n #---------------------------------------------------#\r\n # 训练时进行数据的随机增强\r\n # 验证时不进行数据的随机增强\r\n #---------------------------------------------------#\r\n if self.mosaic:\r\n if self.rand() < 0.5 and self.epoch_now < self.epoch_length * self.mosaic_ratio:\r\n lines = sample(self.annotation_lines, 3)\r\n lines.append(self.annotation_lines[i])\r\n shuffle(lines)\r\n image, box = self.get_random_data_with_Mosaic(lines, self.input_shape)\r\n else:\r\n image, box = self.get_random_data(self.annotation_lines[i], self.input_shape, random = self.train)\r\n else:\r\n image, box = self.get_random_data(self.annotation_lines[i], self.input_shape, random = self.train)\r\n\r\n i = (i+1) % self.length\r\n image_data.append(preprocess_input(np.array(image, np.float32)))\r\n box_data.append(box)\r\n image_data = np.array(image_data)\r\n box_data = np.array(box_data)\r\n y_true = self.preprocess_true_boxes(box_data, self.input_shape, self.anchors, self.num_classes)\r\n yield image_data, y_true[0], y_true[1], y_true[2]\r\n \r\n def on_epoch_end(self):\r\n self.epoch_now += 1\r\n shuffle(self.annotation_lines)\r\n\r\n def rand(self, a=0, b=1):\r\n return np.random.rand()*(b-a) + a\r\n\r\n def get_random_data(self, 
annotation_line, input_shape, max_boxes=500, jitter=.3, hue=.1, sat=0.7, val=0.4, random=True):\r\n line = annotation_line.split()\r\n #------------------------------#\r\n # 读取图像并转换成RGB图像\r\n #------------------------------#\r\n image = Image.open(line[0])\r\n image = cvtColor(image)\r\n #------------------------------#\r\n # 获得图像的高宽与目标高宽\r\n #------------------------------#\r\n iw, ih = image.size\r\n h, w = input_shape\r\n #------------------------------#\r\n # 获得预测框\r\n #------------------------------#\r\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\r\n\r\n if not random:\r\n scale = min(w/iw, h/ih)\r\n nw = int(iw*scale)\r\n nh = int(ih*scale)\r\n dx = (w-nw)//2\r\n dy = (h-nh)//2\r\n\r\n #---------------------------------#\r\n # 将图像多余的部分加上灰条\r\n #---------------------------------#\r\n image = image.resize((nw,nh), Image.BICUBIC)\r\n new_image = Image.new('RGB', (w,h), (128,128,128))\r\n new_image.paste(image, (dx, dy))\r\n image_data = np.array(new_image, np.float32)\r\n\r\n #---------------------------------#\r\n # 对真实框进行调整\r\n #---------------------------------#\r\n box_data = np.zeros((max_boxes,5))\r\n if len(box)>0:\r\n np.random.shuffle(box)\r\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\r\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\r\n box[:, 0:2][box[:, 0:2]<0] = 0\r\n box[:, 2][box[:, 2]>w] = w\r\n box[:, 3][box[:, 3]>h] = h\r\n box_w = box[:, 2] - box[:, 0]\r\n box_h = box[:, 3] - box[:, 1]\r\n box = box[np.logical_and(box_w>1, box_h>1)]\r\n if len(box)>max_boxes: box = box[:max_boxes]\r\n box_data[:len(box)] = box\r\n\r\n return image_data, box_data\r\n \r\n #------------------------------------------#\r\n # 对图像进行缩放并且进行长和宽的扭曲\r\n #------------------------------------------#\r\n new_ar = iw/ih * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)\r\n scale = self.rand(.25, 2)\r\n if new_ar < 1:\r\n nh = int(scale*h)\r\n nw = int(nh*new_ar)\r\n else:\r\n nw = int(scale*w)\r\n nh = int(nw/new_ar)\r\n image = image.resize((nw,nh), Image.BICUBIC)\r\n\r\n #------------------------------------------#\r\n # 将图像多余的部分加上灰条\r\n #------------------------------------------#\r\n dx = int(self.rand(0, w-nw))\r\n dy = int(self.rand(0, h-nh))\r\n new_image = Image.new('RGB', (w,h), (128,128,128))\r\n new_image.paste(image, (dx, dy))\r\n image = new_image\r\n\r\n #------------------------------------------#\r\n # 翻转图像\r\n #------------------------------------------#\r\n flip = self.rand()<.5\r\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\r\n\r\n image_data = np.array(image, np.uint8)\r\n #---------------------------------#\r\n # 对图像进行色域变换\r\n # 计算色域变换的参数\r\n #---------------------------------#\r\n r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1\r\n #---------------------------------#\r\n # 将图像转到HSV上\r\n #---------------------------------#\r\n hue, sat, val = cv2.split(cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV))\r\n dtype = image_data.dtype\r\n #---------------------------------#\r\n # 应用变换\r\n #---------------------------------#\r\n x = np.arange(0, 256, dtype=r.dtype)\r\n lut_hue = ((x * r[0]) % 180).astype(dtype)\r\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\r\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\r\n\r\n image_data = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\r\n image_data = cv2.cvtColor(image_data, cv2.COLOR_HSV2RGB)\r\n\r\n #---------------------------------#\r\n # 对真实框进行调整\r\n #---------------------------------#\r\n box_data = np.zeros((max_boxes,5))\r\n if 
len(box)>0:\r\n np.random.shuffle(box)\r\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\r\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\r\n if flip: box[:, [0,2]] = w - box[:, [2,0]]\r\n box[:, 0:2][box[:, 0:2]<0] = 0\r\n box[:, 2][box[:, 2]>w] = w\r\n box[:, 3][box[:, 3]>h] = h\r\n box_w = box[:, 2] - box[:, 0]\r\n box_h = box[:, 3] - box[:, 1]\r\n box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\r\n if len(box)>max_boxes: box = box[:max_boxes]\r\n box_data[:len(box)] = box\r\n \r\n return image_data, box_data\r\n\r\n def merge_bboxes(self, bboxes, cutx, cuty):\r\n merge_bbox = []\r\n for i in range(len(bboxes)):\r\n for box in bboxes[i]:\r\n tmp_box = []\r\n x1, y1, x2, y2 = box[0], box[1], box[2], box[3]\r\n\r\n if i == 0:\r\n if y1 > cuty or x1 > cutx:\r\n continue\r\n if y2 >= cuty and y1 <= cuty:\r\n y2 = cuty\r\n if x2 >= cutx and x1 <= cutx:\r\n x2 = cutx\r\n\r\n if i == 1:\r\n if y2 < cuty or x1 > cutx:\r\n continue\r\n if y2 >= cuty and y1 <= cuty:\r\n y1 = cuty\r\n if x2 >= cutx and x1 <= cutx:\r\n x2 = cutx\r\n\r\n if i == 2:\r\n if y2 < cuty or x2 < cutx:\r\n continue\r\n if y2 >= cuty and y1 <= cuty:\r\n y1 = cuty\r\n if x2 >= cutx and x1 <= cutx:\r\n x1 = cutx\r\n\r\n if i == 3:\r\n if y1 > cuty or x2 < cutx:\r\n continue\r\n if y2 >= cuty and y1 <= cuty:\r\n y2 = cuty\r\n if x2 >= cutx and x1 <= cutx:\r\n x1 = cutx\r\n tmp_box.append(x1)\r\n tmp_box.append(y1)\r\n tmp_box.append(x2)\r\n tmp_box.append(y2)\r\n tmp_box.append(box[-1])\r\n merge_bbox.append(tmp_box)\r\n return merge_bbox\r\n\r\n def get_random_data_with_Mosaic(self, annotation_line, input_shape, max_boxes=500, jitter=0.3, hue=.1, sat=0.7, val=0.4):\r\n h, w = input_shape\r\n min_offset_x = self.rand(0.3, 0.7)\r\n min_offset_y = self.rand(0.3, 0.7)\r\n\r\n image_datas = [] \r\n box_datas = []\r\n index = 0\r\n for line in annotation_line:\r\n #---------------------------------#\r\n # 每一行进行分割\r\n #---------------------------------#\r\n line_content = line.split()\r\n #---------------------------------#\r\n # 打开图片\r\n #---------------------------------#\r\n image = Image.open(line_content[0])\r\n image = cvtColor(image)\r\n \r\n #---------------------------------#\r\n # 图片的大小\r\n #---------------------------------#\r\n iw, ih = image.size\r\n #---------------------------------#\r\n # 保存框的位置\r\n #---------------------------------#\r\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line_content[1:]])\r\n \r\n #---------------------------------#\r\n # 是否翻转图片\r\n #---------------------------------#\r\n flip = self.rand()<.5\r\n if flip and len(box)>0:\r\n image = image.transpose(Image.FLIP_LEFT_RIGHT)\r\n box[:, [0,2]] = iw - box[:, [2,0]]\r\n\r\n #------------------------------------------#\r\n # 对图像进行缩放并且进行长和宽的扭曲\r\n #------------------------------------------#\r\n new_ar = iw/ih * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)\r\n scale = self.rand(.4, 1)\r\n if new_ar < 1:\r\n nh = int(scale*h)\r\n nw = int(nh*new_ar)\r\n else:\r\n nw = int(scale*w)\r\n nh = int(nw/new_ar)\r\n image = image.resize((nw, nh), Image.BICUBIC)\r\n\r\n #-----------------------------------------------#\r\n # 将图片进行放置,分别对应四张分割图片的位置\r\n #-----------------------------------------------#\r\n if index == 0:\r\n dx = int(w*min_offset_x) - nw\r\n dy = int(h*min_offset_y) - nh\r\n elif index == 1:\r\n dx = int(w*min_offset_x) - nw\r\n dy = int(h*min_offset_y)\r\n elif index == 2:\r\n dx = int(w*min_offset_x)\r\n dy = int(h*min_offset_y)\r\n elif index == 3:\r\n dx = int(w*min_offset_x)\r\n dy = 
int(h*min_offset_y) - nh\r\n \r\n new_image = Image.new('RGB', (w,h), (128,128,128))\r\n new_image.paste(image, (dx, dy))\r\n image_data = np.array(new_image)\r\n\r\n index = index + 1\r\n box_data = []\r\n #---------------------------------#\r\n # 对box进行重新处理\r\n #---------------------------------#\r\n if len(box)>0:\r\n np.random.shuffle(box)\r\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\r\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\r\n box[:, 0:2][box[:, 0:2]<0] = 0\r\n box[:, 2][box[:, 2]>w] = w\r\n box[:, 3][box[:, 3]>h] = h\r\n box_w = box[:, 2] - box[:, 0]\r\n box_h = box[:, 3] - box[:, 1]\r\n box = box[np.logical_and(box_w>1, box_h>1)]\r\n box_data = np.zeros((len(box),5))\r\n box_data[:len(box)] = box\r\n \r\n image_datas.append(image_data)\r\n box_datas.append(box_data)\r\n\r\n #---------------------------------#\r\n # 将图片分割,放在一起\r\n #---------------------------------#\r\n cutx = int(w * min_offset_x)\r\n cuty = int(h * min_offset_y)\r\n\r\n new_image = np.zeros([h, w, 3])\r\n new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]\r\n new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]\r\n new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]\r\n new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]\r\n\r\n new_image = np.array(new_image, np.uint8)\r\n #---------------------------------#\r\n # 对图像进行色域变换\r\n # 计算色域变换的参数\r\n #---------------------------------#\r\n r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1\r\n #---------------------------------#\r\n # 将图像转到HSV上\r\n #---------------------------------#\r\n hue, sat, val = cv2.split(cv2.cvtColor(new_image, cv2.COLOR_RGB2HSV))\r\n dtype = new_image.dtype\r\n #---------------------------------#\r\n # 应用变换\r\n #---------------------------------#\r\n x = np.arange(0, 256, dtype=r.dtype)\r\n lut_hue = ((x * r[0]) % 180).astype(dtype)\r\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\r\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\r\n\r\n new_image = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\r\n new_image = cv2.cvtColor(new_image, cv2.COLOR_HSV2RGB)\r\n\r\n #---------------------------------#\r\n # 对框进行进一步的处理\r\n #---------------------------------#\r\n new_boxes = self.merge_bboxes(box_datas, cutx, cuty)\r\n\r\n #---------------------------------#\r\n # 将box进行调整\r\n #---------------------------------#\r\n box_data = np.zeros((max_boxes, 5))\r\n if len(new_boxes)>0:\r\n if len(new_boxes)>max_boxes: new_boxes = new_boxes[:max_boxes]\r\n box_data[:len(new_boxes)] = new_boxes\r\n return new_image, box_data\r\n\r\n def preprocess_true_boxes(self, true_boxes, input_shape, anchors, num_classes):\r\n assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'\r\n #-----------------------------------------------------------#\r\n # 获得框的坐标和图片的大小\r\n #-----------------------------------------------------------#\r\n true_boxes = np.array(true_boxes, dtype='float32')\r\n input_shape = np.array(input_shape, dtype='int32')\r\n \r\n #-----------------------------------------------------------#\r\n # 一共有三个特征层数\r\n #-----------------------------------------------------------#\r\n num_layers = len(self.anchors_mask)\r\n #-----------------------------------------------------------#\r\n # m为图片数量,grid_shapes为网格的shape\r\n #-----------------------------------------------------------#\r\n m = true_boxes.shape[0]\r\n grid_shapes = [input_shape // {0:32, 1:16, 2:8}[l] for l in range(num_layers)]\r\n 
#-----------------------------------------------------------#\r\n # y_true的格式为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85)\r\n #-----------------------------------------------------------#\r\n y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(self.anchors_mask[l]), 5 + num_classes),\r\n dtype='float32') for l in range(num_layers)]\r\n\r\n #-----------------------------------------------------------#\r\n # 通过计算获得真实框的中心和宽高\r\n # 中心点(m,n,2) 宽高(m,n,2)\r\n #-----------------------------------------------------------#\r\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\r\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\r\n #-----------------------------------------------------------#\r\n # 将真实框归一化到小数形式\r\n #-----------------------------------------------------------#\r\n true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]\r\n true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]\r\n\r\n #-----------------------------------------------------------#\r\n # [9,2] -> [1,9,2]\r\n #-----------------------------------------------------------#\r\n anchors = np.expand_dims(anchors, 0)\r\n anchor_maxes = anchors / 2.\r\n anchor_mins = -anchor_maxes\r\n\r\n #-----------------------------------------------------------#\r\n # 长宽要大于0才有效\r\n #-----------------------------------------------------------#\r\n valid_mask = boxes_wh[..., 0]>0\r\n\r\n for b in range(m):\r\n #-----------------------------------------------------------#\r\n # 对每一张图进行处理\r\n #-----------------------------------------------------------#\r\n wh = boxes_wh[b, valid_mask[b]]\r\n if len(wh) == 0: continue\r\n #-----------------------------------------------------------#\r\n # [n,2] -> [n,1,2]\r\n #-----------------------------------------------------------#\r\n wh = np.expand_dims(wh, -2)\r\n box_maxes = wh / 2.\r\n box_mins = - box_maxes\r\n\r\n #-----------------------------------------------------------#\r\n # 计算所有真实框和先验框的交并比\r\n # intersect_area [n,9]\r\n # box_area [n,1]\r\n # anchor_area [1,9]\r\n # iou [n,9]\r\n #-----------------------------------------------------------#\r\n intersect_mins = np.maximum(box_mins, anchor_mins)\r\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\r\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\r\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\r\n\r\n box_area = wh[..., 0] * wh[..., 1]\r\n anchor_area = anchors[..., 0] * anchors[..., 1]\r\n\r\n iou = intersect_area / (box_area + anchor_area - intersect_area)\r\n #-----------------------------------------------------------#\r\n # 维度是[n,] 感谢 消尽不死鸟 的提醒\r\n #-----------------------------------------------------------#\r\n best_anchor = np.argmax(iou, axis=-1)\r\n\r\n for t, n in enumerate(best_anchor):\r\n #-----------------------------------------------------------#\r\n # 找到每个真实框所属的特征层\r\n #-----------------------------------------------------------#\r\n for l in range(num_layers):\r\n if n in self.anchors_mask[l]:\r\n #-----------------------------------------------------------#\r\n # floor用于向下取整,找到真实框所属的特征层对应的x、y轴坐标\r\n #-----------------------------------------------------------#\r\n i = np.floor(true_boxes[b,t,0] * grid_shapes[l][1]).astype('int32')\r\n j = np.floor(true_boxes[b,t,1] * grid_shapes[l][0]).astype('int32')\r\n #-----------------------------------------------------------#\r\n # k指的的当前这个特征点的第k个先验框\r\n #-----------------------------------------------------------#\r\n k = self.anchors_mask[l].index(n)\r\n #-----------------------------------------------------------#\r\n # 
c指的是当前这个真实框的种类\r\n #-----------------------------------------------------------#\r\n c = true_boxes[b, t, 4].astype('int32')\r\n #-----------------------------------------------------------#\r\n # y_true的shape为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85)\r\n # 最后的85可以拆分成4+1+80,4代表的是框的中心与宽高、\r\n # 1代表的是置信度、80代表的是种类\r\n #-----------------------------------------------------------#\r\n y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]\r\n y_true[l][b, j, i, k, 4] = 1\r\n y_true[l][b, j, i, k, 5+c] = 1\r\n\r\n return y_true\r\n"
] | [
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"numpy.logical_and",
"numpy.clip",
"numpy.arange",
"numpy.random.shuffle",
"numpy.argmax",
"numpy.random.rand",
"numpy.floor",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
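The YOLO-style data generator in the record above assigns each ground-truth box to the prior (anchor) whose shape has the highest IoU inside preprocess_true_boxes. A standalone sketch of that assignment step, not part of the record, using made-up anchor and box sizes rather than the record's actual configuration:

    import numpy as np

    # Made-up priors and one made-up ground-truth box, all as (width, height) in pixels.
    anchors = np.array([[10, 13], [16, 30], [33, 23]], dtype=np.float32)
    box_wh = np.array([[20, 25]], dtype=np.float32)

    # Centre boxes and anchors at the origin and intersect them, as preprocess_true_boxes does.
    intersect_wh = np.maximum(
        np.minimum(box_wh[:, None, :] / 2, anchors[None, :, :] / 2)
        - np.maximum(-box_wh[:, None, :] / 2, -anchors[None, :, :] / 2),
        0.0,
    )
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    iou = intersect_area / (box_wh.prod(-1)[:, None] + anchors.prod(-1)[None, :] - intersect_area)

    best_anchor = np.argmax(iou, axis=-1)  # -> [1]: the 16x30 prior owns the 20x25 box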
DAkinyemi/Vortex_in_Cell_Method | [
"311bf4875d8983fb369345f5bcc13039680c83f1"
] | [
"Python_Code/Vortex_in_Cell.py"
] | [
"#Importing required libraries\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport random\nimport copy\nnp.set_printoptions(suppress=True)\n# import sys\n# sys.path.insert(0, \"./../../vortexincell/src/wrappedCode/\")\nimport ConvSolver\nimport importlib\nimportlib.reload(ConvSolver)\n\n#Vortex Particle-in-Cell Method\nclass VPIC:\n #Takes points and breaks into x, y, vorticity strength, dx and dy\n def __init__(self,points,dx,dy):\n self.points = points\n self.dx = dx\n self.dy = dy\n self.x = points[:,0]\n self.y = points[:,1]\n self.size = points[:,2]\n\n #Interpolates the points to 4 corners indexed\n def interpolate(self):\n interpolated_points = []\n for i in range(0,len(self.points)):\n stuff = np.zeros((4,2))\n stuff[0][0] = math.floor(self.x[i]/self.dx) #bottom left x\n stuff[0][1] = math.floor(self.y[i]/self.dy) #bottom left y\n stuff[1][0] = math.floor(self.x[i]/self.dx) + 1 #bottom right x\n stuff[1][1] = math.floor(self.y[i]/self.dy) #bottom right y\n stuff[2][0] = math.floor(self.x[i]/self.dx) + 1 #top right x\n stuff[2][1] = math.floor(self.y[i]/self.dy) + 1 #top right y\n stuff[3][0] = math.floor(self.x[i]/self.dx) #top left x\n stuff[3][1] = math.floor(self.y[i]/self.dy) + 1 #top left y\n interpolated_points.append(stuff)\n interpolated_points = np.concatenate(interpolated_points)\n return interpolated_points\n\n #Deposits the weights relative to the distance from points\n #Where xi and yi are the interpolated x and y coordinates\n def weight_deposition(self,xi,yi):\n array_of_newsizes = []\n for i in range(0,len(self.points)):\n for k in range((4*i),((4*i)+4)):\n volfact = self.size[i]#/(self.dx*self.dy)\n if self.y[i] - (self.dy * yi[k]) > 0:\n yweights = 1 - ((self.y[i] - (self.dy * yi[k]))/self.dy)\n else:\n yweights = 1 - abs((self.y[i] - (self.dy * yi[k]))/self.dy)\n if self.x[i] - (self.dx * xi[k]) > 0:\n xweights = 1 - ((self.x[i] - (self.dx * xi[k]))/self.dx)\n else:\n xweights = 1 - abs((self.x[i] - (self.dx * xi[k]))/self.dx)\n totweight = volfact * (yweights) * (xweights)\n array_of_newsizes.append([(self.dx * xi[k]),(self.dy * yi[k]), totweight])\n array_of_newsizes = np.asarray(array_of_newsizes)\n return (array_of_newsizes)\n\n #Finds the places where two arrays overlap in both x and y\n def findliketerms(self,list1,list2):\n liketerms = []\n for i in range(0,len(list1)):\n for k in range(i+1,len(list2)):\n if list1[i][0] == list2[k][0] and list1[i][1] == list2[k][1]:\n liketerms.append([i,k])\n liketerms = np.asarray(liketerms)\n liketerms = np.resize(liketerms,(len(liketerms),2))\n return liketerms\n\n def findliketerms2(self,list1,list2):\n liketerms = []\n for i in range(0,len(list1)):\n for k in range(0,len(list2)):\n if list1[i][0] == list2[k][0] and list1[i][1] == list2[k][1]:\n liketerms.append([i,k])\n liketerms = np.asarray(liketerms)\n liketerms = np.resize(liketerms,(len(liketerms),2))\n return liketerms\n\n #Combines the vorticcity strengths of the same x and y coordinate\n def combineliketerms(self, array_of_newsizes, liketerms, interpolated_points):\n for k in range(0,len(liketerms)):\n array_of_newsizes[liketerms[k][0]] =+ array_of_newsizes[liketerms[k][0]] + array_of_newsizes[liketerms[k][1]]\n array_of_newsizes = np.delete(array_of_newsizes, ((liketerms[:,1]).tolist()), axis = 0)\n interpolated_points = np.delete(interpolated_points, ((liketerms[:,1]).tolist()), axis = 0)\n data = np.zeros((len(interpolated_points),3))\n data[:,0] = interpolated_points[:,0] * self.dx\n data[:,1] = 
interpolated_points[:,1] * self.dy\n data[:,2] = array_of_newsizes\n if np.any(data) == 0:\n return array_of_newsizes\n else:\n return data\n\n #creates an x-grid & runs imported Poisson Solver\n #RHS = Right Hand Side\n def RHSstart(self, xlengthstart, xlengthstop, combinedliketerms):\n x_grid = np.linspace(xlengthstart/self.dx, xlengthstop/self.dx, (N+1))\n x_grid = x_grid[:-1]\n #print(x_grid*self.dx)\n gridSize = int(len(x_grid))\n RHSArray = np.zeros((int(gridSize), int(gridSize)))\n\n for item in combinedliketerms:\n #print(item)\n try:\n RHSArray[int(item[0]/self.dx), int(item[1]/self.dx)] = item[2]\n except IndexError:\n print(\"Error: Particle was deleted because it was indexed outside of accepted range; results are inconclusive.\")\n pass\n #print(RHSArray)\n PS = ConvSolver.ConvSolver(x_grid*self.dx, False)\n solution = PS.solve(RHSArray)\n return solution\n\n #Takes the solution from the poisson solver and run potential difference\n def potentialcalculator(self, points2):\n potentials = []\n for i in range(1,len(points2)-1):\n for k in range(1,len(points2)-1):\n ycentraldiff = ((points2[i, k+1]) - (points2[i, k-1])) / (2 * self.dx)\n xcentraldiff = ((points2[i+1, k]) - (points2[i-1, k])) / (2 * self.dy)\n potentials.append(([self.dx * i, self.dy * k , ycentraldiff, -xcentraldiff]))\n potentials = np.asarray(potentials)\n potentials = np.resize(potentials,(len(potentials),4))\n #plt.quiver(potentials[:,0], potentials[:,1], potentials[:,2], potentials[:,3])\n return potentials\n\n #Field Interpolation calculation\n def weight_deposition2(self,x,y,xi,yi,xweight,yweight):\n if yi - y > 0:\n yweights = 1.0 - ((yi - y)/self.dy)\n else:\n yweights = 1.0 - abs((yi - y)/self.dy)\n if xi - x > 0:\n xweights = 1.0 - ((xi - x)/self.dx)\n else:\n xweights = 1.0 - abs((xi - x)/self.dx)\n xvelocity = xweight * (yweights) * (xweights)\n yvelocity = yweight * (yweights) * (xweights)\n return (xvelocity, yvelocity)\n\n #Looks for the interpolated velocity for each correspoiding point to interpolate back\n def interpolation_backtopoints(self,newdata):\n velocities_on_grid = []\n for i in range(0,len(self.points)):\n for k in range(0, len(newdata)):\n #print(newdata[k][0], math.floor(self.points[i][0]/self.dx))\n if (newdata[k][0] == math.floor(self.points[i][0]/self.dx)*self.dx and newdata[k][1] == math.floor(self.points[i][1]/self.dy)*self.dy):\n #print(i,k,\"t1\")\n velocities_on_grid.append([i,k,self.weight_deposition2(self.x[i],self.y[i],newdata[k][0], newdata[k][1], newdata[k][2], newdata[k][3])])\n if (newdata[k][0] == (math.floor(self.points[i][0]/self.dx) + 1)*self.dx and newdata[k][1] == math.floor(self.points[i][1]/self.dy)*self.dy):\n #print(i,k,\"t2\")\n velocities_on_grid.append([i,k,self.weight_deposition2(self.x[i],self.y[i],newdata[k][0], newdata[k][1], newdata[k][2], newdata[k][3])])\n if (newdata[k][0] == (math.floor(self.points[i][0]/self.dx) +1)*self.dx and newdata[k][1] == (math.floor(self.points[i][1]/self.dy) + 1)*self.dy):\n #print(i,k,\"t3\")\n velocities_on_grid.append([i,k,self.weight_deposition2(self.x[i],self.y[i],newdata[k][0], newdata[k][1], newdata[k][2], newdata[k][3])])\n if (newdata[k][0] == math.floor(self.points[i][0]/self.dx)*self.dx and newdata[k][1] == (math.floor(self.points[i][1]/self.dy) + 1)*self.dy):\n #print(i,k,\"t4\")\n velocities_on_grid.append([i,k,self.weight_deposition2(self.x[i],self.y[i],newdata[k][0], newdata[k][1], newdata[k][2], newdata[k][3])])\n velocities_on_grid = np.array(velocities_on_grid)\n #velocities_on_grid = 
np.resize(velocities_on_grid,(len(velocities_on_grid),4))\n return velocities_on_grid\n\n #Combines velocities based on the points they belong to\n def combining_velocities(self,velocities_on_grid):\n empty = np.zeros((len(self.points),3))\n for i in range(0,len(velocities_on_grid)):\n for k in range(0,len(velocities_on_grid)):\n #print(i, velocities_on_grid[k][0])\n if i == velocities_on_grid[k][0]:\n #print(i, velocities_on_grid[k])\n empty[i][0] = i\n empty[i][1] =+ empty[i][1] + velocities_on_grid[k][2][0]\n empty[i][2] =+ empty[i][2] + velocities_on_grid[k][2][1]\n return(empty[:,1:3])\n\n#INITIAL CONDITIONS\n#X and Y grid size\nxlengthstop = 1\nylengthstop = 1\n\nxlengthstart = 0\nylengthstart = 0\n\nxlength = xlengthstop - xlengthstart\nylength = ylengthstop - ylengthstart\n\nstartvalue = 0\nparticles = 3\n\n#Grid criteria\nN = 2**5\ndx = (xlengthstop - xlengthstart)/N\ndy = (ylengthstop - ylengthstart)/N\n\nweightmin = -1\nweightmax = 1\n\n#CREATES Random points to run the test on for if you want to run this method on random points\npoints = np.zeros((particles,3))\npoints[:, 0] = np.array(xlengthstop - xlengthstart)*np.random.random_sample(particles,) + xlengthstart\npoints[:, 1] = np.array(ylengthstop - ylengthstart)*np.random.random_sample(particles,) + ylengthstart\npoints[:, 2] = (weightmax-weightmin)*np.random.random_sample(particles)+weightmin\n\n#Function that runs everything\ndef k2(points,dx,dy):\n initial_particles_with_sizes = VPIC(points,dx,dy)\n interpalations = initial_particles_with_sizes.interpolate()\n depositions = initial_particles_with_sizes.weight_deposition(interpalations[:,0], interpalations[:,1])\n liketerms = initial_particles_with_sizes.findliketerms(interpalations,interpalations)\n updated_data = initial_particles_with_sizes.combineliketerms(depositions[:,2],liketerms,interpalations)\n RHS_calculations = initial_particles_with_sizes.RHSstart(xlengthstart, xlengthstop, updated_data)\n potentials = initial_particles_with_sizes.potentialcalculator(RHS_calculations)\n likterms2 = initial_particles_with_sizes.findliketerms2(updated_data[:,0:2],potentials)\n new_data = potentials[likterms2[:,1].tolist()]\n velocities_on_grid = initial_particles_with_sizes.interpolation_backtopoints(new_data)\n return initial_particles_with_sizes.combining_velocities(velocities_on_grid)\n\n#RK2 Solver\ndef RK2(points,time,N):\n np.set_printoptions(suppress=False)\n h = 140*0.025/N\n numTimeSteps = int(time/h)\n everything = []\n for i in range(0,numTimeSteps):\n pointsnewx = []\n pointsnewy = []\n tpoints = copy.deepcopy(points)\n #print(tpoints,\"test\")\n F = h * k2(points,dx,dy)\n #print(F,\"F\")\n for i in range(0,len(points)):\n k1x = F[i][0]\n k1y = F[i][1]\n tpoints[i][0] = tpoints[i][0] + (.5 * k1x)\n tpoints[i][1] = tpoints[i][1] + (.5 * k1y)\n F2 = h * k2(tpoints, dx, dy)\n #print(F2, \"F2\")\n for i in range(0,len(tpoints)):\n k2x = F2[i][0]\n k2y = F2[i][1]\n pointsnewx.append(points[i][0] + k2x)\n pointsnewy.append(points[i][1] + k2y)\n pointsnewx = np.asarray(pointsnewx)\n pointsnewy = np.asarray(pointsnewy)\n pointsnewx = np.resize(pointsnewx,(len(pointsnewx),1))\n pointsnewy = np.resize(pointsnewy,(len(pointsnewy),1))\n points[:,0] = pointsnewx[:,0]\n points[:,1] = pointsnewy[:,0]\n everything.append(copy.deepcopy(points))\n everything = np.asarray(everything)\n everything = np.concatenate(everything)\n return everything\n\n\n#Test 1\nfigure(num=1, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\npoints_test = np.array([[0.5, 0.5, (1/(dx*dy))]])\ntest = 
RK2(points_test, 10, N)\nplt.scatter(test[:,0], test[:,1])\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Particle Position as a function of Time (Test 1)')\n\n#Test 2\nfigure(num=2, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\npoints_test2 = np.array([[0.25, 0.5, 0],[0.5,0.5,1/(dx*dy)]])\ntest2 = RK2(points_test2, 10, N)\nplt.scatter(test2[:,0], test2[:,1])\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Particle Position as a function of Time (Test 2)')\n\n#Test 3\nfigure(num=3, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\npoints_test3 = np.array([[0.5, 0.5,1/(dx*dy)],[0.75,0.5,1/(dx*dy)]])\ntest3 = RK2(points_test3, 10, N*2)\nplt.scatter(test3[:,0], test3[:,1])\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Particle Position as a function of Time (Test 3)')\n\n# Test 4A Set up\nnp.set_printoptions(suppress=False)\nNp = int(1/(dx/2))\nhp = 1/Np\ntotal_data = []\nfor i in range(0,Np):\n for k in range(0,Np):\n if abs(np.sqrt((i*dx - 0.5)**2 + (k*dy - 0.375)**2))<= .12 or abs(np.sqrt((i*dx - 0.5)**2 + (k*dy - 0.625)**2)) <= .12:\n total_data.append([dx *i,dy * k,((hp**2)/(dx*dy))])\n else:\n total_data.append([dx * i, dy * k,0])\ntotal_data = np.array(total_data)\nnew_data = []\nfor item in total_data:\n if item[2] != 0:\n new_data.append([item[0],item[1],item[2]])\nnew_data = np.array(new_data)\nfigure(num=4, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\nplt.scatter(new_data[:,0], new_data[:,1])\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Inital Particle Position (Test 4A)')\n\n#Test 4A\nfigure(num=5, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\ntest4 = RK2(new_data, 12.5, N)\nplt.scatter(test4[:,0], test4[:,1],0.1)\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Particle Position as a function of Time (Test 4A)')\n\n# Test 4B Set up\nnp.set_printoptions(suppress=True)\nNp = int(1/(dx/2))\nhp = 1/Np\nrbdry = 1/4\ntotal_data = []\nfor i in range(0,Np):\n for k in range(0,Np):\n if abs(np.sqrt(((i*dx) - 0.5)**2 + ((k*dy) - 0.5)**2)) <= rbdry:\n total_data.append([dx *i,dy * k,((rbdry - abs((i*dx - 0.5)**2 + (k*dy - 0.5)**2))**7)*100000])\n else:\n total_data.append([dx * i, dy * k,0])\ntotal_data = np.array(total_data)\n\nnew_data2 = []\nfor item in total_data:\n if item[2] != 0:\n new_data2.append([item[0],item[1],item[2]])\nnew_data2 = np.array(new_data2)\nfigure(num=6, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\nplt.scatter(new_data2[:,0], new_data2[:,1] , new_data2[:,2])\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Inital Particle Position (Test 4B)')\n\n#Test 4B\nfigure(num=7, figsize=(6, 6), dpi=80, facecolor='w', edgecolor='k')\ntest5 = RK2(new_data2, 12.5, N)\nplt.scatter(test5[:,0], test5[:,1],0.1)\nplt.xlabel('X Position')\nplt.ylabel('Y Position')\nplt.title('Particle Position as a function of Time (Test 4B)')\nplt.show()\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.asarray",
"numpy.linspace",
"numpy.set_printoptions",
"numpy.random.random_sample",
"numpy.concatenate",
"matplotlib.pyplot.ylabel",
"numpy.any",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
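DAkinyemi/Vortex_in_Cell_Method's VPIC.weight_deposition spreads each particle's vorticity onto the four surrounding grid nodes with bilinear (cloud-in-cell) weights. A minimal standalone sketch of that deposition for a single particle, with made-up grid spacing and position:

    dx = dy = 0.25                      # made-up grid spacing
    x, y, strength = 0.6, 0.3, 1.0      # made-up particle position and vorticity strength

    i, j = int(x // dx), int(y // dy)   # lower-left grid node of the cell containing the particle
    fx, fy = x / dx - i, y / dy - j     # fractional offsets inside that cell

    weights = {
        (i,     j    ): (1 - fx) * (1 - fy),   # bottom-left node
        (i + 1, j    ): fx * (1 - fy),         # bottom-right node
        (i + 1, j + 1): fx * fy,               # top-right node
        (i,     j + 1): (1 - fx) * fy,         # top-left node
    }
    deposited = {node: strength * w for node, w in weights.items()}
    assert abs(sum(weights.values()) - 1.0) < 1e-12   # the four weights always sum to one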
muhlbach/econometric_causality | [
"372e05bec8d229f0180c859a6fb5d3d63e7bf91a"
] | [
"econometric_causality/selection_on_observables/matching.py"
] | [
"#------------------------------------------------------------------------------\n# Libraries\n#------------------------------------------------------------------------------\n# Standard\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.linear_model import LogisticRegression\n\n# User\nfrom base.base_estimator import BaseCateEstimator\nfrom utils.exceptions import CateError\nfrom utils.sanity_check import check_propensity_score_estimator\n#------------------------------------------------------------------------------\n# Treatment Effect Estimators\n#------------------------------------------------------------------------------\nclass MatchingOnInput(BaseCateEstimator):\n \"\"\"\n This class estimates treatment effects based on matching on inputs\n \"\"\"\n # --------------------\n # Constructor function\n # --------------------\n def __init__(self,\n verbose=False\n ):\n # Initialize inputs\n self.verbose = verbose\n\n # --------------------\n # Class variables\n # --------------------\n\n # --------------------\n # Private functions\n # --------------------\n\n # --------------------\n # Public functions\n # --------------------\n def fit(self,Y,W,I):\n \"\"\"\n Parameters\n ----------\n Y : pd.Series\n Outcome variable\n W : pd.Series\n Treatment variable\n I : pd.Dataframe\n Input variable to match on\n \"\"\"\n # Preprocess data\n super().preprocess_data(Y=Y,W=W)\n \n # Check input type\n if isinstance(I, pd.Series):\n I = I.to_frame()\n\n # Instantiate object to identofy the nearest neighbors\n nearestneighbors = NearestNeighbors(n_neighbors=1,\n radius=1.0,\n algorithm='auto',\n leaf_size=30,\n metric='minkowski',\n p=2,\n metric_params=None,\n n_jobs=None)\n \n # Initialize matched outcomes\n Y_matched = pd.Series(index=Y.index, dtype=Y.dtype, name=Y.name)\n \n # For each treatment arm, find the nearest neighbor in the other treatment arm \n for w in self.unique_treatments:\n \n # Mask treatment w\n mask = self.mask_treatment[w]\n \n # Fit on treatment w\n nearestneighbors.fit(X=I[mask])\n \n # Find neighbors among treatment ~w\n neigbors_idx = nearestneighbors.kneighbors(X=I[~mask], return_distance=False).flatten()\n \n # Use outcomes values that match based on X\n Y_matched[~mask] = Y.loc[neigbors_idx].values\n\n self.mean_residualized_outcome_per_treatment = {} \n for w in self.unique_treatments:\n self.mean_residualized_outcome_per_treatment[w] = ((W==w) * (Y - Y_matched)).mean()\n \n return self\n\n def calculate_heterogeneous_treatment_effect(self):\n raise CateError\n \n def calculate_average_treatment_effect(self,w0=None,w1=None):\n \n if all([w is None for w in [w0,w1]]):\n w0 = self.unique_treatments[0]\n w1 = self.unique_treatments[1]\n \n # Compute the ATE as the difference in sample means of the outcome between group1 (treated) and group2 (control)\n tau = self.mean_residualized_outcome_per_treatment[w1] - self.mean_residualized_outcome_per_treatment[w0]\n \n tau_obj = {\"ate\":tau}\n \n return tau_obj\n\n\n\n#------------------------------------------------------------------------------\n# Treatment Effect Estimators\n#------------------------------------------------------------------------------\nclass MatchingOnCovariates(MatchingOnInput):\n \"\"\"\n This class estimates treatment effects based on matching on covariates\n \"\"\"\n # --------------------\n # Constructor function\n # --------------------\n def __init__(self,\n verbose=False\n ):\n # Initialize inputs\n self.verbose = verbose\n super().__init__(verbose=self.verbose)\n\n 
# --------------------\n # Class variables\n # --------------------\n\n # --------------------\n # Private functions\n # --------------------\n\n # --------------------\n # Public functions\n # --------------------\n def fit(self,Y,W,X):\n\n super().fit(Y=Y, W=W, I=X)\n \n return self\n\n def calculate_heterogeneous_treatment_effect(self):\n super().calculate_heterogeneous_treatment_effect()\n \n def calculate_average_treatment_effect(self,w0=None,w1=None):\n return super().calculate_average_treatment_effect()\n \n\nclass MatchingOnPropensityScore(MatchingOnInput):\n \"\"\"\n This class estimates treatment effects based on matching on covariates\n \"\"\"\n # --------------------\n # Constructor function\n # --------------------\n def __init__(self,\n verbose=False\n ):\n # Initialize inputs\n self.verbose = verbose\n super().__init__(verbose=self.verbose)\n\n # --------------------\n # Class variables\n # --------------------\n\n # --------------------\n # Private functions\n # --------------------\n\n # --------------------\n # Public functions\n # --------------------\n def fit(self,Y,W,X,propensity_score_estimator=LogisticRegression(penalty='l2',\n dual=False,\n tol=0.0001,\n C=1.0,\n fit_intercept=True,\n intercept_scaling=1,\n class_weight=None,\n random_state=None,\n solver='lbfgs',\n max_iter=100,\n multi_class='auto',\n verbose=0,\n warm_start=False,\n n_jobs=None,\n l1_ratio=None)): \n \n # Estimate propensity scores\n check_propensity_score_estimator(estimator=propensity_score_estimator)\n propensity_score_estimator.fit(X=X,y=W) \n \n # Some sklearn modules with have 'predict_proba' as a method. Try this before defaulting to 'predict'\n try:\n propensity_score = propensity_score_estimator.predict_proba(X=X)[:,-1]\n except AttributeError as attribute_error_message:\n if self.verbose: \n print(f\"\"\"\n AttributeError caught when calling 'predict_proba' on {type(propensity_score_estimator).__name__}.\n Defaulting to 'predict'.\n The original error message was: \n {str(attribute_error_message)}\n \"\"\")\n propensity_score = propensity_score_estimator.predict(X=X)\n \n # Transform to series\n propensity_score = pd.Series(propensity_score)\n \n super().fit(Y=Y, W=W, I=propensity_score)\n \n return self\n\n def calculate_heterogeneous_treatment_effect(self):\n super().calculate_heterogeneous_treatment_effect()\n \n def calculate_average_treatment_effect(self,w0=None,w1=None):\n return super().calculate_average_treatment_effect()\n \n \n "
] | [
[
"sklearn.neighbors.NearestNeighbors",
"pandas.Series",
"sklearn.linear_model.LogisticRegression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
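muhlbach/econometric_causality's MatchingOnInput pairs every unit with its nearest neighbour in the opposite treatment arm and takes the difference of the mean matched-outcome residuals. A self-contained sketch of that matching step on synthetic data (the package's own base classes are deliberately not imported here):

    import numpy as np
    import pandas as pd
    from sklearn.neighbors import NearestNeighbors

    rng = np.random.default_rng(0)
    n = 200
    W = pd.Series(rng.integers(0, 2, n))                        # binary treatment
    X = pd.DataFrame({"x": rng.normal(size=n)})                 # covariate to match on
    Y = pd.Series(2.0 * W + X["x"] + 0.1 * rng.normal(size=n))  # outcome, treatment effect = 2

    Y_matched = pd.Series(index=Y.index, dtype=float)
    for w in (0, 1):
        mask = (W == w).values
        nn = NearestNeighbors(n_neighbors=1).fit(X[mask])
        idx = nn.kneighbors(X[~mask], return_distance=False).flatten()
        # every unit in the other arm borrows the outcome of its nearest neighbour in arm w
        Y_matched[~mask] = Y[mask].values[idx]

    ate = ((W == 1) * (Y - Y_matched)).mean() - ((W == 0) * (Y - Y_matched)).mean()
    print(round(ate, 2))   # close to 2 when covariate overlap between arms is good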
tegg89/categorical_dqn | [
"647c24ee4734450551fc446d3225f57dadd82d48"
] | [
"model.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CategoricalDQN(nn.Module):\n\tdef __init__(self, num_inputs, num_actions, args):\n\t\tsuper(CategoricalDQN, self).__init__()\n\t\tself.num_inputs = num_inputs\n\t\tself.num_actions = num_actions\n\t\tself.num_atoms = args.atom\n\t\tself.vmax = args.vmax\n\t\tself.vmin = args.vmin\n\n\t\tself.linear1 = nn.Linear(num_inputs, args.hidden_size//4)\n\t\tself.linear2 = nn.Linear(args.hidden_size//4, args.hidden_size)\n\t\tself.linear3 = nn.Linear(args.hidden_size, num_actions * args.atom)\n\n\tdef forward(self, input):\n\t\tx = F.relu(self.linear1(input))\n\t\tx = F.relu(self.linear2(x))\n\t\tx = F.relu(self.linear3(x))\n\t\tx = F.softmax(x.view(-1, self.num_atoms)).view(-1, self.num_actions, self.num_atoms)\n\t\treturn x\n\n\tdef act(self, state):\n\t\twith torch.no_grad():\n\t\t\tstate = torch.tensor(state, dtype=torch.float).unsqueeze(0)\n\t\tdist = self.forward(state).data.cpu() # [1, 2, 51]\n\t\tdist = dist * torch.linspace(self.vmin, self.vmax, self.num_atoms)\n\t\taction = dist.sum(2).max(1)[1].numpy()[0]\n\t\treturn action\n"
] | [
[
"torch.nn.Linear",
"torch.linspace",
"torch.no_grad",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
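A usage sketch for tegg89/categorical_dqn's model.py, not part of the record: the args fields mirror what the constructor reads (atom, vmax, vmin, hidden_size), but the values and the 4-input / 2-action sizes are arbitrary examples:

    from types import SimpleNamespace
    import numpy as np
    import torch

    from model import CategoricalDQN   # the record's model.py

    args = SimpleNamespace(atom=51, vmax=10.0, vmin=-10.0, hidden_size=128)
    net = CategoricalDQN(num_inputs=4, num_actions=2, args=args)

    state = np.zeros(4, dtype=np.float32)
    dist = net(torch.tensor(state).unsqueeze(0))   # shape (1, 2, 51): per-action distribution over atoms
    action = net.act(state)                        # greedy action under the distribution's expected value
    print(dist.shape, action)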
stephwag/coco-tools | [
"7b6030830c6f1dc0ce254c0e725cdc3847c255b1"
] | [
"annotate.py"
] | [
"import sys\nfrom PIL import Image\nimport numpy as np\nimport json\nimport cv2\nimport imutils\nfrom config import *\n\ndef annotation_obj(image_name, image_id, min_threshold=10, max_threshold=255, category_id=1, show_annotation=False, is_crowd=False):\n image = cv2.imread(\"{}/{}\".format(ANNOTATION_DIR, image_name))\n imgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n ret,thresh = cv2.threshold(imgray,min_threshold,max_threshold,cv2.THRESH_BINARY)\n height, width, channels = image.shape\n image_info = {\n \"id\" : image_id,\n \"file_name\" : image_name,\n \"height\" : height,\n \"width\" : width\n }\n\n # Draws the mask, useful for debugging\n if show_annotation:\n cv2.imshow('threshold',thresh)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n count = 1\n results = []\n for c in cnts:\n x, y, w, h = cv2.boundingRect(c)\n results.append(\n {\n \"segmentation\": np.array(c).ravel().tolist(),\n \"area\": w * h,\n \"iscrowd\": (\"crowd\" in image_name) or is_crowd,\n \"image_id\": image_id,\n \"bbox\": (x, y, w, h),\n \"category_id\": category_id,\n \"id\": count\n }\n )\n count += 1\n return (image_info, results)\n\n\nif __name__==\"__main__\":\n anno_arr = []\n image_arr = []\n\n with open(\"{}/base.json\".format(DATASET_DIR), 'r') as f:\n data = json.loads(f.read())\n\n # Setup dataset.json with image and annotation data\n for i in range(1, 9):\n for category in data[\"categories\"]:\n image_info, anno_info = annotation_obj(\"{}_{}.jpg\".format(i, category[\"name\"]), i, category_id=1, show_annotation=False)\n image_arr.append(image_info)\n anno_arr.append(anno_info)\n\n with open(\"{}/dataset.json\".format(DATASET_DIR), 'w') as f:\n data[\"annotations\"] = anno_arr\n data[\"images\"] = image_arr\n anno_str = json.dumps(data)\n f.write(anno_str)\n\n\n\n\n\n\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
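stephwag/coco-tools' annotation_obj thresholds an image, extracts contours, and turns each contour into a COCO-style annotation dict. A sketch of the same path, not part of the record, run on a synthetic mask instead of a file under ANNOTATION_DIR:

    import cv2
    import imutils
    import numpy as np

    mask = np.zeros((64, 64), dtype=np.uint8)
    cv2.rectangle(mask, (10, 20), (30, 40), 255, thickness=-1)   # one filled white blob

    _, thresh = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
    cnts = imutils.grab_contours(
        cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE))

    x, y, w, h = cv2.boundingRect(cnts[0])
    annotation = {
        "segmentation": np.array(cnts[0]).ravel().tolist(),  # flattened polygon, as in annotation_obj
        "area": w * h,
        "iscrowd": False,
        "image_id": 1,
        "bbox": (x, y, w, h),
        "category_id": 1,
        "id": 1,
    }
    print(annotation["bbox"])   # (10, 20, 21, 21)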
taodav/novelty-search-repr-space | [
"461691104dc3a72b9b4f7ec040b71d95eec434b1"
] | [
"nsrl/helper/plot.py"
] | [
"import os\nimport json\nimport shutil\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport visdom\n\nfrom plotly import tools\nimport plotly.graph_objs as go\n\n\nclass Plotter(object):\n def __init__(self, experiment_dir, env_name=\"default\", host=None, port=8097, offline=False):\n if not host:\n host = 'localhost'\n print(\"connecting to host: \" + host + ':' + str(port))\n\n log_file = os.path.join(experiment_dir, \"plot\")\n\n self.env_name = env_name\n\n self.vis = visdom.Visdom(log_to_filename=log_file,\n offline=offline,\n server='http://' + host, port=port)\n self.plots = {}\n\n def plot_dict(self, x, value_dict):\n for k, v in value_dict.items():\n self.plot(k, x[:len(v)], v, k, ymin=0)\n\n def plot(self, var_name, x, y, title_name=\"Default Plot\",\n ymin=None, ymax=None, xmin=None, xmax=None, markers=False,\n linecolor=None, name=\"default\"):\n if var_name not in self.plots:\n self.plots[var_name] = self.vis.line(X=x, Y=y, env=self.env_name, name=name,\n opts=dict(\n title=title_name,\n xlabel='training_steps',\n ylabel=var_name,\n xtickmin=xmin,\n xtickmax=xmax,\n ytickmin=ymin,\n ytickmax=ymax,\n markers=markers,\n linecolor=linecolor\n ))\n else:\n self.vis.line(X=x, Y=y,\n env=self.env_name, win=self.plots[var_name],\n update='append', name=name,\n opts=dict(\n title=title_name,\n xlabel='training_steps',\n ylabel=var_name,\n xtickmin=xmin,\n xtickmax=xmax,\n ytickmin=ymin,\n ytickmax=ymax,\n markers=markers,\n linecolor=linecolor\n ))\n\n def plot_text(self, var_name, text):\n if var_name not in self.plots:\n self.plots[var_name] = self.vis.text(text, env=self.env_name)\n else:\n self.vis.text(text,\n env=self.env_name, win=self.plots[var_name])\n\n def plot_mpl_fig(self, var_name, fig, title_name='Default MPL plot', replace=False):\n fig = tools.mpl_to_plotly(fig)\n fig['layout'].update(width=650, height=500, title=title_name, showlegend=False)\n\n if not replace:\n if var_name not in self.plots or not replace:\n self.plots[var_name] = self.vis.plotlyplot(fig, env=self.env_name)\n else:\n self.vis.plotlyplot(fig, env=self.env_name, win=self.plots[var_name])\n else:\n self.plots[var_name] = self.vis.plotlyplot(fig, env=self.env_name)\n\n self.vis.update_window_opts(win=self.plots[var_name],\n opts=dict(\n width=650,\n height=500\n ))\n\n def plot_mpl_plt(self, var_name, plt, title_name='Default MPL plot', replace=False):\n if not replace:\n if var_name not in self.plots:\n self.plots[var_name] = self.vis.matplot(plt, env=self.env_name, opts=dict(\n title_name=title_name\n ))\n else:\n self.vis.matplot(plt, win=self.plots[var_name], env=self.env_name, opts=dict(\n title_name=title_name\n ))\n else:\n self.vis.matplot(plt, env=self.env_name, opts=dict(\n title_name=title_name\n ))\n plt.close()\n\n def fig2data(self, fig):\n \"\"\"\n @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it\n @param fig a matplotlib figure\n @return a numpy 3D array of RGBA values\n \"\"\"\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w, h = fig.canvas.get_width_height()\n buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)\n buf.shape = (w, h, 4)\n\n # canvas.tostring_argb give pixmap in ARGB mode. 
Roll the ALPHA channel to have it in RGBA mode\n # buf = np.roll(buf, 3, axis=2)\n return buf\n\n def plot_quiver(self, var_name, up, down, left, right, title_name=\"default\"):\n x = right - left\n y = up - down\n if var_name not in self.plots:\n self.plots[var_name] = self.vis.quiver(X=x, Y=y, env=self.env_name, opts=dict(\n title=title_name,\n normalize=1,\n layoutopts=dict(\n plotly=dict(\n yaxis=dict(autorange='reversed')\n ))))\n else:\n self.vis.quiver(X=x, Y=y, win=self.plots[var_name], env=self.env_name, opts=dict(\n title=title_name\n ))\n\n\n def plot_scatter(self, var_name, x, y=None, title_name=\"default\"):\n self.plots[var_name] = self.vis.scatter(x, y, env=self.env_name, opts=dict(\n title=title_name,\n xtickmin=0,\n xtickmax=16,\n ytickmin=0,\n ytickmax=16,\n ztickmin=0\n ))\n\n def plot_heatmap(self, var_name, x, title_name=\"default\"):\n if var_name not in self.plots:\n self.plots[var_name] = self.vis.heatmap(X=x, env=self.env_name, opts=dict(\n colormap='Viridis',\n title=title_name\n ))\n else:\n self.vis.heatmap(X=x, win=self.plots[var_name], env=self.env_name, opts=dict(\n title=title_name\n ))\n\n def plot_mapping_heatmap(self, var_name, heatmaps, title_name='default', cols=4):\n nrows = (len(heatmaps) + (cols - 1)) // cols\n fig = tools.make_subplots(rows=nrows, cols=cols, subplot_titles=[title for title, _ in heatmaps])\n for i, (title, x) in enumerate(heatmaps, 1):\n hmap = go.Heatmap(z=x, colorscale='Viridis')\n col = i % cols\n col = col if col != 0 else cols\n\n fig.append_trace(hmap, (i + (cols - 1)) // cols, col)\n\n fig['layout'].update(width=300 * cols, height=400 * nrows, title=title_name, showlegend=False)\n for e in fig.layout:\n if 'yaxis' in e:\n fig['layout'][e].update(autorange='reversed')\n self.plots[var_name] = self.vis.plotlyplot(fig, env=self.env_name)\n self.vis.update_window_opts(win=self.plots[var_name],\n opts=dict(\n width=300 * cols,\n height=400 * nrows\n ))\n\n def plot_plotly_fig(self, var_name, fig, title_name='default'):\n layout = go.Layout(title=title_name)\n fig = go.Figure(data=fig['data'], layout=layout)\n self.plots[var_name] = self.vis.plotlyplot(fig, env=self.env_name)\n\n def plot_image(self, var_name, image, title_name=\"default\"):\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=0)\n\n if var_name not in self.plots:\n self.plots[var_name] = self.vis.image(image, env=self.env_name, opts=dict(\n title=title_name\n ))\n else:\n self.vis.image(image, env=self.env_name, win=self.plots[var_name])\n\n def plot_video(self, var_name, video):\n self.plots[var_name] = self.vis.video(video, env=self.env_name)\n\ndef scatter_3d(x, y, z, color='blue'):\n trace = go.Scatter3d(\n x=x, y=y, z=z, mode='markers',\n marker=dict(size=4, symbol=\"cross\", color=color)\n )\n return trace\n\ndef scatter_3d_multi_color(point_list):\n \"\"\"\n\n :param point_list: list of dicts {x: v, y: v, z:v, color: \"color\"}\n :return:\n \"\"\"\n data = [scatter_3d(d['x'], d['y'], d['z'], color=d['color']) for d in point_list]\n layout = go.Layout(\n margin=dict(l=0, r=0, b=0, t=0)\n )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\ndef create_loss_figure(loss_dict, x, n, title='default'):\n nrows = len(loss_dict.items()) // 2\n if len(loss_dict.items()) > nrows:\n nrows += 1\n fig, axes = plt.subplots(nrows=nrows, ncols=2, figsize=(8, 12))\n plt.tight_layout(pad=0.4, w_pad=1.0, h_pad=1.0)\n for (key, value), ax in zip(loss_dict.items(), axes.flatten()):\n graph_data(ax, key + \" \" + str(n),\n x,\n value)\n\n file_path = 
os.path.join(os.getcwd(), \"plots\", title + str(n) + '.png')\n\n if os.path.exists(file_path):\n os.remove(file_path)\n plt.savefig(file_path, bbox_inches='tight')\n plt.clf()\n\ndef graph_data(ax, title, x, y):\n ax.plot(x, y, lw=2)\n ax.set_title(title)\n\ndef replay_plot(fname, host='localhost', port=8098):\n vis = visdom.Visdom(server='http://' + host, port=port)\n vis.replay_log(fname)\n return vis\n\ndef get_visdom_data(data, titles):\n results = {}\n inv_titles = {value: key for key, value in titles.items()}\n for window_name, content in data['jsons'].items():\n if 'title' in content and content['title'] in titles.values():\n results[inv_titles[content['title']]] = content['content']['data']\n return results\n\ndef parse_visdom_plot_dir(plot_dir, titles, trials=1):\n \"\"\"\n Parses a .visdom directory that includes saved plots.\n :param plot_dir: directory path to plot\n \"\"\"\n results = {}\n for fname in os.listdir(plot_dir)[:trials]:\n if fname.endswith('.json'):\n with open(os.path.join(plot_dir, fname)) as json_file:\n data = json.load(json_file)\n found = get_visdom_data(data, titles)\n results[fname] = found\n return results\n\ndef plot_offline(experiments_dir):\n \"\"\"\n For offline plotting usage.\n :param experiments_dir: directory with experiment folders inside\n :return: results from parse_func.\n \"\"\"\n assert os.path.isdir(experiments_dir)\n for experiment_dir in os.listdir(experiments_dir):\n plot_file = os.path.join(experiments_dir, experiment_dir, 'plot')\n json_plot_file = os.path.join(os.path.expanduser('~'), '.visdom', experiment_dir + '.json')\n if not os.path.exists(json_plot_file):\n replay_plot(plot_file)\n # shutil.move(json_plot_file, experiments_dir)\n\n\ndef group_results(mf):\n \"\"\"\n Groups results by their keys.\n :param mf: visdom JSON\n :return: grouped results\n \"\"\"\n mf_plots = {k: [] for k in list(mf.values())[0].keys()}\n for fname, res in mf.items():\n for k in mf_plots.keys():\n mf_plots[k].append(res[k][0]['y'][:1000])\n return mf_plots\n\n\ndef plot_means_with_std(x, explr_fac, visited_ratios, title, fig, ax1, ax2, legends, keys, color='orange'):\n avg_mf_exploration_factor = np.average(explr_fac, axis=0)\n\n mf1, = ax1.plot(x, avg_mf_exploration_factor, color=color)\n\n avg_mf_ratios_visited = np.average(visited_ratios, axis=0)\n mf2, = ax2.plot(x, avg_mf_ratios_visited, color=color)\n\n # y_mins_ef = explr_fac.min(axis=0)\n # y_max_ef = explr_fac.max(axis=0)\n y_mins_ef = avg_mf_exploration_factor - np.std(explr_fac, axis=0)\n y_max_ef = avg_mf_exploration_factor + np.std(explr_fac, axis=0)\n ax1.fill_between(x, y_mins_ef, y_max_ef, color=color, alpha=0.2)\n\n y_mins_rv = avg_mf_ratios_visited - np.std(visited_ratios, axis=0)\n y_max_rv = avg_mf_ratios_visited + np.std(visited_ratios, axis=0)\n ax2.fill_between(x, y_mins_rv, y_max_rv, color=color, alpha=0.2)\n\n legends.append(mf1)\n keys.append(title)\n\n return mf1, legends, keys\n\n\ndef exploration_plots(x, mf, fig, ax1, ax2, legends, keys, title='default', color='orange'):\n mf_plots = group_results(mf)\n\n exp_fac_plots = mf_plots['exploration']\n vis_rat_plots = mf_plots['ratio_states']\n\n explr_fac = np.array(exp_fac_plots)\n visited_ratios = np.array(vis_rat_plots)\n\n return plot_means_with_std(x, explr_fac, visited_ratios, title, fig, ax1, ax2, legends, keys, color)\n\n\ndef plot_baseline(plot_fname, ax1, ax2, legends, keys, plot=True):\n with open(plot_fname, 'r') as f:\n baseline = json.load(f)\n exp_factor_baseline = np.array([l for l in 
baseline['exploration_factors'] if l])\n avg_exp_factor_baseline = np.average(exp_factor_baseline, axis=0)\n\n ratio_visited_baseline = np.array([l for l in baseline['ratios_visited'] if l])\n avg_ratio_visited_baseline = np.average(ratio_visited_baseline, axis=0)\n\n x = np.arange(0, avg_exp_factor_baseline.shape[0])\n if plot:\n ax1.title.set_text('Exploration factor')\n b1, = ax1.plot(x, avg_exp_factor_baseline, color='blue')\n y_min_baseline = avg_exp_factor_baseline - np.std(exp_factor_baseline, axis=0)\n y_max_baseline = avg_exp_factor_baseline + np.std(exp_factor_baseline, axis=0)\n ax1.fill_between(x, y_min_baseline, y_max_baseline, color='blue', alpha=0.2)\n\n ax2.title.set_text('Ratio of states visited')\n b2, = ax2.plot(x, avg_ratio_visited_baseline, color='blue')\n y_min_baseline = avg_ratio_visited_baseline - np.std(ratio_visited_baseline, axis=0)\n y_max_baseline = avg_ratio_visited_baseline + np.std(ratio_visited_baseline, axis=0)\n ax2.fill_between(x, y_min_baseline, y_max_baseline, color='blue', alpha=0.2)\n\n legends.append(b1)\n keys.append('Random Baseline')\n\n return legends, keys, x\n\n\nif __name__ == \"__main__\":\n from definitions import ROOT_DIR\n experiment = os.path.join(ROOT_DIR, \"experiments\", 'ALE', \"runs\", 'montezumas revenge novelty_reward_with_d_step_q_planning_2020-05-29 12-48-51_8222810/plot')\n replay_plot(experiment)\n\n # exp_dir_walls = os.path.join(ROOT_DIR, \"experiments\", 'maze', \"runs\", 'walls_count_q')\n # plot_offline(exp_dir_walls)\n # plt.rcParams.update({'font.size': 18})\n #\n # plot_dir = os.path.join(ROOT_DIR, \"experiments\", 'maze', 'results')\n # old_titles = {'exploration': 'Average exploration factor over 2 episodes',\n # 'ratio_states': 'Average ratio of states visited over 2 episodes'}\n # new_titles = {'exploration': 'Average exploration factor over 1 episodes',\n # 'ratio_states': 'Average ratio of states visited over 1 episodes'}\n # # experiments = [\n # # ('empty_count_q', 'Count w/ Q-argmax', new_titles),\n # # ('empty_q_argmax', 'Novelty w/ Q-argmax', new_titles),\n # # ('empty_1_step', 'Novelty w/ Planning (d=1)', new_titles),\n # # ('empty_5_step', 'Novelty w/ Planning (d=5)', new_titles),\n # # ]\n # experiments = [\n # ('walls_count_q', 'Count w/ Q-argmax', new_titles),\n # ('walls_q_argmax', 'Novelty w/ Q-argmax', new_titles),\n # ('walls_1_step', 'Novelty w/ Planning (d=1)', new_titles),\n # ('walls_5_step', 'Novelty w/ Planning (d=5)', new_titles),\n # ]\n # colors = ['orange', 'purple', 'green', 'red', 'brown', 'cyan']\n #\n # results = {}\n # for exp, title, titles in experiments:\n # results[exp] = parse_visdom_plot_dir(os.path.join(plot_dir, exp), titles, trials=10)\n #\n # # PLOTTING OUR BASELINE\n # size_maze = 21\n # baseline_data_fname=os.path.join(ROOT_DIR, \"experiments\", 'maze', 'plots', 'baselines', 'random_agent_wallless_%d.json' % size_maze)\n #\n # fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))\n # # fig.suptitle(\"Open Labyrinth experiments steps=%d, trials=%d, size_maze=%d\" % (n_steps, trials, size_maze), fontsize=16)\n # ax1.set_ylim([0, 1.01])\n # ax1.set_xlabel('environment steps')\n # ax1.set_ylabel('# unique states visited /\\n# total states visited', wrap=True)\n #\n # ax2.set_ylim([0, 1.01])\n # ax2.set_xlabel('environment steps')\n # ax2.set_ylabel('proportion of all states visited')\n #\n # ax1.grid(True)\n # ax2.grid(True)\n #\n # fig.tight_layout(rect=[0, 0.03, 1, 0.9])\n # legends = []\n # keys = []\n #\n # legends, keys, x = plot_baseline(baseline_data_fname, 
ax1, ax2, legends, keys)\n # for color, (name, title, titles) in zip(colors, experiments):\n # _, legends, keys = exploration_plots(x, results[name], fig, ax1, ax2, legends, keys, title=title, color=color)\n # # fig.legend((l for l in legends), (k for k in keys), 'lower right')\n # plt.show()"
] | [
[
"matplotlib.pyplot.tight_layout",
"numpy.expand_dims",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.std",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
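A usage sketch for taodav/novelty-search-repr-space's Plotter in offline mode, not part of the record: no visdom server is needed because everything goes to the "plot" log file inside the experiment directory; the directory name below is a placeholder:

    import os
    import numpy as np
    from nsrl.helper.plot import Plotter

    experiment_dir = "experiments/demo_run"        # placeholder path
    os.makedirs(experiment_dir, exist_ok=True)     # the plot log file lives inside it

    plotter = Plotter(experiment_dir, env_name="demo", offline=True)
    steps = np.arange(100)
    plotter.plot("exploration", x=steps, y=np.sqrt(steps / 99.0),
                 title_name="exploration factor", ymin=0, ymax=1)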
ferozkhanabbasi/human_pose | [
"5bf8bd33eec2b53aa173a6d34644c9df183759b7"
] | [
"tf_pose/slim/nets/inception_v3.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Contains the definition for inception v3 classification network.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\n\r\nfrom nets import inception_utils\r\n\r\nslim = tf.contrib.slim\r\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\r\n\r\n\r\ndef inception_v3_base(inputs,\r\n final_endpoint='Mixed_7c',\r\n min_depth=16,\r\n depth_multiplier=1.0,\r\n scope=None):\r\n \"\"\"Inception model from http://arxiv.org/abs/1512.00567.\r\n\r\n Constructs an Inception v3 network from inputs to the given final endpoint.\r\n This method can construct the network up to the final inception block\r\n Mixed_7c.\r\n\r\n Note that the names of the layers in the paper do not correspond to the names\r\n of the endpoints registered by this function although they build the same\r\n network.\r\n\r\n Here is a mapping from the old_names to the new names:\r\n Old name | New name\r\n =======================================\r\n conv0 | Conv2d_1a_3x3\r\n conv1 | Conv2d_2a_3x3\r\n conv2 | Conv2d_2b_3x3\r\n pool1 | MaxPool_3a_3x3\r\n conv3 | Conv2d_3b_1x1\r\n conv4 | Conv2d_4a_3x3\r\n pool2 | MaxPool_5a_3x3\r\n mixed_35x35x256a | Mixed_5b\r\n mixed_35x35x288a | Mixed_5c\r\n mixed_35x35x288b | Mixed_5d\r\n mixed_17x17x768a | Mixed_6a\r\n mixed_17x17x768b | Mixed_6b\r\n mixed_17x17x768c | Mixed_6c\r\n mixed_17x17x768d | Mixed_6d\r\n mixed_17x17x768e | Mixed_6e\r\n mixed_8x8x1280a | Mixed_7a\r\n mixed_8x8x2048a | Mixed_7b\r\n mixed_8x8x2048b | Mixed_7c\r\n\r\n Args:\r\n inputs: a tensor of size [batch_size, height, width, channels].\r\n final_endpoint: specifies the endpoint to construct the network up to. It\r\n can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',\r\n 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',\r\n 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',\r\n 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].\r\n min_depth: Minimum depth value (number of channels) for all convolution ops.\r\n Enforced when depth_multiplier < 1, and not an active constraint when\r\n depth_multiplier >= 1.\r\n depth_multiplier: Float multiplier for the depth (number of channels)\r\n for all convolution ops. The value must be greater than zero. 
Typical\r\n usage will be to set this value in (0, 1) to reduce the number of\r\n parameters or computation cost of the model.\r\n scope: Optional variable_scope.\r\n\r\n Returns:\r\n tensor_out: output tensor corresponding to the final_endpoint.\r\n end_points: a set of activations for external use, for example summaries or\r\n losses.\r\n\r\n Raises:\r\n ValueError: if final_endpoint is not set to one of the predefined values,\r\n or depth_multiplier <= 0\r\n \"\"\"\r\n # end_points will collect relevant activations for external use, for example\r\n # summaries or losses.\r\n end_points = {}\r\n\r\n if depth_multiplier <= 0:\r\n raise ValueError('depth_multiplier is not greater than zero.')\r\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\r\n\r\n with tf.variable_scope(scope, 'InceptionV3', [inputs]):\r\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\r\n stride=1, padding='VALID'):\r\n # 299 x 299 x 3\r\n end_point = 'Conv2d_1a_3x3'\r\n net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 149 x 149 x 32\r\n end_point = 'Conv2d_2a_3x3'\r\n net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 147 x 147 x 32\r\n end_point = 'Conv2d_2b_3x3'\r\n net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 147 x 147 x 64\r\n end_point = 'MaxPool_3a_3x3'\r\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 73 x 73 x 64\r\n end_point = 'Conv2d_3b_1x1'\r\n net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 73 x 73 x 80.\r\n end_point = 'Conv2d_4a_3x3'\r\n net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 71 x 71 x 192.\r\n end_point = 'MaxPool_5a_3x3'\r\n net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # 35 x 35 x 192.\r\n\r\n # Inception blocks\r\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\r\n stride=1, padding='SAME'):\r\n # mixed: 35 x 35 x 256.\r\n end_point = 'Mixed_5b'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],\r\n scope='Conv2d_0b_5x5')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\r\n scope='Conv2d_0b_3x3')\r\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\r\n scope='Conv2d_0c_3x3')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = 
net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_1: 35 x 35 x 288.\r\n end_point = 'Mixed_5c'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],\r\n scope='Conv_1_0c_5x5')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(64), [1, 1],\r\n scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\r\n scope='Conv2d_0b_3x3')\r\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\r\n scope='Conv2d_0c_3x3')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_2: 35 x 35 x 288.\r\n end_point = 'Mixed_5d'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],\r\n scope='Conv2d_0b_5x5')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\r\n scope='Conv2d_0b_3x3')\r\n branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],\r\n scope='Conv2d_0c_3x3')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_3: 17 x 17 x 768.\r\n end_point = 'Mixed_6a'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,\r\n padding='VALID', scope='Conv2d_1a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],\r\n scope='Conv2d_0b_3x3')\r\n branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,\r\n padding='VALID', scope='Conv2d_1a_1x1')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',\r\n scope='MaxPool_1a_3x3')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed4: 17 x 17 x 768.\r\n end_point = 'Mixed_6b'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],\r\n scope='Conv2d_0b_1x7')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\r\n scope='Conv2d_0c_7x1')\r\n with 
tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],\r\n scope='Conv2d_0b_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],\r\n scope='Conv2d_0c_1x7')\r\n branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],\r\n scope='Conv2d_0d_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\r\n scope='Conv2d_0e_1x7')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_5: 17 x 17 x 768.\r\n end_point = 'Mixed_6c'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],\r\n scope='Conv2d_0b_1x7')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\r\n scope='Conv2d_0c_7x1')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\r\n scope='Conv2d_0b_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],\r\n scope='Conv2d_0c_1x7')\r\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\r\n scope='Conv2d_0d_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\r\n scope='Conv2d_0e_1x7')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # mixed_6: 17 x 17 x 768.\r\n end_point = 'Mixed_6d'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],\r\n scope='Conv2d_0b_1x7')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\r\n scope='Conv2d_0c_7x1')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\r\n scope='Conv2d_0b_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],\r\n scope='Conv2d_0c_1x7')\r\n branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],\r\n scope='Conv2d_0d_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\r\n scope='Conv2d_0e_1x7')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_7: 17 x 17 x 768.\r\n end_point = 'Mixed_6e'\r\n with tf.variable_scope(end_point):\r\n with 
tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],\r\n scope='Conv2d_0b_1x7')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\r\n scope='Conv2d_0c_7x1')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],\r\n scope='Conv2d_0b_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\r\n scope='Conv2d_0c_1x7')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],\r\n scope='Conv2d_0d_7x1')\r\n branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],\r\n scope='Conv2d_0e_1x7')\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],\r\n scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_8: 8 x 8 x 1280.\r\n end_point = 'Mixed_7a'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,\r\n padding='VALID', scope='Conv2d_1a_3x3')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],\r\n scope='Conv2d_0b_1x7')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],\r\n scope='Conv2d_0c_7x1')\r\n branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,\r\n padding='VALID', scope='Conv2d_1a_3x3')\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',\r\n scope='MaxPool_1a_3x3')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n # mixed_9: 8 x 8 x 2048.\r\n end_point = 'Mixed_7b'\r\n with tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = tf.concat(axis=3, values=[\r\n slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),\r\n slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(\r\n branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')\r\n branch_2 = tf.concat(axis=3, values=[\r\n slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),\r\n slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(\r\n branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n\r\n # mixed_10: 8 x 8 x 2048.\r\n end_point = 'Mixed_7c'\r\n with 
tf.variable_scope(end_point):\r\n with tf.variable_scope('Branch_0'):\r\n branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')\r\n with tf.variable_scope('Branch_1'):\r\n branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_1 = tf.concat(axis=3, values=[\r\n slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),\r\n slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])\r\n with tf.variable_scope('Branch_2'):\r\n branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')\r\n branch_2 = slim.conv2d(\r\n branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')\r\n branch_2 = tf.concat(axis=3, values=[\r\n slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),\r\n slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])\r\n with tf.variable_scope('Branch_3'):\r\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\r\n branch_3 = slim.conv2d(\r\n branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')\r\n net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\r\n end_points[end_point] = net\r\n if end_point == final_endpoint: return net, end_points\r\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\r\n\r\n\r\ndef inception_v3(inputs,\r\n num_classes=1000,\r\n is_training=True,\r\n dropout_keep_prob=0.8,\r\n min_depth=16,\r\n depth_multiplier=1.0,\r\n prediction_fn=slim.softmax,\r\n spatial_squeeze=True,\r\n reuse=None,\r\n create_aux_logits=True,\r\n scope='InceptionV3',\r\n global_pool=False):\r\n \"\"\"Inception model from http://arxiv.org/abs/1512.00567.\r\n\r\n \"Rethinking the Inception Architecture for Computer Vision\"\r\n\r\n Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,\r\n Zbigniew Wojna.\r\n\r\n With the default arguments this method constructs the exact model defined in\r\n the paper. However, one can experiment with variations of the inception_v3\r\n network by changing arguments dropout_keep_prob, min_depth and\r\n depth_multiplier.\r\n\r\n The default image size used to train this network is 299x299.\r\n\r\n Args:\r\n inputs: a tensor of size [batch_size, height, width, channels].\r\n num_classes: number of predicted classes. If 0 or None, the logits layer\r\n is omitted and the input features to the logits layer (before dropout)\r\n are returned instead.\r\n is_training: whether is training or not.\r\n dropout_keep_prob: the percentage of activation values that are retained.\r\n min_depth: Minimum depth value (number of channels) for all convolution ops.\r\n Enforced when depth_multiplier < 1, and not an active constraint when\r\n depth_multiplier >= 1.\r\n depth_multiplier: Float multiplier for the depth (number of channels)\r\n for all convolution ops. The value must be greater than zero. Typical\r\n usage will be to set this value in (0, 1) to reduce the number of\r\n parameters or computation cost of the model.\r\n prediction_fn: a function to get predictions out of logits.\r\n spatial_squeeze: if True, logits is of shape [B, C], if false logits is of\r\n shape [B, 1, 1, C], where B is batch_size and C is number of classes.\r\n reuse: whether or not the network and its variables should be reused. To be\r\n able to reuse 'scope' must be given.\r\n create_aux_logits: Whether to create the auxiliary logits.\r\n scope: Optional variable_scope.\r\n global_pool: Optional boolean flag to control the avgpooling before the\r\n logits layer. 
If false or unset, pooling is done with a fixed window\r\n that reduces default-sized inputs to 1x1, while larger inputs lead to\r\n larger outputs. If true, any input size is pooled down to 1x1.\r\n\r\n Returns:\r\n net: a Tensor with the logits (pre-softmax activations) if num_classes\r\n is a non-zero integer, or the non-dropped-out input to the logits layer\r\n if num_classes is 0 or None.\r\n end_points: a dictionary from components of the network to the corresponding\r\n activation.\r\n\r\n Raises:\r\n ValueError: if 'depth_multiplier' is less than or equal to zero.\r\n \"\"\"\r\n if depth_multiplier <= 0:\r\n raise ValueError('depth_multiplier is not greater than zero.')\r\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\r\n\r\n with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:\r\n with slim.arg_scope([slim.batch_norm, slim.dropout],\r\n is_training=is_training):\r\n net, end_points = inception_v3_base(\r\n inputs, scope=scope, min_depth=min_depth,\r\n depth_multiplier=depth_multiplier)\r\n\r\n # Auxiliary Head logits\r\n if create_aux_logits and num_classes:\r\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\r\n stride=1, padding='SAME'):\r\n aux_logits = end_points['Mixed_6e']\r\n with tf.variable_scope('AuxLogits'):\r\n aux_logits = slim.avg_pool2d(\r\n aux_logits, [5, 5], stride=3, padding='VALID',\r\n scope='AvgPool_1a_5x5')\r\n aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],\r\n scope='Conv2d_1b_1x1')\r\n\r\n # Shape of feature map before the final layer.\r\n kernel_size = _reduced_kernel_size_for_small_input(\r\n aux_logits, [5, 5])\r\n aux_logits = slim.conv2d(\r\n aux_logits, depth(768), kernel_size,\r\n weights_initializer=trunc_normal(0.01),\r\n padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))\r\n aux_logits = slim.conv2d(\r\n aux_logits, num_classes, [1, 1], activation_fn=None,\r\n normalizer_fn=None, weights_initializer=trunc_normal(0.001),\r\n scope='Conv2d_2b_1x1')\r\n if spatial_squeeze:\r\n aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')\r\n end_points['AuxLogits'] = aux_logits\r\n\r\n # Final pooling and prediction\r\n with tf.variable_scope('Logits'):\r\n if global_pool:\r\n # Global average pooling.\r\n net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')\r\n end_points['global_pool'] = net\r\n else:\r\n # Pooling with a fixed kernel size.\r\n kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])\r\n net = slim.avg_pool2d(net, kernel_size, padding='VALID',\r\n scope='AvgPool_1a_{}x{}'.format(*kernel_size))\r\n end_points['AvgPool_1a'] = net\r\n if not num_classes:\r\n return net, end_points\r\n # 1 x 1 x 2048\r\n net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')\r\n end_points['PreLogits'] = net\r\n # 2048\r\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\r\n normalizer_fn=None, scope='Conv2d_1c_1x1')\r\n if spatial_squeeze:\r\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\r\n # 1000\r\n end_points['Logits'] = logits\r\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\r\n return logits, end_points\r\ninception_v3.default_image_size = 299\r\n\r\n\r\ndef _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\r\n \"\"\"Define kernel size which is automatically reduced for small input.\r\n\r\n If the shape of the input images is unknown at graph construction time this\r\n function assumes that the input images are is large enough.\r\n\r\n Args:\r\n 
input_tensor: input tensor of size [batch_size, height, width, channels].\r\n kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]\r\n\r\n Returns:\r\n a tensor with the kernel size.\r\n\r\n TODO(jrru): Make this function work with unknown shapes. Theoretically, this\r\n can be done with the code below. Problems are two-fold: (1) If the shape was\r\n known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot\r\n handle tensors that define the kernel size.\r\n shape = tf.shape(input_tensor)\r\n return = tf.stack([tf.minimum(shape[1], kernel_size[0]),\r\n tf.minimum(shape[2], kernel_size[1])])\r\n\r\n \"\"\"\r\n shape = input_tensor.get_shape().as_list()\r\n if shape[1] is None or shape[2] is None:\r\n kernel_size_out = kernel_size\r\n else:\r\n kernel_size_out = [min(shape[1], kernel_size[0]),\r\n min(shape[2], kernel_size[1])]\r\n return kernel_size_out\r\n\r\n\r\ninception_v3_arg_scope = inception_utils.inception_arg_scope\r\n"
] | [
[
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.squeeze",
"tensorflow.truncated_normal_initializer",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
eight0153/flownet2-pytorch | [
"cc2964233cd18c8db05d1751281c6ab9d3165da6"
] | [
"Video3D/inference.py"
] | [
"import argparse\nimport os\nfrom typing import Optional\n\nimport numpy as np\nimport plac\nimport torch\nfrom numpy.lib.format import open_memmap\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom MiDaS.models.midas_net import MidasNet\nfrom Video3D.dataset import NumpyDataset, create_image_transform\nfrom Video3D.io import write_video, read_video, VideoData\nfrom mannequinchallenge.models import pix2pix_model\nfrom utils.tools import TimerBlock\n\n\ndef inference_lasinger(video_data, model_path, logger: Optional[TimerBlock] = None, batch_size=8):\n close_logger_on_exit = False\n\n if logger is None:\n logger = TimerBlock(\"Inference with Depth Estimation Network\")\n logger.__enter__()\n close_logger_on_exit = True\n\n model = MidasNet(model_path, non_negative=True)\n model = model.cuda()\n model.eval()\n logger.log(\"Loaded model weights from {}.\".format(model_path))\n\n transform = create_image_transform(video_data.height, video_data.width)\n\n video_dataset = NumpyDataset(video_data.frames, transform)\n data_loader = DataLoader(video_dataset, batch_size=batch_size, shuffle=False)\n\n with torch.no_grad():\n num_frames_processed = 0\n\n for batch_i, batch in enumerate(data_loader):\n images = batch.cuda()\n depth = model(images)\n depth = F.interpolate(depth.unsqueeze(1), size=(video_data.height, video_data.width), mode='bilinear',\n align_corners=True)\n yield depth.detach().cpu().numpy()\n\n num_frames_processed += images.shape[0]\n\n logger.log(\"Generated {}/{} depth maps.\\r\".format(num_frames_processed, video_data.num_frames), end=\"\")\n\n print()\n\n if close_logger_on_exit:\n logger.__exit__(None, None, None)\n\n\n# TODO: Make inference code DRY.\ndef inference_li(video_data, model_path, logger: Optional[TimerBlock] = None, batch_size=8):\n close_logger_on_exit = False\n\n if logger is None:\n logger = TimerBlock(\"Inference with Depth Estimation Network\")\n logger.__enter__()\n close_logger_on_exit = True\n\n opt = argparse.Namespace(input='single_view', mode='Ours_Bilinear',\n checkpoints_dir='', name='', isTrain=True,\n gpu_ids='0', lr=0.0004, lr_policy='step', lr_decay_epoch=8)\n model = pix2pix_model.Pix2PixModel(opt, _isTrain=True)\n state_dict = torch.load(model_path)\n\n if not next(iter(state_dict.keys())).startswith(\"module.\"):\n state_dict = {\"module.{}\".format(k): v for k, v in state_dict.items()}\n\n model.netG.load_state_dict(state_dict)\n model.switch_to_eval()\n\n logger.log(\"Loaded model weights from {}.\".format(model_path))\n\n transform = create_image_transform(video_data.height, video_data.width, normalise=True)\n\n video_dataset = NumpyDataset(video_data.frames, transform)\n data_loader = DataLoader(video_dataset, batch_size=batch_size, shuffle=False)\n\n with torch.no_grad():\n num_frames_processed = 0\n\n for batch_i, batch in enumerate(data_loader):\n images = batch.cuda()\n images = images.to(torch.float32)\n depth, _ = model.netG(images)\n depth = F.interpolate(depth, size=(video_data.height, video_data.width), mode='bilinear',\n align_corners=True)\n yield depth.detach().cpu().numpy()\n\n num_frames_processed += images.shape[0]\n\n logger.log(\"Generated {}/{} depth maps.\\r\".format(num_frames_processed, video_data.num_frames), end=\"\")\n\n print()\n\n if close_logger_on_exit:\n logger.__exit__(None, None, None)\n\n\ndef create_and_save_depth(inference_fn, video_data, depth_estimation_model_path, dnn_depth_map_path, logger, batch_size):\n try:\n depth_maps = open_memmap(\n filename=dnn_depth_map_path,\n 
dtype=np.float32,\n mode='w+',\n shape=(video_data.num_frames, 1, *video_data.shape)\n )\n\n depth_map_generator = inference_fn(video_data, depth_estimation_model_path, logger, batch_size=batch_size)\n\n for batch_i, depth_map in enumerate(depth_map_generator):\n batch_start_idx = batch_size * batch_i\n # Sometimes the last batch is a different size to the rest, so we need to use the actual batch size rather\n # than the specified one.\n current_batch_size = depth_map.shape[0]\n batch_end_idx = batch_start_idx + current_batch_size\n depth_maps[batch_start_idx:batch_end_idx] = depth_map\n\n depth_maps.flush()\n\n logger.log(\"Saved DNN depth maps to {}.\".format(dnn_depth_map_path))\n\n return depth_maps\n except Exception:\n logger.log(\"\\nError occurred during creation of depth maps - deleting {}.\".format(dnn_depth_map_path))\n os.remove(dnn_depth_map_path)\n raise\n\n\[email protected](\n video_path=plac.Annotation(\"The path to the input video.\", kind=\"option\", type=str, abbrev=\"i\"),\n model_path=plac.Annotation(\"The path to the depth estimation model weights.\", kind=\"option\", type=str, abbrev=\"m\"),\n video_output_path=plac.Annotation(\"The path to the write the output to.\", kind=\"option\", type=str, abbrev=\"o\"),\n batch_size=plac.Annotation(\"The mini-batch size to use for the depth estimation network.\", kind=\"option\", type=int),\n)\ndef main(video_path, model_path, video_output_path, batch_size=8):\n with TimerBlock(\"Load Video\") as block:\n video_data = read_video(video_path, block)\n\n with TimerBlock(\"Depth Estimation\") as block:\n tmp_dir = \".tmp\"\n os.makedirs(tmp_dir, exist_ok=True)\n depth_map_path = os.path.join(tmp_dir, \"inference_depth_maps.npy\")\n\n depth_maps = create_and_save_depth(inference_li, video_data, model_path, depth_map_path, block, batch_size=batch_size)\n depth_maps = (255 * (depth_maps - depth_maps.min()) / (depth_maps.max() - depth_maps.min())).to(torch.uint8)\n write_video(VideoData(depth_maps, video_data.fps), video_output_path, block)\n\n\nif __name__ == '__main__':\n plac.call(main)"
] | [
[
"torch.load",
"numpy.lib.format.open_memmap",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.nn.functional.interpolate"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weiyuanlou/PyCSR3D | [
"67bf2fa296398209f38c28b639e6096de43ad154"
] | [
"csr3d/convolution.py"
] | [
"import numpy as np\nimport scipy.fft as sp_fft\n\n\ndef fftconvolve3(rho, *greens):\n \"\"\"\n Efficiently perform a 3D convolution of a charge density rho and multiple Green functions. \n \n Parameters\n ----------\n \n rho : np.array (3D)\n Charge mesh\n \n *greens : np.arrays (3D)\n Charge meshes for the Green functions, which should be twice the size of rho \n \n \n Returns\n -------\n \n fields : tuple of np.arrays with the same shape as rho. \n \n \"\"\"\n\n # FFT Configuration\n fft = lambda x: sp_fft.fftn(x, overwrite_x=True)\n ifft = lambda x: sp_fft.ifftn(x, overwrite_x=True) \n \n # Place rho in double-sized array. Should match the shape of green\n nx, ny, nz = rho.shape\n crho = np.zeros( (2*nx, 2*ny, 2*nz))\n crho[0:nx,0:ny,0:nz] = rho[0:nx,0:ny,0:nz]\n \n # FFT\n crho = fft(crho) \n \n results = []\n for green in greens:\n assert crho.shape == green.shape, f'Green array shape {green.shape} should be twice rho shape {rho.shape}'\n result = ifft(crho*fft(green))\n # Extract the result\n result = np.real(result[nx-1:2*nx-1,ny-1:2*ny-1,nz-1:2*nz-1])\n results.append(result)\n \n return tuple(results)"
] | [
[
"scipy.fft.ifftn",
"numpy.real",
"numpy.zeros",
"scipy.fft.fftn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.7",
"1.8"
],
"tensorflow": []
}
] |
ningpang/com-CNN | [
"a494589a9445c9b3ad63175ec6b2084d4d3e3e81"
] | [
"CL/networks/selector.py"
] | [
"import torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nclass Selector(nn.Module):\n\tdef __init__(self, config, s_relation_dim, p_relation_dim):\n\t\tsuper(Selector, self).__init__()\n\t\tself.config = config\n\t\t# self.relation_matrix = nn.Embedding(self.config.num_classes, relation_dim+self.config.data_word_vec.shape[1])\n\t\tself.relation_matrix = nn.Embedding(self.config.num_classes, s_relation_dim+p_relation_dim+2*self.config.data_word_vec.shape[1])\n\t\tself.bias = nn.Parameter(torch.Tensor(self.config.num_classes))\n\t\t# self.attention_matrix = nn.Embedding(self.config.num_classes, relation_dim+self.config.data_word_vec.shape[1])\n\t\tself.attention_matrix = nn.Embedding(self.config.num_classes, s_relation_dim+p_relation_dim+2*self.config.data_word_vec.shape[1])\n\t\tself.init_weights()\n\t\tself.scope = None\n\t\tself.attention_query = None\n\t\tself.label = None\n\t\tself.dropout = nn.Dropout(self.config.drop_prob)\n\tdef init_weights(self):\n\t\tnn.init.xavier_uniform(self.relation_matrix.weight.data)\n\t\tnn.init.normal(self.bias)\n\t\tnn.init.xavier_uniform(self.attention_matrix.weight.data)\n\tdef get_logits(self, x):\n\t\tlogits = torch.matmul(x, torch.transpose(self.relation_matrix.weight, 0, 1),) + self.bias\n\t\treturn logits\n\tdef forward(self, x):\n\t\traise NotImplementedError\n\tdef test(self, x):\n\t\traise NotImplementedError\n\nclass Attention(Selector):\n\tdef _attention_train_logit(self, x):\n\t\trelation_query = self.relation_matrix(self.attention_query)\n\t\tattention = self.attention_matrix(self.attention_query)\n\t\tattention_logit = torch.sum(x * attention * relation_query, 1, True)\n\t\treturn attention_logit\n\tdef _attention_test_logit(self, x):\n\t\tattention_logit = torch.matmul(x, torch.transpose(self.attention_matrix.weight * self.relation_matrix.weight, 0, 1))\n\t\treturn attention_logit\n\tdef forward(self, x):\n\t\tattention_logit = self._attention_train_logit(x)\n\t\ttower_repre = []\n\t\tfor i in range(len(self.scope) - 1):\n\t\t\tsen_matrix = x[self.scope[i] : self.scope[i + 1]]\n\t\t\tattention_score = F.softmax(torch.transpose(attention_logit[self.scope[i] : self.scope[i + 1]], 0, 1), 1)\n\t\t\tfinal_repre = torch.squeeze(torch.matmul(attention_score, sen_matrix))\n\t\t\ttower_repre.append(final_repre)\n\t\tstack_repre = torch.stack(tower_repre)\n\t\tstack_repre = self.dropout(stack_repre)\n\t\tlogits = self.get_logits(stack_repre)\n\t\treturn logits\n\tdef test(self, x):\n\t\tattention_logit = self._attention_test_logit(x)\n\t\ttower_output = []\n\t\tfor i in range(len(self.scope) - 1):\n\t\t\tsen_matrix = x[self.scope[i] : self.scope[i + 1]]\n\t\t\t# sen_matrix = self.dropout(sen_matrix)\n\t\t\tattention_score = F.softmax(torch.transpose(attention_logit[self.scope[i] : self.scope[i + 1]], 0, 1), 1)\n\t\t\tfinal_repre = torch.matmul(attention_score, sen_matrix)\n\t\t\tlogits = self.get_logits(final_repre)\n\t\t\ttower_output.append(torch.diag(F.softmax(logits, 1)))\n\t\tstack_output = torch.stack(tower_output)\n\t\treturn list(stack_output.data.cpu().numpy())\n\nclass One(Selector):\n\tdef forward(self, x):\n\t\ttower_logits = []\n\t\tfor i in range(len(self.scope) - 1):\n\t\t\tsen_matrix = x[self.scope[i] : self.scope[i + 1]]\n\t\t\tsen_matrix = self.dropout(sen_matrix)\n\t\t\tlogits = self.get_logits(sen_matrix)\n\t\t\tscore = F.softmax(logits, 1)\n\t\t\t_, k = torch.max(score, dim = 0)\n\t\t\tk = 
k[self.label[i]]\n\t\t\ttower_logits.append(logits[k])\n\t\treturn torch.cat(tower_logits, 0)\n\tdef test(self, x):\n\t\ttower_score = []\n\t\tfor i in range(len(self.scope) - 1):\n\t\t\tsen_matrix = x[self.scope[i] : self.scope[i + 1]]\n\t\t\tlogits = self.get_logits(sen_matrix)\n\t\t\tscore = F.softmax(logits, 1)\n\t\t\tscore, _ = torch.max(score, 0)\n\t\t\ttower_score.append(score)\n\t\ttower_score = torch.stack(tower_score)\n\t\treturn list(tower_score.data.cpu().numpy())\n\nclass Average(Selector):\n\tdef forward(self, x):\n\t\ttower_repre = []\n\t\tfor i in range(len(self.scope) - 1):\n\t\t\tsen_matrix = x[self.scope[i] : self.scope[i+ 1]]\n\t\t\tfinal_repre = torch.mean(sen_matrix, 0)\n\t\t\ttower_repre.append(final_repre)\n\t\tstack_repre = torch.stack(tower_repre)\n\t\tstack_repre = self.dropout(stack_repre)\n\t\tlogits = self.get_logits(stack_repre)\n\t\treturn logits\n\tdef test(self, x):\n\t\ttower_repre = []\n\t\tfor i in range(len(self.scope) - 1):\n\t\t\tsen_matrix = x[self.scope[i] : self.scope[i + 1]]\n\t\t\tfinal_repre = torch.mean(sen_matrix, 0)\n\t\t\ttower_repre.append(final_repre)\n\t\tstack_repre = torch.stack(tower_repre)\n\t\tlogits = self.get_logits(stack_repre)\n\t\tscore = F.softmax(logits, 1)\n\t\treturn list(score.data.cpu().numpy())\n"
] | [
[
"torch.mean",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.transpose",
"torch.max",
"torch.Tensor",
"torch.cat",
"torch.sum",
"torch.nn.Embedding",
"torch.matmul",
"torch.stack",
"torch.nn.init.xavier_uniform",
"torch.nn.init.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JerryX1110/NL-Augmenter | [
"f0dd6b2369c19146fe396f5b122d016f3b619eb3"
] | [
"transformations/suspecting_paraphraser/transformation.py"
] | [
"from typing import List, Tuple\n\nimport nltk\nimport numpy as np\n\n# Spacy needs this module, but it's used only implicitly\nimport pyinflect # noqa: F401\nimport spacy\nfrom gender_extractor import GenderExtractor\n\nfrom initialize import spacy_nlp\nfrom interfaces.QuestionAnswerOperation import QuestionAnswerOperation\nfrom tasks.TaskTypes import TaskType\n\n\"\"\"\nBase Class for implementing the different input transformations a generation should be robust against.\n\"\"\"\n\n\nclass SuspectingParaphraser(QuestionAnswerOperation):\n \"\"\"This paraphraser transforms a yes/no question into a tag one.\n\n Example: \"Did the American National Shipment company really break its own fleet?\"\n -> \"The American National Shipment company really broke its own fleet, didn't it?\"\n \"\"\"\n\n tasks = [TaskType.QUESTION_ANSWERING, TaskType.QUESTION_GENERATION]\n\n languages = [\"en\"]\n\n def __init__(self, seed=0, max_outputs=1, pronoun_mod=0.9):\n super().__init__(seed, max_outputs=max_outputs)\n np.random.seed(seed)\n nltk.download(\"punkt\")\n\n self.nlp = spacy_nlp if spacy_nlp else spacy.load(\"en_core_web_sm\")\n\n self.gender_detector = GenderExtractor()\n self.pronouns = [\"he\", \"she\", \"it\", \"they\"]\n self.static_pronouns = [\"i\", \"we\", \"you\", *self.pronouns]\n\n self._special_endings = {\n \"may\": \"may {} not\",\n \"might\": \"might {} not\",\n \"shall\": \"shan't {}\",\n \"will\": \"won't {}\",\n }\n\n self.pronoun_mod = pronoun_mod\n self._pronoun_alt = (1 - pronoun_mod) / (len(self.pronouns) - 1)\n\n def _transform(self, question):\n text = nltk.word_tokenize(question)\n doc = self.nlp(question)\n modal = str(doc[0]).lower()\n token = doc[0]\n\n verb_position = [\n i for i in range(len(doc)) if str(doc[i]) == token.head.text\n ][0]\n\n rest_of_sentence = [i for i in text[verb_position:]]\n\n if text[1].lower() == \"n't\":\n text = text[1:]\n doc = doc[1:]\n verb_position = verb_position - 1\n\n beginning = [text[1].capitalize()]\n beginning.extend(text[2:verb_position])\n sentence = nltk.tokenize.treebank.TreebankWordDetokenizer().detokenize(\n beginning + rest_of_sentence\n )\n\n first_verb = doc[verb_position]\n\n # If 'did' is our modal, the verb will be in a present tense\n # It means that we need to inflect it to the past one (VBD)\n # (Did John _drink_ my tea? 
-> John _drank_ my tea, didn't he?)\n # Otherwise, the verb is already in a good form and we can use\n # it directly\n if modal == \"did\":\n demodded = first_verb._.inflect(\"VBD\")\n else:\n demodded = modal + \" \" + str(first_verb)\n sentence = sentence.replace(str(first_verb), str(demodded)).replace(\n \"?\", \"\"\n )\n\n ending = self._resolve_ending(doc, modal)\n result = sentence + ending\n return result\n\n def _resolve_ending(self, doc, modal):\n try:\n subject = str([tok for tok in doc if (tok.dep_ == \"nsubj\")][0])\n except IndexError:\n return \", right?\"\n\n prob = {i: 1 / len(self.pronouns) for i in self.pronouns}\n\n tagged = [(X.text, X.label_) for X in doc.ents]\n if subject.lower() in self.static_pronouns:\n pronoun = subject.lower()\n if pronoun == \"i\":\n pronoun = \"I\"\n else:\n if len(tagged) > 0 and tagged[0][1] != \"PERSON\":\n prob = {i: 0 for i in self.pronouns}\n prob[\"it\"] = 1\n else:\n noun_gender = self.gender_detector.extract_gender(subject)\n\n if noun_gender in [\"male\", \"mostly_male\"]:\n prob = {i: self._pronoun_alt for i in self.pronouns}\n\n prob[\"he\"] = self.pronoun_mod\n\n elif noun_gender in [\"female\", \"mostly_female\"]:\n prob = {i: self._pronoun_alt for i in self.pronouns}\n\n prob[\"she\"] = self.pronoun_mod\n\n pronoun = np.random.choice(self.pronouns, p=list(prob.values()))\n\n ending = \", \"\n if modal in self._special_endings.keys():\n ending += self._special_endings[modal].format(pronoun)\n else:\n ending += f\"{modal}n't {pronoun}\"\n ending += \"?\"\n return ending\n\n def _filter_phrase(self, question):\n try:\n if question.strip()[-1] != \"?\":\n return False\n except IndexError:\n return False\n\n if \" or \" in question:\n return False\n\n doc = self.nlp(question)\n token = doc[0]\n if token.pos_ != \"AUX\":\n return False\n\n return True\n\n def generate(\n self, context: str, question: str, answers: [str]\n ) -> List[Tuple[str, str, List[str]]]:\n if not self._filter_phrase(question):\n return [(context, question, answers)]\n\n paraphrased = self._transform(question)\n return [(context, paraphrased, answers)]\n\n\nif __name__ == \"__main__\":\n import json\n\n from TestRunner import convert_to_snake_case\n\n tf = SuspectingParaphraser()\n\n test_cases = []\n for i, sentence in enumerate(\n [\n \"Did Sally finally return the french book to Chris?\",\n \"Did the American National Shipment company really break its own fleet?\",\n \"Couldn't she just leave?\",\n \"Shall you begone, lad?\",\n \"Has Buzz Aldrin, the first person who walked on the moon, brought back some aliens?\",\n ]\n ):\n res = tf.generate(\"\", sentence, [])\n test_cases.append(\n {\n \"class\": tf.name(),\n \"inputs\": {\"context\": \"\", \"question\": sentence, \"answers\": []},\n \"outputs\": [],\n }\n )\n\n for p_context, p_question, p_answers in res:\n print(sentence)\n print(p_question)\n print()\n test_cases[i][\"outputs\"].append(\n {\n \"context\": p_context,\n \"question\": p_question,\n \"answers\": p_answers,\n }\n )\n\n json_file = {\n \"type\": convert_to_snake_case(tf.name()),\n \"test_cases\": test_cases,\n }\n\n with open(\"test.json\", \"w\") as f:\n json.dump(json_file, f, indent=2)\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
limkhashing/SpotPark-Backend | [
"e982e6ee43f4763c42877788565ce2412ac9abce"
] | [
"utility.py"
] | [
"import cv2\nimport os\nimport numpy as np\nimport io\nimport time\nfrom google.cloud import vision\n\n# MySql / PhpMyAdmin Connection Variable\nhost = u'sql132.main-hosting.eu'\nuser = u'u824500046_fyp'\npw = u'7pmfyvTNvyeU'\ndb = u'u824500046_fyp'\n\n# Initialize Cloud Vision API and Firebase Admin SDK\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'google_vision.json'\n\n\n# [START vision_text_detection]\ndef detect_text(frame):\n \"\"\"Detects text in the file.\"\"\"\n client = vision.ImageAnnotatorClient()\n\n # [START vision_python_migration_text_detection]\n with io.open(frame, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n response = client.text_detection(image=image)\n texts = response.text_annotations\n print('Texts:')\n for text in texts:\n if len(text.description) == 4:\n # print(text.description)\n return text.description\n\n\n# Function that return canny detection\ndef auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n\n # apply automatic Canny edge detection using the computed median\n # In practice, sigma=0.33 tends to give good results on most of the dataset\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n\n # return the edged image\n return edged\n # [END vision_python_migration_text_detection]\n# [END vision_text_detection]\n"
] | [
[
"numpy.median"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
feynmanliang/pyro | [
"ef62e718a101c58e8ed920d06445eeb0d35ff895"
] | [
"tests/ops/gamma_gaussian.py"
] | [
"import torch\n\nimport pyro.distributions as dist\nfrom pyro.ops.gamma_gaussian import GammaGaussian\nfrom tests.common import assert_close\n\n\ndef random_gamma_gaussian(batch_shape, dim, rank=None):\n \"\"\"\n Generate a random Gaussian for testing.\n \"\"\"\n if rank is None:\n rank = dim + dim\n log_normalizer = torch.randn(batch_shape)\n loc = torch.randn(batch_shape + (dim,))\n samples = torch.randn(batch_shape + (dim, rank))\n precision = torch.matmul(samples, samples.transpose(-2, -1))\n if dim > 0:\n info_vec = precision.matmul(loc.unsqueeze(-1)).squeeze(-1)\n else:\n info_vec = loc\n alpha = torch.randn(batch_shape).exp() + 0.5 * dim - 1\n beta = torch.randn(batch_shape).exp() + 0.5 * (info_vec * loc).sum(-1)\n result = GammaGaussian(log_normalizer, info_vec, precision, alpha, beta)\n assert result.dim() == dim\n assert result.batch_shape == batch_shape\n return result\n\n\ndef random_gamma(batch_shape):\n \"\"\"\n Generate a random Gamma distribution for testing.\n \"\"\"\n concentration = torch.randn(batch_shape).exp()\n rate = torch.randn(batch_shape).exp()\n return dist.Gamma(concentration, rate)\n\n\ndef assert_close_gamma_gaussian(actual, expected):\n assert isinstance(actual, GammaGaussian)\n assert isinstance(expected, GammaGaussian)\n assert actual.dim() == expected.dim()\n assert actual.batch_shape == expected.batch_shape\n assert_close(actual.log_normalizer, expected.log_normalizer)\n assert_close(actual.info_vec, expected.info_vec)\n assert_close(actual.precision, expected.precision)\n assert_close(actual.alpha, expected.alpha)\n assert_close(actual.beta, expected.beta)\n"
] | [
[
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ehrenb/Mercator | [
"f87c6aa38d304a62d86a9e4b3a7d96d1f7aba156"
] | [
"Mercator/utils/nx_scripts/path.py"
] | [
"import json\nfrom pprint import pprint\n\nimport networkx as nx\nfrom networkx.readwrite import json_graph\n\nimport matplotlib.pyplot as plt\n\n\ngraph = None\nwith open('02e231f85558f37da6802142440736f6/02e231f85558f37da6802142440736f6_component_graph.json') as f:\n graph = json.load(f)\nnx_graph = json_graph.node_link_graph(graph)\n\npos = nx.circular_layout(nx_graph, scale=0.2)#k,iterations used to increase distance btwn nodes\n\nnodes = nx_graph.nodes(data=True)\n\n#custom labels:\nlabels = {}\nfor (p, d) in nodes:\n labels[p] = d['attr_dict']['name']\n\nedge_labels=dict([((u,v,),d['attr_dict']['method'])\n for u,v,d in nx_graph.edges(data=True)])\n\n\n#Determine index of src/dst based on name attribute\nsource = None\nsource_class_name = 'Lkrep/itmtd/ywtjexf/UampleUverlayUhowUctivity;'\nfor (p, d) in nodes:\n if d['attr_dict']['name'] == source_class_name:\n source = p\n break\n\nprint(\"Source: \")\nprint(source)\n\ndest = None\ndest_class_name = 'Lkrep/itmtd/ywtjexf/MasterInterceptor;'\nfor (p, d) in nodes:\n if d['attr_dict']['name'] == dest_class_name:\n dest = p\n break\n\nprint(\"Dest: \")\nprint(dest)\n\n# Has path?\nprint(nx.has_path(nx_graph, source, dest))\n\n\n# Shorest path\nshortest_path = nx.shortest_path(nx_graph, source=source, target=dest)\npath_str = ' -> '.join([nodes[i]['attr_dict']['name'] for i in shortest_path])\nprint(path_str)\n\nshortest_path_edges = list(zip(shortest_path,shortest_path[1:]))\n\n# Path length\nprint(nx.shortest_path_length(nx_graph, source=source, target=dest))\n\n# Drawing\nnx.draw(nx_graph, pos, node_size=60, node_color='k')\nnx.draw_networkx_nodes(nx_graph,pos, nodelist=shortest_path, node_color='r')\nnx.draw_networkx_edges(nx_graph,pos, edgelist=shortest_path_edges, edge_color='r')\nnx.draw_networkx_labels(nx_graph, pos, labels, font_size=8, alpha=0.5)\nnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels, font_size=8, alpha=0.5)\n\nplt.axis('equal')\nplt.savefig(\"graph_shortest_path.png\")\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Combatd/probability_analysis_package | [
"3f39afe52d8a46556fedfc4d269db36c8e5ab73f"
] | [
"Gaussian_Code_Exercise/gaussian.py"
] | [
"import math\nimport matplotlib.pyplot as plt\n\nclass Gaussian():\n \"\"\" Gaussian distribution class for calculating and \n visualizing a Gaussian distribution.\n \n Attributes:\n mean (float) representing the mean value of the distribution\n stdev (float) representing the standard deviation of the distribution\n data_list (list of floats) a list of floats extracted from the data file\n \n \"\"\"\n def __init__(self, mu = 0, sigma = 1):\n \n self.mean = mu\n self.stdev = sigma\n self.data = []\n\n\n \n def calculate_mean(self):\n \n \"\"\"Method to calculate the mean of the data set.\n \n Args: \n None\n \n Returns: \n float: mean of the data set\n \n \"\"\"\n \n #TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data\n # Change the value of the mean attribute to be the mean of the data set\n # Return the mean of the data set\n # \n # Mean (average) is the sum of all values in the list divided by the number of values \n average = 1.0 * sum(self.data) / len(self.data)\n self.mean = average\n return self.mean\n\n\n def calculate_stdev(self, sample=True):\n\n \"\"\"Method to calculate the standard deviation of the data set.\n \n Args: \n sample (bool): whether the data represents a sample or population\n \n Returns: \n float: standard deviation of the data set\n \n \"\"\"\n\n # TODO:\n # Calculate the standard deviation of the data set\n # \n # The sample variable determines if the data set contains a sample or a population\n # If sample = True, this means the data is a sample. \n # Keep the value of sample in mind for calculating the standard deviation\n #\n # Make sure to update self.stdev and return the standard deviation as well \n \n # Dividing by n − 1 rather than by n gives an unbiased estimate of the variance of the larger parent population.\n # if sample:\n # n = len(self.data) - 1\n # else:\n # n = len(self.data)\n\n # # We need ot find the average of all the data points\n # average = self.mean\n # # sigma represents deviation\n # sigma = 0\n # # We take deviations of each data point fron the\n # # average, and then square the result.\n # # (data_point - mean)^2\n # for deviation in self.data:\n # sigma += (deviation - average) ** 2\n # # We get the variance calculating the mean of those values\n # sigma = math.sqrt(sigma / n)\n # # We square root the variance to get our standard deviation\n # self.stdev = sigma\n # return self.stdev\n\n if sample:\n n = len(self.data) - 1\n else:\n n = len(self.data)\n \n mean = self.mean\n \n sigma = 0\n \n for d in self.data:\n sigma += (d - mean) ** 2\n \n sigma = math.sqrt(sigma / n)\n \n self.stdev = sigma\n \n return self.stdev\n\n def read_data_file(self, file_name, sample=True):\n \n \"\"\"Method to read in data from a txt file. The txt file should have\n one number (float) per line. The numbers are stored in the data attribute. 
\n After reading in the file, the mean and standard deviation are calculated\n \n Args:\n file_name (string): name of a file to read from\n \n Returns:\n None\n \n \"\"\"\n \n # This code opens a data file and appends the data to a list called data_list\n with open(file_name) as file:\n data_list = []\n line = file.readline()\n while line:\n data_list.append(int(line))\n line = file.readline()\n file.close()\n \n # TODO: \n # Update the self.data attribute with the data_list\n self.data = data_list\n # Update self.mean with the mean of the data_list.\n self.mean = self.calculate_mean() \n # You can use the calculate_mean() method with self.calculate_mean()\n # Update self.stdev with the standard deviation of the data_list. Use the \n # calculate_stdev() method.\n self.stdev = self.calculate_stdev(sample) \n \n def plot_histogram(self):\n \"\"\"Method to output a histogram of the instance variable data using \n matplotlib pyplot library.\n \n Args:\n None\n \n Returns:\n None\n \"\"\"\n \n # TODO: Plot a histogram of the data_list using the matplotlib package.\n # Be sure to label the x and y axes and also give the chart a title\n plt.hist(self.data)\n plt.title(\"Histogram of Data Points\")\n plt.xlabel(\"Data\")\n plt.set_ylabel(\"Counts\")\n \n \n def pdf(self, x):\n \"\"\"Probability density function calculator for the gaussian distribution.\n \n Args:\n x (float): point for calculating the probability density function\n \n \n Returns:\n float: probability density function output\n \"\"\"\n \n # TODO: Calculate the probability density function of the Gaussian distribution\n # at the value x. You'll need to use self.stdev and self.mean to do the calculation\n return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)\n\n def plot_histogram_pdf(self, n_spaces = 50):\n\n \"\"\"Method to plot the normalized histogram of the data and a plot of the \n probability density function along the same range\n \n Args:\n n_spaces (int): number of data points \n \n Returns:\n list: x values for the pdf plot\n list: y values for the pdf plot\n \n \"\"\"\n \n #TODO: Nothing to do for this method. Try it out and see how it works.\n \n mu = self.mean\n sigma = self.stdev\n\n min_range = min(self.data)\n max_range = max(self.data)\n \n # calculates the interval between x values\n interval = 1.0 * (max_range - min_range) / n_spaces\n\n x = []\n y = []\n \n # calculate the x values to visualize\n for i in range(n_spaces):\n tmp = min_range + interval*i\n x.append(tmp)\n y.append(self.pdf(tmp))\n\n # make the plots\n fig, axes = plt.subplots(2,sharex=True)\n fig.subplots_adjust(hspace=.5)\n axes[0].hist(self.data, density=True)\n axes[0].set_title('Normed Histogram of Data')\n axes[0].set_ylabel('Density')\n\n axes[1].plot(x, y)\n axes[1].set_title('Normal Distribution for \\n Sample Mean and Sample Standard Deviation')\n axes[0].set_ylabel('Density')\n plt.show()\n\n return x, y"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.set_ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tradewartracker/phase-one-product-hs2 | [
"38dd328a8211695c31f09a34832535dc2c82a5c2"
] | [
"main-hs2.py"
] | [
"import datetime as dt\nfrom os.path import dirname, join\n\nimport numpy as np\n\nimport pandas as pd\n\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column, gridplot, row\nfrom bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d\nfrom bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation\nfrom bokeh.models import ColorBar\nfrom bokeh.palettes import brewer, Spectral6\nfrom bokeh.plotting import figure\nfrom bokeh.embed import server_document\nfrom bokeh.transform import factor_cmap\n\n#################################################################################\n# This just loads in the data...\n# Alot of this was built of this \"cross-fire demo\"\n# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py\n\nstart_date = dt.datetime(2017,7,1)\nend_date = dt.datetime(2022,1,1)\n\nbackground = \"#ffffff\"\n\nfile = \"./data\"+ \"/data.parquet\"\n\ndf = pq.read_table(file).to_pandas()\n\ndf.sort_index(inplace=True)\n\noptions = df.index.unique(0).to_list()\n\n#print(options)\n\nproduct = \"HS CODE 72, IRON AND STEEL\"\n\nlevel = \"US Dollars\"\n\n#################################################################################\n#These are functions used in the plot...\n\ndef growth_trade(foo):\n # what this function does is take a dataframe and create a relative \n \n return 100*((foo[\"china_exports\"]/foo[\"china_exports\"].shift(12)) - 1)\n\ndef cum_trade(foo):\n \n outdf = pd.DataFrame([])\n \n outdf[\"cuml_trade_2017\"] = foo[\"china_exports\"].loc[\"2017\"].cumsum()\n \n outdf.index = pd.date_range(start=\"2020-01-01\", end=\"2020-12-01\", freq = \"MS\")\n \n outdf[\"cuml_trade_2020\"] = foo[\"china_exports\"].loc[\"2020\"].cumsum()\n \n return outdf\n\n#################################################################################\n# Then this makes the simple plots:\n\ndef make_plot():\n \n height = int(1.15*533)\n width = int(1.15*750)\n \n foo = df.loc[product_select.value]\n #foo = df.query(\"@a < a\")\n # below there is an object of selections which will be one of the values in \n # the list of options. 
So the .value then grabs that particular option selected.\n\n x = foo.index\n \n if level_select.value == 'US Dollars':\n y = foo['china_exports']\n \n if level_select.value == 'Year over Year % Change':\n y = growth_trade(foo)\n \n if level_select.value == \"Cumulative Purchases 2020 vs 2017\":\n cuml = cum_trade(foo)\n x = cuml.index\n y2017 = cuml[\"cuml_trade_2017\"]\n y2020 = cuml[\"cuml_trade_2020\"] \n\n \n title = \"US Exports to China of \" + product_select.value.title().upper()\n \n if level_select.value != \"Cumulative Purchases 2020 vs 2017\":\n \n # This is standard bokeh stuff so far\n plot = figure(x_axis_type=\"datetime\", plot_height = height, plot_width=width, toolbar_location = 'below',\n tools = \"box_zoom, reset, pan, xwheel_zoom\", title = title,\n x_range = (start_date,end_date) )\n\n plot.line(x = x,\n y = y, line_width=3.5, line_alpha=0.75, line_color = \"slategray\")\n \n if level_select.value == \"Cumulative Purchases 2020 vs 2017\":\n \n plot = figure(x_axis_type=\"datetime\", plot_height = height, plot_width=width, toolbar_location = 'below',\n tools = \"box_zoom, reset, pan\", title = title,\n x_range = (dt.datetime(2020,1,1),dt.datetime(2021,2,1)) )\n\n plot.line(x = x,\n y = y2017, line_width=3.5, line_alpha=0.5, line_color = \"red\", line_dash = \"dashed\"\n , legend_label= \"2017\")\n \n plot.line(x = x,\n y = y2020, line_width=3.5, line_alpha=0.75, line_color = \"darkblue\"\n , legend_label= \"2020\")\n \n plot.legend.title = 'Cumulative Purchases'\n plot.legend.location = \"top_left\"\n plot.legend.title_text_font_style = \"bold\"\n \n # fixed attributes\n plot.xaxis.axis_label = None\n plot.yaxis.axis_label = \"\"\n plot.axis.axis_label_text_font_style = \"bold\"\n plot.grid.grid_line_alpha = 0.3\n \n TIMETOOLTIPS = \"\"\"\n <div style=\"background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;\">\n <div style = \"text-align:left;\">\"\"\"\n \n if level_select.value == 'Year over Year % Change':\n \n TIMETOOLTIPS = TIMETOOLTIPS + \"\"\"\n <span style=\"font-size: 13px; font-weight: bold\"> $data_x{%b %Y}: $data_y{0}%</span> \n </div>\n </div>\n \"\"\"\n \n plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))\n \n if level_select.value == 'US Dollars':\n \n TIMETOOLTIPS = TIMETOOLTIPS + \"\"\"\n <span style=\"font-size: 13px; font-weight: bold\"> $data_x{%b %Y}: $data_y{$0.0a}</span> \n </div>\n </div>\n \"\"\"\n plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))\n \n if level_select.value == \"Cumulative Purchases 2020 vs 2017\":\n #################################################################################\n singlesource2020 = ColumnDataSource({\n 'xs': x.values,\n 'ys': y2020.values,\n \"dates\": np.array(x),\n })\n\n \n c2020 = plot.circle(x=\"xs\", y=\"ys\", size=35,\n source = singlesource2020, color = \"crimson\",alpha=0.0)\n \n singlesource2017 = ColumnDataSource({\n 'xs': x.values,\n 'ys': y2017.values,\n \"dates\": np.array(pd.date_range(start=\"2017-01-01\", end=\"2017-12-01\", freq = \"MS\")),\n })\n \n c2017 = plot.circle(x=\"xs\", y=\"ys\", size=35,\n source = singlesource2017, color = \"darkblue\",alpha=0.0)\n\n \n TIMETOOLTIPS = TIMETOOLTIPS + \"\"\"\n <span style=\"font-size: 13px; font-weight: bold\"> @dates{%b %Y}: $data_y{$0.0a}</span> \n </div>\n </div>\n \"\"\"\n \n plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [c2017,c2020]))\n 
\n if level_select.value == 'Year over Year % Change':\n if y.max() > 1500:\n plot.y_range.end = 1500\n \n \n \n plot.title.text_font_size = '13pt'\n plot.background_fill_color = background \n plot.background_fill_alpha = 0.75\n plot.border_fill_color = background \n \n tradewar_box = BoxAnnotation(left=dt.datetime(2018,7,1), right=dt.datetime(2019,10,11), fill_color='red', fill_alpha=0.1)\n plot.add_layout(tradewar_box)\n \n tradewar_box = BoxAnnotation(left=dt.datetime(2020,1,1), right=dt.datetime(2021,12,31), fill_color='blue', fill_alpha=0.1)\n plot.add_layout(tradewar_box)\n \n #p.yaxis.axis_label = \n plot.yaxis.axis_label_text_font_style = 'bold'\n plot.yaxis.axis_label_text_font_size = \"13px\"\n \n plot.sizing_mode= \"scale_both\"\n \n \n if level_select.value != 'Year over Year % Change':\n \n plot.yaxis.formatter = NumeralTickFormatter(format=\"($0. a)\")\n \n plot.yaxis.axis_label = \"US Dollars\"\n \n if level_select.value == 'Year over Year % Change':\n \n plot.yaxis.axis_label = level_select.value\n \n plot.max_height = height\n plot.max_width = width\n \n plot.min_height = int(0.25*height)\n plot.min_width = int(0.25*width)\n \n return plot\n\ndef update_plot(attrname, old, new):\n layout.children[0] = make_plot()\n \n# This part is still not clear to me. but it tells it what to update and where to put it\n# so it updates the layout and [0] is the first option (see below there is a row with the\n# first entry the plot, then the controls)\n\nlevel_select = Select(value=level, title='Tranformations', options=['US Dollars', 'Year over Year % Change', \"Cumulative Purchases 2020 vs 2017\"])\nlevel_select.on_change('value', update_plot)\n\n#print(sorted(options))\n\nproduct_select = Select(value=product, title='Product', options=sorted(options), width=400)\n# This is the key thing that creates teh selection object\n\nproduct_select.on_change('value', update_plot)\n# Change the value upone selection via the update plot \n\ndiv0 = Div(text = \"\"\"Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\\n\n \\n\n \\n\n \"\"\", width=400, background = background, style={\"justify-content\": \"space-between\", \"display\": \"flex\"} )\n\ndiv1 = Div(text = \"\"\"Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\\n\n \"\"\", width=400, background = background, style={\"justify-content\": \"space-between\", \"display\": \"flex\"} )\n\ncontrols = column(product_select, div0, level_select, div1)\n\nheight = int(1.95*533)\nwidth = int(1.95*675)\n\nlayout = row(make_plot(), controls, sizing_mode = \"scale_height\", max_height = height, max_width = width,\n min_height = int(0.25*height), min_width = int(0.25*width))\n\ncurdoc().add_root(layout)\ncurdoc().title = \"us-china-products\"\n"
] | [
[
"numpy.array",
"pandas.DataFrame",
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
cramsay/PYNQ | [
"7e190122aea5a19b910cc2501a8f7eef58fa4cbc"
] | [
"pynq/ps.py"
] | [
"# Copyright (c) 2016, Xilinx, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\nimport os\nimport warnings\nfrom .mmio import MMIO\nfrom .registers import Register\n\n__author__ = \"Yun Rock Qu\"\n__copyright__ = \"Copyright 2017, Xilinx\"\n__email__ = \"[email protected]\"\n\nZYNQ_ARCH = \"armv7l\"\nZU_ARCH = \"aarch64\"\nCPU_ARCH = os.uname().machine\nCPU_ARCH_IS_SUPPORTED = CPU_ARCH in [ZYNQ_ARCH, ZU_ARCH]\n\nDEFAULT_PL_CLK_MHZ = 100.0\n\n\nclass _ClocksMeta(type):\n \"\"\"Meta class for all the PS and PL clocks not exposed to users.\n\n Since this is the abstract base class for all the clocks, no\n attributes or methods are exposed to users. 
Users should use the class\n `Clocks` instead.\n\n Note\n ----\n If this class is parsed on an unsupported architecture it will issue\n a warning and leave class variables undefined\n\n \"\"\"\n @property\n def cpu_mhz(cls):\n \"\"\"The getter method for CPU clock.\n\n The returned clock rate is measured in MHz.\n\n \"\"\"\n return cls.get_cpu_mhz()\n\n @cpu_mhz.setter\n def cpu_mhz(cls, clk_mhz):\n \"\"\"The setter method for CPU clock.\n\n Since the CPU clock should not be changed, setting it will raise\n an exception.\n\n \"\"\"\n raise RuntimeError(\"Not allowed to change CPU clock.\")\n\n @property\n def fclk0_mhz(cls):\n \"\"\"The getter method for PL clock 0.\n\n This method will read the register values, do the calculation,\n and return the current clock rate.\n\n Returns\n -------\n float\n The returned clock rate measured in MHz.\n\n \"\"\"\n return cls.get_pl_clk(0)\n\n @fclk0_mhz.setter\n def fclk0_mhz(cls, clk_mhz):\n \"\"\"The setter method for PL clock 0.\n\n Parameters\n ----------\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n cls.set_pl_clk(0, clk_mhz=clk_mhz)\n\n @property\n def fclk1_mhz(cls):\n \"\"\"The getter method for PL clock 1.\n\n This method will read the register values, do the calculation,\n and return the current clock rate.\n\n Returns\n -------\n float\n The returned clock rate measured in MHz.\n\n \"\"\"\n return cls.get_pl_clk(1)\n\n @fclk1_mhz.setter\n def fclk1_mhz(cls, clk_mhz):\n \"\"\"The setter method for PL clock 1.\n\n Parameters\n ----------\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n cls.set_pl_clk(1, clk_mhz=clk_mhz)\n\n @property\n def fclk2_mhz(cls):\n \"\"\"The getter method for PL clock 2.\n\n This method will read the register values, do the calculation,\n and return the current clock rate.\n\n Returns\n -------\n float\n The returned clock rate measured in MHz.\n\n \"\"\"\n return cls.get_pl_clk(2)\n\n @fclk2_mhz.setter\n def fclk2_mhz(cls, clk_mhz):\n \"\"\"The setter method for PL clock 2.\n\n Parameters\n ----------\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n cls.set_pl_clk(2, clk_mhz=clk_mhz)\n\n @property\n def fclk3_mhz(cls):\n \"\"\"The getter method for PL clock 3.\n\n This method will read the register values, do the calculation,\n and return the current clock rate.\n\n Returns\n -------\n float\n The returned clock rate measured in MHz.\n\n \"\"\"\n return cls.get_pl_clk(3)\n\n @fclk3_mhz.setter\n def fclk3_mhz(cls, clk_mhz):\n \"\"\"The setter method for PL clock 3.\n\n Parameters\n ----------\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n cls.set_pl_clk(3, clk_mhz=clk_mhz)\n\n @classmethod\n def get_pl_clk(mcs, clk_idx):\n \"\"\"This method will return the clock frequency.\n\n This method is not exposed to users.\n\n Parameters\n ----------\n clk_idx : int\n The index of the PL clock to be changed, from 0 to 3.\n\n \"\"\"\n if clk_idx not in range(4):\n raise ValueError(\"Valid PL clock index is 0 - 3.\")\n\n pl_clk_reg = mcs.PL_CLK_CTRLS[clk_idx]\n src_clk_idx = pl_clk_reg[mcs.PL_CLK_SRC_FIELD]\n src_clk_mhz = mcs._get_src_clk_mhz(src_clk_idx)\n pl_clk_odiv0 = pl_clk_reg[mcs.PL_CLK_ODIV0_FIELD]\n pl_clk_odiv1 = pl_clk_reg[mcs.PL_CLK_ODIV1_FIELD]\n\n return round(src_clk_mhz / (pl_clk_odiv0 * pl_clk_odiv1), 6)\n\n @classmethod\n def set_pl_clk(mcs, clk_idx, div0=None, div1=None,\n clk_mhz=DEFAULT_PL_CLK_MHZ):\n \"\"\"This method sets a PL clock frequency.\n\n Users have to specify the index of the PL clock to be changed.\n For example, for fclk1 (Zynq) or pl_clk_1 (ZynqUltrascale),\n 
`clk_idx` is 1.\n\n The CPU, and other source clocks, by default, should not get changed.\n\n Users have two options:\n 1. specify the two frequency divider values directly (div0, div1), or\n 2. specify the clock rate, in which case the divider values will be\n calculated.\n\n Note\n ----\n In case `div0` and `div1` are both specified, the parameter `clk_mhz`\n will be ignored.\n\n Parameters\n ----------\n clk_idx : int\n The index of the PL clock to be changed, from 0 to 3.\n div0 : int\n The first frequency divider value.\n div1 : int\n The second frequency divider value.\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n if clk_idx not in range(4):\n raise ValueError(\"Valid PL clock index is 0 - 3.\")\n\n pl_clk_reg = mcs.PL_CLK_CTRLS[clk_idx]\n div0_width = Register.count(mcs.PL_CLK_ODIV0_FIELD)\n div1_width = Register.count(mcs.PL_CLK_ODIV1_FIELD)\n src_clk_idx = pl_clk_reg[mcs.PL_CLK_SRC_FIELD]\n src_clk_mhz = mcs._get_src_clk_mhz(src_clk_idx)\n\n if div0 is None and div1 is None:\n div0, div1 = mcs._get_2_divisors(src_clk_mhz, clk_mhz,\n div0_width, div1_width)\n elif div0 is not None and div1 is None:\n div1 = round(src_clk_mhz / clk_mhz / div0)\n elif div1 is not None and div0 is None:\n div0 = round(src_clk_mhz / clk_mhz / div1)\n\n if div0 <= 0 or div0 > ((1 << div0_width) - 1):\n raise ValueError(\"Frequency divider 0 value out of range.\")\n if div1 <= 0 or div1 > ((1 << div1_width) - 1):\n raise ValueError(\"Frequency divider 1 value out of range.\")\n\n pl_clk_reg[mcs.PL_CLK_ODIV0_FIELD] = div0\n pl_clk_reg[mcs.PL_CLK_ODIV1_FIELD] = div1\n\n @classmethod\n def _get_src_clk_mhz(mcs, clk_idx):\n \"\"\"The getter method for PL clock (pl_clk) sources.\n\n The returned clock rate is measured in MHz.\n\n \"\"\"\n if clk_idx not in range(4):\n raise ValueError(\"Valid PL clock index is 0 - 3.\")\n\n src_pll_reg = mcs.PL_SRC_PLL_CTRLS[clk_idx]\n return round(mcs.get_pll_mhz(src_pll_reg), 6)\n\n @classmethod\n def _get_2_divisors(mcs, freq_high, freq_desired, reg0_width, reg1_width):\n \"\"\"Return 2 divisors of the specified width for frequency divider.\n\n Warning will be raised if the closest clock rate achievable\n differs more than 1 percent of the desired value.\n\n Parameters\n ----------\n freq_high : float\n High frequency to be divided.\n freq_desired : float\n Desired frequency to be get.\n reg0_width: int\n The register width of the first divisor.\n reg1_width : int\n The register width of the second divisor.\n\n Returns\n -------\n tuple\n A pair of 2 divisors, each of 6 bits at most.\n\n \"\"\"\n div_product_desired = round(freq_high / freq_desired, 6)\n _, q0 = min(enumerate(mcs.VALID_CLOCK_DIV_PRODUCTS),\n key=lambda x: abs(x[1] - div_product_desired))\n if abs(freq_desired - freq_high / q0) > 0.01 * freq_desired:\n warnings.warn(\n \"Setting frequency to the closet possible value {}MHz.\".format(\n round(freq_high / q0, 5)))\n\n max_val0 = 1 << reg0_width\n max_val1 = 1 << reg1_width\n for i in range(1, max_val0):\n for j in range(1, max_val1):\n if i * j == q0:\n return i, j\n\n\nclass _ClocksUltrascale(_ClocksMeta):\n \"\"\"Implementation class for all Zynq Ultrascale PS and PL clocks\n not exposed to users.\n\n Since this is the abstract base class for all Zynq Ultrascale clocks, no\n attributes or methods are exposed to users. 
Users should use the class\n `Clocks` instead.\n\n \"\"\"\n DEFAULT_SRC_CLK_MHZ = 33.333\n\n # Registers in the CRL \"Namespace\"\n CRL_APB_ADDRESS = 0xFF5E0000\n IOPLL_CTRL_OFFSET = 0x20\n RPLL_CTRL_OFFSET = 0x30\n\n PL0_CTRL_OFFSET = 0xC0\n PL1_CTRL_OFFSET = 0xC4\n PL2_CTRL_OFFSET = 0xC8\n PL3_CTRL_OFFSET = 0xCC\n PLX_CTRL_CLKACT_FIELD = 24\n PLX_CTRL_ODIV1_FIELD = slice(21, 16)\n PLX_CTRL_ODIV0_FIELD = slice(13, 8)\n PLX_CTRL_SRC_FIELD = slice(2, 0)\n\n PLX_CTRL_SRC_DEFAULT = 0\n\n PL_CLK_SRC_FIELD = PLX_CTRL_SRC_FIELD\n PL_CLK_ODIV0_FIELD = PLX_CTRL_ODIV0_FIELD\n PL_CLK_ODIV1_FIELD = PLX_CTRL_ODIV1_FIELD\n\n # Registers in the CRF \"Namespace\"\n CRF_APB_ADDRESS = 0xFD1A0000\n APLL_CTRL_OFFSET = 0x20\n DPLL_CTRL_OFFSET = 0x2C\n VPLL_CTRL_OFFSET = 0x38\n\n ACPU_CTRL_OFFSET = 0x60\n ACPU_CTRL_CLKHALF_FIELD = 25\n ACPU_CTRL_CLKFULL_FIELD = 24\n ACPU_CTRL_ODIV_FIELD = slice(13, 8)\n ACPU_CTRL_SRC_FIELD = slice(2, 0)\n\n # Fields shared between CRF and CRL \"Namespaces\"\n CRX_APB_SRC_DEFAULT = 0\n CRX_APB_SRC_FIELD = slice(22, 20)\n CRX_APB_ODIVBY2_FIELD = 16\n CRX_APB_FBDIV_FIELD = slice(14, 8)\n\n PLX_CTRL_ODIV1_WIDTH = (PLX_CTRL_ODIV1_FIELD.start -\n PLX_CTRL_ODIV1_FIELD.stop + 1)\n PLX_CTRL_ODIV0_WIDTH = (PLX_CTRL_ODIV0_FIELD.start -\n PLX_CTRL_ODIV0_FIELD.stop + 1)\n VALID_CLOCK_DIV_PRODUCTS = sorted(list(set(\n (np.multiply(\n np.arange(1 << PLX_CTRL_ODIV1_WIDTH).reshape(\n 1 << PLX_CTRL_ODIV1_WIDTH, 1),\n np.arange(1 << PLX_CTRL_ODIV0_WIDTH))).reshape(-1))))\n\n if CPU_ARCH_IS_SUPPORTED:\n IOPLL_CTRL = Register(CRL_APB_ADDRESS + IOPLL_CTRL_OFFSET)\n RPLL_CTRL = Register(CRL_APB_ADDRESS + RPLL_CTRL_OFFSET)\n\n PL_CLK_CTRLS = [Register(CRL_APB_ADDRESS + PL0_CTRL_OFFSET),\n Register(CRL_APB_ADDRESS + PL1_CTRL_OFFSET),\n Register(CRL_APB_ADDRESS + PL2_CTRL_OFFSET),\n Register(CRL_APB_ADDRESS + PL3_CTRL_OFFSET)]\n\n ACPU_CTRL = Register(CRF_APB_ADDRESS + ACPU_CTRL_OFFSET)\n\n APLL_CTRL = Register(CRF_APB_ADDRESS + APLL_CTRL_OFFSET)\n DPLL_CTRL = Register(CRF_APB_ADDRESS + DPLL_CTRL_OFFSET)\n VPLL_CTRL = Register(CRF_APB_ADDRESS + VPLL_CTRL_OFFSET)\n\n PL_SRC_PLL_CTRLS = [IOPLL_CTRL, IOPLL_CTRL, RPLL_CTRL, DPLL_CTRL]\n ACPU_SRC_PLL_CTRLS = [APLL_CTRL, APLL_CTRL, DPLL_CTRL, VPLL_CTRL]\n else:\n warnings.warn(\"Pynq does not support the CPU Architecture: {}\"\n .format(CPU_ARCH), ResourceWarning)\n\n\n @classmethod\n def set_pl_clk(mcs, clk_idx, div0=None, div1=None,\n clk_mhz=DEFAULT_PL_CLK_MHZ):\n \"\"\"This method sets a PL clock frequency.\n\n Users have to specify the index of the PL clock to be changed.\n\n The CPU, and other source clocks, by default, should not get changed.\n\n Users have two options:\n 1. specify the two frequency divider values directly (div0, div1), or\n 2. 
specify the clock rate, in which case the divider values will be\n calculated.\n\n Note\n ----\n In case `div0` and `div1` are both specified, the parameter `clk_mhz`\n will be ignored.\n\n Parameters\n ----------\n clk_idx : int\n The index of the PL clock to be changed, from 0 to 3.\n div0 : int\n The first frequency divider value.\n div1 : int\n The second frequency divider value.\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n pl_clk_reg = mcs.PL_CLK_CTRLS[clk_idx]\n pl_clk_reg[mcs.PLX_CTRL_CLKACT_FIELD] = 1\n pl_clk_reg[mcs.PLX_CTRL_SRC_FIELD] = mcs.PLX_CTRL_SRC_DEFAULT\n super().set_pl_clk(clk_idx, div0, div1, clk_mhz)\n\n @classmethod\n def get_pll_mhz(mcs, pll_reg):\n \"\"\"The getter method for PLL output clocks.\n\n Parameters\n ----------\n pll_reg : Register\n The control register for a PLL\n\n Returns\n -------\n float\n The PLL output clock rate measured in MHz.\n\n \"\"\"\n if pll_reg[mcs.CRX_APB_SRC_FIELD] != mcs.CRX_APB_SRC_DEFAULT:\n raise ValueError(\"Invalid PLL Source\")\n\n pll_fbdiv = pll_reg[mcs.CRX_APB_FBDIV_FIELD]\n if pll_reg[mcs.CRX_APB_ODIVBY2_FIELD] == 1:\n pll_odiv2 = 2\n else:\n pll_odiv2 = 1\n\n return mcs.DEFAULT_SRC_CLK_MHZ * pll_fbdiv / pll_odiv2\n\n @classmethod\n def get_cpu_mhz(mcs):\n \"\"\"The getter method for CPU clock.\n\n The returned clock rate is measured in MHz.\n\n \"\"\"\n arm_src_pll_idx = mcs.ACPU_CTRL[mcs.ACPU_CTRL_SRC_FIELD]\n arm_clk_odiv = mcs.ACPU_CTRL[mcs.ACPU_CTRL_ODIV_FIELD]\n src_pll_reg = mcs.ACPU_SRC_PLL_CTRLS[arm_src_pll_idx]\n return round(mcs.get_pll_mhz(src_pll_reg) / arm_clk_odiv, 6)\n\n\nclass _ClocksZynq(_ClocksMeta):\n \"\"\"Implementation class for all Zynq 7-Series PS and PL clocks\n not exposed to users.\n\n Since this is the abstract base class for all Zynq 7-Series clocks, no\n attributes or methods are exposed to users. 
Users should use the class\n `Clocks` instead.\n\n \"\"\"\n DEFAULT_SRC_CLK_MHZ = 50.0\n\n SLCR_BASE_ADDRESS = 0xF8000000\n ARM_PLL_CTRL_OFFSET = 0x100\n DDR_PLL_CTRL_OFFSET = 0x104\n IO_PLL_CTRL_OFFSET = 0x108\n SRC_PLL_FBDIV_FIELD = slice(18, 12)\n\n FCLK0_CTRL_OFFSET = 0x170\n FCLK1_CTRL_OFFSET = 0x180\n FCLK2_CTRL_OFFSET = 0x190\n FCLK3_CTRL_OFFSET = 0x1A0\n FCLKX_CTRL_ODIV1_FIELD = slice(25, 20)\n FCLKX_CTRL_ODIV0_FIELD = slice(13, 8)\n FCLKX_CTRL_SRC_FIELD = slice(5, 4)\n\n PL_CLK_SRC_FIELD = FCLKX_CTRL_SRC_FIELD\n PL_CLK_ODIV0_FIELD = FCLKX_CTRL_ODIV0_FIELD\n PL_CLK_ODIV1_FIELD = FCLKX_CTRL_ODIV1_FIELD\n\n ARM_CLK_CTRL_OFFSET = 0x120\n ARM_CLK_ODIV_FIELD = slice(13, 8)\n ARM_CLK_SRC_FIELD = slice(5, 4)\n\n FCLKX_CTRL_ODIV1_WIDTH = (FCLKX_CTRL_ODIV1_FIELD.start -\n FCLKX_CTRL_ODIV1_FIELD.stop + 1)\n FCLKX_CTRL_ODIV0_WIDTH = (FCLKX_CTRL_ODIV0_FIELD.start -\n FCLKX_CTRL_ODIV0_FIELD.stop + 1)\n VALID_CLOCK_DIV_PRODUCTS = sorted(list(set(\n (np.multiply(\n np.arange(1 << FCLKX_CTRL_ODIV1_WIDTH).reshape(\n 1 << FCLKX_CTRL_ODIV1_WIDTH, 1),\n np.arange(1 << FCLKX_CTRL_ODIV0_WIDTH))).reshape(-1))))\n\n if CPU_ARCH_IS_SUPPORTED:\n ARM_PLL_CTRL = Register(SLCR_BASE_ADDRESS + ARM_PLL_CTRL_OFFSET)\n DDR_PLL_CTRL = Register(SLCR_BASE_ADDRESS + DDR_PLL_CTRL_OFFSET)\n IO_PLL_CTRL = Register(SLCR_BASE_ADDRESS + IO_PLL_CTRL_OFFSET)\n\n PL_SRC_PLL_CTRLS = [IO_PLL_CTRL, IO_PLL_CTRL,\n ARM_PLL_CTRL, DDR_PLL_CTRL]\n\n PL_CLK_CTRLS = [Register(SLCR_BASE_ADDRESS + FCLK0_CTRL_OFFSET),\n Register(SLCR_BASE_ADDRESS + FCLK1_CTRL_OFFSET),\n Register(SLCR_BASE_ADDRESS + FCLK2_CTRL_OFFSET),\n Register(SLCR_BASE_ADDRESS + FCLK3_CTRL_OFFSET)]\n\n ARM_CLK_CTRL = Register(SLCR_BASE_ADDRESS + ARM_CLK_CTRL_OFFSET)\n\n ARM_SRC_PLL_CTRLS = [ARM_PLL_CTRL, ARM_PLL_CTRL,\n DDR_PLL_CTRL, IO_PLL_CTRL]\n else:\n warnings.warn(\"Pynq does not support the CPU Architecture: {}\"\n .format(CPU_ARCH), ResourceWarning)\n\n\n @classmethod\n def set_pl_clk(mcs, clk_idx, div0=None, div1=None,\n clk_mhz=DEFAULT_PL_CLK_MHZ):\n \"\"\"This method sets a PL clock frequency.\n\n Users have to specify the index of the PL clock to be changed.\n\n The CPU, and other source clocks, by default, should not get changed.\n\n Users have two options:\n 1. specify the two frequency divider values directly (div0, div1), or\n 2. 
specify the clock rate, in which case the divider values will be\n calculated.\n\n Note\n ----\n In case `div0` and `div1` are both specified, the parameter `clk_mhz`\n will be ignored.\n\n Parameters\n ----------\n clk_idx : int\n The index of the PL clock to be changed, from 0 to 3.\n div0 : int\n The first frequency divider value.\n div1 : int\n The second frequency divider value.\n clk_mhz : float\n The clock rate in MHz.\n\n \"\"\"\n super().set_pl_clk(clk_idx, div0, div1, clk_mhz)\n\n @classmethod\n def get_pll_mhz(mcs, pll_reg):\n \"\"\"The getter method for PLL output clocks.\n\n Parameters\n ----------\n pll_reg : Register\n The control register for a PLL\n\n Returns\n -------\n float\n The PLL output clock rate measured in MHz.\n\n \"\"\"\n pll_fbdiv = pll_reg[mcs.SRC_PLL_FBDIV_FIELD]\n clk_mhz = mcs.DEFAULT_SRC_CLK_MHZ * pll_fbdiv\n\n return round(clk_mhz, 6)\n\n @classmethod\n def get_cpu_mhz(mcs):\n \"\"\"The getter method for the CPU clock.\n\n Returns\n -------\n float\n The CPU clock rate measured in MHz.\n\n \"\"\"\n arm_src_pll_idx = mcs.ARM_CLK_CTRL[mcs.ARM_CLK_SRC_FIELD]\n arm_clk_odiv = mcs.ARM_CLK_CTRL[mcs.ARM_CLK_ODIV_FIELD]\n src_pll_reg = mcs.ARM_SRC_PLL_CTRLS[arm_src_pll_idx]\n return round(mcs.get_pll_mhz(src_pll_reg) / arm_clk_odiv, 6)\n\n\nif CPU_ARCH == ZU_ARCH:\n _ClockParent = _ClocksUltrascale\nelif CPU_ARCH == ZYNQ_ARCH:\n _ClockParent = _ClocksZynq\nelse:\n _ClockParent = object\n warnings.warn(\"PYNQ does not support the CPU Architecture: \"\n \"{}\".format(CPU_ARCH))\n\n\nclass Clocks(_ClockParent, metaclass=_ClocksMeta):\n \"\"\"Class for all the PS and PL clocks exposed to users.\n\n With this class, users can get the CPU clock and all the PL clocks. Users\n can also set PL clocks to other values using this class.\n\n Attributes\n ----------\n cpu_mhz : float\n The clock rate of the CPU, measured in MHz.\n fclk0_mhz : float\n The clock rate of the PL clock 0, measured in MHz.\n fclk1_mhz : float\n The clock rate of the PL clock 1, measured in MHz.\n fclk2_mhz : float\n The clock rate of the PL clock 2, measured in MHz.\n fclk3_mhz : float\n The clock rate of the PL clock 3, measured in MHz.\n\n \"\"\"\n pass\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tejaskannan/adaptive-sensor-security | [
"4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b"
] | [
"adaptiveleak/analysis/mutual_information.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom argparse import ArgumentParser\nfrom collections import Counter, defaultdict, OrderedDict, namedtuple\nfrom typing import Dict, List, DefaultDict, Optional, Tuple\n\nfrom adaptiveleak.analysis.plot_utils import COLORS, PLOT_STYLE, LINE_WIDTH, MARKER, MARKER_SIZE, to_label\nfrom adaptiveleak.analysis.plot_utils import LEGEND_FONT, AXIS_FONT, PLOT_SIZE, TITLE_FONT\nfrom adaptiveleak.analysis.plot_utils import iterate_policy_folders, dataset_label\nfrom adaptiveleak.utils.constants import POLICIES, SMALL_NUMBER\nfrom adaptiveleak.utils.file_utils import read_json_gz, iterate_dir\n\n\ndef plot(information_results: DefaultDict[str, Dict[float, float]], dataset: str, output_file: Optional[str]):\n with plt.style.context('seaborn-ticks'):\n fig, ax = plt.subplots(figsize=PLOT_SIZE)\n\n names: List[str] = []\n policy_values: List[float] = []\n\n for name in POLICIES:\n encodings = ['standard', 'padded', 'group'] if name not in ('uniform', 'random') else ['standard']\n\n for encoding in encodings:\n\n policy_name = '{0}_{1}'.format(name, encoding)\n\n if (policy_name not in information_results) and (name not in information_results):\n continue\n\n if name in information_results:\n policy_name = name\n\n information = information_results[policy_name]\n\n energy = sorted(information.keys())\n values = [information[e] for e in energy]\n\n ax.plot(energy, values, label=to_label(policy_name), color=COLORS[policy_name], linewidth=LINE_WIDTH, marker=MARKER, markersize=MARKER_SIZE)\n\n names.append(policy_name)\n policy_values.append((np.median(values), np.max(values)))\n\n ax.legend(fontsize=LEGEND_FONT, loc='center')\n\n ax.set_title('Mutual Info between Message Size and Label on the {0} Dataset'.format(dataset_label(dataset)), fontsize=AXIS_FONT)\n ax.set_xlabel('Energy Budget (mJ)', fontsize=AXIS_FONT)\n ax.set_ylabel('Empirical Normalized Mutual Information', fontsize=AXIS_FONT)\n\n print(' & '.join(names))\n print(' & '.join(map(lambda t: '{0:.2f} ({1:.2f})'.format(t[0], t[1]), policy_values)))\n\n if output_file is None:\n plt.show()\n else:\n plt.savefig(output_file)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--folder', type=str, required=True, help='The folder containing the experiment logs.')\n parser.add_argument('--dataset', type=str, required=True, help='The name of the dataset.')\n parser.add_argument('--output-file', type=str, help='An optional path in which to save the output plot.')\n args = parser.parse_args()\n\n information_results: DefaultDict[str, Dict[float, float]] = defaultdict(dict)\n\n for folder in iterate_policy_folders([args.folder], dataset=args.dataset):\n for sim_file in iterate_dir(folder, pattern='.*json.gz'):\n model = read_json_gz(sim_file)\n\n if model['policy']['encoding_mode'].lower() in ('single_group', 'group_unshifted', 'pruned'):\n continue\n\n name = '{0}_{1}'.format(model['policy']['policy_name'].lower(), model['policy']['encoding_mode'].lower())\n energy_per_seq = model['policy']['energy_per_seq']\n\n mutual_information = model['mutual_information']['norm_mutual_information']\n\n information_results[name][energy_per_seq] = mutual_information\n\n plot(information_results, dataset=args.dataset, output_file=args.output_file)\n"
] | [
[
"numpy.median",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.style.context",
"numpy.max",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Mayoogh/tensorflow | [
"d20954874dab39dc67fd2222003b9ab5ea4840d5"
] | [
"tensorflow/python/data/kernel_tests/map_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.map()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport threading\nimport warnings\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.experimental.ops import threading_options\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):\n \"\"\"Produces a dataset iterator and events to control the order of elements.\n\n Args:\n num_elements: the number of input elements\n num_parallel_calls: the degree of map parallelism\n\n Returns:\n A dataset iterator (represented as `get_next` op) and events that can be\n used to control the order of output elements.\n \"\"\"\n\n # Set up threading events used to sequence when items are produced that\n # are subsequently interleaved. 
These events allow us to deterministically\n # simulate slowdowns and force sloppiness.\n coordination_events = {i: threading.Event() for i in range(num_elements)}\n\n def map_py_fn(x):\n coordination_events[x].wait()\n coordination_events[x].clear()\n return x * x\n\n def map_fn(x):\n return script_ops.py_func(map_py_fn, [x], x.dtype)\n\n options = dataset_ops.Options()\n options.experimental_deterministic = False\n dataset = dataset_ops.Dataset.range(num_elements).map(\n map_fn, num_parallel_calls).with_options(options)\n return dataset, coordination_events\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MapTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n def _buildMapDataset(self, components, count):\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components).map(\n _map_fn).repeat(count)\n self.assertEqual([c.shape[1:] for c in components],\n [shape for shape in dataset.output_shapes])\n return dataset\n\n def testMapDataset(self):\n \"\"\"Test an dataset that maps a TF function across its input elements.\"\"\"\n # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->\n # RepeatDataset(count).\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n\n # Test single-threaded access to the iterator.\n get_next = self.getNext(self._buildMapDataset(components, 14))\n for _ in range(14):\n for i in range(7):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n self.assertAllEqual(component[i]**2, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/117581999): add eager coverage, different threads run in graph\n # context.\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerMapDatasetMultithreaded(self):\n # Test multi-threaded access to the same iterator.\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n get_next = self.getNext(self._buildMapDataset(components, 18))\n results = []\n with self.cached_session() as sess:\n def iterator_thread():\n while True:\n try:\n results.append(sess.run(get_next()))\n except errors.OutOfRangeError:\n return\n threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n # `results` will contain the same elements components**2\n # repeated 18 times, but in a non-deterministic order. 
Sort the\n # results, and assert that each element of components**2 is\n # produced 18 times.\n results.sort(key=lambda x: x[0])\n for i in range(7):\n for j in range(18):\n for component, result_component in zip(components,\n results[i * 18 + j]):\n self.assertAllEqual(component[i]**2, result_component)\n\n def _buildParallelMapDataset(self, components, count, num_parallel_calls,\n output_buffer_size):\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components).map(\n _map_fn, num_parallel_calls=num_parallel_calls).prefetch(\n output_buffer_size).repeat(count)\n\n self.assertEqual([c.shape[1:] for c in components],\n [shape for shape in dataset.output_shapes])\n return dataset\n\n def testParallelMapDataset(self):\n \"\"\"Test an dataset that maps a TF function across its input elements.\"\"\"\n\n # The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->\n # RepeatDataset(count).\n def do_test(num_parallel_calls, output_buffer_size):\n\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n # Test single-threaded access to the iterator.\n get_next = self.getNext(\n self._buildParallelMapDataset(components, 14, num_parallel_calls,\n output_buffer_size))\n for _ in range(14):\n for i in range(7):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n self.assertAllEqual(component[i]**2, result_component)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n for num_parallel_calls_val, output_buffer_size_val in [(1, 1), (1, 2), (2,\n 2),\n (2, 4), (8, 8),\n (8, 16)]:\n do_test(num_parallel_calls_val, output_buffer_size_val)\n\n # TODO(b/117581999): add eager coverage, different threads run in graph\n # context.\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerParallelMapDatasetMultithreaded(self):\n\n def do_test(num_parallel_calls, output_buffer_size):\n # Test multi-threaded access to the same iterator.\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n get_next = self.getNext(\n self._buildParallelMapDataset(components, 18, num_parallel_calls,\n output_buffer_size))\n results = []\n with self.cached_session() as sess:\n\n def iterator_thread():\n while True:\n try:\n results.append(sess.run(get_next()))\n except errors.OutOfRangeError:\n return\n threads = [self.checkedThread(target=iterator_thread)\n for _ in range(64)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n # `results` will contain the same elements components**2\n # repeated 18 times, but in a non-deterministic order. 
Sort the\n # results, and assert that each element of components**2 is\n # produced 18 times.\n results.sort(key=lambda x: x[0])\n for i in range(7):\n for j in range(18):\n for component, result_component in zip(components,\n results[i * 18 + j]):\n self.assertAllEqual(component[i]**2, result_component)\n\n for num_parallel_calls_val, output_buffer_size_val in [\n (1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:\n do_test(num_parallel_calls_val, output_buffer_size_val)\n\n def testImplicitDisposeParallelMapDataset(self):\n # Tests whether a parallel map dataset will be cleaned up correctly when\n # the pipeline does not run it until exhaustion.\n # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->\n # RepeatDataset(1000).\n components = (np.arange(1000),\n np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],\n np.array(37.0) * np.arange(1000))\n\n dataset = self._buildParallelMapDataset(components, 1000, 100, 100)\n # NOTE(mrry): Also test that the prefetching thread is cancelled correctly.\n dataset = dataset.prefetch(100)\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n\n def testParallelMapUnspecifiedOutputSize(self):\n components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)\n\n dataset = (dataset_ops.Dataset.from_tensor_slices(components)\n .map(lambda x: array_ops.check_numerics(x, \"message\"),\n num_parallel_calls=2))\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n\n def testParallelMapError(self):\n components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)\n\n dataset = (dataset_ops.Dataset.from_tensor_slices(components)\n .map(lambda x: array_ops.check_numerics(x, \"message\"),\n num_parallel_calls=2))\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n # The 4th element is NaN, so `array_ops.check_numerics()` should fail.\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testPrefetchError(self):\n components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)\n\n dataset = (dataset_ops.Dataset.from_tensor_slices(components)\n .map(lambda x: array_ops.check_numerics(x, \"message\"))\n .prefetch(2))\n\n get_next = self.getNext(dataset)\n\n for _ in range(3):\n self.evaluate(get_next())\n # The 4th element is NaN, so `array_ops.check_numerics()` should fail.\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testCaptureIterator(self):\n\n def _build_ds(iterator):\n\n def _map_fn(x):\n get_next = iterator.get_next()\n return x * get_next\n\n return dataset_ops.Dataset.range(10).map(_map_fn)\n\n def _build_graph():\n if context.executing_eagerly():\n captured_iterator = iter(dataset_ops.Dataset.range(10))\n else:\n captured_iterator = dataset_ops.make_initializable_iterator(\n dataset_ops.Dataset.range(10))\n ds = _build_ds(captured_iterator)\n return captured_iterator, ds\n\n captured_iter, ds = _build_graph()\n if not context.executing_eagerly():\n self.evaluate(captured_iter.initializer)\n get_next = self.getNext(ds, requires_initialization=True)\n for i in range(10):\n self.assertEqual(i * i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testCaptureHashTable(self):\n # 
NOTE(mrry): We must use the V2 variants of `HashTable`\n # etc. because these produce a `tf.resource`-typed output that is\n # compatible with the in-graph function implementation.\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n\n input_sentences = dataset_ops.Dataset.from_tensor_slices(\n [\"brain brain tank salad surgery\", \"surgery brain\"])\n\n dataset = input_sentences.map(lambda x: string_ops.string_split([x]).values\n ).map(table.lookup)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.evaluate(table.initializer)\n self.evaluate(get_next())\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @test_util.run_v1_only(\"b/123904513\")\n def testCaptureQueue(self):\n elements = np.random.randint(100, size=[200])\n queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])\n enqueue_op = queue.enqueue_many(elements)\n close_op = queue.close()\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(\n -1).map(lambda _: queue.dequeue())\n\n get_next = self.getNext(dataset, requires_initialization=True)\n self.evaluate(enqueue_op)\n self.evaluate(close_op)\n\n for element in elements:\n self.assertEqual(element, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/117581999): Possible deadlock in eager mode, debug.\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerCaptureSameResourceMultipleTimes(self):\n elements = np.random.randint(100, size=[200])\n queue = data_flow_ops.FIFOQueue(\n 200, dtypes.int64, shapes=[], shared_name=\"shared_queue\")\n queue_2 = data_flow_ops.FIFOQueue(\n 200, dtypes.int64, shapes=[], shared_name=\"shared_queue\")\n\n enqueue_op = queue.enqueue_many(elements)\n close_op = queue.close()\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(\n -1).map(lambda _: (queue.dequeue(), queue_2.dequeue()))\n\n self.evaluate(enqueue_op)\n self.evaluate(close_op)\n get_next = self.getNext(dataset, requires_initialization=True)\n for i in range(100):\n self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # TODO(b/121264236): add eager mode coverage when we have mutli-device setup.\n @test_util.run_v1_only(\"b/121264236\")\n def testSkipEagerCaptureConstantsWithConflictingDevices(self):\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.cached_session(config=config):\n with ops.device(\"/device:CPU:0\"):\n a = constant_op.constant(3.0)\n with ops.device(\"/device:CPU:1\"):\n b = constant_op.constant(5.0)\n\n def func(_):\n return math_ops.add(a, b)\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(func)\n expected_output = [8.0] * 10\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n # TODO(b/121264236): add eager mode coverage when we have mutli-device setup.\n @test_util.run_v1_only(\n \"defun will convert RefVariables to ResourceVariables.\")\n def testSkipEagerRefVariablesWithConflictingDevices(self):\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.cached_session(config=config):\n\n def func(_):\n with ops.device(\"/device:CPU:0\"):\n a = variables.VariableV1(3.0)\n with ops.device(\"/device:CPU:1\"):\n b = 
variables.VariableV1(5.0)\n return math_ops.add(a, b)\n\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(func)\n self.evaluate(variables.global_variables_initializer())\n expected_output = [8.0] * 10\n self.assertDatasetProduces(\n dataset,\n expected_output=expected_output,\n requires_initialization=True)\n\n # TODO(b/121264236): add eager mode coverage when we have mutli-device setup.\n @test_util.run_v1_only(\"b/121264236\")\n def testSkipEagerResourceVariablesWithConflictingDevices(self):\n config = config_pb2.ConfigProto(device_count={\"CPU\": 3})\n with self.cached_session(config=config):\n\n def func(_):\n with ops.device(\"/device:CPU:0\"):\n a = variables.Variable(3.0)\n with ops.device(\"/device:CPU:1\"):\n b = variables.Variable(5.0)\n return math_ops.add(a, b)\n\n # The MapDataset node ends up with two ResourceVariable inputs, one on\n # device CPU:0 and the other on device CPU:1. The placer cannot resolve\n # this as it cannot place the MapDatasetOp on both devices.\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(func)\n expected_error = (\n errors.InvalidArgumentError,\n \"Could not colocate node with its resource and reference inputs\")\n self.assertDatasetProduces(\n dataset, expected_error=expected_error, requires_initialization=True)\n\n def testCaptureVariable(self):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(\n 10).map(lambda _: counter_var.assign_add(1))\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.evaluate(counter_var.initializer)\n\n for i in range(10):\n self.assertEqual(i, self.evaluate(counter_var))\n self.assertEqual(i + 1, self.evaluate(get_next()))\n self.assertEqual(10, self.evaluate(counter_var))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n self.assertEqual(10, self.evaluate(counter_var))\n\n # TODO(b/117581999): error not captured for eager mode, debug.\n @test_util.run_v1_only(\"b/120545219\")\n def testSkipEagerCaptureUninitializedVariableError(self):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(\n 10).map(lambda _: counter_var.assign_add(1))\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n with self.assertRaises(errors.NotFoundError):\n self.evaluate(get_next())\n\n def testSeededStatefulOperatorIsProperlyStateful(self):\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(\n 10).map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n random_values = []\n with self.assertRaises(errors.OutOfRangeError):\n while True:\n random_values.extend(self.evaluate(get_next()))\n self.assertLen(random_values, 10)\n self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n random_values_2 = []\n with self.assertRaises(errors.OutOfRangeError):\n while True:\n random_values_2.extend(self.evaluate(get_next()))\n\n # Randomness is repeatable given same seed\n self.assertAllClose(random_values, random_values_2)\n\n def testStatefulMapKeepsStateAcrossIterators(self):\n dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).map(\n lambda _: random_ops.random_uniform((), seed=11)).repeat(1000).batch(10)\n\n get_next = self.getNext(dataset)\n random_values = self.evaluate(get_next())\n\n # Assert that 
one of the next 99 batches yielded by the iterator is\n # different from the first.\n i = 0\n while i < 99:\n if np.any(random_values != self.evaluate(get_next())):\n break\n i += 1\n self.assertLess(i, 99)\n\n def testStatefulOperationInShortCircuit(self):\n counter_var = variable_scope.get_variable(\n \"counter\", (), dtypes.int32, use_resource=True)\n\n def increment_fn(x):\n counter_var.assign_add(1)\n return x\n\n dataset = dataset_ops.Dataset.range(10).map(increment_fn)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.evaluate(counter_var.initializer)\n for i in range(10):\n self.assertEqual(i, self.evaluate(counter_var))\n self.assertEqual(i, self.evaluate(get_next()))\n self.assertEqual(10, self.evaluate(counter_var))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n self.assertEqual(10, self.evaluate(counter_var))\n\n def testMapDict(self):\n dataset = dataset_ops.Dataset.range(10).map(\n lambda x: {\"foo\": x * 2, \"bar\": x**2}).map(\n lambda d: d[\"foo\"] + d[\"bar\"])\n self.assertDatasetProduces(\n dataset, expected_output=[i * 2 + i**2 for i in range(10)])\n\n def testMapNamedtuple(self, count=10):\n # construct dataset of tuples\n labels = dataset_ops.Dataset.range(count)\n images = labels.map(lambda l: -l)\n dataset_tuple = dataset_ops.Dataset.zip((labels, images))\n\n # convert dataset of tuples to dataset of namedtuples\n example = namedtuple(\"Example\", [\"label\", \"image\"])\n dataset_namedtuple = dataset_tuple.map(example)\n\n def preprocess_tuple(label, image):\n image = 2 * image\n return label, image\n\n def preprocess_namedtuple(example):\n return example._replace(image=2 * example.image)\n\n # preprocess both datasets\n dataset_tuple = dataset_tuple.map(preprocess_tuple)\n dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)\n\n next_tuple = self.getNext(dataset_tuple)\n next_namedtuple = self.getNext(dataset_namedtuple)\n\n # make sure both datasets contain the same data\n for i in range(count):\n tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])\n self.assertEqual(tuple_, namedtuple_)\n self.assertEqual(tuple_, (i, -2 * i))\n\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_namedtuple())\n\n def testUseStepContainerInMap(self):\n row = np.arange(6)\n dataset = dataset_ops.Dataset.from_tensors(\n row).map(lambda elems: map_fn.map_fn(lambda x: x * x, elems))\n self.assertDatasetProduces(dataset, expected_output=[row**2])\n\n def testCaseAndCondInMap(self):\n\n def control_map_fn(x, y):\n\n def multiply():\n return x * 2\n\n def divide():\n return x // 2\n\n def defaults_two():\n return control_flow_ops.cond(\n math_ops.equal(math_ops.mod(x, 2), 0),\n multiply,\n divide,\n name=\"cond_mult\")\n\n pred_fn_pairs = {\n math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):\n defaults_two,\n }\n\n return control_flow_ops.case(\n pred_fn_pairs, default=multiply, exclusive=True)\n\n def build_dataset(row, num):\n dataset = dataset_ops.Dataset.from_tensor_slices(\n row).map(lambda x: control_map_fn(x, num))\n return self.getNext(dataset)\n\n row = np.arange(6)\n for num in [2, 3, 4]:\n get_next = build_dataset(row, num)\n for i in range(6):\n self.assertEqual(\n (i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testCaseInWhileInMap(self):\n\n def control_map_fn(x, y):\n\n def multiply():\n return x * 2\n\n def 
divide():\n return x // 2\n\n pred_fn_pairs = {\n math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):\n divide,\n }\n\n return control_flow_ops.case(\n pred_fn_pairs, default=multiply, exclusive=True)\n\n def build_dataset(row, num):\n # pylint: disable=g-long-lambda\n dataset = dataset_ops.Dataset.from_tensors(\n row).map(lambda elems: map_fn.map_fn(\n lambda x: control_map_fn(x, num), elems))\n return self.getNext(dataset)\n\n row = np.arange(6)\n for num in [2, 3, 4]:\n get_next = build_dataset(row, num)\n self.assertAllEqual(\n [x // 2 if (num == 2 or num == 3) else x * 2 for x in row],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testCaseAndCondInWhileInMap(self):\n\n def control_map_fn(x, y):\n\n def multiply():\n return x * 2\n\n def divide():\n return x // 2\n\n def defaults_two():\n return control_flow_ops.cond(\n math_ops.equal(math_ops.mod(x, 2), 0),\n multiply,\n divide,\n name=\"cond_mult\")\n\n pred_fn_pairs = {\n math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):\n defaults_two,\n }\n\n return control_flow_ops.case(\n pred_fn_pairs, default=multiply, exclusive=True)\n\n row = np.arange(6)\n num = 2\n # pylint: disable=g-long-lambda\n dataset = dataset_ops.Dataset.from_tensors(\n row).map(lambda elems: map_fn.map_fn(\n lambda x: control_map_fn(x, num), elems))\n # pylint: enable=g-long-lambda\n get_next = self.getNext(dataset)\n\n self.assertAllEqual([(x // 2 if x % 2 else x * 2) if\n (num == 2 or num == 3) else x * 2 for x in row],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testNestedListMapDataset(self):\n dataset = dataset_ops.Dataset.from_tensors(\n [0, 1, 2]).repeat(10).map(lambda a: ([a[1], a[0] + a[2]], a[1]))\n\n expected_output = [(np.array([1, 2]), 1)] * 10\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n def testPrefetch(self):\n # We will use this event to test that `_map_py_func()` has been\n # invoked a certain number of times (6 times, to be exact) after\n # consuming fewer elements from the iterator.\n ev = threading.Event()\n\n set_event_during_invocation = 5\n\n def _map_py_func(x):\n if x == set_event_during_invocation:\n ev.set()\n return x * x\n\n def _map_fn(x):\n return script_ops.py_func(_map_py_func, [x], x.dtype)\n\n def do_test(buffer_size):\n dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(\n buffer_size)\n\n get_next = self.getNext(dataset)\n # Simple test that prefetch yields the expected values in the\n # expected order.\n for i in range(100):\n self.assertEqual(i * i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n for buffer_size in [1, 10, 100, 1000]:\n do_test(buffer_size)\n\n # We can indirectly observe that varying the buffer size has the\n # intended effect by observing when `ev` is set (on the 6th\n # invocation of `_map_py_func()`).\n # NOTE(mrry): We do not test with `buffer_size ==\n # set_event_during_invocation`, because we must consume at least\n # one element to start the prefetching.\n def do_test_ev(buffer_size):\n dataset = dataset_ops.Dataset.range(100).map(_map_fn).prefetch(\n buffer_size)\n\n get_next = self.getNext(dataset)\n\n event_will_be_set_after_consuming = (\n set_event_during_invocation - buffer_size + 1)\n\n ev.clear()\n for i in range(event_will_be_set_after_consuming):\n self.assertFalse(ev.is_set())\n self.assertEqual(i * i, 
self.evaluate(get_next()))\n ev.wait()\n for i in range(event_will_be_set_after_consuming, 100):\n self.assertEqual(i * i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n for buffer_size in range(1, set_event_during_invocation):\n do_test_ev(buffer_size)\n\n def testReturnList(self):\n dataset = dataset_ops.Dataset.range(\n 10).map(lambda x: [x, constant_op.constant(37.0)])\n self.assertDatasetProduces(\n dataset, expected_output=[(i, 37.0) for i in range(10)])\n\n def testMultiOutputPyFunc(self):\n # The `tf.py_func()` op returns a list of tensors for its outputs.\n def _map_fn(x_tensor):\n def _map_py_func(x):\n return x, np.array(37.0, dtype=np.float64)\n return script_ops.py_func(\n _map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])\n\n dataset = dataset_ops.Dataset.range(10).map(_map_fn)\n self.assertDatasetProduces(\n dataset, expected_output=[(i, 37.0) for i in range(10)])\n\n def testSparse(self):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n dataset = dataset_ops.Dataset.range(10).map(_sparse)\n self.assertDatasetProduces(\n dataset, expected_output=[_sparse(i) for i in range(10)])\n\n def testSparseChain(self):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n def _check(i):\n self.assertTrue(sparse_tensor.is_sparse(i))\n return sparse_ops.sparse_concat(0, [i, i])\n\n dataset = dataset_ops.Dataset.range(10).map(_sparse).map(_check)\n\n self.assertDatasetProduces(\n dataset,\n expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])\n\n @test_util.run_v1_only(\"b/123904513\")\n def testParallelMapOutOfRangeError(self):\n def raising_py_func(i):\n if i == 100:\n raise StopIteration()\n else:\n return i\n\n dataset = dataset_ops.Dataset.range(105).map(\n lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),\n num_parallel_calls=2)\n get_next = self.getNext(dataset)\n for i in range(100):\n self.assertEqual(i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testConstantOutput(self):\n dataset = dataset_ops.Dataset.range(10).map(lambda x: [x, \"hello\", 10])\n self.assertDatasetProduces(dataset, [(i, b\"hello\", 10) for i in range(10)])\n\n def testWarnOnLookupTable(self):\n def collecting_function(x):\n _ = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer([], []), 0.0, name=\"t1\")\n return x\n\n warnings.simplefilter(\"always\")\n with warnings.catch_warnings(record=True) as w:\n _ = dataset_ops.Dataset.range(10).map(collecting_function)\n # NOTE(mrry): Python 3 prints other warnings in addition to the one we are\n # testing, so we search for the expected warning.\n self.assertGreaterEqual(len(w), 1)\n found_warning = False\n for warning in w:\n if (\"Creating lookup tables inside a function passed to Dataset.map() is \"\n \"not supported.\" in str(warning)):\n found_warning = True\n break\n self.assertTrue(found_warning)\n\n def testNestedDatasetMap(self):\n # TODO(b/110122868): When iterators can yield a `tf.data.Dataset`, remove\n # the `get_single_element()` call.\n dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]).map(\n dataset_ops.Dataset.from_tensor_slices).map(\n lambda ds: ds.batch(3)).flat_map(lambda x: x)\n\n self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 
3.0]])\n\n def testReturnValueError(self):\n dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])\n with self.assertRaisesRegexp(\n TypeError, r\"Unsupported return value from function passed to \"\n r\"Dataset.map\\(\\): None.\"):\n _ = dataset.map(lambda x: None)\n\n def testBrokenFunctionErrorOnInitialization(self):\n dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])\n\n def broken_function(_):\n \"\"\"A function deliberately designed to fail on instantiation.\"\"\"\n value = []\n tensor_value = attr_value_pb2.AttrValue()\n tensor_value.tensor.CopyFrom(\n tensor_util.make_tensor_proto(\n value, dtype=dtypes.float32, shape=[0], verify_shape=False))\n dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)\n\n # Create a \"Const\" op with a `tf.float32` value and a `tf.int32` type\n # attr.\n const_tensor = ops.get_default_graph().create_op(\n \"Const\", [], [dtypes.int32],\n attrs={\n \"value\": tensor_value,\n \"dtype\": dtype_value\n },\n name=\"BrokenConst\").outputs[0]\n return const_tensor\n\n dataset = dataset.map(broken_function)\n self.assertDatasetProduces(\n dataset, expected_error=(errors.InvalidArgumentError, \"BrokenConst\"))\n\n# pylint: disable=g-long-lambda\n @parameterized.named_parameters(\n (\"Map\", lambda dataset, func:\n dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),\n (\"ParallelMap\", lambda dataset, func:\n dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,\n use_inter_op_parallelism=False)),\n )\n def testNoInterOpParallelism(self, make_dataset_fn):\n dataset = dataset_ops.Dataset.from_tensors(0)\n\n def _get_tid():\n return np.int64(threading.current_thread().ident)\n\n def _map_fn(_):\n tids = []\n for _ in range(10):\n tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))\n return tids\n\n dataset = make_dataset_fn(dataset, _map_fn)\n get_next = self.getNext(dataset)\n\n tids = self.evaluate(get_next())\n self.assertTrue(all(tids[0] == tid for tid in tids))\n# pylint: enable=g-long-lambda\n\n @parameterized.named_parameters(\n (\"SequentialIdentity\", None, lambda x: x, None),\n (\"SequentialReplicate\", None, lambda x: (x, x), None),\n (\"SequentialSwap\", (None, None), lambda x, y: (y, x), None),\n (\"SequentialProject\", (None, None), lambda x, y: x, None),\n (\"ParallelIdentity\", None, lambda x: x, 10),\n (\"ParallelReplicate\", None, lambda x: (x, x), 10),\n (\"ParallelSwap\", (None, None), lambda x, y: (y, x), 10),\n (\"ParallelProject\", (None, None), lambda x, y: x, 10),\n )\n def testShortCircuit(self, structure, map_fn, num_parallel_calls):\n dataset = self.structuredDataset(structure).repeat().map(\n map_fn, num_parallel_calls=num_parallel_calls)\n get_next = self.getNext(dataset)\n\n if isinstance(structure, tuple):\n expected = map_fn(*self.evaluate(self.structuredElement(structure)))\n else:\n expected = map_fn(self.evaluate(self.structuredElement(structure)))\n self.assertEqual(expected, self.evaluate(get_next()))\n\n @parameterized.named_parameters(\n (\"Sequential\", None),\n (\"Parallel\", 10),\n )\n def testShortCircuitCapturedInput(self, num_parallel_calls):\n captured_t = variables.Variable(42)\n dataset = self.structuredDataset(None).repeat().map(\n lambda x: captured_t, num_parallel_calls=num_parallel_calls)\n self.evaluate(variables.global_variables_initializer())\n get_next = self.getNext(dataset, requires_initialization=True)\n\n self.assertEqual(42, self.evaluate(get_next()))\n\n @parameterized.named_parameters(\n (\"1\", 1, 1),\n (\"2\", 10, 
1),\n (\"3\", 10, 10),\n (\"4\", 100, 1),\n (\"5\", 100, 10),\n (\"6\", 100, 100),\n )\n def testSloppyInterleaveInOrder(self, num_elements, num_parallel_calls):\n dataset, coordination_events = _make_coordinated_sloppy_dataset(\n num_elements, num_parallel_calls)\n options = dataset_ops.Options()\n options.experimental_threading = threading_options.ThreadingOptions()\n options.experimental_threading.private_threadpool_size = (\n num_parallel_calls + 1)\n dataset = dataset.with_options(options)\n get_next = self.getNext(dataset, requires_initialization=True)\n for i in range(num_elements):\n coordination_events[i].set()\n self.assertEqual(i * i, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @parameterized.named_parameters(\n (\"1\", 10, 10),\n (\"2\", 100, 10),\n (\"3\", 100, 100),\n )\n def testSloppyInterleaveOutOfOrder(self, num_elements, num_parallel_calls):\n dataset, coordination_events = _make_coordinated_sloppy_dataset(\n num_elements, num_parallel_calls)\n options = dataset_ops.Options()\n options.experimental_threading = threading_options.ThreadingOptions()\n options.experimental_threading.private_threadpool_size = (\n num_parallel_calls + 1)\n dataset = dataset.with_options(options)\n\n get_next = self.getNext(dataset, requires_initialization=True)\n\n elements = [x for x in range(num_elements)]\n for i in [1, 4, 7]:\n elements[i], elements[i + 1] = elements[i + 1], elements[i]\n\n for element in elements:\n coordination_events[element].set()\n self.assertEqual(element * element, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @parameterized.named_parameters(\n (\"Map\", None),\n (\"ParallelMap\", 12),\n )\n def testPreserveCardinality(self, num_parallel_calls):\n\n def py_fn(_):\n raise StopIteration()\n\n dataset = dataset_ops.DatasetV2.from_tensors(0).map(\n lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),\n num_parallel_calls=num_parallel_calls)\n get_next = self.getNext(dataset)\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.data.ops.dataset_ops.MapDataset",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.randint",
"tensorflow.python.data.ops.dataset_ops.ParallelMapDataset",
"tensorflow.python.ops.sparse_ops.sparse_concat",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensors",
"tensorflow.python.framework.test_util.run_v1_only",
"numpy.arange",
"tensorflow.python.ops.string_ops.string_split",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.lookup_ops.KeyValueTensorInitializer",
"numpy.diff",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.check_numerics",
"tensorflow.python.ops.script_ops.py_func",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.framework.tensor_util.make_tensor_proto",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"numpy.array",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"tensorflow.python.ops.control_flow_ops.case",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.data.experimental.ops.threading_options.ThreadingOptions",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.map_fn.map_fn",
"tensorflow.python.ops.math_ops.mod",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.data.ops.dataset_ops.Dataset.zip",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
jpmorgan98/MCDC-TNT-2 | [
"c437596097caa9af56df95213e7f64db38aac40e"
] | [
"mcdc_tnt/numba_kernels/gpu/advance.py"
] | [
"\"\"\"\nName: Advance\nbreif: inputdeck for MCDC-TNT\nAuthor: Jackson Morgan (OR State Univ - [email protected]) CEMeNT\nDate: Dec 2nd 2021\n\"\"\"\n\nimport math\nimport numpy as np\nimport numba as nb\nfrom numba import cuda\n\n#@cuda.jit(nopython=True)\ndef Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,\n num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):\n \n \n \n p_end_trans = np.zeros(num_part, dtype=int)\n end_flag = 0\n max_mesh_index = len(mesh_total_xsec)-1\n \n cycle_count = 0\n \n #copy data to cuda device\n d_p_pos_x = cuda.to_device(p_pos_x)\n d_p_pos_y = cuda.to_device(p_pos_y)\n d_p_pos_z = cuda.to_device(p_pos_z)\n d_p_dir_y = cuda.to_device(p_dir_y)\n d_p_dir_z = cuda.to_device(p_dir_z)\n d_p_dir_x = cuda.to_device(p_dir_x)\n d_p_mesh_cell = cuda.to_device(p_mesh_cell)\n d_p_speed = cuda.to_device(p_speed)\n d_p_time = cuda.to_device(p_time)\n d_p_end_trans = cuda.to_device(p_end_trans)\n d_mesh_total_xsec = cuda.to_device(mesh_total_xsec)\n \n threadsperblock = 32\n blockspergrid = (num_part + (threadsperblock - 1)) // threadsperblock\n #ScatterCuda[blockspergrid, threadsperblock](d_scatter_indices, d_p_dir_x, d_p_dir_y, d_p_dir_z, d_p_rands)\n \n \n while end_flag == 0:\n #allocate randoms\n rands = np.random.rand(num_part)\n d_rands = cuda.to_device(rands)\n #vector of indicies for particle transport\n \n p_dist_travled = np.zeros(num_part, dtype=float)\n d_p_dist_travled = cuda.to_device(p_dist_travled)\n \n pre_p_mesh = p_mesh_cell\n \n AdvanceCuda[blockspergrid, threadsperblock](d_p_pos_x, d_p_pos_y, d_p_pos_z,\n d_p_dir_y, d_p_dir_z, d_p_dir_x, \n d_p_mesh_cell, d_p_speed, d_p_time, \n dx, d_mesh_total_xsec, L,\n d_p_dist_travled, d_p_end_trans, d_rands, num_part)\n \n \n #retrive two important peices of data\n p_dist_travled = d_p_dist_travled.copy_to_host()\n p_dir_z = d_p_dir_z.copy_to_host()\n p_mesh_cell = d_p_mesh_cell.copy_to_host()\n p_end_trans = d_p_end_trans.copy_to_host()\n \n \n end_flag = 1\n for i in range(num_part):\n if (0 < pre_p_mesh[i] < max_mesh_index):\n mesh_dist_traveled[pre_p_mesh[i]] += p_dist_travled[i]\n mesh_dist_traveled_squared[pre_p_mesh[i]] += p_dist_travled[i]**2\n \n if p_end_trans[i] == 0:\n end_flag = 0\n \n summer = p_end_trans.sum()\n cycle_count += 1\n \n print(\"Advance Complete:......{1}% \".format(cycle_count, int(100*summer/num_part)), end = \"\\r\")\n print()\n \n p_pos_x = d_p_pos_x.copy_to_host()\n p_pos_y = d_p_pos_y.copy_to_host()\n p_pos_z = d_p_pos_z.copy_to_host()\n p_dir_y = d_p_dir_y.copy_to_host()\n p_dir_z = d_p_dir_z.copy_to_host()\n p_dir_x = d_p_dir_x.copy_to_host()\n p_speed = d_p_speed.copy_to_host()\n p_time = d_p_time.copy_to_host()\n\n \n return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared)\n\n\n\[email protected] \ndef AdvanceCuda(p_pos_x, p_pos_y, p_pos_z,\n p_dir_y, p_dir_z, p_dir_x, \n p_mesh_cell, p_speed, p_time, \n dx, mesh_total_xsec, L,\n p_dist_travled, p_end_trans, rands, num_part):\n\n kicker = 1e-10\n i = cuda.grid(1)\n \n if (i < num_part):\n \n if (p_end_trans[i] == 0):\n if (p_pos_x[i] < 0): #exited rhs\n p_end_trans[i] = 1\n elif (p_pos_x[i] >= L): #exited lhs\n p_end_trans[i] = 1\n \n else:\n dist = -math.log(rands[i]) / mesh_total_xsec[p_mesh_cell[i]]\n \n x_loc = (p_dir_x[i] * dist) + p_pos_x[i]\n LB = p_mesh_cell[i] * dx\n RB = LB + dx\n \n if (x_loc < LB): #move partilce into cell at left\n p_dist_travled[i] = (LB - 
p_pos_x[i])/p_dir_x[i] + kicker\n cell_next = p_mesh_cell[i] - 1\n \n elif (x_loc > RB): #move particle into cell at right\n p_dist_travled[i] = (RB - p_pos_x[i])/p_dir_x[i] + kicker\n cell_next = p_mesh_cell[i] + 1\n \n else: #move particle in cell\n p_dist_travled[i] = dist\n p_end_trans[i] = 1\n cell_next = p_mesh_cell[i]\n \n p_pos_x[i] += p_dir_x[i]*p_dist_travled[i]\n p_pos_y[i] += p_dir_y[i]*p_dist_travled[i]\n p_pos_z[i] += p_dir_z[i]*p_dist_travled[i]\n \n p_mesh_cell[i] = cell_next\n p_time[i] += p_dist_travled[i]/p_speed[i]\n \n\n\n\n\n\ndef StillIn(p_pos_x, surface_distances, p_alive, num_part):\n tally_left = 0\n tally_right = 0\n for i in range(num_part):\n #exit at left\n if p_pos_x[i] <= surface_distances[0]:\n tally_left += 1\n p_alive[i] = False\n \n elif p_pos_x[i] >= surface_distances[len(surface_distances)-1]:\n tally_right += 1\n p_alive[i] = False\n \n return(p_alive, tally_left, tally_right)\n\n\n\n\ndef test_Advance():\n L = 1\n dx = .25\n N_m = 4\n \n num_part = 6\n p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1])\n p_pos_y = 2.1*np.ones(num_part)\n p_pos_z = 3.4*np.ones(num_part)\n \n p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4])\n \n p_dir_x = np.ones(num_part)\n p_dir_x[0] = -1\n p_dir_y = np.zeros(num_part)\n p_dir_z = np.zeros(num_part)\n \n p_speed = np.ones(num_part)\n p_time = np.zeros(num_part)\n p_alive = np.ones(num_part, bool)\n p_alive[5] = False\n \n \n particle_speed = 1\n mesh_total_xsec = np.array([0.1,1,.1,100])\n \n mesh_dist_traveled_squared = np.zeros(N_m)\n mesh_dist_traveled = np.zeros(N_m)\n \n \n [p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)\n \n \n assert (np.sum(mesh_dist_traveled) > 0)\n assert (np.sum(mesh_dist_traveled_squared) > 0)\n assert (p_pos_x[0] == -.01)\n assert (p_pos_x[5] == 1.1)\n assert (p_pos_x[1:4].all() > .75)\n \n \n \ndef test_StillIn(): \n \n num_part = 7\n surface_distances = [0,.25,.75,1]\n p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])\n p_alive = np.ones(num_part, bool)\n \n [p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)\n \n assert(p_alive[0] == False)\n assert(p_alive[5] == False)\n assert(tally_left == 2)\n assert(tally_right == 2)\n assert(p_alive[2:4].all() == True)\n\n\nif __name__ == '__main__':\n test_Advance()\n test_StillIn()\n \n"
] | [
[
"numpy.ones",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Atomix85/Naball-Evolution-Invasion | [
"2ec2bc9d7a17696ae442e9eca84603a6ba21a1d1"
] | [
"2.77/python/lib/site-packages/numpy/core/setup.py"
] | [
"from __future__ import division, print_function\n\nimport imp\nimport os\nimport sys\nimport shutil\nimport pickle\nimport copy\nimport warnings\nimport re\nfrom os.path import join\nfrom numpy.distutils import log\nfrom distutils.dep_util import newer\nfrom distutils.sysconfig import get_config_var\n\nfrom setup_common import *\n\n# Set to True to enable multiple file compilations (experimental)\nENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', \"1\") != \"0\")\n# Set to True to enable relaxed strides checking. This (mostly) means\n# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.\nNPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"0\") != \"0\")\n\n# XXX: ugly, we use a class to avoid calling twice some expensive functions in\n# config.h/numpyconfig.h. I don't see a better way because distutils force\n# config.h generation inside an Extension class, and as such sharing\n# configuration informations between extensions is not easy.\n# Using a pickled-based memoize does not work because config_cmd is an instance\n# method, which cPickle does not like.\n#\n# Use pickle in all cases, as cPickle is gone in python3 and the difference\n# in time is only in build. -- Charles Harris, 2013-03-30\n\nclass CallOnceOnly(object):\n def __init__(self):\n self._check_types = None\n self._check_ieee_macros = None\n self._check_complex = None\n\n def check_types(self, *a, **kw):\n if self._check_types is None:\n out = check_types(*a, **kw)\n self._check_types = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_types))\n return out\n\n def check_ieee_macros(self, *a, **kw):\n if self._check_ieee_macros is None:\n out = check_ieee_macros(*a, **kw)\n self._check_ieee_macros = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_ieee_macros))\n return out\n\n def check_complex(self, *a, **kw):\n if self._check_complex is None:\n out = check_complex(*a, **kw)\n self._check_complex = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_complex))\n return out\n\nPYTHON_HAS_UNICODE_WIDE = True\n\ndef pythonlib_dir():\n \"\"\"return path where libpython* is.\"\"\"\n if sys.platform == 'win32':\n return os.path.join(sys.prefix, \"libs\")\n else:\n return get_config_var('LIBDIR')\n\ndef is_npy_no_signal():\n \"\"\"Return True if the NPY_NO_SIGNAL symbol must be defined in configuration\n header.\"\"\"\n return sys.platform == 'win32'\n\ndef is_npy_no_smp():\n \"\"\"Return True if the NPY_NO_SMP symbol must be defined in public\n header (when SMP support cannot be reliably enabled).\"\"\"\n # Python 2.3 causes a segfault when\n # trying to re-acquire the thread-state\n # which is done in error-handling\n # ufunc code. NPY_ALLOW_C_API and friends\n # cause the segfault. So, we disable threading\n # for now.\n if sys.version[:5] < '2.4.2':\n nosmp = 1\n else:\n # Perhaps a fancier check is in order here.\n # so that threads are only enabled if there\n # are actually multiple CPUS? 
-- but\n # threaded code can be nice even on a single\n # CPU so that long-calculating code doesn't\n # block.\n try:\n nosmp = os.environ['NPY_NOSMP']\n nosmp = 1\n except KeyError:\n nosmp = 0\n return nosmp == 1\n\ndef win32_checks(deflist):\n from numpy.distutils.misc_util import get_build_architecture\n a = get_build_architecture()\n\n # Distutils hack on AMD64 on windows\n print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %\n (a, os.name, sys.platform))\n if a == 'AMD64':\n deflist.append('DISTUTILS_USE_SDK')\n\n # On win32, force long double format string to be 'g', not\n # 'Lg', since the MS runtime does not support long double whose\n # size is > sizeof(double)\n if a == \"Intel\" or a == \"AMD64\":\n deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')\n\ndef check_math_capabilities(config, moredefs, mathlibs):\n def check_func(func_name):\n return config.check_func(func_name, libraries=mathlibs,\n decl=True, call=True)\n\n def check_funcs_once(funcs_name):\n decl = dict([(f, True) for f in funcs_name])\n st = config.check_funcs_once(funcs_name, libraries=mathlibs,\n decl=decl, call=decl)\n if st:\n moredefs.extend([(fname2def(f), 1) for f in funcs_name])\n return st\n\n def check_funcs(funcs_name):\n # Use check_funcs_once first, and if it does not work, test func per\n # func. Return success only if all the functions are available\n if not check_funcs_once(funcs_name):\n # Global check failed, check func per func\n for f in funcs_name:\n if check_func(f):\n moredefs.append((fname2def(f), 1))\n return 0\n else:\n return 1\n\n #use_msvc = config.check_decl(\"_MSC_VER\")\n\n if not check_funcs_once(MANDATORY_FUNCS):\n raise SystemError(\"One of the required function to build numpy is not\"\n \" available (the list is %s).\" % str(MANDATORY_FUNCS))\n\n # Standard functions which may not be available and for which we have a\n # replacement implementation. Note that some of these are C99 functions.\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n for f in OPTIONAL_STDFUNCS_MAYBE:\n if config.check_decl(fname2def(f),\n headers=[\"Python.h\", \"math.h\"]):\n OPTIONAL_STDFUNCS.remove(f)\n\n check_funcs(OPTIONAL_STDFUNCS)\n\n for h in OPTIONAL_HEADERS:\n if config.check_func(\"\", decl=False, call=False, headers=[h]):\n moredefs.append((fname2def(h).replace(\".\", \"_\"), 1))\n\n for tup in OPTIONAL_INTRINSICS:\n headers = None\n if len(tup) == 2:\n f, args = tup\n else:\n f, args, headers = tup[0], tup[1], [tup[2]]\n if config.check_func(f, decl=False, call=True, call_args=args,\n headers=headers):\n moredefs.append((fname2def(f), 1))\n\n for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:\n if config.check_func(fn, decl='int %s %s(void *);' % (dec, fn),\n call=False):\n moredefs.append((fname2def(fn), 1))\n\n for fn in OPTIONAL_VARIABLE_ATTRIBUTES:\n if config.check_func(fn, decl='int %s a;' % (fn), call=False):\n m = fn.replace(\"(\", \"_\").replace(\")\", \"_\")\n moredefs.append((fname2def(m), 1))\n\n # C99 functions: float and long double versions\n check_funcs(C99_FUNCS_SINGLE)\n check_funcs(C99_FUNCS_EXTENDED)\n\ndef check_complex(config, mathlibs):\n priv = []\n pub = []\n\n try:\n if os.uname()[0] == \"Interix\":\n warnings.warn(\"Disabling broken complex support. See #1365\")\n return priv, pub\n except:\n # os.uname not available on all platforms. 
blanket except ugly but safe\n pass\n\n # Check for complex support\n st = config.check_header('complex.h')\n if st:\n priv.append(('HAVE_COMPLEX_H', 1))\n pub.append(('NPY_USE_C99_COMPLEX', 1))\n\n for t in C99_COMPLEX_TYPES:\n st = config.check_type(t, headers=[\"complex.h\"])\n if st:\n pub.append(('NPY_HAVE_%s' % type2def(t), 1))\n\n def check_prec(prec):\n flist = [f + prec for f in C99_COMPLEX_FUNCS]\n decl = dict([(f, True) for f in flist])\n if not config.check_funcs_once(flist, call=decl, decl=decl,\n libraries=mathlibs):\n for f in flist:\n if config.check_func(f, call=True, decl=True,\n libraries=mathlibs):\n priv.append((fname2def(f), 1))\n else:\n priv.extend([(fname2def(f), 1) for f in flist])\n\n check_prec('')\n check_prec('f')\n check_prec('l')\n\n return priv, pub\n\ndef check_ieee_macros(config):\n priv = []\n pub = []\n\n macros = []\n\n def _add_decl(f):\n priv.append(fname2def(\"decl_%s\" % f))\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n _macros = [\"isnan\", \"isinf\", \"signbit\", \"isfinite\"]\n for f in _macros:\n py_symbol = fname2def(\"decl_%s\" % f)\n already_declared = config.check_decl(py_symbol,\n headers=[\"Python.h\", \"math.h\"])\n if already_declared:\n if config.check_macro_true(py_symbol,\n headers=[\"Python.h\", \"math.h\"]):\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n else:\n macros.append(f)\n # Normally, isnan and isinf are macro (C99), but some platforms only have\n # func, or both func and macro version. Check for macro only, and define\n # replacement ones if not found.\n # Note: including Python.h is necessary because it modifies some math.h\n # definitions\n for f in macros:\n st = config.check_decl(f, headers = [\"Python.h\", \"math.h\"])\n if st:\n _add_decl(f)\n\n return priv, pub\n\ndef check_types(config_cmd, ext, build_dir):\n private_defines = []\n public_defines = []\n\n # Expected size (in number of bytes) for each type. This is an\n # optimization: those are only hints, and an exhaustive search for the size\n # is done if the hints are wrong.\n expected = {}\n expected['short'] = [2]\n expected['int'] = [4]\n expected['long'] = [8, 4]\n expected['float'] = [4]\n expected['double'] = [8]\n expected['long double'] = [8, 12, 16]\n expected['Py_intptr_t'] = [4, 8]\n expected['PY_LONG_LONG'] = [8]\n expected['long long'] = [8]\n expected['off_t'] = [4, 8]\n\n # Check we have the python header (-dev* packages on Linux)\n result = config_cmd.check_header('Python.h')\n if not result:\n raise SystemError(\n \"Cannot compile 'Python.h'. 
Perhaps you need to \"\\\n \"install python-dev|python-devel.\")\n res = config_cmd.check_header(\"endian.h\")\n if res:\n private_defines.append(('HAVE_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_ENDIAN_H', 1))\n\n # Check basic types sizes\n for type in ('short', 'int', 'long'):\n res = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type), headers = [\"Python.h\"])\n if res:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), \"SIZEOF_%s\" % sym2def(type)))\n else:\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n for type in ('float', 'double', 'long double'):\n already_declared = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type),\n headers = [\"Python.h\"])\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n if not already_declared and not type == 'long double':\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # Compute size of corresponding complex type: used to check that our\n # definition is binary compatible with C99 complex type (check done at\n # build time in npy_common.h)\n complex_def = \"struct {%s __x; %s __y;}\" % (type, type)\n res = config_cmd.check_type_size(complex_def, expected=2*expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % complex_def)\n\n\n for type in ('Py_intptr_t', 'off_t'):\n res = config_cmd.check_type_size(type, headers=[\"Python.h\"],\n library_dirs=[pythonlib_dir()],\n expected=expected[type])\n\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # We check declaration AND type because that's how distutils does it.\n if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):\n res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],\n library_dirs=[pythonlib_dir()],\n expected=expected['PY_LONG_LONG'])\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'PY_LONG_LONG')\n\n res = config_cmd.check_type_size('long long',\n expected=expected['long long'])\n if res >= 0:\n #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'long long')\n\n if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):\n raise RuntimeError(\n \"Config wo CHAR_BIT is not supported\"\\\n \", please contact the maintainers\")\n\n return private_defines, public_defines\n\ndef check_mathlib(config_cmd):\n # Testing the C math library\n mathlibs = []\n mathlibs_choices = [[], ['m'], ['cpml']]\n mathlib = os.environ.get('MATHLIB')\n if mathlib:\n mathlibs_choices.insert(0, mathlib.split(','))\n for libs in mathlibs_choices:\n if config_cmd.check_func(\"exp\", libraries=libs, decl=True, call=True):\n mathlibs = libs\n break\n else:\n raise 
EnvironmentError(\"math library missing; rerun \"\n \"setup.py after setting the \"\n \"MATHLIB env variable\")\n return mathlibs\n\ndef visibility_define(config):\n \"\"\"Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty\n string).\"\"\"\n if config.check_compiler_gcc4():\n return '__attribute__((visibility(\"hidden\")))'\n else:\n return ''\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration, dot_join\n from numpy.distutils.system_info import get_info, default_lib_dirs\n\n config = Configuration('core', parent_package, top_path)\n local_dir = config.local_path\n codegen_dir = join(local_dir, 'code_generators')\n\n if is_released(config):\n warnings.simplefilter('error', MismatchCAPIWarning)\n\n # Check whether we have a mismatch between the set C API VERSION and the\n # actual C API VERSION\n check_api_version(C_API_VERSION, codegen_dir)\n\n generate_umath_py = join(codegen_dir, 'generate_umath.py')\n n = dot_join(config.name, 'generate_umath')\n generate_umath = imp.load_module('_'.join(n.split('.')),\n open(generate_umath_py, 'U'), generate_umath_py,\n ('.py', 'U', 1))\n\n header_dir = 'include/numpy' # this is relative to config.path_in_package\n\n cocache = CallOnceOnly()\n\n def generate_config_h(ext, build_dir):\n target = join(build_dir, header_dir, 'config.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)\n\n # Check math library and C99 math funcs availability\n mathlibs = check_mathlib(config_cmd)\n moredefs.append(('MATHLIB', ','.join(mathlibs)))\n\n check_math_capabilities(config_cmd, moredefs, mathlibs)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])\n\n # Signal check\n if is_npy_no_signal():\n moredefs.append('__NPY_PRIVATE_NO_SIGNAL')\n\n # Windows checks\n if sys.platform=='win32' or os.name=='nt':\n win32_checks(moredefs)\n\n # Inline check\n inline = config_cmd.check_inline()\n\n # Check whether we need our own wide character support\n if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):\n PYTHON_HAS_UNICODE_WIDE = True\n else:\n PYTHON_HAS_UNICODE_WIDE = False\n\n if ENABLE_SEPARATE_COMPILATION:\n moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))\n\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n # Get long double representation\n if sys.platform != 'darwin':\n rep = check_long_double_representation(config_cmd)\n if rep in ['INTEL_EXTENDED_12_BYTES_LE',\n 'INTEL_EXTENDED_16_BYTES_LE',\n 'MOTOROLA_EXTENDED_12_BYTES_BE',\n 'IEEE_QUAD_LE', 'IEEE_QUAD_BE',\n 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',\n 'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:\n moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))\n else:\n raise ValueError(\"Unrecognized long double format: %s\" % rep)\n\n # Py3K check\n if sys.version_info[0] == 3:\n moredefs.append(('NPY_PY3K', 1))\n\n # Generate the config.h file from moredefs\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # define inline to our keyword, or nothing\n target_f.write('#ifndef __cplusplus\\n')\n if inline == 'inline':\n target_f.write('/* #undef inline */\\n')\n else:\n 
target_f.write('#define inline %s\\n' % inline)\n target_f.write('#endif\\n')\n\n # add the guard to make sure config.h is never included directly,\n # but always through npy_config.h\n target_f.write(\"\"\"\n#ifndef _NPY_NPY_CONFIG_H_\n#error config.h should never be included directly, include npy_config.h instead\n#endif\n\"\"\")\n\n target_f.close()\n print('File:', target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n else:\n mathlibs = []\n target_f = open(target)\n for line in target_f:\n s = '#define MATHLIB'\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n target_f.close()\n\n # Ugly: this can be called within a library and not an extension,\n # in which case there is no libraries attributes (and none is\n # needed).\n if hasattr(ext, 'libraries'):\n ext.libraries.extend(mathlibs)\n\n incl_dir = os.path.dirname(target)\n if incl_dir not in config.numpy_include_dirs:\n config.numpy_include_dirs.append(incl_dir)\n\n return target\n\n def generate_numpyconfig_h(ext, build_dir):\n \"\"\"Depends on config.h: generate_config_h has to be called before !\"\"\"\n # put private include directory in build_dir on search path\n # allows using code generation in headers headers\n config.add_include_dirs(join(build_dir, \"src\", \"private\"))\n\n target = join(build_dir, header_dir, '_numpyconfig.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)\n\n if is_npy_no_signal():\n moredefs.append(('NPY_NO_SIGNAL', 1))\n\n if is_npy_no_smp():\n moredefs.append(('NPY_NO_SMP', 1))\n else:\n moredefs.append(('NPY_NO_SMP', 0))\n\n mathlibs = check_mathlib(config_cmd)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])\n\n if ENABLE_SEPARATE_COMPILATION:\n moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))\n\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n # Check wether we can use inttypes (C99) formats\n if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):\n moredefs.append(('NPY_USE_C99_FORMATS', 1))\n\n # visibility check\n hidden_visibility = visibility_define(config_cmd)\n moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))\n\n # Add the C API/ABI versions\n moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))\n moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))\n\n # Add moredefs to header\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # Define __STDC_FORMAT_MACROS\n target_f.write(\"\"\"\n#ifndef __STDC_FORMAT_MACROS\n#define __STDC_FORMAT_MACROS 1\n#endif\n\"\"\")\n target_f.close()\n\n # Dump the numpyconfig.h header to stdout\n print('File: %s' % target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n config.add_data_files((header_dir, target))\n return target\n\n def generate_api_func(module_name):\n def generate_api(ext, build_dir):\n script = join(codegen_dir, module_name + '.py')\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__(module_name)\n log.info('executing %s', script)\n h_file, c_file, doc_file = 
m.generate_api(os.path.join(build_dir, header_dir))\n finally:\n del sys.path[0]\n config.add_data_files((header_dir, h_file),\n (header_dir, doc_file))\n return (h_file,)\n return generate_api\n\n generate_numpy_api = generate_api_func('generate_numpy_api')\n generate_ufunc_api = generate_api_func('generate_ufunc_api')\n\n config.add_include_dirs(join(local_dir, \"src\", \"private\"))\n config.add_include_dirs(join(local_dir, \"src\"))\n config.add_include_dirs(join(local_dir))\n\n config.add_data_files('include/numpy/*.h')\n config.add_include_dirs(join('src', 'npymath'))\n config.add_include_dirs(join('src', 'multiarray'))\n config.add_include_dirs(join('src', 'umath'))\n config.add_include_dirs(join('src', 'npysort'))\n\n config.add_define_macros([(\"HAVE_NPY_CONFIG_H\", \"1\")])\n config.add_define_macros([(\"_FILE_OFFSET_BITS\", \"64\")])\n config.add_define_macros([('_LARGEFILE_SOURCE', '1')])\n config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])\n\n config.numpy_include_dirs.extend(config.paths('include'))\n\n deps = [join('src', 'npymath', '_signbit.c'),\n join('include', 'numpy', '*object.h'),\n 'include/numpy/fenv/fenv.c',\n 'include/numpy/fenv/fenv.h',\n join(codegen_dir, 'genapi.py'),\n ]\n\n # Don't install fenv unless we need them.\n if sys.platform == 'cygwin':\n config.add_data_dir('include/numpy/fenv')\n\n #######################################################################\n # dummy module #\n #######################################################################\n\n # npymath needs the config.h and numpyconfig.h files to be generated, but\n # build_clib cannot handle generate_config_h and generate_numpyconfig_h\n # (don't ask). Because clib are generated before extensions, we have to\n # explicitly add an extension which has generate_config_h and\n # generate_numpyconfig_h as sources *before* adding npymath.\n\n config.add_extension('_dummy',\n sources = [join('src', 'dummymodule.c'),\n generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api]\n )\n\n #######################################################################\n # npymath library #\n #######################################################################\n\n subst_dict = dict([(\"sep\", os.path.sep), (\"pkgname\", \"numpy.core\")])\n def get_mathlib_info(*args):\n # Another ugly hack: the mathlib info is known once build_src is run,\n # but we cannot use add_installed_pkg_config here either, so we only\n # update the substition dictionary during npymath build\n config_cmd = config.get_config_cmd()\n\n # Check that the toolchain works, to fail early if it doesn't\n # (avoid late errors with MATHLIB which are confusing if the\n # compiler does not work).\n st = config_cmd.try_link('int main(void) { return 0;}')\n if not st:\n raise RuntimeError(\"Broken toolchain: cannot link a simple C program\")\n mlibs = check_mathlib(config_cmd)\n\n posix_mlib = ' '.join(['-l%s' % l for l in mlibs])\n msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])\n subst_dict[\"posix_mathlib\"] = posix_mlib\n subst_dict[\"msvc_mathlib\"] = msvc_mlib\n\n npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),\n join('src', 'npymath', 'ieee754.c.src'),\n join('src', 'npymath', 'npy_math_complex.c.src'),\n join('src', 'npymath', 'halffloat.c')]\n config.add_installed_library('npymath',\n sources=npymath_sources + [get_mathlib_info],\n install_dir='lib')\n config.add_npy_pkg_config(\"npymath.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n config.add_npy_pkg_config(\"mlib.ini.in\", \"lib/npy-pkg-config\",\n 
subst_dict)\n\n #######################################################################\n # npysort library #\n #######################################################################\n\n # This library is created for the build but it is not installed\n npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),\n join('src', 'npysort', 'mergesort.c.src'),\n join('src', 'npysort', 'heapsort.c.src'),\n join('src', 'private', 'npy_partition.h.src'),\n join('src', 'npysort', 'selection.c.src'),\n join('src', 'private', 'npy_binsearch.h.src'),\n join('src', 'npysort', 'binsearch.c.src'),\n ]\n config.add_library('npysort',\n sources=npysort_sources,\n include_dirs=[])\n\n\n #######################################################################\n # multiarray module #\n #######################################################################\n\n # Multiarray version: this function is needed to build foo.c from foo.c.src\n # when foo.c is included in another file and as such not in the src\n # argument of build_ext command\n def generate_multiarray_templated_sources(ext, build_dir):\n from numpy.distutils.misc_util import get_cmd\n\n subpath = join('src', 'multiarray')\n sources = [join(local_dir, subpath, 'scalartypes.c.src'),\n join(local_dir, subpath, 'arraytypes.c.src'),\n join(local_dir, subpath, 'nditer_templ.c.src'),\n join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),\n join(local_dir, subpath, 'einsum.c.src')]\n\n # numpy.distutils generate .c from .c.src in weird directories, we have\n # to add them there as they depend on the build_dir\n config.add_include_dirs(join(build_dir, subpath))\n cmd = get_cmd('build_src')\n cmd.ensure_finalized()\n cmd.template_sources(sources, ext)\n\n multiarray_deps = [\n join('src', 'multiarray', 'arrayobject.h'),\n join('src', 'multiarray', 'arraytypes.h'),\n join('src', 'multiarray', 'array_assign.h'),\n join('src', 'multiarray', 'buffer.h'),\n join('src', 'multiarray', 'calculation.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'convert_datatype.h'),\n join('src', 'multiarray', 'convert.h'),\n join('src', 'multiarray', 'conversion_utils.h'),\n join('src', 'multiarray', 'ctors.h'),\n join('src', 'multiarray', 'descriptor.h'),\n join('src', 'multiarray', 'getset.h'),\n join('src', 'multiarray', 'hashdescr.h'),\n join('src', 'multiarray', 'iterators.h'),\n join('src', 'multiarray', 'mapping.h'),\n join('src', 'multiarray', 'methods.h'),\n join('src', 'multiarray', 'multiarraymodule.h'),\n join('src', 'multiarray', 'nditer_impl.h'),\n join('src', 'multiarray', 'numpymemoryview.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'multiarray', 'numpyos.h'),\n join('src', 'multiarray', 'refcount.h'),\n join('src', 'multiarray', 'scalartypes.h'),\n join('src', 'multiarray', 'sequence.h'),\n join('src', 'multiarray', 'shape.h'),\n join('src', 'multiarray', 'ucsnarrow.h'),\n join('src', 'multiarray', 'usertypes.h'),\n join('src', 'private', 'lowlevel_strided_loops.h'),\n join('include', 'numpy', 'arrayobject.h'),\n join('include', 'numpy', '_neighborhood_iterator_imp.h'),\n join('include', 'numpy', 'npy_endian.h'),\n join('include', 'numpy', 'arrayscalars.h'),\n join('include', 'numpy', 'noprefix.h'),\n join('include', 'numpy', 'npy_interrupt.h'),\n join('include', 'numpy', 'npy_3kcompat.h'),\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('include', 'numpy', 'npy_common.h'),\n join('include', 'numpy', 'npy_os.h'),\n join('include', 'numpy', 'utils.h'),\n join('include', 
'numpy', 'ndarrayobject.h'),\n join('include', 'numpy', 'npy_cpu.h'),\n join('include', 'numpy', 'numpyconfig.h'),\n join('include', 'numpy', 'ndarraytypes.h'),\n join('include', 'numpy', 'npy_1_7_deprecated_api.h'),\n join('include', 'numpy', '_numpyconfig.h.in'),\n # add library sources as distuils does not consider libraries\n # dependencies\n ] + npysort_sources + npymath_sources\n\n multiarray_src = [\n join('src', 'multiarray', 'alloc.c'),\n join('src', 'multiarray', 'arrayobject.c'),\n join('src', 'multiarray', 'arraytypes.c.src'),\n join('src', 'multiarray', 'array_assign.c'),\n join('src', 'multiarray', 'array_assign_scalar.c'),\n join('src', 'multiarray', 'array_assign_array.c'),\n join('src', 'multiarray', 'buffer.c'),\n join('src', 'multiarray', 'calculation.c'),\n join('src', 'multiarray', 'common.c'),\n join('src', 'multiarray', 'convert.c'),\n join('src', 'multiarray', 'convert_datatype.c'),\n join('src', 'multiarray', 'conversion_utils.c'),\n join('src', 'multiarray', 'ctors.c'),\n join('src', 'multiarray', 'datetime.c'),\n join('src', 'multiarray', 'datetime_strings.c'),\n join('src', 'multiarray', 'datetime_busday.c'),\n join('src', 'multiarray', 'datetime_busdaycal.c'),\n join('src', 'multiarray', 'descriptor.c'),\n join('src', 'multiarray', 'dtype_transfer.c'),\n join('src', 'multiarray', 'einsum.c.src'),\n join('src', 'multiarray', 'flagsobject.c'),\n join('src', 'multiarray', 'getset.c'),\n join('src', 'multiarray', 'hashdescr.c'),\n join('src', 'multiarray', 'item_selection.c'),\n join('src', 'multiarray', 'iterators.c'),\n join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),\n join('src', 'multiarray', 'mapping.c'),\n join('src', 'multiarray', 'methods.c'),\n join('src', 'multiarray', 'multiarraymodule.c'),\n join('src', 'multiarray', 'nditer_templ.c.src'),\n join('src', 'multiarray', 'nditer_api.c'),\n join('src', 'multiarray', 'nditer_constr.c'),\n join('src', 'multiarray', 'nditer_pywrap.c'),\n join('src', 'multiarray', 'number.c'),\n join('src', 'multiarray', 'numpymemoryview.c'),\n join('src', 'multiarray', 'numpyos.c'),\n join('src', 'multiarray', 'refcount.c'),\n join('src', 'multiarray', 'sequence.c'),\n join('src', 'multiarray', 'shape.c'),\n join('src', 'multiarray', 'scalarapi.c'),\n join('src', 'multiarray', 'scalartypes.c.src'),\n join('src', 'multiarray', 'usertypes.c'),\n join('src', 'multiarray', 'ucsnarrow.c')]\n\n\n if not ENABLE_SEPARATE_COMPILATION:\n multiarray_deps.extend(multiarray_src)\n multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]\n multiarray_src.append(generate_multiarray_templated_sources)\n\n config.add_extension('multiarray',\n sources = multiarray_src +\n [generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api,\n join(codegen_dir, 'generate_numpy_api.py'),\n join('*.py')],\n depends = deps + multiarray_deps,\n libraries = ['npymath', 'npysort'])\n\n #######################################################################\n # umath module #\n #######################################################################\n\n # umath version: this function is needed to build foo.c from foo.c.src\n # when foo.c is included in another file and as such not in the src\n # argument of build_ext command\n def generate_umath_templated_sources(ext, build_dir):\n from numpy.distutils.misc_util import get_cmd\n\n subpath = join('src', 'umath')\n sources = [\n join(local_dir, subpath, 'loops.h.src'),\n join(local_dir, subpath, 'loops.c.src'),\n join(local_dir, subpath, 'simd.inc.src')]\n\n # numpy.distutils 
generate .c from .c.src in weird directories, we have\n # to add them there as they depend on the build_dir\n config.add_include_dirs(join(build_dir, subpath))\n cmd = get_cmd('build_src')\n cmd.ensure_finalized()\n cmd.template_sources(sources, ext)\n\n\n def generate_umath_c(ext, build_dir):\n target = join(build_dir, header_dir, '__umath_generated.c')\n dir = os.path.dirname(target)\n if not os.path.exists(dir):\n os.makedirs(dir)\n script = generate_umath_py\n if newer(script, target):\n f = open(target, 'w')\n f.write(generate_umath.make_code(generate_umath.defdict,\n generate_umath.__file__))\n f.close()\n return []\n\n umath_src = [\n join('src', 'umath', 'umathmodule.c'),\n join('src', 'umath', 'reduction.c'),\n join('src', 'umath', 'funcs.inc.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'loops.h.src'),\n join('src', 'umath', 'loops.c.src'),\n join('src', 'umath', 'ufunc_object.c'),\n join('src', 'umath', 'ufunc_type_resolution.c')]\n\n umath_deps = [\n generate_umath_py,\n join('src', 'multiarray', 'common.h'),\n join('src', 'umath', 'simd.inc.src'),\n join(codegen_dir, 'generate_ufunc_api.py'),\n join('src', 'private', 'ufunc_override.h')] + npymath_sources\n\n if not ENABLE_SEPARATE_COMPILATION:\n umath_deps.extend(umath_src)\n umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]\n umath_src.append(generate_umath_templated_sources)\n umath_src.append(join('src', 'umath', 'funcs.inc.src'))\n umath_src.append(join('src', 'umath', 'simd.inc.src'))\n\n config.add_extension('umath',\n sources = umath_src +\n [generate_config_h,\n generate_numpyconfig_h,\n generate_umath_c,\n generate_ufunc_api],\n depends = deps + umath_deps,\n libraries = ['npymath'],\n )\n\n #######################################################################\n # scalarmath module #\n #######################################################################\n\n config.add_extension('scalarmath',\n sources = [join('src', 'scalarmathmodule.c.src'),\n join('src', 'private', 'scalarmathmodule.h.src'),\n generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api,\n generate_ufunc_api],\n depends = deps + npymath_sources,\n libraries = ['npymath'],\n )\n\n #######################################################################\n # _dotblas module #\n #######################################################################\n\n # Configure blasdot\n blas_info = get_info('blas_opt', 0)\n #blas_info = {}\n def get_dotblas_sources(ext, build_dir):\n if blas_info:\n if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):\n return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.\n return ext.depends[:2]\n return None # no extension module will be built\n\n config.add_extension('_dotblas',\n sources = [get_dotblas_sources],\n depends = [join('blasdot', '_dotblas.c'),\n join('blasdot', 'apple_sgemv_patch.c'),\n join('blasdot', 'cblas.h'),\n ],\n include_dirs = ['blasdot'],\n extra_info = blas_info\n )\n\n #######################################################################\n # umath_tests module #\n #######################################################################\n\n config.add_extension('umath_tests',\n sources = [join('src', 'umath', 'umath_tests.c.src')])\n\n #######################################################################\n # custom rational dtype module #\n #######################################################################\n\n config.add_extension('test_rational',\n sources = [join('src', 'umath', 'test_rational.c.src')])\n\n 
#######################################################################\n # struct_ufunc_test module #\n #######################################################################\n\n config.add_extension('struct_ufunc_test',\n sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])\n\n #######################################################################\n # multiarray_tests module #\n #######################################################################\n\n config.add_extension('multiarray_tests',\n sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])\n\n #######################################################################\n # operand_flag_tests module #\n #######################################################################\n\n config.add_extension('operand_flag_tests',\n sources = [join('src', 'umath', 'operand_flag_tests.c.src')])\n\n config.add_data_dir('tests')\n config.add_data_dir('tests/data')\n\n config.make_svn_version_py()\n\n return config\n\nif __name__=='__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n"
] | [
[
"numpy.distutils.system_info.get_info",
"numpy.distutils.misc_util.Configuration",
"numpy.distutils.core.setup",
"numpy.distutils.misc_util.get_cmd",
"numpy.distutils.misc_util.get_build_architecture",
"numpy.distutils.log.info",
"numpy.distutils.misc_util.dot_join"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.24",
"1.22",
"1.23"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cirrostratus1/bark | [
"6629a9bbc455d0fd708e09bb8e162425e62c4165"
] | [
"modules/runtime/viewer/viewer.py"
] | [
"# Copyright (c) 2019 fortiss GmbH\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\nimport numpy as np\nfrom bark.viewer import Viewer\nfrom bark.geometry import *\nfrom bark.models.dynamic import *\nfrom bark.world.opendrive import *\nfrom bark.world.goal_definition import *\nfrom modules.runtime.commons.parameters import ParameterServer\n\n\nclass BaseViewer(Viewer):\n def __init__(self, params=None, **kwargs):\n if(params is None):\n params = ParameterServer()\n Viewer.__init__(self)\n # color parameters\n # agents\n self.color_other_agents = params[\"Visualization\"][\"Agents\"][\"Color\"][\"Other\", \"Color of other agents\", (0.7,0.7,0.7)]\n self.color_eval_agents = params[\"Visualization\"][\"Agents\"][\"Color\"][\"Controlled\", \"Color of controlled, evaluated agents\", (0.9,0,0)]\n self.alpha_agents = params[\"Visualization\"][\"Agents\"][\"AlphaVehicle\", \"Alpha of agents\", 0.8]\n self.route_color = params[\"Visualization\"][\"Agents\"][\"ColorRoute\", \"Color of agents routes\", (0.2,0.2,0.2)]\n self.draw_route = params[\"Visualization\"][\"Agents\"][\"DrawRoute\", \"Draw Route of each agent\", False]\n self.draw_eval_goals = params[\"Visualization\"][\"Agents\"][\"DrawEvalGoals\", \"Draw Route of eval agent goals\", True]\n self.eval_goal_color = params[\"Visualization\"][\"Agents\"][\"EvalGoalColor\", \"Color of eval agent goals\", (0.0,0.0,0.7)]\n self.draw_history = params[\"Visualization\"][\"Agents\"][\"DrawHistory\", \"Draw history with alpha trace for each agent\", True]\n # map\n self.color_lane_boundaries = params[\"Visualization\"][\"Map\"][\"Lanes\"][\"Boundaries\"][\"Color\", \"Color of agents except ego vehicle\", (0.7,0.7,0.7)]\n self.alpha_lane_boundaries = params[\"Visualization\"][\"Map\"][\"Lanes\"][\"Boundaries\"][\"Alpha\", \"Color of agents except ego vehicle\", 1.0]\n self.plane_color = params[\"Visualization\"][\"Map\"][\"Plane\"][\"Color\", \"Color of the background plane\", (1, 1, 1, 1)]\n self.plane_alpha = params[\"Visualization\"][\"Map\"][\"Plane\"][\"Alpha\", \"Alpha of the background plane\", 1.0]\n\n\n self.parameters = params\n\n self.world_x_range = kwargs.pop(\"x_range\", [-40, 40])\n self.world_y_range = kwargs.pop(\"y_range\", [-40, 40])\n self.use_world_bounds = kwargs.pop(\"use_world_bounds\", False)\n self.follow_agent_id = kwargs.pop(\"follow_agent_id\", None)\n\n self.dynamic_world_x_range = self.world_x_range.copy()\n self.dynamic_world_y_range = self.world_y_range.copy()\n\n def reset():\n pass\n\n def _get_draw_eval_agent_ids(self, world, eval_agent_ids=None, ):\n if self.follow_agent_id is not None:\n if isinstance(self.follow_agent_id, bool) and \\\n eval_agent_ids is not None and \\\n len(eval_agent_ids) == 1:\n draw_eval_agent_id = eval_agent_ids[0]\n else:\n draw_eval_agent_id = self.follow_agent_id\n\n if draw_eval_agent_id in world.agents:\n return draw_eval_agent_id\n\n return None\n\n def _update_world_view_range(self, world, eval_agent_ids=None):\n draw_eval_agent_id = self._get_draw_eval_agent_ids(world, eval_agent_ids)\n\n if draw_eval_agent_id != None:\n follow_agent = world.agents[draw_eval_agent_id]\n state = follow_agent.state\n pose = np.zeros(3)\n pose[0] = state[int(StateDefinition.X_POSITION)]\n pose[1] = state[int(StateDefinition.Y_POSITION)]\n pose[2] = state[int(StateDefinition.THETA_POSITION)]\n\n # center range on agents coordinates\n self.dynamic_world_x_range[0] = pose[0] + self.world_x_range[0]\n self.dynamic_world_x_range[1] = pose[0] + 
self.world_x_range[1]\n\n self.dynamic_world_y_range[0] = pose[1] + self.world_y_range[0]\n self.dynamic_world_y_range[1] = pose[1] + self.world_y_range[1]\n\n if self.use_world_bounds:\n bb = world.bounding_box\n self.dynamic_world_x_range = [bb[0].x(), bb[1].x()]\n self.dynamic_world_y_range = [bb[0].y(), bb[1].y()]\n\n diffx = abs(self.dynamic_world_x_range[1] - self.dynamic_world_x_range[0])\n diffy = abs(self.dynamic_world_y_range[1] - self.dynamic_world_y_range[0])\n\n # enforce that in both dimensions the same range is covered\n if diffx > diffy:\n self.dynamic_world_y_range[0] -= (diffx - diffy)/2\n self.dynamic_world_y_range[1] += (diffx - diffy)/2\n else:\n self.dynamic_world_x_range[0] -= (diffy - diffx)/2\n self.dynamic_world_x_range[1] += (diffy - diffx)/2\n\n def drawPoint2d(self, point2d, color, alpha):\n pass\n\n def drawLine2d(self, line2d, color, alpha, line_style=None):\n pass\n\n def drawPolygon2d(self, polygon, color, alpha):\n pass\n\n def drawTrajectory(self, trajectory, color):\n pass\n\n def drawObstacle(self, obstacle):\n pass\n\n def drawText(self, position, text, **kwargs):\n pass\n\n def getColor(self, color):\n pass\n\n def show(self,block=False):\n pass\n\n def clear(self):\n pass\n\n def drawAgents(self, world):\n for _, agent in world.agents.items():\n self.drawAgent(agent)\n\n def drawHistory(self, agent, color):\n shape = agent.shape\n if isinstance(shape, Polygon2d):\n history = agent.history\n lh = len(history)\n for idx, state_action in enumerate(history):\n state = state_action[0]\n pose = np.zeros(3)\n # pybind creates column based vectors, initialization maybe row-based -> we consider both\n pose[0] = state[int(StateDefinition.X_POSITION)]\n pose[1] = state[int(StateDefinition.Y_POSITION)]\n pose[2] = state[int(StateDefinition.THETA_POSITION)]\n transformed_polygon = shape.transform(pose)\n alpha=1-0.8*(lh-idx)/4\n alpha = 0 if alpha<0 else alpha\n self.drawPolygon2d(transformed_polygon, color, alpha) # fade to 0.2 after 10 steps\n \n def drawGoalDefinition(self, goal_definition):\n if isinstance(goal_definition, GoalDefinitionPolygon):\n self.drawPolygon2d(goal_definition.goal_shape, self.eval_goal_color, alpha=0.9)\n elif isinstance(goal_definition, GoalDefinitionStateLimits):\n self.drawPolygon2d(goal_definition.xy_limits, self.eval_goal_color, alpha=0.9)\n elif isinstance(goal_definition, GoalDefinitionSequential):\n prev_center = np.array([])\n for idx, goal_def in enumerate(goal_definition.sequential_goals):\n self.drawGoalDefinition(goal_def)\n goal_pos = None\n if isinstance(goal_def, GoalDefinitionPolygon):\n goal_pos = goal_def.goal_shape.center\n elif isinstance(goal_def, GoalDefinitionStateLimits):\n goal_pos = goal_def.xy_limits.center\n self.drawText(position=goal_pos, text=\"Goal{}\".format(idx), coordinate=\"world\")\n if prev_center.any():\n line = Line2d()\n line.addPoint(Point2d(prev_center[0], prev_center[1]))\n line.addPoint(Point2d(goal_pos[0], goal_pos[1]))\n self.drawLine2d(line,color=self.eval_goal_color, alpha=0.9)\n prev_center = goal_pos\n\n def drawWorld(self, world, eval_agent_ids=None, filename=None, scenario_idx=None):\n self.clear()\n self._update_world_view_range(world, eval_agent_ids)\n if world.map:\n self.drawMap(world.map.get_open_drive_map())\n\n # draw agents\n for _, agent in world.agents.items():\n if eval_agent_ids and agent.id in eval_agent_ids:\n color = self.color_eval_agents\n else:\n color = self.color_other_agents\n self.drawAgent(agent, color)\n\n if self.draw_eval_goals and agent.goal_definition:\n 
self.drawGoalDefinition(agent.goal_definition)\n\n self.drawText(position=(0.1,0.9), text=\"Scenario: {}\".format(scenario_idx), fontsize=18)\n self.drawText(position=(0.1,0.95), text=\"Time: {:.2f}\".format(world.time), fontsize=18)\n\n def drawMap(self, map):\n # draw the boundary of each lane\n for _, road in map.get_roads().items():\n for lane_section in road.lane_sections:\n for _, lane in lane_section.get_lanes().items():\n dashed = False\n # center line is type none and is drawn as broken\n if lane.road_mark.type == RoadMarkType.broken or lane.road_mark.type == RoadMarkType.none: \n dashed = True\n self.drawLine2d(lane.line, self.color_lane_boundaries, self.alpha_lane_boundaries, dashed)\n\n\n def drawAgent(self, agent, color):\n shape = agent.shape\n if isinstance(shape, Polygon2d):\n pose = np.zeros(3)\n # pybind creates column based vectors, initialization maybe row-based -> we consider both\n state = agent.state\n pose[0] = state[int(StateDefinition.X_POSITION)]\n pose[1] = state[int(StateDefinition.Y_POSITION)]\n pose[2] = state[int(StateDefinition.THETA_POSITION)]\n transformed_polygon = shape.transform(pose)\n self.drawPolygon2d(transformed_polygon, color, 1.0)\n\n if self.draw_route:\n self.drawRoute(agent)\n\n # self.drawHistory(agent, color)\n\n def drawDrivingCorridor(self, corridor, color=None):\n if color is None:\n # generate random colour\n color = list(np.random.choice(range(256), size=3)/256)\n if corridor.center.valid() and corridor.inner.valid() and corridor.outer.valid():\n self.drawLine2d(corridor.center, color, 1, True)\n self.drawLine2d(corridor.inner, color, 1)\n self.drawLine2d(corridor.outer, color, 1)\n else:\n print(\"Cannot draw Driving Corridor, as it is empty\") \n\n def drawRoute(self, agent):\n # TODO(@hart): visualize the global as well as the local driving corridor\n self.drawDrivingCorridor(agent.local_map.get_driving_corridor(), self.route_color)\n self.drawDrivingCorridor(agent.local_map.get_horizon_driving_corridor(), (0.8, 0.72, 0.2))\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChaceAshcraft/ray | [
"a72237f1712e2805f6799de3489e326e2965d624"
] | [
"python/ray/tests/test_reconstruction.py"
] | [
"import os\nimport signal\nimport sys\n\nimport numpy as np\nimport pytest\n\nimport ray\nfrom ray.test_utils import (\n wait_for_condition,\n wait_for_pid_to_exit,\n)\n\nSIGKILL = signal.SIGKILL if sys.platform != \"win32\" else signal.SIGTERM\n\n\ndef test_cached_object(ray_start_cluster):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(num_cpus=0, _system_config=config)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n @ray.remote\n def dependent_task(x):\n return\n\n obj = large_object.options(resources={\"node1\": 1}).remote()\n ray.get(dependent_task.options(resources={\"node2\": 1}).remote(obj))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n wait_for_condition(\n lambda: not all(node[\"Alive\"] for node in ray.nodes()), timeout=10)\n\n for _ in range(20):\n large_object.options(resources={\"node2\": 1}).remote()\n\n ray.get(dependent_task.remote(obj))\n\n\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_reconstruction_cached_dependency(ray_start_cluster,\n reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=config,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(max_retries=0)\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n @ray.remote\n def chain(x):\n return x\n\n @ray.remote\n def dependent_task(x):\n return\n\n obj = large_object.options(resources={\"node2\": 1}).remote()\n obj = chain.options(resources={\"node1\": 1}).remote(obj)\n ray.get(dependent_task.options(resources={\"node1\": 1}).remote(obj))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n wait_for_condition(\n lambda: not all(node[\"Alive\"] for node in ray.nodes()), timeout=10)\n\n for _ in range(20):\n large_object.options(resources={\"node2\": 1}).remote()\n\n if reconstruction_enabled:\n ray.get(dependent_task.remote(obj))\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n ray.get(dependent_task.remote(obj))\n with pytest.raises(ray.exceptions.ObjectLostError):\n raise e.as_instanceof_cause()\n\n\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_basic_reconstruction(ray_start_cluster, reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n 
\"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=config,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(max_retries=1 if reconstruction_enabled else 0)\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n @ray.remote\n def dependent_task(x):\n return\n\n obj = large_object.options(resources={\"node1\": 1}).remote()\n ray.get(dependent_task.options(resources={\"node1\": 1}).remote(obj))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n\n if reconstruction_enabled:\n ray.get(dependent_task.remote(obj))\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n ray.get(dependent_task.remote(obj))\n with pytest.raises(ray.exceptions.ObjectLostError):\n raise e.as_instanceof_cause()\n\n\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_basic_reconstruction_put(ray_start_cluster, reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=config,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(max_retries=1 if reconstruction_enabled else 0)\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n @ray.remote\n def dependent_task(x):\n return x\n\n obj = ray.put(np.zeros(10**7, dtype=np.uint8))\n result = dependent_task.options(resources={\"node1\": 1}).remote(obj)\n ray.get(result)\n del obj\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n\n for _ in range(20):\n ray.put(np.zeros(10**7, dtype=np.uint8))\n\n if reconstruction_enabled:\n ray.get(result)\n else:\n # The copy that we fetched earlier may still be local or it may have\n # been evicted.\n try:\n ray.get(result)\n except ray.exceptions.ObjectLostError:\n pass\n\n\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_basic_reconstruction_actor_task(ray_start_cluster,\n reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n 
cluster.add_node(\n num_cpus=0,\n _system_config=config,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 2}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(\n max_restarts=-1,\n max_task_retries=-1 if reconstruction_enabled else 0,\n resources={\"node1\": 1},\n num_cpus=0)\n class Actor:\n def __init__(self):\n pass\n\n def large_object(self):\n return np.zeros(10**7, dtype=np.uint8)\n\n def pid(self):\n return os.getpid()\n\n @ray.remote\n def dependent_task(x):\n return\n\n a = Actor.remote()\n pid = ray.get(a.pid.remote())\n obj = a.large_object.remote()\n ray.get(dependent_task.options(resources={\"node1\": 1}).remote(obj))\n\n # Workaround to kill the actor process too since there is a bug where the\n # actor's plasma client hangs after the plasma store has exited.\n os.kill(pid, SIGKILL)\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 2}, object_store_memory=10**8)\n\n wait_for_pid_to_exit(pid)\n\n if reconstruction_enabled:\n ray.get(dependent_task.remote(obj))\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n ray.get(dependent_task.remote(obj))\n with pytest.raises(ray.exceptions.ObjectLostError):\n raise e.as_instanceof_cause()\n\n # Make sure the actor handle is still usable.\n pid = ray.get(a.pid.remote())\n\n\[email protected](sys.platform == \"win32\", reason=\"Test failing on Windows.\")\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_basic_reconstruction_actor_constructor(ray_start_cluster,\n reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=config,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(max_retries=1 if reconstruction_enabled else 0)\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n # Both the constructor and a method depend on the large object.\n @ray.remote(max_restarts=-1)\n class Actor:\n def __init__(self, x):\n pass\n\n def dependent_task(self, x):\n return\n\n def pid(self):\n return os.getpid()\n\n obj = large_object.options(resources={\"node1\": 1}).remote()\n a = Actor.options(resources={\"node1\": 1}).remote(obj)\n ray.get(a.dependent_task.remote(obj))\n pid = ray.get(a.pid.remote())\n\n # Workaround to kill the actor process too since there is a bug where the\n # actor's plasma client hangs after the plasma store has exited.\n os.kill(pid, SIGKILL)\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n\n wait_for_pid_to_exit(pid)\n\n # Wait for the actor to restart.\n def probe():\n try:\n 
ray.get(a.dependent_task.remote(obj))\n return True\n except ray.exceptions.RayActorError:\n return False\n except (ray.exceptions.RayTaskError, ray.exceptions.ObjectLostError):\n return True\n\n wait_for_condition(probe)\n\n if reconstruction_enabled:\n ray.get(a.dependent_task.remote(obj))\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n x = a.dependent_task.remote(obj)\n print(x)\n ray.get(x)\n with pytest.raises(ray.exceptions.ObjectLostError):\n raise e.as_instanceof_cause()\n\n\[email protected](reason=\"This hangs due to a deadlock in admission control.\")\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_multiple_downstream_tasks(ray_start_cluster, reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=config,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(max_retries=1 if reconstruction_enabled else 0)\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n @ray.remote\n def chain(x):\n return x\n\n @ray.remote\n def dependent_task(x):\n return\n\n obj = large_object.options(resources={\"node2\": 1}).remote()\n downstream = [\n chain.options(resources={\n \"node1\": 1\n }).remote(obj) for _ in range(4)\n ]\n for obj in downstream:\n ray.get(dependent_task.options(resources={\"node1\": 1}).remote(obj))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n\n if reconstruction_enabled:\n for obj in downstream:\n ray.get(dependent_task.options(resources={\"node1\": 1}).remote(obj))\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n for obj in downstream:\n ray.get(\n dependent_task.options(resources={\n \"node1\": 1\n }).remote(obj))\n with pytest.raises(ray.exceptions.ObjectLostError):\n raise e.as_instanceof_cause()\n\n\[email protected](reason=\"This hangs due to a deadlock in admission control.\")\[email protected](\"reconstruction_enabled\", [False, True])\ndef test_reconstruction_chain(ray_start_cluster, reconstruction_enabled):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n # Workaround to reset the config to the default value.\n if not reconstruction_enabled:\n config[\"lineage_pinning_enabled\"] = 0\n\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=config,\n object_store_memory=10**8,\n enable_object_reconstruction=reconstruction_enabled)\n ray.init(address=cluster.address)\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(max_retries=1 if reconstruction_enabled else 0)\n def large_object():\n return np.zeros(10**7, dtype=np.uint8)\n\n @ray.remote\n def chain(x):\n return x\n\n @ray.remote\n def dependent_task(x):\n return x\n\n obj = 
large_object.remote()\n for _ in range(20):\n obj = chain.remote(obj)\n ray.get(dependent_task.remote(obj))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n cluster.add_node(num_cpus=1, object_store_memory=10**8)\n\n if reconstruction_enabled:\n ray.get(dependent_task.remote(obj))\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n ray.get(dependent_task.remote(obj))\n with pytest.raises(ray.exceptions.ObjectLostError):\n raise e.as_instanceof_cause()\n\n\[email protected](reason=\"This hangs due to a deadlock in admission control.\")\[email protected](sys.platform == \"win32\", reason=\"Failing on Windows.\")\ndef test_reconstruction_stress(ray_start_cluster):\n config = {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_timeout_milliseconds\": 100,\n \"max_direct_call_object_size\": 100,\n \"task_retry_delay_ms\": 100,\n \"object_timeout_milliseconds\": 200,\n }\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0, _system_config=config, enable_object_reconstruction=True)\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n cluster.add_node(\n num_cpus=1, resources={\"node2\": 1}, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote\n def large_object():\n return np.zeros(10**5, dtype=np.uint8)\n\n @ray.remote\n def dependent_task(x):\n return\n\n for _ in range(3):\n obj = large_object.options(resources={\"node1\": 1}).remote()\n ray.get(dependent_task.options(resources={\"node2\": 1}).remote(obj))\n\n outputs = [\n large_object.options(resources={\n \"node1\": 1\n }).remote() for _ in range(1000)\n ]\n outputs = [\n dependent_task.options(resources={\n \"node2\": 1\n }).remote(obj) for obj in outputs\n ]\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n node_to_kill = cluster.add_node(\n num_cpus=1, resources={\"node1\": 1}, object_store_memory=10**8)\n\n i = 0\n while outputs:\n ray.get(outputs.pop(0))\n print(i)\n i += 1\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aaronkl/emukit | [
"ccd80811a1b8e11ece97dceb2f8c7b92a7a4f236"
] | [
"emukit/bayesian_optimization/acquisitions/expected_improvement.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\nfrom typing import Tuple, Union\n\nfrom GPyOpt.util.general import get_quantiles\nimport numpy as np\n\nfrom ...core.interfaces import IModel, IDifferentiable, IPriorHyperparameters\nfrom ...core.acquisition import Acquisition\n\n\nclass ExpectedImprovement(Acquisition):\n\n def __init__(self, model: Union[IModel, IDifferentiable], jitter: float = float(0))-> None:\n \"\"\"\n This acquisition computes for a given input the improvement over the current best observed value in\n expectation. For more information see:\n\n Efficient Global Optimization of Expensive Black-Box Functions\n Jones, Donald R. and Schonlau, Matthias and Welch, William J.\n Journal of Global Optimization\n\n :param model: model that is used to compute the improvement.\n :param jitter: parameter to encourage extra exploration.\n \"\"\"\n\n self.model = model\n self.jitter = jitter\n\n def evaluate(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes the Expected Improvement.\n\n :param x: points where the acquisition is evaluated.\n \"\"\"\n\n mean, variance = self.model.predict(x)\n standard_deviation = np.sqrt(variance)\n\n y_minimum = np.min(self.model.Y, axis=0)\n\n pdf, cdf, u = get_quantiles(self.jitter, y_minimum, mean, standard_deviation)\n\n improvement = standard_deviation * (u * cdf + pdf)\n\n return improvement\n\n def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:\n \"\"\"\n Computes the Expected Improvement and its derivative.\n\n :param x: locations where the evaluation with gradients is done.\n \"\"\"\n\n mean, variance = self.model.predict(x)\n standard_deviation = np.sqrt(variance)\n\n y_minimum = np.min(self.model.Y, axis=0)\n\n dmean_dx, dvariance_dx = self.model.get_prediction_gradients(x)\n dstandard_deviation_dx = dvariance_dx / (2 * standard_deviation)\n\n pdf, cdf, u = get_quantiles(self.jitter, y_minimum, mean, standard_deviation)\n\n improvement = standard_deviation * (u * cdf + pdf)\n dimprovement_dx = dstandard_deviation_dx * pdf - cdf * dmean_dx\n\n return improvement, dimprovement_dx\n\n @property\n def has_gradients(self) -> bool:\n \"\"\"Returns that this acquisition has gradients\"\"\"\n return isinstance(self.model, IDifferentiable)\n\n\n\nclass IntegratedExpectedImprovement(Acquisition):\n\n def __init__(self, model: Union[IModel, IDifferentiable, IPriorHyperparameters], jitter: float = float(0),\n n_samples = 10) -> None:\n \"\"\"\n This acquisition computes for a given input the improvement over the current best observed value in\n expectation. This function integrates over hyper-parameters the model by computing the average of the\n expected improvements for all samples. For more information see:\n\n Efficient Global Optimization of Expensive Black-Box Functions\n Jones, Donald R. and Schonlau, Matthias and Welch, William J.\n Journal of Global Optimization\n\n :param model: model that is used to compute the improvement.\n :param jitter: parameter to encourage extra exploration.\n \"\"\"\n\n self.model = model\n self.jitter = jitter\n self.n_samples = n_samples\n self.samples = self.model.generate_hyperparameters_samples(n_samples)\n\n def evaluate(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes the integrated Expected Improvement with respect to the hyper-parameters of the model. 
Averages the\n improvement for all the samples.\n\n :param x: points where the acquisition is evaluated.\n :return: numpy array with the integrated expected improvement at the points x.\n \"\"\"\n\n if x.ndim == 1: x = x[None, :]\n improvement = 0\n\n for sample in self.samples:\n self.model.fix_model_hyperparameters(sample)\n acquisition = ExpectedImprovement(self.model, self.jitter)\n improvement += acquisition.evaluate(x)\n\n return improvement/self.n_samples\n\n def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:\n \"\"\"\n Computes the Expected Improvement and its derivative integrating over the hyper-parameters of the model\n\n :param x: locations where the evaluation with gradients is done.\n :return: tuple containing numpy arrays with the integrated expected improvement at the points x\n and its gradient.\n \"\"\"\n\n if x.ndim == 1: x = x[None, :]\n improvement = 0\n dimprovement_dx = 0\n\n for sample in self.samples:\n self.model.fix_model_hyperparameters(sample)\n acquisition = ExpectedImprovement(self.model, self.jitter)\n improvement_sample, dimprovement_dx_sample = acquisition.evaluate_with_gradients(x)\n improvement += improvement_sample\n dimprovement_dx += dimprovement_dx_sample\n\n return improvement/self.n_samples, dimprovement_dx/self.n_samples\n\n @property\n def has_gradients(self) -> bool:\n \"\"\"Returns that this acquisition has gradients\"\"\"\n return isinstance(self.model, IDifferentiable)"
] | [
[
"numpy.sqrt",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
timbook/modelmonitor | [
"876fdc8fb2b48e8e0942f9e7809193c62f0aa77e"
] | [
"modelmonitor/modelmonitor.py"
] | [
"import numpy as np\nimport pandas as pd\n\nclass ModelMonitor:\n def __init__(self, metric, labels=None, subset=None, sep='_'):\n self.metric = metric\n self.labels = labels\n self.subset = subset\n self.sep = sep\n \n def evaluate_1dim_pair(self, x1, x2):\n return self.metric(x1, x2)\n \n def evaluate_2dim_pair(self, x1, x2):\n assert x1.shape[1] == x2.shape[1], \\\n \"Input arrays must have the same number of columns!\"\n\n if self.subset and isinstance(x1, pd.DataFrame):\n x1 = x1[self.subset]\n x2 = x2[self.subset]\n \n p = x1.shape[1]\n x1_arr = x1.values if isinstance(x1, pd.DataFrame) else x1\n x2_arr = x2.values if isinstance(x2, pd.DataFrame) else x2\n \n arr_out = pd.Series([\n self.metric(x1_arr[:, col], x2_arr[:, col]) for col in range(p)\n ])\n \n if isinstance(x1, pd.DataFrame):\n arr_out.index = x1.columns\n \n return arr_out\n \n \n def evaluate_1dim_many(self, arrs):\n it = zip(arrs[:-1], arrs[1:])\n arr_out = [self.evaluate_1dim_pair(x1, x2) for x1, x2 in it]\n return arr_out\n \n def evaluate_2dim_many(self, arrs):\n p = arrs[0].shape[1]\n assert all(arr.shape[1] == p for arr in arrs), \\\n \"Input arrays must have the same number of columns!\"\n\n if self.subset and all(isinstance(arr, pd.DataFrame) for arr in arrs):\n arrs = [arr[self.subset] for arr in arrs]\n \n arrs_np = [\n arr.values if isinstance(arr, pd.DataFrame) else arr\n for arr in arrs\n ]\n \n if self.labels:\n it = zip(\n self.labels[:-1],\n self.labels[1:],\n arrs_np[:-1],\n arrs_np[1:]\n )\n dist_dict = {\n str(lbl1) + self.sep + str(lbl2): self.evaluate_2dim_pair(x1, x2)\n for lbl1, lbl2, x1, x2 in it\n }\n else:\n it = zip(np.arange(p), arrs_np[:-1], arrs_np[1:])\n dist_dict = {lbl: self.evaluate_2dim_pair(x1, x2) for lbl, x1, x2 in it}\n\n arr_out = pd.DataFrame(dist_dict)\n \n if isinstance(arrs[0], pd.DataFrame):\n arr_out.index = arrs[0].columns\n \n return arr_out\n \n def evaluate(self, *arrs, groupby=None, labels=None):\n if labels:\n self.set_labels(labels)\n if len(arrs) == 2:\n x1, x2 = arrs\n \n if np.array(x1).ndim == 1 and np.array(x2).ndim == 1:\n return self.evaluate_1dim_pair(x1, x2)\n \n elif np.array(x1).ndim == 2 and np.array(x2).ndim == 2:\n return self.evaluate_2dim_pair(x1, x2)\n \n else:\n raise ValueError(\n \"Arrays must be of the same dimension and either 1 or 2 dimensions!\"\n )\n \n elif len(arrs) > 2:\n if all(np.array(arr).ndim == 1 for arr in arrs):\n return self.evaluate_1dim_many(arrs)\n \n elif all(np.array(arr).ndim == 2 for arr in arrs):\n if labels:\n self.set_labels(labels)\n return self.evaluate_2dim_many(arrs)\n \n else:\n raise ValueError(\n \"Arrays must be of the same dimension and either 1 or 2 dimensions!\"\n )\n\n elif len(arrs) == 1 and groupby:\n arr = arrs[0]\n grps = np.sort(arr[groupby].unique()).tolist()\n data_split = [df[1].drop(columns=groupby) for df in arr.groupby(groupby)]\n self.set_labels(grps)\n return self.evaluate(*data_split)\n\n def set_labels(self, labels, sep=\"_\"):\n self.labels = labels\n self.sep = sep\n\n def set_subset(self, subset):\n self.subset = subset\n"
] | [
[
"numpy.arange",
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
vmtmxmf5/P-transformer | [
"ba8259d21f72e9a68e782ab6599c7b24bf6a8eb8"
] | [
"PEDec.py"
] | [
"import torch\nimport torch.nn as nn\n\nfrom MHA import MultiHeadedAttention\nfrom FeedForward import PositionwiseFeedForward\nfrom PositionalEncoding import *\nfrom Encoder import EncoderLayer, sequence_mask\nfrom Decoder import DecoderLayer\n\n\nclass subclass(nn.Module):\n def __init__(self,\n d_model,\n nhead,\n d_ff,\n self_attn_type,\n dropout,\n attention_dropout):\n super().__init__()\n self.enc = EncoderLayer(d_model,\n nhead,\n d_ff,\n dropout,\n attention_dropout)\n self.dec = DecoderLayer(d_model,\n nhead,\n d_ff,\n dropout,\n attention_dropout,\n self_attn_type=self_attn_type)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n \n def forward(self, src_out, src_mask, tgt_out, tgt_pad_mask, step=None):\n # TODO LayerNorm?? Memory 여러개가 각기 다르게 변한다면, 업데이트마다 기존 Transformer보다 변동성이 크다\n # 그러므로 반드시! LayerNorm을 해줘야 학습이 용이하다\n # Encoder\n src_out = self.enc(src_out, src_mask)\n src_out = self.layer_norm(src_out)\n # Decoder\n tgt_out, attn = self.dec(tgt_out,\n src_out,\n src_mask, # (B, 1, src_len)\n tgt_pad_mask,\n step=step)\n return src_out, tgt_out\n\nclass PEDec(nn.Module):\n def __init__(self,\n enc_num_layers,\n dec_num_layers,\n d_model,\n nhead,\n d_ff,\n self_attn_type,\n dropout,\n attention_dropout,\n enc_embeddings,\n dec_embeddings):\n super().__init__()\n self.enc_embeddings = enc_embeddings\n self.dec_embeddings = dec_embeddings\n self.pre_tf = enc_num_layers > dec_num_layers\n self.post_tf = enc_num_layers < dec_num_layers\n if self.pre_tf:\n self.pre_transformer = nn.ModuleList(\n [EncoderLayer(d_model,\n nhead,\n d_ff,\n dropout,\n attention_dropout)\n for i in range(enc_num_layers - dec_num_layers)]) \n enc_num_layers = dec_num_layers\n \n self.transformer = nn.ModuleList(\n [subclass(d_model,\n nhead,\n d_ff,\n self_attn_type,\n dropout,\n attention_dropout)\n for i in range(enc_num_layers)])\n \n elif self.post_tf:\n self.transformer = nn.ModuleList(\n [subclass(d_model,\n nhead,\n d_ff,\n self_attn_type,\n dropout,\n attention_dropout)\n for i in range(enc_num_layers)])\n\n self.post_transformer = nn.ModuleList(\n [DecoderLayer(d_model,\n nhead,\n d_ff,\n dropout,\n attention_dropout,\n self_attn_type=self_attn_type)\n for i in range(dec_num_layers - enc_num_layers)])\n else:\n self.transformer = nn.ModuleList(\n [subclass(d_model,\n nhead,\n d_ff,\n self_attn_type,\n dropout,\n attention_dropout)\n for i in range(enc_num_layers)])\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n \n def forward(self, src, tgt, lengths=None, step=None):\n src_emb = self.enc_embeddings(src) # (B, src_len, d_model)\n # MHA에서 (B, 1, 1, T_src)로 바꿀 예정\n src_mask = ~sequence_mask(lengths).unsqueeze(1) # (B, 1, src_len)\n tgt_emb = self.dec_embeddings(tgt, step=step)\n assert tgt_emb.dim() == 3 # (B, tgt_len, d_model)\n \n pad_idx = self.dec_embeddings.pad_idx\n ## pad_idx와 같으면 True, 아니면 False\n tgt_pad_mask = tgt.data.eq(pad_idx).unsqueeze(1) # (B, 1, tgt_len)\n \n src_out, tgt_out = src_emb, tgt_emb\n\n if self.pre_tf:\n for pre_layer in self.pre_transformer:\n src_out = pre_layer(src_out, src_mask)\n\n for layer in self.transformer:\n src_out, tgt_out = layer(src_out, src_mask, tgt_out, tgt_pad_mask, step)\n \n if self.post_tf:\n for post_layer in self.post_transformer:\n tgt_out = post_layer(tgt_out,\n src_out,\n src_mask, # (B, 1, src_len)\n tgt_pad_mask,\n step=step)\n out = self.layer_norm(tgt_out) # (B, src_len, d_model) \n return out, lengths\n\n def update_dropout(self, dropout, attention_dropout):\n self.embeddings.update_dropout(dropout)\n for layer in 
self.transformer:\n layer.update_dropout(dropout, attention_dropout)\n\n \n def _compute_dec_mask(self, tgt_pad_mask, future):\n # tgt_pad_mask : (B, 1, tgt_len) // bool\n ## pad_idx만 True\n tgt_len = tgt_pad_mask.size(-1)\n if not future:\n # future_mask : (tgt_len, tgt_len)\n future_mask = torch.ones([tgt_len, tgt_len],\n device=tgt_pad_mask.device,\n dtype=torch.uint8)\n future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len) # (1, tgt_len, tgt_len)\n\n try:\n ## upper triangle만 True\n future_mask = future_mask.bool()\n except AttributeError:\n pass\n ## torch.gt(A, 0) : A elements > 0 이면 True\n ## pad와 upper triangle은 True, 그 이외에는 False\n dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)\n else:\n dec_mask = tgt_pad_mask\n return dec_mask\n \n def _forward_self_attn(self, inputs_norm, dec_mask, step):\n if isinstance(self.self_attn, MultiHeadedAttention):\n return self.self_attn(inputs_norm,\n inputs_norm,\n inputs_norm,\n mask=dec_mask,\n attn_type='self')\n"
] | [
[
"torch.gt",
"torch.ones",
"torch.nn.LayerNorm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gabrieleagl/qiskit-aqua | [
"521d505a6483985c039dcfb71f7d517471cff441"
] | [
"qiskit/aqua/components/neural_networks/quantum_generator.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Quantum Generator.\"\"\"\n\nfrom typing import Optional, List, Union, Dict, Any\nimport warnings\nfrom copy import deepcopy\nimport numpy as np\n\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\nfrom qiskit.circuit.library import TwoLocal\nfrom qiskit.aqua import aqua_globals\nfrom qiskit.aqua.components.optimizers import ADAM\nfrom qiskit.aqua.components.uncertainty_models import UnivariateVariationalDistribution, \\\n MultivariateVariationalDistribution\nfrom qiskit.aqua.components.neural_networks.generative_network import GenerativeNetwork\n\n# pylint: disable=invalid-name\n\n\nclass QuantumGenerator(GenerativeNetwork):\n \"\"\"Quantum Generator.\n\n The quantum generator is a parametrized quantum circuit which can be trained with the\n :class:`~qiskit.aqua.algorithms.QGAN` algorithm\n to generate a quantum state which approximates the probability\n distribution of given training data. At the beginning of the training the parameters will\n be set randomly, thus, the output will is random. Throughout the training the quantum\n generator learns to represent the target distribution.\n Eventually, the trained generator can be used for state preparation e.g. in QAE.\n \"\"\"\n\n def __init__(self,\n bounds: np.ndarray,\n num_qubits: List[int],\n generator_circuit: Optional[Union[UnivariateVariationalDistribution,\n MultivariateVariationalDistribution,\n QuantumCircuit]] = None,\n init_params: Optional[Union[List[float], np.ndarray]] = None,\n snapshot_dir: Optional[str] = None) -> None:\n \"\"\"\n Args:\n bounds: k min/max data values [[min_1,max_1],...,[min_k,max_k]],\n given input data dim k\n num_qubits: k numbers of qubits to determine representation resolution,\n i.e. 
n qubits enable the representation of 2**n values [n_1,..., n_k]\n generator_circuit: a UnivariateVariationalDistribution for univariate data,\n a MultivariateVariationalDistribution for multivariate data,\n or a QuantumCircuit implementing the generator.\n init_params: 1D numpy array or list, Initialization for\n the generator's parameters.\n snapshot_dir: str or None, if not None save the optimizer's parameter after every\n update step to the given directory\n\n Raises:\n AquaError: Set multivariate variational distribution to represent multivariate data\n \"\"\"\n super().__init__()\n self._bounds = bounds\n self._num_qubits = num_qubits\n self.generator_circuit = generator_circuit\n if generator_circuit is None:\n circuit = QuantumCircuit(sum(num_qubits))\n circuit.h(circuit.qubits)\n var_form = TwoLocal(sum(num_qubits), 'ry', 'cz', reps=1, entanglement='circular')\n circuit.compose(var_form, inplace=True)\n\n # Set generator circuit\n self.generator_circuit = circuit\n\n if isinstance(generator_circuit, (UnivariateVariationalDistribution,\n MultivariateVariationalDistribution)):\n warnings.warn('Passing a UnivariateVariationalDistribution or MultivariateVariational'\n 'Distribution is as ``generator_circuit`` is deprecated as of Aqua 0.8.0 '\n 'and the support will be removed no earlier than 3 months after the '\n 'release data. You should pass as QuantumCircuit instead.',\n DeprecationWarning, stacklevel=2)\n self._free_parameters = generator_circuit._var_form_params\n self.generator_circuit = generator_circuit._var_form\n else:\n self._free_parameters = list(self.generator_circuit.parameters)\n\n if init_params is None:\n init_params = aqua_globals.random.random(self.generator_circuit.num_parameters) * 2e-2\n\n self._bound_parameters = init_params\n\n # Set optimizer for updating the generator network\n self._optimizer = ADAM(maxiter=1, tol=1e-6, lr=1e-3, beta_1=0.7,\n beta_2=0.99, noise_factor=1e-6,\n eps=1e-6, amsgrad=True, snapshot_dir=snapshot_dir)\n\n if np.ndim(self._bounds) == 1:\n bounds = np.reshape(self._bounds, (1, len(self._bounds)))\n else:\n bounds = self._bounds\n for j, prec in enumerate(self._num_qubits):\n # prepare data grid for dim j\n grid = np.linspace(bounds[j, 0], bounds[j, 1], (2 ** prec))\n if j == 0:\n if len(self._num_qubits) > 1:\n self._data_grid = [grid]\n else:\n self._data_grid = grid\n self._grid_elements = grid\n elif j == 1:\n self._data_grid.append(grid)\n temp = []\n for g_e in self._grid_elements:\n for g in grid:\n temp0 = [g_e]\n temp0.append(g)\n temp.append(temp0)\n self._grid_elements = temp\n else:\n self._data_grid.append(grid)\n temp = []\n for g_e in self._grid_elements:\n for g in grid:\n temp0 = deepcopy(g_e)\n temp0.append(g)\n temp.append(temp0)\n self._grid_elements = deepcopy(temp)\n self._data_grid = np.array(self._data_grid)\n\n self._shots = None\n self._discriminator = None\n self._ret = {} # type: Dict[str, Any]\n\n def set_seed(self, seed):\n \"\"\"\n Set seed.\n\n Args:\n seed (int): seed\n \"\"\"\n aqua_globals.random_seed = seed\n\n def set_discriminator(self, discriminator):\n \"\"\"\n Set discriminator network.\n\n Args:\n discriminator (Discriminator): Discriminator used to compute the loss function.\n \"\"\"\n self._discriminator = discriminator\n\n def construct_circuit(self, params=None):\n \"\"\"\n Construct generator circuit.\n\n Args:\n params (list | dict): parameters which should be used to run the generator.\n\n Returns:\n Instruction: construct the quantum circuit and return as gate\n \"\"\"\n if params is 
None:\n return self.generator_circuit\n\n if isinstance(params, (list, np.ndarray)):\n params = dict(zip(self._free_parameters, params))\n\n return self.generator_circuit.assign_parameters(params)\n # self.generator_circuit.build(qc=qc, q=q)\n # else:\n # generator_circuit_copy = deepcopy(self.generator_circuit)\n # generator_circuit_copy.params = params\n # generator_circuit_copy.build(qc=qc, q=q)\n\n # # return qc.copy(name='qc')\n # return qc.to_instruction()\n\n def get_output(self, quantum_instance, params=None, shots=None):\n \"\"\"\n Get classical data samples from the generator.\n Running the quantum generator circuit results in a quantum state.\n To train this generator with a classical discriminator, we need to sample classical outputs\n by measuring the quantum state and mapping them to feature space defined by the training\n data.\n\n Args:\n quantum_instance (QuantumInstance): Quantum Instance, used to run the generator\n circuit.\n params (numpy.ndarray): array or None, parameters which should\n be used to run the generator, if None use self._params\n shots (int): if not None use a number of shots that is different from the\n number set in quantum_instance\n\n Returns:\n list: generated samples, array: sample occurrence in percentage\n \"\"\"\n instance_shots = quantum_instance.run_config.shots\n q = QuantumRegister(sum(self._num_qubits), name='q')\n qc = QuantumCircuit(q)\n if params is None:\n params = self._bound_parameters\n qc.append(self.construct_circuit(params), q)\n if quantum_instance.is_statevector:\n pass\n else:\n c = ClassicalRegister(sum(self._num_qubits), name='c')\n qc.add_register(c)\n qc.measure(q, c)\n\n if shots is not None:\n quantum_instance.set_config(shots=shots)\n\n result = quantum_instance.execute(qc)\n\n generated_samples = []\n if quantum_instance.is_statevector:\n result = result.get_statevector(qc)\n values = np.multiply(result, np.conj(result))\n values = list(values.real)\n keys = []\n for j in range(len(values)):\n keys.append(np.binary_repr(j, int(sum(self._num_qubits))))\n else:\n result = result.get_counts(qc)\n keys = list(result)\n values = list(result.values())\n values = [float(v) / np.sum(values) for v in values]\n generated_samples_weights = values\n for i, _ in enumerate(keys):\n index = 0\n temp = []\n for k, p in enumerate(self._num_qubits):\n bin_rep = 0\n j = 0\n while j < p:\n bin_rep += int(keys[i][index]) * 2 ** (int(p) - j - 1)\n j += 1\n index += 1\n if len(self._num_qubits) > 1:\n temp.append(self._data_grid[k][int(bin_rep)])\n else:\n temp.append(self._data_grid[int(bin_rep)])\n generated_samples.append(temp)\n\n # self.generator_circuit._probabilities = generated_samples_weights\n if shots is not None:\n # Restore the initial quantum_instance configuration\n quantum_instance.set_config(shots=instance_shots)\n return generated_samples, generated_samples_weights\n\n def loss(self, x, weights): # pylint: disable=arguments-differ\n \"\"\"\n Loss function for training the generator's parameters.\n\n Args:\n x (numpy.ndarray): sample label (equivalent to discriminator output)\n weights (numpy.ndarray): probability for measuring the sample\n\n Returns:\n float: loss function\n \"\"\"\n try:\n # pylint: disable=no-member\n loss = (-1) * np.dot(np.log(x).transpose(), weights)\n except Exception: # pylint: disable=broad-except\n loss = (-1) * np.dot(np.log(x), weights)\n return loss.flatten()\n\n def _get_objective_function(self, quantum_instance, discriminator):\n \"\"\"\n Get objective function\n\n Args:\n quantum_instance 
(QuantumInstance): used to run the quantum circuit.\n discriminator (torch.nn.Module): discriminator network to compute the sample labels.\n\n Returns:\n objective_function: objective function for quantum generator optimization\n \"\"\"\n\n def objective_function(params):\n \"\"\"\n Objective function\n\n Args:\n params (numpy.ndarray): generator parameters\n\n Returns:\n self.loss: loss function\n \"\"\"\n generated_data, generated_prob = self.get_output(quantum_instance, params=params,\n shots=self._shots)\n prediction_generated = discriminator.get_label(generated_data, detach=True)\n return self.loss(prediction_generated, generated_prob)\n\n return objective_function\n\n def train(self, quantum_instance=None, shots=None):\n \"\"\"\n Perform one training step w.r.t to the generator's parameters\n\n Args:\n quantum_instance (QuantumInstance): used to run the generator circuit.\n shots (int): Number of shots for hardware or qasm execution.\n\n Returns:\n dict: generator loss(float) and updated parameters (array).\n \"\"\"\n\n self._shots = shots\n # Force single optimization iteration\n self._optimizer._maxiter = 1\n self._optimizer._t = 0\n objective = self._get_objective_function(quantum_instance, self._discriminator)\n self._bound_parameters, loss, _ = self._optimizer.optimize(\n num_vars=len(self._bound_parameters),\n objective_function=objective,\n initial_point=self._bound_parameters\n )\n\n self._ret['loss'] = loss\n self._ret['params'] = self._bound_parameters\n\n return self._ret\n"
] | [
[
"numpy.log",
"numpy.conj",
"numpy.linspace",
"numpy.ndim",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
titu1994/pyshac | [
"63edafb8b80a9d2dec7c27b023569df56a659894"
] | [
"pyshac/config/hyperparameters.py"
] | [
"import os\nfrom abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\nfrom collections import Iterable\nimport six\nimport sys\nimport inspect\nimport numpy as np\nimport uuid\nimport codecs\n\n\n# compatible with Python 2 *and* 3:\nABC = ABCMeta('ABC', (object,), {'__slots__': ()})\n\n\n_CUSTOM_PARAMETERS = OrderedDict()\n\n\nclass _NoneTypeWrapper(object):\n \"\"\"\n A wrapper to handle cases when `None` is passed as a possible parameter\n value to the engine.\n \"\"\"\n def __init__(self):\n pass\n\n def __call__(self, *args, **kwargs):\n return args[0]\n\n\nclass AbstractHyperParameter(ABC):\n \"\"\"\n Abstract Hyper Parameter that defines the methods that all hyperparameters\n need to supply\n\n # Arguments:\n name (str): Name of the hyper parameter\n values (List, None): A list of values (must all be pickle-able and hashable)\n values or None. If None, it is assumed to be a continuous value generator.\n\n # Raises:\n ValueError: If the `name` is not specified.\n \"\"\"\n def __init__(self, name, values, seed):\n\n if name is None:\n raise ValueError(\"`name` of the hyperparameter cannot be `None`\")\n\n self.name = name\n self.num_choices = len(values) if values is not None else 0\n self.param2id = OrderedDict()\n self.id2param = OrderedDict()\n self.param2type = OrderedDict()\n self.set_seed(seed)\n\n @abstractmethod\n def sample(self):\n \"\"\"\n Abstract method that defines how parameters are sampled.\n\n # Raises:\n NotImplementedError: Must be overridden by the subclass.\n\n # Returns:\n a singular value sampled from possible values.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def encode(self, x):\n \"\"\"\n Abstract method that defines how the parameter is encoded\n so that the model can properly be trained.\n\n # Arguments:\n x (int | float | str): a single value that needs to be encoded.\n\n # Raises:\n NotImplementedError: Must be overridden by the subclass.\n\n # Returns:\n an encoded representation of the value of `x`.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def decode(self, x):\n \"\"\"\n Abstract method that defines how the parameter is decoded so\n that the model can be properly trained.\n\n # Arguments:\n x (int | float): an encoded value that needs to be decoded.\n\n # Raises:\n NotImplementedError: Must be overridden by the subclass.\n\n # Returns:\n a decoded value for the encoded input `x`.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def _cast(self, x):\n \"\"\"\n Casts the given value to its original data type.\n\n # Arguments:\n x (int | float | str): Input sample that will be cast to the\n correct data type.\n\n # Returns:\n the sample cast to the correct data type.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'name': self.name,\n 'seed': self.seed,\n }\n return config\n\n @classmethod\n def load_from_config(cls, config):\n \"\"\"\n Utilizes the provided config to instantiate a new\n instance of the class with the same arguments.\n\n # Arguments:\n config (dict): A dictionary having keys as the argument names\n and values as the values specified to the class using its\n constructor.\n\n # Returns:\n A new instance of this class with the correct arguments.\n \"\"\"\n return cls(**config)\n\n def _build_maps(self, values):\n \"\"\"\n Prepares a pair of dictionaries to manage the values provided.\n\n # 
Arguments:\n values (List, None): A list of values that are embedded into\n a pair of dictionaries. All values must be pickle-able and hashable.\n \"\"\"\n if values is not None:\n for i, v in enumerate(values):\n self.param2id[v] = i\n self.id2param[i] = v\n\n # prepare a type map from string to its type, for fast checks\n if v is not None:\n self.param2type[v] = type(v)\n self.param2type[str(v)] = type(v)\n else:\n self.param2type[v] = _NoneTypeWrapper()\n self.param2type[str(v)] = _NoneTypeWrapper()\n\n def set_seed(self, seed):\n \"\"\"\n Sets the random seed of the local RNG.\n\n # Arguments:\n seed (int | None): Random seed value.\n \"\"\"\n self.seed = seed\n\n if seed is None:\n if six.PY3:\n seed = int.from_bytes(os.urandom(4), byteorder='little')\n else:\n seed = int(codecs.encode(os.urandom(4), 'hex'), 16)\n\n self.random = np.random.RandomState(seed)\n\n def __repr__(self):\n s = self.name + \" : \"\n vals = list(self.param2id.keys())\n return s + str(vals)\n\n\nclass DiscreteHyperParameter(AbstractHyperParameter):\n \"\"\"\n Discrete Hyper Parameter that defines a set of discrete values that it can take.\n\n # Arguments:\n name (str): Name of the hyper parameter.\n values (list): A list of values (must all be pickle-able and hashable)\n values or None.\n\n # Raises:\n ValueError: If the values provided is `None` or length of values is 0.\n\n # Raises:\n ValueError: If the `name` is not specified.\n \"\"\"\n def __init__(self, name, values, seed=None):\n\n super(DiscreteHyperParameter, self).__init__(name, values, seed)\n\n if values is not None and len(values) != 0:\n super(DiscreteHyperParameter, self)._build_maps(values)\n else:\n raise ValueError(\"DiscreteHyperParamter must be passed at least one \"\n \"or more values\")\n\n def sample(self):\n \"\"\"\n Samples a single value from its set of discrete values.\n\n # Returns:\n a single value from its list of possible values.\n \"\"\"\n choice = self.random.randint(0, self.num_choices, size=1, dtype=np.int64)[0]\n param = self.id2param[choice]\n return param\n\n def encode(self, x):\n \"\"\"\n Encodes a single value into an integer index.\n\n # Arguments:\n x (int | float | str): A value sampled from its possible values.\n\n # Returns:\n int value representing its encoded index.\n \"\"\"\n x = self._cast(x)\n return self.param2id[x]\n\n def decode(self, x):\n \"\"\"\n Decodes a single encoded integer into its original value.\n\n # Args:\n x (int): an integer encoded value.\n\n # Returns:\n (int | float | str) representing the actual decoded value.\n \"\"\"\n param = self.id2param[x]\n return self._cast(param)\n\n def _cast(self, x):\n \"\"\"\n Casts the sample to its original data type.\n\n # Arguments:\n x (int | float | str): Input sample that will be cast to the\n correct data type.\n\n # Returns:\n the sample cast to the correct data type.\n \"\"\"\n return self.param2type[x](x)\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'values': list(self.id2param.values()),\n }\n\n base_config = super(DiscreteHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass MultiDiscreteHyperParameter(AbstractHyperParameter):\n \"\"\"\n Discrete Hyper Parameter that defines a set of discrete values that it can take,\n and acts upon a list of samples.\n\n # Arguments:\n name (str): Name of the hyper parameter.\n values (list): A list of values (must all be 
pickle-able and hashable)\n values or None.\n sample_count (int): Number of samples that are required from this\n hyper parameter.\n\n # Raises:\n ValueError: If the values provided is `None` or length of values is 0.\n\n # Raises:\n ValueError: If the `name` is not specified or if `sample_count` is less\n than 1.\n \"\"\"\n def __init__(self, name, values, sample_count=1, seed=None):\n\n super(MultiDiscreteHyperParameter, self).__init__(name, values, seed)\n\n if sample_count < 1:\n raise ValueError(\"`sample_count` must be greater than 0.\")\n\n self.sample_count = sample_count\n if values is not None and len(values) != 0:\n super(MultiDiscreteHyperParameter, self)._build_maps(values)\n else:\n raise ValueError(\"MultiDiscreteHyperParamter must be passed at \"\n \"least one or more values.\")\n\n def sample(self):\n \"\"\"\n Samples a number of values from its set of discrete values.\n\n # Returns:\n a list of values from its set of possible values.\n \"\"\"\n choices = self.random.randint(0, self.num_choices, size=self.sample_count,\n dtype=np.int64)\n\n param = [self.id2param[choice] for choice in choices]\n return param\n\n def encode(self, x):\n \"\"\"\n Encodes a list of values into a list of the corresponding integer index.\n\n # Arguments:\n x (int | float | str): A list of values sampled from its\n possible values.\n\n # Returns:\n list of int values representing their encoded index.\n \"\"\"\n e = [self.param2id[self._cast(v)] for v in x]\n return e\n\n def decode(self, x):\n \"\"\"\n Decodes a list of encoded integers into their original value.\n\n # Args:\n x (int): a list of integer encoded values.\n\n # Returns:\n list of (int | float | str) representing the actual decoded\n values.\n \"\"\"\n params = [self._cast(self.id2param[v]) for v in x]\n return params\n\n def _cast(self, x):\n \"\"\"\n Casts the sample to its original data type.\n\n # Arguments:\n x (int | float | str): Input sample that will be cast to the\n correct data type.\n\n # Returns:\n the sample cast to the correct data type.\n \"\"\"\n return self.param2type[x](x)\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'values': list(self.id2param.values()),\n 'sample_count': self.sample_count,\n }\n\n base_config = super(MultiDiscreteHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass AbstractContinuousHyperParameter(AbstractHyperParameter):\n \"\"\"\n An abstract hyper parameter that represents a parameter that can take a range\n of values from a certain distribution.\n\n # Arguments:\n name (str): Name of the parameter.\n val1 (float): A symbolic value that is used by subclasses.\n val2 (float): A symbolic value that is used by subclasses.\n log_encode (bool): Determines whether the encoding must be in natural\n log-space or not.\n\n # Raises:\n NotImplementedError: If `sample()` is called.\n \"\"\"\n def __init__(self, name, val1, val2, log_encode=False, seed=None):\n super(AbstractContinuousHyperParameter, self).__init__(name, None, seed)\n\n if val1 is not None and val2 is not None:\n self._val1 = float(val1)\n self._val2 = float(val2)\n else:\n raise ValueError(\"val1 and val2 must be floating point \"\n \"numbers for ContinuousHyperParameters\")\n\n self.log_encode = log_encode\n\n if log_encode:\n if val1 < 0.0:\n raise ValueError(\"When using log encoding, negative values are not allowed for parameters\")\n\n def 
sample(self):\n \"\"\"\n Abstract method that must be redefined by base classes.\n\n # Returns:\n a float value.\n \"\"\"\n raise NotImplementedError(\"Subclass must implement this method !\")\n\n def encode(self, x):\n \"\"\"\n Encodes the floating point value into log space if `log_space` was set in\n the constructor, else returns its original value.\n\n # Arguments:\n x (float): a single sample.\n\n # Returns:\n float.\n \"\"\"\n x = self._cast(x)\n\n if self.log_encode:\n x = self._cast(np.log(x))\n\n return x\n\n def decode(self, x):\n \"\"\"\n Decodes the floating point value into normal space if `log_space` was set in\n the constructor, else returns its original value.\n\n # Arguments:\n x (float): a single encoded sample.\n\n # Returns:\n float.\n \"\"\"\n x = self._cast(x)\n\n if self.log_encode:\n x = self._cast(np.exp(x))\n\n return x\n\n def _cast(self, x):\n \"\"\"\n Casts the sample to its original data type.\n\n # Arguments:\n x (int | float | str): Input sample that will be cast to the\n correct data type.\n\n # Returns:\n the sample cast to the correct data type.\n \"\"\"\n if isinstance(x, np.ndarray) or hasattr(x, 'dtype'):\n return x.astype(np.float64)\n else:\n return float(x)\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n base_config = super(AbstractContinuousHyperParameter, self).get_config()\n return base_config\n\n def __repr__(self):\n s = \"%s : continuous [%0.3f, %0.3f)\\n\" % (self.name, self._val1, self._val2)\n return s\n\n\nclass AbstractMultiContinuousHyperParameter(AbstractHyperParameter):\n \"\"\"\n An abstract hyper parameter that represents a parameter that can take a range\n of values from a certain distribution, sampled multiple times.\n\n # Arguments:\n name (str): Name of the parameter.\n val1 (float): A symbolic value that is used by subclasses.\n val2 (float): A symbolic value that is used by subclasses.\n log_encode (bool): Determines whether the encoding must be in natural\n log-space or not.\n sample_count (int): Number of samples that are required from this\n hyper parameter.\n\n # Raises:\n NotImplementedError: If `sample()` is called.\n ValueErroe: If sample count is less than 1.\n \"\"\"\n def __init__(self, name, val1, val2, log_encode=False, sample_count=1, seed=None):\n super(AbstractMultiContinuousHyperParameter, self).__init__(name, None, seed)\n\n if sample_count < 1:\n raise ValueError(\"`sample_count` must be greater than 0.\")\n\n if val1 is not None and val2 is not None:\n self._val1 = float(val1)\n self._val2 = float(val2)\n else:\n raise ValueError(\"val1 and val2 must be floating point \"\n \"numbers for ContinuousHyperParameters\")\n\n self.log_encode = log_encode\n self.sample_count = sample_count\n\n if log_encode:\n if val1 < 0.0 or val2 < 0.0:\n raise ValueError(\"When using log encoding, negative values are not allowed for parameters\")\n\n def sample(self):\n \"\"\"\n Abstract method that must be redefined by base classes.\n\n # Returns:\n a float value.\n \"\"\"\n raise NotImplementedError(\"Subclass must implement this method !\")\n\n def encode(self, x):\n \"\"\"\n Encodes a list of floating point values into log space if `log_space`\n was set in the constructor, else returns its original value.\n\n # Arguments:\n x (float): a list of samples.\n\n # Returns:\n list of floats.\n \"\"\"\n if self.log_encode:\n x = [self._cast(np.log(v)) for v in x]\n else:\n x = [self._cast(v) for v in x]\n\n return x\n\n 
def decode(self, x):\n \"\"\"\n Decodes a list of floating point values into normal space if `log_space`\n was set in the constructor, else returns its original value.\n\n # Arguments:\n x (float): a list of encoded samples.\n\n # Returns:\n list of floats.\n \"\"\"\n if self.log_encode:\n x = [self._cast(np.exp(v)) for v in x]\n else:\n x = [self._cast(v) for v in x]\n\n return x\n\n def _cast(self, x):\n \"\"\"\n Casts the sample to its original data type.\n\n # Arguments:\n x (int | float): Input sample that will be cast to the\n correct data type.\n\n # Returns:\n the sample cast to the correct data type.\n \"\"\"\n if isinstance(x, np.ndarray) or hasattr(x, 'dtype'):\n return x.astype(np.float64)\n else:\n return float(x)\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'sample_count': self.sample_count,\n }\n\n base_config = super(AbstractMultiContinuousHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def __repr__(self):\n s = \"%s : continuous [%0.3f, %0.3f)\\n\" % (self.name, self._val1, self._val2)\n return s\n\n\nclass UniformContinuousHyperParameter(AbstractContinuousHyperParameter):\n \"\"\"\n A hyper parameter that represents a parameter that can take a range\n of values from a uniform distribution.\n\n # Arguments:\n name (str): Name of the parameter.\n min_value (float): The minimum value (inclusive) that the uniform\n distribution can take.\n max_value (float): The maximum value (exclusive) that the uniform\n distribution can take.\n log_encode (bool): Determines whether the encoding must be in natural\n log-space or not.\n \"\"\"\n def __init__(self, name, min_value, max_value, log_encode=False, seed=None):\n\n super(UniformContinuousHyperParameter, self).__init__(name, min_value, max_value,\n log_encode, seed)\n\n def sample(self):\n \"\"\"\n Samples uniformly from the range [min_value, max_value).\n\n # Returns:\n float.\n \"\"\"\n value = self.random.uniform(self._val1, self._val2, size=1)[0]\n return value\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'min_value': self.min_value,\n 'max_value': self.max_value,\n 'log_encode': self.log_encode,\n }\n\n base_config = super(UniformContinuousHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @property\n def min_value(self):\n return self._val1\n\n @property\n def max_value(self):\n return self._val2\n\n\nclass MultiUniformContinuousHyperParameter(AbstractMultiContinuousHyperParameter):\n \"\"\"\n A hyper parameter that represents a parameter that can take a range\n of values from a uniform distribution, sampled multiple times.\n\n # Arguments:\n name (str): Name of the parameter.\n min_value (float): The minimum value (inclusive) that the uniform\n distribution can take.\n max_value (float): The maximum value (exclusive) that the uniform\n distribution can take.\n log_encode (bool): Determines whether the encoding must be in natural\n log-space or not.\n sample_count (int): Number of samples that are required from this\n hyper parameter.\n\n # Raises:\n ValueErroe: If sample count is less than 1.\n \"\"\"\n def __init__(self, name, min_value, max_value, log_encode=False, sample_count=1, seed=None):\n\n super(MultiUniformContinuousHyperParameter, self).__init__(name, 
min_value, max_value,\n log_encode, sample_count,\n seed)\n\n def sample(self):\n \"\"\"\n Samples uniformly from the range [min_value, max_value).\n\n # Returns:\n list of floats.\n \"\"\"\n value = self.random.uniform(self._val1, self._val2, size=self.sample_count).tolist()\n return value\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'min_value': self.min_value,\n 'max_value': self.max_value,\n 'log_encode': self.log_encode,\n }\n\n base_config = super(MultiUniformContinuousHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @property\n def min_value(self):\n return self._val1\n\n @property\n def max_value(self):\n return self._val2\n\n\nclass NormalContinuousHyperParameter(AbstractContinuousHyperParameter):\n \"\"\"\n A hyper parameter that represents a parameter that can take a range\n of values from a normal distribution.\n\n # Arguments:\n name (str): Name of the parameter.\n mean (float): The mean of the normal distribution.\n std (float): The standard deviation of the normal distribution.\n \"\"\"\n def __init__(self, name, mean, std, seed=None):\n super(NormalContinuousHyperParameter, self).__init__(name, mean, std, False, seed)\n\n def sample(self):\n \"\"\"\n Samples from the normal distribution with a mean and standard deviation\n as specified in the constructor.\n\n # Returns:\n float.\n \"\"\"\n value = self.random.normal(self._val1, self._val2, size=1)[0]\n return value\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'mean': self.mean,\n 'std': self.std,\n }\n\n base_config = super(NormalContinuousHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @property\n def mean(self):\n return self._val1\n\n @property\n def std(self):\n return self._val2\n\n\nclass MultiNormalContinuousHyperParameter(AbstractMultiContinuousHyperParameter):\n \"\"\"\n A hyper parameter that represents a parameter that can take a range\n of values from a normal distribution, sampled multiple times.\n\n # Arguments:\n name (str): Name of the parameter.\n mean (float): The mean of the normal distribution.\n std (float): The standard deviation of the normal distribution.\n sample_count (int): Number of samples that are required from this\n hyper parameter.\n\n # Raises:\n ValueErroe: If sample count is less than 1.\n \"\"\"\n def __init__(self, name, mean, std, sample_count=1, seed=None):\n super(MultiNormalContinuousHyperParameter, self).__init__(name, mean, std,\n False, sample_count,\n seed)\n\n def sample(self):\n \"\"\"\n Samples from the normal distribution with a mean and standard deviation\n as specified in the constructor.\n\n # Returns:\n list of float.\n \"\"\"\n value = self.random.normal(self._val1, self._val2, size=self.sample_count).tolist()\n return value\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n a dictionary with the config of the class.\n \"\"\"\n config = {\n 'mean': self.mean,\n 'std': self.std,\n }\n\n base_config = super(MultiNormalContinuousHyperParameter, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @property\n def mean(self):\n return self._val1\n\n @property\n def std(self):\n return self._val2\n\n\nclass 
HyperParameterList(AbstractHyperParameter):\n \"\"\"\n A composite hyper parameter, that encloses a list of hyper parameters\n (either discrete or continuous) and provides utility methods for efficient\n handling by the engine.\n\n # Arguments:\n hyper_parameter_list (list(AnstractHyperParameter) | None): A list of\n hyper parameters or None (which initializes this with 0 elements).\n \"\"\"\n def __init__(self, hyper_parameter_list=None, seed=None):\n super(HyperParameterList, self).__init__('parameter_list', None, seed)\n self.name_map = OrderedDict()\n\n self._build_maps(hyper_parameter_list)\n self.set_seed(seed)\n\n def sample(self):\n \"\"\"\n Samples all of its component parameters and returns a list of the samples.\n\n # Returns:\n list of sampled parameters.\n \"\"\"\n values = []\n for v in self.id2param.values(): # type: AbstractHyperParameter\n x = v.sample()\n\n if isinstance(x, Iterable) and not isinstance(x, six.string_types):\n values.extend(x)\n else:\n values.append(x)\n\n return values\n\n def encode(self, x):\n \"\"\"\n Encodes a list of sampled hyper parameters.\n\n # Arguments:\n x (list | np.ndarray): A python list or numpy array of samples\n from the list of hyper parameters.\n\n # Raises:\n ValueError: If a numpy array of more than 1 dimension is provided.\n\n # Returns:\n ndarray(float).\n \"\"\"\n if isinstance(x, np.ndarray):\n if x.ndim != 1:\n raise ValueError(\"When encoding a list of hyper parameters, provide a python list \"\n \"or 1-dim numpy array\")\n else:\n x = x.tolist()\n\n values = []\n index = 0\n\n for param in self.id2param.values():\n if hasattr(param, 'sample_count'):\n v = param.encode(x[index: index + param.sample_count])\n values.extend(v)\n\n index += param.sample_count\n else:\n v = param.encode(x[index])\n values.append(v)\n\n index += 1\n\n values = np.array(values)\n return values\n\n def decode(self, x):\n \"\"\"\n Decodes a list of sampled hyper parameters.\n\n # Arguments:\n x (list(int | float)): a list of encoded integer or floating point\n values that are to be decoded.\n\n # Returns:\n list of decoded samples.\n \"\"\"\n if isinstance(x, np.ndarray):\n if x.ndim != 1:\n raise ValueError(\"When encoding a list of hyper parameters, provide a python list \"\n \"or 1-dim numpy array\")\n else:\n x = x.tolist()\n\n values = []\n index = 0\n\n for param in self.id2param.values():\n if hasattr(param, 'sample_count'):\n v = param.decode(x[index: index + param.sample_count])\n values.extend(v)\n\n index += param.sample_count\n else:\n v = param._cast(param.decode(x[index]))\n values.append(v)\n\n index += 1\n\n return values\n\n def _build_maps(self, values):\n \"\"\"\n Adds the individual hyper parameters to the list.\n\n # Arguments:\n values (list(AbstractHyperParameter) | None): a list of parameters.\n \"\"\"\n if values is not None:\n for param in values: # type: AbstractHyperParameter\n self.add_hyper_parameter(param)\n\n def _cast(self, x):\n \"\"\"\n Casts all of the samples to their original data types.\n\n # Arguments:\n x (list): Input samples that will be cast to their\n correct data types.\n\n # Returns:\n the list of samples cast to their correct data types.\n \"\"\"\n if isinstance(x, np.ndarray):\n if x.ndim != 1:\n raise ValueError(\"When encoding a list of hyper parameters, provide a python list \"\n \"or 1-dim numpy array\")\n else:\n x = x.tolist()\n\n types = []\n index = 0\n\n for param in self.id2param.values():\n if hasattr(param, 'sample_count'):\n for i in range(param.sample_count):\n id = index + i\n v = 
param._cast(x[id])\n types.append(v)\n\n index += param.sample_count\n else:\n v = param._cast(x[index])\n types.append(v)\n\n index += 1\n\n return types\n\n def get_config(self):\n \"\"\"\n Creates the config of the class with all of its values.\n\n # Returns:\n an ordered dictionary with the config of the class.\n \"\"\"\n config = OrderedDict()\n\n for name, param in zip(self.name_map.values(), self.id2param.values()): # type: (AbstractHyperParameter)\n class_name = param.__class__.__name__\n param_config = param.get_config()\n config[name] = [class_name, param_config]\n\n return config\n\n def set_seed(self, seed):\n \"\"\"\n Sets the seed of all the parameters held by the container.\n\n # Arguments:\n seed (int | None): Seed value for the random state.\n \"\"\"\n super(HyperParameterList, self).set_seed(seed)\n\n for param in self.id2param.values(): # type: (AbstractHyperParameter)\n param.set_seed(seed)\n\n if seed is not None:\n seed += 1\n\n @classmethod\n def load_from_config(cls, config):\n params = []\n\n for name, cls_config in config.items():\n param_class_name, param_config = cls_config\n param_class = get_parameter(param_class_name)\n param = param_class(**param_config)\n\n params.append(param)\n\n return cls(params)\n\n def add_hyper_parameter(self, parameter):\n \"\"\"\n Adds a single hyper parameter (discrete or continuous) to the list\n of hyper parameters managed by this HyperParameterList.\n\n # Arguments:\n parameter (AbstractHyperParameter): a subclass of AbstractHyperParameter,\n which will be embedded into this composite class.\n\n # Raises:\n ValueError: If the passed parameter is `None`, or the name already\n exists in the list of managed parameters.\n \"\"\"\n if parameter is None:\n raise ValueError(\"When adding a hyper parameter, `None` cannot be passed\")\n\n if parameter.name in self.name_map.values():\n raise ValueError('Cannot add two hyper parameters with same name (%s)' %\n parameter.name)\n\n id = str(uuid.uuid4())\n\n self.name_map[id] = parameter.name\n self.id2param[id] = parameter\n self.param2id[parameter.name] = id\n self.num_choices += 1\n\n def remove_hyper_parameter(self, parameter):\n \"\"\"\n Removes a single hyper parameter (discrete or continuous) from the list\n of hyper parameters managed by this HyperParameterList.\n\n # Arguments:\n parameter (AbstractHyperParameter, str): A string name or a subclass\n of AbstractHyperParameter which needs to be removed.\n\n # Raises:\n ValueError: If the passed parameter is `None`.\n \"\"\"\n if parameter is None:\n raise ValueError(\"When adding a hyper parameter, `None` cannot be passed\")\n\n if isinstance(parameter, AbstractHyperParameter):\n id = self.param2id[parameter.name]\n del self.param2id[parameter.name]\n else:\n if parameter in self.param2id:\n id = self.param2id[parameter]\n del self.param2id[parameter]\n else:\n raise KeyError(\"The hyper parameter with name %s has not been added to \"\n \"this list.\" % parameter)\n\n del self.name_map[id]\n del self.id2param[id]\n self.num_choices -= 1\n\n def get_parameter_names(self):\n \"\"\"\n Gets a list of all the parameter names managed by this class.\n\n # Returns:\n a list(str) with the names of the parameters.\n \"\"\"\n name_list = []\n\n for v in self.id2param.values(): # type: AbstractHyperParameter\n if hasattr(v, 'sample_count'):\n for i in range(v.sample_count):\n name_list.append(v.name + \"_%d\" % (i + 1))\n else:\n name_list.append(v.name)\n\n return name_list\n\n def __repr__(self):\n s = \"\"\n for v in self.id2param.values(): # 
type: AbstractHyperParameter\n            s = s + str(v) + \"\\n\"\n        return s\n\n    def __len__(self):\n        return len(self.name_map)\n\n\ndef set_custom_parameter_class(cls):\n    \"\"\"\n    Utility function to dynamically add a custom hyper parameter\n    to the set of available hyper parameters.\n\n    # Arguments:\n        cls (cls): A class which extends `AbstractHyperParameter` in some way\n            and implements the abstract methods.\n    \"\"\"\n    global _CUSTOM_PARAMETERS\n    _CUSTOM_PARAMETERS[cls.__name__] = cls\n\n\ndef get_parameter(name):\n    \"\"\"\n    Utility method to get the hyper parameter class by its name.\n\n    # Arguments:\n        name (str): Name of the class or its alias.\n\n    # Raises:\n        ValueError: If the class with the provided name does not exist in\n            the set of available parameters.\n\n    # Returns:\n        The hyper parameter class.\n    \"\"\"\n    global _CUSTOM_PARAMETERS\n\n    if name in _CUSTOM_PARAMETERS:\n        return _CUSTOM_PARAMETERS[name]\n\n    module_classes = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n    module_classes = dict(module_classes)\n\n    if name in module_classes:\n        return module_classes[name]\n    else:\n        raise ValueError('No hyper parameter class with the name %s was found in '\n                         'the hyper parameters module !' % name)\n\n\n# Aliases\nDiscreteHP = DiscreteHyperParameter\nUniformHP = UniformContinuousHyperParameter\nNormalHP = NormalContinuousHyperParameter\n\nMultiDiscreteHP = MultiDiscreteHyperParameter\nMultiUniformHP = MultiUniformContinuousHyperParameter\nMultiNormalHP = MultiNormalContinuousHyperParameter\n"
] | [
[
"numpy.exp",
"numpy.array",
"numpy.random.RandomState",
"numpy.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jhemedin/ACT | [
"b362a332c44573977fd5523c7065f713b2e66520"
] | [
"act/tests/test_qc.py"
] | [
"from act.io.armfiles import read_netcdf\nfrom act.tests import (EXAMPLE_IRT25m20s, EXAMPLE_METE40, EXAMPLE_CEIL1,\n EXAMPLE_MFRSR, EXAMPLE_MET1, EXAMPLE_CO2FLX4M)\nfrom act.qc.arm import add_dqr_to_qc\nfrom act.qc.radiometer_tests import fft_shading_test\nfrom act.qc.qcfilter import parse_bit, set_bit, unset_bit\nimport numpy as np\nimport pytest\nimport copy\nimport dask.array as da\n\n\ndef test_fft_shading_test():\n obj = read_netcdf(EXAMPLE_MFRSR)\n obj.clean.cleanup()\n obj = fft_shading_test(obj)\n qc_data = obj['qc_diffuse_hemisp_narrowband_filter4']\n assert np.nansum(qc_data.values) == 456\n\n\ndef test_qc_test_errors():\n ds_object = read_netcdf(EXAMPLE_MET1)\n var_name = 'temp_mean'\n\n assert ds_object.qcfilter.add_less_test(var_name, None) is None\n assert ds_object.qcfilter.add_greater_test(var_name, None) is None\n assert ds_object.qcfilter.add_less_equal_test(var_name, None) is None\n assert ds_object.qcfilter.add_equal_to_test(var_name, None) is None\n assert ds_object.qcfilter.add_not_equal_to_test(var_name, None) is None\n\n\ndef test_arm_qc():\n # Test DQR Webservice using known DQR\n variable = 'wspd_vec_mean'\n qc_variable = 'qc_' + variable\n obj = read_netcdf(EXAMPLE_METE40)\n\n # DQR webservice does go down, so ensure it\n # properly runs first before testing\n try:\n obj = add_dqr_to_qc(obj, variable=variable)\n ran = True\n obj.attrs['_datastream'] = obj.attrs['datastream']\n del obj.attrs['datastream']\n obj2 = add_dqr_to_qc(obj, variable=variable, add_qc_variable=qc_variable)\n obj3 = add_dqr_to_qc(obj)\n add_dqr_to_qc(obj, variable=variable, exclude=['D190529.4'])\n add_dqr_to_qc(obj, variable=variable, include=['D400101.1'])\n with np.testing.assert_raises(ValueError):\n del obj.attrs['_datastream']\n add_dqr_to_qc(obj, variable=variable)\n\n except ValueError:\n ran = False\n\n if ran:\n assert qc_variable in obj\n dqr = [True for d in obj[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]\n assert dqr[0] is True\n assert 'Suspect' not in obj[qc_variable].attrs['flag_assessments']\n assert 'Incorrect' not in obj[qc_variable].attrs['flag_assessments']\n\n assert qc_variable in obj2\n dqr = [True for d in obj2[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]\n assert dqr[0] is True\n assert 'Suspect' not in obj2[qc_variable].attrs['flag_assessments']\n assert 'Incorrect' not in obj2[qc_variable].attrs['flag_assessments']\n\n assert qc_variable in obj3\n dqr = [True for d in obj3[qc_variable].attrs['flag_meanings'] if 'D190529.4' in d]\n assert dqr[0] is True\n assert 'Suspect' not in obj3[qc_variable].attrs['flag_assessments']\n assert 'Incorrect' not in obj3[qc_variable].attrs['flag_assessments']\n\n\ndef test_qcfilter():\n ds_object = read_netcdf(EXAMPLE_IRT25m20s)\n var_name = 'inst_up_long_dome_resist'\n expected_qc_var_name = 'qc_' + var_name\n\n ds_object.qcfilter.check_for_ancillary_qc(\n var_name, add_if_missing=True, cleanup=False, flag_type=False)\n assert expected_qc_var_name in list(ds_object.keys())\n del ds_object[expected_qc_var_name]\n\n # Perform adding of quality control variables to object\n result = ds_object.qcfilter.add_test(var_name, test_meaning='Birds!')\n assert isinstance(result, dict)\n qc_var_name = result['qc_variable_name']\n assert qc_var_name == expected_qc_var_name\n\n # Check that new linking and describing attributes are set\n assert ds_object[qc_var_name].attrs['standard_name'] == 'quality_flag'\n assert ds_object[var_name].attrs['ancillary_variables'] == qc_var_name\n\n # Check that CF attributes are set 
including new flag_assessments\n assert 'flag_masks' in ds_object[qc_var_name].attrs.keys()\n assert 'flag_meanings' in ds_object[qc_var_name].attrs.keys()\n assert 'flag_assessments' in ds_object[qc_var_name].attrs.keys()\n\n # Check that the values of the attributes are set correctly\n assert ds_object[qc_var_name].attrs['flag_assessments'][0] == 'Bad'\n assert ds_object[qc_var_name].attrs['flag_meanings'][0] == 'Birds!'\n assert ds_object[qc_var_name].attrs['flag_masks'][0] == 1\n\n # Set some test values\n index = [0, 1, 2, 30]\n ds_object.qcfilter.set_test(var_name, index=index,\n test_number=result['test_number'])\n\n # Add a new test and set values\n index2 = [6, 7, 8, 50]\n ds_object.qcfilter.add_test(var_name, index=index2,\n test_number=9,\n test_meaning='testing high number',\n test_assessment='Suspect')\n\n # Retrieve data from object as numpy masked array. Count number of masked\n # elements and ensure equal to size of index array.\n data = ds_object.qcfilter.get_masked_data(var_name, rm_assessments='Bad')\n assert np.ma.count_masked(data) == len(index)\n\n data = ds_object.qcfilter.get_masked_data(\n var_name, rm_assessments='Suspect', return_nan_array=True)\n assert np.sum(np.isnan(data)) == len(index2)\n\n data = ds_object.qcfilter.get_masked_data(\n var_name, rm_assessments=['Bad', 'Suspect'], ma_fill_value=np.nan)\n assert np.ma.count_masked(data) == len(index + index2)\n\n # Test internal function for returning the index array of where the\n # tests are set.\n assert np.sum(ds_object.qcfilter.get_qc_test_mask(\n var_name, result['test_number'], return_index=True) -\n np.array(index, dtype=np.int)) == 0\n\n # Unset a test\n ds_object.qcfilter.unset_test(var_name, index=0,\n test_number=result['test_number'])\n # Remove the test\n ds_object.qcfilter.remove_test(var_name, test_number=33)\n ds_object.qcfilter.remove_test(var_name, test_number=result['test_number'])\n pytest.raises(ValueError, ds_object.qcfilter.add_test, var_name)\n pytest.raises(ValueError, ds_object.qcfilter.remove_test, var_name)\n\n ds_object.close()\n\n assert np.all(parse_bit([257]) == np.array([1, 9], dtype=np.int32))\n pytest.raises(ValueError, parse_bit, [1, 2])\n pytest.raises(ValueError, parse_bit, -1)\n\n assert set_bit(0, 16) == 32768\n data = range(0, 4)\n assert isinstance(set_bit(list(data), 2), list)\n assert isinstance(set_bit(tuple(data), 2), tuple)\n assert isinstance(unset_bit(list(data), 2), list)\n assert isinstance(unset_bit(tuple(data), 2), tuple)\n\n # Fill in missing tests\n ds_object = read_netcdf(EXAMPLE_IRT25m20s)\n del ds_object[var_name].attrs['long_name']\n # Test creating a qc variable\n ds_object.qcfilter.create_qc_variable(var_name)\n # Test creating a second qc variable and of flag type\n ds_object.qcfilter.create_qc_variable(var_name, flag_type=True)\n result = ds_object.qcfilter.add_test(var_name, index=[1, 2, 3],\n test_number=9,\n test_meaning='testing high number',\n flag_value=True)\n ds_object.qcfilter.set_test(var_name, index=5, test_number=9, flag_value=True)\n data = ds_object.qcfilter.get_masked_data(var_name)\n assert np.isclose(np.sum(data), 42674.766, 0.01)\n data = ds_object.qcfilter.get_masked_data(var_name, rm_assessments='Bad')\n assert np.isclose(np.sum(data), 42643.195, 0.01)\n\n ds_object.qcfilter.unset_test(var_name, test_number=9, flag_value=True)\n ds_object.qcfilter.unset_test(var_name, index=1, test_number=9, flag_value=True)\n assert ds_object.qcfilter.available_bit(result['qc_variable_name']) == 10\n assert 
ds_object.qcfilter.available_bit(result['qc_variable_name'], recycle=True) == 1\n ds_object.qcfilter.remove_test(var_name, test_number=9, flag_value=True)\n\n ds_object.qcfilter.update_ancillary_variable(var_name)\n # Test updating ancillary variable if does not exist\n ds_object.qcfilter.update_ancillary_variable('not_a_variable_name')\n # Change ancillary_variables attribute to test if add correct qc variable correctly\n ds_object[var_name].attrs['ancillary_variables'] = 'a_different_name'\n ds_object.qcfilter.update_ancillary_variable(var_name,\n qc_var_name=expected_qc_var_name)\n assert (expected_qc_var_name in\n ds_object[var_name].attrs['ancillary_variables'])\n\n # Test flag QC\n var_name = 'inst_sfc_ir_temp'\n qc_var_name = 'qc_' + var_name\n ds_object.qcfilter.create_qc_variable(var_name, flag_type=True)\n assert qc_var_name in list(ds_object.data_vars)\n assert 'flag_values' in ds_object[qc_var_name].attrs.keys()\n assert 'flag_masks' not in ds_object[qc_var_name].attrs.keys()\n del ds_object[qc_var_name]\n\n qc_var_name = ds_object.qcfilter.check_for_ancillary_qc(\n var_name, add_if_missing=True, cleanup=False, flag_type=True)\n assert qc_var_name in list(ds_object.data_vars)\n assert 'flag_values' in ds_object[qc_var_name].attrs.keys()\n assert 'flag_masks' not in ds_object[qc_var_name].attrs.keys()\n del ds_object[qc_var_name]\n\n ds_object.qcfilter.add_missing_value_test(var_name, flag_value=True, prepend_text='arm')\n ds_object.qcfilter.add_test(var_name, index=list(range(0, 20)), test_number=2,\n test_meaning='Testing flag', flag_value=True,\n test_assessment='Suspect')\n assert qc_var_name in list(ds_object.data_vars)\n assert 'flag_values' in ds_object[qc_var_name].attrs.keys()\n assert 'flag_masks' not in ds_object[qc_var_name].attrs.keys()\n assert 'standard_name' in ds_object[qc_var_name].attrs.keys()\n assert ds_object[qc_var_name].attrs['flag_values'] == [1, 2]\n assert ds_object[qc_var_name].attrs['flag_assessments'] == ['Bad', 'Suspect']\n\n ds_object.close()\n\n\ndef test_qcfilter2():\n ds_object = read_netcdf(EXAMPLE_IRT25m20s)\n var_name = 'inst_up_long_dome_resist'\n expected_qc_var_name = 'qc_' + var_name\n\n data = ds_object[var_name].values\n data[0:4] = data[0:4] + 30.\n data[1000:1024] = data[1000:1024] + 30.\n ds_object[var_name].values = data\n\n coef = 1.4\n ds_object.qcfilter.add_iqr_test(var_name, coef=1.4, test_assessment='Bad', prepend_text='arm')\n assert np.sum(ds_object[expected_qc_var_name].values) == 28\n assert ds_object[expected_qc_var_name].attrs['flag_masks'] == [1]\n assert ds_object[expected_qc_var_name].attrs['flag_meanings'] == [\n f'arm: Value outside of interquartile range test range with a coefficient of {coef}']\n\n ds_object.qcfilter.add_iqr_test(var_name, test_number=3, prepend_text='ACT')\n assert np.sum(ds_object[expected_qc_var_name].values) == 140\n assert ds_object[expected_qc_var_name].attrs['flag_masks'] == [1, 4]\n assert ds_object[expected_qc_var_name].attrs['flag_meanings'][-1] == (\n 'ACT: Value outside of interquartile range test range with a coefficient of 1.5')\n\n ds_object.qcfilter.add_gesd_test(var_name, test_assessment='Bad')\n assert np.sum(ds_object[expected_qc_var_name].values) == 204\n assert ds_object[expected_qc_var_name].attrs['flag_masks'] == [1, 4, 8]\n assert ds_object[expected_qc_var_name].attrs['flag_meanings'][-1] == (\n 'Value failed generalized Extreme Studentized Deviate test with an alpha of 0.05')\n\n ds_object.qcfilter.add_gesd_test(var_name, alpha=0.1)\n assert 
np.sum(ds_object[expected_qc_var_name].values) == 332\n assert ds_object[expected_qc_var_name].attrs['flag_masks'] == [1, 4, 8, 16]\n assert ds_object[expected_qc_var_name].attrs['flag_meanings'][-1] == (\n 'Value failed generalized Extreme Studentized Deviate test with an alpha of 0.1')\n assert ds_object[expected_qc_var_name].attrs['flag_assessments'] == [\n 'Bad', 'Indeterminate', 'Bad', 'Indeterminate']\n\n\ndef test_qctests():\n ds_object = read_netcdf(EXAMPLE_IRT25m20s)\n var_name = 'inst_up_long_dome_resist'\n\n # Add in one missing value and test for that missing value\n data = ds_object[var_name].values\n data[0] = np.nan\n ds_object[var_name].data = da.from_array(data)\n result = ds_object.qcfilter.add_missing_value_test(var_name)\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert data.mask[0]\n\n result = ds_object.qcfilter.add_missing_value_test(var_name, use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n assert data == np.array([0])\n ds_object.qcfilter.remove_test(var_name, test_number=result['test_number'])\n\n # less than min test\n limit_value = 6.8\n result = ds_object.qcfilter.add_less_test(var_name, limit_value, prepend_text='arm',\n limit_attr_name='fail_min')\n\n data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 54\n assert 'fail_min' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_min'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_min'], limit_value)\n\n result = ds_object.qcfilter.add_less_test(var_name, limit_value, test_assessment='Suspect')\n assert 'warn_min' in ds_object[result['qc_variable_name']].attrs.keys()\n\n limit_value = 8\n result = ds_object.qcfilter.add_less_test(var_name, limit_value)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n assert np.sum(data) == 2911939\n result = ds_object.qcfilter.add_less_test(var_name, limit_value, use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n assert np.sum(data) == 2911939\n\n # greator than max test\n limit_value = 12.7\n result = ds_object.qcfilter.add_greater_test(var_name, limit_value, prepend_text='arm',\n limit_attr_name='fail_max')\n data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 61\n assert 'fail_max' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_max'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_max'], limit_value)\n\n result = ds_object.qcfilter.add_greater_test(var_name, limit_value, test_assessment='Suspect')\n assert 'warn_max' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_greater_test(var_name, limit_value, use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n assert np.sum(data) == 125458\n result = ds_object.qcfilter.add_greater_test(var_name, limit_value)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n 
assert np.sum(data) == 125458\n\n # less than or equal test\n limit_value = 6.9\n result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value,\n test_assessment='Suspect',\n prepend_text='arm',\n limit_attr_name='warn_min')\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 149\n assert 'warn_min' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['warn_min'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_min'], limit_value)\n\n result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value)\n assert 'fail_min' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value, use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n assert np.sum(data) == 601581\n result = ds_object.qcfilter.add_less_equal_test(var_name, limit_value)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'], return_index=True)\n assert np.sum(data) == 601581\n\n # greater than or equal test\n result = ds_object.qcfilter.add_greater_equal_test(var_name, None)\n limit_value = 12\n result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value,\n test_assessment='Suspect',\n prepend_text='arm',\n limit_attr_name='warn_max')\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 606\n assert 'warn_max' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['warn_max'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_max'], limit_value)\n\n result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value)\n assert 'fail_max' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value, use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 1189873\n result = ds_object.qcfilter.add_greater_equal_test(var_name, limit_value)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 1189873\n\n # equal to test\n limit_value = 7.6705\n result = ds_object.qcfilter.add_equal_to_test(var_name, limit_value, prepend_text='arm',\n limit_attr_name='fail_equal_to')\n data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 2\n assert 'fail_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_equal_to'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_equal_to'], limit_value)\n\n result = ds_object.qcfilter.add_equal_to_test(var_name, limit_value,\n test_assessment='Indeterminate')\n assert 'warn_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_equal_to_test(var_name, limit_value, use_dask=True)\n data = 
ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 8631\n result = ds_object.qcfilter.add_equal_to_test(var_name, limit_value)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 8631\n\n # not equal to test\n limit_value = 7.6705\n result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value,\n test_assessment='Indeterminate',\n prepend_text='arm',\n limit_attr_name='warn_not_equal_to')\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 4318\n assert 'warn_not_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['warn_not_equal_to'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_not_equal_to'], limit_value)\n\n result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value)\n assert 'fail_not_equal_to' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value, use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 9320409\n result = ds_object.qcfilter.add_not_equal_to_test(var_name, limit_value)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 9320409\n\n # outside range test\n limit_value1 = 6.8\n limit_value2 = 12.7\n result = ds_object.qcfilter.add_outside_test(\n var_name, limit_value1, limit_value2, prepend_text='arm',\n limit_attr_names=['fail_lower_range', 'fail_upper_range'])\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 115\n assert 'fail_lower_range' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_lower_range'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_lower_range'], limit_value1)\n assert 'fail_upper_range' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_upper_range'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_upper_range'], limit_value2)\n\n result = ds_object.qcfilter.add_outside_test(var_name, limit_value1, limit_value2,\n test_assessment='Indeterminate')\n assert 'warn_lower_range' in ds_object[result['qc_variable_name']].attrs.keys()\n assert 'warn_upper_range' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_outside_test(var_name, limit_value1, limit_value2,\n use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 342254\n result = ds_object.qcfilter.add_outside_test(var_name, limit_value1, limit_value2,)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 342254\n\n # Starting to run out of space for tests. 
Remove some tests.\n for ii in range(16, 30):\n ds_object.qcfilter.remove_test(var_name, test_number=ii)\n\n # inside range test\n limit_value1 = 7\n limit_value2 = 8\n result = ds_object.qcfilter.add_inside_test(\n var_name, limit_value1, limit_value2, prepend_text='arm',\n limit_attr_names=['fail_lower_range_inner', 'fail_upper_range_inner'])\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 479\n assert 'fail_lower_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_lower_range_inner'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_lower_range_inner'],\n limit_value1)\n assert 'fail_upper_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['fail_upper_range_inner'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['fail_upper_range_inner'],\n limit_value2)\n\n result = ds_object.qcfilter.add_inside_test(var_name, limit_value1, limit_value2,\n test_assessment='Indeterminate')\n assert 'warn_lower_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()\n assert 'warn_upper_range_inner' in ds_object[result['qc_variable_name']].attrs.keys()\n\n result = ds_object.qcfilter.add_inside_test(var_name, limit_value1, limit_value2,\n use_dask=True)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 1820693\n result = ds_object.qcfilter.add_inside_test(var_name, limit_value1, limit_value2,)\n data = ds_object.qcfilter.get_qc_test_mask(var_name, result['test_number'],\n return_index=True)\n assert np.sum(data) == 1820693\n\n # delta test\n test_limit = 0.05\n result = ds_object.qcfilter.add_delta_test(var_name, test_limit, prepend_text='arm',\n limit_attr_name='warn_delta')\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_tests=result['test_number'])\n assert 'arm' in result['test_meaning']\n assert np.ma.count_masked(data) == 175\n assert 'warn_delta' in ds_object[result['qc_variable_name']].attrs.keys()\n assert (ds_object[result['qc_variable_name']].attrs['warn_delta'].dtype ==\n ds_object[result['variable_name']].values.dtype)\n assert np.isclose(ds_object[result['qc_variable_name']].attrs['warn_delta'], test_limit)\n\n data = ds_object.qcfilter.get_masked_data(var_name,\n rm_assessments=['Suspect', 'Bad'])\n assert np.ma.count_masked(data) == 1355\n\n result = ds_object.qcfilter.add_delta_test(var_name, test_limit, test_assessment='Bad')\n assert 'fail_delta' in ds_object[result['qc_variable_name']].attrs.keys()\n\n comp_object = read_netcdf(EXAMPLE_IRT25m20s)\n with np.testing.assert_raises(ValueError):\n result = ds_object.qcfilter.add_difference_test(var_name, 'test')\n\n with np.testing.assert_raises(ValueError):\n result = ds_object.qcfilter.add_difference_test(\n var_name, {comp_object.attrs['datastream']: comp_object},\n var_name, diff_limit=None)\n\n assert ds_object.qcfilter.add_difference_test(var_name, set_test_regardless=False) is None\n\n result = ds_object.qcfilter.add_difference_test(\n var_name, {comp_object.attrs['datastream']: comp_object},\n var_name, diff_limit=1, prepend_text='arm')\n data = ds_object.qcfilter.get_masked_data(var_name, rm_tests=result['test_number'])\n 
assert 'arm' in result['test_meaning']\n assert not (data.mask).all()\n\n comp_object.close()\n ds_object.close()\n\n\ndef test_qctests_dos():\n ds_object = read_netcdf(EXAMPLE_IRT25m20s)\n var_name = 'inst_up_long_dome_resist'\n\n # persistence test\n data = ds_object[var_name].values\n data[1000:2500] = data[1000]\n ds_object[var_name].values = data\n ds_object.qcfilter.add_persistence_test(var_name)\n qc_var_name = ds_object.qcfilter.check_for_ancillary_qc(\n var_name, add_if_missing=False, cleanup=False, flag_type=False)\n test_meaning = ('Data failing persistence test. Standard Deviation over a '\n 'window of 10 values less than 0.0001.')\n assert ds_object[qc_var_name].attrs['flag_meanings'][-1] == test_meaning\n assert np.sum(ds_object[qc_var_name].values) == 1500\n\n ds_object.qcfilter.add_persistence_test(var_name, window=10000, prepend_text='DQO')\n test_meaning = ('DQO: Data failing persistence test. Standard Deviation over a window of '\n '4320 values less than 0.0001.')\n assert ds_object[qc_var_name].attrs['flag_meanings'][-1] == test_meaning\n\n\ndef test_datafilter():\n ds = read_netcdf(EXAMPLE_MET1)\n ds.clean.cleanup()\n\n var_name = 'atmos_pressure'\n\n ds_1 = ds.mean()\n\n ds.qcfilter.add_less_test(var_name, 99, test_assessment='Bad')\n ds.qcfilter.datafilter(rm_assessments='Bad')\n ds_2 = ds.mean()\n\n assert np.isclose(ds_1[var_name].values, 98.86, atol=0.01)\n assert np.isclose(ds_2[var_name].values, 99.15, atol=0.01)\n\n ds.close()\n\n\ndef test_qc_remainder():\n ds = read_netcdf(EXAMPLE_MET1)\n assert ds.clean.get_attr_info(variable='bad_name') is None\n del ds.attrs['qc_bit_comment']\n assert isinstance(ds.clean.get_attr_info(), dict)\n ds.attrs['qc_flag_comment'] = 'testing'\n ds.close()\n\n ds = read_netcdf(EXAMPLE_MET1)\n ds.clean.cleanup(normalize_assessment=True)\n ds['qc_atmos_pressure'].attrs['units'] = 'testing'\n del ds['qc_temp_mean'].attrs['units']\n del ds['qc_temp_mean'].attrs['flag_masks']\n ds.clean.handle_missing_values()\n ds.close()\n\n ds = read_netcdf(EXAMPLE_MET1)\n ds.attrs['qc_bit_1_comment'] = 'tesing'\n data = ds['qc_atmos_pressure'].values.astype(np.int64)\n data[0] = 2**32\n ds['qc_atmos_pressure'].values = data\n ds.clean.get_attr_info(variable='qc_atmos_pressure')\n ds.clean.clean_arm_state_variables('testname')\n ds.clean.cleanup()\n ds['qc_atmos_pressure'].attrs['standard_name'] = 'wrong_name'\n ds.clean.link_variables()\n assert ds['qc_atmos_pressure'].attrs['standard_name'] == 'quality_flag'\n ds.close()\n\n\ndef test_qc_flag_description():\n \"\"\"\n This will check if the cleanup() method will correctly convert convert\n flag_#_description to CF flag_masks and flag_meanings.\n\n \"\"\"\n\n ds = read_netcdf(EXAMPLE_CO2FLX4M)\n ds.clean.cleanup()\n qc_var_name = ds.qcfilter.check_for_ancillary_qc('momentum_flux', add_if_missing=False,\n cleanup=False)\n\n assert isinstance(ds[qc_var_name].attrs['flag_masks'], list)\n assert isinstance(ds[qc_var_name].attrs['flag_meanings'], list)\n assert isinstance(ds[qc_var_name].attrs['flag_assessments'], list)\n assert ds[qc_var_name].attrs['standard_name'] == 'quality_flag'\n\n assert len(ds[qc_var_name].attrs['flag_masks']) == 9\n unique_flag_assessments = list(set(['Acceptable', 'Indeterminate', 'Bad']))\n assert list(set(ds[qc_var_name].attrs['flag_assessments'])) == unique_flag_assessments\n\n\ndef test_clean():\n # Read test data\n ceil_ds = read_netcdf([EXAMPLE_CEIL1])\n # Cleanup QC data\n ceil_ds.clean.cleanup(clean_arm_state_vars=['detection_status'])\n\n # Check that global 
attribures are removed\n global_attributes = ['qc_bit_comment',\n 'qc_bit_1_description',\n 'qc_bit_1_assessment',\n 'qc_bit_2_description',\n 'qc_bit_2_assessment'\n 'qc_bit_3_description',\n 'qc_bit_3_assessment'\n ]\n\n for glb_att in global_attributes:\n assert glb_att not in ceil_ds.attrs.keys()\n\n # Check that CF attributes are set including new flag_assessments\n var_name = 'qc_first_cbh'\n for attr_name in ['flag_masks', 'flag_meanings', 'flag_assessments']:\n assert attr_name in ceil_ds[var_name].attrs.keys()\n assert isinstance(ceil_ds[var_name].attrs[attr_name], list)\n\n # Check that the flag_mask values are set correctly\n assert ceil_ds['qc_first_cbh'].attrs['flag_masks'] == [1, 2, 4]\n\n # Check that the flag_meanings values are set correctly\n assert (ceil_ds['qc_first_cbh'].attrs['flag_meanings'] ==\n ['Value is equal to missing_value.',\n 'Value is less than the fail_min.',\n 'Value is greater than the fail_max.'])\n\n # Check the value of flag_assessments is as expected\n assert ceil_ds['qc_first_cbh'].attrs['flag_assessments'] == ['Bad', 'Bad', 'Bad']\n\n # Check that ancillary varibles is being added\n assert 'qc_first_cbh' in ceil_ds['first_cbh'].attrs['ancillary_variables'].split()\n\n # Check that state field is updated to CF\n assert 'flag_values' in ceil_ds['detection_status'].attrs.keys()\n assert isinstance(ceil_ds['detection_status'].attrs['flag_values'], list)\n assert ceil_ds['detection_status'].attrs['flag_values'] == [0, 1, 2, 3, 4, 5]\n\n assert 'flag_meanings' in ceil_ds['detection_status'].attrs.keys()\n assert isinstance(ceil_ds['detection_status'].attrs['flag_meanings'], list)\n assert (ceil_ds['detection_status'].attrs['flag_meanings'] ==\n ['No significant backscatter',\n 'One cloud base detected',\n 'Two cloud bases detected',\n 'Three cloud bases detected',\n 'Full obscuration determined but no cloud base detected',\n 'Some obscuration detected but determined to be transparent'])\n\n assert 'flag_0_description' not in ceil_ds['detection_status'].attrs.keys()\n assert ('detection_status' in\n ceil_ds['first_cbh'].attrs['ancillary_variables'].split())\n\n ceil_ds.close()\n\n\ndef test_compare_time_series_trends():\n\n drop_vars = ['base_time', 'time_offset', 'atmos_pressure', 'qc_atmos_pressure',\n 'temp_std', 'rh_mean', 'qc_rh_mean', 'rh_std', 'vapor_pressure_mean',\n 'qc_vapor_pressure_mean', 'vapor_pressure_std', 'wspd_arith_mean',\n 'qc_wspd_arith_mean', 'wspd_vec_mean', 'qc_wspd_vec_mean', 'wdir_vec_mean',\n 'qc_wdir_vec_mean', 'wdir_vec_std', 'tbrg_precip_total', 'qc_tbrg_precip_total',\n 'tbrg_precip_total_corr', 'qc_tbrg_precip_total_corr', 'org_precip_rate_mean',\n 'qc_org_precip_rate_mean', 'pwd_err_code', 'pwd_mean_vis_1min', 'qc_pwd_mean_vis_1min',\n 'pwd_mean_vis_10min', 'qc_pwd_mean_vis_10min', 'pwd_pw_code_inst',\n 'qc_pwd_pw_code_inst', 'pwd_pw_code_15min', 'qc_pwd_pw_code_15min',\n 'pwd_pw_code_1hr', 'qc_pwd_pw_code_1hr', 'pwd_precip_rate_mean_1min',\n 'qc_pwd_precip_rate_mean_1min', 'pwd_cumul_rain', 'qc_pwd_cumul_rain',\n 'pwd_cumul_snow', 'qc_pwd_cumul_snow', 'logger_volt', 'qc_logger_volt',\n 'logger_temp', 'qc_logger_temp', 'lat', 'lon', 'alt']\n ds = read_netcdf(EXAMPLE_MET1, drop_variables=drop_vars)\n ds.clean.cleanup()\n ds2 = copy.deepcopy(ds)\n\n var_name = 'temp_mean'\n qc_var_name = ds.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=False,\n cleanup=False, flag_type=False)\n ds.qcfilter.compare_time_series_trends(var_name=var_name, time_shift=60,\n comp_var_name=var_name, comp_dataset=ds2,\n 
time_qc_threshold=60 * 10)\n\n test_description = ('Time shift detected with Minimum Difference test. Comparison of '\n 'temp_mean with temp_mean off by 0 seconds exceeding absolute '\n 'threshold of 600 seconds.')\n assert ds[qc_var_name].attrs['flag_meanings'][-1] == test_description\n\n time = ds2['time'].values + np.timedelta64(1, 'h')\n time_attrs = ds2['time'].attrs\n ds2 = ds2.assign_coords({'time': time})\n ds2['time'].attrs = time_attrs\n\n ds.qcfilter.compare_time_series_trends(var_name=var_name, comp_dataset=ds2, time_step=60,\n time_match_threshhold=50)\n\n test_description = ('Time shift detected with Minimum Difference test. Comparison of '\n 'temp_mean with temp_mean off by 3600 seconds exceeding absolute '\n 'threshold of 900 seconds.')\n assert ds[qc_var_name].attrs['flag_meanings'][-1] == test_description\n\n\ndef test_qc_data_type():\n drop_vars = ['base_time', 'time_offset', 'inst_up_long_case_resist',\n 'inst_up_long_hemisp_tp', 'inst_up_short_hemisp_tp',\n 'inst_sfc_ir_temp', 'lat', 'lon', 'alt']\n ds_object = read_netcdf(EXAMPLE_IRT25m20s, drop_variables=drop_vars)\n var_name = 'inst_up_long_dome_resist'\n expected_qc_var_name = 'qc_' + var_name\n ds_object.qcfilter.check_for_ancillary_qc(var_name, add_if_missing=True)\n del ds_object[expected_qc_var_name].attrs['flag_meanings']\n del ds_object[expected_qc_var_name].attrs['flag_assessments']\n ds_object[expected_qc_var_name] = ds_object[expected_qc_var_name].astype(np.int8)\n ds_object.qcfilter.add_test(var_name, index=[1], test_number=9, test_meaning='First test')\n\n assert ds_object[expected_qc_var_name].attrs['flag_masks'][0].dtype == np.uint32\n assert ds_object[expected_qc_var_name].dtype == np.int16\n ds_object.qcfilter.add_test(var_name, index=[1], test_number=17, test_meaning='Second test')\n assert ds_object[expected_qc_var_name].dtype == np.int32\n ds_object.qcfilter.add_test(var_name, index=[1], test_number=33, test_meaning='Third test')\n assert ds_object[expected_qc_var_name].dtype == np.int64\n assert ds_object[expected_qc_var_name].attrs['flag_masks'][0].dtype == np.uint64\n\n ds_object.qcfilter.add_test(var_name, index=[1], test_meaning='Fourth test', recycle=True)\n"
] | [
[
"numpy.isnan",
"numpy.ma.count_masked",
"numpy.timedelta64",
"numpy.nansum",
"numpy.testing.assert_raises",
"numpy.array",
"numpy.sum",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wolfido92/Python-Hackathon | [
"f79224c584322b42b0f476b91acd39bdd3ec15d8"
] | [
"sort_by_BDM.py"
] | [
"from enum import Enum\nimport pandas as pd\nimport xlrd\nfrom pathlib import Path\nimport numpy as np\nimport csv\n\n\nclass Tag(Enum):\n NO_GO = \"no go\"\n GO = \"go\"\n S_H = \"sanity high\"\n S_L = \"sanity low\"\n NO_SHOW = \"no show\"\n\n\nclass Sort_By_BDM:\n \"\"\"\n This class reads a text file of BSM results and excel file of keys of tagging by ranking and\n creates a sorted DataFrame with the snack image name as index.\n the columns of the DataFrame are ranking (1 for the highest Bid), Bid (score by BDM),\n tag (Enum object), show (boolean) and cued (boolean)\n Other attributes: the full text file name, the full excel file name and key DataFrame with\n ranking as index, a tag column (string) and enum_col (Tags)\n \"\"\"\n def __init__(self, text_file_path, key_file_path):\n self.text_file_path = Path(text_file_path)\n self.key_file_path = Path(key_file_path)\n self.sorted_df = None\n self.key_df = None\n\n def create_full_df(self):\n self._read_BDM_results()\n self._read_keys_file()\n self._disp_changes()\n self._add_beep_and_show_cols()\n self._validate_df_is_full()\n return self.sorted_df\n\n def _read_BDM_results(self):\n \"\"\"\n reads the BDM results text file and return a DataFrame with snacks' image names as index and Bid column\n the returned DataFrame is sorted by Bid, descending values\n \"\"\"\n with open(self.text_file_path) as file:\n df = pd.read_table(file, index_col=0)\n # creating a dataframe with the stimulus name as index (image name) and sorted by ranking\n df.set_index('StimName', inplace=True)\n df.pop('RT')\n self.sorted_df = df.sort_values('Bid', ascending=0)\n\n def _read_keys_file(self):\n \"\"\"\n reads an excel keys file that contain a ranking column and tag column\n returns a DataFrame with ranking as index and a tag column\n \"\"\"\n xl = pd.ExcelFile(self.key_file_path)\n key_df = xl.parse()\n key_df.set_index('ranking', inplace=True)\n # adds a column with the Tag object to use instead of the strings\n key_df['enum_col'] = key_df.tag.apply(lambda x: Tag(x))\n self.key_df = key_df\n\n def _disp_changes(self):\n # adds a column of tag to sorted_df\n self.sorted_df['tag'] = self.key_df['enum_col'].tolist()\n # adds a column of ranking\n self.sorted_df.insert(loc=0, column='ranking', value=self.key_df.index.values)\n\n def _add_beep_and_show_cols(self):\n # adds a boolean columns that indicate if we should show a snack and if its cued\n self.sorted_df['show'] = self.sorted_df.tag.apply(lambda x: x.name != 'NO_SHOW')\n self.sorted_df['cued'] = self.sorted_df.tag.apply(lambda x: x.name == 'GO')\n\n\n def _validate_df_is_full(self):\n # verifying the dataframe has no null values\n if self.sorted_df.isnull().values.any():\n raise IndexError('The snacks of the BDM and the Excel file do not match')\n\nif __name__ == '__main__':\n # reading the results of the BDM\n p = r'C:\\Users\\wolfi\\Documents\\PythonCourse\\Final_project\\Python-Hackathon\\Only_6_snacks.txt'\n file_path = Path(p)\n\n # reading and creating a dataframe with tag for each position of the sorted ranking\n key_p = r'C:\\Users\\wolfi\\Documents\\PythonCourse\\Final_project\\Python-Hackathon\\Only_6_snacks_ladder_key.xlsx'\n key_p = Path(key_p)\n print(key_p)\n A = Sort_By_BDM(file_path, key_p)\n print(A.create_full_df())\n\n A.sorted_df.to_csv('Only_6_sorted_BDM_mock_data.csv')\n"
] | [
[
"pandas.read_table",
"pandas.ExcelFile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
longhuang318/Hybrid-DeepRL-Automated-Driving | [
"386f5d43225b2acfcf8300ba644989e68113413b"
] | [
"hybrid-rl/sources/navigation/global_route_planner.py"
] | [
"# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\n\"\"\"\nThis module provides GlobalRoutePlanner implementation.\n\"\"\"\n\nimport math\n\nimport numpy as np\nimport networkx as nx\n\nimport carla\n\nfrom sources.navigation.modified_local_planner import RoadOption\nfrom sources.navigation.misc import vector\n\n\nclass GlobalRoutePlanner(object):\n \"\"\"\n This class provides a very high level route plan.\n Instantiate the class by passing a reference to\n A GlobalRoutePlannerDAO object.\n \"\"\"\n\n def __init__(self, dao):\n \"\"\"\n Constructor\n \"\"\"\n self._dao = dao\n self._topology = None\n self._graph = None\n self._id_map = None\n self._road_id_to_edge = None\n self._intersection_end_node = -1\n self._previous_decision = RoadOption.VOID\n\n def setup(self):\n \"\"\"\n Performs initial server data lookup for detailed topology\n and builds graph representation of the world map.\n \"\"\"\n self._topology = self._dao.get_topology()\n self._graph, self._id_map, self._road_id_to_edge = self._build_graph()\n self._find_loose_ends()\n self._lane_change_link()\n\n def _build_graph(self):\n \"\"\"\n This function builds a networkx graph representation of topology.\n The topology is read from self._topology.\n graph node properties:\n vertex - (x,y,z) position in world map\n graph edge properties:\n entry_vector - unit vector along tangent at entry point\n exit_vector - unit vector along tangent at exit point\n net_vector - unit vector of the chord from entry to exit\n intersection - boolean indicating if the edge belongs to an\n intersection\n return : graph -> networkx graph representing the world map,\n id_map-> mapping from (x,y,z) to node id\n road_id_to_edge-> map from road id to edge in the graph\n \"\"\"\n graph = nx.DiGraph()\n id_map = dict() # Map with structure {(x,y,z): id, ... }\n road_id_to_edge = dict() # Map with structure {road_id: {lane_id: edge, ... }, ... 
}\n\n for segment in self._topology:\n\n entry_xyz, exit_xyz = segment['entryxyz'], segment['exitxyz']\n path = segment['path']\n entry_wp, exit_wp = segment['entry'], segment['exit']\n intersection = entry_wp.is_junction\n road_id, section_id, lane_id = entry_wp.road_id, entry_wp.section_id, entry_wp.lane_id\n\n for vertex in entry_xyz, exit_xyz:\n # Adding unique nodes and populating id_map\n if vertex not in id_map:\n new_id = len(id_map)\n id_map[vertex] = new_id\n graph.add_node(new_id, vertex=vertex)\n n1 = id_map[entry_xyz]\n n2 = id_map[exit_xyz]\n if road_id not in road_id_to_edge:\n road_id_to_edge[road_id] = dict()\n if section_id not in road_id_to_edge[road_id]:\n road_id_to_edge[road_id][section_id] = dict()\n road_id_to_edge[road_id][section_id][lane_id] = (n1, n2)\n\n entry_carla_vector = entry_wp.transform.rotation.get_forward_vector()\n exit_carla_vector = exit_wp.transform.rotation.get_forward_vector()\n # Adding edge with attributes\n graph.add_edge(\n n1, n2,\n length=len(path) + 1, path=path,\n entry_waypoint=entry_wp, exit_waypoint=exit_wp,\n entry_vector=np.array(\n [entry_carla_vector.x, entry_carla_vector.y, entry_carla_vector.z]),\n exit_vector=np.array(\n [exit_carla_vector.x, exit_carla_vector.y, exit_carla_vector.z]),\n net_vector=vector(entry_wp.transform.location, exit_wp.transform.location),\n intersection=intersection, type=RoadOption.LANEFOLLOW)\n\n return graph, id_map, road_id_to_edge\n\n def _find_loose_ends(self):\n \"\"\"\n This method finds road segments that have an unconnected end and\n adds them to the internal graph representation\n \"\"\"\n count_loose_ends = 0\n hop_resolution = self._dao.get_resolution()\n for segment in self._topology:\n end_wp = segment['exit']\n exit_xyz = segment['exitxyz']\n road_id, section_id, lane_id = end_wp.road_id, end_wp.section_id, end_wp.lane_id\n if road_id in self._road_id_to_edge and \\\n section_id in self._road_id_to_edge[road_id] and \\\n lane_id in self._road_id_to_edge[road_id][section_id]:\n pass\n else:\n count_loose_ends += 1\n if road_id not in self._road_id_to_edge:\n self._road_id_to_edge[road_id] = dict()\n if section_id not in self._road_id_to_edge[road_id]:\n self._road_id_to_edge[road_id][section_id] = dict()\n n1 = self._id_map[exit_xyz]\n n2 = -1*count_loose_ends\n self._road_id_to_edge[road_id][section_id][lane_id] = (n1, n2)\n next_wp = end_wp.next(hop_resolution)\n path = []\n while next_wp is not None and next_wp and \\\n next_wp[0].road_id == road_id and \\\n next_wp[0].section_id == section_id and \\\n next_wp[0].lane_id == lane_id:\n path.append(next_wp[0])\n next_wp = next_wp[0].next(hop_resolution)\n if path:\n n2_xyz = (path[-1].transform.location.x,\n path[-1].transform.location.y,\n path[-1].transform.location.z)\n self._graph.add_node(n2, vertex=n2_xyz)\n self._graph.add_edge(\n n1, n2,\n length=len(path) + 1, path=path,\n entry_waypoint=end_wp, exit_waypoint=path[-1],\n entry_vector=None, exit_vector=None, net_vector=None,\n intersection=end_wp.is_intersection, type=RoadOption.LANEFOLLOW)\n\n def _localize(self, location):\n \"\"\"\n This function finds the road segment closest to given location\n location : carla.Location to be localized in the graph\n return : pair node ids representing an edge in the graph\n \"\"\"\n waypoint = self._dao.get_waypoint(location)\n edge = None\n try:\n edge = self._road_id_to_edge[waypoint.road_id][waypoint.section_id][waypoint.lane_id]\n except KeyError:\n print(\n \"Failed to localize! 
: \",\n \"Road id : \", waypoint.road_id,\n \"Section id : \", waypoint.section_id,\n \"Lane id : \", waypoint.lane_id,\n \"Location : \", waypoint.transform.location.x,\n waypoint.transform.location.y)\n return edge\n\n def _lane_change_link(self):\n \"\"\"\n This method places zero cost links in the topology graph\n representing availability of lane changes.\n \"\"\"\n\n for segment in self._topology:\n left_found, right_found = False, False\n\n for waypoint in segment['path']:\n if not segment['entry'].is_junction:\n next_waypoint, next_road_option, next_segment = None, None, None\n\n if bool(waypoint.lane_change & carla.LaneChange.Right) and not right_found:\n next_waypoint = waypoint.get_right_lane()\n if next_waypoint is not None and \\\n next_waypoint.lane_type == carla.LaneType.Driving and \\\n waypoint.road_id == next_waypoint.road_id:\n next_road_option = RoadOption.CHANGELANERIGHT\n next_segment = self._localize(next_waypoint.transform.location)\n if next_segment is not None:\n self._graph.add_edge(\n self._id_map[segment['entryxyz']], next_segment[0], entry_waypoint=segment['entry'],\n exit_waypoint=self._graph.edges[next_segment[0], next_segment[1]]['entry_waypoint'],\n path=[], length=0, type=next_road_option, change_waypoint = waypoint)\n right_found = True\n\n if bool(waypoint.lane_change & carla.LaneChange.Left) and not left_found:\n next_waypoint = waypoint.get_left_lane()\n if next_waypoint is not None and next_waypoint.lane_type == carla.LaneType.Driving and \\\n waypoint.road_id == next_waypoint.road_id:\n next_road_option = RoadOption.CHANGELANELEFT\n next_segment = self._localize(next_waypoint.transform.location)\n if next_segment is not None:\n self._graph.add_edge(\n self._id_map[segment['entryxyz']], next_segment[0], entry_waypoint=segment['entry'],\n exit_waypoint=self._graph.edges[next_segment[0], next_segment[1]]['entry_waypoint'],\n path=[], length=0, type=next_road_option, change_waypoint = waypoint)\n left_found = True\n\n if left_found and right_found:\n break\n\n def _distance_heuristic(self, n1, n2):\n \"\"\"\n Distance heuristic calculator for path searching\n in self._graph\n \"\"\"\n l1 = np.array(self._graph.nodes[n1]['vertex'])\n l2 = np.array(self._graph.nodes[n2]['vertex'])\n return np.linalg.norm(l1-l2)\n\n def _path_search(self, origin, destination):\n \"\"\"\n This function finds the shortest path connecting origin and destination\n using A* search with distance heuristic.\n origin : carla.Location object of start position\n destination : carla.Location object of of end position\n return : path as list of node ids (as int) of the graph self._graph\n connecting origin and destination\n \"\"\"\n\n start, end = self._localize(origin), self._localize(destination)\n\n route = nx.astar_path(\n self._graph, source=start[0], target=end[0],\n heuristic=self._distance_heuristic, weight='length')\n route.append(end[1])\n return route\n\n def _successive_last_intersection_edge(self, index, route):\n \"\"\"\n This method returns the last successive intersection edge\n from a starting index on the route.\n\n This helps moving past tiny intersection edges to calculate\n proper turn decisions.\n \"\"\"\n\n last_intersection_edge = None\n last_node = None\n for node1, node2 in [(route[i], route[i+1]) for i in range(index, len(route)-1)]:\n candidate_edge = self._graph.edges[node1, node2]\n if node1 == route[index]:\n last_intersection_edge = candidate_edge\n if candidate_edge['type'] == RoadOption.LANEFOLLOW and \\\n candidate_edge['intersection']:\n 
last_intersection_edge = candidate_edge\n last_node = node2\n else:\n break\n\n return last_node, last_intersection_edge\n\n def _turn_decision(self, index, route, threshold=math.radians(5)):\n \"\"\"\n This method returns the turn decision (RoadOption) for pair of edges\n around current index of route list\n \"\"\"\n\n decision = None\n previous_node = route[index-1]\n current_node = route[index]\n next_node = route[index+1]\n next_edge = self._graph.edges[current_node, next_node]\n if index > 0:\n if self._previous_decision != RoadOption.VOID and \\\n self._intersection_end_node > 0 and \\\n self._intersection_end_node != previous_node and \\\n next_edge['type'] == RoadOption.LANEFOLLOW and \\\n next_edge['intersection']:\n decision = self._previous_decision\n else:\n self._intersection_end_node = -1\n current_edge = self._graph.edges[previous_node, current_node]\n calculate_turn = current_edge['type'].value == RoadOption.LANEFOLLOW.value and \\\n not current_edge['intersection'] and \\\n next_edge['type'].value == RoadOption.LANEFOLLOW.value and \\\n next_edge['intersection']\n if calculate_turn:\n last_node, tail_edge = self._successive_last_intersection_edge(index, route)\n self._intersection_end_node = last_node\n if tail_edge is not None:\n next_edge = tail_edge\n cv, nv = current_edge['exit_vector'], next_edge['net_vector']\n cross_list = []\n for neighbor in self._graph.successors(current_node):\n select_edge = self._graph.edges[current_node, neighbor]\n if select_edge['type'].value == RoadOption.LANEFOLLOW.value:\n if neighbor != route[index+1]:\n sv = select_edge['net_vector']\n cross_list.append(np.cross(cv, sv)[2])\n next_cross = np.cross(cv, nv)[2]\n deviation = math.acos(np.clip(\n np.dot(cv, nv)/(np.linalg.norm(cv)*np.linalg.norm(nv)), -1.0, 1.0))\n if not cross_list:\n cross_list.append(0)\n if deviation < threshold:\n decision = RoadOption.STRAIGHT\n elif cross_list and next_cross < min(cross_list):\n decision = RoadOption.LEFT\n elif cross_list and next_cross > max(cross_list):\n decision = RoadOption.RIGHT\n elif next_cross < 0:\n decision = RoadOption.LEFT\n elif next_cross > 0:\n decision = RoadOption.RIGHT\n else:\n decision = next_edge['type']\n else:\n decision = next_edge['type']\n self._previous_decision = decision\n\n return decision\n\n def abstract_route_plan(self, origin, destination):\n \"\"\"\n The following function generates the route plan based on\n origin : carla.Location object of the route's start position\n destination : carla.Location object of the route's end position\n return : list of turn by turn navigation decisions as\n agents.navigation.local_planner.RoadOption elements\n Possible values are STRAIGHT, LEFT, RIGHT, LANEFOLLOW, VOID\n CHANGELANELEFT, CHANGELANERIGHT\n \"\"\"\n\n route = self._path_search(origin, destination)\n plan = []\n\n for i in range(len(route) - 1):\n road_option = self._turn_decision(i, route)\n plan.append(road_option)\n\n return plan\n\n def _find_closest_in_list(self, current_waypoint, waypoint_list):\n min_distance = float('inf')\n closest_index = -1\n for i, waypoint in enumerate(waypoint_list):\n distance = waypoint.transform.location.distance(\n current_waypoint.transform.location)\n if distance < min_distance:\n min_distance = distance\n closest_index = i\n\n return closest_index\n\n def trace_route(self, origin, destination):\n \"\"\"\n This method returns list of (carla.Waypoint, RoadOption)\n from origin (carla.Location) to destination (carla.Location)\n \"\"\"\n\n route_trace = []\n route = 
self._path_search(origin, destination)\n current_waypoint = self._dao.get_waypoint(origin)\n destination_waypoint = self._dao.get_waypoint(destination)\n resolution = self._dao.get_resolution()\n\n for i in range(len(route) - 1):\n road_option = self._turn_decision(i, route)\n edge = self._graph.edges[route[i], route[i+1]]\n path = []\n\n if edge['type'].value != RoadOption.LANEFOLLOW.value and \\\n edge['type'].value != RoadOption.VOID.value:\n route_trace.append((current_waypoint, road_option))\n exit_wp = edge['exit_waypoint']\n n1, n2 = self._road_id_to_edge[exit_wp.road_id][exit_wp.section_id][exit_wp.lane_id]\n next_edge = self._graph.edges[n1, n2]\n if next_edge['path']:\n closest_index = self._find_closest_in_list(current_waypoint, next_edge['path'])\n closest_index = min(len(next_edge['path'])-1, closest_index+5)\n current_waypoint = next_edge['path'][closest_index]\n else:\n current_waypoint = next_edge['exit_waypoint']\n route_trace.append((current_waypoint, road_option))\n\n else:\n path = path + [edge['entry_waypoint']] + edge['path'] + [edge['exit_waypoint']]\n closest_index = self._find_closest_in_list(current_waypoint, path)\n for waypoint in path[closest_index:]:\n current_waypoint = waypoint\n route_trace.append((current_waypoint, road_option))\n if len(route)-i <= 2 and \\\n waypoint.transform.location.distance(destination) < 2*resolution:\n break\n elif len(route)-i <= 2 and \\\n current_waypoint.road_id == destination_waypoint.road_id and \\\n current_waypoint.section_id == destination_waypoint.section_id and \\\n current_waypoint.lane_id == destination_waypoint.lane_id:\n destination_index = self._find_closest_in_list(destination_waypoint, path)\n if closest_index > destination_index:\n break\n\n return route_trace\n"
] | [
[
"numpy.dot",
"numpy.array",
"numpy.linalg.norm",
"numpy.cross"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HoBeom/deep-person-reid | [
"5ca1eedb326e6bfdf083c29ec6e5ce6ab95ee0e7"
] | [
"RL_projects/test/train_acc_test.py"
] | [
"\nfrom torchreid import metrics\nfrom torchreid.utils import re_ranking\nimport numpy as np\nimport torch\n\nfrom torch.nn import functional as F\n\nif __name__ == '__main__':\n ff = torch.load('train_avg_feature.pt')\n f_pids = np.load('train_pids.npy')\n f_camids = np.load('train_camids.npy')\n \n\n # eval_index = 50\n # ef = ff[eval_index].unsqueeze(0).clone()\n # e_pids = f_pids[eval_index].reshape((1,))\n # e_camids = f_camids[eval_index].reshape((1,))\n\n ef = ff.clone()\n dist_metric='euclidean'\n print(\n 'Computing distance matrix with metric={} ...'.format(dist_metric)\n )\n # normalize feature\n # print('Normalizing feature ...')\n # qf = F.normalize(qf, p=2, dim=1)\n # gf = F.normalize(gf, p=2, dim=1)\n\n distmat = metrics.compute_distance_matrix(ff, ef, dist_metric)\n distmat = distmat.numpy()\n\n # print('Applying person re-ranking ...')\n # distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)\n # distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)\n # distmat = re_ranking(distmat, distmat_qq, distmat_gg)\n\n print('Computing CMC and mAP ...')\n cmc, mAP = metrics.evaluate_rank(\n distmat,\n f_pids,\n f_pids,\n f_camids,\n f_camids,\n )\n ranks=[1, 5, 10, 20]\n print('** Results **')\n print('mAP: {:.1%}'.format(mAP))\n print('CMC curve')\n for r in ranks:\n print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))\n \n ef = torch.zeros_like(ff)\n\n\n distmat = metrics.compute_distance_matrix(ff, ef, dist_metric)\n distmat = distmat.numpy()\n\n # print('Applying person re-ranking ...')\n # distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)\n # distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)\n # distmat = re_ranking(distmat, distmat_qq, distmat_gg)\n\n print('Computing CMC and mAP ...')\n zcmc, zmAP = metrics.evaluate_rank(\n distmat,\n f_pids,\n f_pids,\n f_camids,\n f_camids,\n )\n ranks=[1, 5, 10, 20]\n print('** Results **')\n print('mAP: {:.1%}'.format(zmAP))\n print('CMC curve')\n for r in ranks:\n print('Rank-{:<3}: {:.1%}'.format(r, zcmc[r - 1]))\n print(zmAP - mAP)\n print(sum(zcmc[:20] - cmc[:20])+ (zmAP - mAP))\n \n"
] | [
[
"numpy.load",
"torch.zeros_like",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cmayne28/mission_to_mars | [
"f87048e5b0dbe264f0919816500b54afe8a1e054"
] | [
"scrape_mars.py"
] | [
"from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd \n\ndef init_browser():\n executable_path = {\"executable_path\": \"/usr/local/bin/chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape():\n browser = init_browser()\n\n #NASA Mars News \n url = \"https://mars.nasa.gov/news/\"\n\n browser.visit(url)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n\n article = soup.find(\"div\", class_=\"list_text\")\n news_title = article.find(\"div\", class_=\"content_title\").text\n news_p = article.find(\"div\", class_=\"article_teaser_body\").text\n\n #JPL Mars Space Images - Featured Image\n url_jpl = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(url_jpl)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n\n img = soup.find(class_=\"carousel_item\")[\"style\"]\n img1 = img.split(\"'\")\n img2 = img1[1]\n\n featured_image_url = \"https://www.jpl.nasa.gov\" + img2 \n\n # Mars Weather\n\n url_weather = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(url_weather)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n mars_weather = soup.find('div', class_=\"js-tweet-text-container\").text.strip()\n\n # Mars Facts\n\n url_facts = \"https://space-facts.com/mars/\"\n browser.visit(url_facts)\n\n #use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.\n #Use Pandas to convert the data to a HTML table string.\n\n tables = pd.read_html(url_facts)\n\n df = tables[0]\n\n df.columns = ['', 'Value']\n\n df1 = df.set_index(\"\")\n\n mars_table = df.to_html()\n\n\n #Mars Hemispheres\n\n url_hemispheres = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(url_hemispheres)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n #print(soup.prettify())\n\n hemisphere_names = []\n mars_hemis = []\n\n for x in range(4):\n hems = soup.find_all('h3')[x].text\n hemisphere_names.append(hems)\n\n for x in range(4):\n mars_hem_images = browser.find_by_tag('h3')\n mars_hem_images[x].click()\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n image = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n img_url = 'https://astrogeology.usgs.gov'+ image\n mars_hemis.append(img_url)\n browser.back()\n\n hemisphere_image_urls = [{\"title\": hemisphere_names[x], \"img_url\": mars_hemis[x]} for x in range(4)]\n\n # Store data in a dictionary\n mars_data = {\n \"news_title\": news_title,\n \"news_p\": news_p,\n \"featured_image_url\": featured_image_url,\n \"mars_weather\": mars_weather,\n \"hemisphere_image_urls\" : hemisphere_image_urls,\n \"mars_table\": mars_table}\n \n # Close the browser after scraping\n browser.quit()\n\n # Return results\n return mars_data\n"
] | [
[
"pandas.read_html"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dtch1997/cookiecutter-pytorch | [
"b35b9f63a99a7021aaf92d76ae1dba29ad543477"
] | [
"{{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/model/loss.py"
] | [
"import torch.nn.functional as F\n\n\ndef nll_loss(output, target):\n return F.nll_loss(output, target)\n"
] | [
[
"torch.nn.functional.nll_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
balling/cycle-gated-gan | [
"f7c3929a5361f650b8b01a93fe4787c1cd6f3c9b"
] | [
"models/networks.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\nimport math\nimport numbers\nfrom torch.nn import functional as F\n\n\n###############################################################################\n# Helper Functions\n###############################################################################\n\n\nclass Identity(nn.Module):\n def forward(self, x):\n return x\n\n\ndef get_norm_layer(norm_type='instance'):\n \"\"\"Return a normalization layer\n\n Parameters:\n norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.\n \"\"\"\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'none':\n norm_layer = lambda x: Identity()\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n \"\"\"Return a learning rate scheduler\n\n Parameters:\n optimizer -- the optimizer of the network\n opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. \n opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine\n\n For 'linear', we keep the same learning rate for the first <opt.niter> epochs\n and linearly decay the rate to zero over the next <opt.niter_decay> epochs.\n For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.\n See https://pytorch.org/docs/stable/optim.html for more details.\n \"\"\"\n if opt.lr_policy == 'linear':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n elif opt.lr_policy == 'cosine':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\n\ndef init_weights(net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. 
Feel free to try yourself.\n \"\"\"\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n init.normal_(m.weight.data, 1.0, init_gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func) # apply the initialization function <init_func>\n\n\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights\n Parameters:\n net (network) -- the network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Return an initialized network.\n \"\"\"\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n net.to(gpu_ids[0])\n net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs\n init_weights(net, init_type, init_gain=init_gain)\n return net\n\n\ndef define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Create a generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128\n norm (str) -- the name of normalization layers used in the network: batch | instance | none\n use_dropout (bool) -- if use dropout layers.\n init_type (str) -- the name of our initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a generator\n\n Our current implementation provides two types of generators:\n U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)\n The original U-Net paper: https://arxiv.org/abs/1505.04597\n\n Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)\n Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.\n We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).\n\n\n The generator has been initialized by <init_net>. 
It uses RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netG == 'resnet_9blocks':\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n elif netG == 'resnet_6blocks':\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)\n elif netG == 'unet_128':\n net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_256':\n net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n else:\n raise NotImplementedError('Generator model name [%s] is not recognized' % netG)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\ndef define_Gated_G(input_nc, input_nclass, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], n_content=None):\n \"\"\"Create a generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128\n norm (str) -- the name of normalization layers used in the network: batch | instance | none\n use_dropout (bool) -- if use dropout layers.\n init_type (str) -- the name of our initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a generator\n\n Our current implementation provides two types of generators:\n U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)\n The original U-Net paper: https://arxiv.org/abs/1505.04597\n\n Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)\n Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.\n We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).\n\n\n The generator has been initialized by <init_net>. 
It uses RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n if netG == 'gated_resnet_6blocks':\n net = GatedResnetGenerator(input_nc, input_nclass, n_content, output_nc, ngf, False)\n elif netG == 'auto_gated_resnet_6blocks':\n net = GatedResnetGenerator(input_nc, input_nclass, n_content, output_nc, ngf, True)\n else:\n raise NotImplementedError('Generator model name [%s] is not recognized' % netG)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\ndef define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[], input_nclass=None, input_ncontent=None):\n \"\"\"Create a discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the first conv layer\n netD (str) -- the architecture's name: basic | n_layers | pixel\n n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'\n norm (str) -- the type of normalization layers used in the network.\n init_type (str) -- the name of the initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a discriminator\n\n Our current implementation provides three types of discriminators:\n [basic]: 'PatchGAN' classifier described in the original pix2pix paper.\n It can classify whether 70×70 overlapping patches are real or fake.\n Such a patch-level discriminator architecture has fewer parameters\n than a full-image discriminator and can work on arbitrarily-sized images\n in a fully convolutional fashion.\n\n [n_layers]: With this mode, you cna specify the number of conv layers in the discriminator\n with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)\n\n [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.\n It encourages greater color diversity but has no effect on spatial statistics.\n\n The discriminator has been initialized by <init_net>. It uses Leakly RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netD == 'basic': # default PatchGAN classifier\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, input_nclass=input_nclass, input_ncontent=input_ncontent)\n elif netD == 'n_layers': # more options\n net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, input_nclass=input_nclass, input_ncontent=input_ncontent)\n elif netD == 'pixel': # classify if each pixel is real or fake\n net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\n##############################################################################\n# Classes\n##############################################################################\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n \"\"\" Initialize the GANLoss class.\n\n Parameters:\n gan_mode (str) - - the type of GAN objective. 
It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction output from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n the calculated loss.\n \"\"\"\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n\n\ndef cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):\n \"\"\"Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028\n\n Arguments:\n netD (network) -- discriminator network\n real_data (tensor array) -- real images\n fake_data (tensor array) -- generated images from the generator\n device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n type (str) -- if we mix real and fake data or not [real | fake | mixed].\n constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2\n lambda_gp (float) -- weight for this loss\n\n Returns the gradient penalty loss\n \"\"\"\n if lambda_gp > 0.0:\n if type == 'real': # either use real images, fake images, or a linear interpolation of two.\n interpolatesv = real_data\n elif type == 'fake':\n interpolatesv = fake_data\n elif type == 'mixed':\n alpha = torch.rand(real_data.shape[0], 1)\n alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)\n alpha = alpha.to(device)\n interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)\n else:\n raise NotImplementedError('{} not implemented'.format(type))\n interpolatesv.requires_grad_(True)\n disc_interpolates = netD(interpolatesv)\n gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,\n grad_outputs=torch.ones(disc_interpolates.size()).to(device),\n create_graph=True, retain_graph=True, only_inputs=True)\n gradients = 
gradients[0].view(real_data.size(0), -1) # flat the data\n gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps\n return gradient_penalty, gradients\n else:\n return 0.0, None\n\nclass GatedResnetGenerator(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, input_nclass, n_content, output_nc, ngf=64, use_identity=False, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=5, padding_type='reflect'):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(GatedResnetGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n encoder = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n self.n_style = input_nclass + (1 if use_identity else 0)\n self.n_content = n_content\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n encoder += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n if n_content:\n if use_identity:\n self.n_content += 1\n n_blocks -= 1\n content_transformers = [ResnetBlock(ngf * mult * 2, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)\n for i in range(n_content)]\n if use_identity:\n content_transformers.append(nn.Identity())\n self.content_transformers = nn.ModuleList(content_transformers)\n \n # add transformer\n style_transformers = [ResnetBlock(ngf * mult * 2, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)\n for i in range(input_nclass)]\n if use_identity:\n style_transformers.append(nn.Identity())\n self.transformers = nn.ModuleList(style_transformers)\n\n decoder = []\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n decoder += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n \n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n decoder += [nn.ReflectionPad2d(3)]\n decoder += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n decoder += [nn.Tanh()]\n\n self.encoder = nn.Sequential(*encoder)\n self.decoder = nn.Sequential(*decoder)\n\n def forward(self, input, style_label, auto=False, content_label=None):\n # input (N, C, H, W)\n # style_label (N, class)\n # return value: (N, whatever output shape)\n \"\"\"Standard forward\"\"\"\n encoded = self.encoder(input)\n batch_size, 
C, H, W = encoded.shape\n transformed = encoded\n if self.n_content:\n transformed = torch.stack([trans(transformed) for trans in self.content_transformers])\n transformed = torch.matmul(content_label.float().unsqueeze(1), transformed.view(self.n_content, batch_size, -1).transpose(0, 1)).squeeze(1).view(-1, C, H, W)\n transformed = torch.stack([trans(transformed) for trans in self.transformers])\n transformed = torch.matmul(style_label.float().unsqueeze(1), transformed.view(self.n_style, batch_size, -1).transpose(0, 1)).squeeze(1).view(-1, C, H, W)\n# if auto:\n# assert torch.equal(encoded, transformed)\n return self.decoder(transformed)\n\nclass ResnetGenerator(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass ResnetBlock(nn.Module):\n \"\"\"Define a Resnet block\"\"\"\n\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Initialize the Resnet block\n\n A resnet block is a conv block with skip connections\n We construct a conv block with build_conv_block function,\n and implement skip connections in <forward> function.\n Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf\n \"\"\"\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n 
\"\"\"Construct a convolutional block.\n\n Parameters:\n dim (int) -- the number of channels in the conv layer.\n padding_type (str) -- the name of padding layer: reflect | replicate | zero\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers.\n use_bias (bool) -- if the conv layer uses bias or not\n\n Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))\n \"\"\"\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n \"\"\"Forward function (with skip connections)\"\"\"\n out = x + self.conv_block(x) # add skip connections\n return out\n\n\nclass UnetGenerator(nn.Module):\n \"\"\"Create a Unet-based generator\"\"\"\n\n def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n \"\"\"Construct a Unet generator\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n num_downs (int) -- the number of downsamplings in UNet. 
For example, # if |num_downs| == 7,\n image of size 128x128 will become of size 1x1 # at the bottleneck\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n\n We construct the U-Net from the innermost layer to the outermost layer.\n It is a recursive process.\n \"\"\"\n super(UnetGenerator, self).__init__()\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer\n for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass UnetSkipConnectionBlock(nn.Module):\n \"\"\"Defines the Unet submodule with skip connection.\n X -------------------identity----------------------\n |-- downsampling -- |submodule| -- upsampling --|\n \"\"\"\n\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n \"\"\"Construct a Unet submodule with skip connections.\n\n Parameters:\n outer_nc (int) -- the number of filters in the outer conv layer\n inner_nc (int) -- the number of filters in the inner conv layer\n input_nc (int) -- the number of channels in input images/features\n submodule (UnetSkipConnectionBlock) -- previously defined submodules\n outermost (bool) -- if this module is the outermost module\n innermost (bool) -- if this module is the innermost module\n norm_layer -- normalization layer\n user_dropout (bool) -- if use dropout layers.\n \"\"\"\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] 
+ up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else: # add skip connections\n return torch.cat([x, self.model(x)], 1)\n\n\nclass NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, input_nclass=None, input_ncontent=None):\n \"\"\"Construct a PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.nclass= input_nclass\n self.ncontent = input_ncontent\n self.model = nn.Sequential(*sequence)\n self.prediction = nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw) # output 1 channel prediction map\n if input_nclass:\n self.classifier = nn.Conv2d(ndf * nf_mult, input_nclass + 1, kernel_size=kw, stride=1, padding=padw) # output n class channel prediction map\n if input_ncontent:\n self.content_classifier = nn.Conv2d(ndf * nf_mult, input_ncontent + 1, kernel_size=kw, stride=1, padding=padw) # output n content class channel prediction map\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n final_layer = self.model(input)\n if self.nclass and self.ncontent:\n return self.prediction(final_layer), self.classifier(final_layer), self.content_classifier(final_layer)\n elif self.nclass:\n return self.prediction(final_layer), self.classifier(final_layer)\n else:\n return self.prediction(final_layer)\n\n\nclass PixelDiscriminator(nn.Module):\n \"\"\"Defines a 1x1 PatchGAN discriminator (pixelGAN)\"\"\"\n\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a 1x1 PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n \"\"\"\n super(PixelDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.InstanceNorm2d\n else:\n use_bias = norm_layer != nn.InstanceNorm2d\n\n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n self.net = 
nn.Sequential(*self.net)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.net(input)\n\n#https://discuss.pytorch.org/t/is-there-anyway-to-do-gaussian-filtering-for-an-image-2d-3d-in-pytorch/12351/10\nclass GaussianSmoothing(nn.Module):\n \"\"\"\n Apply gaussian smoothing on a\n 1d, 2d or 3d tensor. Filtering is performed seperately for each channel\n in the input using a depthwise convolution.\n Arguments:\n channels (int, sequence): Number of channels of the input tensors. Output will\n have this number of channels as well.\n kernel_size (int, sequence): Size of the gaussian kernel.\n sigma (float, sequence): Standard deviation of the gaussian kernel.\n dim (int, optional): The number of dimensions of the data.\n Default value is 2 (spatial).\n \"\"\"\n def __init__(self, channels, kernel_size, sigma, dim=2):\n super(GaussianSmoothing, self).__init__()\n if isinstance(kernel_size, numbers.Number):\n kernel_size = [kernel_size] * dim\n if isinstance(sigma, numbers.Number):\n sigma = [sigma] * dim\n\n # The gaussian kernel is the product of the\n # gaussian function of each dimension.\n kernel = 1\n meshgrids = torch.meshgrid(\n [\n torch.arange(size, dtype=torch.float32)\n for size in kernel_size\n ]\n )\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \\\n torch.exp(-((mgrid - mean) / std) ** 2 / 2)\n\n # Make sure sum of values in gaussian kernel equals 1.\n kernel = kernel / torch.sum(kernel)\n\n # Reshape to depthwise convolutional weight\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n self.register_buffer('weight', kernel)\n self.groups = channels\n\n if dim == 1:\n self.conv = F.conv1d\n elif dim == 2:\n self.conv = F.conv2d\n elif dim == 3:\n self.conv = F.conv3d\n else:\n raise RuntimeError(\n 'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)\n )\n\n def forward(self, input):\n \"\"\"\n Apply gaussian filter to input.\n Arguments:\n input (torch.Tensor): Input to apply gaussian filter on.\n Returns:\n filtered (torch.Tensor): Filtered output.\n \"\"\"\n return self.conv(input, weight=self.weight, groups=self.groups, padding=2)\n"
] | [
[
"torch.optim.lr_scheduler.LambdaLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.sum",
"torch.nn.BCEWithLogitsLoss",
"torch.cuda.is_available",
"torch.nn.ReplicationPad2d",
"torch.nn.Dropout",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.tensor",
"torch.rand",
"torch.arange",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.exp",
"torch.nn.DataParallel",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.ReflectionPad2d",
"torch.nn.Tanh",
"torch.nn.Identity",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sauloal/cnidaria | [
"fe6f8c8dfed86d39c80f2804a753c05bb2e485b4"
] | [
"scripts/venv/lib/python2.7/site-packages/cogent/phylo/tree_space.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import division\nimport numpy\nimport itertools\nfrom cogent.core.tree import TreeBuilder\nfrom cogent.phylo.tree_collection import ScoredTreeCollection\nfrom cogent.util import parallel, checkpointing, progress_display as UI\n\n__author__ = \"Peter Maxwell\"\n__copyright__ = \"Copyright 2007-2012, The Cogent Project\"\n__credits__ = [\"Peter Maxwell\"]\n__license__ = \"GPL\"\n__version__ = \"1.5.3\"\n__maintainer__ = \"Peter Maxwell\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n\ndef ismallest(data, size):\n \"\"\"There are many ways to get the k smallest items from an N sequence, and\n which one performs best depends on k, N and k/N. This algorithm appears to\n beat anything heapq can do, and stays with a factor of 2 of sort() and\n min(). Is uses memory O(2*k) and so is particularly suitable for lazy \n application to large N. It returns the smallest k sorted too.\"\"\"\n limit = 2 * size\n data = iter(data)\n best = list(itertools.islice(data, limit))\n while True:\n best.sort()\n if len(best) <= size:\n break\n del best[size:]\n worst_of_best = best[-1]\n for item in data:\n if item < worst_of_best:\n best.append(item)\n if len(best) > limit:\n break\n return best\n\n# Trees are represented as \"ancestry\" matricies in which A[i,j] iff j is an\n# ancestor of i. For LS calculations the ancestry matrix is converted\n# to a \"paths\" matrix or \"split metric\" in which S[p,j] iff the path between\n# the pth pair of tips passes through edge j. For ML calculations the\n# ancestry matrix is converted back into an ordinary cogent tree object.\n\ndef tree2ancestry(tree, order=None):\n nodes = tree.unrooted().getEdgeVector()[:-1]\n if order is not None:\n lookup = dict([(k,i) for (i,k) in enumerate(order)])\n def _ordered_tips_first(n):\n if n.Children:\n return len(order)\n else:\n return lookup[n.Name]\n nodes.sort(key=_ordered_tips_first)\n\n n = len(nodes)\n A = numpy.zeros([n, n], int)\n seen = {}\n for (i, node) in enumerate(nodes):\n A[i, i] = 1\n seen[id(node)] = i\n for c in node.Children:\n A[:,i] |= A[:,seen[id(c)]]\n names = [n.Name for n in nodes if not n.Children]\n lengths = [n.Length for n in nodes]\n return (A, names, lengths)\n\ndef ancestry2tree(A, lengths, tip_names):\n \"\"\"Convert edge x edge ancestry matrix to a cogent Tree object\"\"\"\n tips = {}\n tip = 0\n for i in range(len(A)):\n if numpy.sum(A[:,i]) == 1:\n tips[i] = tip_names[tip]\n tip += 1\n assert tip == len(tip_names)\n \n constructor = TreeBuilder().createEdge\n free = {}\n for i in numpy.argsort(numpy.sum(A, axis=0)):\n children = [j for j in range(len(A)) if A[j, i] and j != i]\n child_nodes = [free.pop(j) for j in children if j in free]\n if child_nodes:\n name = None\n else:\n name = tips[i]\n if lengths is None:\n params = {}\n else:\n params = {'length':lengths[i]}\n node = constructor(child_nodes, name, params)\n free[i] = node\n return constructor(free.values(), 'root', {})\n\ndef grown(B, split_edge):\n \"\"\"Ancestry matrix 'B' with one extra leaf added at 'split_edge'.\n Row/column order within the matrix is independent of the topology it \n represents. 
The added leaf will be the last one in the matrix, which keeps \n the leaf node order the same as the order in which they are added, which is \n what is assumed by ancestry2tree and ancestry2paths\"\"\"\n n = len(B)\n A = numpy.zeros([n+2, n+2], int)\n A[:n, :n] = B\n (sibling, parent) = (n, n + 1)\n A[sibling] = A[parent] = A[split_edge]\n A[:,parent] = A[:,split_edge]\n A[sibling,split_edge] = 0\n A[parent, split_edge] = 0\n A[sibling,sibling] = 1\n A[parent,parent] = 1\n A[sibling,parent] = 1\n A[split_edge,parent] = 1\n return A\n\nclass TreeEvaluator(object):\n \"\"\"Subclass must provide makeTreeScorer and result2output\"\"\"\n \n def results2output(self, results):\n return ScoredTreeCollection(results)\n \n def evaluateTopology(self, tree):\n \"\"\"Optimal (score, tree) for the one topology 'tree'\"\"\"\n (ancestry, names, lengths) = tree2ancestry(tree)\n evaluate = self.makeTreeScorer(names)\n (err, lengths) = evaluate(ancestry)\n return self.result2output(err, ancestry, lengths, names)\n \n def evaluateTree(self, tree):\n \"\"\"score for 'tree' with lengths as-is\"\"\"\n (ancestry, names, lengths) = tree2ancestry(tree)\n evaluate = self.makeTreeScorer(names)\n (err, result) = evaluate(ancestry, lengths=lengths)\n return err\n \n def _consistentNameOrder(self, fixed_names, ordered_names=None):\n \"\"\"fixed_names followed by ordered_names without duplicates\"\"\"\n all_names = set(self.names)\n\n fixed_names_set = set(fixed_names)\n assert fixed_names_set.issubset(all_names)\n\n if ordered_names:\n assert set(ordered_names).issubset(all_names)\n else:\n ordered_names = self.names\n names = list(fixed_names) + [n for n in ordered_names \n if n not in fixed_names_set]\n return names\n \n @UI.display_wrap\n def trex(self, a=8, k=1000, start=None, order=None, return_all=False, \n filename=None, interval=None, ui=None):\n \"\"\"TrexML policy for tree sampling - all trees up to size 'a' and\n then keep no more than 'k' best trees at each tree size.\n 'order' is an optional list of tip names. \n 'start' is an optional list of initial trees. Each of the trees must\n contain the same tips.\n 'filename' and 'interval' control checkpointing.\n \n Advanced step-wise addition algorithm\n M. J. Wolf, S. Easteal, M. Kahn, B. D. McKay, and L. S. 
Jermiin.\n Trexml: a maximum-likelihood approach for extensive tree-space\n exploration.\n Bioinformatics, 16(4):383 94, 2000.\"\"\"\n \n checkpointer = checkpointing.Checkpointer(filename, interval)\n if checkpointer.available():\n (init_tree_size, fixed_names, trees) = checkpointer.load()\n names = self._consistentNameOrder(fixed_names, order)\n elif start is not None:\n if not isinstance(start, list):\n start = [start]\n fixed_names = start[0].getTipNames()\n names = self._consistentNameOrder(fixed_names, order)\n trees = []\n for tree in start:\n # check the start tree represents a subset of tips\n assert set(tree.getTipNames()) < set(self.names), \\\n \"Starting tree names not a subset of the sequence names\"\n \n (ancestry, fixed_names2, lengths) = tree2ancestry(\n tree, order=fixed_names)\n assert fixed_names2 == fixed_names\n trees.append((None, None, ancestry))\n init_tree_size = len(fixed_names)\n else:\n trees = [(None, None, numpy.identity(3, int))]\n names = self._consistentNameOrder([], order)\n init_tree_size = 3\n \n tree_size = len(names)\n assert tree_size > 3\n if a > tree_size:\n a = tree_size\n if a < 4:\n a = 4\n\n # All trees of size a-1, no need to compare them\n for n in range(init_tree_size+1, a):\n trees2 = []\n for (err2, lengths2, ancestry) in trees:\n for split_edge in range(len(ancestry)):\n ancestry2 = grown(ancestry, split_edge)\n trees2.append((None, None, ancestry2))\n trees = trees2\n init_tree_size = n\n \n # Pre calculate how much work is to be done, for progress display\n tree_count = len(trees)\n total_work = 0\n work_done = [0] * (init_tree_size+1)\n for n in range(init_tree_size+1, tree_size+1):\n evals = tree_count * (n*2-5)\n total_work += evals * n\n tree_count = min(k, evals)\n work_done.append(total_work)\n \n # For each tree size, grow at each edge of each tree. Keep best k.\n for n in range(init_tree_size+1, tree_size+1):\n evaluate = self.makeTreeScorer(names[:n]) \n\n def grown_tree(spec):\n (tree_ordinal, tree, split_edge) = spec\n (old_err, old_lengths, old_ancestry) = tree\n ancestry = grown(old_ancestry, split_edge)\n (err, lengths) = evaluate(ancestry)\n return (err, tree_ordinal, split_edge, lengths, ancestry)\n \n specs = [(i, tree, edge) \n for (i,tree) in enumerate(trees) \n for edge in range(n*2-5)]\n\n candidates = ui.imap(grown_tree, specs, noun=('%s leaf tree' % n),\n start=work_done[n-1]/total_work, end=work_done[n]/total_work)\n \n best = ismallest(candidates, k)\n \n trees = [(err, lengths, ancestry) for (err, parent_ordinal, \n split_edge, lengths, ancestry) in best]\n \n checkpointer.record((n, names[:n], trees))\n \n results = (self.result2output(err, ancestry, lengths, names)\n for (err, lengths, ancestry) in trees)\n if return_all:\n result = self.results2output(results)\n else:\n result = results.next()\n return result\n \n"
] | [
[
"numpy.identity",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jcl5m1/CVToolsPython | [
"02f5911abbc6463c96c12a55acb043230dc2b30c"
] | [
"RandomDotCorrespondence/homography.py"
] | [
"########################################################################\n# Module to compute homographies\t\t\t\t\t\t\t\t\t #\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n# Author : Alexis Mignon\t\t\t\t\t\t\t\t\t\t\t #\n# email : [email protected]\t\t\t\t\t\t\t #\n# date : 10/03/2010\t\t\t\t\t\t\t\t\t\t\t\t #\n########################################################################\n\n\"\"\" \n\tModule to compute homographies between two sets of 2D points\n\t\n\timplemented functions :\n\t - find_homography(points1,points2) : finds the homography between \n\t two sets of 2D points\n\t - find_affine_homography(points1,points2) : finds the affine \n\t homography between two sets of 2D points\n\t - apply_homography(H,points) : applies homography H to the set of \n\t 2D points 'points'\n\n\texample :\n\t>>> from homography import *\n\t>>> \n\t>>> points1 = np.array([[ 0., 0. ],\n\t>>>\t\t\t\t\t [ 1., 0. ],\n\t>>>\t\t\t\t\t [ 0., 1. ],\n\t>>>\t\t\t\t\t [ 1., 1. ]])\n\t>>> \n\t>>> points2 = np.array([[ 0. , 0. ],\n\t>>>\t\t\t\t\t [ 1. , 0. ],\n\t>>>\t\t\t\t\t [ 0.25, 1. ],\n\t>>>\t\t\t\t\t [ 0.75, 1. ]])\n\t>>> \n\t>>> points3 = np.array([[-1., 0.],\n\t>>>\t\t\t\t\t [ 0.,-1.],\n\t>>>\t\t\t\t\t [ 0., 1.],\n\t>>>\t\t\t\t\t [ 1., 0.]])\n\t>>> \n\t>>> H1 = find_homography(points1,points2)\n\t>>> print H1\n\t>>> print apply_homography(H1,points1)\n\t>>> H2 = find_affine_homography(points1,points3)\n\t>>> print H2\n\t>>> print apply_homography(H2,points1)\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.linalg import svd, lstsq\n\ndef find_homography(points1, points2):\n\tif points1.shape[0] != points2.shape[0] : raise ValueError(\"The number of input and output points mismatches\")\n\tif points1.shape[1] == 2 :\n\t\tp1 = np.ones((len(points1),3),'float64')\n\t\tp1[:,:2] = points1\n\telif points1.shape[1] == 3 : p1 = points1\n\telse : raise ValueError(\"Bad shape for input points\")\n\t\n\tif points2.shape[1] == 2 :\n\t\tp2 = np.ones((len(points2),3),'float64')\n\t\tp2[:,:2] = points2\n\telif points2.shape[1] == 3 : p2 = points2\n\telse : raise ValueError(\"Bad shape for output points\")\n\t\n\tnpoints = len(points1)\n\t\n\tA = np.zeros((3*npoints,9),'float64')\n\t\n\tfor i in xrange(npoints):\n\t\tp1i = p1[i]\n\t\tx2i,y2i,w2i = p2[i]\n\t\txpi = x2i*p1i\n\t\typi = y2i*p1i\n\t\twpi = w2i*p1i\n\t\t\n\t\tA[i*3 ,3:6] = -wpi\n\t\tA[i*3 ,6:9] = ypi\n\t\tA[i*3+1,0:3] = wpi\n\t\tA[i*3+1,6:9] = -xpi\n\t\tA[i*3+2,0:3] = -ypi\n\t\tA[i*3+2,3:6] = xpi\n\n\tU,s,Vt = svd(A,full_matrices = False, overwrite_a = True)\n\tdel U,s\n\th = Vt[-1]\n\tH = h.reshape(3,3)\n\treturn H\n\ndef find_affine_homography(points1,points2):\n\tif points1.shape[0] != points2.shape[0] : raise ValueError(\"The number of input and output points mismatches\")\n\tif points1.shape[1] == 2 :\n\t\tp1 = np.ones((len(points1),3),'float64')\n\t\tp1[:,:2] = points1\n\telif points1.shape[1] == 3 : p1 = points1\n\telse : raise ValueError(\"Bad shape for input points\")\n\t\n\tif points2.shape[1] == 2 :\n\t\tp2 = np.ones((len(points2),3),'float64')\n\t\tp2[:,:2] = points2\n\telif points2.shape[1] == 3 : p2 = points2\n\telse : raise ValueError(\"Bad shape for output points\")\n\t\n\tnpoints = len(points1)\n\t\n\tA = np.zeros((3*npoints,6),'float64')\n\tb = np.zeros((3*npoints,1),'float64')\n\tfor i in xrange(npoints):\n\t\tp1i = p1[i]\n\t\tx2i,y2i,w2i = p2[i]\n\t\txpi = x2i*p1i\n\t\typi = y2i*p1i\n\t\twpi = w2i*p1i\n\t\t\n\t\tA[i*3 ,3:6] = -wpi\n\t\tA[i*3+1,0:3] = wpi\n\t\tA[i*3+2,0:3] = -ypi\n\t\tA[i*3+2,3:6] = xpi\n\t\t\n\t\tb[i*3 ] = 
-y2i*p1i[2]\n\t\tb[i*3+1] = x2i*p1i[2]\n\n\th = lstsq(A,b,overwrite_a = True, overwrite_b = True)[0]\n\tH = np.zeros( (3,3) , 'float64' )\n\tH[:2,:] = h.reshape(2,3)\n\tH[2,2] = 1\n\treturn H\n\ndef apply_homography(H, points):\n\tp = np.ones((len(points), 3), 'float64')\n\tp[:, :2] = points\n\tpp = np.dot(p, H.T)\n\tpp[:, :2]/=pp[:, 2].reshape(len(p), 1)\n\treturn pp[:, :2]\n"
] | [
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.zeros",
"scipy.linalg.lstsq"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
effigies/mriqc | [
"de60ff0f65e4fe0e315143fe3b75ecd940beb2b1"
] | [
"mriqc/bin/abide2bids.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: oesteban\n# @Date: 2016-03-16 11:28:27\n# @Last Modified by: oesteban\n# @Last Modified time: 2016-11-15 09:30:00\n\n\"\"\"\nABIDE2BIDS downloader tool\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport os.path as op\nimport errno\nimport shutil\nimport json\nimport subprocess as sp\nimport tempfile\nfrom xml.etree import ElementTree as et\nfrom multiprocessing import Pool\nfrom argparse import ArgumentParser, RawTextHelpFormatter\nimport numpy as np\n\ndef main():\n \"\"\"Entry point\"\"\"\n parser = ArgumentParser(description='ABIDE2BIDS downloader',\n formatter_class=RawTextHelpFormatter)\n g_input = parser.add_argument_group('Inputs')\n g_input.add_argument('-i', '--input-abide-catalog', action='store',\n required=True)\n g_input.add_argument('-n', '--dataset-name', action='store',\n default='ABIDE Dataset')\n g_input.add_argument('-u', '--nitrc-user', action='store',\n default=os.getenv('NITRC_USER'))\n g_input.add_argument('-p', '--nitrc-password', action='store',\n default=os.getenv('NITRC_PASSWORD'))\n\n\n g_outputs = parser.add_argument_group('Outputs')\n g_outputs.add_argument('-o', '--output-dir', action='store',\n default='ABIDE-BIDS')\n\n opts = parser.parse_args()\n\n if opts.nitrc_user is None or opts.nitrc_password is None:\n raise RuntimeError('NITRC user and password are required')\n\n dataset_desc = {'BIDSVersion': '1.0.0rc3',\n 'License': 'CC Attribution-NonCommercial-ShareAlike 3.0 Unported',\n 'Name': opts.dataset_name}\n\n out_dir = op.abspath(opts.output_dir)\n try:\n os.makedirs(out_dir)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise exc\n\n with open(op.join(out_dir, 'dataset_description.json'), 'w') as dfile:\n json.dump(dataset_desc, dfile)\n\n catalog = et.parse(opts.input_abide_catalog).getroot()\n urls = [el.get('URI') for el in catalog.iter() if el.get('URI') is not None]\n\n pool = Pool()\n args_list = [(url, opts.nitrc_user, opts.nitrc_password, out_dir)\n for url in urls]\n res = pool.map(fetch, args_list)\n\n tsv_data = np.array([('subject_id', 'site_name')] + res)\n np.savetxt(op.join(out_dir, 'participants.tsv'), tsv_data, fmt='%s', delimiter='\\t')\n\n\ndef fetch(args):\n \"\"\" Downloads a subject and formats it into BIDS \"\"\"\n out_dir = None\n if len(args) == 3:\n url, user, password = args\n else:\n url, user, password, out_dir = args\n\n tmpdir = tempfile.mkdtemp()\n if out_dir is None:\n out_dir = os.getcwd()\n else:\n out_dir = op.abspath(out_dir)\n\n pkg_id = [u[9:] for u in url.split('/') if u.startswith('NITRC_IR_')][0]\n sub_file = op.join(tmpdir, '%s.zip' % pkg_id)\n\n cmd = ['curl', '-s', '-u', '%s:%s' % (user, password), '-o', sub_file, url]\n sp.check_call(cmd)\n sp.check_call(['unzip', '-qq', '-d', tmpdir, '-u', sub_file])\n\n abide_root = op.join(tmpdir, 'ABIDE')\n files = []\n for root, path, fname in os.walk(abide_root):\n if fname and (fname[0].endswith('nii') or fname[0].endswith('nii.gz')):\n if path:\n root = op.join(root, path[0])\n files.append(op.join(root, fname[0]))\n\n site_name, sub_str = files[0][len(abide_root) + 1:].split('/')[0].split('_')\n subject_id = 'sub-' + sub_str\n\n for i in files:\n ext = '.nii.gz'\n if i.endswith('.nii'):\n ext = '.nii'\n if 'mprage' in i:\n bids_dir = op.join(out_dir, subject_id, 'anat')\n try:\n os.makedirs(bids_dir)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise exc\n shutil.copy(i, op.join(bids_dir, subject_id + '_T1w' + 
ext))\n\n if 'rest' in i:\n bids_dir = op.join(out_dir, subject_id, 'func')\n try:\n os.makedirs(bids_dir)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise exc\n shutil.copy(i, op.join(bids_dir, subject_id + '_rest_bold' + ext))\n\n shutil.rmtree(tmpdir, ignore_errors=True, onerror=_myerror)\n\n print('Successfully processed subject %s from site %s' % (subject_id[4:], site_name))\n return subject_id[4:], site_name\n\ndef _myerror(msg):\n print('WARNING: Error deleting temporal files: %s' % msg)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DT6A/UTGN | [
"ad8eda3a47d9f0f3b278ff86a0a6ede3a3391846"
] | [
"UTGN/model/main.py"
] | [
"\"\"\"Main script for executing training / testing.\n\"\"\"\n\nimport os\nimport sys\nimport signal\nimport time\nimport argparse\nimport fileinput\nfrom copy import deepcopy\nfrom glob import glob\nfrom shutil import rmtree\nfrom pprint import pprint\nimport numpy as np\nimport tensorflow as tf\nfrom utils import *\nfrom model import RGNModel\nfrom config import RGNConfig, RunConfig\n\n# for mac, to avoid OMP: Error #15\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n# directory names\nRUNS_DIRNAME = 'runs'\nDATAS_DIRNAME = 'data'\nCHECKPOINTS_DIRNAME = 'checkpoints'\nLOGS_DIRNAME = 'logs'\nALPHABETS_DIRNAME = 'alphabets'\nFULL_TRAINING_DIRNAME = 'training'\nSAMPLE_VALIDATION_DIRNAME = 'validation'\nFULL_TESTING_DIRNAME = 'testing'\nTRAINING_OUTPUTS_DIRNAME = 'outputsTraining'\nVALIDATION_OUTPUTS_DIRNAME = 'outputsValidation'\nTESTING_OUTPUTS_DIRNAME = 'outputsTesting'\n\n\n# exception classes\nclass MilestoneError(RuntimeError):\n \"\"\" Exception raised for missing milestone \"\"\"\n pass\n\n\nclass DeadGradientError(RuntimeError):\n \"\"\" Exception raised for zero gradient \"\"\"\n pass\n\n\n# logging functions\ndef evaluate_and_log(log_file, configs, models, session):\n \"\"\"Evaluate model and log.\n\n \"\"\"\n # evaluation of weighted losses\n wt_train_loss_dict = models['eval_wt_train'].evaluate(session) \\\n if configs['run'].evaluation['include_weighted_training'] \\\n else {}\n wt_val_loss_dict = models['eval_wt_val'].evaluate(session) \\\n if configs['run'].evaluation['include_weighted_validation'] \\\n else {}\n wt_test_loss_dict = models['eval_wt_test'].evaluate(session) \\\n if configs['run'].evaluation['include_weighted_testing'] \\\n else {}\n\n # diagnostics\n if configs['run'].evaluation['include_diagnostics']: \n diagnostics = models['training'].diagnose(session)\n else:\n diagnostics = {k: float('nan') for k in (\n 'min_weight',\n 'max_weight',\n 'min_grad',\n 'max_grad',\n 'curriculum_step',\n 'curriculum_quantiles')}\n\n # Retrieve the correct loss.\n for loss_key in ['tertiary_loss_all']:\n if loss_key in wt_train_loss_dict:\n wt_train_loss = wt_train_loss_dict[loss_key]\n break\n else:\n wt_train_loss = float('nan')\n\n if configs['run'].evaluation['include_weighted_validation']:\n wt_val_loss = {}\n for loss_type in ['tertiary_loss', 'min_tertiary_loss_achieved']:\n for subgroup in ['all'] + configs['eval_wt_val'].io['evaluation_sub_groups']:\n loss_key = loss_type + '_' + subgroup\n wt_val_loss.update({loss_key: wt_val_loss_dict.get(loss_key, float('nan'))})\n wt_val_loss_subgroups_string = ''.join(\n ['\\tValidation_' + grp + ': {tertiary_loss_' + grp + ':.3f}' \\\n for grp in configs['eval_wt_val'].io['evaluation_sub_groups']])\n else:\n wt_val_loss = {'tertiary_loss_all': float('nan')}\n wt_val_loss_subgroups_string = ''\n\n for loss_key in ['tertiary_loss_all']:\n if loss_key in wt_test_loss_dict:\n wt_test_loss = wt_test_loss_dict[loss_key]\n break\n else:\n wt_test_loss = float('nan')\n\n # Log string\n global_step = models['training'].current_step(session)\n base_log = ('Iteration: {0}\\tTrain: {1:.3f}\\t' +\n 'Validation: {2:.3f}\\tTest: {3:.3f}\\t' +\n 'Weight: {min_weight:.4e} {max_weight:.4e}\\t' +\n 'Update: {min_grad:.4e} {max_grad:.4e}' +\n wt_val_loss_subgroups_string\n ).format(\n global_step,\n wt_train_loss,\n wt_val_loss['tertiary_loss_all'],\n wt_test_loss,\n **merge_dicts(diagnostics, wt_val_loss))\n\n # Additional diagnostics and losses if there's a curriculum.\n if configs['training'].curriculum['mode'] is not None:\n # evaluation of 
unweighted losses\n unwt_train_loss_dict = models['eval_unwt_train'].evaluate(session)\\\n if configs['run'].evaluation['include_unweighted_training']\\\n else {}\n unwt_val_loss_dict = models['eval_unwt_val'].evaluate(session)\\\n if configs['run'].evaluation['include_unweighted_validation']\\\n else {}\n unwt_test_loss_dict = models['eval_unwt_test'].evaluate(session)\\\n if configs['run'].evaluation['include_unweighted_testing']\\\n else {}\n\n # Retrieve the correct loss.\n for loss_key in ['tertiary_loss_all']:\n if loss_key in unwt_train_loss_dict:\n unwt_train_loss = unwt_train_loss_dict[loss_key]\n break\n else:\n unwt_train_loss = float('nan')\n\n if configs['run'].evaluation['include_unweighted_validation']:\n unwt_val_loss = {}\n for loss_type in ['tertiary_loss', 'min_tertiary_loss_achieved']:\n for subgroup in ['all'] + configs['eval_unwt_val'].io['evaluation_sub_groups']:\n loss_key = loss_type + '_' + subgroup\n unwt_val_loss.update({loss_key: unwt_val_loss_dict.get(loss_key, float('nan'))})\n unwt_val_loss_subgroups_string = ''.join(\n ['\\tUnweighted Validation_' + grp + ': {tertiary_loss_' + grp + ':.3f}'\\\n for grp in configs['eval_unwt_val'].io['evaluation_sub_groups']])\n else:\n unwt_val_loss = {'tertiary_loss_all': float('nan')}\n unwt_val_loss_subgroups_string = ''\n\n for loss_key in ['tertiary_loss_all']:\n if loss_key in unwt_test_loss_dict:\n unwt_test_loss = unwt_test_loss_dict[loss_key]\n break\n else:\n unwt_test_loss = float('nan')\n\n # Log string\n extended_log = ('\\tCurriculum Iteration: {curriculum_step:.3f}\\t' + \\\n 'Unweighted Train: {0:.3f}\\t' + \\\n 'Unweighted Validation: {1:.3f}\\t' + \\\n 'Unweighted Test: {2:.3f}\\t' + \\\n 'Curriculum Quantile: {curriculum_quantiles}' + \\\n unwt_val_loss_subgroups_string\n ).format(unwt_train_loss, unwt_val_loss['tertiary_loss_all'], unwt_test_loss, **merge_dicts(diagnostics, unwt_val_loss))\n else:\n extended_log = ''\n\n # Log to disk\n with open(log_file, 'a') as f: \n f.write(base_log + extended_log + '\\n')\n\n if 'alphabet' in diagnostics: \n with open(log_file + '.alphabet', 'a') as f:\n np.savetxt(f, diagnostics['alphabet'], footer='\\n')\n\n # prep return 'package'\n diagnostics.update({ 'wt_train_loss': wt_train_loss, 'wt_val_loss': wt_val_loss, 'wt_test_loss': wt_test_loss})\n if configs['training'].curriculum['mode'] is not None:\n diagnostics.update({'unwt_train_loss': unwt_train_loss, 'unwt_val_loss': unwt_val_loss, 'unwt_test_loss': unwt_test_loss})\n\n return diagnostics\n\ndef predict_and_log(log_dir, configs, models, session):\n \"\"\"Predict 3D structure and log.\n\n Args:\n log_dir: directory to log\n configs: config dict\n models: tf models\n session: tf session\n\n Returns:\n None\n \"\"\"\n # assumes that the validation reference designation \n # (wt vs. 
unwt) can be used for the training and test sets as well\n val_ref_set_prefix = 'un' if configs['run'].optimization['validation_reference'] == 'unweighted' else ''\n\n for label, model in models.items():\n if 'eval' in label:\n generate = True\n\n for case in switch(label):\n if case('eval_' + val_ref_set_prefix + 'wt_train'):\n outputs_dir = os.path.join(log_dir, TRAINING_OUTPUTS_DIRNAME)\n elif case('eval_' + val_ref_set_prefix + 'wt_val'):\n outputs_dir = os.path.join(log_dir, VALIDATION_OUTPUTS_DIRNAME)\n elif case('eval_' + val_ref_set_prefix + 'wt_test'):\n outputs_dir = os.path.join(log_dir, TESTING_OUTPUTS_DIRNAME)\n else:\n generate = False\n\n if generate:\n if not os.path.exists(outputs_dir): \n os.makedirs(outputs_dir)\n\n for _ in range(configs[label].queueing['num_evaluation_invocations']):\n dicts = model.predict(session)\n for idx, dict_ in dicts.items():\n if 'tertiary' in dict_:\n np.savetxt(\n os.path.join(\n outputs_dir,\n idx.decode(\"utf-8\") + '.tertiary'), \n dict_['tertiary'], header='\\n')\n if 'recurrent_states' in dict_ and idx is not None:\n print(idx, type(idx))\n np.savetxt(\n os.path.join(outputs_dir, idx.decode(\"utf-8\") + '.recurrent_states'), \n dict_['recurrent_states'])\n\ndef run_model(args):\n \"\"\"Either train a model or use it to predict.\n \n Restart if training failed. \n\n Args:\n args: Parsed arguments from command line.\n\n Returns:\n whether to restart or not.\n \"\"\"\n\n # create config and model collection objects, and retrieve the run config\n configs = {}\n configs.update({'run': RunConfig(args.config_file)})\n\n # set GPU-related environmental options and config settings\n # set it on the terminal instead\n # TODO: add this back in\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) if args.gpu is not None else ''\n # os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n # files and directories\n base_dir = args.base_directory\n run_dir = os.path.join(\n base_dir, \n RUNS_DIRNAME,\n configs['run'].names['run'], \n configs['run'].names['dataset'])\n data_dir = os.path.join(\n base_dir, \n DATAS_DIRNAME,\n configs['run'].names['dataset'])\n checkpoints_dir = os.path.join(\n run_dir,\n CHECKPOINTS_DIRNAME)\n logs_dir = os.path.join(\n run_dir,\n LOGS_DIRNAME)\n stdout_err_file = os.path.join(\n base_dir, \n LOGS_DIRNAME,\n configs['run'].names['run'] + '.log')\n alphabet_file = os.path.join(\n data_dir, \n ALPHABETS_DIRNAME, \n configs['run'].names['alphabet'] + '.csv') \\\n if configs['run'].names['alphabet'] is not None else None\n\n # this is all for evaluation models \n # (including training, so training_batch_size is for evaluation)\n full_training_glob = os.path.join(\n data_dir, \n FULL_TRAINING_DIRNAME, \n configs['run'].io['full_training_glob'])\n sample_training_glob = os.path.join(\n data_dir, \n FULL_TRAINING_DIRNAME,\n configs['run'].io['sample_training_glob'])\n training_batch_size = configs['run'].evaluation['num_training_samples']\n training_invocations = configs['run'].evaluation['num_training_invocations']\n\n validation_glob = os.path.join(\n data_dir, \n SAMPLE_VALIDATION_DIRNAME, \n configs['run'].io['sample_validation_glob'])\n validation_batch_size = configs['run'].evaluation['num_validation_samples']\n validation_invocations = configs['run'].evaluation['num_validation_invocations']\n\n testing_glob = os.path.join(\n data_dir, \n FULL_TESTING_DIRNAME,\n configs['run'].io['full_testing_glob'])\n testing_batch_size = configs['run'].evaluation['num_testing_samples']\n testing_invocations = 
configs['run'].evaluation['num_testing_invocations']\n\n if args.prediction_only:\n eval_num_epochs = 1\n training_batch_size = validation_batch_size = testing_batch_size = 1\n training_invocations = validation_invocations = testing_invocations = 1\n else:\n eval_num_epochs = None\n \n # redirect stdout/err to file\n sys.stderr.flush()\n if not os.path.exists(os.path.dirname(stdout_err_file)): \n os.makedirs(os.path.dirname(stdout_err_file))\n stdout_err_file_handle = open(stdout_err_file, 'w')\n os.dup2(stdout_err_file_handle.fileno(), sys.stderr.fileno())\n sys.stdout = stdout_err_file_handle\n\n # select device placement taking into consideration the \n # interaction between training and evaluation models\n # fod: functions on device\n # dd: default device\n if configs['run'].computing['training_device'] == 'GPU' \\\n and configs['run'].computing['evaluation_device'] == 'GPU':\n fod_training = {'/cpu:0': ['point_to_coordinate']}\n fod_evaluation = {'/cpu:0': ['point_to_coordinate']}\n dd_training = ''\n dd_evaluation = ''\n elif configs['run'].computing['training_device'] == 'GPU' \\\n and configs['run'].computing['evaluation_device'] == 'CPU':\n fod_training = {'/cpu:0': ['point_to_coordinate', 'loss_history']}\n fod_evaluation = {}\n dd_training = ''\n dd_evaluation = '/cpu:0'\n else:\n fod_training = {}\n fod_evaluation = {}\n dd_training = '/cpu:0'\n dd_evaluation = '/cpu:0'\n\n # create models configuration templates\n configs.update(\n {'training': RGNConfig(\n args.config_file, \n {\n 'name': 'training',\n 'dataFilesGlob': full_training_glob,\n 'checkpointsDirectory': checkpoints_dir,\n 'logsDirectory': logs_dir,\n 'fileQueueCapacity': configs['run'].queueing['training_file_queue_capacity'],\n 'batchQueueCapacity': configs['run'].queueing['training_batch_queue_capacity'],\n 'minAfterDequeue': configs['run'].queueing['training_min_after_dequeue'],\n 'shuffle': configs['run'].queueing['training_shuffle'],\n 'tertiaryNormalization': configs['run'].loss['training_tertiary_normalization'],\n 'batchDependentNormalization': configs['run'].loss['training_batch_dependent_normalization'],\n 'alphabetFile': alphabet_file,\n 'functionsOnDevices': fod_training,\n 'defaultDevice': dd_training})})\n\n configs.update(\n {'evaluation': RGNConfig(\n args.config_file,\n {\n 'fileQueueCapacity': configs['run'].queueing['evaluation_file_queue_capacity'],\n 'batchQueueCapacity': configs['run'].queueing['evaluation_batch_queue_capacity'],\n 'minAfterDequeue': configs['run'].queueing['evaluation_min_after_dequeue'],\n 'shuffle': configs['run'].queueing['evaluation_shuffle'],\n 'tertiaryNormalization': configs['run'].loss['evaluation_tertiary_normalization'],\n 'batchDependentNormalization': configs['run'].loss['evaluation_batch_dependent_normalization'],\n 'alphabetFile': alphabet_file,\n 'functionsOnDevices': fod_evaluation,\n 'defaultDevice': dd_evaluation,\n 'numEpochs': eval_num_epochs,\n 'bucketBoundaries': None})})\n\n # Override included evaluation models with list from command-line if specified \n # (assumes none are included and then includes ones that are specified)\n if args.evaluation_model:\n for prefix in ['', 'un']:\n for group in ['training', 'validation', 'testing']:\n configs['run'].evaluation.update(\n {'include_' + prefix + 'weighted_' + group: False})\n for entry in args.evaluation_model:\n configs['run'].evaluation.update({'include_' + entry: True})\n\n # If predicting, turn off evaluation loss.\n # If not, ensure that correct validation reference is chosen.\n if 
args.prediction_only:\n configs['evaluation'].loss['include'] = False\n else:\n if ((not configs['run'].evaluation['include_weighted_validation']) \\\n and configs['run'].optimization['validation_reference'] == 'weighted') \\\n or ((not configs['run'].evaluation['include_unweighted_validation']) \\\n and configs['run'].optimization['validation_reference'] == 'unweighted'):\n raise RuntimeError('Chosen validation reference is not included in run.')\n \n\n # ??? but result is not >=1 \n # rescaling needed to adjust for how frequently loss_history is updated\n if configs['training'].curriculum['behavior'] == 'loss_change': \n configs['training'].curriculum['change_num_iterations'] \\\n //= configs['run'].io['evaluation_frequency'] # result must be >=1\n configs['evaluation'].curriculum['change_num_iterations'] \\\n //= configs['run'].io['evaluation_frequency'] # ditto\n\n # create training model\n models = {}\n models.update({'training': RGNModel('training', configs['training'])})\n print('*** training configuration ***')\n pprint(configs['training'].__dict__)\n\n # create weighted training evaluation model (conditional)\n if configs['run'].evaluation['include_weighted_training']:\n configs.update({'eval_wt_train': deepcopy(configs['evaluation'])})\n configs['eval_wt_train'].io['name'] = 'evaluation_wt_training'\n configs['eval_wt_train'].io['data_files_glob'] = sample_training_glob\n configs['eval_wt_train'].optimization['batch_size'] = training_batch_size\n configs['eval_wt_train'].queueing['num_evaluation_invocations'] = training_invocations\n models.update({'eval_wt_train': RGNModel('evaluation', configs['eval_wt_train'])})\n print('\\n\\n\\n*** weighted training evaluation configuration ***')\n pprint(configs['eval_wt_train'].__dict__)\n\n # create weighted validation evaluation model (conditional)\n if configs['run'].evaluation['include_weighted_validation']:\n configs.update({'eval_wt_val': deepcopy(configs['evaluation'])})\n configs['eval_wt_val'].io['name'] = 'evaluation_wt_validation'\n configs['eval_wt_val'].io['data_files_glob'] = validation_glob\n configs['eval_wt_val'].optimization['batch_size'] = validation_batch_size\n configs['eval_wt_val'].queueing['num_evaluation_invocations'] = validation_invocations\n if configs['run'].optimization['validation_reference'] == 'weighted': \n configs['eval_wt_val'].curriculum['update_loss_history'] = True\n models.update({'eval_wt_val': RGNModel('evaluation', configs['eval_wt_val'])})\n print('\\n\\n\\n*** weighted validation evaluation configuration ***')\n pprint(configs['eval_wt_val'].__dict__)\n\n # create weighted testing evaluation model (conditional)\n if configs['run'].evaluation['include_weighted_testing']:\n configs.update({'eval_wt_test': deepcopy(configs['evaluation'])})\n configs['eval_wt_test'].io['name'] = 'evaluation_wt_testing'\n configs['eval_wt_test'].io['data_files_glob'] = testing_glob\n configs['eval_wt_test'].optimization['batch_size'] = testing_batch_size\n configs['eval_wt_test'].queueing['num_evaluation_invocations'] = testing_invocations\n models.update({'eval_wt_test': RGNModel('evaluation', configs['eval_wt_test'])})\n print('\\n\\n\\n*** weighted testing evaluation configuration ***')\n pprint(configs['eval_wt_test'].__dict__)\n\n # create equivalents for unweighted loss if there's a curriculum.\n if configs['training'].curriculum['mode'] is not None:\n # create unweighted training evaluation model (conditional)\n if configs['run'].evaluation['include_unweighted_training']:\n configs.update({'eval_unwt_train': 
deepcopy(configs['evaluation'])})\n configs['eval_unwt_train'].io['name'] = 'evaluation_unwt_training'\n configs['eval_unwt_train'].io['data_files_glob'] = sample_training_glob\n configs['eval_unwt_train'].optimization['batch_size'] = training_batch_size\n configs['eval_unwt_train'].queueing['num_evaluation_invocations'] = training_invocations\n configs['eval_unwt_train'].curriculum['mode'] = None\n configs['eval_unwt_train'].curriculum['behavior'] = None\n models.update({'eval_unwt_train': RGNModel('evaluation', configs['eval_unwt_train'])})\n \n # create unweighted validation evaluation model (conditional)\n if configs['run'].evaluation['include_unweighted_validation']:\n configs.update({'eval_unwt_val': deepcopy(configs['evaluation'])})\n configs['eval_unwt_val'].io['name'] = 'evaluation_unwt_validation'\n configs['eval_unwt_val'].io['data_files_glob'] = validation_glob\n configs['eval_unwt_val'].optimization['batch_size'] = validation_batch_size\n configs['eval_unwt_val'].queueing['num_evaluation_invocations'] = validation_invocations\n configs['eval_unwt_val'].curriculum['mode'] = None\n configs['eval_unwt_val'].curriculum['behavior'] = None\n if configs['run'].optimization['validation_reference'] == 'unweighted': \n configs['eval_unwt_val'].curriculum['update_loss_history'] = True\n models.update({'eval_unwt_val': RGNModel('evaluation', configs['eval_unwt_val'])})\n\n # create unweighted testing evaluation model (conditional)\n if configs['run'].evaluation['include_unweighted_testing']:\n configs.update({'eval_unwt_test': deepcopy(configs['evaluation'])})\n configs['eval_unwt_test'].io['name'] = 'evaluation_unwt_testing'\n configs['eval_unwt_test'].io['data_files_glob'] = testing_glob\n configs['eval_unwt_test'].optimization['batch_size'] = testing_batch_size\n configs['eval_unwt_test'].queueing['num_evaluation_invocations'] = testing_invocations\n configs['eval_unwt_test'].curriculum['mode'] = None\n configs['eval_unwt_test'].curriculum['behavior'] = None\n models.update({'eval_unwt_test': RGNModel('evaluation', configs['eval_unwt_test'])})\n\n # start head model and related prep\n stdout_err_file_handle.flush()\n session = models['training'].start(list(models.values()))\n global_step = models['training'].current_step(session)\n current_log_step = (global_step // configs['run'].io['prediction_frequency']) + 1\n log_dir = os.path.join(run_dir, str(current_log_step))\n restart = False\n\n trainable_params = count_trainable_params()\n print(\"\\n\\n\\n*** Trainable Parameters: {} ***\".format(trainable_params))\n # from sys import exit\n # exit()\n\n # predict or train depending on set mode behavior\n if args.prediction_only:\n try:\n while not models['training'].is_done():\n predict_and_log(log_dir, configs, models, session)\n except tf.errors.OutOfRangeError:\n pass\n except:\n print(('Unexpected error: ', sys.exc_info()[0]))\n raise\n finally:\n if models['training']._is_started: \n models['training'].finish(session, save=False)\n stdout_err_file_handle.close()\n else:\n # clean up post last checkpoint residue if any\n if global_step != 0:\n # remove future directories\n #last_log_step = sorted(\n # [int(os.path.basename(os.path.normpath(dir))) \\\n # for dir in glob(os.path.join(run_dir, '*[0-9]'))])[-1]\n #for step in range(current_log_step + 1, last_log_step + 1): \n # rmtree(os.path.join(run_dir, str(step))) \n\n # remove future log entries in current log files\n log_file = os.path.join(log_dir, 'error.log')\n if os.path.exists(log_file):\n # with open(log_file, 'rw+') as f:\n 
with open(log_file, 'w+') as f:\n while True:\n new_line = f.readline().split()\n if len(new_line) > 1:\n step = int(new_line[1])\n if step == global_step:\n f.truncate()\n break\n # reached end without seeing global_step, \n # means checkpoint is ahead of last recorded log entry\n else:\n break\n\n # training loop\n try:\n while not models['training'].is_done():\n # Train for one step\n global_step, ids = models['training'].train(session)\n\n # Set and create logging directory and files if needed\n log_dir = os.path.join(\n run_dir, \n str((global_step // configs['run'].io['prediction_frequency']) + 1))\n log_file = os.path.join(log_dir, 'error.log')\n if not os.path.exists(log_dir): \n os.makedirs(log_dir)\n\n # Evaluate error, get diagnostics, and raise exceptions if necessary\n if global_step % configs['run'].io['evaluation_frequency'] == 0:\n diagnostics = evaluate_and_log(\n log_file, \n configs, \n models, \n session)\n\n # restart if a milestone is missed\n val_ref_set_prefix = 'un' if configs['run'].optimization['validation_reference'] == 'unweighted' else ''\n min_loss_achieved = diagnostics[val_ref_set_prefix + 'wt_val_loss']['min_tertiary_loss_achieved_all']\n for step, loss in configs['run'].optimization['validation_milestone'].items():\n if global_step >= step and min_loss_achieved > loss:\n raise MilestoneError(\n 'Milestone at step ' \\\n + str(global_step) \\\n + ' missed because minimum loss achieved so far is ' \\\n + str(min_loss_achieved))\n\n # restart if gradients are zero\n if (diagnostics['min_grad'] == 0 \\\n and diagnostics['max_grad'] == 0) \\\n or (configs['run'].evaluation['include_diagnostics'] \\\n and (np.isnan(diagnostics['min_grad']) \\\n or np.isnan(diagnostics['max_grad']))):\n raise DeadGradientError('Gradient is dead.')\n\n # Predict structures. 
Currently assumes that weighted training \n # and validation models are available, and fails if they're not.\n if global_step % configs['run'].io['prediction_frequency'] == 0:\n predict_and_log(log_dir, configs, models, session)\n\n # Checkpoint\n if global_step % configs['run'].io['checkpoint_frequency'] == 0:\n models['training'].save(session)\n\n except tf.errors.OutOfRangeError:\n print('Epoch limit reached.')\n\n # except (tf.errors.InvalidArgumentError, DeadGradientError): # InvalidArgumentError is usually triggered by a nan\n # TODO: turn the previous exception back on\n except (DeadGradientError): # InvalidArgumentError is usually triggered by a nan\n\n models['training'].finish(session, save=False)\n\n if args.restart_on_dead_gradient:\n print('Nan or dead gradient encountered;')\n print('model will be resumed from last checkpoint if one exists, or restarted from scratch otherwise.') \n if not os.path.isdir(checkpoints_dir):\n for sub_dir in next(os.walk(run_dir))[1]: \n rmtree(os.path.join(run_dir, sub_dir)) # erase all old directories \n restart = True\n else:\n print('Nan or dead gradient encountered; model will be terminated.') \n\n except MilestoneError:\n models['training'].finish(session, save=False)\n\n if args.restart_on_missed_milestone:\n print('Milestone missed; model will be restarted from scratch with an incremented seed.')\n \n for sub_dir in next(os.walk(run_dir))[1]: \n rmtree(os.path.join(run_dir, sub_dir)) # erase all old directories\n\n # modify configuration file with new seed\n old_seed = configs['training'].initialization['graph_seed']\n new_seed = old_seed + args.seed_increment\n for line in fileinput.input(args.config_file, inplace=True):\n print(line.replace('randSeed ' + str(old_seed), 'randSeed ' + str(new_seed)), end=' ')\n \n restart = True\n else:\n print('Milestone missed; model will be terminated.')\n \n except:\n print(('Unexpected error: ', sys.exc_info()[0]))\n raise\n\n finally:\n # Wrap up (ask threads to stop, save final checkpoint, etc.)\n if models['training']._is_started: \n models['training'].finish(session, save=args.checkpoint_on_finish)\n stdout_err_file_handle.close()\n \n return restart\n\nif __name__ == '__main__':\n # parse command-line arguments\n parser = argparse.ArgumentParser(description=\"Run RGN model.\")\n parser.add_argument(\n '-d', '--base_directory',\n default='.',\n help='Parent directory containing runs, data, checkpoints, and logs')\n parser.add_argument(\n '-p', '--prediction_only',\n action='store_true', \n help='If set, make a prediction on a single batch.')\n parser.add_argument(\n '-e', '--evaluation_model',\n action='append', \n help='Evaluation model to include (more than one is allowed). ' \\\n + 'Must be of the form [un]weighted_[training,validation,testing]. (???)')\n parser.add_argument(\n '-r', '--restart_on_dead_gradient',\n action='store_true', \n help='If model encounters zero gradients or NAN, ' \\\n + 'restart from last checkpoint or from scratch if no checkpoint is found. ' \\\n + 'Default behavior is for model to terminate. (requires include_diagnostics)')\n parser.add_argument(\n '-R', '--restart_on_missed_milestone', \n action='store_true', \n help='If a validation milestone is missed, restart from scratch with a new seed ' \\\n + '(incremented by seed_increment). 
Default behavior is for model to terminate.')\n parser.add_argument(\n '-c', '--checkpoint_on_finish',\n action='store_true', \n help='Checkpoint when the last epoch is completed.')\n parser.add_argument(\n '-s', '--seed_increment',\n type=int, \n default=8, \n help='Amount to increment seed by if milestones are not met.')\n parser.add_argument(\n '-g', '--gpu',\n type=int, \n help='GPU device to use.')\n parser.add_argument(\n 'config_file',\n help='Configuration file containing specification of RGN model.')\n args = parser.parse_args()\n\n # set up signal for premature interruption\n signal.signal(signal.SIGINT, lambda _, __: exit(0))\n\n start_time = time.time()\n\n while run_model(args): \n pass\n\n end_time = time.time()\n hours_elapsed = (end_time - start_time) / 3600\n sys.stderr.write(\"\\n\\n\\n*** Train time: {} hours ***\".format(hours_elapsed))\n\n \n"
] | [
[
"numpy.savetxt",
"numpy.isnan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jeromeshan/mimas | [
"4e4250bc74a0e69560a13053a9d556bcfd42c6c4"
] | [
"pbgca/__init__.py"
] | [
"from pbgca.cluster import Cluster\nfrom pbgca.clusterer import Clusterer\nfrom pbgca.cube_generator import CubeGenerator\nfrom pbgca.connectivity_matrix import ConnectivityMatrix\nfrom pbgca.cm_python import connectivity_matrix_python\nimport pyximport\nimport numpy as np\npyximport.install(setup_args={\"include_dirs\":np.get_include()},\n reload_support=True)\nfrom pbgca.cm_cython import connectivity_matrix_cython\n\nfrom pbgca.plot_generator import get_clusters_plot"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tlkh/jupyter_tensorboard | [
"eab6c6fa2f8f9de303a68f6d94232baec4eb9761"
] | [
"tests/test_tensorboard_integration.py"
] | [
"# -*- coding:utf-8 -*-\n\nimport sys\nimport time\nimport logging\nimport json\n\nimport pytest\nfrom tornado.testing import AsyncHTTPTestCase\n\n\[email protected](scope=\"session\")\ndef tf_logs(tmpdir_factory):\n\n import numpy as np\n import tensorflow as tf\n x = np.random.rand(5)\n y = 3 * x + 1 + 0.05 * np.random.rand(5)\n\n a = tf.Variable(0.1)\n b = tf.Variable(0.)\n err = a*x+b-y\n\n loss = tf.norm(err)\n tf.summary.scalar(\"loss\", loss)\n tf.summary.scalar(\"a\", a)\n tf.summary.scalar(\"b\", b)\n merged = tf.summary.merge_all()\n\n optimizor = tf.train.GradientDescentOptimizer(0.01).minimize(loss)\n\n with tf.Session() as sess:\n log_dir = tmpdir_factory.mktemp(\"logs\", numbered=False)\n log_dir = str(log_dir)\n\n train_write = tf.summary.FileWriter(log_dir, sess.graph)\n tf.global_variables_initializer().run()\n for i in range(1000):\n _, merged_ = sess.run([optimizor, merged])\n train_write.add_summary(merged_, i)\n\n return log_dir\n\n\[email protected](scope=\"session\")\ndef nb_app():\n sys.argv = [\"--port=6005\", \"--ip=127.0.0.1\", \"--no-browser\", \"--debug\"]\n from notebook.notebookapp import NotebookApp\n app = NotebookApp()\n app.log_level = logging.DEBUG\n app.ip = '127.0.0.1'\n # TODO: Add auth check tests\n app.token = ''\n app.password = ''\n app.disable_check_xsrf = True\n app.initialize()\n return app.web_app\n\n\nclass TestJupyterExtension(AsyncHTTPTestCase):\n\n @pytest.fixture(autouse=True)\n def init_jupyter(self, tf_logs, nb_app, tmpdir_factory):\n self.app = nb_app\n self.log_dir = tf_logs\n self.tmpdir_factory = tmpdir_factory\n\n def get_app(self):\n return self.app\n\n def test_tensorboard(self):\n\n content = {\"logdir\": self.log_dir}\n content_type = {\"Content-Type\": \"application/json\"}\n response = self.fetch(\n '/api/tensorboard',\n method='POST',\n body=json.dumps(content),\n headers=content_type)\n\n response = self.fetch('/api/tensorboard')\n instances = json.loads(response.body.decode())\n assert len(instances) > 0\n\n response = self.fetch('/api/tensorboard/1')\n instance = json.loads(response.body.decode())\n instance2 = None\n for inst in instances:\n if inst[\"name\"] == instance[\"name\"]:\n instance2 = inst\n assert instance == instance2\n\n response = self.fetch('/tensorboard/1/#graphs')\n assert response.code == 200\n\n response = self.fetch('/tensorboard/1/data/plugins_listing')\n plugins_list = json.loads(response.body.decode())\n assert plugins_list[\"graphs\"]\n assert plugins_list[\"scalars\"]\n\n response = self.fetch(\n '/api/tensorboard/1',\n method='DELETE')\n assert response.code == 204\n\n response = self.fetch('/api/tensorboard/1')\n error_msg = json.loads(response.body.decode())\n assert error_msg[\"message\"].startswith(\n \"TensorBoard instance not found:\")\n\n def test_instance_reload(self):\n content = {\"logdir\": self.log_dir, \"reload_interval\": 4}\n content_type = {\"Content-Type\": \"application/json\"}\n response = self.fetch(\n '/api/tensorboard',\n method='POST',\n body=json.dumps(content),\n headers=content_type)\n instance = json.loads(response.body.decode())\n assert instance is not None\n name = instance[\"name\"]\n reload_time = instance[\"reload_time\"]\n\n time.sleep(5)\n response = self.fetch('/api/tensorboard/{}'.format(name))\n instance2 = json.loads(response.body.decode())\n assert instance2[\"reload_time\"] != reload_time\n"
] | [
[
"tensorflow.norm",
"tensorflow.summary.FileWriter",
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"numpy.random.rand",
"tensorflow.Session",
"tensorflow.summary.scalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
yougoforward/hlzhu-tensorflow-deeplab-resnet | [
"e592ec2c5480872747c9e7a6cde03971aa1acd9e"
] | [
"npy2ckpt50gcn.py"
] | [
"\"\"\"Conversion of the .npy weights into the .ckpt ones.\n\nThis script converts the weights of the DeepLab-ResNet model\nfrom the numpy format into the TensorFlow one.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom deeplab_resnet import DeepLabResNetModelOri50gcn\n\nSAVE_DIR = './'\n\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n\n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"NPY to CKPT converter.\")\n parser.add_argument(\"npy_path\", type=str,\n help=\"Path to the .npy file, which contains the weights.\")\n parser.add_argument(\"--save-dir\", type=str, default=SAVE_DIR,\n help=\"Where to save the converted .ckpt file.\")\n return parser.parse_args()\n\n\ndef save(saver, sess, logdir):\n model_name = 'init50gcn.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n saver.save(sess, checkpoint_path, write_meta_graph=False)\n print('The weights have been converted to {}.'.format(checkpoint_path))\n\n\ndef main():\n \"\"\"Create the model and start the training.\"\"\"\n args = get_arguments()\n\n # Default image.\n image_batch = tf.constant(0, tf.float32, shape=[1, 321, 321, 3])\n # Create network.\n net = DeepLabResNetModelOri50gcn({'data': image_batch})\n var_list = tf.global_variables()\n\n # Set up tf session and initialize variables.\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Loading .npy weights.\n with tf.variable_scope(tf.get_variable_scope(), reuse=None):\n net.load(args.npy_path, sess)\n\n # Saver for converting the loaded weights into .ckpt.\n saver = tf.train.Saver(var_list=var_list, write_version=1)\n save(saver, sess, args.save_dir)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.constant",
"tensorflow.global_variables",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
lgvaz/torchrl | [
"cfff8acaf70d1fec72169162b95ab5ad3547d17a"
] | [
"reward/utils/utils.py"
] | [
"import numpy as np\nimport torch\nfrom numbers import Number\nfrom reward.utils import EPSILON\nfrom collections import Iterable\n\n\ndef to_np(v): \n if isinstance(v, torch.Tensor): return v.detach().cpu().numpy()\n else: return np.array(v, copy=False)\n\ndef is_np(v): return isinstance(v, (np.ndarray, np.generic))\n\ndef listify(p=None):\n if p is None: return []\n elif not isinstance(p, list): return [p]\n else: return p\n\ndef delistify(x): return x[0] if len(x) == 1 else x\n\ndef explained_var(target, preds):\n \"Calculates the explained variance between two datasets. Useful for estimating the quality of the value function\"\n return 1 - (target.squeeze() - preds.squeeze()).var() / target.view(-1).var()\n\ndef normalize(array):\n \"Normalize an array by subtracting the mean and diving by the std dev.\"\n return (array - array.mean()) / (array.std() + EPSILON)\n\ndef map_range(old_low, old_high, new_low, new_high):\n old_span = old_high - old_low\n new_span = new_high - new_low\n def get(value):\n norm_value = (value - old_low) / old_span\n return new_low + (norm_value * new_span)\n return get\n\ndef make_callable(x):\n if callable(x): return x\n try: return [make_callable(v) for v in x]\n except TypeError: return lambda *args, **kwargs: x\n\ndef one_hot(array, num_classes): return np.eye(num_classes)[array]\n\ndef join_first_dims(x, num_dims): return x.reshape((-1, *x.shape[num_dims:]))\n\n\nclass ScalarStats:\n def __init__(self, window):\n self.arr, self.w, self._sum = [], window, 0\n\n def append(self, v):\n self.arr.append(v)\n self._sum += v\n if len(self.arr) > self.w: self._sum -= self.arr[-self.w]\n\n def sum(self): return self._sum\n def mean(self): return self._sum / min(self.w, len(self.arr))\n\n"
] | [
[
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MariusCautun/Weighing_the_MW_with_ML | [
"6941b6d9c019c9087bcd14bbafc196d7681579b2"
] | [
"include_files/create_features.py"
] | [
"import numpy as np\nimport h5py as h5\n\n\ndef load_data_and_create_features( inputFile ):\n \"\"\"Function for loading the data for the galaxies and their satellite systems used to constrain the mass of the Milky Way by training various ML algorithms. This function also manipulates some of the feature by transforming them to log-space.\n \n INPUT:\n inputFile - the name of the data input file\n \n OUTPUT:\n data_input - a (N x m) numpy array giving the input 'm' features for the 'N' \n entries in the file\n data_output - a (N x 1) numpy array giving the target output for each entry\n name_input - the list of names for each feature in the input data\n name_output - the name of the output variable\n \"\"\"\n with h5.File( inputFile, 'r' ) as hf:\n mw_Mhalo = np.array( hf[\"M_halo\"] )\n mw_Mstar = np.array( hf[\"M_star\"] )\n mw_lum_func = np.array( hf[\"luminosity_function\"] )\n mw_vel_dis = np.array( hf[\"velocity_dispersion\"] )\n mw_vel_rad = np.array( hf[\"velocity_dispersion_radial\"] )\n mw_ang_mom = np.array( hf[\"mean_angular_momentum\"] )\n mw_dis = np.array( hf[\"mean_distance\"] )\n\n num_systems = mw_Mhalo.shape[0]\n num_features = 1 + mw_lum_func.shape[1] + 1*4\n\n print( \"The '%s' input file contains:\" % inputFile )\n print( \"MW-analogues: %i\" % mw_Mhalo.shape[0] )\n\n\n # transform the masses, velocity dispersion and angular momentum to log values\n mw_Mhalo = np.log10(mw_Mhalo)\n \n mw_Mstar = np.log10(mw_Mstar)\n mw_Mstar[mw_Mstar<8.] = 8. # get rid of the tail -- very few entries have these values\n \n # calculate the PDF of the luminosity function\n # (the file contains the CDF)\n for i in range(mw_lum_func.shape[1]-1):\n mw_lum_func[:,i] -= mw_lum_func[:,i+1]\n \n # calculate the tangetial velocity dispersion\n mw_vel_dis -= mw_vel_rad\n \n sel = mw_vel_dis > 100. # all the systems with valid values\n mw_vel_dis[ sel] = np.log10(mw_vel_dis[sel])\n mw_vel_dis[~sel] = np.log10(100.)\n\n sel = mw_vel_rad > 100. # all the systems with valid values\n mw_vel_rad[ sel] = np.log10(mw_vel_rad[sel])\n mw_vel_rad[~sel] = np.log10(100.)\n\n sel = mw_ang_mom > 1 # all the systems with valid values\n mw_ang_mom[ sel] = np.log10(mw_ang_mom[sel])\n mw_ang_mom[~sel] = 0.\n\n\n # define lists storing the name of each feature\n name_input = [ \"M_star\", \"N_sat 1.e6\", \"N_sat 1.e7\", \"N_sat 1.e8\", \"N_sat 1.e9\", \"N_sat 1.e10\", \n \"vel. tan.\", \"vel. radial\", \"mean L\", \"mean d\"\n ]\n name_output = [ \"M_halo\" ]\n\n # merge the input and output features in two different array\n data_input = np.column_stack( ( mw_Mstar,mw_lum_func, mw_vel_dis, mw_vel_rad, mw_ang_mom, mw_dis) )\n data_output= mw_Mhalo.reshape(-1,1)\n \n return data_input, data_output, name_input, name_output "
] | [
[
"numpy.array",
"numpy.log10",
"numpy.column_stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Mstronach/turicreate | [
"c5e0e58d44adc1c1414eac486f3dc17a721a296e"
] | [
"src/python/turicreate/test/test_sframe.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nfrom ..data_structures.sframe import SFrame\nfrom ..data_structures.sarray import SArray\nfrom ..data_structures.image import Image\nfrom ..util import _assert_sframe_equal, generate_random_sframe\nfrom .. import _launch, load_sframe, aggregate\nfrom . import util\n\nimport pandas as pd\nfrom .._cython.cy_flexible_type import GMT\nfrom pandas.util.testing import assert_frame_equal\nimport unittest\nimport datetime as dt\nimport tempfile\nimport os\nimport csv\nimport gzip\nimport string\nimport time\nimport numpy as np\nimport array\nimport math\nimport random\nimport shutil\nimport functools\nimport sys\nimport mock\nimport sqlite3\nfrom .dbapi2_mock import dbapi2_mock\n\n\nclass SFrameTest(unittest.TestCase):\n def setUp(self):\n self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]\n self.string_data = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n self.a_to_z = [str(chr(97 + i)) for i in range(0, 26)]\n self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})\n self.url = \"http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz\"\n\n self.int_data2 = range(50,60)\n self.float_data2 = [1.0 * i for i in range(50,60)]\n self.string_data2 = [str(i) for i in range(50,60)]\n self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})\n self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]\n self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]\n self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]\n self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),\n dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0))]\n self.all_type_cols = [self.int_data,\n self.float_data,\n self.string_data,\n self.vec_data,\n self.list_data,\n self.dict_data,\n self.datetime_data*5]\n self.sf_all_types = SFrame({\"X\"+str(i[0]):i[1] for i in zip(range(1,8),\n self.all_type_cols)})\n\n # Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.\n self.employees_sf = SFrame()\n self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name', inplace=True)\n self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id', inplace=True)\n\n # XXX: below are only used by one test!\n self.departments_sf = SFrame()\n self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id', inplace=True)\n self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name', inplace=True)\n\n def __assert_sarray_equal(self, sa1, sa2):\n l1 = list(sa1)\n l2 = list(sa2)\n self.assertEqual(len(l1), len(l2))\n for i in range(len(l1)):\n v1 = l1[i]\n v2 = l2[i]\n if v1 is None:\n self.assertEqual(v2, None)\n else:\n if type(v1) == dict:\n self.assertEqual(len(v1), len(v2))\n for key in v1:\n self.assertTrue(key in v1)\n self.assertEqual(v1[key], v2[key])\n\n elif (hasattr(v1, \"__iter__\")):\n self.assertEqual(len(v1), len(v2))\n for j in range(len(v1)):\n t1 = v1[j]; t2 = v2[j]\n if (type(t1) == float):\n if 
(math.isnan(t1)):\n self.assertTrue(math.isnan(t2))\n else:\n self.assertEqual(t1, t2)\n else:\n self.assertEqual(t1, t2)\n else:\n self.assertEqual(v1, v2)\n\n def test_split_datetime(self):\n from_zone = GMT(0)\n to_zone = GMT(4.5)\n utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')\n utc = utc.replace(tzinfo=from_zone)\n central = utc.astimezone(to_zone)\n\n sa = SArray([utc,central])\n\n expected = SFrame()\n expected ['X.year'] = [2011,2011]\n expected ['X.month'] = [1,1]\n expected ['X.day'] = [21,21]\n expected ['X.hour'] = [2,7]\n expected ['X.minute'] = [37,7]\n expected ['X.second'] = [21,21]\n expected ['X.timezone'] = [0.0,4.5]\n result = sa.split_datetime(timezone=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # column names\n expected = SFrame()\n expected ['ttt.year'] = [2011,2011]\n expected ['ttt.minute'] = [37,7]\n expected ['ttt.second'] = [21,21]\n\n result = sa.split_datetime(column_name_prefix='ttt',limit=['year','minute','second'])\n self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n sf = SFrame({'datetime': sa})\n result = sf.split_datetime('datetime', column_name_prefix='ttt',limit=['year','minute','second'])\n self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n\n def __test_equal(self, sf, df):\n # asserts two frames are equal, ignoring column ordering.\n self.assertEqual(sf.num_rows(), df.shape[0])\n self.assertEqual(sf.num_columns(), df.shape[1])\n assert_frame_equal(sf.to_dataframe(), df[sf.column_names()])\n\n def __create_test_df(self, size):\n int_data = []\n float_data = []\n string_data = []\n for i in range(0,size):\n int_data.append(i)\n float_data.append(float(i))\n string_data.append(str(i))\n\n return pd.DataFrame({'int_data': int_data,\n 'float_data': float_data,\n 'string_data': string_data})\n\n # Test if the rows are all the same...row order does not matter.\n # (I do expect column order to be the same)\n def __assert_join_results_equal(self, sf, expected_sf):\n _assert_sframe_equal(sf, expected_sf, check_row_order=False)\n\n def test_creation_from_dataframe(self):\n # created from empty dataframe\n sf_empty = SFrame(data=pd.DataFrame())\n self.__test_equal(sf_empty, pd.DataFrame())\n\n sf = SFrame(data=self.dataframe, format='dataframe')\n self.__test_equal(sf, self.dataframe)\n\n sf = SFrame(data=self.dataframe, format='auto')\n self.__test_equal(sf, self.dataframe)\n\n original_p = pd.DataFrame({'a':[1.0, float('nan')]})\n effective_p = pd.DataFrame({'a':[1.0, None]})\n sf = SFrame(data=original_p)\n self.__test_equal(sf, effective_p)\n\n original_p = pd.DataFrame({'a':['a',None,'b']})\n sf = SFrame(data=original_p)\n self.__test_equal(sf, original_p)\n\n def test_auto_parse_csv_with_bom(self):\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n df = pd.DataFrame({'float_data': self.float_data,\n 'int_data': self.int_data,\n 'string_data': self.a_to_z[:len(self.int_data)]})\n df.to_csv(csvfile, index=False)\n csvfile.close()\n\n import codecs\n with open(csvfile.name, 'rb') as f:\n content = f.read()\n with open(csvfile.name, 'wb') as f:\n f.write(codecs.BOM_UTF8)\n f.write(content)\n\n sf = SFrame.read_csv(csvfile.name, header=True)\n self.assertEqual(sf.dtype, [float, int, str])\n self.__test_equal(sf, df)\n\n def test_auto_parse_csv(self):\n with 
tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n df = pd.DataFrame({'float_data': self.float_data,\n 'int_data': self.int_data,\n 'string_data': self.a_to_z[:len(self.int_data)]})\n df.to_csv(csvfile, index=False)\n csvfile.close()\n\n sf = SFrame.read_csv(csvfile.name, header=True)\n\n self.assertEqual(sf.dtype, [float, int, str])\n self.__test_equal(sf, df)\n\n def test_parse_csv(self):\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n self.dataframe.to_csv(csvfile, index=False)\n csvfile.close()\n\n # list type hints\n sf = SFrame.read_csv(csvfile.name,\n column_type_hints=[int, int, str])\n self.assertEqual(sf.dtype, [int, int, str])\n sf['int_data'] = sf['int_data'].astype(int)\n sf['float_data'] = sf['float_data'].astype(float)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, self.dataframe)\n\n # list type hints, incorrect number of columns\n self.assertRaises(RuntimeError,\n lambda: SFrame.read_csv(csvfile.name,\n column_type_hints=[int, float]))\n\n # dictionary type hints\n sf = SFrame.read_csv(csvfile.name,\n column_type_hints={'int_data': int,\n 'float_data': float,\n 'string_data': str})\n self.__test_equal(sf, self.dataframe)\n\n # partial dictionary type hints\n sf = SFrame.read_csv(csvfile.name,\n column_type_hints={'float_data': float,\n 'string_data': str})\n self.__test_equal(sf, self.dataframe)\n\n # single value type hints\n sf = SFrame.read_csv(csvfile.name, column_type_hints=str)\n self.assertEqual(sf.dtype, [str, str, str])\n all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])\n self.__test_equal(sf, all_string_column_df)\n\n # single value type hints row limit\n sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)\n self.assertEqual(sf.dtype, [str, str, str])\n all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])\n self.assertEqual(len(sf), 5)\n self.__test_equal(sf, all_string_column_df[0:len(sf)])\n\n\n sf = SFrame.read_csv(csvfile.name)\n sf2 = SFrame(csvfile.name, format='csv')\n self.__test_equal(sf2, sf.to_dataframe())\n\n f = open(csvfile.name, \"w\")\n f.write('a,b,c\\n')\n f.write('NA,PIKA,CHU\\n')\n f.write('1.0,2,3\\n')\n f.close()\n sf = SFrame.read_csv(csvfile.name,\n na_values=['NA','PIKA','CHU'],\n column_type_hints={'a':float,'b':int,'c':str})\n t = list(sf['a'])\n self.assertEqual(t[0], None)\n self.assertEqual(t[1], 1.0)\n t = list(sf['b'])\n self.assertEqual(t[0], None)\n self.assertEqual(t[1], 2)\n t = list(sf['c'])\n self.assertEqual(t[0], None)\n self.assertEqual(t[1], \"3\")\n\n def test_parse_csv_non_multi_line_unmatched_quotation(self):\n data = [{'type': 'foo', 'text_string': 'foo foo.'},\n {'type': 'bar', 'text_string': 'bar \" bar.'},\n {'type': 'foo', 'text_string': 'foo\".'}]\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n with open(csvfile.name, 'w') as f:\n f.write(\"type,text_string\\n\") # header\n for l in data:\n f.write(l['type'] + ',' + l['text_string'] + '\\n')\n\n sf = SFrame.read_csv(csvfile.name, quote_char=None)\n self.assertEqual(len(sf), len(data))\n for i in range(len(sf)):\n self.assertEqual(sf[i], data[i])\n\n def test_save_load_file_cleanup(self):\n # when some file is in use, file should not be deleted\n with util.TempDirectory() as f:\n sf = SFrame()\n sf['a'] = SArray(range(1,1000000))\n sf.save(f)\n\n # many for each sarray, 1 sframe_idx, 1 object.bin, 1 ini\n file_count = len(os.listdir(f))\n self.assertTrue(file_count > 3)\n\n # sf1 now references the 
on disk file\n sf1 = SFrame(f)\n\n # create another SFrame and save to the same location\n sf2 = SFrame()\n sf2['b'] = SArray([str(i) for i in range(1,100000)])\n sf2['c'] = SArray(range(1, 100000))\n sf2.save(f)\n\n file_count = len(os.listdir(f))\n self.assertTrue(file_count > 3)\n\n # now sf1 should still be accessible\n self.__test_equal(sf1, sf.to_dataframe())\n\n # and sf2 is correct too\n sf3 = SFrame(f)\n self.__test_equal(sf3, sf2.to_dataframe())\n\n # when sf1 goes out of scope, the tmp files should be gone\n sf1 = 1\n time.sleep(1) # give time for the files being deleted\n file_count = len(os.listdir(f))\n self.assertTrue(file_count > 3)\n\n def test_save_load(self):\n\n # Check top level load function, with no suffix\n with util.TempDirectory() as f:\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f)\n sf2 = load_sframe(f)\n self.__test_equal(sf2, self.dataframe)\n\n # Check individual formats with the SFrame constructor\n formats = ['.csv']\n\n for suffix in formats:\n f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f.name)\n sf2 = SFrame(f.name)\n sf2['int_data'] = sf2['int_data'].astype(int)\n sf2['float_data'] = sf2['float_data'].astype(float)\n sf2['string_data'] = sf2['string_data'].astype(str)\n self.__test_equal(sf2, self.dataframe)\n g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])\n g2=SFrame()\n g2['x']=g\n g2.save(f.name)\n g3=SFrame.read_csv(f.name,column_type_hints=list)\n self.__test_equal(g2, g3.to_dataframe())\n f.close()\n os.unlink(f.name)\n\n # Make sure this file don't exist before testing\n self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))\n\n del sf2\n\n\n def test_save_load_reference(self):\n\n # Check top level load function, with no suffix\n with util.TempDirectory() as f:\n sf = SFrame(data=self.dataframe, format='dataframe')\n originallen = len(sf)\n sf.save(f)\n del sf\n\n sf = SFrame(f)\n # make a new column of \"1s and save it back\n int_data2 = sf['int_data'] + 1\n int_data2.materialize()\n sf['int_data2'] = int_data2\n sf._save_reference(f)\n del sf\n\n sf = SFrame(f)\n self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())\n\n # try to append and save reference\n expected = sf.to_dataframe()\n sf = sf.append(sf)\n sf._save_reference(f)\n\n sf = SFrame(f)\n self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())\n self.assertEqual(2 * originallen, len(sf))\n assert_frame_equal(sf[originallen:].to_dataframe(), expected)\n assert_frame_equal(sf[:originallen].to_dataframe(), expected)\n\n def test_save_to_csv(self):\n f = tempfile.NamedTemporaryFile(suffix='.csv', delete=False)\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f.name, format='csv')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':')\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n')\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False)\n sf2 = SFrame.read_csv(f.name, 
column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False)\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n import csv\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'', quote_level=csv.QUOTE_MINIMAL)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'', quote_level=csv.QUOTE_ALL)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'', quote_level=csv.QUOTE_NONE)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n # Pandas compatibility options\n sf.export_csv(f.name, sep=':', lineterminator='\\r\\n', doublequote=False, quotechar='\\'', quote_level=csv.QUOTE_NONE)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, sep=':', lineterminator='\\r\\n', doublequote=False, quotechar='\\'')\n self.__test_equal(sf2, self.dataframe)\n f.close()\n os.unlink(f.name)\n\n def test_save_to_json(self):\n f = tempfile.NamedTemporaryFile(suffix='.json', delete=False)\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f.name, format='json')\n sf2 = SFrame.read_json(f.name)\n # the float column will be parsed as integer\n sf2['float_data'] = sf2['float_data'].astype(float)\n self.__test_equal(sf2, self.dataframe)\n\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.export_json(f.name)\n sf2 = SFrame.read_json(f.name)\n sf2['float_data'] = sf2['float_data'].astype(float)\n self.__test_equal(sf2, self.dataframe)\n\n with open(f.name, 'w') as out:\n out.write('[\\n]')\n sf = SFrame.read_json(f.name)\n self.__test_equal(SFrame(), sf.to_dataframe())\n\n with open(f.name, 'w') as out:\n out.write('')\n sf = SFrame.read_json(f.name, orient='lines')\n self.__test_equal(SFrame(), sf.to_dataframe())\n\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.export_json(f.name, orient='lines')\n sf2 = SFrame.read_json(f.name, orient='lines')\n sf2['float_data'] = sf2['float_data'].astype(float)\n self.__test_equal(sf2, self.dataframe)\n f.close()\n os.unlink(f.name)\n\n def _remove_sframe_files(self, prefix):\n filelist = [ f for f in os.listdir(\".\") if f.startswith(prefix) ]\n for f in filelist:\n os.remove(f)\n\n def test_creation_from_txt(self):\n f = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)\n df = self.dataframe[['string_data']]\n df.to_csv(f.name, index=False)\n sf = SFrame(f.name)\n self.assertEqual(sf['string_data'].dtype, 
int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, df)\n\n fgzip = tempfile.NamedTemporaryFile(suffix='.txt.gz', delete=False)\n f_in = open(f.name, 'rb')\n f_out = gzip.open(fgzip.name, 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n sf = SFrame(fgzip.name)\n self.assertEqual(sf['string_data'].dtype, int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, df)\n\n fgzip.close()\n os.unlink(fgzip.name)\n f.close()\n os.unlink(f.name)\n\n def test_creation_from_csv_on_local(self):\n if os.path.exists('./foo.csv'):\n os.remove('./foo.csv')\n with open('./foo.csv', 'w') as f:\n url = f.name\n basesf = SFrame(self.dataframe)\n basesf.save(url, format=\"csv\")\n f.close()\n sf = SFrame('./foo.csv')\n self.assertEqual(sf['float_data'].dtype, int)\n sf['float_data'] = sf['float_data'].astype(float)\n self.assertEqual(sf['string_data'].dtype, int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, self.dataframe)\n sf = SFrame(url)\n self.assertEqual(sf['float_data'].dtype, int)\n sf['float_data'] = sf['float_data'].astype(float)\n self.assertEqual(sf['string_data'].dtype, int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, self.dataframe)\n os.remove(url)\n\n def test_alternate_line_endings(self):\n # test Windows line endings\n if os.path.exists('./windows_lines.csv'):\n os.remove('./windows_lines.csv')\n windows_file_url = None\n with open('./windows_lines.csv', 'w') as f:\n windows_file_url = f.name\n def_writer = csv.writer(f, dialect='excel')\n column_list = ['numbers']\n def_writer.writerow(column_list)\n for i in self.int_data:\n def_writer.writerow([i])\n\n sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.column_types(), [int])\n self.assertEqual(list(sf['numbers'].head()), self.int_data)\n\n sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.num_rows(), 0)\n\n os.remove(windows_file_url)\n\n def test_skip_rows(self):\n # test line skipping\n if os.path.exists('./skip_lines.csv'):\n os.remove('./skip_lines.csv')\n skip_file_url = None\n with open('./skip_lines.csv', 'w') as f:\n f.write(\"trash\\n\")\n f.write(\"junk\\n\")\n skip_file_url = f.name\n def_writer = csv.writer(f, dialect='excel')\n column_list = ['numbers']\n def_writer.writerow(column_list)\n for i in self.int_data:\n def_writer.writerow([i])\n\n sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':int})\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.column_types(), [int])\n self.assertEqual(list(sf['numbers'].head()), self.int_data)\n\n sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':list}, error_bad_lines=False)\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.num_rows(), 0)\n\n os.remove(skip_file_url)\n\n\n def test_creation_from_csv_on_http(self):\n pass\n # sf = SFrame(data=self.url, use_header=False)\n # self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))\n\n def test_creation_from_csv_on_s3(self):\n # Requires s3 account for jenkins\n # sf = SFrame(data='s3://turicreate-testdata/foo.csv')\n # print sf.head(sf.num_rows())\n pass\n\n def test_creation_from_csv_dir_local(self):\n csv_dir = \"./csv_dir\"\n\n if os.path.exists(csv_dir):\n 
shutil.rmtree(csv_dir)\n os.mkdir(csv_dir)\n\n for i in range(0, 100):\n with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:\n url = f.name\n self.dataframe.to_csv(url, index=False)\n f.close()\n\n singleton_sf = SFrame.read_csv(os.path.join(csv_dir, \"foo.0.csv\"))\n self.assertEqual(singleton_sf.num_rows(), 10)\n\n many_sf = SFrame.read_csv(csv_dir)\n self.assertEqual(many_sf.num_rows(), 1000)\n\n glob_sf = SFrame.read_csv(os.path.join(csv_dir, \"foo.*2.csv\"))\n self.assertEqual(glob_sf.num_rows(), 100)\n\n with self.assertRaises(IOError):\n SFrame.read_csv(\"missingdirectory\")\n\n with self.assertRaises(ValueError):\n SFrame.read_csv(\"\")\n\n shutil.rmtree(csv_dir)\n\n def test_creation_from_iterable(self):\n # Normal dict of lists\n the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}\n sf = SFrame(the_dict)\n df = pd.DataFrame(the_dict)\n self.__test_equal(sf, df)\n\n # Test that a missing value does not change the data type\n the_dict['ints'][0] = None\n sf = SFrame(the_dict)\n self.assertEqual(sf['ints'].dtype, int)\n\n # numpy.nan is actually a float, so it should cast the column to float\n the_dict['ints'][0] = np.nan\n sf = SFrame(the_dict)\n self.assertEqual(sf['ints'].dtype, float)\n\n # Just a single list\n sf = SFrame(self.int_data)\n df = pd.DataFrame(self.int_data)\n df.columns = ['X1']\n self.__test_equal(sf, df)\n\n # Normal list of lists\n list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]\n sf = SFrame(list_of_lists)\n cntr = 0\n for i in sf:\n self.assertEqual(list_of_lists[cntr], list(i['X1']))\n cntr += 1\n\n self.assertEqual(sf.num_columns(), 1)\n\n the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}\n sf = SFrame(the_dict)\n sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})\n df = pd.DataFrame(the_dict)\n self.__test_equal(sf2, df)\n sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])\n self.assertEqual(['X1','X2','X3'],sf2.column_names())\n sf2.rename({'X1':'ints','X2':'floats','X3':'strings'}, inplace=True)\n sf2=sf2[['floats','ints','strings']]\n self.__test_equal(sf2, df)\n\n sf = SFrame({'text': ('foo', 'bar', 'biz')})\n df = pd.DataFrame({'text': ['foo', 'bar', 'biz']})\n self.__test_equal(sf, df)\n\n def test_head_tail(self):\n sf = SFrame(data=self.dataframe)\n assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))\n # Cannot test for equality the same way because of dataframe indices\n taildf = sf.tail(4)\n for i in range(0, 4):\n self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])\n self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])\n self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])\n\n def test_head_tail_edge_case(self):\n sf = SFrame()\n self.assertEqual(sf.head().num_columns(), 0)\n self.assertEqual(sf.tail().num_columns(), 0)\n self.assertEqual(sf.head().num_rows(), 0)\n self.assertEqual(sf.tail().num_rows(), 0)\n sf = SFrame()\n sf['a'] = []\n self.assertEqual(sf.head().num_columns(), 1)\n self.assertEqual(sf.tail().num_columns(), 1)\n self.assertEqual(sf.head().num_rows(), 0)\n self.assertEqual(sf.tail().num_rows(), 0)\n\n def test_transform(self):\n sf = SFrame(data=self.dataframe)\n for i in range(sf.num_columns()):\n colname = sf.column_names()[i]\n sa = sf.apply(lambda x: x[colname], sf.column_types()[i])\n self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])\n\n sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)\n 
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)\n\n def test_transform_with_recursion(self):\n sf = SFrame(data={'a':[0,1,2,3,4], 'b':['0','1','2','3','4']})\n # this should be the equivalent to sf.apply(lambda x:x since a is\n # equivalent to range(4)\n sa = sf.apply(lambda x: sf[x['a']])\n sb = sf.apply(lambda x: x)\n self.__assert_sarray_equal(sa, sb)\n\n def test_transform_with_type_inference(self):\n sf = SFrame(data=self.dataframe)\n for i in range(sf.num_columns()):\n colname = sf.column_names()[i]\n sa = sf.apply(lambda x: x[colname])\n self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])\n\n sa = sf.apply(lambda x: x['int_data'] + x['float_data'])\n self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)\n\n # SFrame apply returns list of vector of numeric should be vector, not list\n sa = sf.apply(lambda x: [x['int_data'], x['float_data']])\n self.assertEqual(sa.dtype, array.array)\n\n def test_transform_with_exception(self):\n sf = SFrame(data=self.dataframe)\n self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key\n self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str\n self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error\n self.assertRaises(IndexError, lambda: sf.apply(lambda x: list(x.values())[10])) # index out of bound error\n\n def test_empty_transform(self):\n sf = SFrame()\n b = sf.apply(lambda x:x)\n self.assertEqual(len(b.head()), 0)\n\n def test_flatmap(self):\n # Correctness of typical usage\n n = 10\n sf = SFrame({'id': range(n)})\n new_sf = sf.flat_map([\"id_range\"], lambda x: [[str(i)] for i in range(x['id'])])\n self.assertEqual(new_sf.column_names(), [\"id_range\"])\n self.assertEqual(new_sf.column_types(), [str])\n expected_col = [str(x) for i in range(n) for x in range(i)]\n self.assertListEqual(list(new_sf['id_range']), expected_col)\n\n # Empty SFrame, without explicit column types\n sf = SFrame()\n with self.assertRaises(TypeError):\n new_sf = sf.flat_map(['id_range'],\n lambda x: [[i] for i in range(x['id'])])\n\n # Empty rows successfully removed\n sf = SFrame({'id': range(15)})\n new_sf = sf.flat_map(['id'],\n lambda x: [[x['id']]] if x['id'] > 8 else [])\n self.assertEqual(new_sf.num_rows(), 6)\n\n # First ten rows are empty raises error\n with self.assertRaises(TypeError):\n new_sf = sf.flat_map(['id'],\n lambda x: [[x['id']]] if x['id'] > 9 else [])\n\n\n\n def test_select_column(self):\n sf = SFrame(data=self.dataframe)\n\n sub_sf = sf.select_columns(['int_data', 'string_data'])\n exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})\n self.__test_equal(sub_sf, exp_df)\n\n with self.assertRaises(ValueError):\n sf.select_columns(['int_data', 'string_data', 'int_data'])\n\n # test indexing\n sub_col = sf['float_data']\n self.assertEqual(list(sub_col.head(10)), self.float_data)\n\n with self.assertRaises(TypeError):\n sub_sf = sf.select_columns(['duh',1])\n\n with self.assertRaises(TypeError):\n sub_sf = sf.select_columns(0)\n\n with self.assertRaises(RuntimeError):\n sub_sf = sf.select_columns(['not_a_column'])\n\n self.assertEqual(sf.select_columns([int]).column_names(), ['int_data'])\n self.assertEqual(sf.select_columns([int, str]).column_names(), ['int_data', 'string_data'])\n\n self.assertEqual(sf[int].column_names(), ['int_data'])\n self.assertEqual(sf[[int, str]].column_names(), ['int_data', 'string_data'])\n self.assertEqual(sf[int, 
str].column_names(), ['int_data', 'string_data'])\n self.assertEqual(sf['int_data', 'string_data'].column_names(), ['int_data', 'string_data'])\n self.assertEqual(sf['string_data', 'int_data'].column_names(), ['string_data', 'int_data'])\n\n sf = SFrame()\n with self.assertRaises(RuntimeError):\n sf.select_column('x')\n\n with self.assertRaises(RuntimeError):\n sf.select_columns(['x'])\n\n sf.add_column(SArray(), 'x', inplace=True)\n # does not throw\n sf.select_column('x')\n sf.select_columns(['x'])\n with self.assertRaises(RuntimeError):\n sf.select_column('y')\n\n with self.assertRaises(RuntimeError):\n sf.select_columns(['y'])\n\n def test_topk(self):\n sf = SFrame(data=self.dataframe)\n\n # Test that order is preserved\n df2 = sf.topk('int_data').to_dataframe()\n df2_expected = self.dataframe.sort_values('int_data', ascending=False)\n df2_expected.index = range(df2.shape[0])\n assert_frame_equal(df2, df2_expected)\n\n df2 = sf.topk('float_data', 3).to_dataframe()\n df2_expected = self.dataframe.sort_values('float_data', ascending=False).head(3)\n df2_expected.index = range(3)\n assert_frame_equal(df2, df2_expected)\n\n df2 = sf.topk('string_data', 3).to_dataframe()\n for i in range(0, 3):\n self.assertEqual(df2['int_data'][2-i], i + 7)\n\n with self.assertRaises(TypeError):\n sf.topk(2,3)\n\n sf = SFrame()\n sf.add_column(SArray([1,2,3,4,5]), 'a', inplace=True)\n sf.add_column(SArray([1,2,3,4,5]), 'b', inplace=True)\n\n sf.topk('a', 1) # should not fail\n\n\n def test_filter(self):\n sf = SFrame(data=self.dataframe)\n\n filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])\n\n sf2 = sf[filter_sa]\n exp_df = sf.head(3).append(sf.tail(3))\n self.__test_equal(sf2, exp_df.to_dataframe())\n\n # filter by 1s\n sf2 = sf[SArray(self.int_data)]\n exp_df = sf.head(10).to_dataframe()\n self.__test_equal(sf2, exp_df)\n\n # filter by 0s\n sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]\n exp_df = sf.head(0).to_dataframe()\n self.__test_equal(sf2, exp_df)\n\n # wrong size\n with self.assertRaises(IndexError):\n sf2 = sf[SArray([0,1,205])]\n\n # slightly bigger size\n sf = SFrame()\n n = 1000000\n sf['a'] = range(n)\n result = sf[sf['a'] == -1]\n self.assertEqual(len(result), 0)\n\n result = sf[sf['a'] > n - 123]\n self.assertEqual(len(result), 122)\n l = list(result['a'])\n for i in range(len(result)):\n self.assertEqual(i + n - 122, l[i])\n\n result = sf[sf['a'] < 2000]\n self.assertEqual(len(result), 2000)\n l = list(result['a'])\n for i in range(len(result)):\n self.assertEqual(i, l[i])\n\n # map input type\n toy_data = SFrame({'a': range(100)})\n map_result = map(lambda x: x+1, [1, 30])\n result = toy_data.filter_by(map_result, 'a')\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0]['a'], 2)\n self.assertEqual(result[1]['a'], 31)\n\n\n def test_sample_split(self):\n sf = SFrame(data=self.__create_test_df(100))\n entry_list = set()\n for i in sf:\n entry_list.add(str(i))\n\n\n sample_sf = sf.sample(.12, 9)\n sample_sf2 = sf.sample(.12, 9)\n self.assertEqual(len(sample_sf), len(sample_sf2))\n assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())\n self.assertEqual(len(sf.sample(0.5,1,exact=True)), 50)\n self.assertEqual(len(sf.sample(0.5,2,exact=True)), 50)\n\n for i in sample_sf:\n self.assertTrue(str(i) in entry_list)\n\n with self.assertRaises(ValueError):\n sf.sample(3)\n\n sample_sf = SFrame().sample(.12, 9)\n self.assertEqual(len(sample_sf), 0)\n\n a_split = sf.random_split(.12, 9)\n\n first_split_entries = set()\n for i in a_split[0]:\n 
first_split_entries.add(str(i))\n\n for i in a_split[1]:\n self.assertTrue(str(i) in entry_list)\n self.assertTrue(str(i) not in first_split_entries)\n\n with self.assertRaises(ValueError):\n sf.random_split(3)\n\n self.assertEqual(len(SFrame().random_split(.4)[0]), 0)\n self.assertEqual(len(SFrame().random_split(.4)[1]), 0)\n\n self.assertEqual(len(sf.random_split(0.5,1,exact=True)[0]), 50)\n self.assertEqual(len(sf.random_split(0.5,2,exact=True)[0]), 50)\n\n # tests add_column, rename\n def test_edit_column_ops(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.float_data), inplace=True)\n sf.add_column(SArray(self.string_data), inplace=True)\n\n # Make sure auto names work\n names = sf.column_names()\n cntr = 1\n for i in names:\n self.assertEqual(\"X\"+str(cntr), i)\n cntr = cntr + 1\n\n # Remove a column\n del sf['X2']\n\n # names\n names = sf.column_names()\n self.assertEqual(len(names), 2)\n self.assertEqual('X1', names[0])\n self.assertEqual('X3', names[1])\n\n # check content\n self.assertEqual(list(sf['X1'].head(10)), self.int_data)\n self.assertEqual(list(sf['X3'].head(10)), self.string_data)\n\n # check that a new automatically named column will not conflict\n sf.add_column(SArray(self.string_data), inplace=True)\n\n names = sf.column_names()\n self.assertEqual(len(names), 3)\n uniq_set = set()\n for i in names:\n uniq_set.add(i)\n if len(uniq_set) == 1:\n self.assertEqual(list(sf[i].head(10)), self.int_data)\n else:\n self.assertEqual(list(sf[i].head(10)), self.string_data)\n self.assertEqual(len(uniq_set), 3)\n\n # replacing columns preserves order\n names = sf.column_names()\n for n in names:\n sf[n] = sf[n].apply(lambda x: x)\n self.assertEqual(sf.column_names(), names)\n\n # do it again!\n del sf['X1']\n\n sf.add_column(SArray(self.string_data), inplace=True)\n names = sf.column_names()\n self.assertEqual(len(names), 3)\n uniq_set = set()\n for i in names:\n uniq_set.add(i)\n self.assertEqual(list(sf[i].head(10)), self.string_data)\n self.assertEqual(len(uniq_set), len(names))\n\n # standard rename\n rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}\n sf.rename(rename_dict, inplace=True)\n self.assertEqual(sf.column_names(), ['data','more_data','even_more'])\n\n # rename a column to a name that's already taken\n with self.assertRaises(RuntimeError):\n sf.rename({'data':'more_data'}, inplace=True)\n\n # try to rename a column that doesn't exist\n with self.assertRaises(ValueError):\n sf.rename({'foo':'bar'}, inplace=True)\n\n # pass something other than a dict\n with self.assertRaises(TypeError):\n sf.rename('foo', inplace=True)\n\n # Setting a column to const preserves order\n names = sf.column_names()\n for n in names:\n sf[n] = 1\n self.assertEqual(sf.column_names(), names)\n\n def test_duplicate_add_column_failure(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), \"hello\", inplace=True)\n with self.assertRaises(RuntimeError):\n sf.add_column(SArray(self.float_data), \"hello\", inplace=True)\n\n def test_remove_column(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.float_data), inplace=True)\n sf.add_column(SArray(self.string_data), inplace=True)\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n sf2 = 
sf.remove_column('X3', inplace=True)\n\n assert sf is sf2\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])\n\n sf2 = sf.remove_columns(['X2', 'X5'], inplace=True)\n\n assert sf is sf2\n\n self.assertEqual(sf.column_names(), ['X1', 'X4'])\n\n # with a generator expression\n sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()), inplace=True)\n\n assert sf is sf2\n\n self.assertEqual(sf.column_names(), ['X4'])\n\n\n def test_remove_bad_column(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.float_data), inplace=True)\n sf.add_column(SArray(self.string_data), inplace=True)\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n self.assertRaises(KeyError, lambda: sf.remove_column('bad', inplace=True))\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4'], inplace=True))\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n\n def __generate_synthetic_sframe__(self, num_users):\n \"\"\"\n synthetic collaborative data.\n generate 1000 users, user i watched movie 0, ... i-1.\n rating(i, j) = i + j\n length(i, j) = i - j\n \"\"\"\n sf = SFrame()\n sparse_matrix = {}\n for i in range(1, num_users + 1):\n sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]\n user_ids = []\n movie_ids = []\n ratings = []\n length_of_watching = []\n for u in sparse_matrix:\n user_ids += [u] * len(sparse_matrix[u])\n movie_ids += [x[0] for x in sparse_matrix[u]]\n ratings += [x[1] for x in sparse_matrix[u]]\n length_of_watching += [x[2] for x in sparse_matrix[u]]\n # typical add column stuff\n sf['user_id'] = (SArray(user_ids, int))\n sf['movie_id'] = (SArray(movie_ids, str))\n sf['rating'] = (SArray(ratings, float))\n sf['length'] = (SArray(length_of_watching, int))\n return sf\n\n def test_aggregate_ops(self):\n \"\"\"\n Test builtin groupby aggregators\n \"\"\"\n for m in [1, 10, 20, 50, 100]:\n values = range(m)\n vector_values = [[random.randint(1,100) for num in range(10)] \\\n for y in range(m)]\n nd_values = [np.array([float(random.randint(1,100)) for num in range(10)]).reshape(2,5) \\\n for y in range(m)]\n sf = SFrame()\n sf['key'] = [1] * m\n sf['value'] = values\n sf['vector_values'] = vector_values\n sf['nd_values'] = nd_values\n sf.materialize()\n built_ins = [aggregate.COUNT(), aggregate.SUM('value'),\n aggregate.AVG('value'), aggregate.MIN('value'),\n aggregate.MAX('value'), aggregate.VAR('value'),\n aggregate.STDV('value'), aggregate.SUM('vector_values'),\n aggregate.MEAN('vector_values'),\n aggregate.COUNT_DISTINCT('value'),\n aggregate.DISTINCT('value'),\n aggregate.FREQ_COUNT('value'),\n aggregate.SUM('nd_values'),\n aggregate.MEAN('nd_values')]\n sf2 = sf.groupby('key', built_ins)\n self.assertEqual(len(sf2), 1)\n self.assertEqual(sf2['Count'][0], m)\n self.assertEqual(sf2['Sum of value'][0], sum(values))\n self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))\n self.assertEqual(sf2['Min of value'][0], min(values))\n self.assertEqual(sf2['Max of value'][0], max(values))\n self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))\n self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))\n np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),\n list(np.sum(vector_values, 
axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),\n list(np.mean(vector_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Sum of nd_values'][0]),\n list(np.sum(nd_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Avg of nd_values'][0]),\n list(np.mean(nd_values, axis=0)))\n self.assertEqual(sf2['Count Distinct of value'][0],\n len(np.unique(values)))\n self.assertEqual(sorted(sf2['Distinct of value'][0]),\n sorted(list(np.unique(values))))\n self.assertEqual(sf2['Frequency Count of value'][0],\n {k:1 for k in np.unique(values)})\n\n # For vectors\n\n\n def test_min_max_with_missing_values(self):\n \"\"\"\n Test builtin groupby aggregators\n \"\"\"\n sf = SFrame()\n sf['key'] = [1,1,1,1,1,1,2,2,2,2]\n sf['value'] = [1,None,None,None,None,None, None,None,None,None]\n built_ins = [aggregate.COUNT(), aggregate.SUM('value'),\n aggregate.AVG('value'), aggregate.MIN('value'),\n aggregate.MAX('value'), aggregate.VAR('value'),\n aggregate.STDV('value'), aggregate.COUNT_DISTINCT('value'),\n aggregate.DISTINCT('value'), aggregate.FREQ_COUNT('value')]\n sf2 = sf.groupby('key', built_ins).sort('key')\n self.assertEqual(list(sf2['Count']), [6,4])\n self.assertEqual(list(sf2['Sum of value']), [1, 0])\n self.assertEqual(list(sf2['Avg of value']), [1, None])\n self.assertEqual(list(sf2['Min of value']), [1, None])\n self.assertEqual(list(sf2['Max of value']), [1, None])\n self.assertEqual(list(sf2['Var of value']), [0, 0])\n self.assertEqual(list(sf2['Stdv of value']), [0, 0])\n self.assertEqual(list(sf2['Count Distinct of value']), [2, 1])\n self.assertEqual(set(sf2['Distinct of value'][0]), set([1, None]))\n self.assertEqual(set(sf2['Distinct of value'][1]), set([None]))\n self.assertEqual(sf2['Frequency Count of value'][0], {1:1, None:5})\n self.assertEqual(sf2['Frequency Count of value'][1], {None:4})\n\n\n def test_aggregate_ops_on_lazy_frame(self):\n \"\"\"\n Test builtin groupby aggregators\n \"\"\"\n for m in [1, 10, 20, 50, 100]:\n values = range(m)\n vector_values = [[random.randint(1,100) for num in range(10)] \\\n for y in range(m)]\n sf = SFrame()\n sf['key'] = [1] * m\n sf['value'] = values\n sf['vector_values'] = vector_values\n sf['value'] = sf['value'] + 0\n built_ins = [aggregate.COUNT(), aggregate.SUM('value'),\n aggregate.AVG('value'), aggregate.MIN('value'),\n aggregate.MAX('value'), aggregate.VAR('value'),\n aggregate.STDV('value'), aggregate.SUM('vector_values'),\n aggregate.MEAN('vector_values'),\n aggregate.COUNT_DISTINCT('value'),\n aggregate.DISTINCT('value')]\n sf2 = sf.groupby('key', built_ins)\n self.assertEqual(len(sf2), 1)\n self.assertEqual(sf2['Count'][0], m)\n self.assertEqual(sf2['Sum of value'][0], sum(values))\n self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))\n self.assertEqual(sf2['Min of value'][0], min(values))\n self.assertEqual(sf2['Max of value'][0], max(values))\n self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))\n self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))\n np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),\n list(np.sum(vector_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),\n list(np.mean(vector_values, axis=0)))\n self.assertEqual(sf2['Count Distinct of value'][0],\n len(np.unique(values)))\n self.assertEqual(sorted(sf2['Distinct of value'][0]),\n sorted(np.unique(values)))\n\n def test_aggregate_ops2(self):\n \"\"\"\n Test builtin groupby aggregators using 
explicit named columns\n \"\"\"\n for m in [1, 10, 20, 50, 100]:\n values = range(m)\n vector_values = [[random.randint(1,100) for num in range(10)] \\\n for y in range(m)]\n sf = SFrame()\n sf['key'] = [1] * m\n sf['value'] = values\n sf['vector_values'] = vector_values\n built_ins = {'count':aggregate.COUNT,\n 'sum':aggregate.SUM('value'),\n 'avg':aggregate.AVG('value'),\n 'avg2':aggregate.MEAN('value'),\n 'min':aggregate.MIN('value'),\n 'max':aggregate.MAX('value'),\n 'var':aggregate.VAR('value'),\n 'var2':aggregate.VARIANCE('value'),\n 'stdv':aggregate.STD('value'),\n 'stdv2':aggregate.STDV('value'),\n 'vector_sum': aggregate.SUM('vector_values'),\n 'vector_mean': aggregate.MEAN('vector_values'),\n 'count_unique':aggregate.COUNT_DISTINCT('value'),\n 'unique':aggregate.DISTINCT('value'),\n 'frequency':aggregate.FREQ_COUNT('value')}\n sf2 = sf.groupby('key', built_ins)\n self.assertEqual(len(sf2), 1)\n self.assertEqual(sf2['count'][0], m)\n self.assertEqual(sf2['sum'][0], sum(values))\n self.assertAlmostEqual(sf2['avg'][0], np.mean(values))\n self.assertAlmostEqual(sf2['avg2'][0], np.mean(values))\n self.assertEqual(sf2['min'][0], min(values))\n self.assertEqual(sf2['max'][0], max(values))\n self.assertAlmostEqual(sf2['var'][0], np.var(values))\n self.assertAlmostEqual(sf2['var2'][0], np.var(values))\n self.assertAlmostEqual(sf2['stdv'][0], np.std(values))\n self.assertAlmostEqual(sf2['stdv2'][0], np.std(values))\n np.testing.assert_almost_equal(sf2['vector_sum'][0], list(np.sum(vector_values, axis=0)))\n np.testing.assert_almost_equal(sf2['vector_mean'][0], list(np.mean(vector_values, axis=0)))\n self.assertEqual(sf2['count_unique'][0], len(np.unique(values)))\n self.assertEqual(sorted(sf2['unique'][0]),\n sorted(np.unique(values)))\n self.assertEqual(sf2['frequency'][0],\n {k:1 for k in np.unique(values)})\n\n def test_groupby(self):\n \"\"\"\n Test builtin groupby and aggregate on different column types\n \"\"\"\n num_users = 500\n sf = self.__generate_synthetic_sframe__(num_users=num_users)\n\n built_ins = [aggregate.COUNT(), aggregate.SUM('rating'),\n aggregate.AVG('rating'), aggregate.MIN('rating'),\n aggregate.MAX('rating'), aggregate.VAR('rating'),\n aggregate.STDV('rating')]\n\n built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']\n\n \"\"\"\n Test groupby user_id and aggregate on rating\n \"\"\"\n sf_user_rating = sf.groupby('user_id', built_ins)\n actual = sf_user_rating.column_names()\n expected = ['%s of rating' % v for v in built_in_names] \\\n + ['user_id'] + ['Count']\n self.assertSetEqual(set(actual), set(expected))\n for row in sf_user_rating:\n uid = row['user_id']\n mids = range(1, uid + 1)\n ratings = [uid + i for i in mids]\n expected = [len(ratings), sum(ratings), np.mean(ratings),\n min(ratings), max(ratings), np.var(ratings),\n np.sqrt(np.var(ratings))]\n actual = [row['Count']] + [row['%s of rating' % op] \\\n for op in built_in_names]\n for i in range(len(actual)):\n self.assertAlmostEqual(actual[i], expected[i])\n\n \"\"\"\n Test that count can be applied on empty aggregate column.\n \"\"\"\n sf_user_rating = sf.groupby(\"user_id\", {'counter': aggregate.COUNT()})\n actual = {x['user_id']: x['counter'] for x in sf_user_rating}\n expected = {i: i for i in range(1, num_users + 1)}\n self.assertDictEqual(actual, expected)\n\n \"\"\"\n Test groupby movie_id and aggregate on length_of_watching\n \"\"\"\n built_ins = [aggregate.COUNT(), aggregate.SUM('length'),\n aggregate.AVG('length'), aggregate.MIN('length'),\n aggregate.MAX('length'), 
aggregate.VAR('length'),\n aggregate.STDV('length')]\n sf_movie_length = sf.groupby('movie_id', built_ins)\n actual = sf_movie_length.column_names()\n expected = ['%s of length' % v for v in built_in_names] \\\n + ['movie_id'] + ['Count']\n self.assertSetEqual(set(actual), set(expected))\n for row in sf_movie_length:\n mid = row['movie_id']\n uids = range(int(mid), num_users + 1)\n values = [i - int(mid) for i in uids]\n expected = [len(values), sum(values), np.mean(values), min(values),\n max(values), np.var(values), np.std(values)]\n actual = [row['Count']] + [row['%s of length' % op] \\\n for op in built_in_names]\n for i in range(len(actual)):\n self.assertAlmostEqual(actual[i], expected[i])\n\n def test_quantile_groupby(self):\n sf = self.__generate_synthetic_sframe__(num_users=500)\n # max and min rating for each user\n g = sf.groupby('user_id', [aggregate.MIN('rating'),\n aggregate.MAX('rating'),\n aggregate.QUANTILE('rating', 0, 1)])\n self.assertEqual(len(g), 500)\n for row in g:\n minrating = row['Min of rating']\n maxrating = row['Max of rating']\n arr = list(row['Quantiles of rating'])\n self.assertEqual(len(arr), 2)\n self.assertEqual(arr[0], minrating)\n self.assertEqual(arr[1], maxrating)\n\n def test_argmax_argmin_groupby(self):\n sf = self.__generate_synthetic_sframe__(num_users=500)\n sf_ret = sf.groupby('user_id',\n {'movie with max rating' : aggregate.ARGMAX('rating','movie_id'),\n 'movie with min rating' : aggregate.ARGMIN('rating','movie_id')})\n self.assertEqual(len(sf_ret), 500)\n self.assertEqual(sf_ret[\"movie with max rating\"].dtype, str)\n self.assertEqual(sf_ret[\"movie with min rating\"].dtype, str)\n self.assertEqual(sf_ret[\"user_id\"].dtype, int)\n # make sure we have computed correctly.\n max_d = {}\n min_d = {}\n for i in sf:\n key = i['user_id']\n if key not in max_d:\n max_d[key] = (i['movie_id'],i['rating'])\n min_d[key] = (i['movie_id'],i['rating'])\n else:\n if max_d[key][1] < i['rating']:\n max_d[key] = (i['movie_id'],i['rating'])\n if min_d[key][1] > i['rating']:\n min_d[key] = (i['movie_id'],i['rating'])\n for i in sf_ret:\n key = i['user_id']\n self.assertEqual(i[\"movie with max rating\"],max_d[key][0])\n self.assertEqual(i[\"movie with min rating\"],min_d[key][0])\n\n def test_multicolumn_groupby(self):\n sf = self.__generate_synthetic_sframe__(num_users=500)\n sf_um = sf.groupby([\"user_id\", \"movie_id\"], aggregate.COUNT)\n # I can query it\n t = sf_um.to_dataframe()\n self.assertEqual(sf_um[\"user_id\"].dtype, int)\n self.assertEqual(sf_um[\"movie_id\"].dtype, str)\n # make sure we have counted correctly\n d = {}\n for i in sf:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n if key not in d:\n d[key] = 0\n d[key] = d[key] + 1\n\n for i in sf_um:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n self.assertTrue(key in d)\n self.assertEqual(i['Count'], d[key])\n\n sf_um = sf.groupby([\"movie_id\", \"user_id\"], aggregate.COUNT())\n # I can query it\n t = sf_um.to_dataframe()\n self.assertEqual(sf_um[\"user_id\"].dtype, int)\n self.assertEqual(sf_um[\"movie_id\"].dtype, str)\n\n # make sure we have counted correctly\n d = {}\n for i in sf:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n if key not in d:\n d[key] = 0\n d[key] = d[key] + 1\n\n for i in sf_um:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n self.assertTrue(key in d)\n self.assertEqual(i['Count'], d[key])\n\n def __assert_concat_result_equal(self, result, expected, list_columns):\n self.assertEqual(result.num_columns(), expected.num_columns())\n for column 
in result.column_names():\n c1 = result[column]\n c2 = expected[column]\n self.assertEqual(c1.dtype, c2.dtype)\n self.assertEqual(len(c1), len(c2))\n if (column in list_columns):\n for i in range(len(c1)):\n if (c1[i] is None):\n self.assertTrue(c2[i] is None)\n continue\n if (c1.dtype == dict):\n for k in c1[i]:\n self.assertEqual(c2[i][k], c1[i][k])\n else:\n s1 = list(c1[i])\n if s1 is not None: s1.sort()\n s2 = list(c2[i])\n if s2 is not None: s2.sort()\n self.assertEqual(s1, s2)\n else:\n self.assertEqual(list(c1),list(c2))\n\n def test_groupby_dict_key(self):\n t = SFrame({'a':[{1:2},{3:4}]})\n with self.assertRaises(TypeError):\n t.groupby('a', {})\n\n def test_concat(self):\n sf = SFrame()\n sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]\n sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]\n sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']\n sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]\n sf['e'] = [{'x': 1}] * len(sf['a'])\n\n print(sf['b'].dtype)\n\n result = sf.groupby('a', aggregate.CONCAT('b'))\n expected_result = SFrame({\n 'a': [1,2,3,4, 5],\n 'List of b': [[1.,1.,2.,2.],[1.,3.,3.],[4.],[2.], []]\n })\n expected_result['List of b'] = expected_result['List of b'].astype(list)\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])\n\n\n result = sf.groupby('a', aggregate.CONCAT('d'))\n\n expected_result = SFrame({\n 'a': [1,2,3,4, 5],\n 'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]\n })\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])\n\n\n result = sf.groupby('a', {'c_c' :aggregate.CONCAT('c')})\n expected_result = SFrame({\n 'a': [1,2,3,4, 5],\n 'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]\n })\n\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])\n\n result = sf.groupby('a', aggregate.CONCAT('b','c'))\n expected_result = SFrame({\n 'a': [1,2,3,4,5],\n 'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]\n })\n\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])\n\n result = sf.groupby('a', {'c_b':aggregate.CONCAT('c','b')})\n expected_result = SFrame({\n 'a': [1,2,3,4,5],\n 'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]\n })\n\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])\n\n result = sf.groupby('a', {'cs':aggregate.CONCAT('c'), 'bs':aggregate.CONCAT('b')})\n expected_result = SFrame({\n 'a': [1,2,3,4,5],\n 'bs': [[1,1,2,2],[1,3,3],[4],[2], []],\n 'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]\n })\n expected_result['bs'] = expected_result['bs'].astype(list)\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])\n\n #exception fail if there is not column\n with self.assertRaises(TypeError):\n sf.groupby('a', aggregate.CONCAT())\n\n with self.assertRaises(KeyError):\n sf.groupby('a', aggregate.CONCAT('nonexist'))\n\n with self.assertRaises(TypeError):\n sf.groupby('a', aggregate.CONCAT('e', 'a'))\n\n def test_select_one(self):\n sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})\n res = list(sf.groupby('a', {'b':aggregate.SELECT_ONE('b')}))\n self.assertEqual(len(res), 5)\n for i in res:\n self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)\n\n def test_unique(self):\n sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})\n self.assertEqual(len(sf.unique()), 10)\n\n vals = [1,1,2,2,3,3,4,4, None, None]\n 
sf = SFrame({'a':vals,'b':vals})\n res = sf.unique()\n self.assertEqual(len(res), 5)\n self.assertEqual(set(res['a']), set([1,2,3,4,None]))\n self.assertEqual(set(res['b']), set([1,2,3,4,None]))\n\n def test_append_empty(self):\n sf_with_data = SFrame(data=self.dataframe)\n empty_sf = SFrame()\n self.assertFalse(sf_with_data.append(empty_sf) is sf_with_data)\n self.assertFalse(empty_sf.append(sf_with_data) is sf_with_data)\n self.assertFalse(empty_sf.append(empty_sf) is empty_sf)\n\n def test_append_all_match(self):\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n\n new_sf = sf1.append(sf2)\n assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())\n\n def test_append_lazy(self):\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n\n new_sf = sf1.append(sf2)\n self.assertTrue(new_sf.__is_materialized__())\n\n filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])\n filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])\n new_sf1 = new_sf[filter_sf1]\n new_sf2 = new_sf[filter_sf2]\n assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())\n assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())\n assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())\n\n row = sf1.head(1)\n sf = SFrame()\n for i in range(10):\n sf = sf.append(row)\n df = sf.to_dataframe()\n for i in range(10):\n self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))\n\n def test_recursive_append(self):\n sf = SFrame()\n for i in range(200):\n sf = sf.append(SFrame(data = self.dataframe))\n\n #consume\n sf.materialize()\n\n def test_print_sframe(self):\n sf = SFrame()\n\n def _test_print():\n sf.__repr__()\n sf._repr_html_()\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\n output = StringIO()\n sf.print_rows(output_file=output)\n\n n = 20\n sf['int'] = [i for i in range(n)]\n sf['float'] = [float(i) for i in range(n)]\n sf['str'] = [str(i) for i in range(n)]\n uc = '\\xe5\\xa4\\xa7\\xe5\\xa4\\xb4'\n sf['unicode'] = [uc for i in range(n)]\n sf['array'] = [array.array('d', [i]) for i in range(n)]\n sf['list'] = [[i, float(i), [i]] for i in range(n)]\n utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')\n sf['dt'] = [utc for i in range(n)]\n sf['img'] = [Image() for i in range(n)]\n sf['long_str'] = [\"\".join([str(i)] * 50) for i in range(n)]\n sf['long_unicode'] = [\"\".join([uc] * 50) for i in range(n)]\n sf['bad_unicode'] = ['\\x9d' + uc for i in range(n)]\n _test_print()\n\n def test_print_lazy_sframe(self):\n sf1 = SFrame(data=self.dataframe)\n self.assertTrue(sf1.__is_materialized__())\n sf2 = sf1[sf1['int_data'] > 3]\n sf2.__repr__()\n sf2.__str__()\n self.assertFalse(sf2.__is_materialized__())\n len(sf2)\n self.assertTrue(sf2.__is_materialized__())\n\n def test_append_order_diff(self):\n # name match but column order not match\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n sf2.swap_columns('int_data', 'string_data', inplace=True)\n\n new_sf = sf1.append(sf2)\n assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())\n\n def test_append_empty_sframe(self):\n sf = SFrame(data=self.dataframe)\n other = SFrame()\n\n # non empty append empty\n assert_frame_equal(sf.append(other).to_dataframe(), self.dataframe)\n\n # empty append non empty\n 
assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)\n\n #empty append empty\n assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())\n\n def test_append_exception(self):\n sf = SFrame(data=self.dataframe)\n\n # column number not match\n other = SFrame()\n other.add_column(SArray(), \"test\", inplace=True)\n self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same\n\n # column name not match\n other = SFrame()\n names = sf.column_names()\n for name in sf.column_names():\n other.add_column(SArray(), name, inplace=True)\n names[0] = 'some name not match'\n self.assertRaises(RuntimeError, lambda: sf.append(other))\n\n # name match but column type order not match\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n\n #change one column type\n sf1[\"int_data\"] = sf2.select_column(\"int_data\").astype(float)\n self.assertRaises(RuntimeError, lambda: sf.append(other))\n\n def test_simple_joins(self):\n inner_expected = SFrame()\n inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name', inplace=True)\n inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id', inplace=True)\n inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name', inplace=True)\n\n # Tests the \"natural join\" case\n beg = time.time()\n res = self.employees_sf.join(self.departments_sf)\n end = time.time()\n print(\"Really small join: \" + str(end-beg) + \" s\")\n\n self.__assert_join_results_equal(res, inner_expected)\n\n left_join_row = SFrame()\n left_join_row.add_column(SArray(['John']), 'last_name', inplace=True)\n left_join_row.add_column(SArray([None], int), 'dep_id', inplace=True)\n left_join_row.add_column(SArray([None], str), 'dep_name', inplace=True)\n\n left_expected = inner_expected.append(left_join_row)\n\n # Left outer join, passing string to 'on'\n res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')\n self.__assert_join_results_equal(res, left_expected)\n\n right_join_row = SFrame()\n right_join_row.add_column(SArray([None], str), 'last_name', inplace=True)\n right_join_row.add_column(SArray([35]), 'dep_id', inplace=True)\n right_join_row.add_column(SArray(['Marketing']), 'dep_name', inplace=True)\n\n right_expected = inner_expected.append(right_join_row)\n\n # Right outer join, passing list to 'on'\n res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])\n self.__assert_join_results_equal(res, right_expected)\n\n outer_expected = left_expected.append(right_join_row)\n\n # Full outer join, passing dict to 'on'\n res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})\n self.__assert_join_results_equal(res, outer_expected)\n\n # Test a join on non-matching key\n res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})\n self.assertEqual(res.num_rows(), 0)\n self.assertEqual(res.num_columns(), 3)\n self.assertEqual(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])\n\n # Test a join on a non-unique key\n bad_departments = SFrame()\n bad_departments['dep_id'] = SArray([33,33,31,31])\n bad_departments['dep_name'] = self.departments_sf['dep_name']\n\n no_pk_expected = SFrame()\n no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])\n no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])\n no_pk_expected['dep_name'] = SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])\n 
res = self.employees_sf.join(bad_departments, on='dep_id')\n self.__assert_join_results_equal(res, no_pk_expected)\n\n # Left join on non-unique key\n bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])\n bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])\n no_pk_expected = no_pk_expected.append(right_join_row)\n no_pk_expected = no_pk_expected.append(right_join_row)\n no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]\n res = bad_departments.join(self.employees_sf, on='dep_id', how='left')\n self.__assert_join_results_equal(res, no_pk_expected)\n\n def test_simple_joins_with_customized_name(self):\n # redundant name conflict resolution\n with self.assertRaises(KeyError):\n self.employees_sf.join(self.departments_sf, alter_name={'non_existing_name': 'random_name'})\n\n with self.assertRaises(KeyError):\n self.employees_sf.join(self.departments_sf, alter_name={'dep_id': 'random_name'})\n\n with self.assertRaises(ValueError):\n self.employees_sf.join(self.departments_sf, alter_name={'dep_name': 'last_name'})\n\n # nothing should happen\n # Tests the \"natural join\" case\n inner_expected = SFrame()\n inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name', inplace=True)\n inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id', inplace=True)\n inner_expected.add_column(SArray(['Marketing','Engineering','Cooking','Clerical','Sales']), 'dep_name', inplace=True)\n\n # add extra column for employee table\n employees_sf_extra = self.employees_sf.add_column(SArray(\n ['Sales', 'Engineering', 'Clerical', 'Marketing', 'Cooking', 'Basketball']), 'dep_name');\n\n res = employees_sf_extra.join(self.departments_sf, on='dep_id')\n inner_expected_tmp = inner_expected.add_column(SArray(['Clerical', 'Engineering', 'Clerical', 'Engineering', 'Sales']), 'dep_name.1')\n self.__assert_join_results_equal(res, inner_expected_tmp)\n\n inner_expected_tmp = inner_expected.add_column(SArray(['Clerical', 'Engineering', 'Clerical', 'Engineering', 'Sales']), 'X')\n res = employees_sf_extra.join(self.departments_sf, on='dep_id', alter_name={'dep_name': 'X'})\n self.__assert_join_results_equal(res, inner_expected_tmp)\n\n ###### A simple and navive test start ######\n employees_ = SFrame()\n employees_.add_column(SArray(['A','B','C','D']), 'last_name', inplace=True)\n employees_.add_column(SArray([31,32,33,None]), 'dep_id', inplace=True)\n employees_.add_column(SArray([1,2,3,4]), 'org_id', inplace=True)\n employees_.add_column(SArray([1,2,3,4]), 'bed_id', inplace=True)\n\n departments_ = SFrame()\n departments_.add_column(SArray([31,33,34]), 'dep_id', inplace=True)\n departments_.add_column(SArray(['A','C','F']), 'last_name', inplace=True)\n departments_.add_column(SArray(['Sales','Engineering', None]), 'dep_name', inplace=True)\n # intentionally dup at the second last\n departments_.add_column(SArray([1,3,5]), 'bed_id', inplace=True)\n departments_.add_column(SArray([1,3,None]), 'car_id', inplace=True)\n\n join_keys_ = ['dep_id', 'last_name']\n\n ## left\n expected_ = SFrame()\n expected_.add_column(SArray(['A','B','C','D']), 'last_name', inplace=True)\n expected_.add_column(SArray([31,32,33,None]), 'dep_id', inplace=True)\n expected_.add_column(SArray([1,2,3,4]), 'org_id', inplace=True)\n expected_.add_column(SArray([1,2,3,4]), 'bed_id', inplace=True)\n expected_.add_column(SArray(['Sales', None, 'Engineering', None]), 'dep_name', inplace=True)\n expected_.add_column(SArray([1,None,3,None]), 
'bed_id.1', inplace=True)\n expected_.add_column(SArray([1,None,3,None]), 'car_id', inplace=True)\n\n res = employees_.join(departments_, on=join_keys_, how='left')\n self.__assert_join_results_equal(res, expected_)\n\n expected_ = SFrame()\n expected_.add_column(SArray(['A','B','C','D']), 'last_name', inplace=True)\n expected_.add_column(SArray([31,32,33,None]), 'dep_id', inplace=True)\n expected_.add_column(SArray([1,2,3,4]), 'org_id', inplace=True)\n expected_.add_column(SArray([1,2,3,4]), 'bed_id', inplace=True)\n expected_.add_column(SArray(['Sales', None, 'Engineering', None]), 'dep_name', inplace=True)\n expected_.add_column(SArray([1,None,3,None]), 'Y', inplace=True)\n expected_.add_column(SArray([1,None,3,None]), 'car_id', inplace=True)\n\n res = employees_.join(departments_, on=join_keys_, how='left', alter_name={'car_id': 'X', 'bed_id': 'Y'})\n self.__assert_join_results_equal(res, expected_)\n\n ## left size is smaller than right\n expected_ = SFrame()\n expected_.add_column(SArray([31,33,34]), 'dep_id', inplace=True)\n expected_.add_column(SArray(['A','C','F']), 'last_name', inplace=True)\n expected_.add_column(SArray(['Sales','Engineering', None]), 'dep_name', inplace=True)\n expected_.add_column(SArray([1,3,5]), 'bed_id', inplace=True)\n expected_.add_column(SArray([1,3,None]), 'car_id', inplace=True)\n expected_.add_column(SArray([1,3,None]), 'org_id', inplace=True)\n expected_.add_column(SArray([1,3,None]), 'Y', inplace=True)\n\n res = departments_.join(employees_, on=join_keys_, how='left', alter_name={'bed_id': 'Y'})\n self.__assert_join_results_equal(res, expected_)\n\n ## right\n expected_ = SFrame()\n expected_.add_column(SArray(['A','C','F']), 'last_name', inplace=True)\n expected_.add_column(SArray([31,33,34]), 'dep_id', inplace=True)\n expected_.add_column(SArray([1,3,None]), 'org_id', inplace=True)\n expected_.add_column(SArray([1,3,None]), 'bed_id', inplace=True)\n expected_.add_column(SArray(['Sales','Engineering', None]), 'dep_name', inplace=True)\n expected_.add_column(SArray([1,3,5]), 'Y', inplace=True)\n expected_.add_column(SArray([1,3,None]), 'car_id', inplace=True)\n\n res = employees_.join(departments_, on=join_keys_, how='right', alter_name={'car_id': 'X', 'bed_id': 'Y'})\n self.__assert_join_results_equal(res, expected_)\n\n ## outer\n expected_ = SFrame()\n expected_.add_column(SArray(['A','B','C','D','F']), 'last_name', inplace=True)\n expected_.add_column(SArray([31,32,33,None,34]), 'dep_id', inplace=True)\n expected_.add_column(SArray([1,2,3,4,None]), 'org_id', inplace=True)\n expected_.add_column(SArray([1,2,3,4,None]), 'bed_id', inplace=True)\n expected_.add_column(SArray(['Sales', None, 'Engineering', None, None]), 'dep_name', inplace=True)\n expected_.add_column(SArray([1,None,3,None,5]), 'Y', inplace=True)\n expected_.add_column(SArray([1,None,3,None,None]), 'car_id', inplace=True)\n\n res = employees_.join(departments_, on=join_keys_, how='outer', alter_name={'car_id': 'X', 'bed_id': 'Y'})\n self.__assert_join_results_equal(res, expected_)\n\n ## error cases\n with self.assertRaises(KeyError):\n res = employees_.join(departments_, on=join_keys_, how='right', alter_name={\n 'some_id': 'car_id', 'bed_id': 'Y'})\n\n with self.assertRaises(ValueError):\n res = employees_.join(departments_, on=join_keys_, how='right', alter_name={\n 'car_id': 'car_id', 'bed_id': 'car_id'})\n\n ## resolution order is not independent\n with self.assertRaises(ValueError):\n res = employees_.join(departments_, on=join_keys_, how='right', alter_name={\n 'car_id': 'X', 
'bed_id': 'car_id'})\n\n with self.assertRaises(ValueError):\n res = employees_.join(departments_, on=join_keys_, how='right', alter_name={\n 'car_id': 'bed_id', 'bed_id': 'car_id'})\n\n ## duplicate values\n with self.assertRaises(RuntimeError):\n res = employees_.join(departments_, on=join_keys_, how='right', alter_name={\n 'car_id': 'X', 'bed_id': 'X'})\n\n def test_big_composite_join(self):\n # Create a semi large SFrame with composite primary key (letter, number)\n letter_keys = []\n number_keys = []\n data = []\n for i in string.ascii_lowercase:\n for j in range(0,100):\n letter_keys.append(i)\n number_keys.append(j)\n which = j % 3\n if which == 0:\n data.append(string.ascii_uppercase)\n elif which == 1:\n data.append(string.digits)\n elif which == 2:\n data.append(string.hexdigits)\n pk_gibberish = SFrame()\n pk_gibberish['letter'] = SArray(letter_keys, str)\n pk_gibberish['number'] = SArray(number_keys, int)\n pk_gibberish['data'] = SArray(data, str)\n\n # Some rows that won't match\n more_data = []\n more_letter_keys = []\n more_number_keys = []\n for i in range(0,40000):\n more_data.append('fish')\n more_letter_keys.append('A')\n more_number_keys.append(200)\n for i in range(0,80):\n for j in range(100,1000):\n more_data.append('waffles')\n more_letter_keys.append(letter_keys[j])\n more_number_keys.append(number_keys[j])\n # Non-matching row in this stretch\n if j == 147:\n more_letter_keys[-1] = 'A'\n for i in range(0,5000):\n more_data.append('pizza')\n more_letter_keys.append('Z')\n more_number_keys.append(400)\n\n join_with_gibberish = SFrame()\n join_with_gibberish['data'] = SArray(more_data, str)\n join_with_gibberish['moredata'] = SArray(more_data, str)\n join_with_gibberish['a_number'] = SArray(more_number_keys, int)\n join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)\n\n expected_answer = SFrame()\n exp_letter = []\n exp_number = []\n exp_data = []\n for i in range(0,80):\n exp_letter.extend(letter_keys[100:147])\n exp_number.extend(number_keys[100:147])\n exp_letter.extend(letter_keys[148:1000])\n exp_number.extend(number_keys[148:1000])\n exp_data.extend(data[100:147])\n exp_data.extend(data[148:1000])\n expected_answer['letter'] = SArray(exp_letter, str)\n expected_answer['number'] = SArray(exp_number, int)\n expected_answer['data'] = SArray(exp_data, str)\n expected_answer['data.1'] = 'waffles'\n expected_answer['moredata'] = 'waffles'\n\n beg = time.time()\n res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})\n end = time.time()\n print(\"Join took \" + str(end-beg) + \" seconds\")\n self.__assert_join_results_equal(res, expected_answer)\n\n def test_convert_dataframe_empty(self):\n sf = SFrame()\n sf['a'] = SArray([], int)\n df = sf.to_dataframe()\n self.assertEqual(df['a'].dtype, int)\n sf1 = SFrame(df)\n self.assertEqual(sf1['a'].dtype, int)\n self.assertEqual(sf1.num_rows(), 0)\n\n def test_replace_one_column(self):\n sf = SFrame()\n sf['a'] = [1,2,3]\n self.assertEqual(list(sf['a']), [1,2,3])\n\n # this should succeed as we are replacing a new column\n sf['a'] = [1,2]\n self.assertEqual(list(sf['a']), [1,2])\n\n # failed to add new column should revert original sframe\n with self.assertRaises(TypeError):\n sf['a'] = [1,2,'a']\n\n self.assertEqual(list(sf['a']), [1,2])\n\n # add a column with different length should fail if there are more than one column\n sf = SFrame()\n sf['a'] = [1,2,3]\n sf['b'] = ['a', 'b', 'c']\n with self.assertRaises(RuntimeError):\n sf['a'] = [1,2]\n\n def test_filter_by(self):\n # 
Set up SFrame to filter by\n sf = SFrame()\n sf.add_column(SArray(self.int_data), \"ints\", inplace=True)\n sf.add_column(SArray(self.float_data), \"floats\", inplace=True)\n sf.add_column(SArray(self.string_data), \"strings\", inplace=True)\n\n # filter by None should take no effect on data set without missing val\n res = sf.filter_by(None, \"ints\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n res = sf.filter_by(None, \"floats\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n res = sf.filter_by(None, \"strings\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n\n res = sf.filter_by(None, \"ints\")\n self.assertEqual(len(res), 0)\n res = sf.filter_by(None, \"strings\")\n self.assertEqual(len(res), 0)\n\n # private use only\n def __build_data_list_with_none(data_lst):\n data_lst.insert(len(data_lst) // 2, None)\n data_lst.insert(0, None)\n data_lst.append(None)\n return data_lst\n\n sf_none = SFrame()\n sf_none.add_column(SArray(__build_data_list_with_none(self.int_data[:])), \"ints\", inplace=True)\n sf_none.add_column(SArray(__build_data_list_with_none(self.float_data[:])), \"floats\", inplace=True)\n sf_none.add_column(SArray(__build_data_list_with_none(self.string_data[:])), \"strings\", inplace=True)\n\n res = sf_none.filter_by(None, \"ints\")\n self.assertEqual(len(res), 3)\n res = sf_none.filter_by(None, \"ints\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n\n res = sf_none.filter_by(None, \"floats\")\n self.assertEqual(len(res), 3)\n res = sf_none.filter_by(None, \"floats\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n\n res = sf_none.filter_by(None, \"strings\")\n self.assertEqual(len(res), 3)\n res = sf_none.filter_by(None, \"strings\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n\n # by generator, filter, map, range\n res = sf.filter_by(range(10), \"ints\")\n self.assertEqual(len(res), 9)\n self.assertEqual(res[\"ints\"][0], 1)\n res = sf.filter_by(range(10), \"ints\", exclude=True)\n self.assertEqual(len(res), 1)\n self.assertEqual(res[\"ints\"][0], 10)\n\n res = sf.filter_by(map(lambda x : x - 5., self.float_data), \"floats\")\n self.assertEqual(len(res), 5)\n self.assertEqual(res[\"floats\"][0], self.float_data[0])\n res = sf.filter_by(map(lambda x : x - 5., self.float_data), \"floats\", exclude=True)\n self.assertEqual(len(res), 5)\n self.assertEqual(res[\"floats\"][0], self.float_data[5])\n\n res = sf.filter_by(filter(lambda x : len(x) > 1, self.string_data), \"strings\")\n self.assertEqual(len(res), 1)\n self.assertEqual(res[\"strings\"][0], self.string_data[-1])\n res = sf.filter_by(filter(lambda x : len(x) > 1, self.string_data), \"strings\", exclude=True)\n self.assertEqual(len(res), 9)\n self.assertEqual(res[\"strings\"][0], self.string_data[0])\n\n # Normal cases\n res = sf.filter_by(SArray(self.int_data), \"ints\")\n self.__assert_join_results_equal(res, sf)\n res = sf.filter_by(SArray(self.int_data), \"ints\", exclude=True)\n self.assertEqual(list(res), [])\n\n res = sf.filter_by([5,6], \"ints\")\n exp = SFrame()\n exp.add_column(SArray(self.int_data[4:6]), \"ints\", inplace=True)\n exp.add_column(SArray(self.float_data[4:6]), \"floats\", inplace=True)\n exp.add_column(SArray(self.string_data[4:6]), \"strings\", inplace=True)\n self.__assert_join_results_equal(res, exp)\n exp_opposite = SFrame()\n exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), \"ints\", inplace=True)\n exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), \"floats\", 
inplace=True)\n exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), \"strings\", inplace=True)\n res = sf.filter_by([5,6], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n exp_one = SFrame()\n exp_one.add_column(SArray(self.int_data[4:5]), \"ints\", inplace=True)\n exp_one.add_column(SArray(self.float_data[4:5]), \"floats\", inplace=True)\n exp_one.add_column(SArray(self.string_data[4:5]), \"strings\", inplace=True)\n exp_all_but_one = SFrame()\n exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), \"ints\", inplace=True)\n exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), \"floats\", inplace=True)\n exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), \"strings\", inplace=True)\n\n res = sf.filter_by(5, \"ints\")\n self.__assert_join_results_equal(res, exp_one)\n res = sf.filter_by(5, \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_all_but_one)\n\n res = sf.filter_by(\"5\", \"strings\")\n self.__assert_join_results_equal(res, exp_one)\n res = sf.filter_by(5, \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_all_but_one)\n\n # Only missing values\n res = sf.filter_by([77,77,88,88], \"ints\")\n # Test against empty SFrame with correct columns/types\n self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])\n res = sf.filter_by([77,77,88,88], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n\n\n # Duplicate values\n res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], \"ints\")\n self.__assert_join_results_equal(res, exp)\n res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n # Duplicate and missing\n res = sf.filter_by([11,12,46,6,6,55,5,5], \"ints\")\n self.__assert_join_results_equal(res, exp)\n res = sf.filter_by([11,12,46,6,6,55,5,5], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n\n # Type mismatch\n with self.assertRaises(TypeError):\n res = sf.filter_by([\"hi\"], \"ints\")\n\n # Column doesn't exist\n with self.assertRaises(KeyError):\n res = sf.filter_by([1,2], \"intssss\")\n\n # Something that can't be turned into an SArray\n with self.assertRaises(Exception):\n res = sf.filter_by({1:2,3:4}, \"ints\")\n\n # column_name not given as string\n with self.assertRaises(TypeError):\n res = sf.filter_by(1,2)\n\n # Duplicate column names after join. 
Should be last because of the\n # renames.\n sf.rename({'ints':'id','floats':'id1','strings':'id11'}, inplace=True)\n exp.rename({'ints':'id','floats':'id1','strings':'id11'}, inplace=True)\n exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'}, inplace=True)\n res = sf.filter_by([5,6], \"id\")\n self.__assert_join_results_equal(res, exp)\n res = sf.filter_by([5,6], \"id\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n # XXXXXX: should be inner function\n def __test_to_from_dataframe(self, data, type):\n sf = SFrame()\n sf['a'] = data\n df = sf.to_dataframe()\n sf1 = SFrame(df)\n self.assertTrue(sf1.dtype[0]== type)\n\n df = pd.DataFrame({'val': data})\n sf1 = SFrame(df)\n self.assertTrue(sf1.dtype[0]== type)\n\n def test_to_from_dataframe(self):\n self.__test_to_from_dataframe([1,2,3], int)\n self.__test_to_from_dataframe(['a', 'b', 'c'], str)\n self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)\n self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)\n self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)\n self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)\n\n def test_pack_columns_exception(self):\n sf = SFrame()\n sf['a'] = [1, 2, 3, None, None]\n sf['b'] = [None, '2', '3', None, '5']\n sf['c'] = [None, 2.0, 3.0, None, 5.0]\n\n # cannot pack non array value into array\n with self.assertRaises(TypeError):\n sf.pack_columns(dtype=array.array)\n\n # cannot given non numeric na vlaue to array\n with self.assertRaises(ValueError):\n sf.pack_columns(dtype=array.array, fill_na='c')\n\n # cannot pack non exist columns\n with self.assertRaises(ValueError):\n sf.pack_columns(['d','a'])\n\n # dtype has to be dict/array/list\n with self.assertRaises(ValueError):\n sf.pack_columns(dtype=str)\n\n # pack duplicate columns\n with self.assertRaises(ValueError):\n sf.pack_columns(['a','a'])\n\n # pack partial columns to array, should fail if for columns that are not numeric\n with self.assertRaises(TypeError):\n sf.pack_columns(['a','b'], dtype=array.array)\n\n with self.assertRaises(TypeError):\n sf.pack_columns(column_name_prefix = 1)\n\n with self.assertRaises(ValueError):\n sf.pack_columns(column_name_prefix = '1')\n\n with self.assertRaises(ValueError):\n sf.pack_columns(column_name_prefix = 'c', column_names=['a', 'b'])\n\n def test_pack_columns2(self):\n sf = SFrame()\n sf['id'] = [1, 2, 3, 4]\n sf['category.a'] = [None, '2', '3', None]\n sf['category.b'] = [None, 2.0, None, 4.0]\n\n expected = SArray([\n [None, None],\n ['2', 2.0],\n ['3', None],\n [None, 4.0]])\n result = sf.pack_columns(column_name_prefix='category')\n self.assertEqual(result.column_names(), ['id', 'category'])\n self.__assert_sarray_equal(result['id'], sf['id'])\n self.__assert_sarray_equal(result['category'], expected)\n\n result = sf.pack_columns(column_name_prefix='category', new_column_name=\"new name\")\n self.assertEqual(result.column_names(), ['id', 'new name'])\n self.__assert_sarray_equal(result['id'], sf['id'])\n self.__assert_sarray_equal(result['new name'], expected)\n\n # default dtype is list\n result = sf.pack_columns(column_name_prefix='category', dtype=list)\n self.assertEqual(result.column_names(), ['id', 'category'])\n self.__assert_sarray_equal(result['category'], expected)\n\n # remove prefix == True by default\n expected = SArray([\n {},\n {'a':'2', 'b':2.0},\n {'a':'3'},\n {'b':4.0}\n ])\n result = sf.pack_columns(column_name_prefix='category', dtype=dict)\n self.__assert_sarray_equal(result['category'], expected)\n\n # remove 
prefix == False\n expected = SArray([\n {},\n {'category.a':'2', 'category.b':2.0},\n {'category.a':'3'},\n {'category.b':4.0}\n ])\n result = sf.pack_columns(column_name_prefix='category', dtype=dict, remove_prefix=False)\n self.assertEqual(result.column_names(), ['id', 'category'])\n self.__assert_sarray_equal(result['category'], expected)\n\n # fill_na\n expected = SArray([\n {'a':1, 'b':1},\n {'a':'2', 'b':2.0},\n {'a':'3', 'b':1},\n {'a':1, 'b':4.0}\n ])\n result = sf.pack_columns(column_name_prefix='category', dtype=dict, fill_na = 1)\n self.__assert_sarray_equal(result['category'], expected)\n\n expected = SArray([\n [1],\n [2],\n [3],\n [4]], list)\n result = sf.pack_columns(['id'], new_column_name='id')\n self.assertEqual(sorted(result.column_names()), sorted(['id', 'category.a', 'category.b']))\n self.__assert_sarray_equal(result['id'], expected)\n\n def test_pack_columns(self):\n sf = SFrame()\n sf['id'] = [1, 2, 3, 4, 5]\n sf['b'] = [None, '2', '3', None, '5']\n sf['c'] = [None, 2.0, 3.0, None, 5.0]\n\n expected_all_default = SArray([\n [1, None, None],\n [2, '2', 2.0],\n [3, '3', 3.0],\n [4, None, None],\n [5, '5', 5.0]\n ])\n\n # pack all columns, all default values\n self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)\n\n expected_ab_default = SArray([\n [1, None],\n [2, '2'],\n [3, '3'],\n [4, None],\n [5, '5']\n ])\n\n expected_all_fillna_1 = SArray([\n [1, -1, -1],\n [2, '2', 2.0],\n [3, '3', 3.0],\n [4, -1, -1],\n [5, '5', 5.0]\n ])\n\n # pack all columns do not drop na and also fill with some value\n result = sf.pack_columns(fill_na=-1)\n self.assertEqual(result.column_names(), ['X1'])\n self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)\n\n # pack partial columns, all default value\n result = sf.pack_columns(['id','b'])\n self.assertEqual(result.column_names(), ['c','X2'])\n self.__assert_sarray_equal(result['c'], sf['c'])\n self.__assert_sarray_equal(result['X2'], expected_ab_default)\n\n expected_sarray_ac_fillna_default = SArray([\n [1, float('NaN')],\n [2, 2.0],\n [3, 3.0],\n [4, float('NaN')],\n [5, 5.0]\n ])\n\n result = sf.pack_columns(['id','c'], dtype=array.array)\n self.assertEqual(result.column_names(), ['b', 'X2'])\n self.__assert_sarray_equal(result['b'], sf['b'])\n self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)\n\n expected_dict_default = SArray([\n {'id': 1},\n {'id': 2, 'b':'2', 'c': 2.0},\n {'id': 3, 'b':'3', 'c': 3.0},\n {'id':4 },\n {'id':5, 'b':'5', 'c': 5.0}\n ])\n\n result = sf.pack_columns(dtype=dict)\n self.__assert_sarray_equal(result['X1'], expected_dict_default)\n\n expected_dict_fillna = SArray([\n {'id': 1, 'b':-1, 'c': -1},\n {'id': 2, 'b':'2', 'c': 2.0},\n {'id': 3, 'b':'3', 'c': 3.0},\n {'id': 4, 'b':-1, 'c': -1},\n {'id': 5, 'b':'5', 'c': 5.0}\n ])\n\n result = sf.pack_columns(dtype=dict, fill_na=-1)\n self.__assert_sarray_equal(result['X1'], expected_dict_fillna)\n\n # pack large number of rows\n sf = SFrame()\n num_rows = 100000\n sf['a'] = range(0, num_rows)\n sf['b'] = range(0, num_rows)\n result = sf.pack_columns(['a', 'b'])\n self.assertEqual(len(result), num_rows)\n\n def test_pack_columns_dtype(self):\n a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})\n b = a.pack_columns(['name','data'],dtype=array.array)\n expected = SArray([[-140500967, 3],[-1405039672,4]])\n self.__assert_sarray_equal(b['X1'], expected)\n\n def test_unpack_dict_mixtype(self):\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':array.array('d', [1,2,3])}]})\n sf = sf.unpack('a', 
column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, list)\n\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':array.array('d', [1,2,3])}]})\n sf = sf.unpack()\n self.assertEqual(sf['a'].dtype, list)\n\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':None}]})\n sf = sf.unpack('a', column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, list)\n\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':None}]})\n sf = sf.unpack('a', column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, list)\n\n sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':None}])\n sf = sa.unpack(column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, array.array)\n\n sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':{'b':1}}])\n sf = sa.unpack(column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, str)\n\n sa = SArray([{'a': 1, 'b': 0.1}, {'a': 0.1, 'b': 1}])\n sf = sa.unpack(column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, float)\n self.assertEqual(sf['b'].dtype, float)\n\n\n def test_unpack_list(self):\n sa = SArray([\n [1, None, None],\n [2, '2', 2.0],\n [3, '3', 3.0],\n [4, None, None],\n [5, '5', 5.0]\n ])\n\n expected = SFrame()\n expected ['a'] = [1, 2, 3, 4, 5]\n expected ['b'] = [None, '2', '3', None, '5']\n expected ['c'] = [None, 2.0, 3.0, None, 5.0]\n\n result = sa.unpack()\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n result = sa.unpack(column_name_prefix='ttt')\n self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # column types\n result = sa.unpack(column_types=[int, str, float])\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # more column types\n result = sa.unpack(column_types=[int, str, float, int])\n result.rename(dict(zip(result.column_names(), ['a','b','c','d'])), inplace=True)\n e = expected.select_columns(['a','b','c'])\n e.add_column(SArray([None for i in range(5)], int),'d', inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # less column types\n result = sa.unpack(column_types=[int, str])\n result.rename(dict(zip(result.column_names(), ['a','b'])), inplace=True)\n e = expected.select_columns(['a','b'])\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # fill na_value\n e = SFrame()\n e['a'] = [1, 2, None, 4, 5]\n e['b'] = [None, '2', '3', None, '5']\n e['c'] = [None, 2.0, None, None, 5.0]\n result = sa.unpack(na_value=3)\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # wrong length\n with self.assertRaises(TypeError):\n sa.unpack(column_name_prefix=['a','b'])\n\n # wrong type\n with self.assertRaises(RuntimeError):\n sa.unpack(column_types = [str, int, float])\n\n # wrong limit types\n with self.assertRaises(TypeError):\n sa.unpack(limit=[\"1\"])\n\n # int array cannot be unpacked\n with self.assertRaises(TypeError):\n SArray([1,2,3,4]).unpack()\n\n # column name must be a string\n with self.assertRaises(TypeError):\n sa.unpack(1)\n\n # invalid column type\n with self.assertRaises(TypeError):\n sa.unpack(column_types = int)\n\n # invalid column type\n with self.assertRaises(TypeError):\n sa.unpack(column_types = [np.array])\n\n # cannot infer 
type if no values\n with self.assertRaises(RuntimeError):\n SArray([], list).unpack()\n\n def test_unpack_array(self):\n import array\n sa = SArray([\n array.array('d', [1, 1, 0]),\n array.array('d', [2, -1, 1]),\n array.array('d', [3, 3, 2]),\n array.array('d', [-1, 2, 3]),\n array.array('d', [5, 5, 4])\n ])\n\n expected = SFrame()\n expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]\n expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]\n expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]\n\n result = sa.unpack()\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # right amount column names\n result = sa.unpack(column_name_prefix = 'unpacked')\n result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])), inplace=True)\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # column types\n result = sa.unpack(column_types=[int, str, float])\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n expected['a'] = expected['a'].astype(int)\n expected['b'] = expected['b'].astype(str)\n expected['c'] = expected['c'].astype(float)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # more column types\n result = sa.unpack(column_types=[int, str, float, int])\n result.rename(dict(zip(result.column_names(), ['a','b','c','d'])), inplace=True)\n e = expected.select_columns(['a','b','c'])\n e.add_column(SArray([None for i in range(5)], int),'d', inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # less column types\n result = sa.unpack(column_types=[int, str])\n result.rename(dict(zip(result.column_names(), ['a','b'])), inplace=True)\n e = expected.select_columns(['a','b'])\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # fill na_value\n e = SFrame()\n e['a'] = SArray([1, 2, 3, None, 5], float)\n e['b'] = SArray([1, None, 3, 2, 5], float)\n e['c'] = SArray([0, 1, 2, 3, 4], float)\n result = sa.unpack(na_value=-1)\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n def test_unpack_dict(self):\n\n sf = SFrame([{'a':1,'b':2,'c':3},{'a':4,'b':5,'c':6}])\n expected_sf = SFrame()\n expected_sf[\"a\"] = [1,4]\n expected_sf[\"b\"] = [2,5]\n expected_sf[\"c\"] = [3,6]\n unpacked_sf = sf.unpack()\n assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())\n\n expected_sf = SFrame()\n expected_sf[\"xx.a\"] = [1,4]\n expected_sf[\"xx.b\"] = [2,5]\n expected_sf[\"xx.c\"] = [3,6]\n unpacked_sf = sf.unpack(column_name_prefix='xx')\n assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())\n\n packed_sf = SFrame({\"X1\":{'a':1,'b':2,'c':3},\"X2\":{'a':4,'b':5,'c':6}})\n\n with self.assertRaises(RuntimeError):\n packed_sf.unpack()\n\n sf = SFrame()\n\n sf[\"user_id\"] = [1,2,3,4,5,6,7]\n sf[\"is_restaurant\"] = [1, 1,0,0, 1, None, None]\n sf[\"is_retail\"] = [None,1,1,None,1, None, None]\n sf[\"is_electronics\"] = [\"yes\", \"no\",\"yes\",None,\"no\", None, None]\n\n\n packed_sf = SFrame()\n packed_sf['user_id'] = sf['user_id']\n packed_sf[\"category\"] = [\n {\"is_restaurant\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n {\"is_restaurant\": 0, \"is_retail\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 0 },\n {\"is_restaurant\": 1, 
\"is_retail\": 1, \"is_electronics\": \"no\"},\n { },\n None]\n\n with self.assertRaises(TypeError):\n packed_sf['user_id'].unpack()\n\n with self.assertRaises(TypeError):\n packed_sf['category'].unpack(1)\n\n with self.assertRaises(TypeError):\n packed_sf['category'].unpack(value_types = [int])\n\n # unpack only one column\n expected_sf = SFrame()\n expected_sf[\"is_retail\"] = sf[\"is_retail\"]\n unpacked_sf = packed_sf['category'].unpack(limit=[\"is_retail\"], column_types=[int], column_name_prefix=None)\n assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())\n\n\n # unpack all\n unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=[\"is_restaurant\", \"is_retail\", \"is_electronics\"])\n assert_frame_equal(unpacked_sf.to_dataframe(), sf[[\"is_restaurant\", \"is_retail\", \"is_electronics\"]].to_dataframe())\n\n # auto infer types, the column order may be different, so use order here before comparison\n unpacked_sf = packed_sf[\"category\"].unpack()\n unpacked_sf.rename({\n \"X.is_restaurant\": \"is_restaurant\",\n \"X.is_retail\": \"is_retail\",\n \"X.is_electronics\": \"is_electronics\"\n }, inplace=True)\n assert_frame_equal(unpacked_sf.to_dataframe().sort_index(axis=1), sf[[\"is_restaurant\", \"is_retail\", \"is_electronics\"]].to_dataframe().sort_index(axis=1))\n\n unpacked_sf = packed_sf[\"category\"].unpack(na_value = 0, column_name_prefix=\"new\")\n expected = SFrame()\n expected[\"new.is_restaurant\"] = [1, 1,None,None, 1, None, None]\n expected[\"new.is_retail\"] = [None,1,1,None,1, None, None]\n expected[\"new.is_electronics\"] = [\"yes\", \"no\",\"yes\",None,\"no\", None, None]\n assert_frame_equal(unpacked_sf.to_dataframe().sort_index(axis=1), expected.to_dataframe().sort_index(axis=1))\n\n # unpack a dictionary key integer as key\n sa = SArray([\n {1: 'a'},\n {2: 'b'}\n ])\n result = sa.unpack()\n expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n result = sa.unpack(limit=[2])\n expected = SFrame({'X.2':[None, 'b']})\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n result = sa.unpack(limit=[2], column_name_prefix=\"expanded\")\n expected = SFrame({'expanded.2':[None, 'b']})\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n sa = SArray([{i:i} for i in range(500)])\n unpacked_sa = sa.unpack()\n self.assertEqual(len(unpacked_sa), len(sa))\n i = 0\n for v in unpacked_sa:\n for j in range(500):\n val = v['X.' 
+ str(j)]\n if (j == i):\n self.assertEqual(val, i)\n else:\n self.assertEqual(val, None)\n i = i + 1\n\n # if types don't agree, convert to string automatically\n sa = SArray([{'a':1},{'a': 'a_3'}])\n sf = sa.unpack()\n self.assertEqual(sf.column_types(), [str])\n\n sa = SArray([{'a':None}, {'a': 1}])\n sf = sa.unpack()\n self.assertEqual(sf.column_types(), [int])\n\n sa = SArray([{'a':1}, {'a': None}])\n sf = sa.unpack()\n self.assertEqual(sf.column_types(), [int])\n\n # type inference is already at server side even if limit is given\n sa = SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])\n unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix=\"\")\n for i in range(10):\n v = unpacked[i]\n for j in range(10):\n if (j != i):\n self.assertEqual(v['c'+str(j)], None)\n elif j % 2 == 0:\n self.assertEqual(v['c'+str(j)], j)\n else:\n self.assertEqual(v['c'+str(j)], 'v' + str(j))\n\n\n\n def test_unpack_sframe(self):\n sf = SFrame()\n sf['user_id'] = range(7)\n sf[\"category\"] = [\n {\"is_restaurant\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n {\"is_restaurant\": 0, \"is_retail\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 0 },\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n { },\n None]\n sf['list'] = [\n None,\n range(1),\n range(2),\n range(3),\n range(1),\n range(2),\n range(3),\n ]\n\n with self.assertRaises(TypeError):\n sf.unpack('user_id')\n\n expected = SFrame()\n expected['user_id'] = sf['user_id']\n expected['list'] = sf['list']\n expected[\"is_restaurant\"] = [1, 1,0,0, 1, None, None]\n expected[\"is_retail\"] = [None,1,1,None,1, None, None]\n expected[\"is_electronics\"] = [\"yes\", \"no\",\"yes\",None,\"no\", None, None]\n\n result = sf.unpack('category')\n result.rename({\n 'category.is_restaurant': 'is_restaurant',\n 'category.is_retail': 'is_retail',\n 'category.is_electronics': 'is_electronics'\n }, inplace=True)\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"\")\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"abc\")\n result.rename({\n 'abc.is_restaurant': 'is_restaurant',\n 'abc.is_retail': 'is_retail',\n 'abc.is_electronics': 'is_electronics'\n }, inplace=True)\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"\", column_types=[str], limit=['is_restaurant'])\n new_expected = expected[['user_id', 'list', 'is_restaurant']]\n new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)\n assert_frame_equal(new_expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"\", na_value = None)\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='list')\n expected = SFrame()\n expected['user_id'] = sf['user_id']\n expected['list.0'] = [None,0,0,0, 0,0,0]\n expected['list.1'] = [None,None,1,1, None,1,1]\n expected['list.2'] = [None,None,None,2, None, None,2]\n expected['category'] = sf['category']\n 
assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='list', na_value= 2)\n expected = SFrame()\n expected['user_id'] = sf['user_id']\n expected['list.0'] = [None,0,0,0, 0,0,0]\n expected['list.1'] = [None,None,1,1, None,1,1]\n expected['list.2'] = [None,None,None,None, None, None,None]\n expected['category'] = sf['category']\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n # auto resolving conflicting names\n sf = SFrame()\n sf['a'] = range(100)\n sf['b'] = [range(5) for i in range(100)]\n sf['b.0'] = range(100)\n sf['b.0.1'] = range(100)\n result = sf.unpack('b')\n self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])\n\n sf = SFrame()\n sf['a'] = range(100)\n sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]\n sf['b.str1'] = range(100)\n result = sf.unpack('b')\n self.assertEqual(len(result.column_names()), 4)\n\n def test_stack_dict(self):\n sf = SFrame()\n sf[\"user_id\"] = [1,2,3,4,5]\n sf[\"user_name\"] = ['user' + str(i) for i in list(sf['user_id'])]\n sf[\"category\"] = [\n {\"is_restaurant\": 1, },\n {\"is_restaurant\": 0, \"is_retail\": 1 },\n { \"is_retail\": 0 },\n {},\n None]\n\n expected_sf = SFrame()\n expected_sf[\"user_id\"] = [1,2, 2, 3,4,5]\n expected_sf[\"user_name\"] = ['user' + str(i) for i in list(expected_sf['user_id'])]\n expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]\n expected_sf['value'] = [1,0,1,0, None, None]\n df_expected = expected_sf.to_dataframe().sort_values(['user_id', 'category']).reset_index(drop=True)\n\n with self.assertRaises(TypeError):\n sf.stack()\n\n with self.assertRaises(ValueError):\n sf.stack('sss')\n\n with self.assertRaises(ValueError):\n sf.stack('category', ['user_id', 'value'])\n\n # normal case\n stacked_sf = sf.stack('category', ['category', 'value'])\n assert_frame_equal(stacked_sf.to_dataframe().sort_values([\"user_id\", \"category\"]).reset_index(drop=True), df_expected)\n\n # set column types\n stacked_sf = sf.stack('category')\n self.assertTrue(stacked_sf.column_types()[2] == str)\n self.assertTrue(stacked_sf.column_types()[3] == int)\n\n # auto generate column names\n stacked_sf = sf.stack('category')\n new_column_names = stacked_sf.column_names()\n self.assertTrue(len(new_column_names) == 4)\n expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]}, inplace=True)\n df_expected = expected_sf.to_dataframe().sort_values(['user_id', new_column_names[2]]).reset_index(drop=True)\n assert_frame_equal(stacked_sf.to_dataframe().sort_values([\"user_id\", new_column_names[2]]).reset_index(drop=True), df_expected)\n\n #dropna\n expected_sf = SFrame()\n expected_sf[\"user_id\"] = [1,2, 2, 3, 4, 5]\n expected_sf[\"user_name\"] = ['user' + str(i) for i in list(expected_sf['user_id'])]\n expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]\n expected_sf['value'] = [1,0,1,0, None, None]\n df_expected = expected_sf.to_dataframe().sort_values(['user_id', 'category']).reset_index(drop=True)\n\n stacked_sf = sf.stack('category', ['category','value'], drop_na = False)\n assert_frame_equal(stacked_sf.to_dataframe().sort_values([\"user_id\", \"category\"]).reset_index(drop=True), df_expected)\n\n sf = SFrame()\n sf['a'] = SArray(([{}] * 100) + [{'a':1}])\n\n # its a dict need 2 types\n with 
self.assertRaises(ValueError):\n sf.stack('a',['key', 'value'], new_column_type=[str])\n with self.assertRaises(ValueError):\n sf.stack('a',['key', 'value'], new_column_type=str)\n\n sf.stack('a',['key', 'value'], new_column_type=[str, int])\n expected_sf = SFrame()\n expected_sf['key'] = SArray([None] * 100 + [\"a\"])\n expected_sf['value'] = SArray([None] * 100 + [1])\n\n def test_stack_list(self):\n sf = SFrame()\n sf[\"a\"] = [1,2,3,4,5]\n sf[\"b\"] = [['a', 'b'], ['c'], ['d'],['e', None], None]\n expected_result = SFrame()\n expected_result['a'] = [1,1,2,3,4,4,5]\n expected_result['X1'] = ['a','b','c','d','e',None, None]\n\n with self.assertRaises(TypeError):\n sf.stack()\n\n with self.assertRaises(ValueError):\n sf.stack('sss')\n\n with self.assertRaises(TypeError):\n sf.stack('a')\n\n with self.assertRaises(TypeError):\n sf.stack('b', [\"something\"])\n\n result = sf.stack(\"b\", drop_na = False)\n stacked_column_name = result.column_names()[1]\n expected_result.rename({'X1':stacked_column_name}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # default drop_na=False\n result = sf.stack(\"b\")\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n expected_result.rename({stacked_column_name: 'b'}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # drop_na=True\n result = sf.stack(\"b\", drop_na = True)\n expected_result = SFrame()\n expected_result['a'] = [1,1,2,3,4,4]\n expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n\n sf = SFrame()\n n = 1000000\n sf['a'] = range(1,n)\n sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]\n result = sf.stack('b')\n self.assertTrue(len(result), n * 2)\n\n\n sf = SFrame()\n sf['a'] = SArray(([[]] * 100) + [['a','b']])\n\n # its a dict need 2 types\n with self.assertRaises(ValueError):\n sf.stack('a', 'a', new_column_type=[str, int])\n\n sf.stack('a', 'a', new_column_type=str)\n expected_sf = SFrame()\n expected_sf['a'] = SArray([None] * 100 + [\"a\", \"b\"])\n\n def test_stack_vector(self):\n sf = SFrame()\n sf[\"a\"] = [1,2,3,4,5]\n sf[\"b\"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]\n expected_result = SFrame()\n expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]\n expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]\n\n with self.assertRaises(TypeError):\n sf.stack()\n\n with self.assertRaises(ValueError):\n sf.stack('sss')\n\n with self.assertRaises(TypeError):\n sf.stack('a')\n\n with self.assertRaises(TypeError):\n sf.stack('b', [\"something\"])\n\n result = sf.stack(\"b\", drop_na = False)\n stacked_column_name = result.column_names()[1]\n expected_result.rename({'X1':stacked_column_name}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # default drop_na=False\n result = sf.stack(\"b\")\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n expected_result.rename({stacked_column_name: 'b'}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n 
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # drop_na=True\n result = sf.stack(\"b\", drop_na = True)\n expected_result = SFrame()\n expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]\n expected_result[result.column_names()[1]] = SArray([1,1,2,1,2,3,1,2,3,4], float)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n import array\n sf = SFrame()\n sf['a'] = SArray(([array.array('d')] * 100) + [array.array('d',[1.0,2.0])])\n\n # its a dict need 2 types\n with self.assertRaises(ValueError):\n sf.stack('a', 'a', new_column_type=[str, int])\n\n sf.stack('a', 'a', new_column_type=int)\n expected_sf = SFrame()\n expected_sf['a'] = SArray([None] * 100 + [1, 2])\n\n def test_unstack_dict(self):\n sf = SFrame()\n sf[\"user_id\"] = [1,2,3,4]\n sf[\"user_name\"] = ['user' + str(i) for i in list(sf['user_id'])]\n sf[\"categories\"] = [\n {\"is_restaurant\": 1, },\n {\"is_restaurant\": 0, \"is_retail\": 1 },\n { \"is_retail\": 0 },\n None]\n\n stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)\n\n # normal unstack\n unstacked_sf = stacked_sf.unstack(column_names=['category', 'value'], new_column_name = 'categories')\n # these frames are *almost* equal except user4 will be {} instead of None\n assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort_values(\"user_id\").reset_index(drop=True))\n\n # missing new column name\n unstacked_sf = stacked_sf.unstack(['category', 'value'])\n self.assertEqual(len(unstacked_sf.column_names()), 3)\n unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'}, inplace=True)\n assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort_values(\"user_id\").reset_index(drop=True))\n\n # missing column names\n with self.assertRaises(KeyError):\n stacked_sf.unstack(['category','value1'])\n\n # wrong input\n with self.assertRaises(TypeError):\n stacked_sf.unstack(['category'])\n\n # duplicate new column name\n with self.assertRaises(RuntimeError):\n unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')\n\n def test_unstack_list(self):\n sf = SFrame()\n sf['a'] = [1,2,3,4]\n sf['b'] = [range(10), range(20), range(30), range(50)]\n stacked_sf = sf.stack('b', new_column_name = 'new_b')\n unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')\n self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])\n\n unstacked_sf = stacked_sf.unstack('new_b')\n unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'}, inplace=True)\n self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])\n\n unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')\n unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'}, inplace=True)\n self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])\n\n with self.assertRaises(RuntimeError):\n stacked_sf.unstack('new_b', new_column_name='a')\n\n with self.assertRaises(TypeError):\n stacked_sf.unstack(['new_b'])\n\n with self.assertRaises(KeyError):\n stacked_sf.unstack('non exist')\n\n def test_content_identifier(self):\n sf = SFrame({\"a\":[1,2,3,4],\"b\":[\"1\",\"2\",\"3\",\"4\"]})\n a1 = sf['a'].__get_content_identifier__()\n a2 = sf['a'].__get_content_identifier__()\n self.assertEqual(a1, a2)\n\n def test_random_access(self):\n t1 = list(range(0,100000))\n t2 = [str(i) for i in t1]\n t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))]\n s = SFrame({'t1':t1,'t2':t2})\n # simple slices\n 
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))\n self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))\n self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))\n self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))\n self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))\n # negative slices\n self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))\n self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))\n self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))\n self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))\n # single element reads\n self.assertEqual(s[511], t[511])\n self.assertEqual(s[1912],t[1912])\n self.assertEqual(s[-1], t[-1])\n self.assertEqual(s[-10],t[-10])\n\n # edge case oddities\n self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))\n self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))\n self.assertEqual(len(s[-1:-2]), 0)\n self.assertEqual(len(s[-1:-1000:2]), 0)\n with self.assertRaises(IndexError):\n s[len(s)]\n\n def sort_n_rows(self, nrows=100):\n nrows += 1\n sf = SFrame()\n sf['a'] = range(1, nrows)\n sf['b'] = [float(i) for i in range(1,nrows)]\n sf['c'] = [str(i) for i in range(1,nrows)]\n sf['d'] = [[i, i+1] for i in range(1,nrows)]\n\n reversed_sf = SFrame()\n reversed_sf['a'] = range(nrows-1, 0, -1)\n reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]\n reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]\n reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]\n\n with self.assertRaises(TypeError):\n sf.sort()\n\n with self.assertRaises(TypeError):\n sf.sort(1)\n\n with self.assertRaises(TypeError):\n sf.sort(\"d\")\n\n with self.assertRaises(ValueError):\n sf.sort(\"nonexist\")\n\n with self.assertRaises(TypeError):\n sf.sort({'a':True})\n\n result = sf.sort('a')\n assert_frame_equal(sf.to_dataframe(), result.to_dataframe())\n\n # try a lazy input\n result = sf[sf['a'] > 10].sort('a')\n assert_frame_equal(sf[sf['a'] > 10].to_dataframe(), result.to_dataframe())\n\n result = sf.sort('a', ascending = False)\n assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe())\n\n # lazy reversed\n result = sf[sf['a'] > 10].sort('a', ascending = False)\n assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe())\n\n # lazy reversed\n result = sf[sf['a'] > 10].sort('a', ascending = False)\n assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe())\n\n # sort two columns\n result = sf.sort(['a', 'b'])\n assert_frame_equal(sf.to_dataframe(), result.to_dataframe())\n\n result = sf.sort(['a', 'c'], ascending = False)\n assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe())\n\n result = sf.sort([('a', True), ('b', False)])\n assert_frame_equal(sf.to_dataframe(), result.to_dataframe())\n\n result = sf.sort([('a', False), ('b', True)])\n assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe())\n\n # empty sort should not throw\n sf = SFrame({'x':[]})\n sf.sort('x')\n\n def test_sort(self):\n #self.sort_n_rows(100)\n for i in range(1, 10):\n self.sort_n_rows(i)\n\n def test_dropna(self):\n # empty case\n sf = SFrame()\n self.assertEqual(len(sf.dropna()), 0)\n\n # normal case\n self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())\n test_split = self.employees_sf.dropna_split()\n self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())\n self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())\n\n # test recursively removing nan\n 
test_sf = SFrame({'array':SArray([[1,1],[2,np.nan],[3,3],[4,4],[5,5],[6,np.nan],[7,7],[8, np.nan]], np.ndarray),\n 'lists':SArray([[1], None, [], [4], [5, 5], [6, np.nan], [7], None], list),\n 'dicts':SArray([{1:2},{2:3},{3:4},{},{5:None},{6:7},{7:[7,[7,np.nan]]},None], dict)})\n\n # non-recursive dropna\n self.__test_equal(test_sf.dropna(how='any'),\n test_sf[0:1].append(test_sf[2:7]).to_dataframe())\n test_split = test_sf.dropna_split()\n self.__test_equal(test_split[0], test_sf[0:1].append(test_sf[2:7]).to_dataframe())\n\n self.__test_equal(test_sf.dropna(how='all'), test_sf.to_dataframe());\n test_split = test_sf.dropna_split(how='all')\n self.assertEqual(len(test_split[1]), 0)\n\n # recursive dropna\n self.__test_equal(test_sf.dropna(recursive=True),\n pd.DataFrame({'array':[[1,1],[3,3],[4,4]],'lists':[[1],[],[4]],'dicts':[{1:2},{3:4},{}]}))\n test_split = test_sf.dropna_split(recursive=True)\n self.__test_equal(test_split[0], test_sf[0:1].append(test_sf[2:4]).to_dataframe())\n # nan is not comparable, so we don't check the nan part\n # self.__test_equal(test_split[1], test_sf[1:2].append(test_sf[4:8]).to_dataframe())\n\n # the 'all' case\n self.__test_equal(test_sf.dropna(how='all', recursive=True), test_sf[0:7].to_dataframe())\n test_split = test_sf.dropna_split(how='all', recursive=True)\n self.__test_equal(test_split[0], test_sf[0:7].to_dataframe())\n\n # test 'split' cases\n self.__test_equal(test_sf.dropna('array', recursive=True),\n test_sf[0:1].append(test_sf[2:5]).append(test_sf[6:7]).to_dataframe())\n test_split = test_sf.dropna_split('array', recursive=True)\n self.__test_equal(test_split[0],\n test_sf[0:1].append(test_sf[2:5]).append(test_sf[6:7]).to_dataframe())\n\n self.__test_equal(test_sf.dropna('lists', recursive=True),\n test_sf[0:1].append(test_sf[2:5]).append(test_sf[6:7]).to_dataframe())\n test_split = test_sf.dropna_split('lists', recursive=True)\n self.__test_equal(test_split[0],\n test_sf[0:1].append(test_sf[2:5]).append(test_sf[6:7]).to_dataframe())\n\n self.__test_equal(test_sf.dropna('dicts', recursive=True),\n test_sf[0:4].append(test_sf[5:6]).to_dataframe())\n test_split = test_sf.dropna_split('dicts', recursive=True)\n self.__test_equal(test_split[0],\n test_sf[0:4].append(test_sf[5:6]).to_dataframe())\n\n # create some other test sframe\n test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),\n 'floats':SArray([np.nan,2.,3.,4.,np.nan],float),\n 'strs':SArray(['1',np.nan,'','4',None], str),\n 'lists':SArray([[1],None,[],[1,1,1,1],None], list),\n 'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})\n\n # another normal, but more interesting case\n self.__test_equal(test_sf.dropna(),\n pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))\n test_split = test_sf.dropna_split()\n self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())\n\n # the 'all' case\n self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())\n test_split = test_sf.dropna_split(how='all')\n self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())\n\n # select some columns\n self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())\n test_split = test_sf.dropna_split(['ints','floats'], how='all')\n self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())\n self.__test_equal(test_split[1], 
test_sf[0:1].append(test_sf[4:5]).to_dataframe())\n\n self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())\n test_split = test_sf.dropna_split('strs')\n self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())\n\n self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())\n test_split = test_sf.dropna_split(['strs','dicts'])\n self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())\n\n # bad stuff\n with self.assertRaises(TypeError):\n test_sf.dropna(1)\n test_sf.dropna([1,2])\n test_sf.dropna('strs', how=1)\n test_sf.dropna_split(1)\n test_sf.dropna_split([1,2])\n test_sf.dropna_split('strs', how=1)\n\n with self.assertRaises(ValueError):\n test_sf.dropna('ints', how='blah')\n test_sf.dropna_split('ints', how='blah')\n\n with self.assertRaises(RuntimeError):\n test_sf.dropna('dontexist')\n test_sf.dropna_split('dontexist')\n\n def test_add_row_number(self):\n sf = SFrame(self.__create_test_df(400000))\n\n sf = sf.add_row_number('id')\n self.assertEqual(list(sf['id']), list(range(0,400000)))\n\n del sf['id']\n\n sf = sf.add_row_number('id', -20000)\n self.assertEqual(list(sf['id']), list(range(-20000,380000)))\n del sf['id']\n\n sf = sf.add_row_number('id', 40000)\n self.assertEqual(list(sf['id']), list(range(40000,440000)))\n\n with self.assertRaises(RuntimeError):\n sf.add_row_number('id')\n\n with self.assertRaises(TypeError):\n sf = sf.add_row_number(46)\n sf = sf.add_row_number('id2',start='hi')\n\n def test_inplace_not_inplace(self):\n # add row number\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.add_row_number('id', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('id' in sf2.column_names())\n self.assertTrue('id' not in sf.column_names())\n\n sf2 = sf.add_row_number('id', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('id' in sf2.column_names())\n\n # add column\n sf = SFrame(self.__create_test_df(1000))\n newcol = SArray(range(1000))\n sf2 = sf.add_column(newcol, 'newcol', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('newcol' in sf2.column_names())\n self.assertTrue('newcol' not in sf.column_names())\n sf2 = sf.add_column(newcol, 'newcol', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('newcol' in sf2.column_names())\n\n # add columns\n sf = SFrame(self.__create_test_df(1000))\n newcols = SFrame({'newcol':range(1000), 'newcol2':range(1000)})\n sf2 = sf.add_columns(newcols, inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('newcol' in sf2.column_names())\n self.assertTrue('newcol2' in sf2.column_names())\n self.assertTrue('newcol' not in sf.column_names())\n self.assertTrue('newcol2' not in sf.column_names())\n sf2 = sf.add_columns(newcols, inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('newcol' in sf2.column_names())\n self.assertTrue('newcol2' in sf2.column_names())\n\n # remove column\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.remove_column('int_data', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('int_data' in sf.column_names())\n self.assertTrue('int_data' not in sf2.column_names())\n sf2 = sf.remove_column('int_data', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('int_data' not in sf2.column_names())\n\n # remove columns\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.remove_columns(['int_data', 'float_data'], inplace=False)\n self.assertTrue(sf2 
is not sf)\n self.assertTrue('int_data' in sf.column_names())\n self.assertTrue('float_data' in sf.column_names())\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n sf2 = sf.remove_columns(['int_data', 'float_data'], inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n\n # rename\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.rename({'int_data':'int','float_data':'float'}, inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('int_data' in sf.column_names())\n self.assertTrue('float_data' in sf.column_names())\n self.assertTrue('int' not in sf.column_names())\n self.assertTrue('float' not in sf.column_names())\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n self.assertTrue('int' in sf2.column_names())\n self.assertTrue('float' in sf2.column_names())\n sf2 = sf.rename({'int_data':'int','float_data':'float'}, inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n self.assertTrue('int' in sf2.column_names())\n self.assertTrue('float' in sf2.column_names())\n\n # swap\n sf = SFrame(self.__create_test_df(1000))\n old_cnames = sf.column_names()\n\n # swap int_data and float_data\n new_cnames = sf.column_names()\n int_data_idx = new_cnames.index('int_data')\n float_data_idx = new_cnames.index('float_data')\n new_cnames[int_data_idx],new_cnames[float_data_idx] = new_cnames[float_data_idx],new_cnames[int_data_idx]\n\n\n\n sf2 = sf.swap_columns('int_data', 'float_data', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertEqual(sf.column_names(), old_cnames)\n self.assertEqual(sf2.column_names(), new_cnames)\n\n sf2 = sf.swap_columns('int_data', 'float_data', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertEqual(sf2.column_names(), new_cnames)\n\n\n def test_check_lazy_sframe_size(self):\n # empty sframe, materialized, has_size\n sf = SFrame()\n self.assertTrue(sf.__is_materialized__())\n self.assertTrue(sf.__has_size__())\n\n # add one column, not materialized, has_size\n sf['a'] = range(1000)\n self.assertTrue(sf.__is_materialized__())\n self.assertTrue(sf.__has_size__())\n\n # materialize it, materialized, has_size\n sf['a'] = range(1000)\n sf.materialize()\n self.assertTrue(sf.__is_materialized__())\n self.assertTrue(sf.__has_size__())\n\n # logical filter, not materialized, not has_size\n sf = sf[sf['a'] > 5000]\n self.assertFalse(sf.__is_materialized__())\n self.assertFalse(sf.__has_size__())\n\n def test_lazy_logical_filter_sarray(self):\n g=SArray(range(10000))\n g2=SArray(range(10000))\n a=g[g>10]\n a2=g2[g>10]\n z=a[a2>20]\n self.assertEqual(len(z), 9979)\n\n def test_lazy_logical_filter_sframe(self):\n g=SFrame({'a':range(10000)})\n g2=SFrame({'a':range(10000)})\n a=g[g['a']>10]\n a2=g2[g['a']>10]\n z=a[a2['a']>20]\n self.assertEqual(len(z), 9979)\n\n\n def test_column_manipulation_of_lazy_sframe(self):\n g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})\n g = g[g['id'] > 2]\n del g['id']\n # if lazy column deletion is quirky, this will cause an exception\n self.assertEqual(list(g[0:2]['a']), [3,4])\n g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})\n g = g[g['id'] > 2]\n g.swap_columns('a','id', inplace=True)\n # if lazy column swap is quirky, this will cause an exception\n self.assertEqual(list(g[0:2]['a']), [3,4])\n\n def 
test_empty_sarray(self):\n with util.TempDirectory() as f:\n sf = SArray()\n sf.save(f)\n sf2 = SArray(f)\n self.assertEqual(len(sf2), 0)\n\n def test_empty_sframe(self):\n with util.TempDirectory() as f:\n sf = SFrame()\n sf.save(f)\n sf2 = SFrame(f)\n self.assertEqual(len(sf2), 0)\n self.assertEqual(sf2.num_columns(), 0)\n\n def test_none_column(self):\n sf = SFrame({'a':[1,2,3,4,5]})\n sf['b'] = None\n self.assertEqual(sf['b'].dtype, float)\n df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})\n self.__test_equal(sf, df)\n\n sa = SArray.from_const(None, 100)\n self.assertEqual(list(sa), [None] * 100)\n self.assertEqual(sa.dtype, float)\n\n def test_apply_with_partial(self):\n sf = SFrame({'a': [1, 2, 3, 4, 5]})\n\n def concat_fn(character, row):\n return '%s%d' % (character, row['a'])\n\n my_partial_fn = functools.partial(concat_fn, 'x')\n sa = sf.apply(my_partial_fn)\n self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])\n\n def test_apply_with_functor(self):\n sf = SFrame({'a': [1, 2, 3, 4, 5]})\n\n class Concatenator(object):\n def __init__(self, character):\n self.character = character\n\n def __call__(self, row):\n return '%s%d' % (self.character, row['a'])\n\n concatenator = Concatenator('x')\n sa = sf.apply(concatenator)\n self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])\n\n def test_save_sframe(self):\n '''save lazily evaluated SFrame should not materialize to target folder\n '''\n data = SFrame()\n data['x'] = range(100)\n data['x'] = data['x'] > 50\n #lazy and good\n tmp_dir = tempfile.mkdtemp()\n data.save(tmp_dir)\n shutil.rmtree(tmp_dir)\n print(data)\n\n def test_empty_argmax_does_not_fail(self):\n # an empty argmax should not result in a crash\n sf = SFrame({'id': [0, 0, 0, 1, 1, 2, 2],\n 'value': [3.0, 2.0, 2.3, None, None, 4.3, 1.3],\n 'category': ['A', 'B', 'A', 'E', 'A', 'A', 'B']})\n sf.groupby('id', aggregate.ARGMAX('value', 'category'))\n\n def test_cache_invalidation(self):\n # Changes to the SFrame should invalidate the indexing cache.\n\n X = SFrame({'a' : range(4000),\n 'b' : range(4000)})\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : i, 'b' : i})\n\n X['a'] = range(1000, 5000)\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i, 'b' : i})\n\n del X['b']\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i})\n\n X['b'] = X['a']\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i, 'b' : 1000 + i})\n\n X.rename({'b' : 'c'}, inplace=True)\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i, 'c' : 1000 + i})\n\n def test_to_numpy(self):\n X = SFrame({'a' : range(100),\n 'b' : range(100)})\n import numpy as np\n import numpy.testing as nptest\n Y = np.transpose(np.array([range(100), range(100)]))\n nptest.assert_array_equal(X.to_numpy(), Y)\n\n X['b'] = X['b'].astype(str)\n s = [str(i) for i in range(100)]\n Y = np.transpose(np.array([s, s]))\n nptest.assert_array_equal(X.to_numpy(), Y)\n\n @mock.patch(__name__+'.sqlite3.Cursor', spec=True)\n @mock.patch(__name__+'.sqlite3.Connection', spec=True)\n def test_from_sql(self, mock_conn, mock_cursor):\n # Set up mock connection and cursor\n conn = mock_conn('example.db')\n curs = mock_cursor()\n conn.cursor.return_value = curs\n sf_type_codes = [44,44,41,22,114,199,43]\n\n sf_data = list(zip(*self.all_type_cols))\n sf_iter = sf_data.__iter__()\n\n def mock_fetchone():\n try:\n return next(sf_iter)\n except StopIteration:\n return None\n\n def mock_fetchmany(size=1):\n count = 0\n 
ret_list = []\n for i in sf_iter:\n if count == curs.arraysize:\n break\n ret_list.append(i)\n count += 1\n\n return ret_list\n\n curs.fetchone.side_effect = mock_fetchone\n curs.fetchmany.side_effect = mock_fetchmany\n\n curs.description = [['X'+str(i+1),sf_type_codes[i]]+[None for j in range(5)] for i in range(len(sf_data[0]))]\n\n # bigger than cache, no Nones\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5, dbapi_module=dbapi2_mock())\n _assert_sframe_equal(sf, self.sf_all_types)\n\n # smaller than cache, no Nones\n sf_iter = sf_data.__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=100, dbapi_module=dbapi2_mock())\n _assert_sframe_equal(sf, self.sf_all_types)\n\n none_col = [None for i in range(5)]\n nones_in_cache = list(zip(*[none_col for i in range(len(sf_data[0]))]))\n none_sf = SFrame({'X'+str(i):none_col for i in range(1,len(sf_data[0])+1)})\n test_data = (nones_in_cache+sf_data)\n sf_iter = test_data.__iter__()\n\n # more None rows than cache & types in description\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5, dbapi_module=dbapi2_mock())\n sf_inferred_types = SFrame()\n expected_types = [float,float,str,str,str,str,dt.datetime]\n for i in zip(self.sf_all_types.column_names(),expected_types):\n new_col = SArray(none_col).astype(i[1])\n new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: i[1](x) if i[1] is not dt.datetime else x))\n sf_inferred_types.add_column(new_col, inplace=True)\n\n # Don't test the string representation of dict and list; there are\n # funky consistency issues with the string representations of these\n sf.remove_columns(['X5', 'X6'], inplace=True)\n sf_inferred_types.remove_columns(['X5', 'X6'], inplace=True)\n _assert_sframe_equal(sf, sf_inferred_types)\n\n # more None rows than cache & no type information\n for i in range(len(curs.description)):\n curs.description[i][1] = None\n sf_iter = test_data.__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5, dbapi_module=dbapi2_mock())\n\n sf_inferred_types = SFrame()\n expected_types = [str for i in range(len(sf_data[0]))]\n for i in zip(self.sf_all_types.column_names(),expected_types):\n new_col = SArray(none_col).astype(i[1])\n new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: str(x)))\n sf_inferred_types.add_column(new_col, inplace=True)\n\n # Don't test the string representation of dict, could be out of order\n sf.remove_columns(['X5', 'X6'], inplace=True)\n sf_inferred_types.remove_columns(['X5', 'X6'], inplace=True)\n _assert_sframe_equal(sf, sf_inferred_types)\n\n ### column_type_hints tests\n sf_iter = test_data.__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5,\n dbapi_module=dbapi2_mock(), column_type_hints=str)\n sf.remove_columns(['X5', 'X6'], inplace=True)\n _assert_sframe_equal(sf, sf_inferred_types)\n\n # Provide unhintable types\n sf_iter = test_data.__iter__()\n expected_types = [int,float,str,array.array,list,dict,dt.datetime]\n with self.assertRaises(TypeError):\n sf = SFrame.from_sql(conn,\n \"SELECT * FROM test_table\", type_inference_rows=5,\n dbapi_module=dbapi2_mock(), column_type_hints=expected_types)\n\n sf_iter = test_data.__iter__()\n expected_types = {'X'+str(i+1):expected_types[i] for i in range(3)}\n sf = SFrame.from_sql(conn,\n \"SELECT * FROM test_table\", type_inference_rows=10,\n dbapi_module=dbapi2_mock(), column_type_hints=expected_types)\n 
_assert_sframe_equal(sf[5:],self.sf_all_types)\n\n # Test a float forced to a str\n sf_iter = test_data.__iter__()\n expected_types['X2'] = str\n self.sf_all_types['X2'] = self.sf_all_types['X2'].apply(lambda x: str(x))\n sf = SFrame.from_sql(conn,\n \"SELECT * FROM test_table\", type_inference_rows=10,\n dbapi_module=dbapi2_mock(), column_type_hints=expected_types)\n _assert_sframe_equal(sf[5:],self.sf_all_types)\n\n # Type unsupported by sframe\n curs.description = [['X1',44],['X2',44]]\n sf_iter = [[complex(4.5,3),1], [complex(3.4,5),2]].__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\")\n expected_sf = SFrame({'X1':[\"(4.5+3j)\",\"(3.4+5j)\"],'X2':[1,2]})\n _assert_sframe_equal(sf, expected_sf)\n\n # bad DBAPI version!\n bad_version = dbapi2_mock()\n bad_version.apilevel = \"1.0 \"\n with self.assertRaises(NotImplementedError):\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", dbapi_module=bad_version)\n\n # Bad module\n with self.assertRaises(AttributeError):\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", dbapi_module=os)\n\n # Bad connection\n with self.assertRaises(AttributeError):\n sf = SFrame.from_sql(4, \"SELECT * FROM test_table\")\n\n # Empty query result\n curs.description = []\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", dbapi_module=dbapi2_mock())\n _assert_sframe_equal(sf, SFrame())\n\n @mock.patch(__name__+'.sqlite3.Cursor', spec=True)\n @mock.patch(__name__+'.sqlite3.Connection', spec=True)\n def test_to_sql(self, mock_conn, mock_cursor):\n conn = mock_conn('example.db')\n curs = mock_cursor()\n insert_stmt = \"INSERT INTO ins_test (X1,X2,X3,X4,X5,X6,X7) VALUES ({0},{1},{2},{3},{4},{5},{6})\"\n num_cols = len(self.sf_all_types.column_names())\n test_cases = [\n ('qmark',insert_stmt.format(*['?' 
for i in range(num_cols)])),\n ('numeric',insert_stmt.format(*[':'+str(i) for i in range(1,num_cols+1)])),\n ('named',insert_stmt.format(*[':X'+str(i) for i in range(1,num_cols+1)])),\n ('format',insert_stmt.format(*['%s' for i in range(num_cols)])),\n ('pyformat',insert_stmt.format(*['%(X'+str(i)+')s' for i in range(1,num_cols+1)])),\n ]\n for i in test_cases:\n conn.cursor.return_value = curs\n\n mock_mod = dbapi2_mock()\n mock_mod.paramstyle = i[0]\n self.sf_all_types.to_sql(conn, \"ins_test\", dbapi_module=mock_mod)\n conn.cursor.assert_called_once_with()\n calls = []\n col_names = self.sf_all_types.column_names()\n for j in self.sf_all_types:\n if i[0] == 'named' or i[0] == 'pyformat':\n calls.append(mock.call(i[1],j))\n else:\n calls.append(mock.call(i[1],[j[k] for k in col_names]))\n curs.execute.assert_has_calls(calls, any_order=False)\n self.assertEqual(curs.execute.call_count, len(self.sf_all_types))\n conn.commit.assert_called_once_with()\n curs.close.assert_called_once_with()\n\n conn.reset_mock()\n curs.reset_mock()\n\n # bad DBAPI version!\n bad_version = dbapi2_mock()\n bad_version.apilevel = \"1.0 \"\n with self.assertRaises(NotImplementedError):\n self.sf_all_types.to_sql(conn, \"ins_test\", dbapi_module=bad_version)\n\n # bad paramstyle\n bad_paramstyle = dbapi2_mock()\n bad_paramstyle.paramstyle = 'foo'\n with self.assertRaises(TypeError):\n self.sf_all_types.to_sql(conn, \"ins_test\", dbapi_module=bad_paramstyle)\n\n\n def test_materialize(self):\n sf = SFrame({'a':range(100)})\n sf = sf[sf['a'] > 10]\n self.assertFalse(sf.is_materialized())\n sf.materialize()\n self.assertTrue(sf.is_materialized())\n\n def test_materialization_slicing(self):\n # Has been known to fail.\n g=SFrame({'a':range(100)})[:10]\n g['b'] = g['a'] + 1\n g['b'].materialize()\n g.materialize()\n\n def test_copy(self):\n from copy import copy\n sf = generate_random_sframe(100, \"Cns\")\n sf_copy = copy(sf)\n\n assert sf is not sf_copy\n\n _assert_sframe_equal(sf, sf_copy)\n\n def test_deepcopy(self):\n from copy import deepcopy\n sf = generate_random_sframe(100, \"Cns\")\n sf_copy = deepcopy(sf)\n\n assert sf is not sf_copy\n\n _assert_sframe_equal(sf, sf_copy)\n\n def test_builtins(self):\n import builtins\n import six\n\n sf = SFrame({'dict': [builtins.dict({'foo': 'bar'})],\n 'float': [builtins.float(3.14)],\n 'int': [builtins.int(12)],\n 'bool': [builtins.bool(False)],\n 'list': [builtins.list([1,2,3])],\n 'str': [builtins.str('foo')],\n 'tuple': [builtins.tuple((1,2))],\n })\n sf2 = SFrame({'dict': [{'foo': 'bar'}],\n 'float': [3.14],\n 'int': [12],\n 'bool': [False],\n 'list': [[1,2,3]],\n 'str': ['foo'],\n 'tuple': [(1,2)],\n })\n\n if six.PY2:\n sf = sf.add_columns(SFrame(\n {'long': [builtins.long(12)], 'unicode': [builtins.unicode('foo')]}))\n sf2 = sf2.add_columns(SFrame(\n {'long': [12], 'unicode': [unicode('foo')]}))\n\n _assert_sframe_equal(sf, sf2)\n\n def test_add_column_nonSArray(self):\n sf = SFrame()\n sf = sf.add_column([1,2,3,4],'x')\n\n sf_test = SFrame()\n sf_test['x'] = SArray([1,2,3,4])\n\n _assert_sframe_equal(sf, sf_test)\n\n\n def test_add_column_noniterable1(self):\n sf = SFrame()\n sf = sf.add_column([1,2,3,4],'x')\n sf = sf.add_column(5,'y')\n\n sf_test = SFrame()\n sf_test['x'] = SArray([1,2,3,4])\n sf_test['y'] = 5\n\n _assert_sframe_equal(sf, sf_test)\n\n\n\n def test_add_column_noniterable2(self):\n # If SFrame is empty then the passed data should be treated as an SArray of size 1\n sf = SFrame()\n sf = sf.add_column(5,'y')\n\n sf_test = SFrame()\n sf_test['y'] = 
SArray([5])\n\n _assert_sframe_equal(sf, sf_test)\n\n\nif __name__ == \"__main__\":\n\n import sys\n\n # Check if we are supposed to connect to another server\n for i, v in enumerate(sys.argv):\n if v.startswith(\"ipc://\"):\n _launch(v)\n\n # The rest of the arguments need to get passed through to\n # the unittest module\n del sys.argv[i]\n break\n\n unittest.main()\n"
] | [
[
"numpy.unique",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.std",
"numpy.mean",
"numpy.var",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
kamalkraj/tensorflow | [
"284b0dd7a3379c3ae6a1def7a30603b0fc1e108e"
] | [
"tensorflow/python/keras/engine/base_layer.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Contains the base Layer class, from which all layers inherit.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport functools\nimport itertools\nimport threading\nimport weakref\n\nimport numpy as np\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom google.protobuf import json_format\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.autograph.core import ag_ctx\nfrom tensorflow.python.autograph.impl import api as autograph\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.engine import keras_tensor\nfrom tensorflow.python.keras.engine import node as node_module\nfrom tensorflow.python.keras.mixed_precision.experimental import autocast_variable\nfrom tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.saving.saved_model import layer_serialization\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.keras.utils import version_utils\n# A module that only depends on `keras.layers` import these from here.\nfrom tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import\nfrom tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import 
resource_variable_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import layer_utils as trackable_layer_utils\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.tools.docs import doc_controls\n\n# Prefix that is added to the TF op layer names.\n_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'\n\n# TODO(mdan): Should we have a single generic type for types that can be passed\n# to tf.cast?\n_AUTOCAST_TYPES = (ops.Tensor, sparse_tensor.SparseTensor,\n ragged_tensor.RaggedTensor)\n\n_keras_layers_gauge = monitoring.BoolGauge('/tensorflow/api/keras/layers',\n 'keras layers usage', 'method')\n_keras_model_gauge = monitoring.BoolGauge(\n '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')\n\n\n@keras_export('keras.layers.Layer')\nclass Layer(module.Module, version_utils.LayerVersionSelector):\n \"\"\"This is the class from which all layers inherit.\n\n A layer is a callable object that takes as input one or more tensors and\n that outputs one or more tensors. It involves *computation*, defined\n in the `call()` method, and a *state* (weight variables), defined\n either in the constructor `__init__()` or in the `build()` method.\n\n Users will just instantiate a layer and then treat it as a callable.\n\n Arguments:\n trainable: Boolean, whether the layer's variables should be trainable.\n name: String name of the layer.\n dtype: The dtype of the layer's computations and weights (default of\n `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type\n of the first input in TensorFlow 1).\n dynamic: Set this to `True` if your layer should only be run eagerly, and\n should not be used to generate a static computation graph.\n This would be the case for a Tree-RNN or a recursive network,\n for example, or generally for any layer that manipulates tensors\n using Python control flow. If `False`, we assume that the layer can\n safely be used to generate a static computation graph.\n\n Attributes:\n name: The name of the layer (string).\n dtype: The dtype of the layer's computations and weights. If mixed\n precision is used with a `tf.keras.mixed_precision.experimental.Policy`,\n this is instead just the dtype of the layer's weights, as the computations\n are done in a different dtype.\n trainable_weights: List of variables to be included in backprop.\n non_trainable_weights: List of variables that should not be\n included in backprop.\n weights: The concatenation of the lists trainable_weights and\n non_trainable_weights (in this order).\n trainable: Whether the layer should be trained (boolean), i.e. 
whether\n its potentially-trainable weights should be returned as part of\n `layer.trainable_weights`.\n input_spec: Optional (list of) `InputSpec` object(s) specifying the\n constraints on inputs that can be accepted by the layer.\n\n We recommend that descendants of `Layer` implement the following methods:\n\n * `__init__()`: Defines custom layer attributes, and creates layer state\n variables that do not depend on input shapes, using `add_weight()`.\n * `build(self, input_shape)`: This method can be used to create weights that\n depend on the shape(s) of the input(s), using `add_weight()`. `__call__()`\n will automatically build the layer (if it has not been built yet) by\n calling `build()`.\n * `call(self, *args, **kwargs)`: Called in `__call__` after making sure\n `build()` has been called. `call()` performs the logic of applying the\n layer to the input tensors (which should be passed in as argument).\n Two reserved keyword arguments you can optionally use in `call()` are:\n - `training` (boolean, whether the call is in\n inference mode or training mode)\n - `mask` (boolean tensor encoding masked timesteps in the input, used\n in RNN layers)\n * `get_config(self)`: Returns a dictionary containing the configuration used\n to initialize this layer. If the keys differ from the arguments\n in `__init__`, then override `from_config(self)` as well.\n This method is used when saving\n the layer or a model that contains this layer.\n\n Examples:\n\n Here's a basic example: a layer with two variables, `w` and `b`,\n that returns `y = w . x + b`.\n It shows how to implement `build()` and `call()`.\n Variables set as attributes of a layer are tracked as weights\n of the layers (in `layer.weights`).\n\n ```python\n class SimpleDense(Layer):\n\n def __init__(self, units=32):\n super(SimpleDense, self).__init__()\n self.units = units\n\n def build(self, input_shape): # Create the state of the layer (weights)\n w_init = tf.random_normal_initializer()\n self.w = tf.Variable(\n initial_value=w_init(shape=(input_shape[-1], self.units),\n dtype='float32'),\n trainable=True)\n b_init = tf.zeros_initializer()\n self.b = tf.Variable(\n initial_value=b_init(shape=(self.units,), dtype='float32'),\n trainable=True)\n\n def call(self, inputs): # Defines the computation from inputs to outputs\n return tf.matmul(inputs, self.w) + self.b\n\n # Instantiates the layer.\n linear_layer = SimpleDense(4)\n\n # This will also call `build(input_shape)` and create the weights.\n y = linear_layer(tf.ones((2, 2)))\n assert len(linear_layer.weights) == 2\n\n # These weights are trainable, so they're listed in `trainable_weights`:\n assert len(linear_layer.trainable_weights) == 2\n ```\n\n Note that the method `add_weight()` offers a shortcut to create weights:\n\n ```python\n class SimpleDense(Layer):\n\n def __init__(self, units=32):\n super(SimpleDense, self).__init__()\n self.units = units\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)\n\n def call(self, inputs):\n return tf.matmul(inputs, self.w) + self.b\n ```\n\n Besides trainable weights, updated via backpropagation during training,\n layers can also have non-trainable weights. These weights are meant to\n be updated manually during `call()`. 
Here's a example layer that computes\n the running sum of its inputs:\n\n ```python\n class ComputeSum(Layer):\n\n def __init__(self, input_dim):\n super(ComputeSum, self).__init__()\n # Create a non-trainable weight.\n self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),\n trainable=False)\n\n def call(self, inputs):\n self.total.assign_add(tf.reduce_sum(inputs, axis=0))\n return self.total\n\n my_sum = ComputeSum(2)\n x = tf.ones((2, 2))\n\n y = my_sum(x)\n print(y.numpy()) # [2. 2.]\n\n y = my_sum(x)\n print(y.numpy()) # [4. 4.]\n\n assert my_sum.weights == [my_sum.total]\n assert my_sum.non_trainable_weights == [my_sum.total]\n assert my_sum.trainable_weights == []\n ```\n\n For more information about creating layers, see the guide\n [Writing custom layers and models with Keras](\n https://www.tensorflow.org/guide/keras/custom_layers_and_models)\n\n About the layer's `dtype` attribute:\n\n Each layer has a dtype, which is typically the dtype of the layer's\n computations and variables. A layer's dtype can be queried via the\n `Layer.dtype` property. The dtype is specified with the `dtype` constructor\n argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`\n if no dtype is passed. `floatx()` itself defaults to \"float32\". Additionally,\n layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed\n precision is used, layers may have different computation and variable dtypes.\n See `tf.keras.mixed_precision.experimental.Policy` for details on layer\n dtypes.\n \"\"\"\n\n # See tf.Module for the usage of this property.\n # The key for _obj_reference_counts_dict is a Trackable, which could be a\n # variable or layer etc. tf.Module._flatten will fail to flatten the key\n # since it is trying to convert Trackable to a string. This attribute can be\n # ignored even after the fix of nest lib, since the trackable object should\n # already been available as individual attributes. _obj_reference_counts_dict\n # just contains a copy of them.\n _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(\n ('_obj_reference_counts_dict',),\n module.Module._TF_MODULE_IGNORED_PROPERTIES\n ))\n\n # When loading from a SavedModel, Layers typically can be revived into a\n # generic Layer wrapper. Sometimes, however, layers may implement methods\n # that go beyond this wrapper, as in the case of PreprocessingLayers'\n # `adapt` method. 
When this is the case, layer implementers can override\n # must_restore_from_config to return True; layers with this property must\n # be restored into their actual objects (and will fail if the object is\n # not available to the restoration code).\n _must_restore_from_config = False\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self,\n trainable=True,\n name=None,\n dtype=None,\n dynamic=False,\n **kwargs):\n # These properties should be set by the user via keyword arguments.\n # note that 'dtype', 'input_shape' and 'batch_input_shape'\n # are only applicable to input layers: do not pass these keywords\n # to non-input layers.\n allowed_kwargs = {\n 'input_dim',\n 'input_shape',\n 'batch_input_shape',\n 'batch_size',\n 'weights',\n 'activity_regularizer',\n 'autocast',\n }\n # Validate optional keyword arguments.\n generic_utils.validate_kwargs(kwargs, allowed_kwargs)\n\n # Mutable properties\n # Indicates whether the layer's weights are updated during training\n # and whether the layer's updates are run during training.\n self._trainable = trainable\n # A stateful layer is a layer whose updates are run during inference too,\n # for instance stateful RNNs.\n self._stateful = False\n # Indicates whether `build` needs to be called upon layer call, to create\n # the layer's weights.\n self.built = False\n # Record the build input shape for loading purposes.\n # TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is\n # submitted.\n self._build_input_shape = None\n self._saved_model_inputs_spec = None\n # Provides information about which inputs are compatible with the layer.\n self._input_spec = None\n\n # `Layer.compute_mask` will be called at the end of `Layer.__call__` if\n # `Layer.compute_mask` is overridden, or if the `Layer` subclass sets\n # `self.supports_masking=True`.\n self._supports_masking = not generic_utils.is_default(self.compute_mask)\n\n self._init_set_name(name)\n self._activity_regularizer = regularizers.get(\n kwargs.pop('activity_regularizer', None))\n self._maybe_create_attribute('_trainable_weights', [])\n self._maybe_create_attribute('_non_trainable_weights', [])\n self._updates = []\n # Object to store all thread local layer properties.\n self._thread_local = threading.local()\n # A list of zero-argument lambdas which return Tensors, used for variable\n # regularizers.\n self._callable_losses = []\n # A list of symbolic Tensors containing activity regularizers and losses\n # manually added through `add_loss` in graph-building mode.\n self._losses = []\n # A list of metric instances corresponding to the symbolic metric tensors\n # added using the `add_metric` API.\n self._metrics = []\n # Ensures the same metric is not added multiple times in `MirroredStrategy`.\n self._metrics_lock = threading.Lock()\n\n # Both graph and subclassed networks have a dtype policy. For graph\n # networks, the policy's compute and variable dtypes are ignored, but other\n # fields, like the loss scale, are used by Models. For subclassed networks,\n # the compute and variable dtypes are used as like any ordinary layer.\n self._set_dtype_policy(dtype)\n # Boolean indicating whether the layer automatically casts its inputs to the\n # layer's compute_dtype.\n self._autocast = kwargs.get('autocast',\n base_layer_utils.v2_dtype_behavior_enabled())\n\n # Dependencies tracked via attribute assignment.\n # All layers in order of horizontal graph traversal.\n # Entries are unique. 
For models includes input and output layers.\n self._maybe_create_attribute('_layers', [])\n\n # These lists will be filled via successive calls\n # to self._add_inbound_node().\n # Used in symbolic mode only, only in conjunction with graph-networks\n self._inbound_nodes = []\n self._outbound_nodes = []\n\n self._init_call_fn_args()\n\n # Whether the `call` method can be used to build a TF graph without issues.\n # This attribute has no effect if the model is created using the Functional\n # API. Instead, `model.dynamic` is determined based on the internal layers.\n self._dynamic = dynamic\n\n # Manage input shape information if passed.\n if 'input_dim' in kwargs and 'input_shape' not in kwargs:\n # Backwards compatibility: alias 'input_dim' to 'input_shape'.\n kwargs['input_shape'] = (kwargs['input_dim'],)\n if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:\n # In this case we will later create an input layer\n # to insert before the current layer\n if 'batch_input_shape' in kwargs:\n batch_input_shape = tuple(kwargs['batch_input_shape'])\n elif 'input_shape' in kwargs:\n if 'batch_size' in kwargs:\n batch_size = kwargs['batch_size']\n else:\n batch_size = None\n batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])\n self._batch_input_shape = batch_input_shape\n\n # Manage initial weight values if passed.\n self._initial_weights = kwargs.get('weights', None)\n\n # Whether the layer will track any layers that is set as attribute on itself\n # as sub-layers, the weights from the sub-layers will be included in the\n # parent layer's variables() as well.\n # Default to True, which means auto tracking is turned on. Certain subclass\n # might want to turn it off, like Sequential model.\n self._auto_track_sub_layers = True\n\n @trackable.no_automatic_dependency_tracking\n @generic_utils.default\n def build(self, input_shape):\n \"\"\"Creates the variables of the layer (optional, for subclass implementers).\n\n This is a method that implementers of subclasses of `Layer` or `Model`\n can override if they need a state-creation step in-between\n layer instantiation and layer call.\n\n This is typically used to create the weights of `Layer` subclasses.\n\n Arguments:\n input_shape: Instance of `TensorShape`, or list of instances of\n `TensorShape` if the layer expects a list of inputs\n (one instance per input).\n \"\"\"\n # Only record the build input shapes of overridden build methods.\n if not hasattr(self.build, '_is_default'):\n self._build_input_shape = input_shape\n self.built = True\n\n @doc_controls.for_subclass_implementers\n def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n \"\"\"This is where the layer's logic lives.\n\n Note here that `call()` method in `tf.keras` is little bit different\n from `keras` API. In `keras` API, you can pass support masking for\n layers as additional arguments. Whereas `tf.keras` has `compute_mask()`\n method to support masking.\n\n Arguments:\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments. Currently unused.\n\n Returns:\n A tensor or list/tuple of tensors.\n \"\"\"\n return inputs\n\n @doc_controls.for_subclass_implementers\n def _add_trackable(self, trackable_object, trainable):\n \"\"\"Adds a Trackable object to this layer's state.\n\n Arguments:\n trackable_object: The tf.tracking.Trackable object to add.\n trainable: Boolean, whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases) or\n \"non_trainable_variables\" (e.g. 
BatchNorm mean and variance).\n\n Returns:\n The TrackableWeightHandler used to track this object.\n \"\"\"\n handler = base_layer_utils.TrackableWeightHandler(trackable_object)\n if trainable:\n self._trainable_weights.append(handler)\n else:\n self._non_trainable_weights.append(handler)\n return handler\n\n @doc_controls.for_subclass_implementers\n def add_weight(self,\n name=None,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=None,\n constraint=None,\n partitioner=None,\n use_resource=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE,\n **kwargs):\n \"\"\"Adds a new variable to the layer.\n\n Arguments:\n name: Variable name.\n shape: Variable shape. Defaults to scalar if unspecified.\n dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n initializer: Initializer instance (callable).\n regularizer: Regularizer instance (callable).\n trainable: Boolean, whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases)\n or \"non_trainable_variables\" (e.g. BatchNorm mean and variance).\n Note that `trainable` cannot be `True` if `synchronization`\n is set to `ON_READ`.\n constraint: Constraint instance (callable).\n partitioner: Partitioner to be passed to the `Trackable` API.\n use_resource: Whether to use `ResourceVariable`.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n **kwargs: Additional keyword arguments. Accepted values are `getter`,\n `collections`, `experimental_autocast` and `caching_device`.\n\n Returns:\n The created variable. Usually either a `Variable` or `ResourceVariable`\n instance. 
If `partitioner` is not `None`, a `PartitionedVariable`\n instance is returned.\n\n Raises:\n RuntimeError: If called with partitioned variable regularization and\n eager execution is enabled.\n ValueError: When giving unsupported dtype and no initializer or when\n trainable has been set to True with synchronization set as `ON_READ`.\n \"\"\"\n if shape is None:\n shape = ()\n # Validate optional keyword arguments.\n for kwarg in kwargs:\n if kwarg not in ['getter', 'collections', 'experimental_autocast',\n 'caching_device']:\n raise TypeError('Unknown keyword argument:', kwarg)\n getter = kwargs.pop('getter', base_layer_utils.make_variable)\n collections_arg = kwargs.pop('collections', None)\n # 'experimental_autocast' can be set to False by the caller to indicate an\n # AutoCastVariable should never be created.\n autocast = kwargs.pop('experimental_autocast', True)\n # See the docstring for tf.Variable about the details for caching_device.\n caching_device = kwargs.pop('caching_device', None)\n\n if dtype is None:\n dtype = self.dtype or backend.floatx()\n dtype = dtypes.as_dtype(dtype)\n if self._dtype_policy.variable_dtype is None:\n # The policy is \"_infer\", so we infer the policy from the variable dtype.\n self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))\n initializer = initializers.get(initializer)\n regularizer = regularizers.get(regularizer)\n constraint = constraints.get(constraint)\n\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError(\n 'Synchronization value can be set to '\n 'VariableSynchronization.ON_READ only for non-trainable variables. '\n 'You have specified trainable=True and '\n 'synchronization=VariableSynchronization.ON_READ.')\n else:\n # Set trainable to be false when variable is to be synced on read.\n trainable = False\n elif trainable is None:\n trainable = True\n\n # Initialize variable when no initializer provided\n if initializer is None:\n # If dtype is DT_FLOAT, provide a uniform unit scaling initializer\n if dtype.is_floating:\n initializer = initializers.get('glorot_uniform')\n # If dtype is DT_INT/DT_UINT, provide a default value `zero`\n # If dtype is DT_BOOL, provide a default value `FALSE`\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:\n initializer = initializers.get('zeros')\n # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?\n else:\n raise ValueError('An initializer for variable %s of type %s is required'\n ' for layer %s' % (name, dtype.base_dtype, self.name))\n\n if (autocast and self._dtype_policy.should_cast_variables and\n dtype.is_floating):\n # Wrap 'getter' with a version that returns an AutoCastVariable.\n old_getter = getter\n def getter(*args, **kwargs): # pylint: disable=function-redefined\n variable = old_getter(*args, **kwargs)\n return autocast_variable.create_autocast_variable(variable)\n # Also the caching_device does not work with the mixed precision API,\n # disable it if it is specified.\n # TODO(b/142020079): Reenable it once the bug is fixed.\n if caching_device is not None:\n tf_logging.warn('`caching_device` does not work with mixed precision '\n 'API. 
Ignoring user specified `caching_device`.')\n caching_device = None\n\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n # TODO(allenl): a `make_variable` equivalent should be added as a\n # `Trackable` method.\n getter=getter,\n # Manage errors in Layer rather than Trackable.\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n constraint=constraint,\n trainable=trainable,\n partitioner=partitioner,\n use_resource=use_resource,\n collections=collections_arg,\n synchronization=synchronization,\n aggregation=aggregation,\n caching_device=caching_device)\n if regularizer is not None:\n # TODO(fchollet): in the future, this should be handled at the\n # level of variable creation, and weight regularization losses\n # should be variable attributes.\n name_in_scope = variable.name[:variable.name.find(':')]\n self._handle_weight_regularization(name_in_scope,\n variable,\n regularizer)\n if isinstance(\n variable,\n (tf_variables.PartitionedVariable, sharded_variable.ShardedVariable)):\n for v in variable:\n backend.track_variable(v)\n if trainable:\n self._trainable_weights.append(v)\n else:\n self._non_trainable_weights.append(v)\n else:\n backend.track_variable(variable)\n if trainable:\n self._trainable_weights.append(variable)\n else:\n self._non_trainable_weights.append(variable)\n return variable\n\n @generic_utils.default\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable)\n containing the configuration of a layer.\n The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n The config of a layer does not include connectivity\n information, nor the layer class name. These are handled\n by `Network` (one layer of abstraction above).\n\n Returns:\n Python dictionary.\n \"\"\"\n all_args = tf_inspect.getfullargspec(self.__init__).args\n config = {\n 'name': self.name,\n 'trainable': self.trainable,\n }\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n config['dtype'] = policy.serialize(self._dtype_policy)\n if hasattr(self, 'dynamic'):\n # Only include `dynamic` in the `config` if it is `True`\n if self.dynamic:\n config['dynamic'] = self.dynamic\n elif 'dynamic' in all_args:\n all_args.remove('dynamic')\n expected_args = config.keys()\n # Finds all arguments in the `__init__` that are not in the config:\n extra_args = [arg for arg in all_args if arg not in expected_args]\n # Check that either the only argument in the `__init__` is `self`,\n # or that `get_config` has been overridden:\n if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n raise NotImplementedError('Layer %s has arguments in `__init__` and '\n 'therefore must override `get_config`.' %\n self.__class__.__name__)\n return config\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates a layer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same layer from the config\n dictionary. It does not handle layer connectivity\n (handled by Network), nor weights (handled by `set_weights`).\n\n Arguments:\n config: A Python dictionary, typically the\n output of get_config.\n\n Returns:\n A layer instance.\n \"\"\"\n return cls(**config)\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape of the layer.\n\n If the layer has not been built, this method will call `build` on the\n layer. 
This assumes that the layer will later be used with inputs that\n match the input shape provided here.\n\n Arguments:\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\n Returns:\n An input shape tuple.\n \"\"\"\n if context.executing_eagerly():\n # In this case we build the model first in order to do shape inference.\n # This is acceptable because the framework only calls\n # `compute_output_shape` on shape values that the layer would later be\n # built for. It would however cause issues in case a user attempts to\n # use `compute_output_shape` manually with shapes that are incompatible\n # with the shape the Layer will be called on (these users will have to\n # implement `compute_output_shape` themselves).\n self._maybe_build(input_shape)\n with func_graph.FuncGraph(str(self.name) + '_scratch_graph').as_default():\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n def _make_placeholder_like(shape):\n ph = backend.placeholder(shape=shape, dtype=self.dtype)\n ph._keras_mask = None\n return ph\n inputs = nest.map_structure(_make_placeholder_like, input_shape)\n try:\n outputs = self(inputs, training=False)\n except TypeError as e:\n six.raise_from(\n NotImplementedError(\n 'We could not automatically infer the static shape of the '\n 'layer\\'s output. Please implement the '\n '`compute_output_shape` method on your layer (%s).' %\n self.__class__.__name__), e)\n return nest.map_structure(lambda t: t.shape, outputs)\n raise NotImplementedError\n\n @doc_controls.for_subclass_implementers\n def compute_output_signature(self, input_signature):\n \"\"\"Compute the output tensor signature of the layer based on the inputs.\n\n Unlike a TensorShape object, a TensorSpec object contains both shape\n and dtype information for a tensor. 
This method allows layers to provide\n output dtype information if it is different from the input dtype.\n For any layer that doesn't implement this function,\n the framework will fall back to use `compute_output_shape`, and will\n assume that the output dtype matches the input dtype.\n\n Args:\n input_signature: Single TensorSpec or nested structure of TensorSpec\n objects, describing a candidate input for the layer.\n\n Returns:\n Single TensorSpec or nested structure of TensorSpec objects, describing\n how the layer would transform the provided input.\n\n Raises:\n TypeError: If input_signature contains a non-TensorSpec object.\n \"\"\"\n def check_type_return_shape(s):\n if not isinstance(s, tensor_spec.TensorSpec):\n raise TypeError(\n 'Only TensorSpec signature types are supported, '\n 'but saw signature signature entry: {}.'.format(s))\n return s.shape\n input_shape = nest.map_structure(check_type_return_shape, input_signature)\n output_shape = self.compute_output_shape(input_shape)\n dtype = self._compute_dtype\n if dtype is None:\n input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n # Default behavior when self.dtype is None, is to use the first input's\n # dtype.\n dtype = input_dtypes[0]\n return nest.map_structure(\n lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),\n output_shape)\n\n def _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs):\n if self.dynamic:\n # We will use static shape inference to return symbolic tensors\n # matching the specifications of the layer outputs.\n # Since `self.dynamic` is True, we will never attempt to\n # run the underlying TF graph (which is disconnected).\n # TODO(fchollet): consider py_func as an alternative, which\n # would enable us to run the underlying graph if needed.\n input_signature = nest.map_structure(\n lambda x: tensor_spec.TensorSpec(shape=x.shape, dtype=x.dtype),\n inputs)\n output_signature = self.compute_output_signature(input_signature)\n return nest.map_structure(keras_tensor.KerasTensor, output_signature)\n else:\n return self._infer_output_signature(inputs, args, kwargs, input_masks)\n\n def _infer_output_signature(self, inputs, args, kwargs, input_masks):\n \"\"\"TODO(kaftan): Docstring.\"\"\"\n\n call_fn = self.call\n # Wrapping `call` function in autograph to allow for dynamic control\n # flow and control dependencies in call. 
We are limiting this to\n # subclassed layers as autograph is strictly needed only for\n # subclassed layers and models.\n # tf_convert will respect the value of autograph setting in the\n # enclosing tf.function, if any.\n if (base_layer_utils.is_subclassed(self) and\n not base_layer_utils.from_saved_model(self)):\n call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())\n\n # We enter a scratch graph and build placeholder inputs inside of it that\n # match the input args.\n # We then call the layer inside of the scratch graph to identify the\n # output signatures, then we build KerasTensors corresponding to those\n # outputs.\n scratch_graph = func_graph.FuncGraph(str(self.name) + '_scratch_graph')\n with scratch_graph.as_default():\n inputs = nest.map_structure(\n keras_tensor.keras_tensor_to_placeholder, inputs)\n args = nest.map_structure(\n keras_tensor.keras_tensor_to_placeholder, args)\n kwargs = nest.map_structure(\n keras_tensor.keras_tensor_to_placeholder, kwargs)\n input_masks = nest.map_structure(\n keras_tensor.keras_tensor_to_placeholder, input_masks)\n\n inputs = self._maybe_cast_inputs(inputs)\n\n # try:\n with backend.name_scope(self._name_scope()):\n with ops.enable_auto_cast_variables(self._compute_dtype_object):\n # Build layer if applicable (if the `build` method has been\n # overridden).\n # TODO(kaftan): do we maybe_build here, or have we already done it?\n self._maybe_build(inputs)\n outputs = call_fn(inputs, *args, **kwargs)\n\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks,\n build_graph=False)\n\n outputs = nest.map_structure(keras_tensor.keras_tensor_from_tensor, outputs)\n if hasattr(self, '_set_inputs') and not self.inputs:\n # TODO(kaftan): figure out if we ned to do this at all\n # Subclassed network: explicitly set metadata normally set by\n # a call to self._set_inputs().\n self._set_inputs(inputs, outputs)\n del scratch_graph\n return outputs\n\n @generic_utils.default\n def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n \"\"\"Computes an output mask tensor.\n\n Arguments:\n inputs: Tensor or list of tensors.\n mask: Tensor or list of tensors.\n\n Returns:\n None or a tensor (or list of tensors,\n one per output tensor of the layer).\n \"\"\"\n if not self._supports_masking:\n if any(m is not None for m in nest.flatten(mask)):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask.\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask\n\n def __call__(self, *args, **kwargs):\n \"\"\"Wraps `call`, applying pre- and post-processing steps.\n\n Arguments:\n *args: Positional arguments to be passed to `self.call`.\n **kwargs: Keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n\n Note:\n - The following optional keyword arguments are reserved for specific uses:\n * `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n * `mask`: Boolean input mask.\n - If the layer's `call` method takes a `mask` argument (as some Keras\n layers do), its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come from\n a layer that generated a corresponding mask, i.e. 
if it came from\n a Keras layer with masking support.\n\n Raises:\n ValueError: if the layer's `call` method returns None (an invalid value).\n RuntimeError: if `super().__init__()` was not called in the constructor.\n \"\"\"\n if not hasattr(self, '_thread_local'):\n raise RuntimeError(\n 'You must call `super().__init__()` in the layer constructor.')\n\n # `inputs` (the first arg in the method spec) is special cased in\n # layer call due to historical reasons.\n # This special casing currently takes the form of:\n # - 'inputs' must be explicitly passed. A layer cannot have zero arguments,\n # and inputs cannot have been provided via the default value of a kwarg.\n # - numpy/scalar values in `inputs` get converted to tensors\n # - implicit masks / mask metadata are only collected from 'inputs`\n # - Layers are built using shape info from 'inputs' only\n # - input_spec compatibility is only checked against `inputs`\n # - mixed precision casting (autocast) is only applied to `inputs`,\n # not to any other argument.\n # - setting the SavedModel saving spec.\n inputs, args, kwargs = self._split_out_first_arg(args, kwargs)\n input_list = nest.flatten(inputs)\n\n # Functional Model construction mode is invoked when `Layer`s are called on\n # symbolic `KerasTensor`s, i.e.:\n # >> inputs = tf.keras.Input(10)\n # >> outputs = MyLayer()(inputs) # Functional construction mode.\n # >> model = tf.keras.Model(inputs, outputs)\n if _in_functional_construction_mode(inputs, args, kwargs, input_list):\n return self._functional_construction_call(inputs, args, kwargs,\n input_list)\n\n # Maintains info about the `Layer.call` stack.\n call_context = base_layer_utils.call_context()\n\n # Accept NumPy and scalar inputs by converting to Tensors.\n if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):\n inputs = nest.map_structure(_convert_numpy_or_python_types, inputs)\n input_list = nest.flatten(inputs)\n\n # Handle `mask` propagation from previous layer to current layer. Masks can\n # be propagated explicitly via the `mask` argument, or implicitly via\n # setting the `_keras_mask` attribute on the inputs to a Layer. 
Masks passed\n # explicitly take priority.\n input_masks, mask_is_implicit = self._get_input_masks(\n inputs, input_list, args, kwargs)\n if self._expects_mask_arg and mask_is_implicit:\n kwargs['mask'] = input_masks\n\n # Training mode for `Layer.call` is set via (in order of priority):\n # (1) The `training` argument passed to this `Layer.call`, if it is not None\n # (2) The training mode of an outer `Layer.call`.\n # (3) The default mode set by `tf.keras.backend.set_learning_phase` (if set)\n # (4) Any non-None default value for `training` specified in the call\n # signature\n # (5) False (treating the layer as if it's in inference)\n args, kwargs, training_mode = self._set_training_mode(\n args, kwargs, call_context)\n\n # Losses are cleared for all sublayers on the outermost `Layer.call`.\n # Losses are not cleared on inner `Layer.call`s, because sublayers can be\n # called multiple times.\n if not call_context.in_call:\n self._clear_losses()\n\n eager = context.executing_eagerly()\n with call_context.enter(\n layer=self,\n inputs=inputs,\n build_graph=not eager,\n training=training_mode):\n\n if self._autocast:\n inputs = self._maybe_cast_inputs(inputs, input_list)\n\n if eager:\n call_fn = self.call\n name_scope = self._name\n else:\n input_spec.assert_input_compatibility(self.input_spec, inputs,\n self.name)\n name_scope = self._name_scope() # Avoid autoincrementing.\n call_fn = self._autographed_call()\n\n with ops.name_scope_v2(name_scope):\n if not self.built:\n self._maybe_build(inputs)\n\n with ops.enable_auto_cast_variables(self._compute_dtype_object):\n outputs = call_fn(inputs, *args, **kwargs)\n\n if self._activity_regularizer:\n self._handle_activity_regularization(inputs, outputs)\n if self._supports_masking:\n self._set_mask_metadata(inputs, outputs, input_masks, not eager)\n if self._saved_model_inputs_spec is None:\n self._set_save_spec(inputs)\n\n return outputs\n\n def _functional_construction_call(self, inputs, args, kwargs, input_list):\n call_context = base_layer_utils.call_context()\n\n # Accept NumPy and scalar inputs by converting to Tensors.\n if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):\n\n def _convert_non_tensor(x):\n # Don't call `ops.convert_to_tensor_v2` on all `inputs` because\n # `SparseTensors` can't be converted to `Tensor`.\n if isinstance(x, (np.ndarray, float, int)):\n return ops.convert_to_tensor_v2(x)\n return x\n\n inputs = nest.map_structure(_convert_non_tensor, inputs)\n input_list = nest.flatten(inputs)\n\n # Handle `mask` propagation from previous layer to current layer. Masks can\n # be propagated explicitly via the `mask` argument, or implicitly via\n # setting the `_keras_mask` attribute on the inputs to a Layer. 
Masks passed\n # explicitly take priority.\n mask_arg_passed_by_framework = False\n input_masks, mask_is_implicit = self._get_input_masks(\n inputs, input_list, args, kwargs)\n if self._expects_mask_arg and mask_is_implicit:\n kwargs['mask'] = input_masks\n mask_arg_passed_by_framework = True\n\n # If `training` argument is None or not explicitly passed,\n # propagate `training` value from this layer's calling layer.\n training_value = None\n training_arg_passed_by_framework = False\n # Priority 1: `training` was explicitly passed a non-None value.\n if self._call_arg_was_passed('training', args, kwargs):\n training_value = self._get_call_arg_value('training', args, kwargs)\n if not self._expects_training_arg:\n kwargs.pop('training')\n\n if training_value is None:\n # Priority 2: `training` was passed to a parent layer.\n if call_context.training is not None:\n training_value = call_context.training\n # Priority 3: `learning_phase()` has been set.\n elif backend.global_learning_phase_is_set():\n training_value = backend.learning_phase()\n # Force the training_value to be bool type which matches to the contract\n # for layer/model call args.\n if tensor_util.is_tensor(training_value):\n training_value = math_ops.cast(training_value, dtypes.bool)\n else:\n training_value = bool(training_value)\n # Priority 4: trace layer with the default training argument specified\n # in the `call` signature (or in inference mode if the `call` signature\n # specifies no non-None default).\n else:\n training_value = self._default_training_arg\n # In cases (2), (3), (4) the training argument is passed automatically\n # by the framework, and will not be hard-coded into the model.\n if self._expects_training_arg:\n args, kwargs = self._set_call_arg_value('training', training_value,\n args, kwargs)\n training_arg_passed_by_framework = True\n\n if keras_tensor.keras_tensors_enabled():\n with call_context.enter(\n layer=self, inputs=inputs, build_graph=True, training=training_value):\n # Check input assumptions set after layer building, e.g. input shape.\n outputs = self._keras_tensor_symbolic_call(\n inputs, input_masks, args, kwargs)\n\n if outputs is None:\n raise ValueError('A layer\\'s `call` method should return a '\n 'Tensor or a list of Tensors, not None '\n '(layer: ' + self.name + ').')\n if training_arg_passed_by_framework:\n args, kwargs = self._set_call_arg_value(\n 'training', None, args, kwargs, pop_kwarg_if_none=True)\n if mask_arg_passed_by_framework:\n kwargs.pop('mask')\n # Node connectivity does not special-case the first argument.\n outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,\n outputs)\n return outputs\n\n # Only create Keras history if at least one tensor originates from a\n # `keras.Input`. Otherwise this Layer may be being used outside the Keras\n # framework.\n # TODO(kaftan): make this not special case inputs\n if base_layer_utils.needs_keras_history(inputs):\n base_layer_utils.create_keras_history(inputs)\n\n with call_context.enter(\n layer=self, inputs=inputs, build_graph=True, training=training_value):\n # Symbolic execution on symbolic tensors. 
We will attempt to build\n # the corresponding TF subgraph inside `backend.get_graph()`\n # TODO(reedwm): We should assert input compatibility after the inputs\n # are casted, not before.\n input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)\n graph = backend.get_graph()\n # Use `self._name_scope()` to avoid auto-incrementing the name.\n with graph.as_default(), backend.name_scope(self._name_scope()):\n # Build layer if applicable (if the `build` method has been\n # overridden).\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs, input_list)\n\n if not self.dynamic:\n # Wrapping `call` function in autograph to allow for dynamic control\n # flow and control dependencies in call. We are limiting this to\n # subclassed layers as autograph is strictly needed only for\n # subclassed layers and models.\n # tf_convert will respect the value of autograph setting in the\n # enclosing tf.function, if any.\n if (base_layer_utils.is_subclassed(self) and\n not base_layer_utils.from_saved_model(self)):\n call_fn = autograph.tf_convert(self.call,\n ag_ctx.control_status_ctx())\n else:\n call_fn = self.call\n\n try:\n with ops.enable_auto_cast_variables(self._compute_dtype_object):\n outputs = call_fn(cast_inputs, *args, **kwargs)\n\n except errors.OperatorNotAllowedInGraphError as e:\n raise TypeError('You are attempting to use Python control '\n 'flow in a layer that was not declared to be '\n 'dynamic. Pass `dynamic=True` to the class '\n 'constructor.\\nEncountered error:\\n\"\"\"\\n' + str(e) +\n '\\n\"\"\"')\n else:\n # We will use static shape inference to return symbolic tensors\n # matching the specifications of the layer outputs.\n # Since `self.dynamic` is True, we will never attempt to\n # run the underlying TF graph (which is disconnected).\n # TODO(fchollet): consider py_func as an alternative, which\n # would enable us to run the underlying graph if needed.\n outputs = self._symbolic_call(inputs)\n\n if outputs is None:\n raise ValueError('A layer\\'s `call` method should return a '\n 'Tensor or a list of Tensors, not None '\n '(layer: ' + self.name + ').')\n # TODO(kaftan): This should be 'any' and check all args\n if base_layer_utils.have_all_keras_metadata(inputs):\n if training_arg_passed_by_framework:\n args, kwargs = self._set_call_arg_value(\n 'training', None, args, kwargs, pop_kwarg_if_none=True)\n if mask_arg_passed_by_framework:\n kwargs.pop('mask')\n # Node connectivity does not special-case the first argument.\n outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,\n outputs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks, True)\n if hasattr(self, '_set_inputs') and not self.inputs:\n # Subclassed network: explicitly set metadata normally set by\n # a call to self._set_inputs().\n self._set_inputs(cast_inputs, outputs)\n\n return outputs\n\n def _set_training_mode(self, args, kwargs, call_context):\n training_mode = None\n if self._expects_training_arg:\n # (1) `training` was passed to this `Layer.call`.\n if self._call_arg_was_passed('training', args, kwargs):\n training_mode = self._get_call_arg_value('training', args, kwargs)\n # If no `training` arg was passed, or `None` was explicitly passed,\n # the framework will make a decision about the training mode is.\n if training_mode is None:\n call_ctx_training = call_context.training\n # (2) `training` mode is inferred from an outer `Layer.call`.\n if call_ctx_training is not None:\n training_mode = 
call_ctx_training\n # (3) User set `tf.keras.backend.set_learning_phase`.\n elif backend.global_learning_phase_is_set():\n training_mode = backend.learning_phase()\n # Ensure value is a `bool` or `tf.bool`.\n if isinstance(training_mode, bool):\n pass\n elif tensor_util.is_tensor(training_mode):\n training_mode = math_ops.cast(training_mode, dtypes.bool)\n else:\n training_mode = bool(training_mode)\n # (4) We default to using `call`'s default value for `training`,\n # or treating the layer as if it is in inference if no non-None default\n # is specified in the `call` signature.\n else:\n training_mode = self._default_training_arg\n\n # For case (2), (3), (4) `training` arg is passed by framework.\n args, kwargs = self._set_call_arg_value('training', training_mode, args,\n kwargs)\n else:\n if 'training' in kwargs:\n # `training` was passed to this `Layer` but is not needed for\n # `Layer.call`. It will set the default mode for inner `Layer.call`s.\n training_mode = kwargs.pop('training')\n else:\n # Grab the current `training` mode from any outer `Layer.call`.\n training_mode = call_context.training\n\n return args, kwargs, training_mode\n\n def _autographed_call(self):\n # Wrapping `call` function in autograph to allow for dynamic control\n # flow and control dependencies in call. We are limiting this to\n # subclassed layers as autograph is strictly needed only for\n # subclassed layers and models.\n # tf_convert will respect the value of autograph setting in the\n # enclosing tf.function, if any.\n if (base_layer_utils.is_subclassed(self) and\n not base_layer_utils.from_saved_model(self)):\n return autograph.tf_convert(self.call, ag_ctx.control_status_ctx())\n else:\n return self.call\n\n @property\n def dtype(self):\n \"\"\"Dtype used by the weights of the layer, set in the constructor.\"\"\"\n return self._dtype_policy.variable_dtype\n\n @property\n def name(self):\n \"\"\"Name of the layer (string), set in the constructor.\"\"\"\n return self._name\n\n @property\n def supports_masking(self):\n \"\"\"Whether this layer supports computing a mask using `compute_mask`.\"\"\"\n return self._supports_masking\n\n @supports_masking.setter\n def supports_masking(self, value):\n self._supports_masking = value\n\n @property\n def dynamic(self):\n \"\"\"Whether the layer is dynamic (eager-only); set in the constructor.\"\"\"\n return any(layer._dynamic for layer in self._flatten_layers())\n\n @property\n @doc_controls.do_not_doc_inheritable\n def stateful(self):\n return any(layer._stateful for layer in self._flatten_layers())\n\n @stateful.setter\n def stateful(self, value):\n self._stateful = value\n\n @property\n def trainable(self):\n return self._trainable\n\n @trainable.setter\n def trainable(self, value):\n for layer in self._flatten_layers():\n layer._trainable = value\n\n @property\n def activity_regularizer(self):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n return self._activity_regularizer\n\n @activity_regularizer.setter\n def activity_regularizer(self, regularizer):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n self._activity_regularizer = regularizer\n\n @property\n def input_spec(self):\n \"\"\"`InputSpec` instance(s) describing the input format for this layer.\n\n When you create a layer subclass, you can set `self.input_spec` to enable\n the layer to run input compatibility checks when it is called.\n Consider a `Conv2D` layer: it can only be called on a single input tensor\n of rank 4. 
As such, you can set, in `__init__()`:\n\n ```python\n self.input_spec = tf.keras.layers.InputSpec(ndim=4)\n ```\n\n Now, if you try to call the layer on an input that isn't rank 4\n (for instance, an input of shape `(2,)`, it will raise a nicely-formatted\n error:\n\n ```\n ValueError: Input 0 of layer conv2d is incompatible with the layer:\n expected ndim=4, found ndim=1. Full shape received: [2]\n ```\n\n Input checks that can be specified via `input_spec` include:\n - Structure (e.g. a single input, a list of 2 inputs, etc)\n - Shape\n - Rank (ndim)\n - Dtype\n\n For more information, see `tf.keras.layers.InputSpec`.\n\n Returns:\n A `tf.keras.layers.InputSpec` instance, or nested structure thereof.\n \"\"\"\n return self._input_spec\n\n @input_spec.setter\n # Must be decorated to prevent tracking, since the input_spec can be nested\n # InputSpec objects.\n @trackable.no_automatic_dependency_tracking\n def input_spec(self, value):\n for v in nest.flatten(value):\n if v is not None and not isinstance(v, InputSpec):\n raise TypeError('Layer input_spec must be an instance of InputSpec. '\n 'Got: {}'.format(v))\n self._input_spec = value\n\n @property\n def trainable_weights(self):\n \"\"\"List of all trainable weights tracked by this layer.\n\n Trainable weights are updated via gradient descent during training.\n\n Returns:\n A list of trainable variables.\n \"\"\"\n if self.trainable:\n children_weights = self._gather_children_attribute('trainable_weights')\n return self._dedup_weights(self._trainable_weights + children_weights)\n else:\n return []\n\n @property\n def non_trainable_weights(self):\n \"\"\"List of all non-trainable weights tracked by this layer.\n\n Non-trainable weights are *not* updated during training. They are expected\n to be updated manually in `call()`.\n\n Returns:\n A list of non-trainable variables.\n \"\"\"\n if self.trainable:\n children_weights = self._gather_children_attribute(\n 'non_trainable_weights')\n non_trainable_weights = self._non_trainable_weights + children_weights\n else:\n children_weights = self._gather_children_attribute('weights')\n non_trainable_weights = (\n self._trainable_weights + self._non_trainable_weights +\n children_weights)\n return self._dedup_weights(non_trainable_weights)\n\n @property\n def weights(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.trainable_weights + self.non_trainable_weights\n\n @property\n @deprecation.deprecated(\n date=None,\n instructions='This property should not be used in TensorFlow 2.0, '\n 'as updates are applied automatically.')\n @doc_controls.do_not_generate_docs\n def updates(self):\n if keras_tensor.keras_tensors_enabled():\n return []\n\n collected_updates = []\n all_layers = self._flatten_layers()\n with backend.get_graph().as_default():\n for layer in all_layers:\n if not layer.trainable and not layer.stateful:\n continue\n for u in layer._updates:\n if callable(u):\n u = u()\n collected_updates.append(u)\n return collected_updates\n\n @property\n def losses(self):\n \"\"\"List of losses added using the `add_loss()` API.\n\n Variable regularization tensors are created when this property is accessed,\n so it is eager safe: accessing `losses` under a `tf.GradientTape` will\n propagate gradients back to the corresponding variables.\n\n Examples:\n\n >>> class MyLayer(tf.keras.layers.Layer):\n ... def call(self, inputs):\n ... self.add_loss(tf.abs(tf.reduce_mean(inputs)))\n ... 
return inputs\n >>> l = MyLayer()\n >>> l(np.ones((10, 1)))\n >>> l.losses\n [1.0]\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> x = tf.keras.layers.Dense(10)(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Activity regularization.\n >>> model.add_loss(tf.abs(tf.reduce_mean(x)))\n >>> model.losses\n [<tf.Tensor 'Abs:0' shape=() dtype=float32>]\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')\n >>> x = d(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Weight regularization.\n >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))\n >>> model.losses\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n Returns:\n A list of tensors.\n \"\"\"\n collected_losses = []\n for layer in self._flatten_layers():\n # If any eager losses are present, we assume the model to be part of an\n # eager training loop (either a custom one or the one used when\n # `run_eagerly=True`) and so we always return just the eager losses.\n if layer._eager_losses:\n # Filter placeholder losses that may have been added by revived layers.\n # (see base_layer_utils for details).\n if (layer._eager_losses[0] is\n not base_layer_utils.REVIVED_LOSS_PLACEHOLDER):\n collected_losses.extend(layer._eager_losses)\n else:\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses\n\n def add_loss(self, losses, **kwargs):\n \"\"\"Add loss tensor(s), potentially dependent on layer inputs.\n\n Some losses (for instance, activity regularization losses) may be dependent\n on the inputs passed when calling a layer. Hence, when reusing the same\n layer on different inputs `a` and `b`, some entries in `layer.losses` may\n be dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n This method can be used inside a subclassed layer or model's `call`\n function, in which case `losses` should be a Tensor or list of Tensors.\n\n Example:\n\n ```python\n class MyLayer(tf.keras.layers.Layer):\n def call(self, inputs):\n self.add_loss(tf.abs(tf.reduce_mean(inputs)))\n return inputs\n ```\n\n This method can also be called directly on a Functional Model during\n construction. In this case, any loss Tensors passed to this Model must\n be symbolic and be able to be traced back to the model's `Input`s. These\n losses become part of the model's topology and are tracked in `get_config`.\n\n Example:\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n # Activity regularization.\n model.add_loss(tf.abs(tf.reduce_mean(x)))\n ```\n\n If this is not the case for your loss (if, for example, your loss references\n a `Variable` of one of the model's layers), you can wrap your loss in a\n zero-argument lambda. These losses are not tracked as part of the model's\n topology since they can't be serialized.\n\n Example:\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n d = tf.keras.layers.Dense(10)\n x = d(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n # Weight regularization.\n model.add_loss(lambda: tf.reduce_mean(d.kernel))\n ```\n\n Arguments:\n losses: Loss tensor, or list/tuple of tensors. 
Rather than tensors, losses\n may also be zero-argument callables which create a loss tensor.\n **kwargs: Additional keyword arguments for backward compatibility.\n Accepted values:\n inputs - Deprecated, will be automatically inferred.\n \"\"\"\n kwargs.pop('inputs', None)\n if kwargs:\n raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))\n\n def _tag_callable(loss):\n \"\"\"Tags callable loss tensor as `_unconditional_loss`.\"\"\"\n if callable(loss):\n # We run the loss without autocasting, as regularizers are often\n # numerically unstable in float16.\n with ops.enable_auto_cast_variables(None):\n loss = loss()\n if loss is None:\n return None # Will be filtered out when computing the .losses property\n if not tensor_util.is_tensor(loss):\n loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())\n loss._unconditional_loss = True # pylint: disable=protected-access\n return loss\n\n losses = nest.flatten(losses)\n\n callable_losses = []\n eager_losses = []\n symbolic_losses = []\n for loss in losses:\n if callable(loss):\n callable_losses.append(functools.partial(_tag_callable, loss))\n continue\n if loss is None:\n continue\n if not tensor_util.is_tensor(loss) and not isinstance(\n loss, keras_tensor.KerasTensor):\n loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())\n # TF Functions should take the eager path.\n if ((tf_utils.is_symbolic_tensor(loss) or\n isinstance(loss, keras_tensor.KerasTensor)) and\n not base_layer_utils.is_in_tf_function()):\n symbolic_losses.append(loss)\n elif tensor_util.is_tensor(loss):\n eager_losses.append(loss)\n\n self._callable_losses.extend(callable_losses)\n\n in_call_context = base_layer_utils.call_context().in_call\n if eager_losses and not in_call_context:\n raise ValueError(\n 'Expected a symbolic Tensors or a callable for the loss value. 
'\n 'Please wrap your loss computation in a zero argument `lambda`.')\n\n self._eager_losses.extend(eager_losses)\n\n if in_call_context and not keras_tensor.keras_tensors_enabled():\n for symbolic_loss in symbolic_losses:\n self._losses.append(symbolic_loss)\n else:\n for symbolic_loss in symbolic_losses:\n if getattr(self, '_is_graph_network', False):\n self._graph_network_add_loss(symbolic_loss)\n else:\n # Possible a loss was added in a Layer's `build`.\n self._losses.append(symbolic_loss)\n\n def _clear_losses(self):\n \"\"\"Used every step in eager to reset losses.\"\"\"\n # Set to thread local directly to avoid Layer.__setattr__ overhead.\n if not getattr(self, '_layers', None): # Fast path for single Layer.\n self._thread_local._eager_losses = []\n else:\n for layer in self._flatten_layers():\n layer._thread_local._eager_losses = []\n\n @property\n def metrics(self):\n \"\"\"List of metrics added using the `add_metric()` API.\n\n Example:\n\n >>> input = tf.keras.layers.Input(shape=(3,))\n >>> d = tf.keras.layers.Dense(2)\n >>> output = d(input)\n >>> d.add_metric(tf.reduce_max(output), name='max')\n >>> d.add_metric(tf.reduce_min(output), name='min')\n >>> [m.name for m in d.metrics]\n ['max', 'min']\n\n Returns:\n A list of tensors.\n \"\"\"\n collected_metrics = []\n for layer in self._flatten_layers():\n with layer._metrics_lock:\n collected_metrics.extend(layer._metrics)\n return collected_metrics\n\n def add_metric(self, value, name=None, **kwargs):\n \"\"\"Adds metric tensor to the layer.\n\n This method can be used inside the `call()` method of a subclassed layer\n or model.\n\n ```python\n class MyMetricLayer(tf.keras.layers.Layer):\n def __init__(self):\n super(MyMetricLayer, self).__init__(name='my_metric_layer')\n self.mean = metrics_module.Mean(name='metric_1')\n\n def call(self, inputs):\n self.add_metric(self.mean(x))\n self.add_metric(math_ops.reduce_sum(x), name='metric_2')\n return inputs\n ```\n\n This method can also be called directly on a Functional Model during\n construction. In this case, any tensor passed to this Model must\n be symbolic and be able to be traced back to the model's `Input`s. These\n metrics become part of the model's topology and are tracked when you\n save the model via `save()`.\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n model.add_metric(math_ops.reduce_sum(x), name='metric_1')\n ```\n\n Note: Calling `add_metric()` with the result of a metric object on a\n Functional Model, as shown in the example below, is not supported. 
This is\n because we cannot trace the metric result tensor back to the model's inputs.\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')\n ```\n\n Args:\n value: Metric tensor.\n name: String metric name.\n **kwargs: Additional keyword arguments for backward compatibility.\n Accepted values:\n `aggregation` - When the `value` tensor provided is not the result of\n calling a `keras.Metric` instance, it will be aggregated by default\n using a `keras.Metric.Mean`.\n \"\"\"\n kwargs_keys = list(kwargs.keys())\n if (len(kwargs_keys) > 1 or\n (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):\n raise TypeError('Unknown keyword arguments: ', str(kwargs.keys()))\n\n from_metric_obj = hasattr(value, '_metric_obj')\n if keras_tensor.keras_tensors_enabled():\n is_symbolic = isinstance(value, keras_tensor.KerasTensor)\n else:\n is_symbolic = tf_utils.is_symbolic_tensor(value)\n in_call_context = base_layer_utils.call_context().in_call\n\n if name is None and not from_metric_obj:\n # Eg. `self.add_metric(math_ops.reduce_sum(x))`\n # In eager mode, we use metric name to lookup a metric. Without a name,\n # a new Mean metric wrapper will be created on every model/layer call.\n # So, we raise an error when no name is provided.\n # We will do the same for symbolic mode for consistency although a name\n # will be generated if no name is provided.\n\n # We will not raise this error in the foll use case for the sake of\n # consistency as name in provided in the metric constructor.\n # mean = metrics.Mean(name='my_metric')\n # model.add_metric(mean(outputs))\n raise ValueError('Please provide a name for your metric like '\n '`self.add_metric(tf.reduce_sum(inputs), '\n 'name=\\'mean_activation\\')`')\n elif from_metric_obj:\n name = value._metric_obj.name\n\n if not in_call_context and not is_symbolic:\n raise ValueError('Expected a symbolic Tensor for the metric value, '\n 'received: ' + str(value))\n\n # If a metric was added in a Layer's `call` or `build`.\n if in_call_context or not getattr(self, '_is_graph_network', False):\n # TF Function path should take the eager path.\n\n # If the given metric is available in `metrics` list we just update state\n # on it, otherwise we create a new metric instance and\n # add it to the `metrics` list.\n metric_obj = getattr(value, '_metric_obj', None)\n # Tensors that come from a Metric object already updated the Metric state.\n should_update_state = not metric_obj\n name = metric_obj.name if metric_obj else name\n\n with self._metrics_lock:\n match = self._get_existing_metric(name)\n if match:\n metric_obj = match\n elif metric_obj:\n self._metrics.append(metric_obj)\n else:\n from tensorflow.python.keras import metrics as metrics_mod # pylint:disable=g-import-not-at-top\n # Build the metric object with the value's dtype if it defines one\n metric_obj = metrics_mod.Mean(\n name=name, dtype=getattr(value, 'dtype', None))\n self._metrics.append(metric_obj)\n\n if should_update_state:\n metric_obj(value)\n else:\n if from_metric_obj:\n raise ValueError('Using the result of calling a `Metric` object '\n 'when calling `add_metric` on a Functional '\n 'Model is not supported. 
Please pass the '\n 'Tensor to monitor directly.')\n\n # Insert layers into the Keras Graph Network.\n aggregation = None if from_metric_obj else 'mean'\n self._graph_network_add_metric(value, aggregation, name)\n\n @deprecation.deprecated_args(None, '`inputs` is now automatically inferred',\n 'inputs')\n @doc_controls.do_not_doc_inheritable\n def add_update(self, updates, inputs=None):\n \"\"\"Add update op(s), potentially dependent on layer inputs.\n\n Weight updates (for instance, the updates of the moving mean and variance\n in a BatchNormalization layer) may be dependent on the inputs passed\n when calling a layer. Hence, when reusing the same layer on\n different inputs `a` and `b`, some entries in `layer.updates` may be\n dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n This call is ignored when eager execution is enabled (in that case, variable\n updates are run on the fly and thus do not need to be tracked for later\n execution).\n\n Arguments:\n updates: Update op, or list/tuple of update ops, or zero-arg callable\n that returns an update op. A zero-arg callable should be passed in\n order to disable running the updates by setting `trainable=False`\n on this Layer, when executing in Eager mode.\n inputs: Deprecated, will be automatically inferred.\n \"\"\"\n call_context = base_layer_utils.call_context()\n # No need to run updates during Functional API construction.\n if call_context.in_keras_graph:\n return\n\n # Callable updates are disabled by setting `trainable=False`.\n if not call_context.frozen:\n for update in nest.flatten(updates):\n if callable(update):\n update()\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the layer, from Numpy arrays.\n\n The weights of a layer represent the state of the layer. This function\n sets the weight values from numpy arrays. The weight values should be\n passed in the order they are created by the layer. Note that the layer's\n weights must be instantiated before calling this function by calling\n the layer.\n\n For example, a Dense layer returns a list of two values-- per-output\n weights and the bias value. These can be used to set the weights of another\n Dense layer:\n\n >>> a = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(1.))\n >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))\n >>> a.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n >>> b = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(2.))\n >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))\n >>> b.get_weights()\n [array([[2.],\n [2.],\n [2.]], dtype=float32), array([0.], dtype=float32)]\n >>> b.set_weights(a.get_weights())\n >>> b.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n\n Arguments:\n weights: a list of Numpy arrays. The number\n of arrays and their shape must match\n number of the dimensions of the weights\n of the layer (i.e. 
it should match the\n output of `get_weights`).\n\n Raises:\n ValueError: If the provided weights list does not match the\n layer's specifications.\n \"\"\"\n params = self.weights\n\n expected_num_weights = 0\n for param in params:\n if isinstance(param, base_layer_utils.TrackableWeightHandler):\n expected_num_weights += param.num_tensors\n else:\n expected_num_weights += 1\n\n if expected_num_weights != len(weights):\n raise ValueError(\n 'You called `set_weights(weights)` on layer \"%s\" '\n 'with a weight list of length %s, but the layer was '\n 'expecting %s weights. Provided weights: %s...' %\n (self.name, len(weights), expected_num_weights, str(weights)[:50]))\n\n weight_index = 0\n weight_value_tuples = []\n for param in params:\n if isinstance(param, base_layer_utils.TrackableWeightHandler):\n num_tensors = param.num_tensors\n tensors = weights[weight_index:weight_index + num_tensors]\n param.set_weights(tensors)\n weight_index += num_tensors\n else:\n weight = weights[weight_index]\n ref_shape = param.shape\n if not ref_shape.is_compatible_with(weight.shape):\n raise ValueError(\n 'Layer weight shape %s not compatible with provided weight '\n 'shape %s' % (ref_shape, weight.shape))\n weight_value_tuples.append((param, weight))\n weight_index += 1\n\n backend.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current weights of the layer.\n\n The weights of a layer represent the state of the layer. This function\n returns both trainable and non-trainable weight values associated with this\n layer as a list of Numpy arrays, which can in turn be used to load state\n into similarly parameterized layers.\n\n For example, a Dense layer returns a list of two values-- per-output\n weights and the bias value. These can be used to set the weights of another\n Dense layer:\n\n >>> a = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(1.))\n >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))\n >>> a.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n >>> b = tf.keras.layers.Dense(1,\n ... 
kernel_initializer=tf.constant_initializer(2.))\n >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))\n >>> b.get_weights()\n [array([[2.],\n [2.],\n [2.]], dtype=float32), array([0.], dtype=float32)]\n >>> b.set_weights(a.get_weights())\n >>> b.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n\n Returns:\n Weights values as a list of numpy arrays.\n \"\"\"\n weights = self.weights\n output_weights = []\n for weight in weights:\n if isinstance(weight, base_layer_utils.TrackableWeightHandler):\n output_weights.extend(weight.get_tensors())\n else:\n output_weights.append(weight)\n return backend.batch_get_value(output_weights)\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.updates` instead.')\n @doc_controls.do_not_generate_docs\n def get_updates_for(self, inputs):\n \"\"\"Deprecated, do NOT use!\n\n Retrieves updates relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of update ops of the layer that depend on `inputs`.\n \"\"\"\n return self.updates\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.losses` instead.')\n @doc_controls.do_not_generate_docs\n def get_losses_for(self, inputs):\n \"\"\"Deprecated, do NOT use!\n\n Retrieves losses relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of loss tensors of the layer that depend on `inputs`.\n \"\"\"\n return self.losses\n\n @doc_controls.do_not_doc_inheritable\n def get_input_mask_at(self, node_index):\n \"\"\"Retrieves the input mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n \"\"\"\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n @doc_controls.do_not_doc_inheritable\n def get_output_mask_at(self, node_index):\n \"\"\"Retrieves the output mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple outputs).\n \"\"\"\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def input_mask(self):\n \"\"\"Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. 
if it is connected to one incoming layer.\n\n Returns:\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def output_mask(self):\n \"\"\"Retrieves the output mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output mask tensor (potentially None) or list of output\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n @doc_controls.do_not_doc_inheritable\n def get_input_shape_at(self, node_index):\n \"\"\"Retrieves the input shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_shapes',\n 'input shape')\n\n @doc_controls.do_not_doc_inheritable\n def get_output_shape_at(self, node_index):\n \"\"\"Retrieves the output shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_shapes',\n 'output shape')\n\n @doc_controls.do_not_doc_inheritable\n def get_input_at(self, node_index):\n \"\"\"Retrieves the input tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_tensors',\n 'input')\n\n @doc_controls.do_not_doc_inheritable\n def get_output_at(self, node_index):\n \"\"\"Retrieves the output tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_tensors',\n 'output')\n\n @property\n def input(self):\n \"\"\"Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. 
if it is connected to one incoming layer.\n\n Returns:\n Input tensor or list of input tensors.\n\n Raises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name +\n ' is not connected, no input to return.')\n return self._get_node_attribute_at_index(0, 'input_tensors', 'input')\n\n @property\n def output(self):\n \"\"\"Retrieves the output tensor(s) of a layer.\n\n Only applicable if the layer has exactly one output,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output tensor or list of output tensors.\n\n Raises:\n AttributeError: if the layer is connected to more than one incoming\n layers.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n return self._get_node_attribute_at_index(0, 'output_tensors', 'output')\n\n @property\n @doc_controls.do_not_doc_inheritable\n def input_shape(self):\n \"\"\"Retrieves the input shape(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer, or if all inputs\n have the same shape.\n\n Returns:\n Input shape, as an integer shape tuple\n (or list of shape tuples, one tuple per input tensor).\n\n Raises:\n AttributeError: if the layer has no defined input_shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined input shape.')\n all_input_shapes = set(\n [str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n return self._inbound_nodes[0].input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n ' has multiple inbound nodes, '\n 'with different input shapes. Hence '\n 'the notion of \"input shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_input_shape_at(node_index)` '\n 'instead.')\n\n def count_params(self):\n \"\"\"Count the total number of scalars composing the weights.\n\n Returns:\n An integer count.\n\n Raises:\n ValueError: if the layer isn't yet built\n (in which case its weights aren't yet defined).\n \"\"\"\n if not self.built:\n if getattr(self, '_is_graph_network', False):\n with tf_utils.maybe_init_scope(self):\n self._maybe_build(self.inputs)\n else:\n raise ValueError('You tried to call `count_params` on ' + self.name +\n ', but the layer isn\\'t built. '\n 'You can build it manually via: `' + self.name +\n '.build(batch_input_shape)`.')\n return layer_utils.count_params(self.weights)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def output_shape(self):\n \"\"\"Retrieves the output shape(s) of a layer.\n\n Only applicable if the layer has one output,\n or if all outputs have the same shape.\n\n Returns:\n Output shape, as an integer shape tuple\n (or list of shape tuples, one tuple per output tensor).\n\n Raises:\n AttributeError: if the layer has no defined output shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n return self._inbound_nodes[0].output_shapes\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. 
Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def inbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._inbound_nodes\n\n @property\n @doc_controls.do_not_doc_inheritable\n def outbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._outbound_nodes\n\n ##############################################################################\n # Methods & attributes below are public aliases of other methods. #\n ##############################################################################\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.__call__` method instead.')\n @doc_controls.do_not_doc_inheritable\n def apply(self, inputs, *args, **kwargs):\n \"\"\"Deprecated, do NOT use!\n\n This is an alias of `self.__call__`.\n\n Arguments:\n inputs: Input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n \"\"\"\n return self.__call__(inputs, *args, **kwargs)\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.add_weight` method instead.')\n @doc_controls.do_not_doc_inheritable\n def add_variable(self, *args, **kwargs):\n \"\"\"Deprecated, do NOT use! Alias for `add_weight`.\"\"\"\n return self.add_weight(*args, **kwargs)\n\n @property\n @doc_controls.do_not_generate_docs\n def variables(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Alias of `self.weights`.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.weights\n\n @property\n @doc_controls.do_not_generate_docs\n def trainable_variables(self):\n return self.trainable_weights\n\n @property\n @doc_controls.do_not_generate_docs\n def non_trainable_variables(self):\n return self.non_trainable_weights\n\n ##############################################################################\n # Methods & attributes below are all private and only used by the framework. #\n ##############################################################################\n\n def _set_dtype_policy(self, dtype):\n \"\"\"Sets self._dtype_policy.\"\"\"\n if isinstance(dtype, policy.Policy):\n self._dtype_policy = dtype\n elif isinstance(dtype, dict):\n self._dtype_policy = policy.deserialize(dtype)\n elif dtype:\n self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)\n else:\n self._dtype_policy = policy.global_policy()\n if (self._dtype_policy.name == 'mixed_float16' and\n not loss_scale_optimizer.strategy_supports_loss_scaling()):\n # Although only loss scaling doesn't support certain strategies, to avoid\n # confusion, we disallow the 'mixed_float16' policy with unsupported\n # strategies. This is because 'mixed_float16' requires loss scaling for\n # numeric stability.\n strategy = ds_context.get_strategy()\n raise ValueError('Mixed precision is not supported with the '\n 'tf.distribute.Strategy: %s. Either stop using mixed '\n 'precision by removing the use of the \"%s\" policy or '\n 'use a different Strategy, e.g. a MirroredStrategy.' 
%\n (strategy.__class__.__name__, self._dtype_policy.name))\n\n # This has no impact on the layer behavior, and is only used for printing\n # warnings.\n self._dtype_defaulted_to_floatx = (not dtype and\n policy.policy_defaults_to_floatx())\n\n # Performance optimization: cache the compute dtype as a Dtype object or\n # None, so that str to Dtype conversion doesn't happen in Layer.__call__.\n # TODO(b/157486353): Investigate returning DTypes in Policy.\n if self._dtype_policy.compute_dtype:\n self._compute_dtype_object = dtypes.as_dtype(\n self._dtype_policy.compute_dtype)\n else:\n self._compute_dtype_object = None\n\n # TODO(reedwm): Expose this property?\n @property\n def _compute_dtype(self):\n \"\"\"The layer's compute dtype.\n\n Unless mixed-precision is used, this is the same as `Layer.dtype`.\n\n If self._autocast is True, layer's will cast floating-point inputs to this.\n\n Returns:\n The layer's compute dtype.\n \"\"\"\n return self._dtype_policy.compute_dtype\n\n def _maybe_cast_inputs(self, inputs, input_list=None):\n \"\"\"Maybe casts the inputs to the compute dtype.\n\n If self._compute_dtype is floating-point, and self_autocast is True,\n floating-point inputs are casted to self._compute_dtype.\n\n Args:\n inputs: Input tensor, or structure of input tensors.\n input_list: Flat list of input tensors.\n\n Returns:\n `inputs`, but tensors may have been casted to self._compute_dtype\n \"\"\"\n if not input_list:\n input_list = nest.flatten(inputs)\n\n compute_dtype_object = self._compute_dtype_object\n should_autocast = (\n self._autocast and compute_dtype_object and\n compute_dtype_object.is_floating)\n\n if (should_autocast and\n any(map(self._should_cast_single_input, input_list))):\n # Only perform expensive `nest` operation when needed.\n return nest.map_structure(self._cast_single_input, inputs)\n else:\n return inputs\n\n def _should_cast_single_input(self, x):\n if isinstance(x, _AUTOCAST_TYPES):\n return (self._compute_dtype_object and\n x.dtype != self._compute_dtype_object and x.dtype.is_floating)\n return False\n\n def _cast_single_input(self, x):\n \"\"\"Cast a single Tensor or TensorSpec to the compute dtype.\"\"\"\n if self._should_cast_single_input(x):\n if self._dtype_defaulted_to_floatx:\n self._warn_about_input_casting(x.dtype.base_dtype)\n return math_ops.cast(x, self._compute_dtype_object)\n else:\n return x\n\n def _warn_about_input_casting(self, input_dtype):\n # self._already_warned_about_input_casting is only retrieved or set in this\n # function.\n already_warned = getattr(self, '_already_warned_about_input_casting', False)\n if not already_warned:\n tf_logging.warn(\n \"Layer {self.name} is casting an input tensor from dtype \"\n \"{input_dtype} to the layer's dtype of {layer_dtype}, which is new \"\n \"behavior in TensorFlow 2. The layer has dtype {layer_dtype} \"\n 'because its dtype defaults to floatx.\\n\\n'\n \"\"\n \"If you intended to run this layer in {layer_dtype}, you can safely \"\n \"ignore this warning. If in doubt, this warning is likely only an \"\n \"issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\\n\\n\"\n \"\"\n \"To change all layers to have dtype {input_dtype} by default, call \"\n \"`tf.keras.backend.set_floatx('{input_dtype}')`. To change just this \"\n \"layer, pass dtype='{input_dtype}' to the layer constructor. 
If you \"\n \"are the author of this layer, you can disable autocasting by \"\n \"passing autocast=False to the base Layer constructor.\\n\".format(\n self=self,\n input_dtype=input_dtype.name,\n layer_dtype=self._compute_dtype))\n self._already_warned_about_input_casting = True\n\n # _dtype used to be an attribute set in the constructor. We still expose it\n # because some clients still use it.\n # TODO(reedwm): Deprecate, then remove the _dtype property.\n @property\n def _dtype(self):\n # This is equivalent to returning self.dtype . We do not return self.dtype\n # as it would cause infinite recursion in a few subclasses, which override\n # \"dtype\" to return self._dtype.\n return self._dtype_policy.variable_dtype\n\n @_dtype.setter\n def _dtype(self, value):\n value = dtypes.as_dtype(value).name\n self._set_dtype_policy(policy.Policy(value))\n\n def _name_scope(self):\n if not tf2.enabled():\n return self.name\n name_scope = self.name\n current_name_scope = ops.get_name_scope()\n if current_name_scope:\n name_scope = current_name_scope + '/' + name_scope\n if name_scope:\n # Note that the trailing `/` prevents autogenerated\n # numerical suffixes to get appended. It will also fully reset\n # nested name scope (i.e. the outer name scope has no effect).\n name_scope += '/'\n return name_scope\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = backend.unique_object_name(\n generic_utils.to_snake_case(self.__class__.__name__),\n zero_based=zero_based)\n else:\n self._name = name\n\n def _get_existing_metric(self, name=None):\n match = [m for m in self._metrics if m.name == name]\n if not match:\n return\n if len(match) > 1:\n raise ValueError(\n 'Please provide different names for the metrics you have added. '\n 'We found {} metrics with the name: \"{}\"'.format(len(match), name))\n return match[0]\n\n def _handle_weight_regularization(self, name, variable, regularizer):\n \"\"\"Create lambdas which compute regularization losses.\"\"\"\n\n def _loss_for_variable(v):\n \"\"\"Creates a regularization loss `Tensor` for variable `v`.\"\"\"\n with backend.name_scope(name + '/Regularizer'):\n regularization = regularizer(v)\n return regularization\n\n if isinstance(variable, tf_variables.PartitionedVariable):\n for v in variable:\n self.add_loss(functools.partial(_loss_for_variable, v))\n else:\n self.add_loss(functools.partial(_loss_for_variable, variable))\n\n def _handle_activity_regularization(self, inputs, outputs):\n # Apply activity regularization.\n # Note that it should be applied every time the layer creates a new\n # output, since it is output-specific.\n if self._activity_regularizer:\n output_list = nest.flatten(outputs)\n with backend.name_scope('ActivityRegularizer'):\n for output in output_list:\n activity_loss = self._activity_regularizer(output)\n batch_size = math_ops.cast(\n array_ops.shape(output)[0], activity_loss.dtype)\n # Make activity regularization strength batch-agnostic.\n mean_activity_loss = activity_loss / batch_size\n self.add_loss(mean_activity_loss)\n\n def _set_mask_metadata(self, inputs, outputs, previous_mask, build_graph):\n # Many `Layer`s don't need to call `compute_mask`.\n # This method is optimized to do as little work as needed for the common\n # case.\n if not self._supports_masking:\n return\n\n flat_outputs = nest.flatten(outputs)\n\n mask_already_computed = (\n getattr(self, '_compute_output_and_mask_jointly', False) or\n all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))\n if 
mask_already_computed:\n if build_graph:\n self._set_mask_keras_history_checked(flat_outputs)\n return\n\n output_masks = self.compute_mask(inputs, previous_mask)\n if output_masks is None:\n return\n\n flat_masks = nest.flatten(output_masks)\n for tensor, mask in zip(flat_outputs, flat_masks):\n try:\n tensor._keras_mask = mask\n except AttributeError:\n # C Type such as np.ndarray.\n pass\n\n if build_graph:\n self._set_mask_keras_history_checked(flat_outputs)\n\n def _set_mask_keras_history_checked(self, flat_outputs):\n for output in flat_outputs:\n if getattr(output, '_keras_mask', None) is not None:\n # Do not track masks for `TensorFlowOpLayer` construction.\n output._keras_mask._keras_history_checked = True\n\n def _get_input_masks(self, inputs, input_list, args, kwargs):\n if not self._supports_masking and not self._expects_mask_arg:\n # Input masks only need to be retrieved if they are needed for `call`\n # or `compute_mask`.\n input_masks = None\n implicit_mask = False\n elif self._call_arg_was_passed('mask', args, kwargs):\n input_masks = self._get_call_arg_value('mask', args, kwargs)\n implicit_mask = False\n else:\n input_masks = [getattr(t, '_keras_mask', None) for t in input_list]\n if all(mask is None for mask in input_masks):\n input_masks = None\n implicit_mask = False\n else:\n # Only do expensive `nest` op when masking is actually being used.\n input_masks = nest.pack_sequence_as(inputs, input_masks)\n implicit_mask = True\n return input_masks, implicit_mask\n\n def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):\n # Performance optimization: do no work in most common case.\n if not args and not kwargs:\n return False\n\n if arg_name in kwargs:\n return True\n call_fn_args = self._call_fn_args\n if not inputs_in_args:\n # Ignore `inputs` arg.\n call_fn_args = call_fn_args[1:]\n return arg_name in dict(zip(call_fn_args, args))\n\n def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):\n if arg_name in kwargs:\n return kwargs[arg_name]\n call_fn_args = self._call_fn_args\n if not inputs_in_args:\n # Ignore `inputs` arg.\n call_fn_args = call_fn_args[1:]\n args_dict = dict(zip(call_fn_args, args))\n return args_dict[arg_name]\n\n def _set_call_arg_value(\n self, arg_name, new_value, args,\n kwargs, inputs_in_args=False, pop_kwarg_if_none=False):\n arg_pos = self._call_fn_arg_positions.get(arg_name, None)\n if arg_pos is not None:\n if not inputs_in_args:\n # Ignore `inputs` arg.\n arg_pos = arg_pos - 1\n if len(args) > arg_pos:\n args = list(args)\n args[arg_pos] = new_value\n return tuple(args), kwargs\n if new_value is None and pop_kwarg_if_none:\n kwargs.pop(arg_name, None)\n else:\n kwargs[arg_name] = new_value\n return args, kwargs\n\n def _set_connectivity_metadata(self, args, kwargs, outputs):\n # If the layer returns tensors from its inputs unmodified,\n # we copy them to avoid loss of KerasHistory metadata.\n flat_outputs = nest.flatten(outputs)\n flat_inputs = nest.flatten((args, kwargs))\n inputs_set = object_identity.ObjectIdentitySet(flat_inputs)\n outputs_copy = []\n for x in flat_outputs:\n if x in inputs_set:\n with backend.name_scope(self.name):\n x = array_ops.identity(x)\n outputs_copy.append(x)\n outputs = nest.pack_sequence_as(outputs, outputs_copy)\n\n # Create node, Node wires itself to inbound and outbound layers.\n # The Node constructor actually updates this layer's self._inbound_nodes,\n # sets _keras_history on the outputs, and adds itself to the\n # `_outbound_nodes` of the layers that produced 
the inputs to this\n # layer call.\n node_module.Node(self, call_args=args, call_kwargs=kwargs, outputs=outputs)\n return outputs\n\n def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n \"\"\"Private utility to retrieves an attribute (e.g. inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Arguments:\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n Returns:\n The layer's attribute `attr` at the node of index `node_index`.\n\n Raises:\n RuntimeError: If the layer has no inbound nodes, or if called in Eager\n mode.\n ValueError: If the index provided does not match any node.\n \"\"\"\n if not self._inbound_nodes:\n raise RuntimeError('The layer has never been called '\n 'and thus has no defined ' + attr_name + '.')\n if not len(self._inbound_nodes) > node_index:\n raise ValueError('Asked to get ' + attr_name + ' at node ' +\n str(node_index) + ', but the layer has only ' +\n str(len(self._inbound_nodes)) + ' inbound nodes.')\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values\n\n def _maybe_build(self, inputs):\n # Check input assumptions set before layer building, e.g. input rank.\n if not self.built:\n input_spec.assert_input_compatibility(\n self.input_spec, inputs, self.name)\n input_list = nest.flatten(inputs)\n if input_list and self._dtype_policy.compute_dtype is None:\n try:\n dtype = input_list[0].dtype.base_dtype.name\n except AttributeError:\n pass\n else:\n self._set_dtype_policy(policy.Policy(dtype))\n input_shapes = None\n # Converts Tensors / CompositeTensors to TensorShapes.\n if all(hasattr(x, 'shape') for x in input_list):\n input_shapes = tf_utils.get_shapes(inputs)\n else:\n # Converts input shape to TensorShapes.\n try:\n input_shapes = tf_utils.convert_shapes(inputs, to_tuples=False)\n except ValueError:\n pass\n # Only call `build` if the user has manually overridden the build method.\n if not hasattr(self.build, '_is_default'):\n # Any setup work performed only once should happen in an `init_scope`\n # to avoid creating symbolic Tensors that will later pollute any eager\n # operations.\n with tf_utils.maybe_init_scope(self):\n self.build(input_shapes) # pylint:disable=not-callable\n # We must set also ensure that the layer is marked as built, and the build\n # shape is stored since user defined build functions may not be calling\n # `super.build()`\n Layer.build(self, input_shapes)\n\n # Optionally load weight values specified at layer instantiation.\n if self._initial_weights is not None:\n if ops.executing_eagerly_outside_functions():\n with ops.init_scope():\n # Using `init_scope` since we want variable assignment in\n # `set_weights` to be treated like variable initialization.\n self.set_weights(self._initial_weights)\n else:\n self.set_weights(self._initial_weights)\n self._initial_weights = None\n\n def _symbolic_call(self, inputs):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n output_shapes = self.compute_output_shape(input_shapes)\n # Convert to TensorShape so that nest.map_structure will not map into\n # individual dim of the shape.\n output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False)\n\n def _make_placeholder_like(shape):\n ph = backend.placeholder(shape=shape, dtype=self.dtype)\n 
ph._keras_mask = None\n return ph\n return nest.map_structure(_make_placeholder_like, output_shapes)\n\n def _get_trainable_state(self):\n \"\"\"Get the `trainable` state of each sublayer.\n\n Returns:\n A dict mapping all sublayers to their `trainable` value.\n \"\"\"\n trainable_state = weakref.WeakKeyDictionary()\n for layer in self._flatten_layers():\n trainable_state[layer] = layer.trainable\n return trainable_state\n\n def _set_trainable_state(self, trainable_state):\n \"\"\"Set `trainable` state for each sublayer.\"\"\"\n for layer in self._flatten_layers():\n if layer in trainable_state:\n layer.trainable = trainable_state[layer]\n\n @property\n def _obj_reference_counts(self):\n \"\"\"A dictionary counting the number of attributes referencing an object.\"\"\"\n self._maybe_create_attribute('_obj_reference_counts_dict',\n object_identity.ObjectIdentityDictionary())\n return self._obj_reference_counts_dict\n\n @trackable.no_automatic_dependency_tracking\n def _maybe_create_attribute(self, name, default_value):\n \"\"\"Create the attribute with the default value if it hasn't been created.\n\n This is useful for fields that is used for tracking purpose,\n _trainable_weights, or _layers. Note that user could create a layer subclass\n and assign an internal field before invoking the Layer.__init__(), the\n __setattr__() need to create the tracking fields and __init__() need to not\n override them.\n\n Args:\n name: String, the name of the attribute.\n default_value: Object, the default value of the attribute.\n \"\"\"\n if not hasattr(self, name):\n super(Layer, self).__setattr__(name, default_value)\n\n def __delattr__(self, name):\n # For any super.__delattr__() call, we will directly use the implementation\n # in Trackable and skip the behavior in AutoTrackable. The Layer was\n # originally use Trackable as base class, the change of using Module as base\n # class forced us to have AutoTrackable in the class hierarchy. Skipping\n # the __delattr__ and __setattr__ in AutoTrackable will keep the status quo.\n existing_value = getattr(self, name, None)\n\n # If this value is replacing an existing object assigned to an attribute, we\n # should clean it out to avoid leaking memory. First we check if there are\n # other attributes referencing it.\n reference_counts = self._obj_reference_counts\n if existing_value not in reference_counts:\n super(tracking.AutoTrackable, self).__delattr__(name)\n return\n\n reference_count = reference_counts[existing_value]\n if reference_count > 1:\n # There are other remaining references. 
We can't remove this object from\n # _layers etc.\n reference_counts[existing_value] = reference_count - 1\n super(tracking.AutoTrackable, self).__delattr__(name)\n return\n else:\n # This is the last remaining reference.\n del reference_counts[existing_value]\n\n super(tracking.AutoTrackable, self).__delattr__(name)\n\n if (isinstance(existing_value, Layer)\n or trackable_layer_utils.has_weights(existing_value)):\n super(tracking.AutoTrackable, self).__setattr__(\n '_layers',\n [l for l in self._layers if l is not existing_value])\n if isinstance(existing_value, tf_variables.Variable):\n super(tracking.AutoTrackable, self).__setattr__(\n '_trainable_weights',\n [w for w in self._trainable_weights if w is not existing_value])\n super(tracking.AutoTrackable, self).__setattr__(\n '_non_trainable_weights',\n [w for w in self._non_trainable_weights if w is not existing_value])\n\n def __setattr__(self, name, value):\n if (name == '_self_setattr_tracking' or\n not getattr(self, '_self_setattr_tracking', True) or\n # Exclude @property.setters from tracking\n hasattr(self.__class__, name)):\n try:\n super(tracking.AutoTrackable, self).__setattr__(name, value)\n except AttributeError:\n raise AttributeError(\n ('Can\\'t set the attribute \"{}\", likely because it conflicts with '\n 'an existing read-only @property of the object. Please choose a '\n 'different name.').format(name))\n return\n\n # Keep track of trackable objects, for the needs of `Network.save_weights`.\n value = data_structures.sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n\n reference_counts = self._obj_reference_counts\n reference_counts[value] = reference_counts.get(value, 0) + 1\n\n # Clean out the old attribute, which clears _layers and _trainable_weights\n # if necessary.\n try:\n self.__delattr__(name)\n except AttributeError:\n pass\n\n # Keep track of metric instance created in subclassed layer.\n from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top\n for val in nest.flatten(value):\n if isinstance(val, metrics_module.Metric) and hasattr(self, '_metrics'):\n self._metrics.append(val)\n\n # TODO(scottzhu): Need to track Module object as well for weight tracking.\n # Be careful about metric if it becomes a Module in future.\n # Append value to self._layers if relevant\n if (getattr(self, '_auto_track_sub_layers', True) and\n (isinstance(value, Layer) or trackable_layer_utils.has_weights(value))):\n self._maybe_create_attribute('_layers', [])\n # We need to check object identity to avoid de-duplicating empty\n # container types which compare equal.\n if not any((layer is value for layer in self._layers)):\n self._layers.append(value)\n if hasattr(value, '_use_resource_variables'):\n # Legacy layers (V1 tf.layers) must always use\n # resource variables.\n value._use_resource_variables = True\n\n # Append value to list of trainable / non-trainable weights if relevant\n # TODO(b/125122625): This won't pick up on any variables added to a\n # list/dict after creation.\n for val in nest.flatten(value):\n # TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops\n # no longer return True for isinstance Variable checks.\n if not isinstance(val, tf_variables.Variable):\n continue\n if isinstance(val, resource_variable_ops._UnreadVariable): # pylint: disable=protected-access\n continue\n\n # Users may add extra weights/variables\n # simply by assigning them to attributes (invalid for graph networks)\n 
self._maybe_create_attribute('_trainable_weights', [])\n self._maybe_create_attribute('_non_trainable_weights', [])\n if val.trainable:\n if any(val is w for w in self._trainable_weights):\n continue\n self._trainable_weights.append(val)\n else:\n if any(val is w for w in self._non_trainable_weights):\n continue\n self._non_trainable_weights.append(val)\n\n backend.track_variable(val)\n\n # Skip the auto trackable from tf.Module to keep status quo. See the comment\n # at __delattr__.\n super(tracking.AutoTrackable, self).__setattr__(name, value)\n\n def _gather_children_attribute(self, attribute):\n assert attribute in {\n 'weights', 'trainable_weights', 'non_trainable_weights'\n }\n if hasattr(self, '_layers'):\n nested_layers = trackable_layer_utils.filter_empty_layer_containers(\n self._layers)\n return list(\n itertools.chain.from_iterable(\n getattr(layer, attribute) for layer in nested_layers))\n return []\n\n def _flatten_layers(self, recursive=True, include_self=True):\n if include_self:\n yield self\n\n # Only instantiate set and deque if needed.\n layers_or_containers = getattr(self, '_layers', None)\n if layers_or_containers:\n seen_object_ids = set()\n deque = collections.deque(layers_or_containers)\n while deque:\n layer_or_container = deque.popleft()\n\n layer_or_container_id = id(layer_or_container)\n if layer_or_container_id in seen_object_ids:\n continue\n seen_object_ids.add(layer_or_container_id)\n\n if isinstance(layer_or_container, Layer):\n yield layer_or_container\n # Introspect recursively through sublayers.\n if recursive:\n sublayers = getattr(layer_or_container, '_layers', None)\n if sublayers:\n deque.extendleft(reversed(sublayers))\n elif isinstance(layer_or_container,\n data_structures.TrackableDataStructure):\n # Data structures are introspected even with `recursive=False`.\n tracked_values = layer_or_container._values\n if tracked_values:\n deque.extendleft(reversed(tracked_values))\n\n # This is a hack so that the is_layer (within\n # training/trackable/layer_utils.py) check doesn't get the weights attr.\n # TODO(b/110718070): Remove when fixed.\n def _is_layer(self):\n return True\n\n def _init_call_fn_args(self):\n # Clear cached call function arguments.\n self.__class__._call_full_argspec.fget.cache.pop(self, None)\n self.__class__._call_fn_args.fget.cache.pop(self, None)\n self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)\n\n call_fn_args = self._call_fn_args\n self._expects_training_arg = ('training' in call_fn_args or\n self._call_accepts_kwargs)\n # The default training arg will be any (non-None) default specified in the\n # method signature, or None if no value is specified.\n self._default_training_arg = self._call_fn_arg_defaults.get(\n 'training')\n self._expects_mask_arg = ('mask' in call_fn_args or\n self._call_accepts_kwargs)\n\n @property\n @tracking.cached_per_instance\n def _call_full_argspec(self):\n # Argspec inspection is expensive and the call spec is used often, so it\n # makes sense to cache the result.\n return tf_inspect.getfullargspec(self.call)\n\n @property\n @tracking.cached_per_instance\n def _call_fn_args(self):\n all_args = self._call_full_argspec.args\n # Scrub `self` that appears if a decorator was applied.\n if all_args and all_args[0] == 'self':\n return all_args[1:]\n return all_args\n\n @property\n @tracking.cached_per_instance\n def _call_fn_arg_defaults(self):\n call_fn_args = self._call_fn_args\n call_fn_defaults = self._call_full_argspec.defaults or []\n defaults = dict()\n\n # The call arg defaults are 
an n-tuple of the last n elements of the args\n # list. (n = # of elements that have a default argument)\n for i in range(-1 * len(call_fn_defaults), 0):\n defaults[call_fn_args[i]] = call_fn_defaults[i]\n return defaults\n\n @property\n @tracking.cached_per_instance\n def _call_fn_arg_positions(self):\n call_fn_arg_positions = dict()\n for pos, arg in enumerate(self._call_fn_args):\n call_fn_arg_positions[arg] = pos\n return call_fn_arg_positions\n\n @property\n @tracking.cached_per_instance\n def _call_accepts_kwargs(self):\n return self._call_full_argspec.varkw is not None\n\n @property\n def _eager_losses(self):\n # A list of loss values containing activity regularizers and losses\n # manually added through `add_loss` during eager execution. It is cleared\n # after every batch.\n # Because we plan on eventually allowing a same model instance to be trained\n # in eager mode or graph mode alternatively, we need to keep track of\n # eager losses and symbolic losses via separate attributes.\n if not hasattr(self._thread_local, '_eager_losses'):\n self._thread_local._eager_losses = []\n return self._thread_local._eager_losses\n\n @_eager_losses.setter\n def _eager_losses(self, losses):\n self._thread_local._eager_losses = losses\n\n def _dedup_weights(self, weights):\n \"\"\"Dedupe weights while maintaining order as much as possible.\"\"\"\n output, seen_weights = [], object_identity.ObjectIdentitySet()\n for w in weights:\n if w not in seen_weights:\n output.append(w)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_weights.add(w)\n return output\n\n def _split_out_first_arg(self, args, kwargs):\n # Grab the argument corresponding to the first argument in the\n # layer's `call` method spec. This will either be the first positional\n # argument, or it will be provided as a keyword argument.\n if args:\n inputs = args[0]\n args = args[1:]\n elif self._call_fn_args[0] in kwargs:\n kwargs = copy.copy(kwargs)\n inputs = kwargs.pop(self._call_fn_args[0])\n else:\n raise ValueError(\n 'The first argument to `Layer.call` must always be passed.')\n return inputs, args, kwargs\n\n # SavedModel properties. 
Please see keras/saving/saved_model for details.\n\n @trackable.no_automatic_dependency_tracking\n def _set_save_spec(self, inputs):\n if self._saved_model_inputs_spec is not None:\n return # Already set.\n\n self._saved_model_inputs_spec = nest.map_structure(tf_utils.get_tensor_spec,\n inputs)\n\n def _get_save_spec(self, dynamic_batch=True):\n if self._saved_model_inputs_spec is None:\n return None\n\n return nest.map_structure(\n lambda t: tf_utils.get_tensor_spec(t, dynamic_batch=dynamic_batch),\n self._saved_model_inputs_spec)\n\n @property\n def _trackable_saved_model_saver(self):\n return layer_serialization.LayerSavedModelSaver(self)\n\n @property\n def _object_identifier(self):\n return self._trackable_saved_model_saver.object_identifier\n\n @property\n def _tracking_metadata(self):\n return self._trackable_saved_model_saver.tracking_metadata\n\n def _list_extra_dependencies_for_serialization(self, serialization_cache):\n return (self._trackable_saved_model_saver\n .list_extra_dependencies_for_serialization(serialization_cache))\n\n def _list_functions_for_serialization(self, serialization_cache):\n return (self._trackable_saved_model_saver\n .list_functions_for_serialization(serialization_cache))\n\n def __getstate__(self):\n # Override to support `copy.deepcopy` and pickling.\n # Thread-local objects cannot be copied in Python 3, so pop these.\n # Thread-local objects are used to cache losses in MirroredStrategy, and\n # so shouldn't be copied.\n state = self.__dict__.copy()\n state.pop('_thread_local', None)\n state.pop('_metrics_lock', None)\n return state\n\n def __setstate__(self, state):\n state['_thread_local'] = threading.local()\n state['_metrics_lock'] = threading.Lock()\n # Bypass Trackable logic as `__dict__` already contains this info.\n object.__setattr__(self, '__dict__', state)\n\n\nclass TensorFlowOpLayer(Layer):\n \"\"\"Wraps a TensorFlow Operation in a Layer.\n\n This class is used internally by the Functional API. When a user\n uses a raw TensorFlow Operation on symbolic tensors originating\n from an `Input` Layer, the resultant operation will be wrapped\n with this Layer object in order to make the operation compatible\n with the Keras API.\n\n This Layer will create a new, identical operation (except for inputs\n and outputs) every time it is called. If `run_eagerly` is `True`,\n the op creation and calculation will happen inside an Eager function.\n\n Instances of this Layer are created when `autolambda` is called, which\n is whenever a Layer's `__call__` encounters symbolic inputs that do\n not have Keras metadata, or when a Network's `__init__` encounters\n outputs that do not have Keras metadata.\n\n Attributes:\n node_def: String, the serialized NodeDef of the Op this layer will wrap.\n name: String, the name of the Layer.\n constants: Dict of NumPy arrays, the values of any Tensors needed for this\n Operation that do not originate from a Keras `Input` Layer. Since all\n placeholders must come from Keras `Input` Layers, these Tensors must be\n treated as constant in the Functional API.\n trainable: Bool, whether this Layer is trainable. Currently Variables are\n not supported, and so this parameter has no effect.\n dtype: The default dtype of this Layer. 
Inherited from `Layer` and has no\n effect on this class, however is used in `get_config`.\n \"\"\"\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self,\n node_def,\n name,\n constants=None,\n trainable=True,\n dtype=None):\n # Pass autocast=False, as if inputs are cast, input types might not match\n # Operation type.\n super(TensorFlowOpLayer, self).__init__(\n name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,\n autocast=False)\n _keras_layers_gauge.get_cell('TensorflowOpLayer').set(True)\n if isinstance(node_def, dict):\n self.node_def = json_format.ParseDict(node_def, node_def_pb2.NodeDef())\n else:\n if not isinstance(node_def, bytes):\n node_def = node_def.encode('utf-8')\n self.node_def = node_def_pb2.NodeDef.FromString(node_def)\n # JSON serialization stringifies keys which are integer input indices.\n self.constants = ({\n int(index): constant for index, constant in constants.items()\n } if constants is not None else {})\n # Layer uses original op unless it is called on new inputs.\n # This means `built` is not set in `__call__`.\n self.built = True\n\n def call(self, inputs):\n if context.executing_eagerly():\n return self._defun_call(inputs)\n return self._make_op(inputs)\n\n def _make_node_def(self, graph):\n node_def = node_def_pb2.NodeDef()\n node_def.CopyFrom(self.node_def)\n # Used in TPUReplicateContext to indicate whether this node has been cloned\n # and to not add TPU attributes.\n node_def.attr['_cloned'].b = True\n node_def.name = graph.unique_name(node_def.name)\n return node_def\n\n def _make_op(self, inputs):\n inputs = nest.flatten(inputs)\n graph = inputs[0].graph\n node_def = self._make_node_def(graph)\n with graph.as_default():\n for index, constant in self.constants.items():\n # Recreate constant in graph to add distribution context.\n value = tensor_util.constant_value(constant)\n if value is not None:\n constant = constant_op.constant(value, name=node_def.input[index])\n inputs.insert(index, constant)\n c_op = ops._create_c_op(graph, node_def, inputs, control_inputs=[])\n op = graph._create_op_from_tf_operation(c_op)\n op._control_flow_post_processing()\n\n # Record the gradient because custom-made ops don't go through the\n # code-gen'd eager call path\n op_type = compat.as_str(op.op_def.name)\n attr_names = [compat.as_str(attr.name) for attr in op.op_def.attr]\n attrs = []\n for attr_name in attr_names:\n attrs.append(attr_name)\n attrs.append(op.get_attr(attr_name))\n attrs = tuple(attrs)\n execute.record_gradient(op_type, op.inputs, attrs, op.outputs)\n\n if len(op.outputs) == 1:\n return op.outputs[0]\n return op.outputs\n\n @function.defun\n def _defun_call(self, inputs):\n \"\"\"Wraps the op creation method in an Eager function for `run_eagerly`.\"\"\"\n return self._make_op(inputs)\n\n def get_config(self):\n config = super(TensorFlowOpLayer, self).get_config()\n config.update({\n # `__init__` prefixes the name. 
Revert to the constructor argument.\n 'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],\n 'node_def': json_format.MessageToDict(self.node_def),\n 'constants': {\n i: backend.get_value(c) for i, c in self.constants.items()\n }\n })\n return config\n\n\nclass AddLoss(Layer):\n \"\"\"Adds its inputs as a loss.\n\n Attributes:\n unconditional: Whether or not the loss should be conditioned on the inputs.\n \"\"\"\n\n def __init__(self, unconditional, **kwargs):\n # Pass autocast=False, as there is no reason to cast loss to a different\n # dtype.\n kwargs['autocast'] = False\n super(AddLoss, self).__init__(**kwargs)\n self.unconditional = unconditional\n\n def call(self, inputs):\n self.add_loss(inputs, inputs=(not self.unconditional))\n return inputs\n\n def get_config(self):\n config = super(AddLoss, self).get_config()\n config.update({'unconditional': self.unconditional})\n return config\n\n\nclass AddMetric(Layer):\n \"\"\"Adds its inputs as a metric.\n\n Attributes:\n aggregation: 'mean' or None. How the inputs should be aggregated.\n metric_name: The name to use for this metric.\n \"\"\"\n\n def __init__(self, aggregation=None, metric_name=None, **kwargs):\n super(AddMetric, self).__init__(**kwargs)\n self.aggregation = aggregation\n self.metric_name = metric_name\n\n def call(self, inputs):\n self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name)\n return inputs\n\n def get_config(self):\n config = super(AddMetric, self).get_config()\n config.update({\n 'aggregation': self.aggregation,\n 'metric_name': self.metric_name\n })\n return config\n\n\ndef _in_functional_construction_mode(inputs, args, kwargs, input_list): # pylint: disable=unused-argument\n \"\"\"Check the arguments to see if we are constructing a functional model.\"\"\"\n if keras_tensor.keras_tensors_enabled():\n # We are constructing a functional model if any of the inputs\n # are KerasTensors\n return any(\n isinstance(tensor, keras_tensor.KerasTensor)\n for tensor in nest.flatten([inputs, args, kwargs]))\n else:\n if context.executing_eagerly():\n return all(tf_utils.is_symbolic_tensor(t) for t in input_list)\n else:\n return (base_layer_utils.is_in_keras_graph() or\n all(hasattr(t, '_keras_history') for t in input_list))\n\n\ndef _convert_numpy_or_python_types(x):\n if isinstance(x, (np.ndarray, float, int)):\n return ops.convert_to_tensor_v2(x)\n return x\n\n\n# Avoid breaking users who directly import this symbol from this file.\n# TODO(fchollet): remove this.\nInputSpec = input_spec.InputSpec # pylint:disable=invalid-name\n"
] | [
[
"tensorflow.python.keras.mixed_precision.experimental.policy.serialize",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.keras.mixed_precision.experimental.policy.policy_defaults_to_floatx",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.training.tracking.layer_utils.has_weights",
"tensorflow.python.keras.backend.track_variable",
"tensorflow.python.keras.engine.input_spec.assert_input_compatibility",
"tensorflow.python.keras.engine.base_layer_utils.is_in_tf_function",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.keras.utils.generic_utils.is_default",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.keras.engine.node.Node",
"tensorflow.python.autograph.core.ag_ctx.control_status_ctx",
"tensorflow.python.keras.utils.generic_utils.validate_kwargs",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.keras.engine.base_layer_utils.TrackableWeightHandler",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.backend.global_learning_phase_is_set",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.keras.mixed_precision.experimental.policy.global_policy",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.keras.mixed_precision.experimental.policy.Policy",
"tensorflow.python.keras.utils.tf_utils.maybe_init_scope",
"tensorflow.python.framework.ops.enable_auto_cast_variables",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.engine.base_layer_utils.have_all_keras_metadata",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.utils.generic_utils.to_snake_case",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.keras.engine.base_layer_utils.from_saved_model",
"tensorflow.python.keras.engine.base_layer_utils.v2_dtype_behavior_enabled",
"tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor",
"tensorflow.core.framework.node_def_pb2.NodeDef.FromString",
"tensorflow.python.keras.engine.base_layer_utils.is_subclassed",
"tensorflow.python.framework.ops.name_scope_v2",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.keras.backend.placeholder",
"tensorflow.python.keras.mixed_precision.experimental.loss_scale_optimizer.strategy_supports_loss_scaling",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.utils.tf_utils.convert_shapes",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.util.compat.as_str",
"tensorflow.python.keras.saving.saved_model.layer_serialization.LayerSavedModelSaver",
"tensorflow.python.keras.engine.base_layer_utils.call_context",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.keras.mixed_precision.experimental.policy.deserialize",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.keras.utils.tf_utils.get_shapes",
"tensorflow.python.util.object_identity.ObjectIdentitySet",
"tensorflow.python.keras.engine.base_layer_utils.create_keras_history",
"tensorflow.core.framework.node_def_pb2.NodeDef",
"tensorflow.python.tf2.enabled",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.training.tracking.layer_utils.filter_empty_layer_containers",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.keras.engine.base_layer_utils.needs_keras_history",
"tensorflow.python.eager.monitoring.BoolGauge",
"tensorflow.python.keras.engine.base_layer_utils.is_in_keras_graph",
"tensorflow.python.keras.mixed_precision.experimental.autocast_variable.create_autocast_variable",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.ops._create_c_op",
"tensorflow.python.training.tracking.data_structures.sticky_attribute_assignment",
"tensorflow.python.keras.utils.layer_utils.count_params",
"tensorflow.python.keras.engine.keras_tensor.keras_tensors_enabled",
"tensorflow.python.util.object_identity.ObjectIdentityDictionary",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.keras.utils.tf_utils.get_tensor_spec",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.4"
]
}
] |
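The __getstate__/__setstate__ pair near the top of the base_layer.py entry above is what lets Keras layers survive copy.deepcopy and pickling despite holding thread-local caches. The sketch below shows the same pattern in isolation; the Tracker class and its attribute names are invented for illustration and are not part of the Keras source.

import copy
import threading


class Tracker:
    """Holds a non-picklable threading.local cache alongside regular state."""

    def __init__(self, name):
        self.name = name
        self._thread_local = threading.local()  # blocks naive deepcopy/pickling

    def __getstate__(self):
        # Drop the thread-local object before copying/pickling.
        state = self.__dict__.copy()
        state.pop('_thread_local', None)
        return state

    def __setstate__(self, state):
        # Recreate the thread-local object on the copy.
        state['_thread_local'] = threading.local()
        self.__dict__.update(state)


original = Tracker('layer_0')
clone = copy.deepcopy(original)  # works because __getstate__ removed the blocker
print(clone.name, hasattr(clone, '_thread_local'))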
ebouilhol/deepcell-tf | [
"2e609ba4daab526595ad628782a1594952320a5f"
] | [
"deepcell/model_zoo/resnet_3D.py"
] | [
"# 3D adaptation of ResNet50, based on the 2D version of : https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\nfrom scipy import interpolate\nfrom skimage import io\nfrom keras.layers import Input, Add, Dense, Activation, ZeroPadding3D, BatchNormalization, Flatten, Conv3D, AveragePooling3D, MaxPooling3D\nfrom keras.models import Model\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom keras.initializers import glorot_uniform\nimport numpy as np\nimport deepcell\nimport keras.backend as K\nK.set_learning_phase(1)\n\ndef interpolation_z_axis(data, multiplicator):\n result_img = np.empty((data.shape[0], data.shape[1] * multiplicator, data.shape[2], data.shape[3]))\n\n for i in range(data.shape[0]):\n print(i)\n X = data[i]\n temp_image = np.empty((X.shape[0] * multiplicator, X.shape[1], X.shape[2]))\n temp_image[:] = np.nan\n for j in range(X.shape[0]):\n temp_image[j * multiplicator] = X[j]\n temp_image[-1] = temp_image[-2]\n indexes = np.arange(temp_image.shape[0])\n good = np.isfinite(temp_image).all(axis=(1, 2))\n f = interpolate.interp1d(indexes[good], temp_image[good], bounds_error=False, axis=0)\n B = f(indexes)\n result_img[i] = B\n\n io.imsave('InterpolatedX_train_' + str(multiplicator) + '.tiff', result_img.astype('uint8'), 'tifffile')\n print(\"Final shape : \", result_img.shape)\n return result_img.astype('uint8')\n\n\n\n\ndef identity_block(X, f, filters, stage, block):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filterss of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n Output tensor for the block.\n \"\"\"\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value.\n X_shortcut = X\n\n # First component of main path\n X = Conv3D(filters=F1, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding='valid', name=conv_name_base + '2a',\n kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n X = BatchNormalization(axis=1, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv3D(filters=F2, kernel_size=(f, f, f), strides=(1, 1, 1), padding='same', name=conv_name_base + '2b',\n kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n X = BatchNormalization(axis=1, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv3D(filters=F3, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding='valid', name=conv_name_base + '2c',\n kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n X = BatchNormalization(axis=1, name=bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X\n\n\ndef convolutional_block(X, f, filters, stage, block, s=2):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of 
integers, the filterss of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n Output tensor for the block.\n Note that from stage 3, the first conv layer at main path is with strides=(2,2)\n And the shortcut should have strides=(2,2) as well\n \"\"\"\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value\n X_shortcut = X\n\n ##### MAIN PATH #####\n # First component of main path\n X = Conv3D(F1, (1, 1, 1), strides=(s, s, s), name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n X = BatchNormalization(axis=1, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv3D(filters=F2, kernel_size=(f, f, f), strides=(1, 1, 1), padding='same', name=conv_name_base + '2b',\n kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n X = BatchNormalization(axis=1, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv3D(filters=F3, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding='valid', name=conv_name_base + '2c',\n kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n X = BatchNormalization(axis=1, name=bn_name_base + '2c')(X)\n\n ##### SHORTCUT PATH ####\n X_shortcut = Conv3D(filters=F3, kernel_size=(1, 1, 1), strides=(s, s, s), padding='valid', name=conv_name_base + '1',\n kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X_shortcut)\n X_shortcut = BatchNormalization(axis=1, name=bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X\n\ndef ResNet50(input_shape, classes=6):\n \"\"\"\n Implementation of the popular ResNet50 the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n\n # Define the input as a tensor with shape input_shape\n X_input = Input(input_shape)\n print(\"Input shape : \", X_input.shape)\n\n # Zero-Padding\n X = ZeroPadding3D(padding=(3, 3, 3), data_format = 'channels_first')(X_input)\n print(\"X shape Zero Padding : \", X.shape)\n\n # Stage 1\n X = Conv3D(64, (7, 7, 7), strides=(2, 2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0), data_format = 'channels_first')(X)\n print(\"X shape conv3D: \", X.shape)\n\n X = BatchNormalization(axis=1, name='bn_conv1')(X)\n print(\"X shape BatchNormalization: \", X.shape)\n\n X = Activation('relu')(X)\n print(\"X shape Activation: \", X.shape)\n\n X = MaxPooling3D((3, 3, 3), strides=(1, 1, 1))(X)\n print(\"X shape Stage 1 : \", X.shape)\n\n # Stage 2\n X = convolutional_block(X, f=3, filters=[64, 64, 64], stage=2, block='a', s=1)\n X = identity_block(X, 3, [64, 64, 64], stage=2, block='b')\n X = identity_block(X, 3, [64, 64, 64], stage=2, block='c')\n\n print(\"X shape Stage 2 : \", X.shape)\n\n # Stage 3\n X = convolutional_block(X, f=3, filters=[128, 128, 128], stage=3, block='a', 
s=2)\n X = identity_block(X, 3, [128, 128, 128], stage=3, block='b')\n X = identity_block(X, 3, [128, 128, 128], stage=3, block='c')\n X = identity_block(X, 3, [128, 128, 128], stage=3, block='d')\n\n print(\"X shape Stage 3 : \", X.shape)\n\n # Stage 4\n X = convolutional_block(X, f=3, filters=[256, 256, 256], stage=4, block='a', s=2)\n X = identity_block(X, 3, [256, 256, 256], stage=4, block='b')\n X = identity_block(X, 3, [256, 256, 256], stage=4, block='c')\n X = identity_block(X, 3, [256, 256, 256], stage=4, block='d')\n X = identity_block(X, 3, [256, 256, 256], stage=4, block='e')\n X = identity_block(X, 3, [256, 256, 256], stage=4, block='f')\n\n print(\"X shape Stage 4 : \", X.shape)\n\n # Stage 5\n X = convolutional_block(X, f=3, filters=[512, 512, 512], stage=5, block='a', s=2)\n X = identity_block(X, 3, [512, 512, 512], stage=5, block='b')\n X = identity_block(X, 3, [512, 512, 512], stage=5, block='c')\n\n print(\"X shape Stage 5 : \", X.shape)\n\n # AVGPOOL\n X = AveragePooling3D((7, 7, 7), name=\"avg_pool\")(X)\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer=glorot_uniform(seed=0))(X)\n\n # Create model\n model = Model(inputs=X_input, outputs=X, name='ResNet50')\n\n return model\n\ndef main():\n\n # Download the data (saves to ~/.keras/datasets)\n filename = 'mousebrain.npz'\n (X_train, y_train), (X_test, y_test) = deepcell.datasets.mousebrain.load_data(filename)\n X_train_2 = X_train[:,:,:,:,0]\n y_train_2 = y_train[:,:,:,:,0]\n\n print('X_train_2.shape: {}\\ny.shape: {}'.format(X_train_2.shape, y_train.shape))\n\n io.imsave('InterpolatedX_train_' + str('zut') + '.tiff', y_train_2.astype('uint8'), 'tifffile')\n\n\n\n X = X_train_2[1:]\n print(\"X_train_2[1:]\", X.shape)\n\n X = X[np.newaxis]\n print('X.shape: {}\\ny.shape: {}'.format(X.shape, y_train.shape))\n X = X[1:]\n\n print(\"X.shape[1:]\", X.shape[1:])\n #model = ResNet50(input_shape=X.shape[1:], classes=6\n model = ResNet50(input_shape=(1,110,256,256), classes=6)\n #print(model.summary())\n\n # model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n #\n # X_train_interpol = interpolation_z_axis(X_train[:,:,:,:,0], 8)\n # y_train_interpol = interpolation_z_axis(y_train[:,:,:,:,0] ,8)\n #\n # X_train_interpol = X_train_interpol[:,:,:,:,np.newaxis]\n # y_train_interpol = y_train_interpol[:, :, :, :, np.newaxis]\n # print('X train inerpol : \"', X_train_interpol.shape)\n #\n # model.fit(X_train_interpol, y_train_interpol, epochs=25, batch_size=32)\n #\n # preds = model.evaluate(X_test, y_test)\n # print (\"Loss = \" + str(preds[0]))\n # print (\"Test Accuracy = \" + str(preds[1]))\n #\n # model.summary()\n #\n # plot_model(model, to_file='model.png')\n # SVG(model_to_dot(model).create(prog='dot', format='svg'))\n\nmain()\n"
] | [
[
"numpy.arange",
"scipy.interpolate.interp1d",
"numpy.isfinite",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
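interpolation_z_axis in the resnet_3D.py entry above upsamples a volume along z by spacing the known slices out, marking the gaps with NaN, and interpolating over them with scipy. A minimal self-contained sketch of that idea on a toy array; the shapes and the 2x factor are arbitrary choices for the example.

import numpy as np
from scipy import interpolate

# Toy stack: 3 z-slices of a 4x4 image, to be upsampled 2x along z.
stack = np.stack([np.full((4, 4), v, dtype=float) for v in (0.0, 10.0, 20.0)])
factor = 2

# Spread the known slices out and mark the gaps with NaN.
upsampled = np.full((stack.shape[0] * factor, 4, 4), np.nan)
upsampled[::factor] = stack
upsampled[-1] = upsampled[-factor]  # duplicate the last known slice so the end is defined

# Interpolate along axis 0 using only the slices that are fully finite.
z = np.arange(upsampled.shape[0])
known = np.isfinite(upsampled).all(axis=(1, 2))
f = interpolate.interp1d(z[known], upsampled[known], axis=0, bounds_error=False)
filled = f(z)

print(filled[:, 0, 0])  # values now ramp between the original slices: 0, 5, 10, 15, 20, 20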
Saadkhalid913/K-Means-Cluster-Model | [
"50167876af9b8ab38e70ecac9ccd9f9fa7804b53"
] | [
"KMeans.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nimport pandas as pd\nfrom sklearn.preprocessing import Normalizer, MinMaxScaler\nfrom math import sqrt\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\n\nclass KMeansModel():\n\n def __init__(self, K_clusters, X):\n self.K = K_clusters\n self.X = X\n self.Centres = np.random.rand(self.K, self.X.shape[1])\n self.Normalize()\n self.Classes = None\n self.N_Samples = self.X.shape[0]\n\n def GetEudlideanDistance(self, v1, v2):\n return sqrt(np.sum((v1-v2)**2))\n\n def FindClosestCentre(self, v1):\n # loops through all centres and returns the index of closest centre\n ClosestCentreDistance = float(\"inf\")\n ClosestCentreIndex = None\n for i in range(self.Centres.shape[0]):\n distance = self.GetEudlideanDistance(v1, self.Centres[i])\n if distance < ClosestCentreDistance:\n ClosestCentreDistance = distance\n ClosestCentreIndex = i\n return ClosestCentreIndex\n\n def MapClosestCentre(self):\n # This is done for each sample and creates a column vector where the \n # element at index i of the vector corresponds to the closest centre of\n # the ith sample\n Vec = []\n for i in range(self.N_Samples):\n Vec.append(self.FindClosestCentre(self.X[i]))\n\n self.Classes = np.array(Vec).reshape((-1, 1))\n\n def Normalize(self):\n # Normalizing function to express values between 0 and 1 \n N = MinMaxScaler()\n self.X = N.fit_transform(self.X)\n\n def ChangeCentres(self):\n # updates each centre to be the take on the mean position of each \n # sample which takes on the corresponding class of the centre\n for i in range(self.Centres.shape[0]): # we loop through all the centres \n self.Centres[i] = np.sum(\n # We use boolean indexing to only take the rows which correspond to the ith centre (the one we are currently re-calculating) \n self.X[np.array(km.Classes == i).reshape(1, -1)[0]], axis=0) / len(self.Classes[self.Classes == i]) # we divide by the number of samples in the class \n\nif __name__ == \"__main__\":\n # Creating some testing data \n k=2\n TrainingX, TrainingY = make_blobs(\n n_samples=1000, n_features=10, cluster_std=4, centers=k)\n km = KMeansModel(k, TrainingX)\n \n km.MapClosestCentre()\n\n\n for i in range(50):\n km.ChangeCentres()\n data = np.concatenate((km.X, km.Classes), axis=1)\n km.MapClosestCentre()\n \n # only uncomment if n_features =2 \n # plt.scatter(data[:, : -2], data[:, 1: -1], c=\"r\")\n # plt.scatter(km.Centres[:, [0]], km.Centres[:, [1]], c=\"b\")\n # plt.show()\n # print(np.concatenate((km.X, km.Classes), axis=1))\n\n # this code either yields a very low or high accuracy in most cases, this is just\n # becuase this is an unsupervised learning model and that the names of classes \n # are completely arbitrary. A better measure of accuracy the the confusion matrix, \n # which shows classes and their frequencies \n pred = km.Classes\n act = TrainingY.reshape(-1,1)\n print(np.concatenate((pred,act ), axis=1 ))\n print(confusion_matrix(pred,act ))\n print(accuracy_score(pred, act) *100,\"%\")\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"numpy.concatenate",
"numpy.random.rand",
"sklearn.preprocessing.MinMaxScaler",
"numpy.array",
"numpy.sum",
"sklearn.datasets.make_blobs",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
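The per-sample loop in FindClosestCentre/MapClosestCentre of the KMeans.py entry above can be written as one broadcasting expression, which is the usual vectorized form of the K-means assignment step. A short sketch on toy data, independent of the repository's class:

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((8, 2))        # 8 samples, 2 features
centres = rng.random((3, 2))  # 3 cluster centres

# Pairwise squared Euclidean distances: shape (n_samples, n_centres).
diff = X[:, None, :] - centres[None, :, :]
sq_dist = (diff ** 2).sum(axis=-1)

# Assignment step: index of the nearest centre per sample.
labels = sq_dist.argmin(axis=1)

# Update step: each centre becomes the mean of the samples assigned to it
# (keeping the old centre if a cluster happens to be empty).
new_centres = np.array([X[labels == k].mean(axis=0) if np.any(labels == k) else centres[k]
                        for k in range(len(centres))])
print(labels, new_centres.shape)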
NastasjaEO/eotopia | [
"a29576075065428cbd61aa54b071f56ae0ff876c"
] | [
"visualization/visualization_eopatch/eopatch_visualization.py"
] | [
"\"\"\"\nThis module implements visualizations for EOPatch\n\nCredits:\nCopyright (c) 2017-2019 Matej Aleksandrov, Matej Batič, Andrej Burja, Eva Erzin (Sinergise)\nCopyright (c) 2017-2019 Grega Milčinski, Matic Lubej, Devis Peresutti, Jernej Puc, Tomislav Slijepčević (Sinergise)\nCopyright (c) 2017-2019 Blaž Sovdat, Nejc Vesel, Jovan Višnjić, Anže Zupanc, Lojze Žust (Sinergise)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport xarray as xr\nimport holoviews as hv\nimport geoviews as gv\n\nimport hvplot # pylint: disable=unused-import\nimport hvplot.xarray # pylint: disable=unused-import\nimport hvplot.pandas # pylint: disable=unused-import\n\nfrom cartopy import crs as ccrs\nfrom shapely.geometry import Polygon\nimport shapely\nshapely.speedups.disable()\n\nfrom sentinelhub import CRS\n\n## TODO!\n# make using sys obsolete\nimport sys\nsys.path.append(\"D:/Code/eotopia/repo_core\")\nfrom constants import FeatureType, FeatureTypeSet\nfrom utilities import FeatureParser\n\nsys.path.append(\"D:/Code/eotopia/repo_visualization_eopatchcore\")\nfrom xarray_utils_viz import array_to_dataframe, new_coordinates, string_to_variable\n\nPLOT_WIDTH = 800\nPLOT_HEIGHT = 500\n\n\nclass EOPatchVisualization:\n \"\"\"\n Plot class for making visulizations.\n\n :param eopatch: eopatch\n :type eopatch: EOPatch\n :param feature: feature of eopatch\n :type feature: (FeatureType, str)\n :param rgb: bands for creating RGB image\n :type rgb: [int, int, int]\n :param rgb_factor: multiplication factor for constructing rgb image\n :type rgb_factor: float\n :param vdims: value dimensions for plotting geopandas.GeoDataFrame\n :type vdims: str\n :param timestamp_column: geopandas.GeoDataFrame columns with timestamps\n :type timestamp_column: str\n :param geometry_column: geopandas.GeoDataFrame columns with geometry\n :type geometry_column: geometry\n :param pixel: wheather plot data for each pixel (line), for FeatureType.DATA and FeatureType.MASK\n :type pixel: bool\n :param mask: name of the FeatureType.MASK to apply to data\n :type mask: str\n\n \"\"\"\n def __init__(self, eopatch, feature, rgb=None, rgb_factor=3.5, vdims=None,\n timestamp_column='TIMESTAMP', geometry_column='geometry', pixel=False, mask=None):\n self.eopatch = eopatch\n self.feature = feature\n self.rgb = list(rgb) if isinstance(rgb, tuple) else rgb\n self.rgb_factor = rgb_factor\n self.vdims = vdims\n self.timestamp_column = timestamp_column\n self.geometry_column = geometry_column\n self.pixel = pixel\n self.mask = mask\n\n def plot(self):\n \"\"\" Plots eopatch\n\n :return: plot\n :rtype: holovies/bokeh\n \"\"\"\n\n features = list(FeatureParser(self.feature))\n feature_type, feature_name = features[0]\n if self.pixel and feature_type in FeatureTypeSet.RASTER_TYPES_4D:\n vis = self.plot_pixel(feature_type, feature_name)\n elif feature_type in (FeatureType.MASK, *FeatureTypeSet.RASTER_TYPES_3D):\n vis = self.plot_raster(feature_type, feature_name)\n elif self.rgb and feature_type is FeatureType.DATA:\n vis = self.plot_data_rgb(feature_name)\n elif feature_type is FeatureType.VECTOR:\n vis = self.plot_vector(feature_name)\n elif feature_type is FeatureType.VECTOR_TIMELESS:\n vis = self.plot_vector_timeless(feature_name)\n else: # elif feature_type in (FeatureType.SCALAR, FeatureType.LABEL):\n vis = self.plot_scalar_label(feature_type, feature_name)\n\n return 
vis.opts(plot=dict(width=PLOT_WIDTH, height=PLOT_HEIGHT))\n\n def plot_data_rgb(self, feature_name):\n \"\"\" Plots the FeatureType.DATA of eopatch.\n\n :param feature_name: name of the eopatch feature\n :type feature_name: str\n :return: visualization\n :rtype: holoview/geoviews/bokeh\n \"\"\"\n crs = self.eopatch.bbox.crs\n crs = CRS.POP_WEB if crs is CRS.WGS84 else crs\n data_da = array_to_dataframe(self.eopatch, (FeatureType.DATA, feature_name), crs=crs)\n if self.mask:\n data_da = self.mask_data(data_da)\n timestamps = self.eopatch.timestamp\n crs = self.eopatch.bbox.crs\n if not self.rgb:\n return data_da.hvplot(x='x', y='y', crs=ccrs.epsg(crs.epsg))\n data_rgb = self.eopatch_da_to_rgb(data_da, feature_name, crs)\n rgb_dict = {timestamp_: self.plot_rgb_one(data_rgb, timestamp_) for timestamp_ in timestamps}\n\n return hv.HoloMap(rgb_dict, kdims=['time'])\n\n @staticmethod\n def plot_rgb_one(eopatch_da, timestamp): # OK\n \"\"\" Returns visualization for one timestamp for FeatureType.DATA\n :param eopatch_da: eopatch converted to xarray DataArray\n :type eopatch_da: xarray DataArray\n :param timestamp: timestamp to make plot for\n :type timestamp: datetime\n :return: visualization\n :rtype: holoviews/geoviews/bokeh\n \"\"\"\n return eopatch_da.sel(time=timestamp).drop('time').hvplot(x='x', y='y')\n\n def plot_raster(self, feature_type, feature_name):\n \"\"\" Makes visualization for raster data (except for FeatureType.DATA)\n\n :param feature_type: type of eopatch feature\n :type feature_type: FeatureType\n :param feature_name: name of eopatch feature\n :type feature_name: str\n :return: visualization\n :rtype: holoviews/geoviews/bokeh\n \"\"\"\n crs = self.eopatch.bbox.crs\n crs = CRS.POP_WEB if crs is CRS.WGS84 else crs\n data_da = array_to_dataframe(self.eopatch, (feature_type, feature_name), crs=crs)\n data_min = data_da.values.min()\n data_max = data_da.values.max()\n data_levels = len(np.unique(data_da))\n data_levels = 11 if data_levels > 11 else data_levels\n data_da = data_da.where(data_da > 0).fillna(-1)\n vis = data_da.hvplot(x='x', y='y',\n crs=ccrs.epsg(crs.epsg)).opts(clim=(data_min, data_max),\n clipping_colors={'min': 'transparent'},\n color_levels=data_levels)\n return vis\n\n def plot_vector(self, feature_name):\n \"\"\" Visualizaton for vector (FeatureType.VECTOR) data\n\n :param feature_name: name of eopatch feature\n :type feature_name: str\n :return: visualization\n :rtype: holoviews/geoviews/bokeh\n\n \"\"\"\n crs = self.eopatch.bbox.crs\n timestamps = self.eopatch.timestamp\n data_gpd = self.fill_vector(FeatureType.VECTOR, feature_name)\n if crs is CRS.WGS84:\n crs = CRS.POP_WEB\n data_gpd = data_gpd.to_crs(crs.pyproj_crs())\n shapes_dict = {timestamp_: self.plot_shapes_one(data_gpd, timestamp_, crs)\n for timestamp_ in timestamps}\n return hv.HoloMap(shapes_dict, kdims=['time'])\n\n def fill_vector(self, feature_type, feature_name):\n \"\"\" Adds timestamps from eopatch to GeoDataFrame.\n\n :param feature_type: type of eopatch feature\n :type feature_type: FeatureType\n :param feature_name: name of eopatch feature\n :type feature_name: str\n :return: GeoDataFrame with added data\n :rtype: geopandas.GeoDataFrame\n \"\"\"\n vector = self.eopatch[feature_type][feature_name].copy()\n vector['valid'] = True\n eopatch_timestamps = self.eopatch.timestamp\n vector_timestamps = set(vector[self.timestamp_column])\n blank_timestamps = [timestamp for timestamp in eopatch_timestamps if timestamp not in vector_timestamps]\n dummy_geometry = 
self.create_dummy_polygon(0.0000001)\n\n temp_df = self.create_dummy_dataframe(vector,\n blank_timestamps=blank_timestamps,\n dummy_geometry=dummy_geometry)\n\n final_vector = gpd.GeoDataFrame(pd.concat((vector, temp_df), ignore_index=True),\n crs=vector.crs)\n return final_vector\n\n def create_dummy_dataframe(self, geodataframe, blank_timestamps, dummy_geometry,\n fill_str='', fill_numeric=1):\n \"\"\" Creates geopadnas GeoDataFrame to fill with dummy data (for visualization)\n\n :param geodataframe: dataframe to append rows to\n :type geodataframe: geopandas.GeoDataFrame\n :param blank_timestamps: timestamps for constructing dataframe\n :type blank_timestamps: list of timestamps\n :param dummy_geometry: geometry to plot when there is no data\n :type dummy_geometry: shapely.geometry.Polygon\n :param fill_str: insert when there is no value in str column\n :type fill_str: str\n :param fill_numeric: insert when\n :type fill_numeric: float\n :return: dataframe with dummy data\n :rtype: geopandas.GeoDataFrame\n \"\"\"\n dataframe = pd.DataFrame(data=blank_timestamps, columns=[self.timestamp_column])\n\n for column in geodataframe.columns:\n if column == self.timestamp_column:\n continue\n\n if column == self.geometry_column:\n dataframe[column] = dummy_geometry\n elif column == 'valid':\n dataframe[column] = False\n elif geodataframe[column].dtype in (int, float):\n dataframe[column] = fill_numeric\n else:\n dataframe[column] = fill_str\n\n return dataframe\n\n def create_dummy_polygon(self, addition_factor):\n \"\"\" Creates geometry/polygon to plot if there is no data (at timestamp)\n\n :param addition_factor: size of the 'blank polygon'\n :type addition_factor: float\n :return: polygon\n :rtype: shapely.geometry.Polygon\n \"\"\"\n x_blank, y_blank = self.eopatch.bbox.lower_left\n dummy_geometry = Polygon([[x_blank, y_blank],\n [x_blank + addition_factor, y_blank],\n [x_blank + addition_factor, y_blank + addition_factor],\n [x_blank, y_blank + addition_factor]])\n\n return dummy_geometry\n\n def plot_scalar_label(self, feature_type, feature_name):\n \"\"\" Line plot for FeatureType.SCALAR, FeatureType.LABEL\n\n :param feature_type: type of eopatch feature\n :type feature_type: FeatureType\n :param feature_name: name of eopatch feature\n :type feature_name: str\n :return: visualization\n :rtype: holoviews/geoviews/bokeh\n \"\"\"\n data_da = array_to_dataframe(self.eopatch, (feature_type, feature_name))\n return data_da.hvplot()\n\n def plot_shapes_one(self, data_gpd, timestamp, crs):\n \"\"\" Plots shapes for one timestamp from geopandas GeoDataFrame\n\n :param data_gpd: data to plot\n :type data_gpd: geopandas.GeoDataFrame\n :param timestamp: timestamp to plot data for\n :type timestamp: datetime\n :param crs: in which crs is the data to plot\n :type crs: sentinelhub.crs\n :return: visualization\n :rtype: geoviews\n \"\"\"\n out = data_gpd.loc[data_gpd[self.timestamp_column] == timestamp]\n return gv.Polygons(out, crs=ccrs.epsg(int(crs.value)))\n\n def plot_vector_timeless(self, feature_name):\n \"\"\" Plot FeatureType.VECTOR_TIMELESS data\n\n :param feature_name: name of the eopatch featrue\n :type feature_name: str\n :return: visalization\n :rtype: geoviews\n \"\"\"\n crs = self.eopatch.bbox.crs\n data_gpd = self.eopatch[FeatureType.VECTOR_TIMELESS][feature_name]\n if crs is CRS.WGS84:\n crs = CRS.POP_WEB\n data_gpd = data_gpd.to_crs(crs.pyproj_crs())\n\n return gv.Polygons(data_gpd, crs=ccrs.epsg(crs.epsg), vdims=self.vdims)\n\n def eopatch_da_to_rgb(self, eopatch_da, feature_name, 
crs):\n \"\"\" Creates new xarray DataArray (from old one) to plot rgb image with hv.Holomap\n\n :param eopatch_da: eopatch DataArray\n :type eopatch_da: DataArray\n :param feature_name: name of the feature to plot\n :type feature_name: str\n :param crs: in which crs are the data\n :type crs: sentinelhub.constants.crs\n :return: eopatch DataArray with proper coordinates, dimensions, crs\n :rtype: xarray.DataArray\n \"\"\"\n timestamps = eopatch_da.coords['time'].values\n bands = eopatch_da[..., self.rgb] * self.rgb_factor\n bands = bands.rename({string_to_variable(feature_name, '_dim'): 'band'}).transpose('time', 'band', 'y', 'x')\n x_values, y_values = new_coordinates(eopatch_da, crs, CRS.POP_WEB)\n eopatch_rgb = xr.DataArray(data=np.clip(bands.data, 0, 1),\n coords={'time': timestamps,\n 'band': self.rgb,\n 'y': np.flip(y_values),\n 'x': x_values},\n dims=('time', 'band', 'y', 'x'))\n return eopatch_rgb\n\n def plot_pixel(self, feature_type, feature_name):\n \"\"\"\n Plots one pixel through time\n :return: visualization\n :rtype: holoviews\n \"\"\"\n data_da = array_to_dataframe(self.eopatch, (feature_type, feature_name))\n if self.mask:\n data_da = self.mask_data(data_da)\n return data_da.hvplot(x='time')\n\n def mask_data(self, data_da):\n \"\"\"\n Creates a copy of array and insert 0 where data is masked.\n :param data_da: dataarray\n :type data_da: xarray.DataArray\n :return: dataaray\n :rtype: xarray.DataArray\n \"\"\"\n mask = self.eopatch[FeatureType.MASK][self.mask]\n if len(data_da.values.shape) == 4:\n mask = np.repeat(mask, data_da.values.shape[-1], -1)\n else:\n mask = np.squeeze(mask, axis=-1)\n data_da = data_da.copy()\n data_da.values[~mask] = 0\n\n return data_da\n"
] | [
[
"pandas.concat",
"numpy.unique",
"numpy.clip",
"numpy.squeeze",
"pandas.DataFrame",
"numpy.repeat",
"numpy.flip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
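mask_data in the eopatch_visualization.py entry above repeats a single-band mask across the band axis before zeroing out invalid pixels. A plain-NumPy sketch of that masking step; the (time, y, x, band) shapes below are made up for the example.

import numpy as np

data = np.ones((2, 4, 4, 3))               # time, y, x, band
mask = np.zeros((2, 4, 4, 1), dtype=bool)  # single-channel validity mask
mask[:, 1:3, 1:3, :] = True                # mark a small valid window

# Repeat the mask over the band axis so its shape matches the data,
# then zero out everything outside the valid region.
full_mask = np.repeat(mask, data.shape[-1], axis=-1)
masked = data.copy()
masked[~full_mask] = 0

print(masked[0, :, :, 0])  # 1s inside the valid window, 0s elsewhere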
bastustrump/genimpro | [
"3ef8f66ec0d1e8275b5d559090f1672a1788e47c"
] | [
"importwrapper.py"
] | [
"from __future__ import division, print_function, absolute_import\n\nimport struct\nimport warnings \nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport genimpro as genimpro\nfrom pprint import pprint\nimport struct\nfrom IPython.core.display import HTML\nimport scipy.constants as const\nimport scipy\nfrom scipy.io import wavfile\nimport matplotlib.transforms as transforms\nimport matplotlib.cm as cm\nfrom IPython.display import Audio\nfrom IPython.display import Math\nimport IPython.core.display\nimport pydub\nimport cStringIO\nimport StringIO\nimport base64\n\nsamplerate=44100.0\ntuningFrequency = 440\n\nnp.seterr(all='ignore')\n\nglobal audio\n\nimport MySQLdb\ndb = MySQLdb.connect(host=\"localhost\", \n user=\"genimpro\",\n passwd=\"genimpropw#2016\",\n db=\"genimpro\")\nc = db.cursor() \n\ndef float2pcm(sig, dtype='int16'):\n \"\"\"Convert floating point signal with a range from -1 to 1 to PCM.\n\n Parameters\n ----------\n sig : array_like\n Input array, must have floating point type.\n dtype : data type, optional\n Desired (integer) data type.\n\n Returns\n -------\n ndarray\n integer data.\n\n See Also\n --------\n pcm2float, dtype\n\n \"\"\"\n # TODO: allow unsigned (e.g. 8-bit) data\n\n sig = np.asarray(sig)\n if sig.dtype.kind != 'f':\n raise TypeError(\"'sig' must be a float array\")\n dtype = np.dtype(dtype)\n if dtype.kind != 'i':\n raise TypeError(\"'dtype' must be signed integer type\")\n\n return (sig * np.iinfo(dtype).max).astype(dtype)\n\ndef wavPlayer(data, rate):\n \n silence = np.zeros(10000)\n audio = []\n audio.extend(silence)\n audio.extend(data)\n\n data = float2pcm(audio)\n\n buffer = StringIO.StringIO()\n buffer.write(b'RIFF')\n buffer.write(b'\\x00\\x00\\x00\\x00')\n buffer.write(b'WAVE')\n buffer.write(b'fmt ')\n if data.ndim == 1:\n noc = 1\n else:\n noc = data.shape[1]\n bits = data.dtype.itemsize * 8\n sbytes = rate*(bits // 8)*noc\n ba = noc * (bits // 8)\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\n\n # data chunk\n buffer.write(b'data')\n buffer.write(struct.pack('<i', data.nbytes))\n\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\n data = data.byteswap()\n\n buffer.write(data.tostring())\n size = buffer.tell()\n buffer.seek(4)\n buffer.write(struct.pack('<i', size-8))\n \n val = buffer.getvalue()\n \n wave = pydub.AudioSegment.from_wav(cStringIO.StringIO(val)) \n mp3audio = wave.export(cStringIO.StringIO(), format='mp3')\n mp3audio.reset()\n mp3audio = mp3audio.read()\n\n # src = \"\"\"\n # <body>\n # <audio controls=\"controls\" style=\"width:600px\" >\n # <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\n # Your browser does not support the audio element.\n # </audio>\n # </body>\n # \"\"\".format(base64=base64.encodestring(val))\n\n\n src = \"\"\"\n <body>\n <audio controls=\"controls\" style=\"width:600px\" >\n <source controls src=\"data:audio/mpeg;base64,{base64}\" type=\"audio/mpeg\" />\n Your browser does not support the audio element.\n </audio>\n </body>\n \"\"\".format(base64=base64.encodestring(mp3audio))\n \n IPython.core.display.display(HTML(src))\n\n\n# def plotFeature(feature,event,t_features,color,context,x,y,h_columns):\n# context.plot(t_features,event[\"features\"][feature][\"raw\"],label=feature,linewidth=2,color=color)\n# context.axhline(y=event[\"features\"][feature][\"mean\"],linewidth=1,color=color)\n# context.errorbar(x, y, yerr=event[\"features\"][feature][\"stdev\"], 
fmt='-o',color=color,linewidth=5)\n# context.plot(t_features,event[\"features\"][feature][\"mean\"]+(t_features*event[\"features\"][feature][\"corrcoef\"]),linewidth=1,color=color,ls='dashed')\n# context.text(x + h_columns/10, y*1.1, 'mean = %f\\nstdev = %f\\ncorrcoeff = %f' % (event[\"features\"][feature][\"mean\"],event[\"features\"][feature][\"stdev\"],event[\"features\"][feature][\"corrcoef\"]), style='italic', bbox={'facecolor':color, 'alpha':0.7, 'pad':10})\n\n# def plotWithPlayer(event):\n# t_audio = np.linspace(0, len(event[\"audio\"])/samplerate, num=len(event[\"audio\"]))\n# t_features = np.linspace(0, len(event[\"audio\"])/samplerate, num=len(event[\"features\"][\"Loudness\"][\"raw\"]))\n\n# colors = cm.rainbow(np.linspace(0, 1, len(event[\"features\"])-1))\n# colorindex = 0\n \n# fig = plt.figure(figsize=(18, 12), dpi=400)\n# ax = fig.add_subplot(111)\n# plt.xlabel('Zeit (Sekunden)')\n\n# ax.plot(t_audio,event[\"audio\"],'0.5',label=\"Audio Waveform\",linewidth=0.7)\n# h_columns=t_features[-1]/6\n \n# for feature in event[\"features\"]:\n# if type(event[\"features\"][feature])==type(dict()):\n# x = h_columns*(colorindex+1)\n# y = event[\"features\"][feature][\"mean\"]\n# color = colors[colorindex]\n \n# if event[\"features\"][feature][\"max\"] > 1.0:\n# ax2 = ax.twinx()\n# ax2.set_ylabel(feature,color=color)\n# plotFeature(feature,event,t_features,color,ax2,x,y,h_columns)\n# else:\n# plotFeature(feature,event,t_features,color,ax,x,y,h_columns)\n# colorindex+=1\n \n# #ax.legend(loc=4)\n \n# lines, labels = ax.get_legend_handles_labels()\n# lines2, labels2 = ax2.get_legend_handles_labels()\n# ax.legend(lines+lines2, labels+labels2, loc=0)\n# plt.tight_layout()\n \n# wavPlayer(event[\"audio\"],samplerate)\n\ndef plotOnsets(onsetRange,audio):\n plt.figure(figsize=(40, 16), dpi=600)\n plt.plot(audio[onsetRange[0]:onsetRange[-1]],'0.5',label=\"Audio Waveform\",linewidth=0.7)\n for onset in onsetRange:\n plt.axvline(x=onset-onsetRange[0],linewidth=0.5,color='g')\n wavPlayer(audio[onsetRange[0]:onsetRange[-1]],samplerate)\n\ndef plotOnsetsAndSonicevents(onsetRange,sonicevents,audio):\n plt.figure(figsize=(40, 16), dpi=600)\n plt.plot(audio[onsetRange[0]:onsetRange[-1]],'0.5',label=\"Audio Waveform\",linewidth=0.7)\n SELoudness=[]\n SELoudnessTimestamp=[]\n\n for onset in onsetRange:\n plt.axvline(x=onset-onsetRange[0],linewidth=0.5,color='g')\n ax2 = plt.twinx()\n for i in range(0,len(sonicevents)):\n if onsetRange[0]<=sonicevents[i][\"start\"]<onsetRange[-1]:\n plt.axvline(x=sonicevents[i][\"start\"]-onsetRange[0],linewidth=2,color='r',alpha=0.5)\n SELoudness.append(sonicevents[i][\"loudness\"])\n SELoudnessTimestamp.append(sonicevents[i][\"start\"]-onsetRange[0])\n \n ax2.plot(SELoudnessTimestamp,SELoudness,color='b',marker='o',lw=0,ms=10)\n ax2.set_ylabel(\"loudness\",color='b')\n wavPlayer(audio[onsetRange[0]:onsetRange[-1]],samplerate)\n\ndef plotStatistics(feature,event,t_features,color,context,x,y,h_columns):\n #context.plot(t_features,event[\"features\"][feature][\"raw\"],label=feature,linewidth=2,color=color)\n context.axhline(y=event[\"features\"][feature][\"mean\"],linewidth=1,color=color)\n context.errorbar(x, y, yerr=event[\"features\"][feature][\"stdev\"], fmt='-o',color=color,linewidth=5)\n context.plot(t_features,event[\"features\"][feature][\"mean\"]+(t_features*event[\"features\"][feature][\"corrcoef\"]),linewidth=1,color=color,ls='dashed')\n context.text(x + h_columns/10, y*1.1, 'mean = %f\\nstdev = %f\\ncorrcoeff = %f' % 
(event[\"features\"][feature][\"mean\"],event[\"features\"][feature][\"stdev\"],event[\"features\"][feature][\"corrcoef\"]), style='italic', bbox={'facecolor':color, 'alpha':0.7, 'pad':10})\n\n\ndef make_patch_spines_invisible(ax):\n ax.set_frame_on(True)\n ax.patch.set_visible(False)\n for sp in ax.spines.itervalues():\n sp.set_visible(False)\n\ndef plotFeaturesWithPlayer(events,audio,showFeatures=[],showNumbers=0,showStatistics=0):\n\n samples = audio[events[0][\"start\"]:events[len(events)-1][\"end\"]]\n startsample = events[0][\"start\"]\n t_audio = np.linspace(0, len(samples)/samplerate, num=len(samples))\n\n if (showFeatures==[]) & showStatistics:\n for feature in events[0][\"features\"]:\n showFeatures.append(feature)\n\n #create different colors for feature distinction\n colors = cm.rainbow(np.linspace(0, 1, len(events[0][\"features\"])))\n colorindex = 0\n featureindex = 0\n \n fig = plt.figure(figsize=(70, 40), dpi=600)\n ax1 = fig.add_subplot(221)\n plt.xlabel('time (seconds)')\n plt.plot(t_audio,samples,'0.7',label=\"Audio Waveform\",linewidth=2,clip_on=False)\n trans = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)\n \n ax = {}\n featurecolor={}\n for feature in events[0][\"features\"]:\n featurecolor[feature] = colors[colorindex]\n colorindex+=1\n if (feature in showFeatures) & (type(events[0][\"features\"][feature])==type(dict())):\n ax[feature]=plt.twinx()\n ax[feature].plot(0,0,label=feature,linewidth=2,color=featurecolor[feature])\n \n for i in range (0,len(events)):\n\n markBegin = (events[i][\"start\"]-startsample)/samplerate\n markEnd = (events[i][\"start\"]-startsample+events[i][\"features\"][\"effLength\"])/samplerate\n eventEnd = (events[i][\"start\"]-startsample + (events[i][\"end\"]-events[i][\"start\"]))/samplerate\n \n featureindex = 0\n spineindex = 0\n for feature in events[i][\"features\"]:\n if (type(events[i][\"features\"][feature])==type(dict())) & (feature in showFeatures):\n t_feature = np.linspace(markBegin, markEnd, num=len(events[i][\"features\"][feature][\"raw\"]))\n \n tkw = dict(size=1, width=1)\n if events[i][\"features\"][feature][\"max\"] > 1.0:\n feature_ax=ax[feature]\n feature_ax.plot(t_feature,events[i][\"features\"][feature][\"raw\"],linewidth=1.6,color=featurecolor[feature])\n make_patch_spines_invisible(feature_ax)\n feature_ax.tick_params(axis='y', colors=featurecolor[feature], **tkw)\n feature_ax.spines[\"right\"].set_visible(True)\n feature_ax.spines[\"right\"].set_color(featurecolor[feature])\n feature_ax.spines[\"right\"].set_linewidth(0.5)\n feature_ax.spines[\"right\"].set_position(('outward',35 * spineindex))\n feature_ax.yaxis.set_label_position(\"right\")\n feature_ax.yaxis.set_ticks_position('right')\n spineindex += 1\n \n else:\n feature_ax=ax[feature]\n feature_ax.plot(t_feature,events[i][\"features\"][feature][\"raw\"],linewidth=1.6,color=featurecolor[feature])\n make_patch_spines_invisible(feature_ax)\n feature_ax.spines[\"right\"].set_color(featurecolor[feature])\n feature_ax.yaxis.set_ticklabels([])\n \n if showStatistics:\n h_columns=t_audio[-1]/(len(events[i][\"features\"])-4)\n x = h_columns*(featureindex +1)\n y = events[i][\"features\"][feature][\"mean\"]\n plotStatistics(feature,events[i],t_feature,featurecolor[feature],ax[feature],x,y,h_columns)\n \n featureindex += 1\n\n plt.axvline(x=markBegin,linewidth=1,color='r')\n if showNumbers:\n plt.text(markBegin+0.05, 0.05,'%i'% (i+1),bbox=dict(boxstyle='round', \\\n facecolor='r', alpha=0.5),fontsize=14,horizontalalignment='left',transform=trans)\n 
\n lines, labels = ax1.get_legend_handles_labels()\n try:\n for feature in ax: \n lines2, labels2 = ax[feature].get_legend_handles_labels()\n lines += lines2\n labels += labels2\n ax1.legend(lines, labels, loc=1)\n except NameError:\n ax1.legend(lines, labels, loc=1)\n \n wavPlayer(samples,samplerate)\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.twinx",
"numpy.asarray",
"matplotlib.transforms.blended_transform_factory",
"numpy.dtype",
"matplotlib.pyplot.plot",
"numpy.seterr",
"numpy.iinfo",
"matplotlib.pyplot.xlabel",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
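float2pcm in the importwrapper.py entry above reduces to a scale-and-cast from a [-1, 1] float signal to integer PCM. The core of it, sketched for int16:

import numpy as np

signal = np.sin(np.linspace(0, 2 * np.pi, 8))  # float signal in [-1, 1]

# Scale by the integer type's maximum and cast; this is the heart of float2pcm.
pcm = (signal * np.iinfo(np.int16).max).astype(np.int16)

print(pcm.dtype, pcm.min(), pcm.max())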
sauhaardac/LIDAR-Landslide-Detection | [
"bc9ac0188f03f192a4e3eb8b6460864fe64d1372"
] | [
"clean_data.py"
] | [
"\"\"\"\nThis script operates on data from the qtm_extracted_data folder and populates the cleaned_data folder.\n\nThe objective of the script is to take the data from geographical format to a more simplified image based numerical format.\n\"\"\"\n\nimport os\nimport pandas as pd\nimport gdal\nfrom affine import Affine\n\nlandslides = pd.read_csv('data/qtm_extracted_data/landslides.csv', header=0)\nlandslides['DTM Path'] = [f'data/qtm_extracted_data/{name[-2]}.tif' for name in landslides['Name']] # path to tif file\n\npixels = []\nfor i, row in landslides.iterrows():\n ds = gdal.Open(row['DTM Path'])\n reverse = ~Affine.from_gdal(*ds.GetGeoTransform())\n coord = reverse * (row['X'], row['Y'])\n pixels.append((round(coord[0]), round(coord[1])))\n\nlandslides['X'] = [pixel[0] for pixel in pixels]\nlandslides['Y'] = [pixel[1] for pixel in pixels]\n\npoints = []\nfor tif in landslides['DTM Path'].unique():\n tif_landslides = landslides[landslides['DTM Path'] == tif]\n points.append(list(zip(tif_landslides['X'], tif_landslides['Y'])))\n\nclean_df = pd.DataFrame({'DTM Path': landslides['DTM Path'].unique(), 'Landslides': points},\n columns=['DTM Path', 'Landslides'])\nclean_df.to_pickle('data/cleaned_data/landslides_cleaned.pkl')\n# landslides[['Name', 'X', 'Y', 'DTM Path']].to_csv('cleaned_data/landslides_cleaned.csv')\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
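clean_data.py above converts landslide map coordinates to pixel indices by inverting the raster's GDAL geotransform with the affine package. A sketch of that conversion with a made-up north-up geotransform, no GDAL or raster file required:

from affine import Affine

# A made-up north-up GDAL geotransform: origin (500000, 4100000), 1 m pixels.
geotransform = (500000.0, 1.0, 0.0, 4100000.0, 0.0, -1.0)

forward = Affine.from_gdal(*geotransform)  # pixel (col, row) -> world (x, y)
reverse = ~forward                         # world (x, y) -> pixel (col, row)

x, y = 500123.4, 4099876.5                 # a landslide point in map coordinates
col, row = reverse * (x, y)
print(round(col), round(row))              # integer pixel indices, as in the cleaning script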
chichi0119/speech | [
"130124ccaf23fabe3e7a6f138d9403a7c0946ef3"
] | [
"examples/conformer/test.py"
] | [
"# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom tqdm import tqdm\nimport argparse\nfrom tensorflow_asr.utils import env_util, file_util\n\nlogger = env_util.setup_environment()\nimport tensorflow as tf\n\nDEFAULT_YAML = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"config.yml\")\n\ntf.keras.backend.clear_session()\n\nparser = argparse.ArgumentParser(prog=\"Conformer Testing\")\n\nparser.add_argument(\"--config\", type=str, default=DEFAULT_YAML, help=\"The file path of model configuration file\")\n\nparser.add_argument(\"--saved\", type=str, default=None, help=\"Path to saved model\")\n\nparser.add_argument(\"--mxp\", default=False, action=\"store_true\", help=\"Enable mixed precision\")\n\nparser.add_argument(\"--bs\", type=int, default=None, help=\"Test batch size\")\n\nparser.add_argument(\"--sentence_piece\", default=False, action=\"store_true\", help=\"Whether to use `SentencePiece` model\")\n\nparser.add_argument(\"--subwords\", default=False, action=\"store_true\", help=\"Use subwords\")\n\nparser.add_argument(\"--device\", type=int, default=0, help=\"Device's id to run test on\")\n\nparser.add_argument(\"--cpu\", default=False, action=\"store_true\", help=\"Whether to only use cpu\")\n\nparser.add_argument(\"--output\", type=str, default=\"test.tsv\", help=\"Result filepath\")\n\nargs = parser.parse_args()\n\nassert args.saved\n\ntf.config.optimizer.set_experimental_options({\"auto_mixed_precision\": args.mxp})\n\nenv_util.setup_devices([args.device], cpu=args.cpu)\n\nfrom tensorflow_asr.configs.config import Config\nfrom tensorflow_asr.datasets.asr_dataset import ASRSliceDataset\nfrom tensorflow_asr.featurizers.speech_featurizers import TFSpeechFeaturizer\nfrom tensorflow_asr.featurizers.text_featurizers import SubwordFeaturizer, SentencePieceFeaturizer, CharFeaturizer\nfrom tensorflow_asr.models.transducer.conformer import Conformer\nfrom tensorflow_asr.utils import app_util\n\nconfig = Config(args.config)\nspeech_featurizer = TFSpeechFeaturizer(config.speech_config)\n\nif args.sentence_piece:\n logger.info(\"Use SentencePiece ...\")\n text_featurizer = SentencePieceFeaturizer(config.decoder_config)\nelif args.subwords:\n logger.info(\"Use subwords ...\")\n text_featurizer = SubwordFeaturizer(config.decoder_config)\nelse:\n logger.info(\"Use characters ...\")\n text_featurizer = CharFeaturizer(config.decoder_config)\n\ntf.random.set_seed(0)\n\ntest_dataset = ASRSliceDataset(\n speech_featurizer=speech_featurizer,\n text_featurizer=text_featurizer,\n **vars(config.learning_config.test_dataset_config)\n)\n\n# build model\nconformer = Conformer(**config.model_config, vocabulary_size=text_featurizer.num_classes)\nconformer.make(speech_featurizer.shape)\nconformer.load_weights(args.saved, by_name=True)\nconformer.summary(line_length=100)\nconformer.add_featurizers(speech_featurizer, text_featurizer)\n\nbatch_size = args.bs or config.learning_config.running_config.batch_size\ntest_data_loader = 
test_dataset.create(batch_size)\n\nwith file_util.save_file(file_util.preprocess_paths(args.output)) as filepath:\n overwrite = True\n if tf.io.gfile.exists(filepath):\n overwrite = input(f\"Overwrite existing result file {filepath} ? (y/n): \").lower() == \"y\"\n if overwrite:\n results = conformer.predict(test_data_loader, verbose=1)\n logger.info(f\"Saving result to {args.output} ...\")\n with open(filepath, \"w\") as openfile:\n openfile.write(\"PATH\\tDURATION\\tGROUNDTRUTH\\tGREEDY\\tBEAMSEARCH\\n\")\n progbar = tqdm(total=test_dataset.total_steps, unit=\"batch\")\n for i, pred in enumerate(results):\n groundtruth, greedy, beamsearch = [x.decode('utf-8') for x in pred]\n path, duration, _ = test_dataset.entries[i]\n openfile.write(f\"{path}\\t{duration}\\t{groundtruth}\\t{greedy}\\t{beamsearch}\\n\")\n progbar.update(1)\n progbar.close()\n app_util.evaluate_results(filepath)\n"
] | [
[
"tensorflow.io.gfile.exists",
"tensorflow.keras.backend.clear_session",
"tensorflow.config.optimizer.set_experimental_options",
"tensorflow.random.set_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
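test.py above writes one tab-separated line per utterance (PATH, DURATION, GROUNDTRUTH, GREEDY, BEAMSEARCH) before scoring the file. A small sketch of inspecting such a result file with pandas; the rows below are placeholders, not real recognition output:

import io
import pandas as pd

# Stand-in for the script's test.tsv output; values are placeholders.
tsv = io.StringIO(
    "PATH\tDURATION\tGROUNDTRUTH\tGREEDY\tBEAMSEARCH\n"
    "clip_0001.wav\t3.2\thello world\thello world\thello world\n"
    "clip_0002.wav\t2.7\tgood morning\tgood mourning\tgood morning\n"
)

results = pd.read_csv(tsv, sep="\t")
exact_greedy = (results["GREEDY"] == results["GROUNDTRUTH"]).mean()
exact_beam = (results["BEAMSEARCH"] == results["GROUNDTRUTH"]).mean()
print(f"sentence-exact greedy: {exact_greedy:.2f}, beam: {exact_beam:.2f}")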
serjik85kg/UnetV2-pytorch-segmentation | [
"c53598d00aeb0c79cb9dd52b6ab3b4976d127f21"
] | [
"python/sd_lib/geometry/polygon.py"
] | [
"# coding: utf-8\n\nimport cv2\nimport numpy as np\n\nfrom shapely.geometry import mapping, Polygon as ShapelyPolygon\n\nfrom sd_lib.geometry.conversions import shapely_figure_to_coords_list\nfrom sd_lib.geometry.point_location import row_col_list_to_points, points_to_row_col_list\nfrom sd_lib.geometry.vector_geometry import VectorGeometry\nfrom sd_lib.geometry.constants import EXTERIOR, INTERIOR, POINTS, UPDATED_AT, CREATED_AT, ID, CLASS_ID #, , LABELER_LOGIN,\nfrom sd_lib.geometry import validation\nfrom sd_lib.sly_logger import logger\n\n\nclass Polygon(VectorGeometry):\n '''\n This is a class for creating and using Polygon objects for Labels\n '''\n @staticmethod\n def geometry_name():\n return 'polygon'\n\n def __init__(self, exterior, interior,\n sly_id=None, class_id=None, labeler_login=None, updated_at=None, created_at=None):\n '''\n :param exterior: list of PointLocation objects, the object contour is defined with these points\n :param interior: list of elements that has the same structure like the \"exterior\" field. This is the list of polygons that define object holes.\n '''\n if len(exterior) < 3:\n raise ValueError('\"{}\" field must contain at least 3 points to create \"Polygon\" object.'.format(EXTERIOR))\n if any(len(element) < 3 for element in interior):\n raise ValueError('\"{}\" element must contain at least 3 points.'.format(INTERIOR))\n\n super().__init__(exterior, interior, sly_id=sly_id, class_id=class_id, labeler_login=labeler_login,\n updated_at=updated_at, created_at=created_at)\n\n\n def crop(self, rect):\n '''\n Crop the current Polygon with a given rectangle, if polygon cat't be cropped it generate exception error\n :param rect: Rectangle class object\n :return: list of Poligon class objects\n '''\n try:\n clipping_window_shpl = ShapelyPolygon(points_to_row_col_list(rect.corners))\n self_shpl = ShapelyPolygon(self.exterior_np, holes=self.interior_np)\n intersections_shpl = self_shpl.buffer(0).intersection(clipping_window_shpl)\n mapping_shpl = mapping(intersections_shpl)\n except Exception:\n logger.warn('Polygon cropping exception, shapely.', exc_info=False)\n raise\n\n intersections = shapely_figure_to_coords_list(mapping_shpl)\n\n # Check for bad cropping cases (e.g. 
empty points list)\n out_polygons = []\n for intersection in intersections:\n if isinstance(intersection, list) and len(intersection) > 0 and len(intersection[0]) >= 3:\n exterior = row_col_list_to_points(intersection[0], do_round=True)\n interiors = []\n for interior_contour in intersection[1:]:\n if len(interior_contour) > 2:\n interiors.append(row_col_list_to_points(interior_contour, do_round=True))\n out_polygons.append(Polygon(exterior, interiors))\n return out_polygons\n\n def _draw_impl(self, bitmap, color, thickness=1, config=None):\n exterior = self.exterior_np[:, ::-1]\n interior = [x[:, ::-1] for x in self.interior_np]\n bmp_to_draw = np.zeros(bitmap.shape[:2], np.uint8)\n cv2.fillPoly(bmp_to_draw, pts=[exterior], color=1)\n cv2.fillPoly(bmp_to_draw, pts=interior, color=0)\n bool_mask = bmp_to_draw.astype(bool)\n bitmap[bool_mask] = color\n\n def _draw_contour_impl(self, bitmap, color, thickness=1, config=None):\n exterior = self.exterior_np[:, ::-1]\n interior = [x[:, ::-1] for x in self.interior_np]\n\n poly_lines = [exterior] + interior\n cv2.polylines(bitmap, pts=poly_lines, isClosed=True, color=color, thickness=thickness)\n\n # @TODO: extend possibilities, consider interior\n # returns area of exterior figure only\n @property\n def area(self):\n '''\n :return: area of current Poligon(exterior figure only)\n '''\n exterior = self.exterior_np\n return self._get_area_by_gauss_formula(exterior[:, 0], exterior[:, 1])\n\n @staticmethod\n def _get_area_by_gauss_formula(rows, cols):\n return 0.5 * np.abs(np.dot(rows, np.roll(cols, 1)) - np.dot(cols, np.roll(rows, 1)))\n\n def approx_dp(self, epsilon):\n '''\n The function approx_dp approximates a polygonal curve with the specified precision\n :param epsilon: Parameter specifying the approximation accuracy. This is the maximum distance between the original curve and its approximation.\n :return: Poligon class object\n '''\n exterior_np = self._approx_ring_dp(self.exterior_np, epsilon, closed=True).tolist()\n interior_np = [self._approx_ring_dp(x, epsilon, closed=True).tolist() for x in self.interior_np]\n exterior = row_col_list_to_points(exterior_np, do_round=True)\n interior = [row_col_list_to_points(x, do_round=True) for x in interior_np]\n return Polygon(exterior, interior)\n"
] | [
[
"numpy.zeros",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
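Polygon._get_area_by_gauss_formula in the polygon.py entry above is the shoelace formula. A standalone check on a shape whose area is known in advance (a 2 x 3 axis-aligned rectangle):

import numpy as np

def shoelace_area(rows, cols):
    # Same expression as Polygon._get_area_by_gauss_formula.
    return 0.5 * np.abs(np.dot(rows, np.roll(cols, 1)) - np.dot(cols, np.roll(rows, 1)))

rows = np.array([0, 0, 2, 2])  # a 2 x 3 rectangle in (row, col) coordinates
cols = np.array([0, 3, 3, 0])
print(shoelace_area(rows, cols))  # 6.0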
chinmaydas96/U-2-Net | [
"af81098edf8ec4c4417b6038b893845af790fc8f"
] | [
"Pytorch2Onnx.py"
] | [
"# import libraries\nfrom torchvision import models\nimport torch\n\n# create a dummy input with correct shape for the network\ndummy_input = torch.randn(16, 3, 224, 224, device='cuda')\n\n# created a resnet50 model\nmodel = models.resnet50(pretrained=True).cuda()\nmodel.eval()\n\n# Created dynamic axes for dynamic batch_size not required for static batch_size\ndynamic_axes = {\"actual_input_1\":{0:\"batch_size\"}, \"output1\":{0:\"batch_size\"}}\ninput_names = [ \"actual_input_1\" ]\noutput_names = [ \"output1\" ]\n\n# Export the model to onnx\ntorch.onnx.export(model, dummy_input, \"resnet50_dynamic.onnx\", \n verbose=False,input_names=input_names,\n output_names=output_names,dynamic_axes=dynamic_axes, export_params=True)\n"
] | [
[
"torch.randn",
"torch.onnx.export"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
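The Pytorch2Onnx record above exports a ResNet-50 with a dynamic batch axis named "batch_size" on the input "actual_input_1" and output "output1". A minimal verification sketch follows, assuming the onnx and onnxruntime packages are installed and that the exported file resnet50_dynamic.onnx is on disk; none of this is part of the original record.

import numpy as np
import onnx
import onnxruntime as ort

# Structural check of the exported graph.
onnx.checker.check_model(onnx.load("resnet50_dynamic.onnx"))

# Run two different batch sizes to confirm the dynamic batch axis works.
session = ort.InferenceSession("resnet50_dynamic.onnx", providers=["CPUExecutionProvider"])
for batch in (1, 4):
    x = np.random.randn(batch, 3, 224, 224).astype(np.float32)
    (logits,) = session.run(None, {"actual_input_1": x})
    print(batch, logits.shape)  # expected: (1, 1000) and (4, 1000)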
peterroelants/datashader | [
"fd938888feca3a42bdfb42462d098f758a954dd8"
] | [
"datashader/tests/test_transfer_functions.py"
] | [
"from __future__ import division, absolute_import\n\nfrom io import BytesIO\n\nimport numpy as np\nimport xarray as xr\nimport dask.array as da\nimport PIL\nimport pytest\nfrom collections import OrderedDict\nimport datashader.transfer_functions as tf\nfrom datashader.tests.test_pandas import assert_eq_xr\n\ncoords = OrderedDict([('x_axis', [3, 4, 5]), ('y_axis', [0, 1, 2])])\ndims = ['y_axis', 'x_axis']\n\n# CPU\ndef build_agg(array_module=np):\n a = array_module.arange(10, 19, dtype='u4').reshape((3, 3))\n a[[0, 1, 2], [0, 1, 2]] = 0\n s_a = xr.DataArray(a, coords=coords, dims=dims)\n b = array_module.arange(10, 19, dtype='f4').reshape((3, 3))\n b[[0, 1, 2], [0, 1, 2]] = array_module.nan\n s_b = xr.DataArray(b, coords=coords, dims=dims)\n c = array_module.arange(10, 19, dtype='f8').reshape((3, 3))\n c[[0, 1, 2], [0, 1, 2]] = array_module.nan\n s_c = xr.DataArray(c, coords=coords, dims=dims)\n agg = xr.Dataset(dict(a=s_a, b=s_b, c=s_c))\n return agg\n\n\ndef build_agg_dask():\n # Dask arrays are immutable `build_agg(da)` won't work.\n # Create numpy based DataArray and convert to Dask by forcing chunking.\n return build_agg(np).chunk({d: 1 for d in dims})\n\n\ndef create_dask_array_np(*args, **kwargs):\n \"\"\"Create a dask array wrapping around a numpy array.\"\"\"\n return da.from_array(np.array(*args, **kwargs))\n\n\ntry:\n import cupy\n aggs = [build_agg(np), build_agg(cupy), build_agg_dask()]\n arrays = [np.array, cupy.array, create_dask_array_np]\nexcept ImportError:\n cupy = None\n aggs = [build_agg(np), build_agg_dask()]\n arrays = [np.array, create_dask_array_np]\n\nint_span = [11, 17]\nfloat_span = [11.0, 17.0]\n\nsolution_lists = {\n 'log':\n [[0, 4291543295, 4286741503],\n [4283978751, 0, 4280492543],\n [4279242751, 4278190335, 0]],\n 'cbrt':\n [[0, 4291543295, 4284176127],\n [4282268415, 0, 4279834879],\n [4278914047, 4278190335, 0]],\n 'linear':\n [[0, 4291543295, 4289306879],\n [4287070463, 0, 4282597631],\n [4280361215, 4278190335, 0]]\n}\n\nsolutions = {how: tf.Image(np.array(v, dtype='u4'),\n coords=coords, dims=dims)\n for how, v in solution_lists.items()}\n\neq_hist_sol = {'a': np.array([[0, 4291543295, 4288846335],\n [4286149631, 0, 4283518207],\n [4280821503, 4278190335, 0]], dtype='u4'),\n 'b': np.array([[0, 4291543295, 4288846335],\n [4286609919, 0, 4283518207],\n [4281281791, 4278190335, 0]], dtype='u4')}\neq_hist_sol['c'] = eq_hist_sol['b']\n\n\ndef check_span(x, cmap, how, sol):\n # Copy inputs that will be modified\n sol = sol.copy()\n if isinstance(x, xr.DataArray) and isinstance(x.data, da.Array):\n x = x.compute()\n else:\n x = x.copy()\n\n # All data no span\n img = tf.shade(x, cmap=cmap, how=how, span=None)\n assert_eq_xr(img, sol)\n\n # All data with span\n img = tf.shade(x, cmap=cmap, how=how, span=float_span)\n assert_eq_xr(img, sol)\n\n # Decrease smallest. This value should be clipped to span[0] and the\n # resulting image should be identical\n x[0, 1] = 10\n x_input = x.copy()\n img = tf.shade(x, cmap=cmap, how=how, span=float_span)\n assert_eq_xr(img, sol)\n\n # Check that clipping doesn't alter input array\n x.equals(x_input)\n\n # Increase largest. This value should be clipped to span[1] and the\n # resulting image should be identical\n x[2, 1] = 18\n x_input = x.copy()\n img = tf.shade(x, cmap=cmap, how=how, span=float_span)\n assert_eq_xr(img, sol)\n\n # Check that clipping doesn't alter input array\n x.equals(x_input)\n\n # zero out smallest. 
If span is working properly the zeroed out pixel\n # will be masked out and all other pixels will remain unchanged\n x[0, 1] = 0 if x.dtype.kind in ('i', 'u') else np.nan\n img = tf.shade(x, cmap=cmap, how=how, span=float_span)\n sol[0, 1] = sol[0, 0]\n assert_eq_xr(img, sol)\n\n # zero out the largest value\n x[2, 1] = 0 if x.dtype.kind in ('i', 'u') else np.nan\n img = tf.shade(x, cmap=cmap, how=how, span=float_span)\n sol[2, 1] = sol[0, 0]\n assert_eq_xr(img, sol)\n\n\[email protected]('agg', aggs)\[email protected]('attr', ['a', 'b', 'c'])\[email protected]('span', [None, int_span, float_span])\ndef test_shade(agg, attr, span):\n x = getattr(agg, attr)\n cmap = ['pink', 'red']\n\n img = tf.shade(x, cmap=cmap, how='log', span=span)\n sol = solutions['log']\n assert_eq_xr(img, sol)\n # Check dims/coordinates order\n assert list(img.coords) == ['x_axis', 'y_axis']\n assert list(img.dims) == ['y_axis', 'x_axis']\n\n img = tf.shade(x, cmap=cmap, how='cbrt', span=span)\n sol = solutions['cbrt']\n assert_eq_xr(img, sol)\n\n img = tf.shade(x, cmap=cmap, how='linear', span=span)\n sol = solutions['linear']\n assert_eq_xr(img, sol)\n\n # span option not supported with how='eq_hist'\n img = tf.shade(x, cmap=cmap, how='eq_hist')\n sol = tf.Image(eq_hist_sol[attr], coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n img = tf.shade(x, cmap=cmap,\n how=lambda x, mask: np.where(mask, np.nan, x ** 2))\n sol = np.array([[0, 4291543295, 4291148543],\n [4290030335, 0, 4285557503],\n [4282268415, 4278190335, 0]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n\[email protected]('agg', aggs)\[email protected]('attr', ['a', 'b', 'c'])\[email protected]('how', ['linear', 'log', 'cbrt'])\ndef test_span_cmap_list(agg, attr, how):\n # Get input\n x = getattr(agg, attr).copy()\n\n # Build colormap\n cmap = ['pink', 'red']\n\n # Get expected solution for interpolation method\n sol = solutions[how]\n\n # Check span\n check_span(x, cmap, how, sol)\n\n\[email protected]('agg', aggs)\[email protected]('cmap', ['black', (0, 0, 0), '#000000'])\ndef test_span_cmap_single(agg, cmap):\n # Get input\n x = agg.a\n\n # Build expected solution DataArray\n sol = np.array([[0, 671088640, 1946157056],\n [2701131776, 0, 3640655872],\n [3976200192, 4278190080, 0]])\n sol = tf.Image(sol, coords=coords, dims=dims)\n\n # Check span\n check_span(x, cmap, 'log', sol)\n\n\[email protected]('agg', aggs)\ndef test_span_cmap_mpl(agg):\n # Get inputs\n x = agg.a\n\n # Get MPL colormap\n cm = pytest.importorskip('matplotlib.cm')\n cmap = cm.viridis\n\n # Build expected solution Data Array\n sol = np.array([[0, 4283695428, 4287524142],\n [4287143710, 0, 4282832267],\n [4280213706, 4280608765, 0]])\n sol = tf.Image(sol, coords=coords, dims=dims)\n\n # Check span\n check_span(x, cmap, 'log', sol)\n\n\ndef test_shade_bool():\n data = ~np.eye(3, dtype='bool')\n x = tf.Image(data, coords=coords, dims=dims)\n sol = tf.Image(np.where(data, 4278190335, 0).astype('uint32'),\n coords=coords, dims=dims)\n img = tf.shade(x, cmap=['pink', 'red'], how='log')\n assert_eq_xr(img, sol)\n img = tf.shade(x, cmap=['pink', 'red'], how='cbrt')\n assert_eq_xr(img, sol)\n img = tf.shade(x, cmap=['pink', 'red'], how='linear')\n assert_eq_xr(img, sol)\n img = tf.shade(x, cmap=['pink', 'red'], how='eq_hist')\n assert_eq_xr(img, sol)\n\n\[email protected]('agg', aggs)\ndef test_shade_cmap(agg):\n cmap = ['red', (0, 255, 0), '#0000FF']\n img = tf.shade(agg.a, how='log', cmap=cmap)\n sol = np.array([[0, 4278190335, 4278236489],\n 
[4280344064, 0, 4289091584],\n [4292225024, 4294901760, 0]])\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n\[email protected]('agg', aggs)\[email protected]('cmap', ['black', (0, 0, 0), '#000000'])\ndef test_shade_cmap_non_categorical_alpha(agg, cmap):\n img = tf.shade(agg.a, how='log', cmap=cmap)\n sol = np.array([[ 0, 671088640, 1946157056],\n [2701131776, 0, 3640655872],\n [3976200192, 4278190080, 0]])\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n\[email protected]('agg', aggs)\ndef test_shade_cmap_errors(agg):\n with pytest.raises(ValueError):\n tf.shade(agg.a, cmap='foo')\n\n with pytest.raises(ValueError):\n tf.shade(agg.a, cmap=[])\n\n\[email protected]('agg', aggs)\ndef test_shade_mpl_cmap(agg):\n cm = pytest.importorskip('matplotlib.cm')\n img = tf.shade(agg.a, how='log', cmap=cm.viridis)\n sol = np.array([[0, 4283695428, 4287524142],\n [4287143710, 0, 4282832267],\n [4280213706, 4280608765, 0]])\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n\[email protected]('array', arrays)\ndef test_shade_category(array):\n coords = [np.array([0, 1]), np.array([2, 5])]\n cat_agg = tf.Image(array([[(0, 12, 0), (3, 0, 3)], [(12, 12, 12), (24, 0, 0)]], dtype='u4'),\n coords=(coords + [['a', 'b', 'c']]),\n dims=(dims + ['cats']))\n\n colors = [(255, 0, 0), '#0000FF', 'orange']\n\n img = tf.shade(cat_agg, color_key=colors, how='log', min_alpha=20)\n sol = np.array([[2583625728, 335565567],\n [4283774890, 3707764991]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n # Check dims/coordinates order\n assert list(img.coords) == ['x_axis', 'y_axis']\n assert list(img.dims) == ['y_axis', 'x_axis']\n\n colors = dict(zip('abc', colors))\n\n img = tf.shade(cat_agg, color_key=colors, how='cbrt', min_alpha=20)\n sol = np.array([[2650734592, 335565567],\n [4283774890, 3657433343]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)\n sol = np.array([[1140785152, 335565567],\n [4283774890, 2701132031]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n img = tf.shade(cat_agg, color_key=colors,\n how=lambda x, m: np.where(m, np.nan, x) ** 2,\n min_alpha=20)\n sol = np.array([[503250944, 335565567],\n [4283774890, 1744830719]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n # all pixels should be at min_alpha\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(50, 100))\n sol = np.array([[16711680, 21247],\n [5584810, 255]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n # redundant verification that alpha channel is all 0x00\n assert ((img.data[0,0] >> 24) & 0xFF) == 0\n assert ((img.data[0,1] >> 24) & 0xFF) == 0\n assert ((img.data[1,0] >> 24) & 0xFF) == 0\n assert ((img.data[1,1] >> 24) & 0xFF) == 0\n\n # all pixels should be at max_alpha\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(0, 2))\n sol = np.array([[4294901760, 4278211327],\n [4283774890, 4278190335]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n # redundant verification that alpha channel is all 0xFF\n assert ((img.data[0,0] >> 24) & 0xFF) == 255\n assert ((img.data[0,1] >> 24) & 0xFF) == 255\n assert ((img.data[1,0] >> 24) & 0xFF) == 255\n assert ((img.data[1,1] >> 24) & 0xFF) == 255\n\n # One pixel 
should be min-alpha, the other max-alpha\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(6, 36))\n sol = np.array([[872349696, 21247],\n [4283774890, 2566914303]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n # redundant verification that alpha channel is correct\n assert ((img.data[0,0] >> 24) & 0xFF) == 51 # (6 / 30) * 255\n assert ((img.data[0,1] >> 24) & 0xFF) == 0\n assert ((img.data[1,0] >> 24) & 0xFF) == 255\n assert ((img.data[1,1] >> 24) & 0xFF) == 153 # ( 18 /30) * 255\n\n # One pixel should be min-alpha, the other max-alpha\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0, span=(0, 72))\n sol = np.array([[721354752, 352342783],\n [2136291242, 1426063615]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n # redundant verification that alpha channel is correct\n assert ((img.data[0,0] >> 24) & 0xFF) == 42 # (12 / 72) * 255\n assert ((img.data[0,1] >> 24) & 0xFF) == 21 # (6 / 72) * 255\n assert ((img.data[1,0] >> 24) & 0xFF) == 127 # ( 36 / 72) * 255\n assert ((img.data[1,1] >> 24) & 0xFF) == 85 # ( 24 /72 ) * 255\n\n # test that empty coordinates are always fully transparent, even when\n # min_alpha is non-zero\n cat_agg = tf.Image(array([[(0, 0, 0), (3, 0, 3)],\n [(12, 12, 12), (24, 0, 0)]], dtype='u4'),\n coords=(coords + [['a', 'b', 'c']]),\n dims=(dims + ['cats']))\n\n # First test auto-span\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)\n sol = np.array([[5584810, 335565567],\n [4283774890, 2701132031]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n # redundant verification that alpha channel is correct\n assert ((img.data[0,0] >> 24) & 0xFF) == 0 # fully transparent\n assert ((img.data[0,1] >> 24) & 0xFF) != 0 # not fully transparent\n assert ((img.data[1,0] >> 24) & 0xFF) != 0 # not fully transparent\n assert ((img.data[1,1] >> 24) & 0xFF) != 0 # not fully transparent\n\n # Next test manual-span\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, span=(6, 36))\n sol = np.array([[5584810, 335565567],\n [4283774890, 2701132031]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n # redundant verification that alpha channel is correct\n assert ((img.data[0,0] >> 24) & 0xFF) == 0 # fully transparent\n assert ((img.data[0,1] >> 24) & 0xFF) != 0 # not fully transparent\n assert ((img.data[1,0] >> 24) & 0xFF) != 0 # not fully transparent\n assert ((img.data[1,1] >> 24) & 0xFF) != 0 # not fully transparent\n\n\n # Categorical aggregations with some reductions (such as sum) can result in negative\n # values in the data here we test positive and negative values\n cat_agg = tf.Image(array([[(0, -30, 0), (18, 0, -18)],\n [(-2, 2, -2), (-18, 9, 12)]], dtype='i4'),\n coords=(coords + [['a', 'b', 'c']]),\n dims=(dims + ['cats']))\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)\n sol = np.array([[335565567, 3914667690],\n [3680253090, 4285155988]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n assert ((img.data[0,0] >> 24) & 0xFF) == 20\n assert ((img.data[0,1] >> 24) & 0xFF) == 233\n assert ((img.data[1,0] >> 24) & 0xFF) == 219\n assert ((img.data[1,1] >> 24) & 0xFF) == 255\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, span=(0, 3))\n sol = np.array([[335565567, 341120682],\n [341587106, 4285155988]], dtype='u4')\n sol = tf.Image(sol, 
coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n assert ((img.data[0,0] >> 24) & 0xFF) == 20 # min alpha\n assert ((img.data[0,1] >> 24) & 0xFF) == 20 # min alpha\n assert ((img.data[1,0] >> 24) & 0xFF) == 20 # min alpha\n assert ((img.data[1,1] >> 24) & 0xFF) == 255\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, color_baseline=9)\n sol = np.array([[341129130, 3909091583],\n [3679795114, 4278232575]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n assert ((img.data[0,0] >> 24) & 0xFF) == 20\n assert ((img.data[0,1] >> 24) & 0xFF) == 233\n assert ((img.data[1,0] >> 24) & 0xFF) == 219\n assert ((img.data[1,1] >> 24) & 0xFF) == 255\n\n # Categorical aggregations with some reductions (such as sum) can result in negative\n # values in the data, here we test all negative values\n cat_agg = tf.Image(array([[(0, -30, 0), (-18, 0, -18)],\n [(-2, -2, -2), (-18, 0, 0)]], dtype='i4'),\n coords=(coords + [['a', 'b', 'c']]),\n dims=(dims + ['cats']))\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20)\n sol = np.array([[1124094719, 344794225],\n [4283774890, 2708096148]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n assert ((img.data[0,0] >> 24) & 0xFF) == 67\n assert ((img.data[0,1] >> 24) & 0xFF) == 20\n assert ((img.data[1,0] >> 24) & 0xFF) == 255\n assert ((img.data[1,1] >> 24) & 0xFF) == 161\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=20, span=(6, 36))\n sol = np.array([[335565567, 344794225],\n [341129130, 342508692]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n assert ((img.data[0,0] >> 24) & 0xFF) == 20 # min alpha\n assert ((img.data[0,1] >> 24) & 0xFF) == 20 # min alpha\n assert ((img.data[1,0] >> 24) & 0xFF) == 20 # min alpha\n assert ((img.data[1,1] >> 24) & 0xFF) == 20 # min alpha\n\[email protected]('array', arrays)\ndef test_shade_zeros(array):\n coords = [np.array([0, 1]), np.array([2, 5])]\n cat_agg = tf.Image(array([[(0, 0, 0), (0, 0, 0)],\n [(0, 0, 0), (0, 0, 0)]], dtype='u4'),\n coords=(coords + [['a', 'b', 'c']]),\n dims=(dims + ['cats']))\n\n colors = [(255, 0, 0), '#0000FF', 'orange']\n\n img = tf.shade(cat_agg, color_key=colors, how='linear', min_alpha=0)\n sol = np.array([[5584810, 5584810],\n [5584810, 5584810]], dtype='u4')\n sol = tf.Image(sol, coords=coords, dims=dims)\n assert_eq_xr(img, sol)\n\n\ncoords2 = [np.array([0, 2]), np.array([3, 5])]\nimg1 = tf.Image(np.array([[0xff00ffff, 0x00000000],\n [0x00000000, 0xff00ff7d]], dtype='uint32'),\n coords=coords2, dims=dims)\nimg2 = tf.Image(np.array([[0x00000000, 0x00000000],\n [0x000000ff, 0x7d7d7dff]], dtype='uint32'),\n coords=coords2, dims=dims)\n\n\ndef test_set_background():\n out = tf.set_background(img1)\n assert out.equals(img1)\n sol = tf.Image(np.array([[0xff00ffff, 0xff0000ff],\n [0xff0000ff, 0xff00ff7d]], dtype='uint32'),\n coords=coords2, dims=dims)\n out = tf.set_background(img1, 'red')\n assert out.equals(sol)\n\n\ndef test_stack():\n img = tf.stack(img1, img2)\n out = np.array([[0xff00ffff, 0x00000000],\n [0x00000000, 0xff3dbfbc]], dtype='uint32')\n assert (img.x_axis == img1.x_axis).all()\n assert (img.y_axis == img1.y_axis).all()\n np.testing.assert_equal(img.data, out)\n\n img = tf.stack(img2, img1)\n out = np.array([[0xff00ffff, 0x00000000],\n [0x00000000, 0xff00ff7d]], dtype='uint32')\n assert (img.x_axis == img1.x_axis).all()\n assert (img.y_axis == img1.y_axis).all()\n np.testing.assert_equal(img.data, 
out)\n\n img = tf.stack(img1, img2, how='add')\n out = np.array([[0xff00ffff, 0x00000000],\n [0x00000000, 0xff3dfffa]], dtype='uint32')\n assert (img.x_axis == img1.x_axis).all()\n assert (img.y_axis == img1.y_axis).all()\n np.testing.assert_equal(img.data, out)\n\n\ndef test_masks():\n # Square\n mask = tf._square_mask(2)\n np.testing.assert_equal(mask, np.ones((5, 5), dtype='bool'))\n np.testing.assert_equal(tf._square_mask(0), np.ones((1, 1), dtype='bool'))\n # Circle\n np.testing.assert_equal(tf._circle_mask(0), np.ones((1, 1), dtype='bool'))\n out = np.array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]], dtype='bool')\n np.testing.assert_equal(tf._circle_mask(1), out)\n out = np.array([[0, 0, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0, 0]], dtype='bool')\n np.testing.assert_equal(tf._circle_mask(3), out)\n\n\ndef test_rgb_spread():\n p = 0x7d00007d\n g = 0x7d00FF00\n b = 0x7dFF0000\n data = np.array([[p, p, 0, 0, 0],\n [p, g, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, b, 0],\n [0, 0, 0, 0, 0]], dtype='uint32')\n coords = [np.arange(5), np.arange(5)]\n img = tf.Image(data, coords=coords, dims=dims)\n\n s = tf.spread(img)\n o = np.array([[0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],\n [0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],\n [0xbc00a82a, 0xbc00a82a, 0xbca85600, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])\n np.testing.assert_equal(s.data, o)\n assert (s.x_axis == img.x_axis).all()\n assert (s.y_axis == img.y_axis).all()\n assert s.dims == img.dims\n\n s = tf.spread(img, px=2)\n o = np.array([[0xed00863b, 0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000],\n [0xed00863b, 0xed00863b, 0xf581411c, 0xdc904812, 0x7dff0000],\n [0xed00863b, 0xf581411c, 0xed864419, 0xbca85600, 0x7dff0000],\n [0xbc00a82a, 0xdc904812, 0xbca85600, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(img, shape='square')\n o = np.array([[0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],\n [0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],\n [0xbc00a82a, 0xbc00a82a, 0xbca85600, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(img, how='add')\n o = np.array([[0xff007db7, 0xff007db7, 0xfa007f3e, 0x00000000, 0x00000000],\n [0xff007db7, 0xff007db7, 0xfa007f3e, 0x00000000, 0x00000000],\n [0xfa007f3e, 0xfa007f3e, 0xfa7f7f00, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])\n np.testing.assert_equal(s.data, o)\n\n mask = np.array([[1, 0, 1],\n [0, 1, 0],\n [1, 0, 1]])\n s = tf.spread(img, mask=mask)\n o = np.array([[0xbc00a82a, 0xbc00007d, 0x7d00ff00, 0x00000000, 0x00000000],\n [0xbc00007d, 0xbc00a82a, 0x7d00007d, 0x00000000, 0x00000000],\n [0x7d00ff00, 0x7d00007d, 0xbca85600, 0x00000000, 0x7dff0000],\n [0x00000000, 0x00000000, 0x00000000, 0x7dff0000, 0x00000000],\n [0x00000000, 0x00000000, 0x7dff0000, 0x00000000, 0x7dff0000]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(img, px=0)\n np.testing.assert_equal(s.data, img.data)\n\n pytest.raises(ValueError, lambda: tf.spread(img, px=-1))\n 
pytest.raises(ValueError, lambda: tf.spread(img, mask=np.ones(2)))\n pytest.raises(ValueError, lambda: tf.spread(img, mask=np.ones((2, 2))))\n\n\ndef test_uint32_spread():\n data = np.array([[1, 1, 0, 0, 0],\n [1, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 3, 0],\n [0, 0, 0, 0, 0]], dtype='uint32')\n coords = [np.arange(5), np.arange(5)]\n arr = xr.DataArray(data, coords=coords, dims=dims)\n\n s = tf.spread(arr)\n o = np.array([[5, 5, 3, 0, 0],\n [5, 5, 3, 0, 0],\n [3, 3, 5, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n assert (s.x_axis == arr.x_axis).all()\n assert (s.y_axis == arr.y_axis).all()\n assert s.dims == arr.dims\n\n s = tf.spread(arr, px=2)\n o = np.array([[5, 5, 5, 3, 0],\n [5, 5, 8, 6, 3],\n [5, 8, 7, 5, 3],\n [3, 6, 5, 3, 3],\n [0, 3, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, shape='square')\n o = np.array([[5, 5, 3, 0, 0],\n [5, 5, 3, 0, 0],\n [3, 3, 5, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, how='min')\n o = np.array([[1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 2, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, how='max')\n\n o = np.array([[2, 2, 2, 0, 0],\n [2, 2, 2, 0, 0],\n [2, 2, 3, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n\n mask = np.array([[1, 0, 1],\n [0, 1, 0],\n [1, 0, 1]])\n\n data = np.array([[0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='uint32')\n arr = xr.DataArray(data, coords=coords, dims=dims)\n s = tf.spread(arr, mask=mask)\n\n o = np.array([[0, 0, 0, 1, 0],\n [1, 0, 2, 0, 1],\n [0, 1, 0, 0, 0],\n [1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, px=0)\n np.testing.assert_equal(s.data, arr.data)\n\n pytest.raises(ValueError, lambda: tf.spread(arr, px=-1))\n pytest.raises(ValueError, lambda: tf.spread(arr, mask=np.ones(2)))\n pytest.raises(ValueError, lambda: tf.spread(arr, mask=np.ones((2, 2))))\n\n\ndef test_int32_spread():\n data = np.array([[1, 1, 0, 0, 0],\n [1, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 3, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n coords = [np.arange(5), np.arange(5)]\n arr = xr.DataArray(data, coords=coords, dims=dims)\n\n s = tf.spread(arr)\n o = np.array([[5, 5, 3, 0, 0],\n [5, 5, 3, 0, 0],\n [3, 3, 5, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n assert (s.x_axis == arr.x_axis).all()\n assert (s.y_axis == arr.y_axis).all()\n assert s.dims == arr.dims\n\n s = tf.spread(arr, px=2)\n o = np.array([[5, 5, 5, 3, 0],\n [5, 5, 8, 6, 3],\n [5, 8, 7, 5, 3],\n [3, 6, 5, 3, 3],\n [0, 3, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, shape='square')\n o = np.array([[5, 5, 3, 0, 0],\n [5, 5, 3, 0, 0],\n [3, 3, 5, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, how='min')\n o = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, how='max')\n\n o = np.array([[2, 2, 2, 0, 0],\n [2, 2, 2, 0, 0],\n [2, 2, 3, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n\n mask = np.array([[1, 0, 1],\n [0, 1, 0],\n [1, 0, 1]])\n\n data = np.array([[0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n 
arr = xr.DataArray(data, coords=coords, dims=dims)\n s = tf.spread(arr, mask=mask)\n\n o = np.array([[0, 0, 0, 1, 0],\n [1, 0, 2, 0, 1],\n [0, 1, 0, 0, 0],\n [1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, px=0)\n np.testing.assert_equal(s.data, arr.data)\n\n pytest.raises(ValueError, lambda: tf.spread(arr, px=-1))\n pytest.raises(ValueError, lambda: tf.spread(arr, mask=np.ones(2)))\n pytest.raises(ValueError, lambda: tf.spread(arr, mask=np.ones((2, 2))))\n\n\ndef test_float32_spread():\n data = np.array([[1, 1, np.nan, np.nan, np.nan],\n [1, 2, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, 3, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan]], dtype='float32')\n coords = [np.arange(5), np.arange(5)]\n arr = xr.DataArray(data, coords=coords, dims=dims)\n\n s = tf.spread(arr)\n o = np.array([[5, 5, 3, np.nan, np.nan],\n [5, 5, 3, np.nan, np.nan],\n [3, 3, 5, 3, 3],\n [np.nan, np.nan, 3, 3, 3],\n [np.nan, np.nan, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n assert (s.x_axis == arr.x_axis).all()\n assert (s.y_axis == arr.y_axis).all()\n assert s.dims == arr.dims\n\n s = tf.spread(arr, px=2)\n o = np.array([[5, 5, 5, 3, np.nan],\n [5, 5, 8, 6, 3],\n [5, 8, 7, 5, 3],\n [3, 6, 5, 3, 3],\n [np.nan, 3, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, shape='square')\n o = np.array([[5, 5, 3, np.nan, np.nan],\n [5, 5, 3, np.nan, np.nan],\n [3, 3, 5, 3, 3],\n [np.nan, np.nan, 3, 3, 3],\n [np.nan, np.nan, 3, 3, 3]])\n\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, how='min')\n o = np.array([[1, 1, 1, np.nan, np.nan],\n [1, 1, 1, np.nan, np.nan],\n [1, 1, 2, 3, 3],\n [np.nan, np.nan, 3, 3, 3],\n [np.nan, np.nan, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, how='max')\n\n o = np.array([[2, 2, 2, np.nan, np.nan],\n [2, 2, 2, np.nan, np.nan],\n [2, 2, 3, 3, 3],\n [np.nan, np.nan, 3, 3, 3],\n [np.nan, np.nan, 3, 3, 3]])\n np.testing.assert_equal(s.data, o)\n\n\n mask = np.array([[1, 0, 1],\n [0, 1, 0],\n [1, 0, 1]])\n data = np.array([[np.nan, np.nan, np.nan, 1, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, 1, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan]], dtype='float32')\n arr = xr.DataArray(data, coords=coords, dims=dims)\n s = tf.spread(arr, mask=mask)\n\n\n o = np.array([[0, 0, 0, 1, 0],\n [1, 0, 2, 0, 1],\n [0, 1, 0, 0, 0],\n [1, 0, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n\n o = np.array([[np.nan, np.nan, np.nan, 1, np.nan],\n [1, np.nan, 2, np.nan, 1],\n [np.nan, 1, np.nan, np.nan, np.nan],\n [1, np.nan, 1, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan]])\n np.testing.assert_equal(s.data, o)\n\n s = tf.spread(arr, px=0)\n np.testing.assert_equal(s.data, arr.data)\n\n pytest.raises(ValueError, lambda: tf.spread(arr, px=-1))\n pytest.raises(ValueError, lambda: tf.spread(arr, mask=np.ones(2)))\n pytest.raises(ValueError, lambda: tf.spread(arr, mask=np.ones((2, 2))))\n\n\ndef test_categorical_spread():\n a_data = np.array([[0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n\n b_data = np.array([[0, 0, 0, 0, 0],\n [0, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n\n c_data = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 3, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n\n data = np.dstack([a_data, b_data, c_data])\n coords = 
[np.arange(5), np.arange(5)]\n arr = xr.DataArray(data, coords=coords + [['a', 'b', 'c']],\n dims=dims + ['cat'])\n\n s = tf.spread(arr)\n o = np.array([[1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]])\n np.testing.assert_equal(s.sel(cat='a').data, o)\n\n o = np.array([[2, 2, 2, 0, 0],\n [2, 2, 2, 0, 0],\n [2, 2, 2, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]])\n np.testing.assert_equal(s.sel(cat='b').data, o)\n\n o = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3],\n [0, 0, 3, 3, 3]])\n np.testing.assert_equal(s.sel(cat='c').data, o)\n\n\ndef test_rgb_density():\n b = 0xffff0000\n data = np.full((4, 4), b, dtype='uint32')\n assert tf._rgb_density(data) == 1.0\n data = np.zeros((4, 4), dtype='uint32')\n assert tf._rgb_density(data) == np.inf\n data[2, 2] = b\n assert tf._rgb_density(data) == 0\n data[2, 1] = data[1, 2] = data[1, 1] = b\n assert np.allclose(tf._rgb_density(data), 3./8.)\n\ndef test_int_array_density():\n data = np.ones((4, 4), dtype='uint32')\n assert tf._array_density(data, float_type=False) == 1.0\n data = np.zeros((4, 4), dtype='uint32')\n assert tf._array_density(data, float_type=False) == np.inf\n data[2, 2] = 1\n assert tf._array_density(data, float_type=False) == 0\n data[2, 1] = data[1, 2] = data[1, 1] = 1\n assert np.allclose(tf._array_density(data, float_type=False), 3./8.)\n\ndef test_float_array_density():\n data = np.ones((4, 4), dtype='float32')\n assert tf._array_density(data, float_type=True) == 1.0\n data = np.full((4, 4), np.nan, dtype='float32')\n assert tf._array_density(data, float_type=True) == np.inf\n data[2, 2] = 1\n assert tf._array_density(data, float_type=True) == 0\n data[2, 1] = data[1, 2] = data[1, 1] = 1\n assert np.allclose(tf._array_density(data, float_type=True), 3./8.)\n\n\ndef test_rgb_dynspread():\n b = 0xffff0000\n data = np.array([[b, b, 0, 0, 0],\n [b, b, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, b, 0],\n [0, 0, 0, 0, 0]], dtype='uint32')\n coords = [np.arange(5), np.arange(5)]\n img = tf.Image(data, coords=coords, dims=dims)\n assert tf.dynspread(img).equals(tf.spread(img, 1))\n assert tf.dynspread(img, threshold=0.9).equals(tf.spread(img, 2))\n assert tf.dynspread(img, threshold=0).equals(img)\n assert tf.dynspread(img, max_px=0).equals(img)\n\n pytest.raises(ValueError, lambda: tf.dynspread(img, threshold=1.1))\n pytest.raises(ValueError, lambda: tf.dynspread(img, max_px=-1))\n\ndef test_array_dynspread():\n data = np.array([[1, 1, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0]], dtype='uint32')\n coords = [np.arange(5), np.arange(5)]\n arr = xr.DataArray(data, coords=coords, dims=dims)\n assert tf.dynspread(arr).equals(tf.spread(arr, 1))\n assert tf.dynspread(arr, threshold=0.9).equals(tf.spread(arr, 2))\n assert tf.dynspread(arr, threshold=0).equals(arr)\n assert tf.dynspread(arr, max_px=0).equals(arr)\n\n pytest.raises(ValueError, lambda: tf.dynspread(arr, threshold=1.1))\n pytest.raises(ValueError, lambda: tf.dynspread(arr, max_px=-1))\n\n\ndef test_categorical_dynspread():\n a_data = np.array([[0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n\n b_data = np.array([[0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n\n c_data = np.array([[1, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0]], dtype='int32')\n\n data = np.dstack([a_data, b_data, c_data])\n 
coords = [np.arange(5), np.arange(5)]\n arr = xr.DataArray(data, coords=coords + [['a', 'b', 'c']],\n dims=dims + ['cat'])\n assert tf.dynspread(arr).equals(tf.spread(arr, 1))\n assert tf.dynspread(arr, threshold=0.9).equals(tf.spread(arr, 2))\n assert tf.dynspread(arr, threshold=0).equals(arr)\n assert tf.dynspread(arr, max_px=0).equals(arr)\n\n\n\ndef check_eq_hist_cdf_slope(eq):\n # Check that the slope of the cdf is ~1\n # Adapted from scikit-image's test for the same function\n cdf = np.histogram(eq[~np.isnan(eq)], bins=256)[0].cumsum()\n cdf = cdf / cdf[-1]\n slope = np.polyfit(np.linspace(0, 1, cdf.size), cdf, 1)[0]\n assert 0.9 < slope < 1.1\n\n\ndef test_eq_hist():\n # Float\n data = np.random.normal(size=300**2)\n data[np.random.randint(300**2, size=100)] = np.nan\n data = (data - np.nanmin(data)).reshape((300, 300))\n mask = np.isnan(data)\n eq = tf.eq_hist(data, mask)\n check_eq_hist_cdf_slope(eq)\n assert (np.isnan(eq) == mask).all()\n # Integer\n data = np.random.normal(scale=100, size=(300, 300)).astype('i8')\n data = data - data.min()\n eq = tf.eq_hist(data)\n check_eq_hist_cdf_slope(eq)\n\n\ndef test_Image_to_pil():\n img = img1.to_pil()\n assert isinstance(img, PIL.Image.Image)\n\n\ndef test_Image_to_bytesio():\n bytes = img1.to_bytesio()\n assert isinstance(bytes, BytesIO)\n assert bytes.tell() == 0\n\n\ndef test_shade_should_handle_zeros_array():\n data = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]], dtype='uint32')\n arr = tf.Image(data, dims=['x', 'y'])\n img = tf.shade(arr, cmap=['white', 'black'], how='linear')\n assert img is not None\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.linspace",
"numpy.isnan",
"numpy.arange",
"numpy.eye",
"numpy.nanmin",
"numpy.dstack",
"numpy.full",
"numpy.ones",
"numpy.random.normal",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
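The datashader test record above exercises tf.shade on small xarray aggregates. A minimal usage sketch follows, assuming datashader and xarray are installed; the toy aggregate values are mine and are not taken from the tests.

import numpy as np
import xarray as xr
import datashader.transfer_functions as tf

# A tiny 3x3 float aggregate with the same coords/dims layout as the tests.
agg = xr.DataArray(np.arange(9, dtype='f8').reshape(3, 3),
                   coords={'x_axis': [3, 4, 5], 'y_axis': [0, 1, 2]},
                   dims=['y_axis', 'x_axis'])

# shade() maps aggregate values onto the colormap and returns a tf.Image
# whose .data is a uint32 array of packed RGBA pixels.
img = tf.shade(agg, cmap=['pink', 'red'], how='linear')
print(img.data.dtype, img.data.shape)  # uint32 (3, 3)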
AidanGlickman/sportsreference | [
"2129b83553e37c42f847e2f2bfbfa7287212bc5f"
] | [
"sportsreference/fb/roster.py"
] | [
"import pandas as pd\nimport re\nfrom .constants import ROSTER_SCHEME, SQUAD_URL\nfrom ..decorators import float_property_decorator, int_property_decorator\nfrom .fb_utils import _lookup_team\nfrom pyquery import PyQuery as pq\nfrom sportsreference.utils import (_get_stats_table,\n _parse_field,\n _remove_html_comment_tags)\n\n\nclass SquadPlayer:\n \"\"\"\n Get player information and stats.\n\n Given a player ID and data, capture all relevant stats and information for\n the player including name, nationality, goals, assists, expected goal\n difference, nutmegs, and much more.\n\n Parameters\n ----------\n player_data : PyQuery object\n A PyQuery object containing all fields of information for a single\n player, represented as one long row by concatenating all tables which\n hold values for the requested player.\n player_id : string\n A ``string`` representation of the player's unique 8-digit ID as shown\n on fbref.com.\n \"\"\"\n def __init__(self, player_data, player_id):\n self._name = None\n self._player_id = player_id\n self._nationality = None\n self._position = None\n self._age = None\n self._matches_played = None\n self._starts = None\n self._minutes = None\n self._goals = None\n self._assists = None\n self._penalty_kicks = None\n self._penalty_kick_attempts = None\n self._yellow_cards = None\n self._red_cards = None\n self._goals_per_90 = None\n self._assists_per_90 = None\n self._goals_and_assists_per_90 = None\n self._goals_non_penalty_per_90 = None\n self._goals_and_assists_non_penalty_per_90 = None\n self._expected_goals = None\n self._expected_goals_non_penalty = None\n self._expected_assists = None\n self._expected_goals_per_90 = None\n self._expected_assists_per_90 = None\n self._expected_goals_and_assists_per_90 = None\n self._expected_goals_non_penalty_per_90 = None\n self._expected_goals_and_assists_non_penalty_per_90 = None\n self._own_goals = None\n # Goalkeeping stats\n self._goals_against = None\n self._own_goals_against = None\n self._goals_against_per_90 = None\n self._shots_on_target_against = None\n self._saves = None\n self._save_percentage = None\n self._wins = None\n self._draws = None\n self._losses = None\n self._clean_sheets = None\n self._clean_sheet_percentage = None\n self._penalty_kicks_attempted = None\n self._penalty_kicks_allowed = None\n self._penalty_kicks_saved = None\n self._penalty_kicks_missed = None\n # Advanced goalkeeping stats\n self._free_kick_goals_against = None\n self._corner_kick_goals_against = None\n self._post_shot_expected_goals = None\n self._post_shot_expected_goals_per_shot = None\n self._post_shot_expected_goals_minus_allowed = None\n self._post_shot_expected_goals_minus_allowed_per_90 = None\n self._launches_completed = None\n self._launches_attempted = None\n self._launch_completion_percentage = None\n self._keeper_passes_attempted = None\n self._throws_attempted = None\n self._launch_percentage = None\n self._average_keeper_pass_length = None\n self._goal_kicks_attempted = None\n self._goal_kick_launch_percentage = None\n self._average_goal_kick_length = None\n self._opponent_cross_attempts = None\n self._opponent_cross_stops = None\n self._opponent_cross_stop_percentage = None\n self._keeper_actions_outside_penalty_area = None\n self._keeper_actions_outside_penalty_area_per_90 = None\n self._average_keeper_action_outside_penalty_distance = None\n # Shooting stats\n self._shots = None\n self._shots_on_target = None\n self._free_kick_shots = None\n self._shots_on_target_percentage = None\n self._shots_per_90 = None\n 
self._shots_on_target_per_90 = None\n self._goals_per_shot = None\n self._goals_per_shot_on_target = None\n self._expected_goals_non_penalty_per_shot = None\n self._goals_minus_expected = None\n self._non_penalty_minus_expected_non_penalty = None\n # Passing stats\n self._assists_minus_expected = None\n self._key_passes = None\n self._passes_completed = None\n self._passes_attempted = None\n self._pass_completion = None\n self._short_passes_completed = None\n self._short_passes_attempted = None\n self._short_pass_completion = None\n self._medium_passes_completed = None\n self._medium_passes_attempted = None\n self._medium_pass_completion = None\n self._long_passes_completed = None\n self._long_passes_attempted = None\n self._long_pass_completion = None\n self._left_foot_passes = None\n self._right_foot_passes = None\n self._free_kick_passes = None\n self._through_balls = None\n self._corner_kicks = None\n self._throw_ins = None\n self._final_third_passes = None\n self._penalty_area_passes = None\n self._penalty_area_crosses = None\n # Playing time stats\n self._minutes_per_match = None\n self._minutes_played_percentage = None\n self._nineties_played = None\n self._minutes_per_start = None\n self._subs = None\n self._minutes_per_sub = None\n self._unused_sub = None\n self._points_per_match = None\n self._goals_scored_on_pitch = None\n self._goals_against_on_pitch = None\n self._goal_difference_on_pitch = None\n self._goal_difference_on_pitch_per_90 = None\n self._net_difference_on_pitch_per_90 = None\n self._expected_goals_on_pitch = None\n self._expected_goals_against_on_pitch = None\n self._expected_goal_difference = None\n self._expected_goal_difference_per_90 = None\n self._net_expected_goal_difference_per_90 = None\n # Miscellaneous stats\n self._soft_reds = None\n self._fouls_committed = None\n self._fouls_drawn = None\n self._offsides = None\n self._crosses = None\n self._tackles_won = None\n self._interceptions = None\n self._penalty_kicks_won = None\n self._penalty_kicks_conceded = None\n self._successful_dribbles = None\n self._attempted_dribbles = None\n self._dribble_success_rate = None\n self._players_dribbled_past = None\n self._nutmegs = None\n self._dribblers_tackled = None\n self._dribblers_contested = None\n self._tackle_percentage = None\n self._times_dribbled_past = None\n\n self._parse_player_stats(player_data)\n\n def _parse_nationality(self, player_data):\n \"\"\"\n Parse the player's nationality.\n\n If the nationality is listed for a player, it will contain a URI which\n includes the name of the country the player represents. For example, an\n English player would have a URI of the following:\n \"/en/country/ENG/England-Football\". 
Pulling out the country name and\n returning it as a string is a simple solution to pulling someone's\n nationality.\n\n Parameters\n ----------\n player_data : PyQuery object\n A PyQuery object representing all of the player's stats fields\n combined as a singular row.\n\n Returns\n -------\n string\n Returns a ``string`` of the player's home country, such as\n 'England'.\n \"\"\"\n country = player_data(ROSTER_SCHEME['nationality'])\n if not country:\n return None\n country = country.attr('href')\n country = re.sub(r'.*\\/', '', country)\n country = country.replace('-Football', '')\n return country\n\n def _parse_player_stats(self, player_data):\n \"\"\"\n Parse a value for every attribute.\n\n This method looks through every class attribute with a few exceptions\n and retrieves the value according to the parsing scheme and index of\n the attribute from the passed HTML data. Once the value is retrieved,\n the attribute's value is updated with the returned result.\n\n Parameters\n ----------\n player_data : string\n A ``string`` representation of all of the player's stats fields\n combined as a singular row.\n player_id : string\n A ``string`` of the player's unique 8-digit ID.\n \"\"\"\n for field in self.__dict__:\n # The short field truncates the leading '_' in the attribute name.\n short_field = str(field)[1:]\n if short_field == 'player_id':\n continue\n if short_field == 'nationality':\n value = self._parse_nationality(player_data)\n else:\n value = _parse_field(ROSTER_SCHEME, player_data, short_field)\n setattr(self, field, value)\n\n @property\n def dataframe(self):\n \"\"\"\n Returns a pandas ``DataFame`` containing all other class properties\n and values. The index for the DataFrame is the player ID.\n \"\"\"\n fields_to_include = {\n 'name': self.name,\n 'player_id': self.player_id,\n 'nationality': self.nationality,\n 'position': self.position,\n 'age': self.age,\n 'matches_played': self.matches_played,\n 'starts': self.starts,\n 'minutes': self.minutes,\n 'goals': self.goals,\n 'assists': self.assists,\n 'penalty_kicks': self.penalty_kicks,\n 'penalty_kick_attempts': self.penalty_kick_attempts,\n 'yellow_cards': self.yellow_cards,\n 'red_cards': self.red_cards,\n 'goals_per_90': self.goals_per_90,\n 'assists_per_90': self.assists_per_90,\n 'goals_and_assists_per_90': self.goals_and_assists_per_90,\n 'goals_non_penalty_per_90': self.goals_non_penalty_per_90,\n 'goals_and_assists_non_penalty_per_90':\n self.goals_and_assists_non_penalty_per_90,\n 'expected_goals': self.expected_goals,\n 'expected_goals_non_penalty': self.expected_goals_non_penalty,\n 'expected_assists': self.expected_assists,\n 'expected_goals_per_90': self.expected_goals_per_90,\n 'expected_assists_per_90': self.expected_assists_per_90,\n 'expected_goals_and_assists_per_90':\n self.expected_goals_and_assists_per_90,\n 'expected_goals_non_penalty_per_90':\n self.expected_goals_non_penalty_per_90,\n 'expected_goals_and_assists_non_penalty_per_90':\n self.expected_goals_and_assists_non_penalty_per_90,\n 'own_goals': self.own_goals,\n 'goals_against': self.goals_against,\n 'own_goals_against': self.own_goals_against,\n 'goals_against_per_90': self.goals_against_per_90,\n 'shots_on_target_against': self.shots_on_target_against,\n 'saves': self.saves,\n 'save_percentage': self.save_percentage,\n 'wins': self.wins,\n 'draws': self.draws,\n 'losses': self.losses,\n 'clean_sheets': self.clean_sheets,\n 'clean_sheet_percentage': self.clean_sheet_percentage,\n 'penalty_kicks_attempted': self.penalty_kicks_attempted,\n 
'penalty_kicks_allowed': self.penalty_kicks_allowed,\n 'penalty_kicks_saved': self.penalty_kicks_saved,\n 'penalty_kicks_missed': self.penalty_kicks_missed,\n 'free_kick_goals_against': self.free_kick_goals_against,\n 'corner_kick_goals_against': self.corner_kick_goals_against,\n 'post_shot_expected_goals': self.post_shot_expected_goals,\n 'post_shot_expected_goals_per_shot':\n self.post_shot_expected_goals_per_shot,\n 'post_shot_expected_goals_minus_allowed':\n self.post_shot_expected_goals_minus_allowed,\n 'launches_completed': self.launches_completed,\n 'launches_attempted': self.launches_attempted,\n 'launch_completion_percentage': self.launch_completion_percentage,\n 'keeper_passes_attempted': self.keeper_passes_attempted,\n 'throws_attempted': self.throws_attempted,\n 'launch_percentage': self.launch_percentage,\n 'average_keeper_pass_length': self.average_keeper_pass_length,\n 'goal_kicks_attempted': self.goal_kicks_attempted,\n 'goal_kick_launch_percentage': self.goal_kick_launch_percentage,\n 'average_goal_kick_length': self.average_goal_kick_length,\n 'opponent_cross_attempts': self.opponent_cross_attempts,\n 'opponent_cross_stops': self.opponent_cross_stops,\n 'opponent_cross_stop_percentage':\n self.opponent_cross_stop_percentage,\n 'keeper_actions_outside_penalty_area':\n self.keeper_actions_outside_penalty_area,\n 'keeper_actions_outside_penalty_area_per_90':\n self.keeper_actions_outside_penalty_area_per_90,\n 'average_keeper_action_outside_penalty_distance':\n self.average_keeper_action_outside_penalty_distance,\n 'shots': self.shots,\n 'shots_on_target': self.shots_on_target,\n 'free_kick_shots': self.free_kick_shots,\n 'shots_on_target_percentage': self.shots_on_target_percentage,\n 'shots_per_90': self.shots_per_90,\n 'shots_on_target_per_90': self.shots_on_target_per_90,\n 'goals_per_shot': self.goals_per_shot,\n 'goals_per_shot_on_target': self.goals_per_shot_on_target,\n 'expected_goals_non_penalty_per_shot':\n self.expected_goals_non_penalty_per_shot,\n 'goals_minus_expected': self.goals_minus_expected,\n 'non_penalty_minus_expected_non_penalty':\n self.non_penalty_minus_expected_non_penalty,\n 'assists_minus_expected': self.assists_minus_expected,\n 'key_passes': self.key_passes,\n 'passes_completed': self.passes_completed,\n 'passes_attempted': self.passes_attempted,\n 'pass_completion': self.pass_completion,\n 'short_passes_completed': self.short_passes_completed,\n 'short_passes_attempted': self.short_passes_attempted,\n 'short_pass_completion': self.short_pass_completion,\n 'medium_passes_completed': self.medium_passes_completed,\n 'medium_passes_attempted': self.medium_passes_attempted,\n 'medium_pass_completion': self.medium_pass_completion,\n 'long_passes_completed': self.long_passes_completed,\n 'long_passes_attempted': self.long_passes_attempted,\n 'long_pass_completion': self.long_pass_completion,\n 'left_foot_passes': self.left_foot_passes,\n 'right_foot_passes': self.right_foot_passes,\n 'free_kick_passes': self.free_kick_passes,\n 'through_balls': self.through_balls,\n 'corner_kicks': self.corner_kicks,\n 'throw_ins': self.throw_ins,\n 'final_third_passes': self.final_third_passes,\n 'penalty_area_passes': self.penalty_area_passes,\n 'penalty_area_crosses': self.penalty_area_crosses,\n 'minutes_per_match': self.minutes_per_match,\n 'minutes_played_percentage': self.minutes_played_percentage,\n 'nineties_played': self.nineties_played,\n 'minutes_per_start': self.minutes_per_start,\n 'subs': self.subs,\n 'minutes_per_sub': self.minutes_per_sub,\n 
'unused_sub': self.unused_sub,\n 'points_per_match': self.points_per_match,\n 'goals_scored_on_pitch': self.goals_scored_on_pitch,\n 'goals_against_on_pitch': self.goals_against_on_pitch,\n 'goal_difference_on_pitch': self.goal_difference_on_pitch,\n 'goal_difference_on_pitch_per_90':\n self.goal_difference_on_pitch_per_90,\n 'net_difference_on_pitch_per_90':\n self.net_difference_on_pitch_per_90,\n 'expected_goals_on_pitch': self.expected_goals_on_pitch,\n 'expected_goals_against_on_pitch':\n self.expected_goals_against_on_pitch,\n 'expected_goal_difference': self.expected_goal_difference,\n 'expected_goal_difference_per_90':\n self.expected_goal_difference_per_90,\n 'net_expected_goal_difference_per_90':\n self.net_expected_goal_difference_per_90,\n 'soft_reds': self.soft_reds,\n 'fouls_committed': self.fouls_committed,\n 'fouls_drawn': self.fouls_drawn,\n 'offsides': self.offsides,\n 'crosses': self.crosses,\n 'tackles_won': self.tackles_won,\n 'interceptions': self.interceptions,\n 'penalty_kicks_won': self.penalty_kicks_won,\n 'penalty_kicks_conceded': self.penalty_kicks_conceded,\n 'successful_dribbles': self.successful_dribbles,\n 'attempted_dribbles': self.attempted_dribbles,\n 'dribble_success_rate': self.dribble_success_rate,\n 'players_dribbled_past': self.players_dribbled_past,\n 'nutmegs': self.nutmegs,\n 'dribblers_tackled': self.dribblers_tackled,\n 'dribblers_contested': self.dribblers_contested,\n 'tackle_percentage': self.tackle_percentage,\n 'times_dribbled_past': self.times_dribbled_past\n }\n return pd.DataFrame([fields_to_include], index=[self.player_id])\n\n @property\n def name(self):\n \"\"\"\n Returns a ``string`` of the player's full name, such as 'Harry Kane'.\n \"\"\"\n return self._name\n\n @property\n def player_id(self):\n \"\"\"\n Returns a ``string`` of the player's 8-digit ID, such as '21a66f6a' for\n Harry Kane.\n \"\"\"\n return self._player_id\n\n @property\n def nationality(self):\n \"\"\"\n Returns a ``string`` of the player's home country, such as 'England'.\n \"\"\"\n return self._nationality\n\n @property\n def position(self):\n \"\"\"\n Returns a ``string`` of the player's primary position(s). 
Multiple\n positions are separated by commas.\n \"\"\"\n return self._position\n\n @int_property_decorator\n def age(self):\n \"\"\"\n Returns an ``int`` of the player's age as of August 1 for winter\n leagues and February 1 for summer leagues for the given season.\n \"\"\"\n return self._age\n\n @int_property_decorator\n def matches_played(self):\n \"\"\"\n Returns an ``int`` of the number of matches the player has participated\n in.\n \"\"\"\n return self._matches_played\n\n @int_property_decorator\n def starts(self):\n \"\"\"\n Returns an ``int`` of the number of games the player has started.\n \"\"\"\n return self._starts\n\n @int_property_decorator\n def minutes(self):\n \"\"\"\n Returns an ``int`` of the number of minutes the player has spent on the\n field in all competitions.\n \"\"\"\n return self._minutes.replace(',', '')\n\n @int_property_decorator\n def goals(self):\n \"\"\"\n Returns an ``int`` of the number of goals the player has scored.\n \"\"\"\n return self._goals\n\n @int_property_decorator\n def assists(self):\n \"\"\"\n Returns an ``int`` of the number of goals the player has assisted.\n \"\"\"\n return self._assists\n\n @int_property_decorator\n def penalty_kicks(self):\n \"\"\"\n Returns an ``int`` of the number of penalty kicks the player has scored\n during regular play.\n \"\"\"\n return self._penalty_kicks\n\n @int_property_decorator\n def penalty_kick_attempts(self):\n \"\"\"\n Returns an ``int`` of the number of penalty kicks the player has\n attempted.\n \"\"\"\n return self._penalty_kick_attempts\n\n @int_property_decorator\n def yellow_cards(self):\n \"\"\"\n Returns an ``int`` of the number of yellow cards the player has\n accumulated during the season.\n \"\"\"\n return self._yellow_cards\n\n @int_property_decorator\n def red_cards(self):\n \"\"\"\n Returns an ``int`` of the number of red cards the player has\n accumulated during the season.\n \"\"\"\n return self._red_cards\n\n @float_property_decorator\n def goals_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of goals the player has scored per\n 90 minutes on the field.\n \"\"\"\n return self._goals_per_90\n\n @float_property_decorator\n def assists_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of goals the player has assisted per\n 90 minutes on the field.\n \"\"\"\n return self._assists_per_90\n\n @float_property_decorator\n def goals_and_assists_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of goals the player has either scored\n or assisted per 90 minutes on the field.\n \"\"\"\n return self._goals_and_assists_per_90\n\n @float_property_decorator\n def goals_non_penalty_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of non-penalty goals the player has\n scored per 90 minutes on the field.\n \"\"\"\n return self._goals_non_penalty_per_90\n\n @float_property_decorator\n def goals_and_assists_non_penalty_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of non-penalty goals the player has\n either scored or assisted per 90 minutes on the field.\n \"\"\"\n return self._goals_and_assists_non_penalty_per_90\n\n @float_property_decorator\n def expected_goals(self):\n \"\"\"\n Returns a ``float`` of the number of goals the player was expected to\n score based on the quality and quantity of shots taken.\n \"\"\"\n return self._expected_goals\n\n @float_property_decorator\n def expected_goals_non_penalty(self):\n \"\"\"\n Returns a ``float`` of the number of non-penalty goals the player was\n expected to score based on the quality and quantity of 
shots taken.\n \"\"\"\n return self._expected_goals_non_penalty\n\n @float_property_decorator\n def expected_assists(self):\n \"\"\"\n Returns a ``float`` of the number of goals the player was expected go\n assist based on the quality and quantity of teammate shots taken.\n \"\"\"\n return self._expected_assists\n\n @float_property_decorator\n def expected_goals_per_90(self):\n \"\"\"\n Returns a ``float`` of the player's expected goals per 90 minutes\n played.\n \"\"\"\n return self._expected_goals_per_90\n\n @float_property_decorator\n def expected_assists_per_90(self):\n \"\"\"\n Returns a ``float`` of the player's expected assists per 90 minutes\n played.\n \"\"\"\n return self._expected_assists_per_90\n\n @float_property_decorator\n def expected_goals_and_assists_per_90(self):\n \"\"\"\n Returns a ``float`` of the player's expected goals and assists per 90\n minutes played.\n \"\"\"\n return self._expected_goals_and_assists_per_90\n\n @float_property_decorator\n def expected_goals_non_penalty_per_90(self):\n \"\"\"\n Returns a ``float`` of the player's expected non-penalty goals per 90\n minutes played.\n \"\"\"\n return self._expected_goals_non_penalty_per_90\n\n @float_property_decorator\n def expected_goals_and_assists_non_penalty_per_90(self):\n \"\"\"\n Returns a ``float`` of the player's expected non-penalty goals and\n assists per 90 minutes played.\n \"\"\"\n return self._expected_goals_and_assists_non_penalty_per_90\n\n @int_property_decorator\n def own_goals(self):\n \"\"\"\n Returns an ``int`` of the number of own goals the player has conceded.\n \"\"\"\n return self._own_goals\n\n @int_property_decorator\n def goals_against(self):\n \"\"\"\n Returns an ``int`` of the number of goals a keeper has conceded.\n \"\"\"\n return self._goals_against\n\n @int_property_decorator\n def own_goals_against(self):\n \"\"\"\n Returns an ``int`` of the number of own goals the team scored on a\n keeper.\n \"\"\"\n return self._own_goals_against\n\n @float_property_decorator\n def goals_against_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of goals a keeper has coneceded per\n 90 minutes played.\n \"\"\"\n return self._goals_against_per_90\n\n @int_property_decorator\n def shots_on_target_against(self):\n \"\"\"\n Returns an ``int`` of the number of shots on target a keeper has faced.\n \"\"\"\n return self._shots_on_target_against\n\n @int_property_decorator\n def saves(self):\n \"\"\"\n Returns an ``int`` of the number of shots a keeper has saved.\n \"\"\"\n return self._saves\n\n @float_property_decorator\n def save_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of shots the keeper saved.\n Percentage ranges from 0-1.\n \"\"\"\n return self._save_percentage\n\n @int_property_decorator\n def wins(self):\n \"\"\"\n Returns an ``int`` of the number of games a keeper has won.\n \"\"\"\n return self._wins\n\n @int_property_decorator\n def draws(self):\n \"\"\"\n Returns an ``int`` of the number of games a keeper has drawn.\n \"\"\"\n return self._draws\n\n @int_property_decorator\n def losses(self):\n \"\"\"\n Returns an ``int`` of the number of games a keeper has lost.\n \"\"\"\n return self._losses\n\n @int_property_decorator\n def clean_sheets(self):\n \"\"\"\n Returns an ``int`` of the number of clean sheets a keeper has\n registered.\n \"\"\"\n return self._clean_sheets\n\n @float_property_decorator\n def clean_sheet_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of games a keeper has\n participated in that resulted in a clean sheet.\n 
\"\"\"\n return self._clean_sheet_percentage\n\n @int_property_decorator\n def penalty_kicks_attempted(self):\n \"\"\"\n Returns an ``int`` of the number of penalty kicks a keeper has faced\n during regular play.\n \"\"\"\n return self._penalty_kicks_attempted\n\n @int_property_decorator\n def penalty_kicks_allowed(self):\n \"\"\"\n Returns an ``int`` of the number of penalty kicks a keeper has conceded\n during regular play.\n \"\"\"\n return self._penalty_kicks_allowed\n\n @int_property_decorator\n def penalty_kicks_saved(self):\n \"\"\"\n Returns an ``int`` of the number of penalty kicks a keeper has saved\n during regular play.\n \"\"\"\n return self._penalty_kicks_saved\n\n @int_property_decorator\n def penalty_kicks_missed(self):\n \"\"\"\n Returns an ``int`` of the number of penalty kicks a keeper has faced\n where the opponent missed the goal.\n \"\"\"\n return self._penalty_kicks_missed\n\n @int_property_decorator\n def free_kick_goals_against(self):\n \"\"\"\n Returns an ``int`` of the number of goals a keeper conceded as a result\n of an opponent's free kick.\n \"\"\"\n return self._free_kick_goals_against\n\n @int_property_decorator\n def corner_kick_goals_against(self):\n \"\"\"\n Returns an ``int`` of the number of goals a keeper conceded as a result\n of an opponent's corner kick.\n \"\"\"\n return self._corner_kick_goals_against\n\n @float_property_decorator\n def post_shot_expected_goals(self):\n \"\"\"\n Returns a ``float`` of the number of goals a keeper was expected to\n concede.\n \"\"\"\n return self._post_shot_expected_goals\n\n @float_property_decorator\n def post_shot_expected_goals_per_shot(self):\n \"\"\"\n Returns a ``float`` of the number of goals a keeper was expected to\n concede per shot faced.\n \"\"\"\n return self._post_shot_expected_goals_per_shot\n\n @float_property_decorator\n def post_shot_expected_goals_minus_allowed(self):\n \"\"\"\n Returns a ``float`` of the number of goals a keeper was expected to\n concede minus the number of goals they actually conceded.\n \"\"\"\n return self._post_shot_expected_goals_minus_allowed\n\n @float_property_decorator\n def post_shot_expected_goals_minus_allowed_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of goals a keeper was expected to\n concede minus the number of goals they actually conceded, per 90\n minutes played.\n \"\"\"\n return self._post_shot_expected_goals_minus_allowed_per_90\n\n @int_property_decorator\n def launches_completed(self):\n \"\"\"\n Returns an ``int`` of the number of passes longer than 40 yards a\n keeper completed.\n \"\"\"\n return self._launches_completed\n\n @int_property_decorator\n def launches_attempted(self):\n \"\"\"\n Returns an ``int`` of the number of passes longer than 40 yards a\n keeper attempted.\n \"\"\"\n return self._launches_attempted\n\n @float_property_decorator\n def launch_completion_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of passes longer than 40 yards a\n keeper completed. 
Percentage ranges from 0-100.\n \"\"\"\n return self._launch_completion_percentage\n\n @int_property_decorator\n def keeper_passes_attempted(self):\n \"\"\"\n Returns an ``int`` of the number of non-goal kick passes a keeper\n attempted.\n \"\"\"\n return self._keeper_passes_attempted\n\n @int_property_decorator\n def throws_attempted(self):\n \"\"\"\n Returns an ``int`` of the number of throws a keeper attempted.\n \"\"\"\n return self._throws_attempted\n\n @float_property_decorator\n def launch_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of passes a keeper makes longer\n than 40 yards excluding goal kicks. Percentage ranges from 0-100.\n \"\"\"\n return self._launch_percentage\n\n @float_property_decorator\n def average_keeper_pass_length(self):\n \"\"\"\n Returns a ``float`` of the average pass length for a keeper in yards\n excluding goal kicks.\n \"\"\"\n return self._average_keeper_pass_length\n\n @int_property_decorator\n def goal_kicks_attempted(self):\n \"\"\"\n Returns an ``int`` of the number of goal kicks a keeper attempted.\n \"\"\"\n return self._goal_kicks_attempted\n\n @float_property_decorator\n def goal_kick_launch_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of goal kicks a keeper has\n launched further than 40 yards. Percentage ranges from 0-100.\n \"\"\"\n return self._goal_kick_launch_percentage\n\n @float_property_decorator\n def average_goal_kick_length(self):\n \"\"\"\n Returns a ``float`` of the average pass length for goal kicks in yards\n for a keeper.\n \"\"\"\n return self._average_goal_kick_length\n\n @int_property_decorator\n def opponent_cross_attempts(self):\n \"\"\"\n Returns an ``int`` of the number of crosses a keeper has faced.\n \"\"\"\n return self._opponent_cross_attempts\n\n @int_property_decorator\n def opponent_cross_stops(self):\n \"\"\"\n Returns an ``int`` of the number of crosses a keeper has successfully\n stopped.\n \"\"\"\n return self._opponent_cross_stops\n\n @float_property_decorator\n def opponent_cross_stop_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of crosses the keeper has\n successfully stopped. 
Percentage ranges from 0-100.\n \"\"\"\n return self._opponent_cross_stop_percentage\n\n @int_property_decorator\n def keeper_actions_outside_penalty_area(self):\n \"\"\"\n Returns an ``int`` of the number of defensive actions a keeper made\n outside the penalty area.\n \"\"\"\n return self._keeper_actions_outside_penalty_area\n\n @float_property_decorator\n def keeper_actions_outside_penalty_area_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of defensive actions a keeper made\n outside the penalty area per 90 minutes played.\n \"\"\"\n return self._keeper_actions_outside_penalty_area_per_90\n\n @float_property_decorator\n def average_keeper_action_outside_penalty_distance(self):\n \"\"\"\n Returns a ``float`` of the average distance from goal in yards a keeper\n performed a defensive action outside the penalty area.\n \"\"\"\n return self._average_keeper_action_outside_penalty_distance\n\n @int_property_decorator\n def shots(self):\n \"\"\"\n Returns an ``int`` of the number of shots the player has taken.\n \"\"\"\n return self._shots\n\n @int_property_decorator\n def shots_on_target(self):\n \"\"\"\n Returns an ``int`` of the number of shots on target the player has\n taken.\n \"\"\"\n return self._shots_on_target\n\n @int_property_decorator\n def free_kick_shots(self):\n \"\"\"\n Returns an ``int`` of the number of shots the player has taken from\n free kicks.\n \"\"\"\n return self._free_kick_shots\n\n @float_property_decorator\n def shots_on_target_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of shots taken by the player that\n were on target. Percentage ranges from 0-100.\n \"\"\"\n return self._shots_on_target_percentage\n\n @float_property_decorator\n def shots_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of shots the player has taken per 90\n minutes played.\n \"\"\"\n return self._shots_per_90\n\n @float_property_decorator\n def shots_on_target_per_90(self):\n \"\"\"\n Returns a ``float`` of the number of shots on target the player has\n taken per 90 minutes played.\n \"\"\"\n return self._shots_on_target_per_90\n\n @float_property_decorator\n def goals_per_shot(self):\n \"\"\"\n Returns a ``float`` of the average number of goals scored per shot\n taken by the player.\n \"\"\"\n return self._goals_per_shot\n\n @float_property_decorator\n def goals_per_shot_on_target(self):\n \"\"\"\n Returns a ``float`` of the average number of goals scored per shot on\n target by the player.\n \"\"\"\n return self._goals_per_shot_on_target\n\n @float_property_decorator\n def expected_goals_non_penalty_per_shot(self):\n \"\"\"\n Returns a ``float`` of the nuber of non-penalty goals the player was\n expected to score per shot.\n \"\"\"\n return self._expected_goals_non_penalty_per_shot\n\n @float_property_decorator\n def goals_minus_expected(self):\n \"\"\"\n Returns a ``float`` of the number of goals scored minus the number of\n goals the player was expected to score.\n \"\"\"\n return self._goals_minus_expected\n\n @float_property_decorator\n def non_penalty_minus_expected_non_penalty(self):\n \"\"\"\n Returns a ``float`` of the number of non-penalty goals scored minus the\n number of non-penalty goals the player was expected to score.\n \"\"\"\n return self._non_penalty_minus_expected_non_penalty\n\n @float_property_decorator\n def assists_minus_expected(self):\n \"\"\"\n Returns a ``float`` of the number of assists the player registered\n minus the actual number of assists the player tallied.\n \"\"\"\n return self._assists_minus_expected\n\n 
@int_property_decorator\n def key_passes(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made that\n directly lead to a shot.\n \"\"\"\n return self._key_passes\n\n @int_property_decorator\n def passes_completed(self):\n \"\"\"\n Returns an ``int`` of the total number of passes the player has\n completed.\n \"\"\"\n return self._passes_completed\n\n @int_property_decorator\n def passes_attempted(self):\n \"\"\"\n Returns an ``int`` of the total number of passes the player has\n attempted.\n \"\"\"\n return self._passes_attempted\n\n @float_property_decorator\n def pass_completion(self):\n \"\"\"\n Returns a ``float`` of the player's overall pass completion rating.\n Percentage ranges from 0-100.\n \"\"\"\n return self._pass_completion\n\n @int_property_decorator\n def short_passes_completed(self):\n \"\"\"\n Returns an ``int`` of the total number of passes under 5 yards the\n player has completed.\n \"\"\"\n return self._short_passes_completed\n\n @int_property_decorator\n def short_passes_attempted(self):\n \"\"\"\n Returns an ``int`` of the total number of passes under 5 yards the\n player has attempted.\n \"\"\"\n return self._short_passes_attempted\n\n @float_property_decorator\n def short_pass_completion(self):\n \"\"\"\n Returns a ``float`` of the player's overall pass completion rating for\n passes under 5 yards. Percentage ranges from 0-100.\n \"\"\"\n return self._short_pass_completion\n\n @int_property_decorator\n def medium_passes_completed(self):\n \"\"\"\n Returns an ``int`` of the total number of passes between 5 and 25 yards\n the player has completed.\n \"\"\"\n return self._medium_passes_completed\n\n @int_property_decorator\n def medium_passes_attempted(self):\n \"\"\"\n Returns an ``int`` of the total number of passes between 5 and 25 yards\n the player has attempted.\n \"\"\"\n return self._medium_passes_attempted\n\n @float_property_decorator\n def medium_pass_completion(self):\n \"\"\"\n Returns a ``float`` of the player's overall pass completion rating for\n passes between 5 and 25 yards. Percentage ranges from 0-100.\n \"\"\"\n return self._medium_pass_completion\n\n @int_property_decorator\n def long_passes_completed(self):\n \"\"\"\n Returns an ``int`` of the total number of passes greater than 25 yards\n the player has completed.\n \"\"\"\n return self._long_passes_completed\n\n @int_property_decorator\n def long_passes_attempted(self):\n \"\"\"\n Returns an ``int`` of the total number of passes greater than 25 yards\n the player has attempted.\n \"\"\"\n return self._long_passes_attempted\n\n @float_property_decorator\n def long_pass_completion(self):\n \"\"\"\n Returns a ``float`` of the player's overall pass completion rating for\n passes greater than 25 yards. 
Percentage ranges from 0-100.\n \"\"\"\n return self._long_pass_completion\n\n @int_property_decorator\n def left_foot_passes(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made with their\n left foot.\n \"\"\"\n return self._left_foot_passes\n\n @int_property_decorator\n def right_foot_passes(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made with their\n right foot.\n \"\"\"\n return self._right_foot_passes\n\n @int_property_decorator\n def free_kick_passes(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made from a free\n kick.\n \"\"\"\n return self._free_kick_passes\n\n @int_property_decorator\n def through_balls(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made between the\n last line of defenders into open space.\n \"\"\"\n return self._through_balls\n\n @int_property_decorator\n def corner_kicks(self):\n \"\"\"\n Returns an ``int`` of the number of corner kicks the player has taken.\n \"\"\"\n return self._corner_kicks\n\n @int_property_decorator\n def throw_ins(self):\n \"\"\"\n Returns an ``int`` of the number of throw-ins the player took.\n \"\"\"\n return self._throw_ins\n\n @int_property_decorator\n def final_third_passes(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made into the\n final third.\n \"\"\"\n return self._final_third_passes\n\n @int_property_decorator\n def penalty_area_passes(self):\n \"\"\"\n Returns an ``int`` of the number of passes the player made into the\n opposing penalty area.\n \"\"\"\n return self._penalty_area_passes\n\n @int_property_decorator\n def penalty_area_crosses(self):\n \"\"\"\n Returns an ``int`` of the number of non-set piece crosses the player\n made into the penalty area.\n \"\"\"\n return self._penalty_area_crosses\n\n @int_property_decorator\n def minutes_per_match(self):\n \"\"\"\n Returns an ``int`` of the average number of minutes the player played\n per match.\n \"\"\"\n return self._minutes_per_match\n\n @float_property_decorator\n def minutes_played_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of time the player has been on\n the field for all games the team participated in. 
Percentage ranges\n from 0-100.\n \"\"\"\n return self._minutes_played_percentage\n\n @float_property_decorator\n def nineties_played(self):\n \"\"\"\n Returns a ``float`` of number of the number of minutes the player has\n played divided by 90.\n \"\"\"\n return self._nineties_played\n\n @int_property_decorator\n def minutes_per_start(self):\n \"\"\"\n Returns an ``int`` of the number of minutes the player plays on average\n per game started.\n \"\"\"\n return self._minutes_per_start\n\n @int_property_decorator\n def subs(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has come on as a\n sub.\n \"\"\"\n return self._subs\n\n @int_property_decorator\n def minutes_per_sub(self):\n \"\"\"\n Returns an ``int`` of the average number of minutes the player has\n played per game after coming in as a sub.\n \"\"\"\n return self._minutes_per_sub\n\n @int_property_decorator\n def unused_sub(self):\n \"\"\"\n Returns an ``int`` of the number of times the player was an unused sub\n and spent the entirety of the game on the bench.\n \"\"\"\n return self._unused_sub\n\n @float_property_decorator\n def points_per_match(self):\n \"\"\"\n Returns a ``float`` of the average number of points the team has gained\n per game in which the player participated.\n \"\"\"\n return self._points_per_match\n\n @int_property_decorator\n def goals_scored_on_pitch(self):\n \"\"\"\n Returns an ``int`` of the number of goals the team has scored while the\n player was on the field.\n \"\"\"\n return self._goals_scored_on_pitch\n\n @int_property_decorator\n def goals_against_on_pitch(self):\n \"\"\"\n Returns an ``int`` of the number of goals the team has conceded while\n the player was on the field.\n \"\"\"\n return self._goals_against_on_pitch\n\n @int_property_decorator\n def goal_difference_on_pitch(self):\n \"\"\"\n Returns an ``int`` of the team's goal difference while the player is on\n the field.\n \"\"\"\n return self._goal_difference_on_pitch\n\n @float_property_decorator\n def goal_difference_on_pitch_per_90(self):\n \"\"\"\n Returns a ``float`` of the team's average goal difference while the\n player is on the field, per 90 minutes played.\n \"\"\"\n return self._goal_difference_on_pitch_per_90\n\n @float_property_decorator\n def net_difference_on_pitch_per_90(self):\n \"\"\"\n Returns a ``float`` of the team's goal difference while the player is\n on the pitch minus the team's goal difference while the player is off\n the pitch, per 90 minutes.\n \"\"\"\n return self._net_difference_on_pitch_per_90\n\n @float_property_decorator\n def expected_goals_on_pitch(self):\n \"\"\"\n Returns a ``float`` of the number of goals the team was expected to\n score while the player was on the pitch.\n \"\"\"\n return self._expected_goals_on_pitch\n\n @float_property_decorator\n def expected_goals_against_on_pitch(self):\n \"\"\"\n Returns a ``float`` of the number of goals the team was expected to\n concede while the player was on the pitch.\n \"\"\"\n return self._expected_goals_against_on_pitch\n\n @float_property_decorator\n def expected_goal_difference(self):\n \"\"\"\n Returns a ``float`` of the difference between expected team goals\n scored and conceded while the player was on the pitch.\n \"\"\"\n return self._expected_goal_difference\n\n @float_property_decorator\n def expected_goal_difference_per_90(self):\n \"\"\"\n Returns a ``float`` of the difference between expected team goals\n scored and conceded while the player was on the pitch, per 90 minutes.\n \"\"\"\n return 
self._expected_goal_difference_per_90\n\n @float_property_decorator\n def net_expected_goal_difference_per_90(self):\n \"\"\"\n Returns a ``float`` of the team's expected goal difference while the\n player is on the pitch minus the team's exepcted goal difference while\n the player is off the pitch, per 90 minutes.\n \"\"\"\n return self._net_expected_goal_difference_per_90\n\n @int_property_decorator\n def soft_reds(self):\n \"\"\"\n Returns an ``int`` of the number of games where the player received two\n yellow cards, resulting in a red, or a \"soft red\".\n \"\"\"\n return self._soft_reds\n\n @int_property_decorator\n def fouls_committed(self):\n \"\"\"\n Returns an ``int`` of the number of fouls the player has committed.\n \"\"\"\n return self._fouls_committed\n\n @int_property_decorator\n def fouls_drawn(self):\n \"\"\"\n Returns an ``int`` of the number of fouls the player has been the\n victim of.\n \"\"\"\n return self._fouls_drawn\n\n @int_property_decorator\n def offsides(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has been called\n offside.\n \"\"\"\n return self._offsides\n\n @int_property_decorator\n def crosses(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has crossed the\n ball.\n \"\"\"\n return self._crosses\n\n @int_property_decorator\n def tackles_won(self):\n \"\"\"\n Returns an ``int`` of the number of tackles the player has won.\n \"\"\"\n return self._tackles_won\n\n @int_property_decorator\n def interceptions(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has intercepted\n the ball.\n \"\"\"\n return self._interceptions\n\n @int_property_decorator\n def penalty_kicks_won(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has won a penalty\n kick for the team.\n \"\"\"\n return self._penalty_kicks_won\n\n @int_property_decorator\n def penalty_kicks_conceded(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has conceded a\n penalty kick to the opposition.\n \"\"\"\n return self._penalty_kicks_conceded\n\n @int_property_decorator\n def successful_dribbles(self):\n \"\"\"\n Returns an ``int`` of the number of dribbles the player has completed\n successfully.\n \"\"\"\n return self._successful_dribbles\n\n @int_property_decorator\n def attempted_dribbles(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has attempted a\n dribble.\n \"\"\"\n return self._attempted_dribbles\n\n @float_property_decorator\n def dribble_success_rate(self):\n \"\"\"\n Returns a ``float`` of the percentage of attempted dribbles the player\n has successfully completed. 
Percentage ranges from 0-100.\n \"\"\"\n return self._dribble_success_rate\n\n @int_property_decorator\n def players_dribbled_past(self):\n \"\"\"\n Returns an ``int`` of the number of opponents the player dribbled past.\n \"\"\"\n return self._players_dribbled_past\n\n @int_property_decorator\n def nutmegs(self):\n \"\"\"\n Returns an ``int`` of the number of opponents the player has nutmegged.\n \"\"\"\n return self._nutmegs\n\n @int_property_decorator\n def dribblers_tackled(self):\n \"\"\"\n Returns an ``int`` of the number of opponents who were attempting a\n dribble that the player tackled.\n \"\"\"\n return self._dribblers_tackled\n\n @int_property_decorator\n def dribblers_contested(self):\n \"\"\"\n Returns an ``int`` of the number of opponents who were attempting a\n dribble that the player contested.\n \"\"\"\n return self._dribblers_contested\n\n @float_property_decorator\n def tackle_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of opposing dribblers the player\n has successfully tackled. Percentage ranges from 0-100.\n \"\"\"\n return self._tackle_percentage\n\n @int_property_decorator\n def times_dribbled_past(self):\n \"\"\"\n Returns an ``int`` of the number of times the player has been dribbled\n past.\n \"\"\"\n return self._times_dribbled_past\n\n\nclass Roster:\n \"\"\"\n Get stats for all players on a roster.\n\n Request a team's roster for a given season and create instances of the\n Player class for each player, containing a detailed list of the player's\n statistics and information for the season.\n\n Parameters\n ----------\n squad_id : string\n The team's 8-digit squad ID or the team's name, such as '361ca564' or\n 'Tottenham Hotspur'.\n doc : PyQuery object (optional)\n If passed to the class instantiation, this will be used to pull all\n information instead of making another request to the website. 
If the\n document is not provided, it will be pulled during a later step.\n \"\"\"\n def __init__(self, squad_id, doc=None):\n self._players = []\n\n self._squad_id = _lookup_team(squad_id)\n player_data_dict = self._pull_stats(doc)\n if not player_data_dict:\n return None\n self._instantiate_players(player_data_dict)\n\n def __call__(self, player):\n \"\"\"\n Return a specified player on the roster.\n\n Returns a specific player as requested by the passed name or player ID.\n The input string must either match a player's 8-digit unique ID or the\n named listed on fbref.com for the player.\n\n Parameters\n ----------\n player : string\n A ``string`` of either the player's 8-digit unique ID or the name\n listed on fbref.com for the player.\n\n Returns\n -------\n Player instance\n If the requested player can be found, their Player instance is\n returned.\n\n Raises\n ------\n ValueError\n If the requested player cannot be matched with a player in the\n squad.\n \"\"\"\n for player_instance in self._players:\n if not player_instance.name or not player_instance.player_id:\n continue # pragma: no cover\n if player.lower() == player_instance.player_id.lower():\n return player_instance\n if player.lower().strip() == player_instance.name.lower().strip():\n return player_instance\n raise ValueError('No player found with the requested name or ID')\n\n def __repr__(self):\n \"\"\"\n Returns a ``list`` of all players for the given team.\n \"\"\"\n return self._players\n\n def __iter__(self):\n \"\"\"\n Returns an iterator of all of the players on the given team's roster.\n \"\"\"\n return iter(self.__repr__())\n\n def __len__(self):\n \"\"\"\n Returns the number of player on the given team's roster.\n \"\"\"\n return len(self.__repr__())\n\n def _player_id(self, player_data):\n \"\"\"\n Parse the player's ID from a row.\n\n The player ID is embedded within the header column of each individual\n player's row within a stats table. 
The specific ID is in a URL and can\n be easily parsed and returned.\n\n Parameters\n ----------\n player_data : PyQuery object\n A PyQuery object representing a single row in a stats table for a\n player.\n\n Returns\n -------\n string\n Returns a ``string`` of the player's unique 8-digit player ID.\n \"\"\"\n player = player_data('th[data-stat=\"player\"]')\n player_id = player('a').attr('href')\n try:\n player_id = re.sub(r'.*\\/players\\/', '', player_id)\n player_id = re.sub(r'\\/.*', '', player_id)\n except TypeError:\n player_id = None\n return player_id\n\n def _add_stats_data(self, stats_table, player_data_dict):\n \"\"\"\n Add each player's stats rows to a dictionary.\n\n Given the player stats are spread throughout many tables, they should\n be combined by player for a single reference for each player for easier\n lookups.\n\n Parameters\n ----------\n stats_table : generator\n A generator of all row items in a given table.\n player_data_dict : {str: {'data': str}} dictionary\n A dictionary where every key is the player's ID and every value is\n another dictionary with a 'data' key which contains the string\n version of the row data for the matched player.\n\n Returns\n -------\n dictionary\n An updated version of the player_data_dict with the passed table\n row information included.\n \"\"\"\n for player_data in stats_table:\n if 'class=\"thead\"' in str(player_data):\n continue # pragma: no cover\n player_id = self._player_id(player_data)\n if not player_id:\n continue\n try:\n player_data_dict[player_id]['data'] += player_data\n except KeyError:\n player_data_dict[player_id] = {'data': player_data}\n return player_data_dict\n\n def _pull_stats(self, doc):\n \"\"\"\n Download the team page and pull all stats.\n\n Download the requested team's season page and pull all of the relevant\n stats tables for later parsing.\n\n Parameters\n ----------\n doc : PyQuery object\n If passed to the class instantiation, this will be used to pull all\n information instead of making another request to the website. 
If\n the document is not provided, this value will be None.\n\n Returns\n -------\n dictionary\n Returns a ``dictionary`` where every key is the player's ID and\n every value is another dictionary with a 'data' key which contains\n the string version of the row data for the matched player.\n \"\"\"\n if not doc:\n doc = pq(SQUAD_URL % self._squad_id)\n doc = pq(_remove_html_comment_tags(doc))\n stats_table = []\n player_data_dict = {}\n\n # Most leagues use the 'stats_*_ks_combined' tag for competitions, but\n # some, like the MLS in North America, use a different table ID.\n for table_id in ['table#stats_standard_ks_combined',\n 'table#stats_keeper_ks_combined',\n 'table#stats_keeper_adv_ks_combined',\n 'table#stats_shooting_ks_combined',\n 'table#stats_passing_ks_combined',\n 'table#stats_playing_time_ks_combined',\n 'table#stats_misc_ks_combined',\n 'table#stats_standard_10090',\n 'table#stats_keeper_10090',\n 'table#stats_keeper_adv_10090',\n 'table#stats_shooting_10090',\n 'table#stats_passing_10090',\n 'table#stats_playing_time_10090',\n 'table#stats_misc_10090']:\n table = _get_stats_table(doc, table_id)\n if not table:\n continue\n player_data_dict = self._add_stats_data(table, player_data_dict)\n return player_data_dict\n\n def _instantiate_players(self, player_data_dict):\n \"\"\"\n Create Player instances for each squad member.\n\n Given the stats information for all players, an instance of the Player\n class should be created and appended to the overall list of players for\n easy future reference.\n\n Parameters\n ----------\n player_data_dict : {str: {'data': str}} dictionary\n A dictionary where every key is the player's ID and every value is\n another dictionary with a 'data' key which contains the string\n version of the row data for the matched player.\n \"\"\"\n for player_id, player_data in player_data_dict.items():\n player = SquadPlayer(player_data['data'], player_id)\n self._players.append(player)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
bwalker1/spatial-constrained-clustering-and-pseudotime | [
"1b5fda9c589d29b24537110dd63f106437de1eec"
] | [
"python_codes/sedr/utils_func.py"
] | [
"import os\nimport torch\nimport scanpy as sc\nimport pandas as pd\nfrom pathlib import Path\nfrom scanpy.readwrite import read_visium\nfrom scanpy._utils import check_presence_download\nfrom sklearn.neighbors import kneighbors_graph\n\ndef mk_dir(input_path):\n if not os.path.exists(input_path):\n os.makedirs(input_path)\n return input_path\n\ndef estimate_cutoff_knn(pts, k=10):\n A_knn = kneighbors_graph(pts, n_neighbors=k, mode='distance')\n est_cut = A_knn.sum() / float(A_knn.count_nonzero())\n return est_cut\n\ndef corruption(x, edge_index):\n return x[torch.randperm(x.size(0))], edge_index\n\ndef adata_preprocess(i_adata, min_cells=3, pca_n_comps=300):\n print('===== Preprocessing Data ')\n sc.pp.filter_genes(i_adata, min_cells=min_cells)\n adata_X = sc.pp.normalize_total(i_adata, target_sum=1, exclude_highly_expressed=True, inplace=False)['X']\n adata_X = sc.pp.scale(adata_X)\n adata_X = sc.pp.pca(adata_X, n_comps=pca_n_comps)\n return adata_X\n\n\ndef load_ST_file(file_fold, count_file='filtered_feature_bc_matrix.h5', load_images=True, file_Adj=None):\n adata_h5 = sc.read_visium(file_fold, load_images=load_images, count_file=count_file)\n adata_h5.var_names_make_unique()\n\n if load_images is False:\n if file_Adj is None:\n file_Adj = os.path.join(file_fold, \"spatial/tissue_positions_list.csv\")\n\n positions = pd.read_csv(file_Adj, header=None)\n positions.columns = [\n 'barcode',\n 'in_tissue',\n 'array_row',\n 'array_col',\n 'pxl_col_in_fullres',\n 'pxl_row_in_fullres',\n ]\n positions.index = positions['barcode']\n adata_h5.obs = adata_h5.obs.join(positions, how=\"left\")\n adata_h5.obsm['spatial'] = adata_h5.obs[['pxl_row_in_fullres', 'pxl_col_in_fullres']].to_numpy()\n adata_h5.obs.drop(columns=['barcode', 'pxl_row_in_fullres', 'pxl_col_in_fullres'], inplace=True)\n\n print('adata: (' + str(adata_h5.shape[0]) + ', ' + str(adata_h5.shape[1]) + ')')\n return adata_h5\n\n\n# from scanpy\ndef _download_visium_dataset(\n sample_id: str,\n spaceranger_version: str,\n base_dir='./data/',\n):\n import tarfile\n\n url_prefix = f'https://cf.10xgenomics.com/samples/spatial-exp/{spaceranger_version}/{sample_id}/'\n\n sample_dir = Path(mk_dir(os.path.join(base_dir, sample_id)))\n\n # Download spatial data\n tar_filename = f\"{sample_id}_spatial.tar.gz\"\n tar_pth = Path(os.path.join(sample_dir, tar_filename))\n check_presence_download(filename=tar_pth, backup_url=url_prefix + tar_filename)\n with tarfile.open(tar_pth) as f:\n for el in f:\n if not (sample_dir / el.name).exists():\n f.extract(el, sample_dir)\n\n # Download counts\n check_presence_download(\n filename=sample_dir / \"filtered_feature_bc_matrix.h5\",\n backup_url=url_prefix + f\"{sample_id}_filtered_feature_bc_matrix.h5\",\n )\n\n\ndef load_visium_sge(sample_id='V1_Breast_Cancer_Block_A_Section_1', save_path='./data/'):\n if \"V1_\" in sample_id:\n spaceranger_version = \"1.1.0\"\n else:\n spaceranger_version = \"1.2.0\"\n _download_visium_dataset(sample_id, spaceranger_version, base_dir=save_path)\n adata = read_visium(os.path.join(save_path, sample_id))\n\n print('adata: (' + str(adata.shape[0]) + ', ' + str(adata.shape[1]) + ')')\n return adata\n"
] | [
[
"sklearn.neighbors.kneighbors_graph",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
RayYuqian/standard-es | [
"a7a4bf7a82d1ce18d7a81e57ffae6e3bbc883fcd"
] | [
"algorithms/recombination.py"
] | [
"import numpy as np\nimport random\nfrom utils.distributions import bernoulli\nimport math\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass Recombination(object):\n def __init__(self):\n pass\n\n def recombination(self, x):\n pass\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass EvolutionStrategyRecombination(Recombination):\n def __init__(self, type='es', bounds=(-np.infty, np.infty), params=None,\n y_real=None, fun=None):\n super().__init__()\n self.type = type\n self.bounds = bounds\n assert (0. <= params['SIGMA'] <= 3.), 'SIGMA must be in [0, 3]'\n # assert type in [], 'type must be one in {}'\n\n self.params = params\n\n self.SIGMA = params['SIGMA']\n self.ALPHA = params['ALPHA']\n self.BETA = params['BETA']\n self.update = params['update']\n self.alpha_matrix = np.zeros((140, 140))\n self.y_real = y_real\n self.pop_size = params['pop_size']\n\n self.sigma_list = np.zeros((140, 1))\n for i in range(self.sigma_list.shape[0]):\n self.sigma_list[i] = self.SIGMA\n\n self.bias_coe = np.random.uniform(-1, 1, 4)\n\n self.fun = fun\n\n def recombination(self, x):\n # discrete recombination\n if self.type in ['es_a1', 'es_a2', 'es_a3', 'es_a4', 'es_a5', 'es_a6', 'es_a7', 'es_a8']:\n\n offsprings = []\n\n for i in range(140):\n\n a = random.randint(0, x.shape[0]-1)\n b = random.randint(0, x.shape[0]-1)\n c = random.randint(0, x.shape[0]-1)\n\n p1 = x[a]\n p2 = x[b]\n p3 = x[c]\n\n parents = [p1, p2, p3]\n offspring = []\n\n for z in range(4):\n rand = random.randint(0, 2)\n offspring.append(parents[rand][z])\n\n offsprings.append(offspring)\n\n offsprings = np.asarray(offsprings)\n\n return offsprings\n\n # intermediate recombination\n elif self.type in ['es_b1', 'es_b2', 'es_b3', 'es_b4', 'es_b5', 'es_b6', 'es_b7', 'es_b8']:\n\n offsprings = []\n\n for i in range(140):\n\n a = random.randint(0, x.shape[0]-1)\n b = random.randint(0, x.shape[0]-1)\n c = random.randint(0, x.shape[0]-1)\n\n p1 = x[a]\n p2 = x[b]\n p3 = x[c]\n\n offspring = []\n\n for z in range(4):\n param = (p1[z] + p2[z] + p3[z]) / 3\n offspring.append(param)\n\n offsprings.append(offspring)\n\n offsprings = np.asarray(offsprings)\n\n return offsprings\n\n def mutation(self, x, c=None, cs=None):\n # uncorrelated mutation with one sigma\n if self.type in ['es_a1', 'es_a2', 'es_b1', 'es_b2']:\n\n iter = 5\n\n if c % iter == 0 and c != 0:\n if cs/c > 1/5:\n self.SIGMA = self.SIGMA / self.update\n elif cs/c < 1/5:\n self.SIGMA = self.SIGMA * self.update\n\n # learning rate\n t = 1 / math.sqrt(x.shape[0])\n\n self.SIGMA = self.SIGMA * math.exp(t * np.random.normal(0, 1))\n\n z = self.SIGMA * np.random.normal(0, 1, (x.shape[0], 4))\n x_new = x + z\n\n x_new = np.clip(x_new, self.bounds[0], self.bounds[1])\n return x_new\n\n # uncorrelated mutation with N sigmas\n elif self.type in ['es_a3', 'es_a4', 'es_b3', 'es_b4']:\n iter = 5\n\n if c % iter == 0 and c != 0:\n if cs / c > 1 / 5:\n self.sigma_list = self.sigma_list / self.update\n elif cs / c < 1 / 5:\n self.sigma_list = self.sigma_list * self.update\n\n x_new = np.zeros((x.shape[0], 4))\n\n t1 = 1 / math.sqrt(2 * math.sqrt(x.shape[0]))\n t2 = 1 / math.sqrt(2 * x.shape[0])\n\n for i in range(4):\n self.sigma_list[i] = self.sigma_list[i] * math.exp((t1 * np.random.normal(0, 1)) +\n (t2 * np.random.normal(0, 1)))\n\n for i in range(x.shape[0]):\n z = np.zeros(4)\n for j in range(4):\n z[j] = self.sigma_list[j] * 
np.random.normal(0, 1)\n x_new[i] = x[i] + z\n\n x_new = np.clip(x_new, self.bounds[0], self.bounds[1])\n return x_new\n\n # Correlated mutation\n elif self.type in ['es_a5', 'es_a6', 'es_b5', 'es_b6']:\n iter = 5\n\n if c % iter == 0 and c != 0:\n if cs / c > 1 / 5:\n self.SIGMA = self.SIGMA / self.update\n elif cs / c < 1 / 5:\n self.SIGMA = self.SIGMA * self.update\n\n x_new = np.zeros((x.shape[0], 4))\n\n t1 = 1 / math.sqrt(2 * math.sqrt(x.shape[0]))\n t2 = 1 / math.sqrt(2 * x.shape[0])\n\n self.ALPHA = self.ALPHA + self.BETA * np.random.normal(0, 1)\n\n R_xy = np.array(((np.cos(self.ALPHA), -np.sin(self.ALPHA), 0, 0),\n (np.sin(self.ALPHA), np.cos(self.ALPHA), 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1)))\n\n R_xz = np.array(((np.cos(self.ALPHA), 0, -np.sin(self.ALPHA), 0),\n (0, 1, 0, 0),\n (np.sin(self.ALPHA), 0, np.cos(self.ALPHA), 0),\n (0, 0, 0, 1)))\n\n R_xw = np.array(((np.cos(self.ALPHA), 0, 0, -np.sin(self.ALPHA)),\n (0, 1, 0, 0),\n (0, 0, 1, 0),\n (np.sin(self.ALPHA), 0, 0, np.cos(self.ALPHA))))\n\n R_yz = np.array(((1, 0, 0, 0),\n (0, np.cos(self.ALPHA), -np.sin(self.ALPHA), 0),\n (0, np.sin(self.ALPHA), np.cos(self.ALPHA), 0),\n (0, 0, 0, 1)))\n\n R_yw = np.array(((1, 0, 0, 0),\n (0, np.cos(self.ALPHA), 0, -np.sin(self.ALPHA)),\n (0, 0, 1, 0),\n (0, np.sin(self.ALPHA), 0, np.cos(self.ALPHA))))\n\n R_zw = np.array(((1, 0, 0, 0),\n (0, 1, 0, 0),\n (0, 0, np.cos(self.ALPHA), -np.sin(self.ALPHA)),\n (0, 0, np.sin(self.ALPHA), np.cos(self.ALPHA))))\n\n R = np.linalg.multi_dot([R_xy, R_xz, R_xw, R_yz, R_yw, R_zw])\n\n for i in range(4):\n self.sigma_list[i] = self.sigma_list[i] * math.exp((t1 * np.random.normal(0, 1)) +\n (t2 * np.random.normal(0, 1)))\n\n for i in range(x.shape[0]):\n z = np.zeros(4)\n for j in range(4):\n z[j] = self.sigma_list[j] * np.random.normal(0, 1)\n\n z = np.dot(z, R)\n x_new[i] = x[i] + z\n\n x_new = np.clip(x_new, self.bounds[0], self.bounds[1])\n\n return x_new\n\n # BMO\n elif self.type in ['es_a7', 'es_a8', 'es_b7', 'es_b8']:\n iter = 5\n\n if c % iter == 0 and c != 0:\n if cs / c > 1 / 5:\n self.sigma_list = self.sigma_list / self.update\n elif cs / c < 1 / 5:\n self.sigma_list = self.sigma_list * self.update\n\n x_new = np.zeros((x.shape[0], 4))\n\n t1 = 1 / math.sqrt(2 * math.sqrt(x.shape[0]))\n t2 = 1 / math.sqrt(2 * x.shape[0])\n\n for i in range(4):\n self.sigma_list[i] = self.sigma_list[i] * math.exp((t1 * np.random.normal(0, 1)) +\n (t2 * np.random.normal(0, 1)))\n\n for i in range(4):\n self.bias_coe[i] = self.bias_coe[i] + 0.1 * np.random.normal(0, 1)\n\n for i in range(x.shape[0]):\n z = np.zeros(4)\n for j in range(4):\n z[j] = self.sigma_list[j] * np.random.normal(self.bias_coe[j], 1)\n x_new[i] = x[i] + z\n\n x_new = np.clip(x_new, self.bounds[0], self.bounds[1])\n return x_new\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass DifferentialRecombination(Recombination):\n def __init__(self, type='de', bounds=(-np.infty, np.infty), params=None):\n super().__init__()\n self.type = type\n self.bounds = bounds\n\n assert (0. <= params['F'] <= 2.), 'F must be in [0, 2]'\n assert (0. 
< params['CR'] <= 1.), 'CR must be in (0, 1]'\n assert type in ['de', 'ade', 'revde', 'dex3'], 'type must be one in {de, dex3, ade, revde}'\n\n self.F = params['F']\n self.CR = params['CR']\n\n def recombination(self, x):\n indices_1 = np.arange(x.shape[0])\n # take first parent\n x_1 = x[indices_1]\n # assign second parent (ensure)\n indices_2 = np.random.permutation(x.shape[0])\n x_2 = x_1[indices_2]\n # assign third parent\n indices_3 = np.random.permutation(x.shape[0])\n x_3 = x_2[indices_3]\n\n if self.type == 'de':\n # differential mutation\n y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])\n\n # uniform crossover\n if self.CR < 1.:\n p_1 = bernoulli(self.CR, y_1.shape)\n y_1 = p_1 * y_1 + (1. - p_1) * x_1\n\n return (y_1), (indices_1, indices_2, indices_3)\n\n elif self.type == 'revde':\n y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])\n y_2 = np.clip(x_2 + self.F * (x_3 - y_1), self.bounds[0], self.bounds[1])\n y_3 = np.clip(x_3 + self.F * (y_1 - y_2), self.bounds[0], self.bounds[1])\n\n # uniform crossover\n if self.CR < 1.:\n p_1 = bernoulli(self.CR, y_1.shape)\n p_2 = bernoulli(self.CR, y_2.shape)\n p_3 = bernoulli(self.CR, y_3.shape)\n y_1 = p_1 * y_1 + (1. - p_1) * x_1\n y_2 = p_2 * y_2 + (1. - p_2) * x_2\n y_3 = p_3 * y_3 + (1. - p_3) * x_3\n\n return (y_1, y_2, y_3), (indices_1, indices_2, indices_3)\n\n elif self.type == 'ade':\n y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])\n y_2 = np.clip(x_2 + self.F * (x_3 - x_1), self.bounds[0], self.bounds[1])\n y_3 = np.clip(x_3 + self.F * (x_1 - x_2), self.bounds[0], self.bounds[1])\n\n # uniform crossover\n if self.CR < 1.:\n p_1 = bernoulli(self.CR, y_1.shape)\n p_2 = bernoulli(self.CR, y_2.shape)\n p_3 = bernoulli(self.CR, y_3.shape)\n y_1 = p_1 * y_1 + (1. - p_1) * x_1\n y_2 = p_2 * y_2 + (1. - p_2) * x_2\n y_3 = p_3 * y_3 + (1. - p_3) * x_3\n\n return (y_1, y_2, y_3), (indices_1, indices_2, indices_3)\n\n if self.type == 'dex3':\n # y1\n y_1 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])\n\n # uniform crossover\n if self.CR < 1.:\n p_1 = bernoulli(self.CR, y_1.shape)\n y_1 = p_1 * y_1 + (1. - p_1) * x_1\n\n # y2\n indices_1p = np.arange(x.shape[0])\n # take first parent\n x_1 = x[indices_1p]\n # assign second parent (ensure)\n indices_2p = np.random.permutation(x.shape[0])\n x_2 = x_1[indices_2p]\n # assign third parent\n indices_3p = np.random.permutation(x.shape[0])\n x_3 = x_2[indices_3p]\n\n y_2 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])\n\n # uniform crossover\n if self.CR < 1.:\n p_2 = bernoulli(self.CR, y_2.shape)\n y_2 = p_2 * y_2 + (1. - p_2) * x_1\n\n # y3\n indices_1p = np.arange(x.shape[0])\n # take first parent\n x_1 = x[indices_1p]\n # assign second parent (ensure)\n indices_2p = np.random.permutation(x.shape[0])\n x_2 = x_1[indices_2p]\n # assign third parent\n indices_3p = np.random.permutation(x.shape[0])\n x_3 = x_2[indices_3p]\n\n y_3 = np.clip(x_1 + self.F * (x_2 - x_3), self.bounds[0], self.bounds[1])\n\n # uniform crossover\n if self.CR < 1.:\n p_3 = bernoulli(self.CR, y_3.shape)\n y_3 = p_3 * y_3 + (1. - p_3) * x_1\n\n return (y_1, y_2, y_3), (indices_1, indices_2, indices_3)\n else:\n raise ValueError('Wrong name of the differential mutation!')\n"
] | [
[
"numpy.dot",
"numpy.clip",
"numpy.asarray",
"numpy.arange",
"numpy.linalg.multi_dot",
"numpy.cos",
"numpy.sin",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
japaf/foamgen | [
"6f456796e79de344eefb21a1ad121fd869f9fd9e"
] | [
"src/foamgen/smesh.py"
] | [
"\"\"\"\nStructured meshing module\n=========================\n:synopsis: Create structured VTK mesh.\n\n.. moduleauthor:: Pavel Ferkl <[email protected]>\n\"\"\"\nfrom __future__ import division, print_function\nimport os\nimport shutil\nimport subprocess as sp\nimport shlex\nfrom scipy.optimize import root_scalar\nfrom . import vtk_tools\n\n\ndef structured_mesh(fname, porosity, strut_content):\n \"\"\"Create foam discretized on structured cartesian mesh.\n\n Creates foam with desired porosity and strut content. ``root_scalar`` from\n scipy is used for root finding. This method is not optimal, since the\n solver doesn't know that the function takes only integer arguments.\n\n Ultimate output is the ``*SMesh.vtk`` file.\n\n Args:\n fname (str): base filename\n porosity (float): target foam porosity\n strut_content (float): target foam strut content\n \"\"\"\n dsize = 1\n # Binarize and save as .vtk\n if strut_content == 0:\n print(\"Optimizing porosity\")\n res = root_scalar(por_res, args=(fname, porosity), x0=100, x1=120,\n method='secant', rtol=1e-2)\n delta = int(res.root)\n print('box size: {0:d}'.format(delta))\n print(\"Creating and saving optimal foam\")\n # Call it with the optimized box size\n por_res(delta, fname, porosity)\n print(\"Convert binary .vtk to ascii .vtk\")\n origin = [0, 0, 0]\n spacing = [dsize / delta, dsize / delta, dsize / delta]\n vtk_tools.vtk_bin_to_ascii(fname + \"SMesh.vtk\", fname + \"SMesh.vtk\",\n origin, spacing)\n else:\n print(\"Optimizing porosity and strut content\")\n res = root_scalar(por_fs_res,\n args=(fname, dsize, porosity, strut_content),\n x0=100, x1=120, method='secant', rtol=1e-2)\n delta = int(res.root)\n print('box size: {0:d}'.format(delta))\n print(\"Creating and saving optimal foam\")\n # Call it with the optimized box size\n por_fs_res(delta, fname, dsize, porosity, strut_content)\n clean_files()\n\n\ndef por_res(delta, fname, porosity):\n \"\"\"Residual function for finding target porosity.\n\n Adjusts the size of the box, in which the foam is binarized. Bigger box\n leads to thinner walls and higher porosity.\n\n :func:`voxelize_morphology` is used to create walls.\n\n Args:\n delta (float): box size in voxels\n fname (str): base filename\n porosity (float): target porosity\n\n Returns:\n float: squared difference between target and actual porosity\n \"\"\"\n delta = int(delta)\n out = voxelize_morphology(fname, delta)\n for line in out:\n if \"counted\" in line:\n solid_voxel, total_voxel =\\\n [int(s) for s in line.split() if s.isdigit()]\n break\n eps = 1 - solid_voxel / total_voxel\n print(\"dimension: {0:4d}, porosity: {1:f}\".format(delta, eps))\n return (eps - porosity)**2\n\n\ndef por_fs_res(delta, fname, dsize, porosity, strut_content):\n \"\"\"Residual function for finding target porosity and strut content.\n\n Adjusts the size of the box, in which the foam is binarized and strut size\n parameter. Bigger box leads to thinner walls and higher porosity. 
Higher\n strut size parameter leads to higher strut content.\n\n :func:`voxelize_morphology` is used to create walls.\n ``foamreconstr`` program is used to create struts and optimize strut\n content.\n\n Requires ``*Tessellation.gnu`` file.\n\n Args:\n delta (float): box size in voxels\n fname (str): base filename\n dsize (float): box size\n porosity (float): target foam porosity\n strut_content (float): target foam strut content\n\n Returns:\n float: squared difference between target and actual porosity\n \"\"\"\n delta = int(delta)\n voxelize_morphology(fname, delta)\n origin = [0, 0, 0]\n spacing = [dsize / delta, dsize / delta, dsize / delta]\n vtk_tools.vtk_bin_to_ascii(fname + \"SMesh.vtk\", fname + \"SMesh.vtk\",\n origin, spacing)\n try:\n with open(\"parameters.txt\", \"r\") as fhl:\n dedge = float(fhl.readline())\n except FileNotFoundError:\n dedge = 2\n with open(\"foamreconstr.in\", \"w\") as fhl:\n fhl.write(\"0\\n\")\n fhl.write(\"1\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"1\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"{0:f}\\n\".format(dedge))\n fhl.write(\"{0:f}\\n\".format(1 - strut_content * (1 - porosity)))\n fhl.write(\"0\\n\")\n fhl.write(\"1\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"1\\n\")\n fhl.write(\"0\\n\")\n fhl.write(\"1\\n\")\n fhl.write(\"0\\n\")\n fhl.write(fname + \"SMesh\\n\")\n fhl.write(fname + \"SMesh.vtk\\n\")\n fhl.write(fname + \"Tessellation.gnu\\n\")\n fhl.write(\"name\\n\")\n fhl.write(\"descriptors.txt\" + \"\\n\")\n fhl.write(\"parameters.txt\" + \"\\n\")\n sp.Popen(\"foamreconstr\").wait()\n with open(\"descriptors.txt\", \"r\") as fhl:\n eps = float(fhl.readline())\n fstr = float(fhl.readline())\n print(\"dimension: {0:4d}, porosity: {1:f}\".format(delta, eps) +\n \", strut content: {0:f}\".format(fstr))\n return (eps - porosity)**2\n\n\ndef voxelize_morphology(fname, delta):\n \"\"\"Create foam on equidistant cartesian mesh.\n\n Requires ``*TessellationBox.stl`` file. Creates ``*SMesh.vtk`` file.\n\n Args:\n fname (str): base filename\n delta (float): box size\n\n Returns:\n str: binvox stdout\n \"\"\"\n if os.path.isfile(fname + 'SMesh.vtk'):\n os.remove(fname + 'SMesh.vtk')\n if not os.path.isfile(fname + 'TessellationBox.stl'):\n raise Exception(\".stl file is missing. Nothing to binarize.\")\n shutil.copy2(fname + 'TessellationBox.stl', fname + 'SMesh.stl')\n cmd = shlex.split(\n \"binvox -e -d {0:d} -t vtk \".format(delta) + fname + \"SMesh.stl\"\n )\n call = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)\n out, _ = call.communicate()\n out = out.decode().splitlines()\n if os.path.isfile(fname + 'SMesh.stl'):\n os.unlink(fname + 'SMesh.stl')\n return out\n\n\ndef clean_files():\n \"\"\"Delete unnecessary files.\"\"\"\n flist = [\n 'descriptors.txt',\n 'parameters.txt',\n 'foamreconstr.in',\n ]\n for fil in flist:\n if os.path.exists(fil):\n os.remove(fil)\n"
] | [
[
"scipy.optimize.root_scalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
j-faria/vera | [
"96cbdb61c98c3527416611155b29a03a2bc66b15"
] | [
"vera/visibility.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport sys\nimport numpy as np\nfrom scipy.optimize import bisect\nimport datetime as dt\nfrom dateutil import tz\nimport pickle\nfrom random import choice\nfrom PyAstronomy import pyasl\nfrom astropy.coordinates import SkyCoord\nfrom astropy.coordinates import name_resolve\nfrom astropy.time import Time\nfrom astropy import units\nimport ephem\nimport argparse\nimport calendar\n\ntry:\n from tqdm import tqdm\nexcept ImportError:\n def tqdm(x): return x\n\nimport io\nimport matplotlib.pyplot as plt\nimport matplotlib\nreplace_figure = True\ntry:\n from PySide.QtGui import QApplication, QImage\nexcept ImportError:\n try:\n from PyQt4.QtGui import QApplication, QImage\n except ImportError:\n try:\n from PyQt5.QtWidgets import QApplication\n from PyQt5.QtGui import QImage\n except ImportError:\n replace_figure = False\n\n\ndef add_clipboard_to_figures():\n # replace the original plt.figure() function with one that supports\n # clipboard-copying\n oldfig = plt.figure\n\n def newfig(*args, **kwargs):\n fig = oldfig(*args, **kwargs)\n\n def clipboard_handler(event):\n if event.key == 'ctrl+c':\n # store the image in a buffer using savefig(), this has the\n # advantage of applying all the default savefig parameters\n # such as background color; those would be ignored if you simply\n # grab the canvas using Qt\n buf = io.BytesIO()\n fig.savefig(buf)\n QApplication.clipboard().setImage(\n QImage.fromData(buf.getvalue()))\n buf.close()\n print('Ctrl+C pressed: image is now in the clipboard')\n\n fig.canvas.mpl_connect('key_press_event', clipboard_handler)\n return fig\n\n plt.figure = newfig\n\n\nif replace_figure:\n add_clipboard_to_figures()\n\n\ndef _parser():\n parser = argparse.ArgumentParser(\n description='Plot altitudes of objects against time for a specific night')\n\n parser.add_argument('targets', help='e.g. HD20010 or HD20010,HD41248',\n nargs='+')\n\n parser.add_argument(\n '-d', '--date', default='today',\n help='Date in format YYYY-MM-DD (or YYYY if starobs). '\n 'Default is today (this year if starobs).')\n\n parser.add_argument(\n '-P', '--period', default=None, type=str, nargs=1,\n help='Specify ESO period (October-March / April-September)')\n\n parser.add_argument(\n '-s', '--site', default='esolasilla',\n help='Observatory. Default is ESO La Silla. 
'\n 'Common codes are esoparanal, lapalma, keck, lco, Palomar, etc')\n\n parser.add_argument(\n '-l', '--loc', default=None,\n help='Give the location of the observatory.'\n 'Comma-separated altitude, latitude, longitude, timezone')\n\n parser.add_argument('-c', default=False, action='store_true',\n help='Just print \"target RA DEC\" (to use in STARALT)')\n\n parser.add_argument(\n '-m', '--mode', choices=['staralt', 'starobs'], default='staralt',\n help='staralt: plot altitude against time for a particular night; '\n 'starobs: plot how altitude changes over a year')\n\n parser.add_argument('--nomoon', default=False, action='store_true',\n help=\"Don't plot moon altitude\")\n\n parser.add_argument('--sh', default=None, type=float, nargs=1, dest='A',\n help='Include plot of sunless hours above airmass A')\n\n parser.add_argument('--hover', default=False, action='store_true',\n help='Color lines when mouse over')\n\n parser.add_argument(\n '-o', '--save', default=None, type=str, nargs=1,\n help='Save figure in output file (provide file extension)')\n\n parser.add_argument('--remove-watermark', default=False,\n action='store_true',\n help='Remove \"Created with...\" watermark text')\n\n parser.add_argument('--toi', default=False, action='store_true',\n help='Targets are TOIs')\n\n return parser.parse_args()\n\n\ndef decdeg2dms(dd):\n \"\"\" Convert decimal degrees to deg,min,sec \"\"\"\n is_positive = dd >= 0\n dd = abs(dd)\n minutes, seconds = divmod(dd * 3600, 60)\n degrees, minutes = divmod(minutes, 60)\n degrees = degrees if is_positive else -degrees\n return (degrees, minutes, seconds)\n\n\nclass CacheSkyCoord(SkyCoord):\n @classmethod\n def from_name(cls, name, frame='icrs'):\n try:\n cached = pickle.load(open('CachedSkyCoords.pickle', 'rb'))\n except FileNotFoundError:\n cached = {}\n\n if name in cached:\n return cached[name]\n else:\n original = super(CacheSkyCoord, cls).from_name(name, frame)\n # keep the cached dict manageable\n n = len(cached)\n if n > 100:\n # remove a random cached target\n cached.pop(choice(list(cached.keys())))\n cached.update({name: original})\n pickle.dump(cached, open('CachedSkyCoords.pickle', 'wb'))\n return original\n\n\nESO_periods = {\n 104: [(2019, 10, 1), (2020, 3, 31)],\n 103: [(2019, 4, 1), (2019, 9, 30)],\n 102: [(2018, 10, 1), (2019, 3, 31)],\n 101: [(2018, 4, 1), (2018, 9, 30)],\n 100: [(2017, 10, 1), (2018, 3, 31)],\n 99: [(2017, 4, 1), (2017, 9, 30)],\n 98: [(2016, 10, 1), (2017, 3, 31)],\n 97: [(2016, 4, 1), (2016, 9, 30)],\n 96: [(2015, 10, 1), (2016, 3, 31)],\n 95: [(2015, 4, 1), (2015, 9, 30)],\n 94: [(2014, 10, 1), (2015, 3, 31)],\n 93: [(2014, 4, 1), (2014, 9, 30)],\n 92: [(2013, 10, 1), (2014, 3, 31)],\n}\n\n\ndef get_ESO_period(period):\n \"\"\" Return the JD of start and end of ESO period \"\"\"\n assert isinstance(period, str) or isinstance(period, int)\n P = int(period)\n\n def getjd(y, m, d): return pyasl.jdcnv(dt.datetime(y, m, d))\n jd_start, jd_end = [getjd(*d) for d in ESO_periods[P]]\n\n return jd_start, jd_end\n\n\ndef hrs_up(up, down, eve, morn):\n \"\"\"\n If an object comes up past a given point at `up`, and goes down at `down`, \n and evening and morning are at `eve` and `morn`, computes how long object\n is up *and* it's dark.\n \"\"\"\n # if any input is a float, assume it's JD\n if isinstance(up, float):\n up = pyasl.daycnv(up, mode='dt')\n if isinstance(down, float):\n down = pyasl.daycnv(down, mode='dt')\n if isinstance(eve, float):\n eve = pyasl.daycnv(eve, mode='dt')\n if isinstance(morn, float):\n morn = 
pyasl.daycnv(morn, mode='dt')\n\n SID_RATE = 1.0027379093\n if up < eve:\n if down >= morn:\n return (morn - eve).total_seconds() / 3600 # up all night\n elif down >= eve:\n # careful here ... circumpolar objects can come back *up* a second time\n # before morning. up and down are the ones immediately preceding\n # and following the upper culmination nearest the center of the night,\n # so \"up\" can be on the previous night rather than the one we want. */\n up2 = up + dt.timedelta(days=1.0 / SID_RATE)\n if (up2 > morn): # the usual case ... doesn't rise again\n return (down - eve).total_seconds() / 3600\n else:\n return ((down - eve) + (morn - up2)).total_seconds() / 3600\n else:\n return 0.\n elif down > morn:\n if up > morn:\n return 0.\n else:\n # again, a circumpolar object can be up at evening twilight and come\n # 'round again in the morning ...\n down0 = down - dt.timedelta(days=1.0 / SID_RATE)\n if down0 < eve:\n return (morn - up).total_seconds() / 3600\n else:\n return ((down0 - eve) + (morn - up)).total_seconds() / 3600\n else:\n return (down - up).total_seconds() / 3600\n # up & down the same night ... might happen a second time in pathological\n # cases, but this will be extremely rare except at very high latitudes.\n\n\nSUN = ephem.Sun()\n\n\ndef get_next_sunset(jd, obs, mode='jd'):\n datetime_jd = pyasl.daycnv(jd, mode='dt')\n s = ephem.Observer()\n s.date = datetime_jd\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n next_sunset = ephem.julian_date(s.next_setting(SUN))\n if mode == 'jd':\n return next_sunset\n elif mode == 'dt':\n return pyasl.daycnv(next_sunset, mode='dt')\n\n\ndef get_next_sunrise(jd, obs, mode='jd'):\n datetime_jd = pyasl.daycnv(jd, mode='dt')\n s = ephem.Observer()\n s.date = datetime_jd\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n next_sunrise = ephem.julian_date(s.next_rising(SUN))\n if mode == 'jd':\n return next_sunrise\n elif mode == 'dt':\n return pyasl.daycnv(next_sunrise, mode='dt')\n\n\ndef get_next_pass_at_altitude(jd, altitude, target, obs, limit=0.25):\n \"\"\" Next time after jd that target passes at altitude, seen from obs \"\"\"\n def alt(jd, target):\n ra = np.full_like(jd, target.ra.value)\n dec = np.full_like(jd, target.dec.value)\n lon, lat, alt = map(\n obs.__getitem__, ('longitude', 'latitude', 'altitude'))\n hor = pyasl.eq2hor(jd, ra, dec, lon=lon, lat=lat, alt=alt)\n return -altitude + hor[0]\n\n # if target is *already* above altitude at jd, return jd\n if alt(jd, target) > 0:\n return jd\n\n try:\n return bisect(alt, jd, jd + limit, args=(target, ))\n except ValueError:\n try:\n return bisect(alt, jd, jd + 2*limit, args=(target, ))\n except ValueError:\n return -99\n\n\ndef get_previous_pass_at_altitude(jd, altitude, target, obs, limit=0.25):\n \"\"\" \n Previous time, before jd, that target passes at altitude, seen from obs \n \"\"\"\n def alt(jd, target):\n ra = np.full_like(jd, target.ra.value)\n dec = np.full_like(jd, target.dec.value)\n lon, lat, alt = map(obs.__getitem__,\n ('longitude', 'latitude', 'altitude'))\n hor = pyasl.eq2hor(jd, ra, dec, lon=lon, lat=lat, alt=alt)\n return -altitude + hor[0]\n\n # if target is *still* above altitude at jd, return jd\n if alt(jd, target) > 0:\n return jd\n\n try:\n return bisect(alt, jd, jd - limit, args=(target, ))\n except ValueError:\n try:\n return bisect(alt, jd, jd - 2*limit, args=(target, ))\n except 
ValueError:\n return -99\n\n\ndef hrs_above_altitude(jd, altitude, target, obs):\n # evening\n eve = get_next_sunset(jd, obs)\n # star goes up (above altitude)\n up = get_next_pass_at_altitude(eve, altitude, target, obs)\n # print(eve, up)\n if up == -99:\n return 0.\n\n # morning\n morn = get_next_sunrise(jd, obs)\n if morn < eve: # maybe of next day?\n morn = get_next_sunrise(jd+1, obs)\n # star goes down\n down = get_previous_pass_at_altitude(morn, altitude, target, obs)\n # print(morn, down)\n if down == -99:\n return 0.\n\n return hrs_up(up, down, eve, morn)\n\n\ndef get_visibility_curve(year, target, observatory, period=None):\n\n try:\n target = {'name': target, 'coord': SkyCoord.from_name(target)}\n except name_resolve.NameResolveError:\n print('Could not find target: {0!s}'.format(target))\n\n target_coord = target['coord']\n target_ra = target_coord.ra.deg\n target_dec = target_coord.dec.deg\n\n # set the observatory\n if isinstance(observatory, dict):\n obs = observatory\n else:\n obs = pyasl.observatory(observatory)\n\n if period is not None:\n jd_start, jd_end = get_ESO_period(period)\n else:\n jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))\n jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))\n\n jdbinsize = 1 # every day\n each_day = np.arange(jd_start, jd_end, jdbinsize)\n jds = []\n\n ## calculate the mid-dark times\n sun = ephem.Sun()\n for day in each_day:\n date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])\n s = ephem.Observer()\n s.date = date_formatted\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n jds.append(ephem.julian_date(s.next_antitransit(sun)))\n jds = np.array(jds)\n\n # Get JD floating point\n jdsub = jds - np.floor(jds[0])\n\n # Get alt/az of object\n altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec,\n lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])\n # plt.plot( jdsub, altaz[0], '-', color='k')\n\n return jds, altaz[0]\n\n\ndef StarObsPlot(year=None, targets=None, observatory=None, period=None,\n hover=False, sunless_hours=None, remove_watermark=False):\n \"\"\"\n Plot the visibility of target.\n\n Parameters\n ----------\n year: int\n The year for which to calculate the visibility.\n targets: list\n List of targets.\n Each target should be a dictionary with keys 'name' and 'coord'.\n The key 'name' is a string, 'coord' is a SkyCoord object.\n observatory: string\n Name of the observatory that pyasl.observatory can resolve.\n Basically, any of pyasl.listObservatories().keys()\n period: string, optional\n ESO period for which to calculate the visibility. 
Overrides `year`.\n hover: boolean, optional\n If True, color visibility lines when mouse over.\n sunless_hours: float, optional\n If not None, plot sunless hours above this airmass\n \"\"\"\n\n from mpl_toolkits.axes_grid1 import host_subplot\n from matplotlib.ticker import MultipleLocator\n from matplotlib.font_manager import FontProperties\n from matplotlib import rcParams\n rcParams['xtick.major.pad'] = 12\n font0 = FontProperties()\n font1 = font0.copy()\n font0.set_family('sans-serif')\n font0.set_weight('light')\n font1.set_family('sans-serif')\n font1.set_weight('medium')\n\n # set the observatory\n if isinstance(observatory, dict):\n obs = observatory\n else:\n obs = pyasl.observatory(observatory)\n\n fig = plt.figure(figsize=(15, 10))\n fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)\n\n # watermak\n if not remove_watermark:\n fig.text(0.99, 0.99,\n 'Created with\\ngithub.com/iastro-pt/ObservationTools',\n fontsize=10, color='gray', ha='right', va='top', alpha=0.5)\n\n # plotting sunless hours?\n shmode = False\n if sunless_hours is not None:\n shmode = True\n # limit in airmass (assumed plane-parallel atm)\n shairmass = sunless_hours\n # correspoing limit in altitude\n def f(alt): return pyasl.airmassPP(alt) - shairmass\n shalt = 90 - bisect(f, 0, 89)\n\n if shmode:\n fig.subplots_adjust(hspace=0.35)\n ax = host_subplot(211)\n axsh = host_subplot(212)\n plt.text(0.5, 0.47,\n \"- sunless hours above airmass {:.1f} - \\n\".format(shairmass),\n transform=fig.transFigure, ha='center', va='bottom',\n fontsize=12)\n plt.text(0.5, 0.465,\n \"the thick line above the curves represents the total sunless hours \"\n \"for each day of the year\",\n transform=fig.transFigure, ha='center', va='bottom', fontsize=10)\n\n else:\n ax = host_subplot(111)\n\n for n, target in enumerate(targets):\n\n target_coord = target['coord']\n target_ra = target_coord.ra.deg\n target_dec = target_coord.dec.deg\n\n if period is not None:\n jd_start, jd_end = get_ESO_period(period)\n else:\n jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))\n jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))\n\n jdbinsize = 1 # every day\n each_day = np.arange(jd_start, jd_end, jdbinsize)\n jds = []\n\n ## calculate the mid-dark times\n sun = ephem.Sun()\n for day in each_day:\n date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])\n s = ephem.Observer()\n s.date = date_formatted\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n jds.append(ephem.julian_date(s.next_antitransit(sun)))\n\n jds = np.array(jds)\n\n # Get JD floating point\n jdsub = jds - np.floor(jds[0])\n\n # Get alt/az of object\n altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec,\n lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])\n ax.plot(jdsub, altaz[0], '-', color='k')\n\n # label for each target\n plabel = \"[{0:2d}] {1!s}\".format(n + 1, target['name'])\n\n # number of target at the top of the curve\n ind_label = np.argmax(altaz[0])\n # or at the bottom if the top is too close to the corners\n # if jdsub[ind_label] < 5 or jdsub[ind_label] > jdsub.max()-5:\n # ind_label = np.argmin(altaz[0])\n ax.text(jdsub[ind_label], altaz[0][ind_label], str(n+1), color=\"b\", fontsize=14,\n fontproperties=font1, va=\"bottom\", ha=\"center\")\n\n if n + 1 == 29:\n # too many?\n ax.text(1.1, 1.0-float(n+1)*0.04, \"too many targets\", ha=\"left\", va=\"top\", transform=ax.transAxes,\n fontsize=10, fontproperties=font0, 
color=\"r\")\n else:\n ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha=\"left\", va=\"top\", transform=ax.transAxes,\n fontsize=12, fontproperties=font0, color=\"b\")\n\n if shmode:\n sunless_hours = []\n for day in each_day:\n date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])\n s = ephem.Observer()\n s.date = date_formatted\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n # hours from sunrise to sunset\n td = pyasl.daycnv(ephem.julian_date(s.next_setting(sun)), mode='dt') \\\n - pyasl.daycnv(ephem.julian_date(s.next_rising(sun)), mode='dt')\n sunless_hours.append(24 - td.total_seconds() / 3600)\n\n days = each_day - np.floor(each_day[0])\n axsh.plot(days, sunless_hours, '-', color='k', lw=2)\n axsh.set(\n ylim=(0, 15), yticks=range(1, 15), ylabel='Useful hours',\n yticklabels=[r'${}^{{\\rm h}}$'.format(n) for n in range(1, 15)])\n\n ax.text(1.1, 1.03, \"List of targets\", ha=\"left\", va=\"top\", transform=ax.transAxes,\n fontsize=12, fontproperties=font0, color=\"b\")\n\n axrange = ax.get_xlim()\n\n if period is None:\n months = range(1, 13)\n ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]\n ax.set_xlim([0, 366])\n ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n ax.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n if shmode:\n axsh.set_xlim([0, 366])\n axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n axsh.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n else:\n if int(period) % 2 == 0:\n # even ESO period, Oct -> Mar\n months = [10, 11, 12, 1, 2, 3]\n ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]\n ax.set_xlim([0, 181])\n ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n ax.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n if shmode:\n axsh.set_xlim([0, 181])\n axsh.set_xticks(\n np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n axsh.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n else:\n # odd ESO period, Apr -> Sep\n months = range(4, 10)\n ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]\n ax.set_xlim([0, 182])\n ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n ax.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n if shmode:\n axsh.set_xlim([0, 182])\n axsh.set_xticks(\n np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n axsh.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n\n if axrange[1] - axrange[0] <= 1.0:\n jdhours = np.arange(0, 3, 1.0 / 24.)\n utchours = (np.arange(0, 72, dtype=int) + 12) % 24\n else:\n jdhours = np.arange(0, 3, 1.0 / 12.)\n utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24\n\n # Make ax2 responsible for \"top\" axis and \"right\" axis\n ax2 = ax.twin()\n # Set upper x ticks\n ax2.set_xticks(np.cumsum(ndays))\n ax2.set_xlabel(\"Day\")\n\n # plane-parallel airmass\n airmass_ang = np.arange(10, 81, 5)\n geo_airmass = pyasl.airmass.airmassPP(airmass_ang)[::-1]\n ax2.set_yticks(airmass_ang)\n airmassformat = []\n for t in range(geo_airmass.size):\n airmassformat.append(\"{0:2.2f}\".format(geo_airmass[t]))\n ax2.set_yticklabels(airmassformat) # , rotation=90)\n ax2.set_ylabel(\"Relative airmass\", labelpad=32)\n ax2.tick_params(axis=\"y\", pad=6, labelsize=8)\n plt.text(1.02, -0.04, \"Plane-parallel\", transform=ax.transAxes, ha='left',\n va='top', 
fontsize=10, rotation=90)\n\n ax22 = ax.twin()\n ax22.set_xticklabels([])\n ax22.set_frame_on(True)\n ax22.patch.set_visible(False)\n ax22.yaxis.set_ticks_position('right')\n ax22.yaxis.set_label_position('right')\n ax22.spines['right'].set_position(('outward', 30))\n ax22.spines['right'].set_color('k')\n ax22.spines['right'].set_visible(True)\n airmass2 = list(\n map(\n lambda ang: pyasl.airmass.airmassSpherical(\n 90. - ang, obs['altitude']),\n airmass_ang))\n ax22.set_yticks(airmass_ang)\n airmassformat = []\n for t in range(len(airmass2)):\n airmassformat.append(\" {0:2.2f}\".format(airmass2[t]))\n ax22.set_yticklabels(airmassformat, rotation=90)\n ax22.tick_params(axis=\"y\", pad=8, labelsize=8)\n plt.text(1.05, -0.04, \"Spherical+Alt\", transform=ax.transAxes, ha='left', va='top',\n fontsize=10, rotation=90)\n\n ax.set_ylim([0, 91])\n ax.yaxis.set_major_locator(MultipleLocator(15))\n ax.yaxis.set_minor_locator(MultipleLocator(5))\n yticks = ax.get_yticks()\n ytickformat = []\n for t in range(yticks.size):\n ytickformat.append(str(int(yticks[t])) + r\"$^\\circ$\")\n ax.set_yticklabels(ytickformat, fontsize=11 if shmode else 16)\n ax.set_ylabel(\"Altitude\", fontsize=18)\n yticksminor = np.array(ax.get_yticks(minor=True))\n ymind = np.where(yticksminor % 15. != 0.)[0]\n yticksminor = yticksminor[ymind]\n ax.set_yticks(yticksminor, minor=True)\n m_ytickformat = []\n for t in range(yticksminor.size):\n m_ytickformat.append(str(int(yticksminor[t])) + r\"$^\\circ$\")\n ax.set_yticklabels(m_ytickformat, minor=True)\n ax.set_ylim([0, 91])\n\n ax.yaxis.grid(color='gray', linestyle='dashed')\n ax.yaxis.grid(color='gray', which=\"minor\", linestyle='dotted')\n ax2.xaxis.grid(color='gray', linestyle='dotted')\n\n if period is not None:\n plt.text(\n 0.5, 0.95,\n \"Visibility over P{0!s}\\n - altitudes at mid-dark time -\".format(\n period), transform=fig.transFigure, ha='center', va='bottom',\n fontsize=12)\n else:\n plt.text(\n 0.5, 0.95,\n \"Visibility over {0!s}\\n - altitudes at mid-dark time -\".format(\n year), transform=fig.transFigure, ha='center', va='bottom',\n fontsize=12)\n\n obsco = \"Obs coord.: {0:8.4f}$^\\circ$, {1:8.4f}$^\\circ$, {2:4f} m\".format(\n obs['longitude'], obs['latitude'], obs['altitude'])\n\n plt.text(0.01, 0.97, obsco, transform=fig.transFigure, ha='left',\n va='center', fontsize=10)\n plt.text(0.01, 0.95, obs['name'], transform=fig.transFigure, ha='left',\n va='center', fontsize=10)\n\n # interactive!\n if hover:\n main_axis = fig.axes[0]\n all_lines = set(main_axis.get_lines())\n\n def on_plot_hover(event):\n for line in main_axis.get_lines():\n if line.contains(event)[0]:\n line.set_color('red') # make this line red\n # and all others black\n all_other_lines = all_lines - set([line])\n for other_line in all_other_lines:\n other_line.set_color('black')\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect('motion_notify_event', on_plot_hover)\n\n return fig\n\n\ndef StarObsAxis(ax, year=None, targets=None, observatory=None, period=None,\n hover=False, sunless_hours=None, remove_watermark=False):\n \"\"\"\n Plot the visibility of target.\n\n Parameters\n ----------\n year: int\n The year for which to calculate the visibility.\n targets: list\n List of targets.\n Each target should be a dictionary with keys 'name' and 'coord'.\n The key 'name' is a string, 'coord' is a SkyCoord object.\n observatory: string\n Name of the observatory that pyasl.observatory can resolve.\n Basically, any of pyasl.listObservatories().keys()\n period: string, optional\n ESO period for 
which to calculate the visibility. Overrides `year`.\n hover: boolean, optional\n If True, color visibility lines when mouse over.\n sunless_hours: float, optional\n If not None, plot sunless hours above this airmass\n \"\"\"\n\n from mpl_toolkits.axes_grid1 import host_subplot\n from matplotlib.ticker import MultipleLocator\n from matplotlib.font_manager import FontProperties\n from matplotlib import rcParams\n # rcParams['xtick.major.pad'] = 12\n font0 = FontProperties()\n font1 = font0.copy()\n font0.set_family('sans-serif')\n font0.set_weight('light')\n font1.set_family('sans-serif')\n font1.set_weight('medium')\n\n # set the observatory\n if isinstance(observatory, dict):\n obs = observatory\n else:\n obs = pyasl.observatory(observatory)\n\n # fig = plt.figure(figsize=(15, 10))\n # fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)\n\n # watermak\n # if not remove_watermark:\n # fig.text(0.99, 0.99,\n # 'Created with\\ngithub.com/iastro-pt/ObservationTools',\n # fontsize=10, color='gray', ha='right', va='top', alpha=0.5)\n\n # plotting sunless hours?\n shmode = False\n if sunless_hours is not None:\n shmode = True\n # limit in airmass (assumed plane-parallel atm)\n shairmass = sunless_hours\n # correspoing limit in altitude\n def f(alt): return pyasl.airmassPP(alt) - shairmass\n shalt = 90 - bisect(f, 0, 89)\n\n if shmode:\n fig.subplots_adjust(hspace=0.35)\n ax = host_subplot(211)\n axsh = host_subplot(212)\n plt.text(0.5, 0.47,\n \"- sunless hours above airmass {:.1f} - \\n\".format(shairmass),\n transform=fig.transFigure, ha='center', va='bottom',\n fontsize=12)\n plt.text(0.5, 0.465,\n \"the thick line above the curves represents the total sunless hours \"\n \"for each day of the year\",\n transform=fig.transFigure, ha='center', va='bottom', fontsize=10)\n\n for n, target in enumerate(targets):\n\n target_coord = target['coord']\n target_ra = target_coord.ra.deg\n target_dec = target_coord.dec.deg\n\n if period is not None:\n jd_start, jd_end = get_ESO_period(period)\n else:\n jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))\n jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))\n\n jdbinsize = 1 # every day\n each_day = np.arange(jd_start, jd_end, jdbinsize)\n jds = []\n\n ## calculate the mid-dark times\n sun = ephem.Sun()\n for day in each_day:\n date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])\n s = ephem.Observer()\n s.date = date_formatted\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n jds.append(ephem.julian_date(s.next_antitransit(sun)))\n\n jds = np.array(jds)\n\n # Get JD floating point\n jdsub = jds - np.floor(jds[0])\n\n # Get alt/az of object\n altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec,\n lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])\n ax.plot(jdsub, altaz[0], '-', color='k', lw=0.8)\n ax.plot(jdsub[altaz[0] > 30], altaz[0]\n [altaz[0] > 30], '-', color='g', lw=2)\n\n # label for each target\n # plabel = \"[{0:2d}] {1!s}\".format(n + 1, target['name'])\n\n # # number of target at the top of the curve\n # ind_label = np.argmax(altaz[0])\n # # or at the bottom if the top is too close to the corners\n # # if jdsub[ind_label] < 5 or jdsub[ind_label] > jdsub.max()-5:\n # # ind_label = np.argmin(altaz[0])\n # ax.text( jdsub[ind_label], altaz[0][ind_label], str(n+1), color=\"b\", fontsize=14, \\\n # fontproperties=font1, va=\"bottom\", ha=\"center\")\n\n # if n + 1 == 29:\n # # too many?\n # 
ax.text(1.1, 1.0-float(n+1)*0.04, \"too many targets\", ha=\"left\", va=\"top\", transform=ax.transAxes, \\\n # fontsize=10, fontproperties=font0, color=\"r\")\n # else:\n # ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha=\"left\", va=\"top\", transform=ax.transAxes, \\\n # fontsize=12, fontproperties=font0, color=\"b\")\n\n if shmode:\n sunless_hours = []\n for day in each_day:\n date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])\n s = ephem.Observer()\n s.date = date_formatted\n s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])\n s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])\n # hours from sunrise to sunset\n td = pyasl.daycnv(ephem.julian_date(s.next_setting(sun)), mode='dt') \\\n - pyasl.daycnv(ephem.julian_date(s.next_rising(sun)), mode='dt')\n sunless_hours.append(24 - td.total_seconds() / 3600)\n\n days = each_day - np.floor(each_day[0])\n axsh.plot(days, sunless_hours, '-', color='k', lw=2)\n axsh.set(\n ylim=(0, 15), yticks=range(1, 15), ylabel='Useful hours',\n yticklabels=[r'${}^{{\\rm h}}$'.format(n) for n in range(1, 15)])\n\n # ax.text(1.1, 1.03, \"List of targets\", ha=\"left\", va=\"top\", transform=ax.transAxes, \\\n # fontsize=12, fontproperties=font0, color=\"b\")\n\n axrange = ax.get_xlim()\n\n if period is None:\n months = range(1, 13)\n ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]\n ax.set_xlim([0, 366])\n ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n ax.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=8)\n # if shmode:\n # axsh.set_xlim([0, 366])\n # axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n # axsh.set_xticklabels(\n # map(calendar.month_abbr.__getitem__, months), fontsize=10)\n else:\n if int(period) % 2 == 0:\n # even ESO period, Oct -> Mar\n months = [10, 11, 12, 1, 2, 3]\n ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]\n ax.set_xlim([0, 181])\n ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n ax.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n if shmode:\n axsh.set_xlim([0, 181])\n axsh.set_xticks(\n np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n axsh.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n else:\n # odd ESO period, Apr -> Sep\n months = range(4, 10)\n ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]\n ax.set_xlim([0, 182])\n ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n ax.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n if shmode:\n axsh.set_xlim([0, 182])\n axsh.set_xticks(\n np.cumsum(ndays)[:-1] + (np.array(ndays) / 2.)[1:])\n axsh.set_xticklabels(\n map(calendar.month_abbr.__getitem__, months), fontsize=10)\n\n if axrange[1] - axrange[0] <= 1.0:\n jdhours = np.arange(0, 3, 1.0 / 24.)\n utchours = (np.arange(0, 72, dtype=int) + 12) % 24\n else:\n jdhours = np.arange(0, 3, 1.0 / 12.)\n utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24\n\n ax.vlines(np.cumsum(ndays)[:-1], 0, 90, color='k', alpha=0.2)\n ax.hlines([30], 0, 366, lw=0.8)\n ax.vlines(dt.datetime.now().timetuple().tm_yday, 30, 90, color='b')\n\n # Make ax2 responsible for \"top\" axis and \"right\" axis\n ax2 = ax.twinx()\n # Set upper x ticks\n # ax2.xaxis.tick_top()\n # ax2.set_xticks(np.cumsum(ndays))\n # ax2.set_xlabel(\"Day\")\n # print(ax.get_xlim())\n\n # plane-parallel airmass\n airmass_ang = np.arange(0, 81, 5)\n geo_airmass = 
pyasl.airmass.airmassPP(airmass_ang)[::-1]\n ax2.set_yticks(airmass_ang)\n airmassformat = []\n for t in range(geo_airmass.size):\n airmassformat.append(\"{0:2.2f}\".format(geo_airmass[t]))\n ax2.set_yticklabels(airmassformat) # , rotation=90)\n ax2.set_ylabel(\"Relative airmass\", labelpad=5)\n ax2.tick_params(axis=\"y\", pad=6, labelsize=8)\n ax2.set_ylim(-9, 80)\n # plt.text(1.02,-0.04, \"Plane-parallel\", transform=ax.transAxes, ha='left', \\\n # va='top', fontsize=10, rotation=90)\n\n # ax22 = ax.twinx()\n # ax22.set_xticklabels([])\n # ax22.set_frame_on(True)\n # ax22.patch.set_visible(False)\n # ax22.yaxis.set_ticks_position('right')\n # ax22.yaxis.set_label_position('right')\n # ax22.spines['right'].set_position(('outward', 30))\n # ax22.spines['right'].set_color('k')\n # ax22.spines['right'].set_visible(True)\n # airmass2 = list(\n # map(\n # lambda ang: pyasl.airmass.airmassSpherical(90. - ang, obs['altitude']),\n # airmass_ang))\n # ax22.set_yticks(airmass_ang)\n # airmassformat = []\n # for t in range(len(airmass2)):\n # airmassformat.append(\" {0:2.2f}\".format(airmass2[t]))\n # ax22.set_yticklabels(airmassformat, rotation=90)\n # ax22.tick_params(axis=\"y\", pad=8, labelsize=8)\n # plt.text(1.05,-0.04, \"Spherical+Alt\", transform=ax.transAxes, ha='left', va='top', \\\n # fontsize=10, rotation=90)\n\n ax.set_ylim([0, 90])\n ax.yaxis.set_major_locator(MultipleLocator(15))\n ax.yaxis.set_minor_locator(MultipleLocator(5))\n yticks = ax.get_yticks()\n ytickformat = []\n for t in range(yticks.size):\n ytickformat.append(str(int(yticks[t])) + r\"$^\\circ$\")\n ax.set_yticklabels(ytickformat, fontsize=10)\n ax.set_ylabel(\"Altitude\", fontsize=10)\n yticksminor = ax.get_yticks(minor=True)\n # ymind = np.where(yticksminor % 15. != 0.)[0]\n # yticksminor = yticksminor[ymind]\n # ax.set_yticks(yticksminor, minor=True)\n # m_ytickformat = []\n # for t in range(yticksminor.size):\n # m_ytickformat.append(str(int(yticksminor[t])) + r\"$^\\circ$\")\n # ax.set_yticklabels(m_ytickformat, minor=True)\n ax.set_ylim([0, 90])\n\n ax.yaxis.grid(color='gray', linestyle='dashed')\n ax.yaxis.grid(color='gray', which=\"minor\", linestyle='dotted')\n ax2.xaxis.grid(color='gray', linestyle='dotted')\n\n # if period is not None:\n # plt.text(\n # 0.5, 0.95,\n # \"Visibility over P{0!s}\\n - altitudes at mid-dark time -\".format(\n # period), transform=fig.transFigure, ha='center', va='bottom',\n # fontsize=12)\n # else:\n # plt.text(\n # 0.5, 0.95,\n # \"Visibility over {0!s}\\n - altitudes at mid-dark time -\".format(\n # year), transform=fig.transFigure, ha='center', va='bottom',\n # fontsize=12)\n\n obsco = \"Obs coord.: {0:8.4f}$^\\circ$, {1:8.4f}$^\\circ$, {2:.0f} m\".format(\n obs['longitude'], obs['latitude'], obs['altitude'])\n\n ax.set_title(obsco, loc='left', fontsize=6)\n ax.set_title('Altitudes at mid-dark time', loc='right', fontsize=8)\n\n # plt.text(0.01, 0.97, obsco, transform=fig.transFigure, ha='left',\n # va='center', fontsize=10)\n # plt.text(0.01, 0.95, obs['name'], transform=fig.transFigure, ha='left',\n # va='center', fontsize=10)\n\n # interactive!\n if hover:\n main_axis = fig.axes[0]\n all_lines = set(main_axis.get_lines())\n\n def on_plot_hover(event):\n for line in main_axis.get_lines():\n if line.contains(event)[0]:\n line.set_color('red') # make this line red\n # and all others black\n all_other_lines = all_lines - set([line])\n for other_line in all_other_lines:\n other_line.set_color('black')\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect('motion_notify_event', 
on_plot_hover)\n\n # return fig\n\n\ndef VisibilityPlot(date=None, targets=None, observatory=None, plotLegend=True,\n showMoon=True, showMoonDist=True, print2file=False,\n remove_watermark=False):\n \"\"\"\n Plot the visibility of target.\n\n Parameters\n ----------\n date: datetime\n The date for which to calculate the visibility.\n targets: list\n List of targets.\n Each target should be a dictionary with keys 'name' and 'coord'.\n The key 'name' is aa string, 'coord' is a SkyCoord object.\n observatory: string\n Name of the observatory that pyasl.observatory can resolve.\n Basically, any of pyasl.listObservatories().keys()\n plotLegend: boolean, optional\n If True (default), show a legend.\n showMoonDist : boolean, optional\n If True (default), the Moon distance will be shown.\n \"\"\"\n\n from mpl_toolkits.axes_grid1 import host_subplot\n from matplotlib.ticker import MultipleLocator\n from matplotlib.font_manager import FontProperties\n from matplotlib import rcParams\n rcParams['xtick.major.pad'] = 12\n\n if isinstance(observatory, dict):\n obs = observatory\n else:\n obs = pyasl.observatory(observatory)\n\n # observer = ephem.Observer()\n # observer.pressure = 0\n # observer.horizon = '-0:34'\n # observer.lat, observer.lon = obs['latitude'], obs['longitude']\n # observer.date = date\n # print(observer.date)\n # print(observer.previous_rising(ephem.Sun()))\n # print(observer.next_setting(ephem.Sun()))\n # print(observer.previous_rising(ephem.Moon()))\n # print(observer.next_setting(ephem.Moon()))\n # observer.horizon = '-6'\n # noon = observer.next_transit(ephem.Sun())\n # print(noon)\n # print(observer.previous_rising(ephem.Sun(), start=noon, use_center=True))\n # print()\n\n fig = plt.figure(figsize=(15, 10))\n fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)\n\n # watermak\n if not remove_watermark:\n fig.text(0.99, 0.99,\n 'Created with\\ngithub.com/iastro-pt/ObservationTools',\n fontsize=10, color='gray', ha='right', va='top', alpha=0.5)\n\n ax = host_subplot(111)\n\n font0 = FontProperties()\n font1 = font0.copy()\n font0.set_family('sans-serif')\n font0.set_weight('light')\n font1.set_family('sans-serif')\n font1.set_weight('medium')\n\n for n, target in enumerate(targets):\n\n target_coord = target['coord']\n target_ra = target_coord.ra.deg\n target_dec = target_coord.dec.deg\n\n # JD array\n jdbinsize = 1.0 / 24. 
/ 20.\n # jds = np.arange(allData[n][\"Obs jd\"][0], allData[n][\"Obs jd\"][2], jdbinsize)\n jd = pyasl.jdcnv(date)\n jd_start = pyasl.jdcnv(date) - 0.5\n jd_end = pyasl.jdcnv(date) + 0.5\n jds = np.arange(jd_start, jd_end, jdbinsize)\n # Get JD floating point\n jdsub = jds - np.floor(jds[0])\n # Get alt/az of object\n altaz = pyasl.eq2hor(jds, np.ones(jds.size)*target_ra, np.ones(jds.size)*target_dec,\n lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])\n # Get alt/az of Sun\n sun_position = pyasl.sunpos(jd)\n sun_ra, sun_dec = sun_position[1], sun_position[2]\n sunpos_altaz = pyasl.eq2hor(jds, np.ones(jds.size)*sun_ra, np.ones(jds.size)*sun_dec,\n lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])\n\n # Define plot label\n plabel = \"[{0:2d}] {1!s}\".format(n + 1, target['name'])\n\n # Find periods of: day, twilight, and night\n day = np.where(sunpos_altaz[0] >= 0.)[0]\n twi = np.where(\n np.logical_and(sunpos_altaz[0] > -18., sunpos_altaz[0] < 0.))[0]\n night = np.where(sunpos_altaz[0] <= -18.)[0]\n\n if (len(day) == 0) and (len(twi) == 0) and (len(night) == 0):\n print\n print(\"VisibilityPlot - no points to draw\")\n print\n\n if showMoon:\n # plot the moon\n mpos = pyasl.moonpos(jds)\n # mpha = pyasl.moonphase(jds)\n mpos_altaz = pyasl.eq2hor(jds, mpos[0], mpos[1],\n lon=obs['longitude'],\n lat=obs['latitude'], alt=obs['altitude'])\n ax.plot(jdsub, mpos_altaz[0], color='k', alpha=0.3, ls='--',\n label='Moon')\n # moonind = np.where( mpos_altaz[0] > 0. )[0]\n\n if showMoonDist:\n mdist = pyasl.getAngDist(mpos[0], mpos[1], np.ones(jds.size)*target_ra,\n np.ones(jds.size)*target_dec)\n bindist = int((2.0 / 24.) / jdbinsize)\n firstbin = np.random.randint(0, bindist)\n for mp in range(0, int(len(jds) / bindist)):\n bind = firstbin + mp * bindist\n if altaz[0][bind] - 1. 
< 5.:\n continue\n ax.text(jdsub[bind], altaz[0][bind]-1., str(int(mdist[bind]))+r\"$^\\circ$\", ha=\"center\", va=\"top\",\n fontsize=8, stretch='ultra-condensed', fontproperties=font0, alpha=1.)\n\n if len(twi) > 1:\n # There are points in twilight\n linebreak = np.where(\n (jdsub[twi][1:] - jdsub[twi][:-1]) > 2.0 * jdbinsize)[0]\n if len(linebreak) > 0:\n plotrjd = np.insert(jdsub[twi], linebreak + 1, np.nan)\n plotdat = np.insert(altaz[0][twi], linebreak + 1, np.nan)\n ax.plot(plotrjd, plotdat, \"-\", color='#BEBEBE', linewidth=1.5)\n else:\n ax.plot(jdsub[twi], altaz[0][twi], \"-\", color='#BEBEBE',\n linewidth=1.5)\n\n ax.plot(jdsub[night], altaz[0][night], '.k', label=plabel)\n ax.plot(jdsub[day], altaz[0][day], '.', color='#FDB813')\n\n altmax = np.argmax(altaz[0])\n ax.text(jdsub[altmax], altaz[0][altmax], str(n+1), color=\"b\", fontsize=14,\n fontproperties=font1, va=\"bottom\", ha=\"center\")\n\n if n + 1 == 29:\n ax.text(1.1, 1.0-float(n+1)*0.04, \"too many targets\", ha=\"left\", va=\"top\", transform=ax.transAxes,\n fontsize=10, fontproperties=font0, color=\"r\")\n else:\n ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha=\"left\", va=\"top\", transform=ax.transAxes,\n fontsize=12, fontproperties=font0, color=\"b\")\n\n ax.text(1.1, 1.03, \"List of targets\", ha=\"left\", va=\"top\", transform=ax.transAxes,\n fontsize=12, fontproperties=font0, color=\"b\")\n\n axrange = ax.get_xlim()\n ax.set_xlabel(\"UT [hours]\")\n\n if axrange[1] - axrange[0] <= 1.0:\n jdhours = np.arange(0, 3, 1.0 / 24.)\n utchours = (np.arange(0, 72, dtype=int) + 12) % 24\n else:\n jdhours = np.arange(0, 3, 1.0 / 12.)\n utchours = (np.arange(0, 72, 2, dtype=int) + 12) % 24\n ax.set_xticks(jdhours)\n ax.set_xlim(axrange)\n ax.set_xticklabels(utchours, fontsize=18)\n\n # Make ax2 responsible for \"top\" axis and \"right\" axis\n ax2 = ax.twin()\n # Set upper x ticks\n ax2.set_xticks(jdhours)\n ax2.set_xticklabels(utchours, fontsize=18)\n ax2.set_xlabel(\"UT [hours]\")\n\n # Horizon angle for airmass\n airmass_ang = np.arange(5., 90., 5.)\n geo_airmass = pyasl.airmass.airmassPP(90. - airmass_ang)\n ax2.set_yticks(airmass_ang)\n airmassformat = []\n for t in range(geo_airmass.size):\n airmassformat.append(\"{0:2.2f}\".format(geo_airmass[t]))\n ax2.set_yticklabels(airmassformat, rotation=90)\n ax2.set_ylabel(\"Relative airmass\", labelpad=32)\n ax2.tick_params(axis=\"y\", pad=10, labelsize=10)\n plt.text(1.015, -0.04, \"Plane-parallel\", transform=ax.transAxes, ha='left',\n va='top', fontsize=10, rotation=90)\n\n ax22 = ax.twin()\n ax22.set_xticklabels([])\n ax22.set_frame_on(True)\n ax22.patch.set_visible(False)\n ax22.yaxis.set_ticks_position('right')\n ax22.yaxis.set_label_position('right')\n ax22.spines['right'].set_position(('outward', 25))\n ax22.spines['right'].set_color('k')\n ax22.spines['right'].set_visible(True)\n airmass2 = list(\n map(\n lambda ang: pyasl.airmass.airmassSpherical(\n 90. 
- ang, obs['altitude']),\n airmass_ang))\n ax22.set_yticks(airmass_ang)\n airmassformat = []\n for t in airmass2:\n airmassformat.append(\"{0:2.2f}\".format(t))\n ax22.set_yticklabels(airmassformat, rotation=90)\n ax22.tick_params(axis=\"y\", pad=10, labelsize=10)\n plt.text(1.045, -0.04, \"Spherical+Alt\", transform=ax.transAxes, ha='left', va='top',\n fontsize=10, rotation=90)\n\n ax3 = ax.twiny()\n ax3.set_frame_on(True)\n ax3.patch.set_visible(False)\n ax3.xaxis.set_ticks_position('bottom')\n ax3.xaxis.set_label_position('bottom')\n ax3.spines['bottom'].set_position(('outward', 50))\n ax3.spines['bottom'].set_color('k')\n ax3.spines['bottom'].set_visible(True)\n\n ltime, ldiff = pyasl.localtime.localTime(\n utchours, np.repeat(obs['longitude'], len(utchours)))\n jdltime = jdhours - ldiff / 24.\n ax3.set_xticks(jdltime)\n ax3.set_xticklabels(utchours)\n ax3.set_xlim([axrange[0], axrange[1]])\n ax3.set_xlabel(\"Local time [hours]\")\n\n ax.set_ylim([0, 91])\n ax.yaxis.set_major_locator(MultipleLocator(15))\n ax.yaxis.set_minor_locator(MultipleLocator(5))\n yticks = ax.get_yticks()\n ytickformat = []\n for t in range(yticks.size):\n ytickformat.append(str(int(yticks[t])) + r\"$^\\circ$\")\n ax.set_yticklabels(ytickformat, fontsize=16)\n ax.set_ylabel(\"Altitude\", fontsize=18)\n yticksminor = ax.get_yticks(minor=True)\n ymind = np.where(yticksminor % 15. != 0.)[0]\n yticksminor = yticksminor[ymind]\n ax.set_yticks(yticksminor, minor=True)\n m_ytickformat = []\n for t in range(yticksminor.size):\n m_ytickformat.append(str(int(yticksminor[t])) + r\"$^\\circ$\")\n ax.set_yticklabels(m_ytickformat, minor=True)\n ax.set_ylim([0, 91])\n\n ax.yaxis.grid(color='gray', linestyle='dashed')\n ax.yaxis.grid(color='gray', which=\"minor\", linestyle='dotted')\n ax2.xaxis.grid(color='gray', linestyle='dotted')\n\n plt.text(0.5, 0.95, \"Visibility on {0!s}\".format(date.date()),\n transform=fig.transFigure, ha='center', va='bottom', fontsize=20)\n\n if plotLegend:\n line1 = matplotlib.lines.Line2D((0, 0), (1, 1), color='#FDB813',\n linestyle=\"-\", linewidth=2)\n line2 = matplotlib.lines.Line2D((0, 0), (1, 1), color='#BEBEBE',\n linestyle=\"-\", linewidth=2)\n line3 = matplotlib.lines.Line2D((0, 0), (1, 1), color='k',\n linestyle=\"-\", linewidth=2)\n line4 = matplotlib.lines.Line2D((0, 0), (1, 1), color='k', alpha=0.2,\n linestyle=\"--\", linewidth=2)\n\n if showMoon:\n lgd2 = plt.legend((line1, line2, line3, line4),\n (\"day\", \"twilight\", \"night\", \"Moon\"),\n bbox_to_anchor=(0.88, 0.18), loc='best',\n borderaxespad=0, prop={'size': 12}, fancybox=True)\n else:\n lgd2 = plt.legend((line1, line2, line3),\n (\"day\", \"twilight\", \"night\"),\n bbox_to_anchor=(0.88, 0.18), loc='best',\n borderaxespad=0, prop={'size': 12}, fancybox=True)\n\n lgd2.get_frame().set_alpha(.9)\n\n obsco = r\"Obs coord.: {0:8.4f}$^\\circ$, {1:8.4f}$^\\circ$, {2:4.2f} m\"\n obsco = obsco.format(obs['longitude'], obs['latitude'], obs['altitude'])\n\n plt.text(0.01, 0.97, obsco, transform=fig.transFigure, ha='left',\n va='center', fontsize=10)\n plt.text(0.01, 0.95, obs['name'], transform=fig.transFigure, ha='left',\n va='center', fontsize=10)\n\n return fig\n\n\nif __name__ == '__main__':\n args = _parser()\n\n target_names = args.targets[0].split(',')\n\n ## Get coordinates for all the targets\n targets = []\n\n # flush keyword was not backported to Python < 3.3\n if sys.version_info[:2] < (3, 3):\n print('Sending queries to CDS...', end=' ')\n sys.stdout.flush()\n else:\n print('Sending queries to CDS...', end=' ', 
flush=True)\n\n for target_name in tqdm(target_names):\n if args.toi: # check the table\n # data = np.genfromtxt('TOI-info.csv', delimiter=',', names=True)\n # data = np.loadtxt('TOI-info.csv', delimiter=',', usecols=(1, 16,17), skiprows=1, dtype={'names': ('TOI', 'RA', 'Dec'), 'formats': (np.float, '|S15', '|S15')},)\n data = np.loadtxt('TOI-info.csv', delimiter=',', usecols=(1, 15, 16),\n skiprows=1, dtype={'names': ('TOI', 'RA', 'Dec'), 'formats': 3*[float]})\n ind = np.where(data['TOI'].astype(int) == int(target_name))[0]\n\n if ind.size == 0:\n print('Could not find target: {0!s}'.format(target_name))\n continue\n\n ind = ind[0]\n coord = SkyCoord(data[ind]['RA'], data[ind]['Dec'], unit=units.deg)\n\n targets.append({\n 'name': target_name,\n 'coord': CacheSkyCoord(coord)\n })\n else:\n try:\n targets.append({\n 'name': target_name,\n 'coord': CacheSkyCoord.from_name(target_name)\n })\n except name_resolve.NameResolveError as e:\n print('Could not find target: {0!s}'.format(target_name))\n\n ## Just print coordinates in STARALT format and exit\n if args.c:\n print('Coordinates for {0!s}\\n'.format(args.targets[0]))\n for target in targets:\n ## name hh mm ss ±dd mm ss\n out = '{0!s}'.format(target['name'])\n ra = target['coord'].ra.hms\n out += ' {0:02d} {1:02d} {2:5.3f}'.format(\n int(ra.h), int(ra.m), ra.s)\n dec = target['coord'].dec.dms\n out += ' {0:02d} {1:02d} {2:5.3f}'.format(\n int(dec.d), int(dec.m), dec.s)\n print(out)\n\n sys.exit(0)\n\n ## Actually calculate the visibility curves\n print('Calculating visibility for {0!s}'.format(args.targets[0]))\n\n P = args.period\n if args.period is not None:\n if args.mode != 'starobs':\n print('Specifying ESO period is only possible in \"starobs\" mode')\n sys.exit(1)\n\n P = args.period[0]\n P = P.replace('P', '') # if user gave --period P100, for example\n\n if args.date == 'today':\n if args.mode == 'staralt':\n # now() gives the current *time* which we don't want\n today = dt.datetime.now()\n date = dt.datetime(today.year, today.month, today.day,\n tzinfo=tz.tzutc())\n elif args.mode == 'starobs':\n date = dt.datetime.now().year\n else:\n if args.mode == 'staralt':\n if \"-\" not in args.date:\n raise ValueError(\n \"Date needs to be provided as YYYY-MM-DD for staralt mode.\"\n )\n ymd = [int(i) for i in args.date.split('-')]\n date = dt.datetime(*ymd)\n elif args.mode == 'starobs':\n if \"-\" in args.date:\n date = int(args.date.split('-')[0])\n else:\n date = int(args.date)\n\n ## Find observatory\n if args.loc is None:\n available_sites = pyasl.listObservatories(show=False)\n\n if args.site.lower() in ('paranal', 'vlt', 'UT1', 'UT2', 'UT3', 'UT4'):\n args.site = 'esoparanal'\n\n if args.site.lower() not in available_sites.keys():\n print('\"{0!s}\" is not a valid observatory code. 
'\n 'Try one of the following:\\n'.format(args.site)\n )\n\n maxCodeLen = max(map(len, available_sites.keys()))\n print((\"{0:\" + str(maxCodeLen) + \"s} \").format(\"Code\") +\n \"Observatory name\")\n print(\"-\" * (21 + maxCodeLen))\n for k in sorted(available_sites.keys(), key=lambda s: s.lower()):\n print((\"{0:\" + str(maxCodeLen) + \"s} --- \").format(k) +\n available_sites[k][\"name\"])\n\n sys.exit(1)\n site = args.site\n\n else:\n loc = list(map(float, args.loc.split(',')))\n site = {\n 'altitude': loc[0],\n 'latitude': loc[1],\n 'longitude': loc[2],\n 'tz': loc[3],\n 'name': 'unknown'\n }\n\n if args.mode == 'staralt':\n fig = VisibilityPlot(date=date, targets=targets, observatory=site,\n remove_watermark=args.remove_watermark,\n showMoon=not args.nomoon)\n\n elif args.mode == 'starobs':\n if args.A is not None:\n am = args.A[0]\n else:\n am = None\n\n fig = StarObsPlot(year=date, targets=targets, observatory=site,\n period=P, hover=args.hover, sunless_hours=am,\n remove_watermark=args.remove_watermark)\n\n if args.save is not None:\n print('Saving the figure to {}'.format(args.save[0]))\n fig.savefig(args.save[0])\n else:\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.ticker.MultipleLocator",
"numpy.cumsum",
"scipy.optimize.bisect",
"numpy.where",
"numpy.random.randint",
"numpy.ones_like",
"numpy.arange",
"numpy.argmax",
"numpy.insert",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"matplotlib.font_manager.FontProperties",
"numpy.full_like",
"numpy.floor",
"numpy.logical_and",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.lines.Line2D",
"numpy.ones",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
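
The record above stores the visibility-plotting helpers from iastro-pt/ObservationTools. As a quick, non-authoritative illustration of how the stored functions are meant to be called, the sketch below uses get_visibility_curve; the module name `visibility`, the target and the observatory code are assumptions made only for this example and are not part of the record.

    # Illustrative sketch only -- assumes the script stored in this record is
    # importable as a local module named `visibility` (hypothetical name).
    import visibility

    # Mid-dark-time altitude of tau Ceti over 2021, as seen from Paranal
    jds, altitudes = visibility.get_visibility_curve(2021, 'HD 10700', 'esoparanal')
    print(jds.shape, altitudes.shape)  # one sample per day of the year
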
kokandeep/open_model_zoo | [
"efd238d02035f8a5417b7b1e25cd4c997d44351f"
] | [
"tools/downloader/pytorch_to_onnx.py"
] | [
"import argparse\nfrom pathlib import Path\nimport sys\n\nimport onnx\nimport torch\nimport torch.onnx\n\n\ndef positive_int_arg(values):\n \"\"\"Check positive integer type for input argument\"\"\"\n result = []\n for value in values.split(','):\n try:\n ivalue = int(value)\n if ivalue < 0:\n raise argparse.ArgumentTypeError('Argument must be a positive integer')\n result.append(ivalue)\n except Exception as exc:\n print(exc)\n sys.exit('Invalid value for input argument: {!r}, a positive integer is expected'.format(value))\n return result\n\n\ndef model_parameter(parameter):\n param, value = parameter.split('=', 1)\n try:\n value = eval(value, {}, {})\n except NameError as err:\n print('Cannot evaluate {!r} value in {}. For string values use \"{}=\\'{}\\'\" (with all quotes).'\n .format(value, parameter, param, value))\n sys.exit(err)\n return param, value\n\n\ndef parse_args():\n \"\"\"Parse input arguments\"\"\"\n\n parser = argparse.ArgumentParser(description='Conversion of pretrained models from PyTorch to ONNX')\n\n parser.add_argument('--model-name', type=str, required=True,\n help='Model to convert. May be class name or name of constructor function')\n parser.add_argument('--weights', type=str, required=True,\n help='Path to the weights in PyTorch\\'s format')\n parser.add_argument('--input-shape', metavar='INPUT_DIM', type=positive_int_arg, required=True,\n help='Shape of the input blob')\n parser.add_argument('--output-file', type=Path, required=True,\n help='Path to the output ONNX model')\n parser.add_argument('--from-torchvision', action='store_true',\n help='Sets model\\'s origin as Torchvision*')\n parser.add_argument('--model-path', type=str,\n help='Path to PyTorch model\\'s source code if model is not from Torchvision*')\n parser.add_argument('--import-module', type=str, default='',\n help='Name of module, which contains model\\'s constructor.'\n 'Requires if model not from Torchvision')\n parser.add_argument('--input-names', type=str, metavar='L[,L...]',\n help='Space separated names of the input layers')\n parser.add_argument('--output-names', type=str, metavar='L[,L...]',\n help='Space separated names of the output layers')\n parser.add_argument('--model-param', type=model_parameter, default=[], action='append',\n help='Pair \"name\"=\"value\" of model constructor parameter')\n return parser.parse_args()\n\n\ndef load_model(model_name, weights, from_torchvision, model_path, module_name, model_params):\n \"\"\"Import model and load pretrained weights\"\"\"\n\n if from_torchvision:\n try:\n import torchvision.models\n creator = getattr(torchvision.models, model_name)\n model = creator()\n except ImportError as err:\n print(err)\n sys.exit('The torchvision package was not found.'\n 'Please install it to default location or '\n 'update PYTHONPATH environment variable '\n 'with the path to the installed torchvision package.')\n except AttributeError as err:\n print('ERROR: Model {} doesn\\'t exist in torchvision!'.format(model_name))\n sys.exit(err)\n else:\n sys.path.append(model_path)\n try:\n module = __import__(module_name)\n creator = getattr(module, model_name)\n model = creator(**model_params)\n except ImportError as err:\n print('Module {} in {} doesn\\'t exist. 
Check import path and name'.format(model_name, model_path))\n sys.exit(err)\n except AttributeError as err:\n print('ERROR: Module {} contains no class or function with name {}!'\n .format(module_name, model_name))\n sys.exit(err)\n\n try:\n model.load_state_dict(torch.load(weights, map_location='cpu'))\n except RuntimeError as err:\n print('ERROR: Weights from \\n{}\\n cannot be loaded for model {}! Check matching between model and weights')\n sys.exit(err)\n return model\n\n\ndef convert_to_onnx(model, input_shape, output_file, input_names, output_names):\n \"\"\"Convert PyTorch model to ONNX and check the resulting onnx model\"\"\"\n\n output_file.parent.mkdir(parents=True, exist_ok=True)\n model.eval()\n dummy_input = torch.randn(input_shape)\n model(dummy_input)\n torch.onnx.export(model, dummy_input, str(output_file), verbose=False,\n input_names=input_names.split(','), output_names=output_names.split(','))\n\n # Model check after conversion\n model = onnx.load(str(output_file))\n try:\n onnx.checker.check_model(model)\n print('ONNX check passed successfully.')\n except onnx.onnx_cpp2py_export.checker.ValidationError as exc:\n sys.exit('ONNX check failed with error: ' + str(exc))\n\n\ndef main():\n args = parse_args()\n model = load_model(args.model_name, args.weights, args.from_torchvision,\n args.model_path, args.import_module, dict(args.model_param))\n\n convert_to_onnx(model, args.input_shape, args.output_file, args.input_names, args.output_names)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.randn",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
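
As a rough illustration of the converter stored above, the sketch below drives its convert_to_onnx function directly with a torchvision model instead of going through the command line; the model choice, the output path and the assumption that the script is saved locally as pytorch_to_onnx.py are illustrative only.

    # Illustrative sketch only -- assumes the script above is saved as pytorch_to_onnx.py
    from pathlib import Path
    import torchvision.models
    from pytorch_to_onnx import convert_to_onnx

    model = torchvision.models.resnet18()          # randomly initialised example model
    convert_to_onnx(model,
                    input_shape=[1, 3, 224, 224],  # NCHW dummy input shape
                    output_file=Path('resnet18.onnx'),
                    input_names='input',           # comma-separated names, as the script expects
                    output_names='output')
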
OpenSourceEconomics/ose-scientific-computing-course-wirecard | [
"905c64d8dc7ef93f2231533c8e58614de63402ce"
] | [
"auxiliary/aux_m/function_get_start_point.py"
] | [
"from numpy import *\nimport pandas as pd\nimport random\nimport nlopt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numbers\nimport math\nimport random\nimport autograd.numpy as ag\nfrom autograd import grad\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom numpy.lib.function_base import vectorize\nfrom autograd import value_and_grad\nnp.set_printoptions(precision=20)\npd.set_option(\"display.precision\", 14)\n\n\n##### the functions in this file are used to generated random starting points for test functions and our economic application\n\n\ndef get_starting_points(n,problem_info_object,p):\n \n \n ### n: number of desired dimensions of the problem\n ### problem_info_object: object that contains the known information of the problem e.g.: g_1=griewank_info(n,a=200)\n ### p: desired number of starting points you want to draw\n \n ## as Guvenen et al. do not specify how they generate the random starting points I will choose a method\n ### Method:\n # as the starting point has to be a vector fo dimension = dimension of the function, I draw every coordinate\n # for the vector from a uniform distribution\n # repeat this until you get 100 vectors of dimension = dim of function which are randomly generated\n data=[]\n \n lower_b=problem_info_object.lower_bound\n upper_b=problem_info_object.upper_bound\n \n for i in range(n):\n v=np.random.uniform(lower_b[i],upper_b[i],p)\n data.append(v)\n df=pd.DataFrame(data)\n return df.transpose()\n\n\n\ndef get_start_points_application(alpha_dirichlet,p,B):\n \n ## alpha dirichlet is a vector that contains alphas for dirichlet distribution\n ## this also stores the dimension of the problem\n ## p is the number of start points we want to generate\n ## B is the budget we consider\n ## function works\n data=[]\n \n for i in range(p):\n \n vector_dirichlet=np.random.dirichlet(alpha_dirichlet,1)\n vector_budget_adjusted=vector_dirichlet*B\n data.append(vector_budget_adjusted)\n \n df=pd.DataFrame(np.concatenate(data))\n return df\n \n \n \n \n\n\n\n\n\n\n"
] | [
[
"numpy.set_printoptions",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.random.uniform",
"pandas.set_option",
"numpy.random.dirichlet"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
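
A small usage sketch for the record above: get_start_points_application draws Dirichlet-distributed budget allocations, so p draws over len(alpha_dirichlet) goods each sum to the budget B. The flat import path is an assumption for the example.

    # Illustrative sketch only -- assumes the file above is importable by its name
    from function_get_start_point import get_start_points_application

    # 5 random allocations of a budget of 100 over 3 goods, each row summing to 100
    starts = get_start_points_application(alpha_dirichlet=[1.0, 1.0, 1.0], p=5, B=100)
    print(starts.shape)  # (5, 3)
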
Heldenkrieger01/examples | [
"10aa1fb6796f0c275064d64e46699edd116be485"
] | [
"lite/examples/recommendation/ml/data/example_generation_movielens.py"
] | [
"# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Prepare TF.Examples for on-device recommendation model.\n\nFollowing functions are included: 1) downloading raw data 2) processing to user\nactivity sequence and splitting to train/test data 3) convert to TF.Examples\nand write in output location.\n\nMore information about the movielens dataset can be found here:\nhttps://grouplens.org/datasets/movielens/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport pandas as pd\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n# Permalinks to download movielens data.\nMOVIELENS_1M_URL = \"http://files.grouplens.org/datasets/movielens/ml-1m.zip\"\nMOVIELENS_ZIP_FILENAME = \"ml-1m.zip\"\nMOVIELENS_ZIP_HASH = \"a6898adb50b9ca05aa231689da44c217cb524e7ebd39d264c56e2832f2c54e20\"\nMOVIELENS_EXTRACTED_DIR = \"ml-1m\"\nRATINGS_FILE_NAME = \"ratings.dat\"\nMOVIES_FILE_NAME = \"movies.dat\"\nRATINGS_DATA_COLUMNS = [\"UserID\", \"MovieID\", \"Rating\", \"Timestamp\"]\nMOVIES_DATA_COLUMNS = [\"MovieID\", \"Title\", \"Genres\"]\nOUTPUT_TRAINING_DATA_FILENAME = \"train_movielens_1m.tfrecord\"\nOUTPUT_TESTING_DATA_FILENAME = \"test_movielens_1m.tfrecord\"\nOUTPUT_MOVIE_VOCAB_FILENAME = \"movie_vocab.json\"\nOOV_MOVIE_ID = 0\n\n\ndef define_flags():\n flags.DEFINE_string(\"data_dir\", \"/tmp\",\n \"Path to download and store movielens data.\")\n flags.DEFINE_string(\"output_dir\", None,\n \"Path to the directory of output files.\")\n flags.DEFINE_bool(\"build_movie_vocab\", True,\n \"If yes, generate sorted movie vocab.\")\n flags.DEFINE_integer(\"min_timeline_length\", 3,\n \"The minimum timeline length to construct examples.\")\n flags.DEFINE_integer(\"max_context_length\", 10,\n \"The maximun length of user context history.\")\n\n\ndef download_and_extract_data(data_directory, url=MOVIELENS_1M_URL):\n \"\"\"Download and extract zip containing MovieLens data to a given directory.\n\n Args:\n data_directory: Local path to extract dataset to.\n url: Direct path to MovieLens dataset .zip file. 
See constants above for\n examples.\n\n Returns:\n Downloaded and extracted data file directory.\n \"\"\"\n path_to_zip = tf.keras.utils.get_file(\n fname=MOVIELENS_ZIP_FILENAME,\n origin=url,\n file_hash=MOVIELENS_ZIP_HASH,\n hash_algorithm=\"sha256\",\n extract=True,\n cache_dir=data_directory)\n extracted_file_dir = os.path.join(\n os.path.dirname(path_to_zip), MOVIELENS_EXTRACTED_DIR)\n return extracted_file_dir\n\n\ndef read_data(data_directory):\n \"\"\"Read movielens ratings.dat and movies.dat file into dataframe.\"\"\"\n ratings_df = pd.read_csv(\n os.path.join(data_directory, RATINGS_FILE_NAME),\n sep=\"::\",\n names=RATINGS_DATA_COLUMNS)\n ratings_df[\"Timestamp\"] = ratings_df[\"Timestamp\"].apply(int)\n movies_df = pd.read_csv(\n os.path.join(data_directory, MOVIES_FILE_NAME),\n sep=\"::\",\n names=MOVIES_DATA_COLUMNS)\n return ratings_df, movies_df\n\n\ndef convert_to_timelines(ratings_df):\n \"\"\"Convert ratings data to user.\"\"\"\n timelines = collections.defaultdict(list)\n movie_counts = collections.Counter()\n for user_id, movie_id, _, timestamp in ratings_df.values:\n timelines[user_id].append([movie_id, int(timestamp)])\n movie_counts[movie_id] += 1\n # Sort per-user timeline by timestamp\n for (user_id, timeline) in timelines.items():\n timeline.sort(key=lambda x: x[1])\n timelines[user_id] = [movie_id for movie_id, _ in timeline]\n return timelines, movie_counts\n\n\ndef generate_examples_from_timelines(timelines,\n min_timeline_len=3,\n max_context_len=100):\n \"\"\"Convert user timelines to tf examples.\n\n Convert user timelines to tf examples by adding all possible context-label\n pairs in the examples pool.\n\n Args:\n timelines: the user timelines to process.\n min_timeline_len: minimum length of the user timeline.\n max_context_len: maximum length of context signals.\n\n Returns:\n train_examples: tf example list for training.\n test_examples: tf example list for testing.\n \"\"\"\n train_examples = []\n test_examples = []\n for timeline in timelines.values():\n # Skip if timeline is shorter than min_timeline_len.\n if len(timeline) < min_timeline_len:\n continue\n for label_idx in range(1, len(timeline)):\n start_idx = max(0, label_idx - max_context_len)\n context = timeline[start_idx:label_idx]\n # Pad context with out-of-vocab movie id 0.\n while len(context) < max_context_len:\n context.append(OOV_MOVIE_ID)\n label = timeline[label_idx]\n feature = {\n \"context\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=context)),\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))\n }\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n if label_idx == len(timeline) - 1:\n test_examples.append(tf_example.SerializeToString())\n else:\n train_examples.append(tf_example.SerializeToString())\n return train_examples, test_examples\n\n\ndef write_tfrecords(tf_examples, filename):\n \"\"\"Writes tf examples to tfrecord file, and returns the count.\"\"\"\n with tf.io.TFRecordWriter(filename) as file_writer:\n i = 0\n for example in tf_examples:\n file_writer.write(example)\n i += 1\n return i\n\n\ndef generate_sorted_movie_vocab(movies_df, movie_counts):\n \"\"\"Generate vocabulary for movies, and sort by usage count.\"\"\"\n vocab_movies = []\n for movie_id, title, genres in movies_df.values:\n count = movie_counts[movie_id] if movie_id in movie_counts else 0\n vocab_movies.append([movie_id, title, genres, count])\n vocab_movies.sort(key=lambda x: x[3], reverse=True)\n return vocab_movies\n\n\ndef 
write_vocab_json(vocab_movies, filename):\n \"\"\"Write generated movie vocabulary to specified file.\"\"\"\n with open(filename, \"w\", encoding=\"utf-8\") as jsonfile:\n json.dump(vocab_movies, jsonfile, indent=2)\n\n\ndef generate_datasets(data_dir, output_dir, min_timeline_length,\n max_context_length, build_movie_vocab):\n \"\"\"Generates train and test datasets as TFRecord, and returns stats.\"\"\"\n if not tf.io.gfile.exists(data_dir):\n tf.io.gfile.makedirs(data_dir)\n\n extracted_file_dir = download_and_extract_data(data_directory=data_dir)\n ratings_df, movies_df = read_data(data_directory=extracted_file_dir)\n timelines, movie_counts = convert_to_timelines(ratings_df)\n train_examples, test_examples = generate_examples_from_timelines(\n timelines=timelines,\n min_timeline_len=min_timeline_length,\n max_context_len=max_context_length)\n\n if not tf.io.gfile.exists(output_dir):\n tf.io.gfile.makedirs(output_dir)\n train_file = os.path.join(output_dir, OUTPUT_TRAINING_DATA_FILENAME)\n train_size = write_tfrecords(tf_examples=train_examples, filename=train_file)\n test_file = os.path.join(output_dir, OUTPUT_TESTING_DATA_FILENAME)\n test_size = write_tfrecords(tf_examples=test_examples, filename=test_file)\n stats = {\n \"train_size\": train_size,\n \"test_size\": test_size,\n \"train_file\": train_file,\n \"test_file\": test_file,\n }\n if build_movie_vocab:\n vocab_movies = generate_sorted_movie_vocab(\n movies_df=movies_df, movie_counts=movie_counts)\n vocab_file = os.path.join(output_dir, OUTPUT_MOVIE_VOCAB_FILENAME)\n write_vocab_json(vocab_movies=vocab_movies, filename=vocab_file)\n stats.update(vocab_size=len(vocab_movies), vocab_file=vocab_file)\n return stats\n\n\ndef main(_):\n stats = generate_datasets(FLAGS.data_dir, FLAGS.output_dir,\n FLAGS.min_timeline_length, FLAGS.max_context_length,\n FLAGS.build_movie_vocab)\n tf.compat.v1.logging.info(\"Generated dataset: %s\", stats)\n\n\nif __name__ == \"__main__\":\n define_flags()\n app.run(main)\n"
] | [
[
"tensorflow.io.TFRecordWriter",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.makedirs",
"tensorflow.compat.v1.logging.info",
"tensorflow.keras.utils.get_file",
"tensorflow.train.Features",
"tensorflow.train.Int64List"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
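
To illustrate the example-generation pipeline stored above, the sketch below feeds a tiny hand-made ratings frame through convert_to_timelines and generate_examples_from_timelines; the module name and the toy ratings are assumptions, not data from the record.

    # Illustrative sketch only -- assumes the script above is importable as
    # example_generation_movielens; the toy ratings below are made up.
    import pandas as pd
    import example_generation_movielens as gen

    ratings_df = pd.DataFrame(
        [[1, 10, 5, 100], [1, 11, 4, 200], [1, 12, 3, 300], [2, 10, 4, 150]],
        columns=["UserID", "MovieID", "Rating", "Timestamp"])
    timelines, counts = gen.convert_to_timelines(ratings_df)
    train, test = gen.generate_examples_from_timelines(
        timelines, min_timeline_len=3, max_context_len=10)
    print(len(train), len(test))  # serialized tf.train.Example strings: 1 train, 1 test here
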
Chandler/pyFM | [
"7ee034fff55bb333d278503479ce17f32cca4237"
] | [
"pyFM/spectral/convert.py"
] | [
"import numpy as np\nfrom sklearn.neighbors import KDTree\nimport scipy.linalg\n\ntry:\n import pynndescent\n index = pynndescent.NNDescent(np.random.random((100, 3)), n_jobs=2)\n del index\n ANN = True\nexcept ImportError:\n ANN = False\n\n\ndef p2p_to_FM(p2p, eigvects1, eigvects2, A2=None):\n \"\"\"\n Compute a Functional Map from a vertex to vertex maps (with possible subsampling).\n Can compute with the pseudo inverse of eigenvectors (if no subsampling) or least square.\n\n Parameters\n ------------------------------\n p2p : (n2,) vertex to vertex map from target to source (for the functional map).\n For each vertex on the target shape, gives the index of the corresponding vertex on mesh 1.\n eigvects1 : (n1,k1) eigenvectors on source mesh. Possibly subsampled on the first dimension.\n eigvects2 : (n2,k2) eigenvectors on target mesh. Possibly subsampled on the first dimension.\n A2 : (n2,n2) area matrix of the target mesh. If specified, the eigenvectors can't be subsampled\n\n Outputs\n -------------------------------\n FM : (k2,k1) functional map corresponding to the p2p map given.\n Solved with pseudo inverse if A2 is given, else using least square.\n \"\"\"\n if A2 is not None:\n if A2.shape[0] != eigvects2.shape[0]:\n raise ValueError(\"Can't compute pseudo inverse with subsampled eigenvectors\")\n return eigvects2.T @ A2 @ eigvects1[p2p, :] # (k2,k1)\n\n # Solve with least square\n return scipy.linalg.lstsq(eigvects2, eigvects1[p2p, :])[0] # (k2,k1)\n\n\ndef mesh_p2p_to_FM(p2p, mesh1, mesh2, dims=None, subsample=None):\n \"\"\"\n Compute a Functional Map from a vertex to vertex maps (with possible subsampling).\n\n Parameters\n ------------------------------\n p2p : (n2,) or (n2',) vertex to vertex map from mesh2 to mesh1.\n For each vertex on mesh2 gives the index of the corresponding vertex on mesh 1.\n If subsample is specified, gives a index-to-index map between the subsamples.\n mesh1 : source mesh for the functional map. Requires enough processed eigenvectors.\n mesh2 : target mesh for the functional map. Requires enough processed eigenvectors.\n dims : int, or 2-uple of int. 
Dimension of the functional map to return.\n If None uses all the processed eigenvectors.\n If single int k , returns a (k,k) functional map\n If 2-uple of int (k1,k2), returns a (k2,k1) functional map\n subsample : None or size 2 iterable ((n1',), (n2',)).\n Subsample of vertices for both mesh.\n If specified the p2p map is between the two subsamples.\n \"\"\"\n if dims is None:\n k1,k2 = len(mesh1.eigenvalues),len(mesh2.eigenvalues)\n elif type(dims) is int:\n k1 = dims\n k2 = dims\n else:\n k1,k2 = dims\n\n if subsample is None:\n return p2p_to_FM(p2p, mesh1.eigenvectors[:, :k1], mesh2.eigenvectors[:, :k2], A2=mesh2.A)\n\n sub1,sub2 = subsample\n return p2p_to_FM(p2p, mesh1.eigenvectors[sub1, :k1], mesh2.eigenvectors[sub2, :k2])\n\n\ndef FM_to_p2p(FM, eigvects1, eigvects2, use_ANN=False):\n \"\"\"\n Obtain a point to point map from a functional map using the adjoint.\n For each row in Phi2, looks for the nearest row in Phi1 @ C.T\n\n Parameters\n --------------------------\n FM : (k2,k1) functional map in reduced basis\n eigvects1 : (n1,k1') first k' eigenvectors of the first basis (k1'>k1).\n First dimension can be subsampled.\n eigvects2 : (n2,k2') first k' eigenvectors of the second basis (k2'>k2)\n First dimension can be subsampled.\n use_ANN : Whether to use approximate nearest neighbors\n\n Outputs:\n --------------------------\n p2p : (n2,) match vertex i on shape 2 to vertex p2p[i] on shape 1,\n or equivalent result if the eigenvectors are subsampled.\n \"\"\"\n if use_ANN and not ANN:\n raise ValueError('Please install pydescent to achieve Approximate Nearest Neighbor')\n\n k2,k1 = FM.shape\n\n assert k1 <= eigvects1.shape[1], \\\n f'At least {k1} should be provided, here only {eigvects1.shape[1]} are given'\n assert k2 <= eigvects2.shape[1], \\\n f'At least {k2} should be provided, here only {eigvects2.shape[1]} are given'\n\n if use_ANN:\n index = pynndescent.NNDescent(eigvects1[:, :k1] @ FM.T, n_jobs=8)\n matches,_ = index.query(eigvects2[:, :k2],k=1) # (n2,1)\n matches = matches.flatten() # (n2,)\n else:\n tree = KDTree(eigvects1[:, :k1] @ FM.T) # Tree on (n1,k2)\n matches = tree.query(eigvects2[:, :k2], k=1, return_distance=False).flatten() # (n2,)\n\n return matches # (n2,)\n\n\ndef FM_to_p2p_aux(FM, eigvects1, eigvects2, use_ANN=False):\n \"\"\"\n Obtain a point to point map from a functional map with another method.\n For each row in Phi2 @ C, looks for the nearest row in Phi1\n\n Parameters\n --------------------------\n FM : (k2,k1) functional map in reduced basis\n eigvects1 : (n1,k1') first k' eigenvectors of the first basis (k1'>k1).\n First dimension can be subsampled.\n eigvects2 : (n2,k2') first k' eigenvectors of the second basis (k2'>k2)\n First dimension can be subsampled.\n use_ANN : Whether to use approximate nearest neighbors\n\n Outputs:\n --------------------------\n p2p : (n2,) match vertex i on shape 2 to vertex p2p[i] on shape 1,\n or equivalent result if the eigenvectors are subsampled.\n \"\"\"\n if use_ANN and not ANN:\n raise ValueError('Please install pydescent to achieve Approximate Nearest Neighbor')\n\n k2,k1 = FM.shape\n\n assert k1 <= eigvects1.shape[1], \\\n f'At least {k1} should be provided, here only {eigvects1.shape[1]} are given'\n assert k2 <= eigvects2.shape[1], \\\n f'At least {k2} should be provided, here only {eigvects2.shape[1]} are given'\n\n if use_ANN:\n index = pynndescent.NNDescent(eigvects1[:, :k1], n_jobs=8)\n matches,_ = index.query(eigvects2[:, :k2] @ FM, k=1) # (n2,1)\n matches = matches.flatten() # (n2,)\n else:\n tree 
= KDTree(eigvects1[:, :k1]) # Tree on (n1,k1)\n matches = tree.query(eigvects2[:, :k2] @ FM, k=1, return_distance=False).flatten() # (n2,)\n\n return matches # (n2,)\n"
] | [
[
"numpy.random.random",
"sklearn.neighbors.KDTree"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
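A minimal sketch of the nearest-neighbor conversion described in the FM_to_p2p docstring above: rows of eigvects1 @ FM.T are indexed with a KDTree and every row of eigvects2 is matched to its nearest neighbor. The shapes and random data below are illustrative assumptions, not values from the repository.

    import numpy as np
    from sklearn.neighbors import KDTree

    rng = np.random.default_rng(0)
    n1, n2, k1, k2 = 100, 120, 20, 20
    eigvects1 = rng.standard_normal((n1, k1))   # spectral basis of shape 1
    eigvects2 = rng.standard_normal((n2, k2))   # spectral basis of shape 2
    FM = rng.standard_normal((k2, k1))          # (k2, k1) functional map

    tree = KDTree(eigvects1 @ FM.T)             # rows of shape 1 mapped into R^k2
    p2p = tree.query(eigvects2, k=1, return_distance=False).ravel()
    # p2p[i] = vertex of shape 1 matched to vertex i of shape 2, shape (n2,)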
bendichter/element-array-ephys | [
"720eb00e60c6df928f4e6cbe938d4db625feab58"
] | [
"element_array_ephys/ephys.py"
] | [
"import datajoint as dj\nimport pathlib\nimport re\nimport numpy as np\nimport inspect\nimport importlib\n\nfrom .readers import spikeglx, kilosort, openephys\nfrom . import probe, find_full_path, find_root_directory, dict_to_uuid\n\nschema = dj.schema()\n\n_linking_module = None\n\n\ndef activate(ephys_schema_name, probe_schema_name=None, *, create_schema=True,\n create_tables=True, linking_module=None):\n \"\"\"\n activate(ephys_schema_name, probe_schema_name=None, *, create_schema=True, create_tables=True, linking_module=None)\n :param ephys_schema_name: schema name on the database server to activate the `ephys` element\n :param probe_schema_name: schema name on the database server to activate the `probe` element\n - may be omitted if the `probe` element is already activated\n :param create_schema: when True (default), create schema in the database if it does not yet exist.\n :param create_tables: when True (default), create tables in the database if they do not yet exist.\n :param linking_module: a module name or a module containing the\n required dependencies to activate the `ephys` element:\n Upstream tables:\n + Session: parent table to ProbeInsertion, typically identifying a recording session\n + SkullReference: Reference table for InsertionLocation, specifying the skull reference\n used for probe insertion location (e.g. Bregma, Lambda)\n Functions:\n + get_ephys_root_data_dir() -> list\n Retrieve the root data directory - e.g. containing the raw ephys recording files for all subject/sessions.\n :return: a string for full path to the root data directory\n + get_session_directory(session_key: dict) -> str\n Retrieve the session directory containing the recorded Neuropixels data for a given Session\n :param session_key: a dictionary of one Session `key`\n :return: a string for full path to the session directory\n \"\"\"\n\n if isinstance(linking_module, str):\n linking_module = importlib.import_module(linking_module)\n assert inspect.ismodule(linking_module),\\\n \"The argument 'dependency' must be a module's name or a module\"\n\n global _linking_module\n _linking_module = linking_module\n\n # activate\n probe.activate(probe_schema_name, create_schema=create_schema,\n create_tables=create_tables)\n schema.activate(ephys_schema_name, create_schema=create_schema,\n create_tables=create_tables, add_objects=_linking_module.__dict__)\n\n\n# -------------- Functions required by the elements-ephys ---------------\n\ndef get_ephys_root_data_dir() -> list:\n \"\"\"\n All data paths, directories in DataJoint Elements are recommended to be stored as\n relative paths, with respect to some user-configured \"root\" directory,\n which varies from machine to machine (e.g. different mounted drive locations)\n\n get_ephys_root_data_dir() -> list\n This user-provided function retrieves the possible root data directories\n containing the ephys data for all subjects/sessions\n (e.g. 
acquired SpikeGLX or Open Ephys raw files,\n output files from spike sorting routines, etc.)\n :return: a string for full path to the ephys root data directory,\n or list of strings for possible root data directories\n \"\"\"\n return _linking_module.get_ephys_root_data_dir()\n\n\ndef get_session_directory(session_key: dict) -> str:\n \"\"\"\n get_session_directory(session_key: dict) -> str\n Retrieve the session directory containing the\n recorded Neuropixels data for a given Session\n :param session_key: a dictionary of one Session `key`\n :return: a string for full path to the session directory\n \"\"\"\n return _linking_module.get_session_directory(session_key)\n\n\n# ----------------------------- Table declarations ----------------------\n\n\n@schema\nclass AcquisitionSoftware(dj.Lookup):\n definition = \"\"\" # Name of software used for recording of neuropixels probes - SpikeGLX or Open Ephys\n acq_software: varchar(24) \n \"\"\"\n contents = zip(['SpikeGLX', 'Open Ephys'])\n\n\n@schema\nclass ProbeInsertion(dj.Manual):\n definition = \"\"\"\n # Probe insertion implanted into an animal for a given session.\n -> Session\n insertion_number: tinyint unsigned\n ---\n -> probe.Probe\n \"\"\"\n\n\n@schema\nclass InsertionLocation(dj.Manual):\n definition = \"\"\"\n # Brain Location of a given probe insertion.\n -> ProbeInsertion\n ---\n -> SkullReference\n ap_location: decimal(6, 2) # (um) anterior-posterior; ref is 0; more anterior is more positive\n ml_location: decimal(6, 2) # (um) medial axis; ref is 0 ; more right is more positive\n depth: decimal(6, 2) # (um) manipulator depth relative to surface of the brain (0); more ventral is more negative\n theta=null: decimal(5, 2) # (deg) - elevation - rotation about the ml-axis [0, 180] - w.r.t the z+ axis\n phi=null: decimal(5, 2) # (deg) - azimuth - rotation about the dv-axis [0, 360] - w.r.t the x+ axis\n beta=null: decimal(5, 2) # (deg) rotation about the shank of the probe [-180, 180] - clockwise is increasing in degree - 0 is the probe-front facing anterior\n \"\"\"\n\n\n@schema\nclass EphysRecording(dj.Imported):\n definition = \"\"\"\n # Ephys recording from a probe insertion for a given session.\n -> ProbeInsertion \n ---\n -> probe.ElectrodeConfig\n -> AcquisitionSoftware\n sampling_rate: float # (Hz) \n \"\"\"\n\n class EphysFile(dj.Part):\n definition = \"\"\"\n # Paths of files of a given EphysRecording round.\n -> master\n file_path: varchar(255) # filepath relative to root data directory\n \"\"\"\n\n def make(self, key):\n sess_dir = pathlib.Path(get_session_directory(key))\n sess_dir_full = find_full_path(get_ephys_root_data_dir(), sess_dir)\n\n inserted_probe_serial_number = (ProbeInsertion * probe.Probe & key).fetch1('probe')\n\n # search session dir and determine acquisition software\n for ephys_pattern, ephys_acq_type in zip(['*.ap.meta', '*.oebin'],\n ['SpikeGLX', 'Open Ephys']):\n ephys_meta_filepaths = [fp for fp in sess_dir_full.rglob(ephys_pattern)]\n if ephys_meta_filepaths:\n acq_software = ephys_acq_type\n break\n else:\n raise FileNotFoundError(\n f'Ephys recording data not found!'\n f' Neither SpikeGLX nor Open Ephys recording files found'\n f' in {sess_dir}')\n\n if acq_software == 'SpikeGLX':\n for meta_filepath in ephys_meta_filepaths:\n spikeglx_meta = spikeglx.SpikeGLXMeta(meta_filepath)\n if str(spikeglx_meta.probe_SN) == inserted_probe_serial_number:\n break\n else:\n raise FileNotFoundError(\n 'No SpikeGLX data found for probe insertion: {}'.format(key))\n\n if re.search('(1.0|2.0)', 
spikeglx_meta.probe_model):\n probe_type = spikeglx_meta.probe_model\n electrode_query = probe.ProbeType.Electrode & {'probe_type': probe_type}\n\n probe_electrodes = {\n (shank, shank_col, shank_row): key\n for key, shank, shank_col, shank_row in zip(*electrode_query.fetch(\n 'KEY', 'shank', 'shank_col', 'shank_row'))}\n\n electrode_group_members = [\n probe_electrodes[(shank, shank_col, shank_row)]\n for shank, shank_col, shank_row, _ in spikeglx_meta.shankmap['data']]\n else:\n raise NotImplementedError(\n 'Processing for neuropixels probe model'\n ' {} not yet implemented'.format(spikeglx_meta.probe_model))\n\n self.insert1({**key,\n **generate_electrode_config(probe_type, electrode_group_members),\n 'acq_software': acq_software,\n 'sampling_rate': spikeglx_meta.meta['imSampRate']})\n\n root_dir = find_root_directory(get_ephys_root_data_dir(), meta_filepath)\n self.EphysFile.insert1({\n **key,\n 'file_path': meta_filepath.relative_to(root_dir).as_posix()})\n elif acq_software == 'Open Ephys':\n dataset = openephys.OpenEphys(sess_dir_full)\n for serial_number, probe_data in dataset.probes.items():\n if str(serial_number) == inserted_probe_serial_number:\n break\n else:\n raise FileNotFoundError(\n 'No Open Ephys data found for probe insertion: {}'.format(key))\n\n if re.search('(1.0|2.0)', probe_data.probe_model):\n probe_type = probe_data.probe_model\n electrode_query = probe.ProbeType.Electrode & {'probe_type': probe_type}\n\n probe_electrodes = {key['electrode']: key\n for key in electrode_query.fetch('KEY')}\n\n electrode_group_members = [\n probe_electrodes[channel_idx]\n for channel_idx in probe_data.ap_meta['channels_ids']]\n else:\n raise NotImplementedError(\n 'Processing for neuropixels'\n ' probe model {} not yet implemented'.format(probe_data.probe_model))\n\n self.insert1({**key,\n **generate_electrode_config(probe_type, electrode_group_members),\n 'acq_software': acq_software,\n 'sampling_rate': probe_data.ap_meta['sample_rate']})\n\n root_dir = find_root_directory(\n get_ephys_root_data_dir(),\n probe_data.recording_info['recording_files'][0])\n self.EphysFile.insert([{**key,\n 'file_path': fp.relative_to(root_dir).as_posix()}\n for fp in probe_data.recording_info['recording_files']])\n else:\n raise NotImplementedError(f'Processing ephys files from'\n f' acquisition software of type {acq_software} is'\n f' not yet implemented')\n\n\n@schema\nclass LFP(dj.Imported):\n definition = \"\"\"\n # Acquired local field potential (LFP) from a given Ephys recording.\n -> EphysRecording\n ---\n lfp_sampling_rate: float # (Hz)\n lfp_time_stamps: longblob # (s) timestamps with respect to the start of the recording (recording_timestamp)\n lfp_mean: longblob # (uV) mean of LFP across electrodes - shape (time,)\n \"\"\"\n\n class Electrode(dj.Part):\n definition = \"\"\"\n -> master\n -> probe.ElectrodeConfig.Electrode \n ---\n lfp: longblob # (uV) recorded lfp at this electrode \n \"\"\"\n\n # Only store LFP for every 9th channel, due to high channel density,\n # close-by channels exhibit highly similar LFP\n _skip_channel_counts = 9\n\n def make(self, key):\n acq_software, probe_sn = (EphysRecording\n * ProbeInsertion & key).fetch1('acq_software', 'probe')\n\n electrode_keys, lfp = [], []\n\n if acq_software == 'SpikeGLX':\n spikeglx_meta_filepath = get_spikeglx_meta_filepath(key)\n spikeglx_recording = spikeglx.SpikeGLX(spikeglx_meta_filepath.parent)\n\n lfp_channel_ind = spikeglx_recording.lfmeta.recording_channels[\n -1::-self._skip_channel_counts]\n\n # Extract LFP data at 
specified channels and convert to uV\n lfp = spikeglx_recording.lf_timeseries[:, lfp_channel_ind] # (sample x channel)\n lfp = (lfp * spikeglx_recording.get_channel_bit_volts('lf')[lfp_channel_ind]).T # (channel x sample)\n\n self.insert1(dict(key,\n lfp_sampling_rate=spikeglx_recording.lfmeta.meta['imSampRate'],\n lfp_time_stamps=(np.arange(lfp.shape[1])\n / spikeglx_recording.lfmeta.meta['imSampRate']),\n lfp_mean=lfp.mean(axis=0)))\n\n electrode_query = (probe.ProbeType.Electrode\n * probe.ElectrodeConfig.Electrode\n * EphysRecording & key)\n probe_electrodes = {\n (shank, shank_col, shank_row): key\n for key, shank, shank_col, shank_row in zip(*electrode_query.fetch(\n 'KEY', 'shank', 'shank_col', 'shank_row'))}\n\n for recorded_site in lfp_channel_ind:\n shank, shank_col, shank_row, _ = spikeglx_recording.apmeta.shankmap['data'][recorded_site]\n electrode_keys.append(probe_electrodes[(shank, shank_col, shank_row)])\n elif acq_software == 'Open Ephys':\n sess_dir = pathlib.Path(get_session_directory(key))\n sess_dir_full = find_full_path(get_ephys_root_data_dir(), sess_dir)\n loaded_oe = openephys.OpenEphys(sess_dir_full)\n oe_probe = loaded_oe.probes[probe_sn]\n\n lfp_channel_ind = np.arange(\n len(oe_probe.lfp_meta['channels_ids']))[-1::-self._skip_channel_counts]\n\n lfp = oe_probe.lfp_timeseries[:, lfp_channel_ind] # (sample x channel)\n lfp = (lfp * np.array(oe_probe.lfp_meta['channels_gains'])[lfp_channel_ind]).T # (channel x sample)\n lfp_timestamps = oe_probe.lfp_timestamps\n\n self.insert1(dict(key,\n lfp_sampling_rate=oe_probe.lfp_meta['sample_rate'],\n lfp_time_stamps=lfp_timestamps,\n lfp_mean=lfp.mean(axis=0)))\n\n electrode_query = (probe.ProbeType.Electrode\n * probe.ElectrodeConfig.Electrode\n * EphysRecording & key)\n probe_electrodes = {key['electrode']: key\n for key in electrode_query.fetch('KEY')}\n\n for channel_idx in np.array(oe_probe.lfp_meta['channels_ids'])[lfp_channel_ind]:\n electrode_keys.append(probe_electrodes[channel_idx])\n else:\n raise NotImplementedError(f'LFP extraction from acquisition software'\n f' of type {acq_software} is not yet implemented')\n\n # single insert in loop to mitigate potential memory issue\n for electrode_key, lfp_trace in zip(electrode_keys, lfp):\n self.Electrode.insert1({**key, **electrode_key, 'lfp': lfp_trace})\n\n\n# ------------ Clustering --------------\n\n@schema\nclass ClusteringMethod(dj.Lookup):\n definition = \"\"\"\n # Method for clustering\n clustering_method: varchar(16)\n ---\n clustering_method_desc: varchar(1000)\n \"\"\"\n\n contents = [('kilosort', 'kilosort clustering method'),\n ('kilosort2', 'kilosort2 clustering method')]\n\n\n@schema\nclass ClusteringParamSet(dj.Lookup):\n definition = \"\"\"\n # Parameter set to be used in a clustering procedure\n paramset_idx: smallint\n ---\n -> ClusteringMethod \n paramset_desc: varchar(128)\n param_set_hash: uuid\n unique index (param_set_hash)\n params: longblob # dictionary of all applicable parameters\n \"\"\"\n\n @classmethod\n def insert_new_params(cls, processing_method: str, paramset_idx: int,\n paramset_desc: str, params: dict):\n param_dict = {'clustering_method': processing_method,\n 'paramset_idx': paramset_idx,\n 'paramset_desc': paramset_desc,\n 'params': params,\n 'param_set_hash': dict_to_uuid(params)}\n param_query = cls & {'param_set_hash': param_dict['param_set_hash']}\n\n if param_query: # If the specified param-set already exists\n existing_paramset_idx = param_query.fetch1('paramset_idx')\n if existing_paramset_idx == paramset_idx: # If the 
existing set has the same paramset_idx: job done\n return\n else: # If not same name: human error, trying to add the same paramset with different name\n raise dj.DataJointError(\n 'The specified param-set'\n ' already exists - paramset_idx: {}'.format(existing_paramset_idx))\n else:\n cls.insert1(param_dict)\n\n\n@schema\nclass ClusterQualityLabel(dj.Lookup):\n definition = \"\"\"\n # Quality\n cluster_quality_label: varchar(100)\n ---\n cluster_quality_description: varchar(4000)\n \"\"\"\n contents = [\n ('good', 'single unit'),\n ('ok', 'probably a single unit, but could be contaminated'),\n ('mua', 'multi-unit activity'),\n ('noise', 'bad unit')\n ]\n\n\n@schema\nclass ClusteringTask(dj.Manual):\n definition = \"\"\"\n # Manual table for defining a clustering task ready to be run\n -> EphysRecording\n -> ClusteringParamSet\n ---\n clustering_output_dir: varchar(255) # clustering output directory relative to the clustering root data directory\n task_mode='load': enum('load', 'trigger') # 'load': load computed analysis results, 'trigger': trigger computation\n \"\"\"\n\n\n@schema\nclass Clustering(dj.Imported):\n \"\"\"\n A processing table to handle each ClusteringTask:\n + If `task_mode == \"trigger\"`: trigger clustering analysis\n according to the ClusteringParamSet (e.g. launch a kilosort job)\n + If `task_mode == \"load\"`: verify output\n \"\"\"\n definition = \"\"\"\n # Clustering Procedure\n -> ClusteringTask\n ---\n clustering_time: datetime # time of generation of this set of clustering results \n package_version='': varchar(16)\n \"\"\"\n\n def make(self, key):\n task_mode, output_dir = (ClusteringTask & key).fetch1(\n 'task_mode', 'clustering_output_dir')\n kilosort_dir = find_full_path(get_ephys_root_data_dir(), output_dir)\n\n if task_mode == 'load':\n kilosort_dataset = kilosort.Kilosort(kilosort_dir) # check if the directory is a valid Kilosort output\n creation_time, _, _ = kilosort.extract_clustering_info(kilosort_dir)\n elif task_mode == 'trigger':\n raise NotImplementedError('Automatic triggering of'\n ' clustering analysis is not yet supported')\n else:\n raise ValueError(f'Unknown task mode: {task_mode}')\n\n self.insert1({**key, 'clustering_time': creation_time})\n\n\n@schema\nclass Curation(dj.Manual):\n definition = \"\"\"\n # Manual curation procedure\n -> Clustering\n curation_id: int\n ---\n curation_time: datetime # time of generation of this set of curated clustering results \n curation_output_dir: varchar(255) # output directory of the curated results, relative to clustering root data directory\n quality_control: bool # has this clustering result undergone quality control?\n manual_curation: bool # has manual curation been performed on this clustering result?\n curation_note='': varchar(2000) \n \"\"\"\n\n def create1_from_clustering_task(self, key, curation_note=''):\n \"\"\"\n A convenient function to create a new corresponding \"Curation\"\n for a particular \"ClusteringTask\"\n \"\"\"\n if key not in Clustering():\n raise ValueError(f'No corresponding entry in Clustering available'\n f' for: {key}; do `Clustering.populate(key)`')\n\n task_mode, output_dir = (ClusteringTask & key).fetch1(\n 'task_mode', 'clustering_output_dir')\n kilosort_dir = find_full_path(get_ephys_root_data_dir(), output_dir)\n\n creation_time, is_curated, is_qc = kilosort.extract_clustering_info(kilosort_dir)\n # Synthesize curation_id\n curation_id = dj.U().aggr(self & key, n='ifnull(max(curation_id)+1,1)').fetch1('n')\n self.insert1({**key, 'curation_id': curation_id,\n 
'curation_time': creation_time, 'curation_output_dir': output_dir,\n 'quality_control': is_qc, 'manual_curation': is_curated,\n 'curation_note': curation_note})\n\n\n@schema\nclass CuratedClustering(dj.Imported):\n definition = \"\"\"\n # Clustering results of a curation.\n -> Curation \n \"\"\"\n\n class Unit(dj.Part):\n definition = \"\"\" \n # Properties of a given unit from a round of clustering (and curation)\n -> master\n unit: int\n ---\n -> probe.ElectrodeConfig.Electrode # electrode with highest waveform amplitude for this unit\n -> ClusterQualityLabel\n spike_count: int # how many spikes in this recording for this unit\n spike_times: longblob # (s) spike times of this unit, relative to the start of the EphysRecording\n spike_sites : longblob # array of electrode associated with each spike\n spike_depths : longblob # (um) array of depths associated with each spike, relative to the (0, 0) of the probe \n \"\"\"\n\n def make(self, key):\n output_dir = (Curation & key).fetch1('curation_output_dir')\n kilosort_dir = find_full_path(get_ephys_root_data_dir(), output_dir)\n\n kilosort_dataset = kilosort.Kilosort(kilosort_dir)\n acq_software = (EphysRecording & key).fetch1('acq_software')\n\n # ---------- Unit ----------\n # -- Remove 0-spike units\n withspike_idx = [i for i, u in enumerate(kilosort_dataset.data['cluster_ids'])\n if (kilosort_dataset.data['spike_clusters'] == u).any()]\n valid_units = kilosort_dataset.data['cluster_ids'][withspike_idx]\n valid_unit_labels = kilosort_dataset.data['cluster_groups'][withspike_idx]\n # -- Get channel and electrode-site mapping\n channel2electrodes = get_neuropixels_channel2electrode_map(key, acq_software)\n\n # -- Spike-times --\n # spike_times_sec_adj > spike_times_sec > spike_times\n spike_time_key = ('spike_times_sec_adj' if 'spike_times_sec_adj' in kilosort_dataset.data\n else 'spike_times_sec' if 'spike_times_sec'\n in kilosort_dataset.data else 'spike_times')\n spike_times = kilosort_dataset.data[spike_time_key]\n kilosort_dataset.extract_spike_depths()\n\n # -- Spike-sites and Spike-depths --\n spike_sites = np.array([channel2electrodes[s]['electrode']\n for s in kilosort_dataset.data['spike_sites']])\n spike_depths = kilosort_dataset.data['spike_depths']\n\n # -- Insert unit, label, peak-chn\n units = []\n for unit, unit_lbl in zip(valid_units, valid_unit_labels):\n if (kilosort_dataset.data['spike_clusters'] == unit).any():\n unit_channel, _ = kilosort_dataset.get_best_channel(unit)\n unit_spike_times = (spike_times[kilosort_dataset.data['spike_clusters'] == unit]\n / kilosort_dataset.data['params']['sample_rate'])\n spike_count = len(unit_spike_times)\n\n units.append({\n 'unit': unit,\n 'cluster_quality_label': unit_lbl,\n **channel2electrodes[unit_channel],\n 'spike_times': unit_spike_times,\n 'spike_count': spike_count,\n 'spike_sites': spike_sites[kilosort_dataset.data['spike_clusters'] == unit],\n 'spike_depths': spike_depths[kilosort_dataset.data['spike_clusters'] == unit]})\n\n self.insert1(key)\n self.Unit.insert([{**key, **u} for u in units])\n\n\n@schema\nclass WaveformSet(dj.Imported):\n definition = \"\"\"\n # A set of spike waveforms for units out of a given CuratedClustering\n -> CuratedClustering\n \"\"\"\n\n class PeakWaveform(dj.Part):\n definition = \"\"\"\n # Mean waveform across spikes for a given unit at its representative electrode\n -> master\n -> CuratedClustering.Unit\n ---\n peak_electrode_waveform: longblob # (uV) mean waveform for a given unit at its representative electrode\n \"\"\"\n\n class 
Waveform(dj.Part):\n definition = \"\"\"\n # Spike waveforms and their mean across spikes for the given unit\n -> master\n -> CuratedClustering.Unit\n -> probe.ElectrodeConfig.Electrode \n --- \n waveform_mean: longblob # (uV) mean waveform across spikes of the given unit\n waveforms=null: longblob # (uV) (spike x sample) waveforms of a sampling of spikes at the given electrode for the given unit\n \"\"\"\n\n def make(self, key):\n output_dir = (Curation & key).fetch1('curation_output_dir')\n kilosort_dir = find_full_path(get_ephys_root_data_dir(), output_dir)\n\n kilosort_dataset = kilosort.Kilosort(kilosort_dir)\n\n acq_software, probe_serial_number = (EphysRecording * ProbeInsertion & key).fetch1(\n 'acq_software', 'probe')\n\n # -- Get channel and electrode-site mapping\n recording_key = (EphysRecording & key).fetch1('KEY')\n channel2electrodes = get_neuropixels_channel2electrode_map(recording_key, acq_software)\n\n is_qc = (Curation & key).fetch1('quality_control')\n\n # Get all units\n units = {u['unit']: u for u in (CuratedClustering.Unit & key).fetch(\n as_dict=True, order_by='unit')}\n\n if is_qc:\n unit_waveforms = np.load(kilosort_dir / 'mean_waveforms.npy') # unit x channel x sample\n\n def yield_unit_waveforms():\n for unit_no, unit_waveform in zip(kilosort_dataset.data['cluster_ids'],\n unit_waveforms):\n unit_peak_waveform = {}\n unit_electrode_waveforms = []\n if unit_no in units:\n for channel, channel_waveform in zip(\n kilosort_dataset.data['channel_map'],\n unit_waveform):\n unit_electrode_waveforms.append({\n **units[unit_no], **channel2electrodes[channel],\n 'waveform_mean': channel_waveform})\n if channel2electrodes[channel]['electrode'] == units[unit_no]['electrode']:\n unit_peak_waveform = {\n **units[unit_no],\n 'peak_electrode_waveform': channel_waveform}\n yield unit_peak_waveform, unit_electrode_waveforms\n else:\n if acq_software == 'SpikeGLX':\n spikeglx_meta_filepath = get_spikeglx_meta_filepath(key)\n neuropixels_recording = spikeglx.SpikeGLX(spikeglx_meta_filepath.parent)\n elif acq_software == 'Open Ephys':\n sess_dir = pathlib.Path(get_session_directory(key))\n sess_dir_full = find_full_path(get_ephys_root_data_dir(), sess_dir)\n openephys_dataset = openephys.OpenEphys(sess_dir_full)\n neuropixels_recording = openephys_dataset.probes[probe_serial_number]\n\n def yield_unit_waveforms():\n for unit_dict in units.values():\n unit_peak_waveform = {}\n unit_electrode_waveforms = []\n\n spikes = unit_dict['spike_times']\n waveforms = neuropixels_recording.extract_spike_waveforms(\n spikes, kilosort_dataset.data['channel_map']) # (sample x channel x spike)\n waveforms = waveforms.transpose((1, 2, 0)) # (channel x spike x sample)\n for channel, channel_waveform in zip(\n kilosort_dataset.data['channel_map'], waveforms):\n unit_electrode_waveforms.append({\n **unit_dict, **channel2electrodes[channel],\n 'waveform_mean': channel_waveform.mean(axis=0),\n 'waveforms': channel_waveform})\n if channel2electrodes[channel]['electrode'] == unit_dict['electrode']:\n unit_peak_waveform = {\n **unit_dict,\n 'peak_electrode_waveform': channel_waveform.mean(axis=0)}\n\n yield unit_peak_waveform, unit_electrode_waveforms\n\n # insert waveform on a per-unit basis to mitigate potential memory issue\n self.insert1(key)\n for unit_peak_waveform, unit_electrode_waveforms in yield_unit_waveforms():\n self.PeakWaveform.insert1(unit_peak_waveform, ignore_extra_fields=True)\n self.Waveform.insert(unit_electrode_waveforms, ignore_extra_fields=True)\n\n\n# ---------------- HELPER 
FUNCTIONS ----------------\n\ndef get_spikeglx_meta_filepath(ephys_recording_key):\n # attempt to retrieve from EphysRecording.EphysFile\n spikeglx_meta_filepath = (EphysRecording.EphysFile & ephys_recording_key\n & 'file_path LIKE \"%.ap.meta\"').fetch1('file_path')\n\n try:\n spikeglx_meta_filepath = find_full_path(get_ephys_root_data_dir(),\n spikeglx_meta_filepath)\n except FileNotFoundError:\n # if not found, search in session_dir again\n if not spikeglx_meta_filepath.exists():\n sess_dir = pathlib.Path(get_session_directory(ephys_recording_key))\n inserted_probe_serial_number = (ProbeInsertion * probe.Probe\n & ephys_recording_key).fetch1('probe')\n\n spikeglx_meta_filepaths = [fp for fp in sess_dir.rglob('*.ap.meta')]\n for meta_filepath in spikeglx_meta_filepaths:\n spikeglx_meta = spikeglx.SpikeGLXMeta(meta_filepath)\n if str(spikeglx_meta.probe_SN) == inserted_probe_serial_number:\n spikeglx_meta_filepath = meta_filepath\n break\n else:\n raise FileNotFoundError(\n 'No SpikeGLX data found for probe insertion: {}'.format(ephys_recording_key))\n\n return spikeglx_meta_filepath\n\n\ndef get_neuropixels_channel2electrode_map(ephys_recording_key, acq_software):\n if acq_software == 'SpikeGLX':\n spikeglx_meta_filepath = get_spikeglx_meta_filepath(ephys_recording_key)\n spikeglx_meta = spikeglx.SpikeGLXMeta(spikeglx_meta_filepath)\n electrode_config_key = (EphysRecording * probe.ElectrodeConfig\n & ephys_recording_key).fetch1('KEY')\n\n electrode_query = (probe.ProbeType.Electrode\n * probe.ElectrodeConfig.Electrode & electrode_config_key)\n\n probe_electrodes = {\n (shank, shank_col, shank_row): key\n for key, shank, shank_col, shank_row in zip(*electrode_query.fetch(\n 'KEY', 'shank', 'shank_col', 'shank_row'))}\n\n channel2electrode_map = {\n recorded_site: probe_electrodes[(shank, shank_col, shank_row)]\n for recorded_site, (shank, shank_col, shank_row, _) in enumerate(\n spikeglx_meta.shankmap['data'])}\n elif acq_software == 'Open Ephys':\n sess_dir = pathlib.Path(get_session_directory(ephys_recording_key))\n sess_dir_full = find_full_path(get_ephys_root_data_dir(), sess_dir)\n openephys_dataset = openephys.OpenEphys(sess_dir_full)\n probe_serial_number = (ProbeInsertion & ephys_recording_key).fetch1('probe')\n probe_dataset = openephys_dataset.probes[probe_serial_number]\n\n electrode_query = (probe.ProbeType.Electrode\n * probe.ElectrodeConfig.Electrode\n * EphysRecording & ephys_recording_key)\n\n probe_electrodes = {key['electrode']: key\n for key in electrode_query.fetch('KEY')}\n\n channel2electrode_map = {\n channel_idx: probe_electrodes[channel_idx]\n for channel_idx in probe_dataset.ap_meta['channels_ids']}\n\n return channel2electrode_map\n\n\ndef generate_electrode_config(probe_type: str, electrodes: list):\n \"\"\"\n Generate and insert new ElectrodeConfig\n :param probe_type: probe type (e.g. 
neuropixels 2.0 - SS)\n :param electrodes: list of the electrode dict (keys of the probe.ProbeType.Electrode table)\n :return: a dict representing a key of the probe.ElectrodeConfig table\n \"\"\"\n # compute hash for the electrode config (hash of dict of all ElectrodeConfig.Electrode)\n electrode_config_hash = dict_to_uuid({k['electrode']: k for k in electrodes})\n\n electrode_list = sorted([k['electrode'] for k in electrodes])\n electrode_gaps = ([-1]\n + np.where(np.diff(electrode_list) > 1)[0].tolist()\n + [len(electrode_list) - 1])\n electrode_config_name = '; '.join([\n f'{electrode_list[start + 1]}-{electrode_list[end]}'\n for start, end in zip(electrode_gaps[:-1], electrode_gaps[1:])])\n\n electrode_config_key = {'electrode_config_hash': electrode_config_hash}\n\n # ---- make new ElectrodeConfig if needed ----\n if not probe.ElectrodeConfig & electrode_config_key:\n probe.ElectrodeConfig.insert1({**electrode_config_key, 'probe_type': probe_type,\n 'electrode_config_name': electrode_config_name})\n probe.ElectrodeConfig.Electrode.insert({**electrode_config_key, **electrode}\n for electrode in electrodes)\n\n return electrode_config_key\n\n"
] | [
[
"numpy.arange",
"numpy.load",
"numpy.array",
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
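A small worked example of the gap-based naming used by generate_electrode_config in the entry above: contiguous runs of electrode ids are detected with np.diff and summarised as "start-end" ranges. The electrode ids are made up for illustration.

    import numpy as np

    electrode_list = sorted([0, 1, 2, 3, 10, 11, 12, 40])
    electrode_gaps = ([-1]
                      + np.where(np.diff(electrode_list) > 1)[0].tolist()
                      + [len(electrode_list) - 1])
    electrode_config_name = '; '.join(
        f'{electrode_list[start + 1]}-{electrode_list[end]}'
        for start, end in zip(electrode_gaps[:-1], electrode_gaps[1:]))
    print(electrode_config_name)  # 0-3; 10-12; 40-40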
mkrack/aiida-cp2k | [
"2cd553c59b184d5df46a550ab1fc3d1f23644256"
] | [
"aiida_cp2k/utils/workchains.py"
] | [
"# -*- coding: utf-8 -*-\n###############################################################################\n# Copyright (c), The AiiDA-CP2K authors. #\n# SPDX-License-Identifier: MIT #\n# AiiDA-CP2K is hosted on GitHub at https://github.com/aiidateam/aiida-cp2k #\n# For further information on the license, see the LICENSE.txt file. #\n###############################################################################\n\"\"\"AiiDA-CP2K utilities for workchains\"\"\"\n\nfrom aiida.engine import calcfunction\nfrom aiida.orm import Dict\nfrom aiida.plugins import DataFactory\n\nStructureData = DataFactory('structure') # pylint: disable=invalid-name\n\nHARTREE2EV = 27.211399\nHARTREE2KJMOL = 2625.500\n\n\ndef merge_dict(dct, merge_dct):\n \"\"\" Taken from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9\n Recursive dict merge. Inspired by :meth:``dict.update()``, instead of\n updating only top-level keys, merge_dict recurses down into dicts nested\n to an arbitrary depth, updating keys. The ``merge_dct`` is merged into\n ``dct``.\n :param dct: dict onto which the merge is executed\n :param merge_dct: dct merged into dct (overwrites dct data if in both)\n :return: None\n \"\"\"\n from collections.abc import Mapping\n for k, _ in merge_dct.items(): # it was .iteritems() in python2\n if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], Mapping)):\n merge_dict(dct[k], merge_dct[k])\n else:\n dct[k] = merge_dct[k]\n\n\n@calcfunction\ndef merge_Dict(d1, d2): #pylint: disable=invalid-name\n \"\"\" Make all the data in the second Dict overwrite the corrisponding data in the first Dict\"\"\"\n d1_dict = d1.get_dict()\n d2_dict = d2.get_dict()\n merge_dict(d1_dict, d2_dict)\n return Dict(dict=d1_dict)\n\n\ndef get_kinds_section(structure, protocol_settings):\n \"\"\" Write the &KIND sections given the structure and the settings_dict\"\"\"\n kinds = []\n all_atoms = set(structure.get_ase().get_chemical_symbols())\n for atom in all_atoms:\n kinds.append({\n '_': atom,\n 'BASIS_SET': protocol_settings['basis_set'][atom],\n 'POTENTIAL': protocol_settings['pseudopotential'][atom],\n 'MAGNETIZATION': protocol_settings['initial_magnetization'][atom],\n })\n return {'FORCE_EVAL': {'SUBSYS': {'KIND': kinds}}}\n\n\ndef get_input_multiplicity(structure, protocol_settings):\n \"\"\" Compute the total multiplicity of the structure,\n by summing the atomic magnetizations:\n multiplicity = 1 + sum_i ( natoms_i * magnetization_i ), for each atom_type i\n \"\"\"\n multiplicity = 1\n all_atoms = structure.get_ase().get_chemical_symbols()\n for key, value in protocol_settings['initial_magnetization'].items():\n multiplicity += all_atoms.count(key) * value\n multiplicity = int(round(multiplicity))\n multiplicity_dict = {'FORCE_EVAL': {'DFT': {'MULTIPLICITY': multiplicity}}}\n if multiplicity != 1:\n multiplicity_dict['FORCE_EVAL']['DFT']['UKS'] = True\n return multiplicity_dict\n\n\ndef ot_has_small_bandgap(cp2k_input, cp2k_output, bandgap_thr_ev):\n \"\"\" Returns True if the calculation used OT and had a smaller bandgap then the guess needed for the OT.\n (NOTE: It has been observed also negative bandgap with OT in CP2K!)\n cp2k_input: dict\n cp2k_output: dict\n bandgap_thr_ev: float [eV]\n \"\"\"\n list_true = [True, 'T', 't', '.TRUE.', 'True', 'true'] #add more?\n try:\n ot_settings = cp2k_input['FORCE_EVAL']['DFT']['SCF']['OT']\n if '_' not in ot_settings.keys() or ot_settings['_'] in list_true: #pylint: disable=simplifiable-if-statement\n using_ot = True\n else:\n using_ot = False\n except 
KeyError:\n using_ot = False\n min_bandgap_ev = min(cp2k_output[\"bandgap_spin1_au\"], cp2k_output[\"bandgap_spin2_au\"]) * HARTREE2EV\n is_bandgap_small = (min_bandgap_ev < bandgap_thr_ev)\n return using_ot and is_bandgap_small\n\n\n@calcfunction\ndef check_resize_unit_cell(struct, threshold): #pylint: disable=too-many-locals\n \"\"\"Returns the multiplication factors for the cell vectors to respect, in every direction:\n min(perpendicular_width) > threshold.\n \"\"\"\n from math import cos, sin, sqrt, fabs, ceil\n import numpy as np\n\n # Parsing structure's cell\n def angle(vect1, vect2):\n return np.arccos(np.dot(vect1, vect2) / (np.linalg.norm(vect1) * np.linalg.norm(vect2)))\n\n a_len = np.linalg.norm(struct.cell[0])\n b_len = np.linalg.norm(struct.cell[1])\n c_len = np.linalg.norm(struct.cell[2])\n\n alpha = angle(struct.cell[1], struct.cell[2])\n beta = angle(struct.cell[0], struct.cell[2])\n gamma = angle(struct.cell[0], struct.cell[1])\n\n # Computing triangular cell matrix\n vol = np.sqrt(1 - cos(alpha)**2 - cos(beta)**2 - cos(gamma)**2 + 2 * cos(alpha) * cos(beta) * cos(gamma))\n cell = np.zeros((3, 3))\n cell[0, :] = [a_len, 0, 0]\n cell[1, :] = [b_len * cos(gamma), b_len * sin(gamma), 0]\n cell[2, :] = [\n c_len * cos(beta), c_len * (cos(alpha) - cos(beta) * cos(gamma)) / (sin(gamma)), c_len * vol / sin(gamma)\n ]\n cell = np.array(cell)\n\n # Computing perpendicular widths, as implemented in Raspa\n # for the check (simplified for triangular cell matrix)\n axc1 = cell[0, 0] * cell[2, 2]\n axc2 = -cell[0, 0] * cell[2, 1]\n bxc1 = cell[1, 1] * cell[2, 2]\n bxc2 = -cell[1, 0] * cell[2, 2]\n bxc3 = cell[1, 0] * cell[2, 1] - cell[1, 1] * cell[2, 0]\n det = fabs(cell[0, 0] * cell[1, 1] * cell[2, 2])\n perpwidth = np.zeros(3)\n perpwidth[0] = det / sqrt(bxc1**2 + bxc2**2 + bxc3**2)\n perpwidth[1] = det / sqrt(axc1**2 + axc2**2)\n perpwidth[2] = cell[2, 2]\n\n #prevent from crashing if threshold.value is zero\n if threshold.value == 0:\n thr = 0.1\n else:\n thr = threshold.value\n\n resize = {\n 'nx': int(ceil(thr / perpwidth[0])),\n 'ny': int(ceil(thr / perpwidth[1])),\n 'nz': int(ceil(thr / perpwidth[2]))\n }\n return Dict(dict=resize)\n\n\n@calcfunction\ndef resize_unit_cell(struct, resize):\n \"\"\"Resize the StructureData according to the resize Dict\"\"\"\n resize_tuple = tuple(resize[x] for x in ['nx', 'ny', 'nz'])\n return StructureData(ase=struct.get_ase().repeat(resize_tuple))\n"
] | [
[
"numpy.dot",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
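A toy illustration of the multiplicity rule implemented by get_input_multiplicity above: multiplicity = 1 + sum over atom kinds of (atom count * initial magnetization), rounded to an integer, with UKS enabled whenever the result differs from 1. The symbols and magnetizations are assumptions, not protocol defaults.

    symbols = ['Fe', 'Fe', 'O', 'O', 'O']          # chemical symbols of a hypothetical structure
    initial_magnetization = {'Fe': 4.0, 'O': 0.0}  # assumed protocol settings

    multiplicity = 1
    for kind, magnetization in initial_magnetization.items():
        multiplicity += symbols.count(kind) * magnetization
    multiplicity = int(round(multiplicity))        # 1 + 2*4.0 + 3*0.0 = 9

    dft_overrides = {'MULTIPLICITY': multiplicity}
    if multiplicity != 1:
        dft_overrides['UKS'] = True
    print(dft_overrides)  # {'MULTIPLICITY': 9, 'UKS': True}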
liyongsheng-tech/pytorch-metric-learning | [
"ae3807249a748512a1f7db62b380a524869b5911"
] | [
"pytorch_metric_learning/utils/common_functions.py"
] | [
"import collections\r\nimport torch\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\n\r\nNUMPY_RANDOM_STATE = np.random.RandomState()\r\n\r\n\r\ndef try_keys(input_dict, keys):\r\n for k in keys:\r\n try:\r\n return input_dict[k]\r\n except BaseException:\r\n pass\r\n return None\r\n\r\n\r\ndef try_next_on_generator(gen, iterable):\r\n try:\r\n return gen, next(gen)\r\n except StopIteration:\r\n gen = iter(iterable)\r\n return gen, next(gen)\r\n\r\n\r\ndef numpy_to_torch(v):\r\n try:\r\n return torch.from_numpy(v)\r\n except BaseException:\r\n return v\r\n\r\ndef torch_to_numpy(v):\r\n try:\r\n return v.cpu().numpy()\r\n except BaseException:\r\n return v\r\n\r\n\r\ndef wrap_variable(batch_data, device):\r\n return Variable(batch_data).to(device)\r\n\r\n\r\ndef get_hierarchy_label(batch_labels, hierarchy_level):\r\n if hierarchy_level == \"all\":\r\n return batch_labels\r\n\r\n try:\r\n if batch_labels.ndim == 2:\r\n batch_labels = batch_labels[:, hierarchy_level]\r\n return batch_labels\r\n except BaseException:\r\n return batch_labels\r\n\r\n\r\ndef map_labels(label_map, labels):\r\n labels = torch_to_numpy(labels)\r\n if labels.ndim == 2:\r\n for h in range(labels.shape[1]):\r\n labels[:, h] = label_map(labels[:, h], h)\r\n else:\r\n labels = label_map(labels, 0)\r\n return labels\r\n\r\ndef process_label(labels, hierarchy_level, label_map):\r\n labels = map_labels(label_map, labels)\r\n labels = get_hierarchy_label(labels, hierarchy_level)\r\n labels = numpy_to_torch(labels)\r\n return labels\r\n\r\ndef pass_data_to_model(model, data, device, **kwargs):\r\n return model(wrap_variable(data, device), **kwargs)\r\n\r\ndef set_requires_grad(model, requires_grad):\r\n for param in model.parameters():\r\n param.requires_grad = requires_grad\r\n\r\n\r\ndef safe_random_choice(input_data, size):\r\n \"\"\"\r\n Randomly samples without replacement from a sequence. 
It is \"safe\" because\r\n if len(input_data) < size, it will randomly sample WITH replacement\r\n Args:\r\n input_data is a sequence, like a torch tensor, numpy array,\r\n python list, tuple etc\r\n size is the number of elements to randomly sample from input_data\r\n Returns:\r\n An array of size \"size\", randomly sampled from input_data\r\n \"\"\"\r\n replace = len(input_data) < size\r\n return NUMPY_RANDOM_STATE.choice(input_data, size=size, replace=replace)\r\n\r\n\r\ndef longest_list(list_of_lists):\r\n return max(list_of_lists, key=len)\r\n\r\n\r\ndef slice_by_n(input_array, n):\r\n output = []\r\n for i in range(n):\r\n output.append(input_array[i::n])\r\n return output\r\n\r\n\r\ndef unslice_by_n(input_tensors):\r\n n = len(input_tensors)\r\n rows, cols = input_tensors[0].size()\r\n output = torch.zeros((rows * n, cols)).to(input_tensors[0].device)\r\n for i in range(n):\r\n output[i::n] = input_tensors[i]\r\n return output\r\n\r\n\r\ndef set_layers_to_eval(layer_name):\r\n def set_to_eval(m):\r\n classname = m.__class__.__name__\r\n if classname.find(layer_name) != -1:\r\n m.eval()\r\n return set_to_eval\r\n\r\n\r\ndef get_train_dataloader(dataset, batch_size, sampler, num_workers, collate_fn):\r\n return torch.utils.data.DataLoader(\r\n dataset,\r\n batch_size=int(batch_size),\r\n sampler=sampler,\r\n drop_last=True,\r\n num_workers=num_workers,\r\n collate_fn=collate_fn,\r\n shuffle=sampler is None,\r\n pin_memory=False\r\n )\r\n\r\ndef get_eval_dataloader(dataset, batch_size, num_workers, collate_fn):\r\n return torch.utils.data.DataLoader(\r\n dataset,\r\n batch_size=int(batch_size),\r\n drop_last=False,\r\n num_workers=num_workers,\r\n collate_fn=collate_fn,\r\n shuffle=False,\r\n pin_memory=False\r\n )\r\n\r\n\r\ndef try_torch_operation(torch_op, input_val):\r\n return torch_op(input_val) if torch.is_tensor(input_val) else input_val "
] | [
[
"torch.zeros",
"torch.is_tensor",
"torch.from_numpy",
"numpy.random.RandomState",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
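A quick usage sketch of the safe_random_choice behaviour documented in the entry above: sampling is without replacement unless the requested size exceeds the input length, in which case it falls back to sampling with replacement. The inputs are made up.

    import numpy as np

    rng = np.random.RandomState(0)

    def safe_random_choice(input_data, size):
        replace = len(input_data) < size
        return rng.choice(input_data, size=size, replace=replace)

    print(safe_random_choice([10, 20, 30, 40], 3))  # three distinct elements
    print(safe_random_choice([10, 20], 5))          # five elements, repeats allowed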
KITTCAMP-CODE/puppy | [
"f222c935ee474eb6e9dec2ff60f05415eab34c90"
] | [
"lane/laneline_coord_bku1.py"
] | [
"import cv2\nimport urllib.request as request\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\nfrom linesearch import *\n\n'''\nPrepare:\n In raspberryPi:\n\n ```\n cd Tools\n bash change_network.sh wifi\n bash remote_camera.sh\n ```\n\n In laptop computer:\n \n ```\n conda env create -f=environment.yml --name py2 --debug -v -v\n source activate py2\n ```\n\n Run:\n\n ```\n python laneline_coord.py\n ```\n\n (array([ 1.36666667e-02, -2.13888889e+01, 4.20000000e+01, -1.00000000e+00, 1.45800000e+03]), 8.224282026290894)\n (array([ 1.36666667e-02, -2.13888889e+01, 4.20000000e+01, 0.00000000e+00, 1.47100000e+03]), 0.5016670227050781)\n (array([ 1.36666667e-02, -2.13888889e+01, 4.20000000e+01, 0.00000000e+00, 1.47100000e+03]), 0.5490009784698486)\n ...\n (array([ 1.36666667e-02, -2.13888889e+01, 4.20000000e+01, 0.00000000e+00, 1.46900000e+03]), 0.39974021911621094)\n\n Means:\n Initiate the first image in 8.22 seconds.\n return array([ 1.36666667e-02, -2.13888889e+01, 4.20000000e+01, -1.00000000e+00, 1.45800000e+03])\n a = 1.36e-2\n b = -2.13e1\n c = 4.2e1\n d = -1\n\n For 2nd, 3rd image, one image were processed in 0.50, 0.55 second and returned a np array respectly.\n'''\n\nhost = \"192.168.3.111:8080\" # ip on raspberryPi\nhoststr = 'http://' + host + '/?action=stream'\n\nsrc = np.array([[\n [0, 350],\n [240,240],\n [400,240],\n [640,350]\n]]).astype(np.float32)\n\ndst = np.array([[\n [10,200],\n [10,0],\n [90,0],\n [90,200]\n]]).astype(np.float32)\nM = cv2.getPerspectiveTransform(src, dst)\n\ninitParams = [\n np.linspace(0.001, 0.02, 10),\n np.array(list(np.linspace(1.5, 50, 10)) + list(np.linspace(-50, 1.5, 10))),\n np.arange( 40, 50, 2),\n np.arange(-15, 15, 2)\n]\nrefPos = [50, 180] # The position of the top-center of chess-board on the ground in img_t\n \ndef imageReadFromraspberryPi(hoststr):\n stream=request.urlopen(hoststr)\n bytes=b''\n while True:\n bytes += stream.read(1024)\n a = bytes.find(b'\\xff\\xd8')\n b = bytes.find(b'\\xff\\xd9')\n if a!=-1 and b!=-1:\n break\n \n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n # print(jpg)\n img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),flags=1)\n flipped = cv2.flip(img, -1)\n return flipped\n\n\ndef perspectTransform(img, M_trans):\n # binarize image for white line\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray = (gray<180).astype(np.uint8)*255\n \n # perspectTransform binarized image\n img_t = cv2.warpPerspective(gray, M_trans, (100,200), cv2.WARP_INVERSE_MAP)\n img_t[180:,0:10] = 255\n img_t[180:,90:] = 255\n return img_t\n\n\ndef processImage(hoststr, M_trans, initParams, refPos):\n seedParams = []\n timePrev = time.time()\n while 1:\n img = imageReadFromraspberryPi(hoststr)\n img_t = perspectTransform(img, M_trans)\n paramSearch = None\n if len(seedParams) == 0:\n paramSearch = initParams\n else:\n lla = np.linspace(seedParams[0]-(initParams[0][1]-initParams[0][0]),\n seedParams[0]+(initParams[0][1]-initParams[0][0]), 3)\n llb = np.linspace(seedParams[1]-(initParams[1][1]-initParams[1][0]),\n seedParams[1]+(initParams[1][1]-initParams[1][0]), 3)\n llc = np.arange(seedParams[2]-2, seedParams[2]+3, 2)\n lld = np.arange(seedParams[3]-1, seedParams[3]+2)\n paramSearch = [lla, llb, llc, lld]\n\n coords,params = getBestParams(img_t, paramSearch, refPos)\n score = params[4]\n if score < 50:\n seedParams = []\n else:\n seedParams = params\n \n timeNow = time.time()\n print(params, timeNow - timePrev)\n cv2.imwrite('../temp/origin/' + str(params[2]) + \" \" + str(params[3]) + \".jpeg\", img)\n timePrev = 
timeNow\n\nif __name__ == '__main__':\n processImage(hoststr, M, initParams, refPos)"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.fromstring"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
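A minimal bird's-eye-view warp in the spirit of perspectTransform above; the corner coordinates and output size follow the entry, while the synthetic black frame merely stands in for the camera stream.

    import cv2
    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for one camera frame
    src = np.float32([[0, 350], [240, 240], [400, 240], [640, 350]])  # road trapezoid
    dst = np.float32([[10, 200], [10, 0], [90, 0], [90, 200]])        # top-down rectangle

    M = cv2.getPerspectiveTransform(src, dst)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    binary = (gray < 180).astype(np.uint8) * 255     # threshold for the white line
    top_down = cv2.warpPerspective(binary, M, (100, 200))
    print(top_down.shape)  # (200, 100)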
milkKarten/IC3Net | [
"98cbc4874d755ca5396f6c8caa24738845c52b44"
] | [
"comm.py"
] | [
"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nimport time\nfrom models import MLP\nfrom action_utils import select_action, translate_action\nfrom networks import ProtoNetwork, ProtoLayer\nfrom noise import OUNoise\nimport numpy as np\n\nclass CommNetMLP(nn.Module):\n \"\"\"\n MLP based CommNet. Uses communication vector to communicate info\n between agents\n \"\"\"\n def __init__(self, args, num_inputs, train_mode=True):\n \"\"\"Initialization method for this class, setup various internal networks\n and weights\n\n Arguments:\n MLP {object} -- Self\n args {Namespace} -- Parse args namespace\n num_inputs {number} -- Environment observation dimension for agents\n \"\"\"\n\n super(CommNetMLP, self).__init__()\n self.args = args\n self.nagents = args.nagents\n self.hid_size = args.hid_size\n self.comm_passes = args.comm_passes\n self.recurrent = args.recurrent\n self.continuous = args.continuous\n # If true, we add noise to the communication being output by each agent.\n self.add_comm_noise = args.add_comm_noise\n\n # TODO: remove this is just for debugging purposes just to verify that the communication is happening in a\n # disrete manner\n self.unique_comms = []\n\n # defining mode which is useful in the case of prototype layers.\n self.train_mode = train_mode\n\n # Only really used when you're using prototypes\n self.exploration_noise = OUNoise(args.comm_dim)\n self.explore_choose_proto_noise = OUNoise(args.num_proto)\n\n # see if you're using discrete communication and using prototypes\n self.discrete_comm = args.discrete_comm\n # self.use_proto = args.use_proto\n\n # num_proto is not really relevant when use_proto is set to False\n self.num_proto = args.num_proto\n\n # this is discrete/proto communication which is not to be confused with discrete action. T\n # Although since the communication is being added to the encoded state directly, it makes things a bit tricky.\n if args.discrete_comm:\n self.proto_layer = ProtoNetwork(args.hid_size, args.comm_dim, args.discrete_comm, num_layers=2,\n hidden_dim=64, num_protos=args.num_proto, constrain_out=False)\n\n if self.continuous:\n self.action_mean = nn.Linear(args.hid_size, args.dim_actions)\n self.action_log_std = nn.Parameter(torch.zeros(1, args.dim_actions))\n else:\n self.heads = nn.ModuleList([nn.Linear(args.hid_size, o)\n for o in args.naction_heads])\n\n\n self.init_std = args.init_std if hasattr(args, 'comm_init_std') else 0.2\n\n # Mask for communication\n if self.args.comm_mask_zero:\n self.comm_mask = torch.zeros(self.nagents, self.nagents)\n else:\n # this just prohibits self communication\n self.comm_mask = torch.ones(self.nagents, self.nagents) \\\n - torch.eye(self.nagents, self.nagents)\n\n\n # Since linear layers in PyTorch now accept * as any number of dimensions\n # between last and first dim, num_agents dimension will be covered.\n # The network below is function r in the paper for encoding\n # initial environment stage\n\n # Note: num_inputs is 29 in the case Predator Prey.\n # TODO: Since currently you directly add the weighted hidden state to the encoded observation\n # the output of the encoder is of the shape hidden. Basically we need to now make sure that in case of\n # discrete also the dimension of the output of the state encoder is same as dimension of the output of the\n # discrete communication.\n\n # self.encoder = nn.Linear(num_inputs, args.hid_size)\n\n # changed this for prototype based method. 
But should still work in the old case.\n self.encoder = nn.Linear(num_inputs, args.comm_dim)\n\n # if self.args.env_name == 'starcraft':\n # self.state_encoder = nn.Linear(num_inputs, num_inputs)\n # self.encoder = nn.Linear(num_inputs * 2, args.hid_size)\n if args.recurrent:\n self.hidd_encoder = nn.Linear(args.hid_size, args.hid_size)\n\n # TODO: currently the prototype is only being handled for the recurrent case. Do it more generally\n if args.recurrent:\n # not sure why is hidden dependent on batch size\n # also the initialised hiddens arent being assigned to anything\n self.init_hidden(args.batch_size)\n\n # Old code when the input size was equal to the hidden size.\n # self.f_module = nn.LSTMCell(args.hid_size, args.hid_size)\n\n self.f_module = nn.LSTMCell(args.comm_dim, args.hid_size)\n\n\n else:\n if args.share_weights:\n self.f_module = nn.Linear(args.hid_size, args.hid_size)\n self.f_modules = nn.ModuleList([self.f_module\n for _ in range(self.comm_passes)])\n else:\n self.f_modules = nn.ModuleList([nn.Linear(args.hid_size, args.hid_size)\n for _ in range(self.comm_passes)])\n # else:\n # raise RuntimeError(\"Unsupported RNN type.\")\n\n # Our main function for converting current hidden state to next state\n # self.f = nn.Linear(args.hid_size, args.hid_size)\n\n if args.share_weights:\n self.C_module = nn.Linear(args.hid_size, args.hid_size)\n self.C_modules = nn.ModuleList([self.C_module\n for _ in range(self.comm_passes)])\n else:\n # changed t\n # self.C_modules = nn.ModuleList([nn.Linear(args.hid_size, args.hid_size)\n # for _ in range(self.comm_passes)])\n\n self.C_modules = nn.ModuleList([nn.Linear(args.comm_dim, args.comm_dim)\n for _ in range(self.comm_passes)])\n\n # self.C = nn.Linear(args.hid_size, args.hid_size)\n\n # initialise weights as 0\n\n if args.comm_init == 'zeros':\n for i in range(self.comm_passes):\n self.C_modules[i].weight.data.zero_()\n self.tanh = nn.Tanh()\n\n # print(self.C)\n # self.C.weight.data.zero_()\n # Init weights for linear layers\n # self.apply(self.init_weights)\n\n self.value_head = nn.Linear(self.hid_size, 1)\n\n # communication limit, default always allows communication\n self.comm_budget = torch.tensor([self.args.max_steps+1] * self.nagents)\n self.budget = args.budget\n\n # autoencoder decoder\n if self.args.autoencoder_action:\n self.decoderNet = nn.Linear(args.hid_size, num_inputs+self.args.nagents)\n else:\n self.decoderNet = nn.Linear(args.hid_size, num_inputs)\n\n # remove null messages\n # with open('IC3Net/nulls/'+self.args.pretrain_exp_name+'/seed' + str(self.args.seed) + '/nulls.txt', 'r') as f:\n # with open('/Users/seth/Documents/research/neurips/nulls/tj_easy_proto_soft_minComm_autoencoder/seed' + str(self.args.seed) + '/nulls.txt', 'r') as f:\n # with open('/Users/seth/Documents/research/neurips/nulls/'+self.args.exp_name+'/seed' + str(self.args.seed) + '/nulls.txt', 'r') as f:\n if self.args.remove_null:\n null_path = os.path.join(self.args.null_dict_dir, exp_name, \"seed\" + str(seed), 'nulls.txt')\n with open(null_path) as f:\n protos = f.readlines()\n for i in range(len(protos)):\n protos[i] = protos[i].replace(\"\\n\", \"\").split(',')\n self.null_dict = torch.tensor(np.array(protos).astype(np.float32))\n\n self.num_null = 0\n self.num_good_comms = 0\n self.num_cut_comms = 0\n self.num_comms = 0\n\n self.null_action = np.zeros(self.args.nagents)\n\n def get_agent_mask(self, batch_size, info):\n n = self.nagents\n\n if 'alive_mask' in info:\n agent_mask = torch.from_numpy(info['alive_mask'])\n num_agents_alive = 
agent_mask.sum()\n else:\n agent_mask = torch.ones(n)\n num_agents_alive = n\n\n agent_mask = agent_mask.view(1, 1, n)\n agent_mask = agent_mask.expand(batch_size, n, n).unsqueeze(-1)\n\n return num_agents_alive, agent_mask\n\n def forward_state_encoder(self, x):\n hidden_state, cell_state = None, None\n\n if self.args.recurrent:\n x, extras = x\n\n # In case of recurrent first take out the actual observation and then encode it.\n x = self.encoder(x)\n\n if self.args.rnn_type == 'LSTM':\n # if you're using the extras would have both the hidden and the cell state.\n hidden_state, cell_state = extras\n else:\n hidden_state = extras\n # hidden_state = self.tanh( self.hidd_encoder(prev_hidden_state) + x)\n else:\n x = self.encoder(x)\n x = self.tanh(x)\n hidden_state = x\n\n return x, hidden_state, cell_state\n\n def decode(self):\n y = self.h_state + self.comms_all\n y = self.decoderNet(y)\n return y\n\n def get_null_action(self):\n return self.null_action\n\n def forward(self, x, info={}):\n # TODO: Update dimensions\n \"\"\"Forward function for CommNet class, expects state, previous hidden\n and communication tensor.\n B: Batch Size: Normally 1 in case of episode\n N: number of agents\n\n Arguments:\n x {tensor} -- State of the agents (N x num_inputs)\n prev_hidden_state {tensor} -- Previous hidden state for the networks in\n case of multiple passes (1 x N x hid_size)\n comm_in {tensor} -- Communication tensor for the network. (1 x N x N x hid_size)\n\n Returns:\n tuple -- Contains\n next_hidden {tensor}: Next hidden state for network\n comm_out {tensor}: Next communication tensor\n action_data: Data needed for taking next action (Discrete values in\n case of discrete, mean and std in case of continuous)\n v: value head\n \"\"\"\n\n # if self.args.env_name == 'starcraft':\n # maxi = x.max(dim=-2)[0]\n # x = self.state_encoder(x)\n # x = x.sum(dim=-2)\n # x = torch.cat([x, maxi], dim=-1)\n # x = self.tanh(x)\n\n # print(x[0].size(), x[0],\"\\n\")\n x, hidden_state, cell_state = self.forward_state_encoder(x)\n if self.args.autoencoder:\n self.h_state = hidden_state.clone()\n # print(x, hidden_state, cell_state)\n # import sys\n # sys.exit(0)\n batch_size = x.size()[0]\n n = self.nagents\n\n # this should remain regardless of using prototypes or not.\n num_agents_alive, agent_mask = self.get_agent_mask(batch_size, info)\n\n # Hard Attention - action whether an agent communicates or not\n if self.args.hard_attn:\n comm_action = torch.tensor(info['comm_action'])\n for c in range(self.args.nagents):\n if agent_mask[0,0,c] == 0: continue\n # if info['comm_action'][c] == 0:\n # self.num_cut_comms += 1\n self.num_comms += num_agents_alive\n # not sure if this passes batch sizes larger than 1\n # assert batch_size == 1\n # if info['step_t'] == 0:\n # # reset communication budget at the beginning of the episode\n # self.comm_budget = self.budget * torch.tensor([self.args.max_steps] * self.nagents\n # # self.comm_budget -= comm_action\n # if self.budget != 1:\n # comm_action[self.comm_budget <= 0] = 0\n # Add random masking according to the budget\n if self.train_mode:\n info['comm_budget'] = np.random.choice([1.,0.], size=self.nagents, p=[self.budget, 1-self.budget])\n comm_action = comm_action * info['comm_budget']\n # print(\"comm action, budget\", comm_action, self.comm_budget, info['step_t'])\n comm_action_mask = comm_action.expand(batch_size, n, n).unsqueeze(-1)\n # action 1 is talk, 0 is silent i.e. 
act as dead for comm purposes.\n agent_mask = agent_mask * comm_action_mask.double()\n\n agent_mask_transpose = agent_mask.transpose(1, 2)\n all_comms = []\n for i in range(self.comm_passes):\n if self.args.use_proto:\n raw_outputs = self.proto_layer(hidden_state)\n # raw_outputs is of shape (1, num_agents, num_protos). But we need to get rid of that first dimension.\n raw_outputs = torch.squeeze(raw_outputs, 0)\n if self.train_mode:\n comm = self.proto_layer.step(raw_outputs, True, self.explore_choose_proto_noise, 'cpu')\n else:\n comm = self.proto_layer.step(raw_outputs, False, None, 'cpu')\n all_comms.append(comm.detach().clone())\n # Comm assumes shape (1, num_agents, num_protos), so just add that dimension back in.\n comm = torch.unsqueeze(comm, 0)\n\n if self.add_comm_noise:\n # Currently, just hardcoded. We want enough noise to have an effect but not too much to prevent\n # learning.\n std = 0.2 # 0.4 for dim 16\n # Generates samples from a zero-mean unit gaussian, which we rescale by the std parameter.\n noise = torch.randn_like(comm) * std\n comm += noise\n # check if comm contains null vector\n # print(comm.shape) # 1,5,64\n # sys.exit()\n # if self.args.null_regularization:\n\n elif self.args.discrete_comm: #one-hot\n raw_outputs = self.proto_layer(hidden_state)\n raw_outputs = torch.squeeze(raw_outputs, 0)\n comm = self.proto_layer.onehot_step(raw_outputs, self.train_mode)\n all_comms.append(comm.detach().clone())\n comm = torch.unsqueeze(comm, 0)\n\n else:\n # print(f\"inside else {hidden_state.size()}\")\n comm = hidden_state\n # print(\"before\", comm.shape, comm) # (5,32)\n all_comms.append(torch.squeeze(comm, 0).detach().clone())\n assert self.args.comm_dim == self.args.hid_size , \"If not using protos comm dim should be same as hid\"\n\n if self.args.remove_null:\n null_mask = torch.ones_like(comm)\n for j in range(self.args.nagents):\n if agent_mask[0,0,j] == 0:\n continue\n # print(comm[0,j].shape, self.null_dict[0].shape)\n found_null = False\n for null_i in range(len(self.null_dict)):\n # print(torch.nn.functional.mse_loss(self.null_dict[null_i], comm[0,j]))\n if torch.nn.functional.mse_loss(self.null_dict[null_i], comm[0,j]) < 0.1:\n null_mask[0,j] *= 0\n found_null = True\n break\n if not found_null:\n # track non null communicated\n if info['comm_action'][j] == 1:\n self.num_good_comms += 1\n # else:\n # if info['comm_action'][j] == 0:\n # self.num_null += 1\n self.null_action = np.zeros(self.args.nagents)\n if 'null' in self.args.exp_name or True:\n for j in range(self.args.nagents):\n if null_mask[0,j].sum() == 0:\n if info['comm_action'][j] == 1: # we cut an additional communication\n self.null_action[j] = 1 # get one comm back for later\n self.num_null += 1\n self.num_cut_comms += 1\n comm = comm * null_mask\n # comm = hidden_state.view(batch_size, n, self.hid_size) if self.args.recurrent else hidden_state\n comm = comm.view(batch_size, n, self.args.comm_dim) if self.args.recurrent else comm\n # Get the next communication vector based on next hidden state\n # comm = comm.unsqueeze(-2).expand(-1, n, n, self.hid_size)\n\n # changed for accomadating prototype based approach as well.\n comm = comm.unsqueeze(-2).expand(-1, n, n, self.args.comm_dim)\n\n # Create mask for masking self communication\n mask = self.comm_mask.view(1, n, n)\n\n mask = mask.expand(comm.shape[0], n, n)\n mask = mask.unsqueeze(-1)\n\n mask = mask.expand_as(comm)\n comm = comm * mask\n\n # print(\"comm mode \", self.args.comm_mode)\n if hasattr(self.args, 'comm_mode') and self.args.comm_mode 
== 'avg' \\\n and num_agents_alive > 1:\n comm = comm / (num_agents_alive - 1)\n\n # Mask comm_in\n # Mask communcation from dead agents\n comm = comm * agent_mask\n # Mask communication to dead agents\n comm = comm * agent_mask_transpose\n # Combine all of C_j for an ith agent which essentially are h_j\n comm_sum = comm.sum(dim=1)\n\n c = self.C_modules[i](comm_sum)\n if self.args.autoencoder:\n self.comms_all = c.clone() # encoded received communciations for autoencoder\n\n if self.args.recurrent:\n # skip connection - combine comm. matrix and encoded input for all agents\n inp = x + c\n\n # inp = inp.view(batch_size * n, self.hid_size)\n\n inp = inp.view(batch_size * n, self.args.comm_dim)\n\n output = self.f_module(inp, (hidden_state, cell_state))\n\n hidden_state = output[0]\n cell_state = output[1]\n\n else: # MLP|RNN\n # Get next hidden state from f node\n # and Add skip connection from start and sum them\n hidden_state = sum([x, self.f_modules[i](hidden_state), c])\n hidden_state = self.tanh(hidden_state)\n\n # v = torch.stack([self.value_head(hidden_state[:, i, :]) for i in range(n)])\n # v = v.view(hidden_state.size(0), n, -1)\n value_head = self.value_head(hidden_state)\n h = hidden_state.view(batch_size, n, self.hid_size)\n\n if self.continuous:\n action_mean = self.action_mean(h)\n action_log_std = self.action_log_std.expand_as(action_mean)\n action_std = torch.exp(action_log_std)\n # will be used later to sample\n action = (action_mean, action_log_std, action_std)\n else:\n # discrete actions\n action = [F.log_softmax(head(h), dim=-1) for head in self.heads]\n # print(f\"uses discrete actions {action}\")\n if self.args.recurrent:\n if info.get('record_comms') is not None:\n # Go through the all comms passes and only pick out comms for the agent you want.\n # filtered_comms = np.array([c[info.get('record_comms')] for c in all_comms])\n filtered_comms = np.array([c.numpy() for c in all_comms])\n if self.args.env_name == 'predator_prey':\n assert len(filtered_comms) == 1, \"Only support one agent at a time\"\n # print(\"communication comm.py\", c.shape, len(filtered_comms[0]))\n # print(info['comm_action'])\n return action, value_head, (hidden_state.clone(), cell_state.clone()), filtered_comms\n return action, value_head, (hidden_state.clone(), cell_state.clone())\n else:\n if info.get('record_comms') is not None:\n filtered_comms = [c[info.get('record_comms')] for c in all_comms]\n assert len(filtered_comms) == 1, \"Only support one agent at a time\"\n return action, value_head, filtered_comms[0]\n return action, value_head\n\n def init_weights(self, m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0, self.init_std)\n\n def init_hidden(self, batch_size):\n # dim 0 = num of layers * num of direction\n return tuple(( torch.zeros(batch_size * self.nagents, self.hid_size, requires_grad=True),\n torch.zeros(batch_size * self.nagents, self.hid_size, requires_grad=True)))\n"
] | [
[
"torch.randn_like",
"torch.ones",
"torch.zeros",
"numpy.random.choice",
"torch.eye",
"torch.from_numpy",
"torch.unsqueeze",
"torch.tensor",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.LSTMCell",
"torch.exp",
"torch.nn.functional.mse_loss",
"numpy.array",
"numpy.zeros",
"torch.ones_like",
"torch.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dahe-cvl/vhh_od | [
"ded750a5d7de55079da4b295230d09c8e99a9ec2"
] | [
"vhh_od/Video.py"
] | [
"import numpy as np\nimport cv2\nimport datetime\nfrom vhh_od.utils import *\nfrom vhh_od.Shot import Shot\nfrom PIL import Image\nimport torchvision\n\n\nclass Video(object):\n \"\"\"\n This class is representing a video. Each instance of this class is holding the properties of one Video.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n\n #printCustom(\"create instance of video class ... \", STDOUT_TYPE.INFO);\n self.vidFile = ''\n self.vidName = \"\"\n self.frame_rate = 10\n self.channels = 0\n self.height = 0\n self.width = 0\n self.format = ''\n self.length = 0\n self.number_of_frames = 0\n self.vid = None\n self.convert_to_gray = False\n self.convert_to_hsv = False\n self.shot_list = []\n\n def addShotObject(self, shot_obj: Shot):\n self.shot_list.append(shot_obj)\n\n def load(self, vidFile: str):\n \"\"\"\n Method to load video file.\n\n :param vidFile: [required] string representing path to video file\n \"\"\"\n\n #print(vidFile)\n printCustom(f\"Loading Video \\\"{vidFile}\\\"... \", STDOUT_TYPE.INFO)\n self.vidFile = vidFile\n if(self.vidFile == \"\"):\n #print(\"A\")\n print(\"ERROR: you must add a video file path!\")\n exit(1)\n self.vidName = self.vidFile.split('/')[-1]\n self.vid = cv2.VideoCapture(self.vidFile)\n\n if(self.vid.isOpened() == False):\n #print(\"B\")\n print(f\"ERROR: not able to open video file \\\"{vidFile}\\\"!\")\n exit(1)\n\n status, frm = self.vid.read()\n\n self.channels = frm.shape[2]\n self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.frame_rate = self.vid.get(cv2.CAP_PROP_FPS)\n self.format = self.vid.get(cv2.CAP_PROP_FORMAT)\n self.number_of_frames = self.vid.get(cv2.CAP_PROP_FRAME_COUNT)\n\n self.vid.release()\n\n def printVIDInfo(self):\n \"\"\"\n Method to a print summary of video properties.\n \"\"\"\n\n print(\"---------------------------------\")\n print(\"Video information\")\n print(\"filename: \" + str(self.vidFile))\n print(\"format: \" + str(self.format))\n print(\"fps: \" + str(self.frame_rate))\n print(\"channels: \" + str(self.channels))\n print(\"width: \" + str(self.width))\n print(\"height: \" + str(self.height))\n print(\"nFrames: \" + str(self.number_of_frames))\n print(\"---------------------------------\")\n print(\"<<< Shot list >>>\")\n for shot in self.shot_list:\n shot.printShotInfo()\n\n def getAllFrames(self, preprocess_pytorch=None):\n # read all frames of video\n cap = cv2.VideoCapture(self.vidFile)\n\n frame_l = []\n frames_orig = []\n\n cnt = 0\n while (True):\n cnt = cnt + 1\n ret, frame_orig = cap.read()\n\n # print(cnt)\n # print(ret)\n # print(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n if (ret == True):\n if(preprocess_pytorch is not None):\n frame = preprocess_pytorch(frame_orig)\n frame_l.append(frame) # .transpose((2,0,1)\n\n # Deep Sort Tracker needs original image in RGB Space\n frames_orig.append(cv2.cvtColor(frame_orig, cv2.COLOR_BGR2RGB))\n else:\n break\n cap.release()\n\n if (preprocess_pytorch is not None):\n all_tensors_l = torch.stack(frame_l)\n return {\"Tensors\": all_tensors_l, \"Images\" : frames_orig}\n\n # generator for loading Video Frame by Frame\n def loadVideoByFrame(self, preprocess_pytorch=None):\n\n cap = cv2.VideoCapture(self.vidFile)\n\n cnt = 0\n while (True):\n cnt = cnt + 1\n ret, frame_orig = cap.read()\n\n if (ret == True):\n if(preprocess_pytorch is not None):\n frame_processed = preprocess_pytorch(frame_orig)\n else:\n frame_processed = None\n frame_orig = cv2.cvtColor(frame_orig, cv2.COLOR_BGR2RGB)\n yield 
{\"Tensors\": frame_processed, \"Images\" : frame_orig}\n else:\n break\n cap.release()\n\n def getFrame(self, frame_id):\n \"\"\"\n Method to get one frame of a video on a specified position.\n\n :param frame_id: [required] integer value with valid frame index\n :return: numpy frame (WxHx3)\n \"\"\"\n\n self.vid.open(self.vidFile)\n if(frame_id >= self.number_of_frames):\n print(\"ERROR: frame idx out of range!\")\n return []\n\n #print(\"Read frame with id: \" + str(frame_id));\n time_stamp_start = datetime.datetime.now().timestamp()\n\n self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame_id)\n status, frame_np = self.vid.read()\n self.vid.release()\n\n if(status == True):\n if(self.convert_to_gray == True):\n frame_np = cv2.cvtColor(frame_np, cv2.COLOR_BGR2GRAY)\n #print(frame_gray_np.shape);\n if (self.convert_to_hsv == True):\n frame_np = cv2.cvtColor(frame_np, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(frame_np)\n\n time_stamp_end = datetime.datetime.now().timestamp()\n time_diff = time_stamp_end - time_stamp_start\n #print(\"time: \" + str(round(time_diff, 4)) + \" sec\")\n\n return frame_np\n\n def getFramesByShots_NEW(self, preprocess_pytorch=None, max_frames_per_return = 2000):\n \"\"\"\n Returns Video Shot by Shot\n If a single shot is longer than X frames, it will split the shot into frames of length at most X. \n \"\"\"\n # initialize video capture\n cap = cv2.VideoCapture(self.vidFile)\n\n frame_number = 0\n for shot in self.shot_list:\n\n frame_l = []\n frames_orig = []\n\n sid = int(shot.sid)\n start_idx = int(shot.start_pos)\n stop_idx = int(shot.end_pos)\n shot_is_not_over = True\n\n while shot_is_not_over:\n # print(f\"Retrieving Frames for Shot {sid} (frames {frame_number} to {stop_idx})...\")\n while frame_number < stop_idx and len(frame_l) < max_frames_per_return:\n \n # read next frame\n success, image = cap.read()\n frame_number = frame_number + 1\n #print(frame_number)\n\n # if(start_idx == stop_idx):\n # cv2.imshow(\"frame\", image)\n # k = cv2.waitKey()\n\n # skip to start position (for gradual cuts)\n if frame_number < start_idx:\n # print(frame_number)\n continue\n\n if success == True:\n # if ( (frame_number >= start_idx and frame_number <= stop_idx) or (start_idx == stop_idx) ):\n if (preprocess_pytorch != None):\n frames_orig.append(image)\n image = preprocess_pytorch(image)\n frame_l.append(image)\n else:\n frames_orig.append(image)\n else:\n break\n\n if frame_l != [] and preprocess_pytorch is not None:\n all_tensors_l = torch.stack(frame_l)\n yield {\"Tensors\": all_tensors_l, \"Images\": np.array(frames_orig), \"ShotInfo\": shot}\n else:\n yield {\"Tensors\": None, \"Images\": np.array(frames_orig), \"ShotInfo\": shot}\n\n # End the shot if the current frame is the last frame (or an even later frame)\n if not frame_number < stop_idx:\n shot_is_not_over = False\n # If the shot is not over, prepare to return more frames in this shot \n else:\n frame_l = []\n frames_orig = []\n\n cap.release()\n\n # Returns Video Shot by Shot\n def getFramesByShots(self, preprocess_pytorch=None):\n # initialize video capture\n cap = cv2.VideoCapture(self.vidFile)\n cnt = 0\n\n for shot in self.shot_list:\n\n frame_l = []\n frames_orig = []\n start_pos = shot.start_pos\n end_pos = shot.end_pos\n\n #print(f\"Retrieving Frames for Shot {shot.sid} (frames {cnt} to {end_pos})...\")\n\n while cnt <= end_pos:\n cnt = cnt + 1\n ret, frame_orig = cap.read()\n\n # skip to start position (for gradual cuts)\n if cnt < start_pos:\n continue\n\n if ret == True:\n if preprocess_pytorch is not 
None:\n frame = preprocess_pytorch(frame_orig)\n frame_l.append(frame)\n\n # Deep Sort Tracker needs original image in RGB Space\n frames_orig.append(cv2.cvtColor(frame_orig, cv2.COLOR_BGR2RGB))\n else:\n break\n\n if preprocess_pytorch is not None:\n all_tensors_l = torch.stack(frame_l)\n yield {\"Tensors\": all_tensors_l, \"Images\": frames_orig, \"ShotInfo\": shot}\n else:\n yield {\"Tensors\": None, \"Images\": frames_orig, \"ShotInfo\": shot}\n\n cap.release()\n\n def getShotFromID(self, sid=-1):\n for shot in self.shot_list:\n if(shot.sid == sid):\n return shot\n return None\n\n def export2csv(self, filepath=None):\n\n printCustom(f\"Exporting results to CSV \\\"{filepath}\\'\", type=STDOUT_TYPE.INFO)\n\n if(filepath == None):\n print(\"ERROR: You have to specify a vailid path! csv export aborted!\")\n exit()\n\n ## clean up\n remove_csv_export(filepath)\n\n ## export\n field_names = ['movie_name', 'sid', 'start', 'stop', 'fid', 'oid', 'bb_x1', 'bb_y1', 'bb_x2', 'bb_y2',\n 'object_conf', 'class_score', 'class_name']\n\n add_header(filepath, field_names)\n for shot in self.shot_list:\n dict_l = shot.convertObjectList2Dict()\n for dict_entry in dict_l:\n append_dict_as_row(filepath, dict_entry, field_names)\n\n def loadCsvExport(self, filepath=\"/data/share/maxrecall_vhh_mmsi/develop/videos/results/vhh_od/raw_results/test.csv\"):\n print(\"load csv results export ... \")\n\n field_names = ['movie_name', 'sid', 'start', 'stop', 'fid', 'oid', 'bb_x1', 'bb_y1', 'bb_x2', 'bb_y2',\n 'object_conf', 'class_score', 'class_name']\n\n dict_l = load_csv_as_dict(file_name=filepath, field_names=field_names)\n print(dict_l)\n\n self.shot_list = []\n\n def visualizeShotsWithBB(self, path=None, sid=-1, all_frames_tensors=None, boundingbox_flag=True,\n save_single_plots_flag=True, plot_flag=False, save_as_video_flag=True):\n #print(\"plot shot with bounding boxes ... 
\")\n\n if(path == None):\n print(\"Error: you need to specify a valid path!\")\n exit()\n\n shot = self.getShotFromID(sid=sid)\n if(shot == None):\n print(\"ERROR: shot with specified ID [\" + str(sid) + \"] is not available!\")\n return -1\n\n shot_frames_tensors = all_frames_tensors[shot.start_pos:shot.end_pos+1, :, :, :]\n frameSize = (int(shot_frames_tensors[0].size()[1]), int(shot_frames_tensors[0].size()[2]))\n\n video_results_path = path + str(self.vidName.split('.')[0]) + \"/\"\n\n if not os.path.exists(video_results_path):\n os.makedirs(video_results_path)\n\n if (save_single_plots_flag == True):\n if not os.path.exists(video_results_path + str(sid)):\n os.makedirs(video_results_path + str(sid))\n\n if (save_as_video_flag == True):\n out = cv2.VideoWriter(video_results_path + str(sid) + \".avi\", cv2.VideoWriter_fourcc(*\"MJPG\"), 12, frameSize)\n\n for i in range(0, len(shot_frames_tensors)):\n frame_id = i + shot.start_pos\n\n frame = shot_frames_tensors[i]\n frame_np = np.array(frame).transpose((1, 2, 0))\n normalized_frame = frame_np.copy()\n normalized_frame = cv2.normalize(frame_np, normalized_frame, 0, 255, cv2.NORM_MINMAX)\n normalized_frame = normalized_frame.astype('uint8')\n\n if (boundingbox_flag == True):\n for obj in shot.object_list:\n if (frame_id == obj.fid):\n cv2.rectangle(normalized_frame, (obj.bb_x1, obj.bb_y1), (obj.bb_x2, obj.bb_y2), (0, 255, 0), 1)\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = (int(obj.bb_x1), int(obj.bb_y1) - 3)\n fontScale = 0.4\n fontColor = (0, 255, 0, 255)\n thickness = 1\n line_type = cv2.LINE_AA\n obj_text = str(obj.object_class_name) + \": \" + str(round(obj.object_conf, 2))\n cv2.putText(normalized_frame,\n obj_text,\n bottomLeftCornerOfText,\n font,\n fontScale,\n fontColor,\n thickness,\n line_type)\n\n if (save_as_video_flag == True):\n out.write(normalized_frame)\n\n if (save_single_plots_flag == True):\n cv2.imwrite(video_results_path + str(sid) + \"/\" + str(frame_id) + \".png\", normalized_frame)\n\n if (plot_flag == True):\n cv2.imshow(\"Shot ID:\" + str(sid), normalized_frame)\n cv2.waitKey(10)\n\n if (save_as_video_flag == True):\n out.release()\n\n if (plot_flag == True):\n cv2.destroyAllWindows()\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
libing4752/myia | [
"3bfb8ac20468bfc7be34128a396f79f2e03b24a1"
] | [
"tests/test_compile.py"
] | [
"from pytest import mark\nfrom copy import copy\nimport numpy as np\n\n\nfrom myia.abstract import from_value\nfrom myia.pipeline import standard_pipeline, standard_debug_pipeline\nfrom myia.prim import ops as P\nfrom myia.prim.py_implementations import \\\n typeof, scalar_add, partial\nfrom myia.utils import no_prof, Profile\n\n\nfrom .common import to_abstract_test\n\n\ncompile_pipeline = standard_pipeline\n\ndebug_fn = standard_debug_pipeline \\\n .select('parse', 'resolve', 'infer', 'specialize', 'export')\n\n\ndef parse_compare(*tests, optimize=True, array=False, python=True,\n profile=no_prof):\n \"\"\"Decorate a function to parse and run it against pure Python.\n\n Returns a unit test that will parse the function, and then for\n each `inputs` tuple in `tests` it will check that the pure Python,\n undecorated function returns that same output.\n\n This uses the full myia pipeline.\n\n Arguments:\n tests: One or more inputs tuple.\n\n \"\"\"\n pipeline = compile_pipeline if optimize else \\\n compile_pipeline.configure({'opt.phases.main': []})\n\n def decorate(fn):\n def test(args):\n nonlocal profile\n if not isinstance(args, tuple):\n args = (args,)\n if python:\n ref_result = fn(*map(copy, args))\n argspec = tuple(from_value(arg, broaden=True) for arg in args)\n if profile is True:\n profile = Profile()\n res = pipeline.run(input=fn, argspec=argspec, profile=profile)\n profile.print()\n myia_fn = res['output']\n myia_result = myia_fn(*map(copy, args))\n if python:\n if array:\n np.testing.assert_allclose(ref_result, myia_result)\n else:\n assert ref_result == myia_result\n\n m = mark.parametrize('args', list(tests))(test)\n m.__orig__ = fn\n return m\n return decorate\n\n\n@parse_compare((2, 3))\ndef test_simple(x, y):\n return x + y\n\n\n@parse_compare((42,))\ndef test_constant(x):\n return x == 42\n\n\n@parse_compare((33, 42), (42, 33))\ndef test_if(x, y):\n if x > y:\n return x - y\n else:\n return y - x\n\n\n@parse_compare((33, 42), (44, 42))\ndef test_if_nottail(x, y):\n def cap(x):\n if x > 42:\n x = 42\n return x\n return y - cap(x)\n\n\n@parse_compare((42, 33))\ndef test_call(x, y):\n def f(x):\n return x * x\n\n return f(x) + f(y)\n\n\n@parse_compare((42,))\ndef test_tailcall(x):\n def fsum(x, a):\n if x == 1:\n return a\n else:\n return fsum(x - 1, a + x)\n return fsum(x, 1)\n\n\n@parse_compare((-1,), (1,))\ndef test_callp(x):\n def fn(f, x):\n return f(x)\n\n def f(x):\n return -x\n\n return fn(f, -42)\n\n\n@parse_compare((True, 42, 33))\ndef test_call_hof(c, x, y):\n def f1(x, y):\n return x + y\n\n def f2(x, y):\n return x * y\n\n def choose(c):\n if c:\n return f1\n else:\n return f2\n\n return choose(c)(x, y) + choose(not c)(x, y)\n\n\n@parse_compare((15, 17), optimize=False)\ndef test_partial_prim(x, y):\n return partial(scalar_add, x)(y)\n\n\ndef test_switch_nontail():\n def fn(x, y):\n def f1():\n return x\n\n def f2():\n return y\n\n a = P.switch(x > y, f1, f2)()\n return a * a\n\n i64 = typeof(1)\n argspec = (to_abstract_test(i64), to_abstract_test(i64))\n myia_fn = compile_pipeline.run(input=fn,\n argspec=argspec)['output']\n\n for test in [(6, 23, 23**2), (67, 23, 67**2)]:\n *args, expected = test\n assert myia_fn(*args) == expected\n"
] | [
[
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ploshkin/hivemind | [
"7bb656567417895e9f1d8684a0c0e9ef4e4de25d"
] | [
"hivemind/server/connection_handler.py"
] | [
"import multiprocessing as mp\nimport os\nimport pickle\nfrom typing import Dict\n\nimport grpc\nimport torch\n\nfrom hivemind.proto import runtime_pb2, runtime_pb2_grpc as runtime_grpc\nfrom hivemind.server.expert_backend import ExpertBackend\nfrom hivemind.utils import get_logger, serialize_torch_tensor, deserialize_torch_tensor, Endpoint, nested_flatten\nfrom hivemind.utils.grpc import GRPC_KEEPALIVE_OPTIONS\nfrom hivemind.utils.asyncio import switch_to_uvloop\n\nlogger = get_logger(__name__)\n\n\nclass ConnectionHandler(mp.context.ForkProcess):\n \"\"\"\n A process that accepts incoming requests to experts and submits them into the corresponding TaskPool.\n\n :note: ConnectionHandler is designed so as to allow using multiple handler processes for the same port.\n :param listen_on: network interface, e.g. \"0.0.0.0:1337\" or \"localhost:*\" (* means pick any port) or \"[::]:7654\"\n :param experts: a dict [UID -> ExpertBackend] with all active experts\n \"\"\"\n\n def __init__(self, listen_on: Endpoint, experts: Dict[str, ExpertBackend]):\n super().__init__()\n self.listen_on, self.experts = listen_on, experts\n self.ready = mp.Event()\n\n def run(self):\n torch.set_num_threads(1)\n loop = switch_to_uvloop()\n\n async def _run():\n grpc.aio.init_grpc_aio()\n logger.debug(f'Starting, pid {os.getpid()}')\n server = grpc.aio.server(options=GRPC_KEEPALIVE_OPTIONS + (\n ('grpc.so_reuseport', 1),\n ('grpc.max_send_message_length', -1),\n ('grpc.max_receive_message_length', -1)\n ))\n runtime_grpc.add_ConnectionHandlerServicer_to_server(self, server)\n\n found_port = server.add_insecure_port(self.listen_on)\n assert found_port != 0, f\"Failed to listen to {self.listen_on}\"\n\n await server.start()\n self.ready.set()\n await server.wait_for_termination()\n logger.debug(f\"ConnectionHandler terminated: (pid={os.getpid()})\")\n\n loop.run_until_complete(_run())\n\n async def info(self, request: runtime_pb2.ExpertUID, context: grpc.ServicerContext):\n return runtime_pb2.ExpertInfo(serialized_info=pickle.dumps(self.experts[request.uid].get_info()))\n\n async def forward(self, request: runtime_pb2.ExpertRequest, context: grpc.ServicerContext):\n inputs = [deserialize_torch_tensor(tensor) for tensor in request.tensors]\n future = self.experts[request.uid].forward_pool.submit_task(*inputs)\n serialized_response = [serialize_torch_tensor(tensor, proto.compression, allow_inplace=True) for tensor, proto\n in zip(await future, nested_flatten(self.experts[request.uid].outputs_schema))]\n\n return runtime_pb2.ExpertResponse(tensors=serialized_response)\n\n async def backward(self, request: runtime_pb2.ExpertRequest, context: grpc.ServicerContext):\n inputs_and_grad_outputs = [deserialize_torch_tensor(tensor) for tensor in request.tensors]\n future = self.experts[request.uid].backward_pool.submit_task(*inputs_and_grad_outputs)\n serialized_response = [serialize_torch_tensor(tensor, proto.compression, allow_inplace=True) for tensor, proto\n in zip(await future, nested_flatten(self.experts[request.uid].grad_inputs_schema))]\n return runtime_pb2.ExpertResponse(tensors=serialized_response)\n"
] | [
[
"torch.set_num_threads"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
magh24/carla_RL_IAs | [
"a13b331ee8d69071570c97b35f1348758d658ee5"
] | [
"misc/automatic_control.py"
] | [
"#!/usr/bin/env python\n\n# Copyright (c) 2018 Intel Labs.\n# authors: German Ros ([email protected])\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\n\"\"\"\n Example of automatic vehicle control from client side.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport datetime\nimport glob\nimport logging\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport weakref\n\ntry:\n import pygame\n from pygame.locals import KMOD_CTRL\n from pygame.locals import KMOD_SHIFT\n from pygame.locals import K_0\n from pygame.locals import K_9\n from pygame.locals import K_BACKQUOTE\n from pygame.locals import K_BACKSPACE\n from pygame.locals import K_COMMA\n from pygame.locals import K_DOWN\n from pygame.locals import K_ESCAPE\n from pygame.locals import K_F1\n from pygame.locals import K_LEFT\n from pygame.locals import K_PERIOD\n from pygame.locals import K_RIGHT\n from pygame.locals import K_SLASH\n from pygame.locals import K_SPACE\n from pygame.locals import K_TAB\n from pygame.locals import K_UP\n from pygame.locals import K_a\n from pygame.locals import K_c\n from pygame.locals import K_d\n from pygame.locals import K_h\n from pygame.locals import K_m\n from pygame.locals import K_p\n from pygame.locals import K_q\n from pygame.locals import K_r\n from pygame.locals import K_s\n from pygame.locals import K_w\nexcept ImportError:\n raise RuntimeError(\"cannot import pygame, make sure pygame package is installed\")\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError(\"cannot import numpy, make sure numpy package is installed\")\n\n# ==============================================================================\n# -- find carla module ---------------------------------------------------------\n# ==============================================================================\ntry:\n sys.path.append(\n glob.glob(\n \"**/carla-*%d.%d-%s.egg\"\n % (\n sys.version_info.major,\n sys.version_info.minor,\n \"win-amd64\" if os.name == \"nt\" else \"linux-x86_64\",\n )\n )[0]\n )\nexcept IndexError:\n pass\n\n# ==============================================================================\n# -- add PythonAPI for release mode --------------------------------------------\n# ==============================================================================\ntry:\n sys.path.append(glob.glob(\"PythonAPI\")[0])\nexcept IndexError:\n pass\n\nimport carla\nfrom carla import ColorConverter as cc\nfrom agents.navigation.roaming_agent import RoamingAgent\nfrom agents.navigation.basic_agent import BasicAgent\n\n\n# ==============================================================================\n# -- Global functions ----------------------------------------------------------\n# ==============================================================================\n\n\ndef find_weather_presets():\n rgx = re.compile(\".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)\")\n name = lambda x: \" \".join(m.group(0) for m in rgx.finditer(x))\n presets = [x for x in dir(carla.WeatherParameters) if re.match(\"[A-Z].+\", x)]\n return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]\n\n\ndef get_actor_display_name(actor, truncate=250):\n name = \" \".join(actor.type_id.replace(\"_\", \".\").title().split(\".\")[1:])\n return (name[: truncate - 1] + u\"\\u2026\") if len(name) > truncate else name\n\n\n# 
==============================================================================\n# -- World ---------------------------------------------------------------\n# ==============================================================================\n\n\nclass World(object):\n def __init__(self, carla_world, hud):\n self.world = carla_world\n self.map = self.world.get_map()\n self.hud = hud\n self.vehicle = None\n self.collision_sensor = None\n self.lane_invasion_sensor = None\n self.camera_manager = None\n self._weather_presets = find_weather_presets()\n self._weather_index = 0\n self.restart()\n self.world.on_tick(hud.on_world_tick)\n\n def restart(self):\n # Keep same camera config if the camera manager exists.\n cam_index = self.camera_manager._index if self.camera_manager is not None else 0\n cam_pos_index = (\n self.camera_manager._transform_index if self.camera_manager is not None else 0\n )\n\n blueprint = self.world.get_blueprint_library().find(\"vehicle.lincoln.mkz2017\")\n blueprint.set_attribute(\"role_name\", \"hero\")\n if blueprint.has_attribute(\"color\"):\n color = random.choice(blueprint.get_attribute(\"color\").recommended_values)\n blueprint.set_attribute(\"color\", color)\n\n # Spawn the vehicle.\n if self.vehicle is not None:\n spawn_point = self.vehicle.get_transform()\n spawn_point.location.z += 2.0\n spawn_point.rotation.roll = 0.0\n spawn_point.rotation.pitch = 0.0\n self.destroy()\n\n spawn_points = self.map.get_spawn_points()\n spawn_point = spawn_points[1]\n self.vehicle = self.world.spawn_actor(blueprint, spawn_point)\n\n while self.vehicle is None:\n spawn_points = self.map.get_spawn_points()\n spawn_point = spawn_points[1]\n self.vehicle = self.world.spawn_actor(blueprint, spawn_point)\n\n # Set up the sensors.\n self.collision_sensor = CollisionSensor(self.vehicle, self.hud)\n self.lane_invasion_sensor = LaneInvasionSensor(self.vehicle, self.hud)\n self.camera_manager = CameraManager(self.vehicle, self.hud)\n self.camera_manager._transform_index = cam_pos_index\n self.camera_manager.set_sensor(cam_index, notify=False)\n actor_type = get_actor_display_name(self.vehicle)\n self.hud.notification(actor_type)\n\n def next_weather(self, reverse=False):\n self._weather_index += -1 if reverse else 1\n self._weather_index %= len(self._weather_presets)\n preset = self._weather_presets[self._weather_index]\n self.hud.notification(\"Weather: %s\" % preset[1])\n self.vehicle.get_world().set_weather(preset[0])\n\n def tick(self, clock):\n self.hud.tick(self, clock)\n\n def render(self, display):\n self.camera_manager.render(display)\n self.hud.render(display)\n\n def destroy(self):\n actors = [\n self.camera_manager.sensor,\n self.collision_sensor.sensor,\n self.lane_invasion_sensor.sensor,\n self.vehicle,\n ]\n for actor in actors:\n if actor is not None:\n actor.destroy()\n\n\n# ==============================================================================\n# -- KeyboardControl -----------------------------------------------------------\n# ==============================================================================\n\n\nclass KeyboardControl(object):\n def __init__(self, world, start_in_autopilot):\n self._autopilot_enabled = start_in_autopilot\n self._control = carla.VehicleControl()\n self._steer_cache = 0.0\n world.vehicle.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\"Press 'H' or '?' 
for help.\", seconds=4.0)\n\n def parse_events(self, world, clock):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if self._is_quit_shortcut(event.key):\n return True\n elif event.key == K_BACKSPACE:\n world.restart()\n elif event.key == K_F1:\n world.hud.toggle_info()\n elif event.key == K_h or (\n event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT\n ):\n world.hud.help.toggle()\n elif event.key == K_TAB:\n world.camera_manager.toggle_camera()\n elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:\n world.next_weather(reverse=True)\n elif event.key == K_c:\n world.next_weather()\n elif event.key == K_BACKQUOTE:\n world.camera_manager.next_sensor()\n elif event.key > K_0 and event.key <= K_9:\n world.camera_manager.set_sensor(event.key - 1 - K_0)\n elif event.key == K_r:\n world.camera_manager.toggle_recording()\n elif event.key == K_q:\n self._control.gear = 1 if self._control.reverse else -1\n elif event.key == K_m:\n self._control.manual_gear_shift = not self._control.manual_gear_shift\n self._control.gear = world.vehicle.get_control().gear\n world.hud.notification(\n \"%s Transmission\"\n % (\"Manual\" if self._control.manual_gear_shift else \"Automatic\")\n )\n elif self._control.manual_gear_shift and event.key == K_COMMA:\n self._control.gear = max(-1, self._control.gear - 1)\n elif self._control.manual_gear_shift and event.key == K_PERIOD:\n self._control.gear = self._control.gear + 1\n elif event.key == K_p:\n self._autopilot_enabled = not self._autopilot_enabled\n world.vehicle.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\n \"Autopilot %s\" % (\"On\" if self._autopilot_enabled else \"Off\")\n )\n if not self._autopilot_enabled:\n self._parse_keys(pygame.key.get_pressed(), clock.get_time())\n self._control.reverse = self._control.gear < 0\n\n def _parse_keys(self, keys, milliseconds):\n self._control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0\n steer_increment = 5e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.7, max(-0.7, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0\n self._control.hand_brake = keys[K_SPACE]\n\n @staticmethod\n def _is_quit_shortcut(key):\n return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)\n\n\n# ==============================================================================\n# -- HUD -----------------------------------------------------------------\n# ==============================================================================\n\n\nclass HUD(object):\n def __init__(self, width, height):\n self.dim = (width, height)\n font = pygame.font.Font(pygame.font.get_default_font(), 20)\n fonts = [x for x in pygame.font.get_fonts() if \"mono\" in x]\n default_font = \"ubuntumono\"\n mono = default_font if default_font in fonts else fonts[0]\n mono = pygame.font.match_font(mono)\n self._font_mono = pygame.font.Font(mono, 14)\n self._notifications = FadingText(font, (width, 40), (0, height - 40))\n self.help = HelpText(pygame.font.Font(mono, 24), width, height)\n self.server_fps = 0\n self.frame_number = 0\n self.simulation_time = 0\n self._show_info = True\n self._info_text = []\n self._server_clock = pygame.time.Clock()\n\n def on_world_tick(self, timestamp):\n 
self._server_clock.tick()\n self.server_fps = self._server_clock.get_fps()\n self.frame_number = timestamp.frame_count\n self.simulation_time = timestamp.elapsed_seconds\n\n def tick(self, world, clock):\n if not self._show_info:\n return\n t = world.vehicle.get_transform()\n v = world.vehicle.get_velocity()\n c = world.vehicle.get_control()\n heading = \"N\" if abs(t.rotation.yaw) < 89.5 else \"\"\n heading += \"S\" if abs(t.rotation.yaw) > 90.5 else \"\"\n heading += \"E\" if 179.5 > t.rotation.yaw > 0.5 else \"\"\n heading += \"W\" if -0.5 > t.rotation.yaw > -179.5 else \"\"\n colhist = world.collision_sensor.get_collision_history()\n collision = [colhist[x + self.frame_number - 200] for x in range(0, 200)]\n max_col = max(1.0, max(collision))\n collision = [x / max_col for x in collision]\n vehicles = world.world.get_actors().filter(\"vehicle.*\")\n self._info_text = [\n \"Server: % 16d FPS\" % self.server_fps,\n \"\",\n \"Vehicle: % 20s\" % get_actor_display_name(world.vehicle, truncate=20),\n \"Map: % 20s\" % world.map.name,\n \"Simulation time: % 12s\" % datetime.timedelta(seconds=int(self.simulation_time)),\n \"\",\n \"Speed: % 15.0f km/h\" % (3.6 * math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)),\n u\"Heading:% 16.0f\\N{DEGREE SIGN} % 2s\" % (t.rotation.yaw, heading),\n \"Location:% 20s\" % (\"(% 5.1f, % 5.1f)\" % (t.location.x, t.location.y)),\n \"Height: % 18.0f m\" % t.location.z,\n \"\",\n (\"Throttle:\", c.throttle, 0.0, 1.0),\n (\"Steer:\", c.steer, -1.0, 1.0),\n (\"Brake:\", c.brake, 0.0, 1.0),\n (\"Reverse:\", c.reverse),\n (\"Hand brake:\", c.hand_brake),\n (\"Manual:\", c.manual_gear_shift),\n \"Gear: %s\" % {-1: \"R\", 0: \"N\"}.get(c.gear, c.gear),\n \"\",\n \"Collision:\",\n collision,\n \"\",\n \"Number of vehicles: % 8d\" % len(vehicles),\n ]\n if len(vehicles) > 1:\n self._info_text += [\"Nearby vehicles:\"]\n distance = lambda l: math.sqrt(\n (l.x - t.location.x) ** 2 + (l.y - t.location.y) ** 2 + (l.z - t.location.z) ** 2\n )\n vehicles = [\n (distance(x.get_location()), x) for x in vehicles if x.id != world.vehicle.id\n ]\n for d, vehicle in sorted(vehicles):\n if d > 200.0:\n break\n vehicle_type = get_actor_display_name(vehicle, truncate=22)\n self._info_text.append(\"% 4dm %s\" % (d, vehicle_type))\n self._notifications.tick(world, clock)\n\n def toggle_info(self):\n self._show_info = not self._show_info\n\n def notification(self, text, seconds=2.0):\n self._notifications.set_text(text, seconds=seconds)\n\n def error(self, text):\n self._notifications.set_text(\"Error: %s\" % text, (255, 0, 0))\n\n def render(self, display):\n if self._show_info:\n info_surface = pygame.Surface((220, self.dim[1]))\n info_surface.set_alpha(100)\n display.blit(info_surface, (0, 0))\n v_offset = 4\n bar_h_offset = 100\n bar_width = 106\n for item in self._info_text:\n if v_offset + 18 > self.dim[1]:\n break\n if isinstance(item, list):\n if len(item) > 1:\n points = [\n (x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)\n ]\n pygame.draw.lines(display, (255, 136, 0), False, points, 2)\n item = None\n v_offset += 18\n elif isinstance(item, tuple):\n if isinstance(item[1], bool):\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))\n pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)\n else:\n rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect_border, 1)\n f = (item[1] - item[2]) / (item[3] - item[2])\n if item[2] < 0.0:\n rect = pygame.Rect(\n (bar_h_offset + f * (bar_width 
- 6), v_offset + 8), (6, 6)\n )\n else:\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect)\n item = item[0]\n if item: # At this point has to be a str.\n surface = self._font_mono.render(item, True, (255, 255, 255))\n display.blit(surface, (8, v_offset))\n v_offset += 18\n self._notifications.render(display)\n self.help.render(display)\n\n\n# ==============================================================================\n# -- FadingText ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass FadingText(object):\n def __init__(self, font, dim, pos):\n self.font = font\n self.dim = dim\n self.pos = pos\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n\n def set_text(self, text, color=(255, 255, 255), seconds=2.0):\n text_texture = self.font.render(text, True, color)\n self.surface = pygame.Surface(self.dim)\n self.seconds_left = seconds\n self.surface.fill((0, 0, 0, 0))\n self.surface.blit(text_texture, (10, 11))\n\n def tick(self, _, clock):\n delta_seconds = 1e-3 * clock.get_time()\n self.seconds_left = max(0.0, self.seconds_left - delta_seconds)\n self.surface.set_alpha(500.0 * self.seconds_left)\n\n def render(self, display):\n display.blit(self.surface, self.pos)\n\n\n# ==============================================================================\n# -- HelpText ------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HelpText(object):\n def __init__(self, font, width, height):\n lines = __doc__.split(\"\\n\")\n self.font = font\n self.dim = (680, len(lines) * 22 + 12)\n self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n self.surface.fill((0, 0, 0, 0))\n for n, line in enumerate(lines):\n text_texture = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text_texture, (22, n * 22))\n self._render = False\n self.surface.set_alpha(220)\n\n def toggle(self):\n self._render = not self._render\n\n def render(self, display):\n if self._render:\n display.blit(self.surface, self.pos)\n\n\n# ==============================================================================\n# -- CollisionSensor -----------------------------------------------------------\n# ==============================================================================\n\n\nclass CollisionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._history = []\n self._parent = parent_actor\n self._hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find(\"sensor.other.collision\")\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))\n\n def get_collision_history(self):\n history = collections.defaultdict(int)\n for frame, intensity in self._history:\n history[frame] += intensity\n return history\n\n @staticmethod\n def _on_collision(weak_self, event):\n self = weak_self()\n if not self:\n return\n actor_type = get_actor_display_name(event.other_actor)\n self._hud.notification(\"Collision with %r, id = %d\" % (actor_type, event.other_actor.id))\n impulse = 
event.normal_impulse\n intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n self._history.append((event.frame_number, intensity))\n if len(self._history) > 4000:\n self._history.pop(0)\n\n\n# ==============================================================================\n# -- LaneInvasionSensor --------------------------------------------------------\n# ==============================================================================\n\n\nclass LaneInvasionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._parent = parent_actor\n self._hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find(\"sensor.other.lane_detector\")\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))\n\n @staticmethod\n def _on_invasion(weak_self, event):\n self = weak_self()\n if not self:\n return\n text = [\"%r\" % str(x).split()[-1] for x in set(event.crossed_lane_markings)]\n self._hud.notification(\"Crossed line %s\" % \" and \".join(text))\n\n\n# ==============================================================================\n# -- CameraManager -------------------------------------------------------------\n# ==============================================================================\n\n\nclass CameraManager(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._surface = None\n self._parent = parent_actor\n self._hud = hud\n self._recording = False\n self._camera_transforms = [\n carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),\n carla.Transform(carla.Location(x=1.6, z=1.7)),\n ]\n self._transform_index = 1\n self._sensors = [\n [\"sensor.camera.rgb\", cc.Raw, \"Camera RGB\"],\n [\"sensor.camera.depth\", cc.Raw, \"Camera Depth (Raw)\"],\n [\"sensor.camera.depth\", cc.Depth, \"Camera Depth (Gray Scale)\"],\n [\"sensor.camera.depth\", cc.LogarithmicDepth, \"Camera Depth (Logarithmic Gray Scale)\"],\n [\"sensor.camera.semantic_segmentation\", cc.Raw, \"Camera Semantic Segmentation (Raw)\"],\n [\n \"sensor.camera.semantic_segmentation\",\n cc.CityScapesPalette,\n \"Camera Semantic Segmentation (CityScapes Palette)\",\n ],\n [\"sensor.lidar.ray_cast\", None, \"Lidar (Ray-Cast)\"],\n ]\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self._sensors:\n bp = bp_library.find(item[0])\n if item[0].startswith(\"sensor.camera\"):\n bp.set_attribute(\"image_size_x\", str(hud.dim[0]))\n bp.set_attribute(\"image_size_y\", str(hud.dim[1]))\n item.append(bp)\n self._index = None\n\n def toggle_camera(self):\n self._transform_index = (self._transform_index + 1) % len(self._camera_transforms)\n self.sensor.set_transform(self._camera_transforms[self._transform_index])\n\n def set_sensor(self, index, notify=True):\n index = index % len(self._sensors)\n needs_respawn = (\n True\n if self._index is None\n else self._sensors[index][0] != self._sensors[self._index][0]\n )\n if needs_respawn:\n if self.sensor is not None:\n self.sensor.destroy()\n self._surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self._sensors[index][-1],\n self._camera_transforms[self._transform_index],\n attach_to=self._parent,\n )\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = 
weakref.ref(self)\n self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))\n if notify:\n self._hud.notification(self._sensors[index][2])\n self._index = index\n\n def next_sensor(self):\n self.set_sensor(self._index + 1)\n\n def toggle_recording(self):\n self._recording = not self._recording\n self._hud.notification(\"Recording %s\" % (\"On\" if self._recording else \"Off\"))\n\n def render(self, display):\n if self._surface is not None:\n display.blit(self._surface, (0, 0))\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n if self._sensors[self._index][0].startswith(\"sensor.lidar\"):\n points = np.frombuffer(image.raw_data, dtype=np.dtype(\"f4\"))\n points = np.reshape(points, (int(points.shape[0] / 3), 3))\n lidar_data = np.array(points[:, :2])\n lidar_data *= min(self._hud.dim) / 100.0\n lidar_data += (0.5 * self._hud.dim[0], 0.5 * self._hud.dim[1])\n lidar_data = np.fabs(lidar_data)\n lidar_data = lidar_data.astype(np.int32)\n lidar_data = np.reshape(lidar_data, (-1, 2))\n lidar_img_size = (self._hud.dim[0], self._hud.dim[1], 3)\n lidar_img = np.zeros(lidar_img_size)\n lidar_img[tuple(lidar_data.T)] = (255, 255, 255)\n self._surface = pygame.surfarray.make_surface(lidar_img)\n else:\n image.convert(self._sensors[self._index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self._surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self._recording:\n image.save_to_disk(\"_out/%08d\" % image.frame_number)\n\n\n# ==============================================================================\n# -- game_loop() ---------------------------------------------------------\n# ==============================================================================\n\n\ndef game_loop(args):\n pygame.init()\n pygame.font.init()\n world = None\n\n try:\n client = carla.Client(args.host, args.port)\n client.set_timeout(4.0)\n\n display = pygame.display.set_mode(\n (args.width, args.height), pygame.HWSURFACE | pygame.DOUBLEBUF\n )\n\n hud = HUD(args.width, args.height)\n world = World(client.get_world(), hud)\n controller = KeyboardControl(world, False)\n\n if args.agent == \"Roaming\":\n agent = RoamingAgent(world.vehicle)\n else:\n agent = BasicAgent(world.vehicle)\n spawn_point = world.map.get_spawn_points()[0]\n agent.set_destination(\n (spawn_point.location.x, spawn_point.location.y, spawn_point.location.z)\n )\n\n clock = pygame.time.Clock()\n while True:\n if controller.parse_events(world, clock):\n return\n\n # as soon as the server is ready continue!\n if not world.world.wait_for_tick(10.0):\n continue\n\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n control = agent.run_step()\n world.vehicle.apply_control(control)\n\n finally:\n if world is not None:\n world.destroy()\n\n pygame.quit()\n\n\n# ==============================================================================\n# -- main() --------------------------------------------------------------\n# ==============================================================================\n\n\ndef main():\n argparser = argparse.ArgumentParser(description=\"CARLA Manual Control Client\")\n argparser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\", dest=\"debug\", help=\"print debug information\"\n )\n argparser.add_argument(\n \"--host\",\n metavar=\"H\",\n default=\"127.0.0.1\",\n help=\"IP of the host 
server (default: 127.0.0.1)\",\n )\n argparser.add_argument(\n \"-p\",\n \"--port\",\n metavar=\"P\",\n default=2000,\n type=int,\n help=\"TCP port to listen to (default: 2000)\",\n )\n argparser.add_argument(\n \"--res\",\n metavar=\"WIDTHxHEIGHT\",\n default=\"1280x720\",\n help=\"window resolution (default: 1280x720)\",\n )\n\n argparser.add_argument(\n \"-a\",\n \"--agent\",\n type=str,\n choices=[\"Roaming\", \"Basic\"],\n help=\"select which agent to run\",\n default=\"Basic\",\n )\n args = argparser.parse_args()\n\n args.width, args.height = [int(x) for x in args.res.split(\"x\")]\n\n log_level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=log_level)\n\n logging.info(\"listening to server %s:%s\", args.host, args.port)\n\n print(__doc__)\n\n try:\n\n game_loop(args)\n\n except KeyboardInterrupt:\n print(\"\\nCancelled by user. Bye!\")\n except Exception as error:\n logging.exception(error)\n\n\nif __name__ == \"__main__\":\n\n main()\n"
] | [
[
"numpy.reshape",
"numpy.dtype",
"numpy.array",
"numpy.zeros",
"numpy.fabs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SsnL/BigGAN-PyTorch | [
"a11c8621a7bc733475afdd0c5e1e86db0b923381"
] | [
"BigGANdeep.py"
] | [
"import numpy as np\nimport math\nimport functools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\n\nimport layers\nfrom sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d\n\n# BigGAN-deep: uses a different resblock and pattern\n\n\n# Architectures for G\n# Attention is passed in in the format '32_64' to mean applying an attention\n# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.\n\n# Channel ratio is the ratio of\nclass GBlock(nn.Module):\n def __init__(self, in_channels, out_channels,\n which_conv=nn.Conv2d, which_bn=layers.bn, activation=None,\n upsample=None, channel_ratio=4):\n super(GBlock, self).__init__()\n\n self.in_channels, self.out_channels = in_channels, out_channels\n self.hidden_channels = self.in_channels // channel_ratio\n self.which_conv, self.which_bn = which_conv, which_bn\n self.activation = activation\n self.upsample = upsample\n # Conv layers\n self.conv1 = self.which_conv(self.in_channels, self.hidden_channels,\n kernel_size=1, padding=0)\n self.conv2 = self.which_conv(self.hidden_channels, self.hidden_channels)\n self.conv3 = self.which_conv(self.hidden_channels, self.hidden_channels)\n self.conv4 = self.which_conv(self.hidden_channels, self.out_channels,\n kernel_size=1, padding=0)\n # Batchnorm layers\n self.bn1 = self.which_bn(self.in_channels)\n self.bn2 = self.which_bn(self.hidden_channels)\n self.bn3 = self.which_bn(self.hidden_channels)\n self.bn4 = self.which_bn(self.hidden_channels)\n # upsample layers\n self.upsample = upsample\n\n def forward(self, x, y):\n # Project down to channel ratio\n h = self.conv1(self.activation(self.bn1(x, y)))\n # Apply next BN-ReLU\n h = self.activation(self.bn2(h, y))\n # Drop channels in x if necessary\n if self.in_channels != self.out_channels:\n x = x[:, :self.out_channels]\n # Upsample both h and x at this point\n if self.upsample:\n h = self.upsample(h)\n x = self.upsample(x)\n # 3x3 convs\n h = self.conv2(h)\n h = self.conv3(self.activation(self.bn3(h, y)))\n # Final 1x1 conv\n h = self.conv4(self.activation(self.bn4(h, y)))\n return h + x\n\ndef G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):\n arch = {}\n arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],\n 'upsample' : [True] * 6,\n 'resolution' : [8, 16, 32, 64, 128, 256],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,9)}}\n arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],\n 'upsample' : [True] * 5,\n 'resolution' : [8, 16, 32, 64, 128],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,8)}}\n arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2]],\n 'upsample' : [True] * 4,\n 'resolution' : [8, 16, 32, 64],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,7)}}\n arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],\n 'out_channels' : [ch * item for item in [4, 4, 4]],\n 'upsample' : [True] * 3,\n 'resolution' : [8, 16, 32],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,6)}}\n\n return arch\n\nclass Generator(nn.Module):\n def __init__(self, G_ch=64, 
G_depth=2, dim_z=128, bottom_width=4, resolution=128,\n G_kernel_size=3, G_attn='64', n_classes=1000,\n num_G_SVs=1, num_G_SV_itrs=1,\n G_shared=True, shared_dim=0, hier=False,\n cross_replica=False, mybn=False,\n G_activation=nn.ReLU(inplace=False),\n G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,\n BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,\n G_init='ortho', skip_init=False, no_optim=False,\n G_param='SN', norm_style='bn', quiet=False,\n **kwargs):\n super(Generator, self).__init__()\n # Channel width mulitplier\n self.ch = G_ch\n # Number of resblocks per stage\n self.G_depth = G_depth\n # Dimensionality of the latent space\n self.dim_z = dim_z\n # The initial spatial dimensions\n self.bottom_width = bottom_width\n # Resolution of the output\n self.resolution = resolution\n # Kernel size?\n self.kernel_size = G_kernel_size\n # Attention?\n self.attention = G_attn\n # number of classes, for use in categorical conditional generation\n self.n_classes = n_classes\n # Use shared embeddings?\n self.G_shared = G_shared\n # Dimensionality of the shared embedding? Unused if not using G_shared\n self.shared_dim = shared_dim if shared_dim > 0 else dim_z\n # Hierarchical latent space?\n self.hier = hier\n # Cross replica batchnorm?\n self.cross_replica = cross_replica\n # Use my batchnorm?\n self.mybn = mybn\n # nonlinearity for residual blocks\n self.activation = G_activation\n # Initialization style\n self.init = G_init\n # Parameterization style\n self.G_param = G_param\n # Normalization style\n self.norm_style = norm_style\n # Epsilon for BatchNorm?\n self.BN_eps = BN_eps\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # fp16?\n self.fp16 = G_fp16\n # Architecture dict\n self.arch = G_arch(self.ch, self.attention)[resolution]\n\n\n # Which convs, batchnorms, and linear layers to use\n if self.G_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n else:\n self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)\n self.which_linear = nn.Linear\n\n # We use a non-spectral-normed embedding here regardless;\n # For some reason applying SN to G's embedding seems to randomly cripple G\n self.which_embedding = nn.Embedding\n bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared\n else self.which_embedding)\n self.which_bn = functools.partial(layers.ccbn,\n which_linear=bn_linear,\n cross_replica=self.cross_replica,\n mybn=self.mybn,\n input_size=(self.shared_dim + self.dim_z if self.G_shared\n else self.n_classes),\n norm_style=self.norm_style,\n eps=self.BN_eps)\n\n\n # Prepare model\n # If not using shared embeddings, self.shared is just a passthrough\n self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared\n else layers.identity())\n # First linear layer\n self.linear = self.which_linear(self.dim_z + self.shared_dim, self.arch['in_channels'][0] * (self.bottom_width **2))\n\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n # while the inner loop is over a given block\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[GBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['in_channels'][index] if g_index==0 else 
self.arch['out_channels'][index],\n which_conv=self.which_conv,\n which_bn=self.which_bn,\n activation=self.activation,\n upsample=(functools.partial(F.interpolate, scale_factor=2)\n if self.arch['upsample'][index] and g_index == (self.G_depth-1) else None))]\n for g_index in range(self.G_depth)]\n\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n if not quiet:\n print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]\n\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n\n # output layer: batchnorm-relu-conv.\n # Consider using a non-spectral conv here\n self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],\n cross_replica=self.cross_replica,\n mybn=self.mybn),\n self.activation,\n self.which_conv(self.arch['out_channels'][-1], 3))\n\n # Initialize weights. Optionally skip init for testing.\n if not skip_init:\n self.init_weights(quiet)\n\n # Set up optimizer\n # If this is an EMA copy, no need for an optim, so just return now\n if no_optim:\n return\n self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps\n if G_mixed_precision:\n if not quiet:\n print('Using fp16 adam in G...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self, quiet=False):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d)\n or isinstance(module, nn.Linear)\n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n raise RuntimeError('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n if not quiet:\n print('Param count for G''s initialized parameters: %d' % self.param_count)\n\n # Note on this forward function: we pass in a y vector which has\n # already been passed through G.shared to enable easy class-wise\n # interpolation later. 
If we passed in the one-hot and then ran it through\n # G.shared in this forward function, it would be harder to handle.\n # NOTE: The z vs y dichotomy here is for compatibility with not-y\n def forward(self, z, y):\n # If hierarchical, concatenate zs and ys\n if self.hier:\n z = torch.cat([y, z], 1)\n y = z\n # First linear layer\n h = self.linear(z)\n # Reshape\n h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)\n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n # Second inner loop in case block has multiple layers\n for block in blocklist:\n h = block(h, y)\n\n # Apply batchnorm-relu-conv-tanh at output\n return torch.tanh(self.output_layer(h))\n\nclass DBlock(nn.Module):\n def __init__(self, in_channels, out_channels, which_conv=layers.SNConv2d, wide=True,\n preactivation=True, activation=None, downsample=None,\n channel_ratio=4):\n super(DBlock, self).__init__()\n self.in_channels, self.out_channels = in_channels, out_channels\n # If using wide D (as in SA-GAN and BigGAN), change the channel pattern\n self.hidden_channels = self.out_channels // channel_ratio\n self.which_conv = which_conv\n self.preactivation = preactivation\n self.activation = activation\n self.downsample = downsample\n\n # Conv layers\n self.conv1 = self.which_conv(self.in_channels, self.hidden_channels,\n kernel_size=1, padding=0)\n self.conv2 = self.which_conv(self.hidden_channels, self.hidden_channels)\n self.conv3 = self.which_conv(self.hidden_channels, self.hidden_channels)\n self.conv4 = self.which_conv(self.hidden_channels, self.out_channels,\n kernel_size=1, padding=0)\n\n self.learnable_sc = True if (in_channels != out_channels) else False\n if self.learnable_sc:\n self.conv_sc = self.which_conv(in_channels, out_channels - in_channels,\n kernel_size=1, padding=0)\n def shortcut(self, x):\n if self.downsample:\n x = self.downsample(x)\n if self.learnable_sc:\n x = torch.cat([x, self.conv_sc(x)], 1)\n return x\n\n def forward(self, x):\n # 1x1 bottleneck conv\n h = self.conv1(F.relu(x))\n # 3x3 convs\n h = self.conv2(self.activation(h))\n h = self.conv3(self.activation(h))\n # relu before downsample\n h = self.activation(h)\n # downsample\n if self.downsample:\n h = self.downsample(h)\n # final 1x1 conv\n h = self.conv4(h)\n return h + self.shortcut(x)\n\n# Discriminator architecture, same paradigm as G's above\ndef D_arch(ch=64, attention='64',ksize='333333', dilation='111111'):\n arch = {}\n arch[256] = {'in_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16]],\n 'out_channels' : [item * ch for item in [2, 4, 8, 8, 16, 16]],\n 'downsample' : [True] * 6 + [False],\n 'resolution' : [128, 64, 32, 16, 8, 4, 4 ],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[128] = {'in_channels' : [item * ch for item in [1, 2, 4, 8, 16]],\n 'out_channels' : [item * ch for item in [2, 4, 8, 16, 16]],\n 'downsample' : [True] * 5 + [False],\n 'resolution' : [64, 32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[64] = {'in_channels' : [item * ch for item in [1, 2, 4, 8]],\n 'out_channels' : [item * ch for item in [2, 4, 8, 16]],\n 'downsample' : [True] * 4 + [False],\n 'resolution' : [32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,7)}}\n arch[32] = {'in_channels' : [item * ch for item in [4, 4, 4]],\n 'out_channels' : [item * ch for item in [4, 4, 4]],\n 'downsample' : [True, True, False, 
False],\n 'resolution' : [16, 16, 16, 16],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,6)}}\n return arch\n\nclass Discriminator(nn.Module):\n\n def __init__(self, D_ch=64, D_wide=True, D_depth=2, resolution=128,\n D_kernel_size=3, D_attn='64', n_classes=1000,\n num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),\n D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,\n SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,\n D_init='ortho', skip_init=False, D_param='SN', **kwargs):\n super(Discriminator, self).__init__()\n # Width multiplier\n self.ch = D_ch\n # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?\n self.D_wide = D_wide\n # How many resblocks per stage?\n self.D_depth = D_depth\n # Resolution\n self.resolution = resolution\n # Kernel size\n self.kernel_size = D_kernel_size\n # Attention?\n self.attention = D_attn\n # Number of classes\n self.n_classes = n_classes\n # Activation\n self.activation = D_activation\n # Initialization style\n self.init = D_init\n # Parameterization style\n self.D_param = D_param\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # Fp16?\n self.fp16 = D_fp16\n # Architecture\n self.arch = D_arch(self.ch, self.attention)[resolution]\n\n\n # Which convs, batchnorms, and linear layers to use\n # No option to turn off SN in D right now\n if self.D_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_embedding = functools.partial(layers.SNEmbedding,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n\n\n # Prepare model\n # Stem convolution\n self.input_conv = self.which_conv(3, self.arch['in_channels'][0])\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[DBlock(in_channels=self.arch['in_channels'][index] if d_index==0 else self.arch['out_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n wide=self.D_wide,\n activation=self.activation,\n preactivation=True,\n downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] and d_index==0 else None))\n for d_index in range(self.D_depth)]]\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],\n self.which_conv)]\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n # Linear output layer. The output dimension is typically 1, but may be\n # larger if we're e.g. 
turning this into a VAE with an inference output\n self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)\n # Embedding for projection discrimination\n self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])\n\n # Initialize weights\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps\n if D_mixed_precision:\n print('Using fp16 adam in D...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d)\n or isinstance(module, nn.Linear)\n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for D''s initialized parameters: %d' % self.param_count)\n\n def forward(self, x, y=None):\n # Run input conv\n h = self.input_conv(x)\n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n for block in blocklist:\n h = block(h)\n # Apply global sum pooling as in SN-GAN\n h = torch.sum(self.activation(h), [2, 3])\n # Get initial class-unconditional output\n out = self.linear(h)\n # Get projection of final featureset onto class vectors and add to evidence\n out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)\n return out\n\n# Parallelized G_D to minimize cross-gpu communication\n# Without this, Generator outputs would get all-gathered and then rebroadcast.\nclass G_D(nn.Module):\n def __init__(self, G, D):\n super(G_D, self).__init__()\n self.G = G\n self.D = D\n\n def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,\n split_D=False):\n # If training G, enable grad tape\n with torch.set_grad_enabled(train_G):\n # Get Generator output given noise\n G_z = self.G(z, self.G.shared(gy))\n # Cast as necessary\n if self.G.fp16 and not self.D.fp16:\n G_z = G_z.float()\n if self.D.fp16 and not self.G.fp16:\n G_z = G_z.half()\n # Split_D means to run D once with real data and once with fake,\n # rather than concatenating along the batch dimension.\n if split_D:\n D_fake = self.D(G_z, gy)\n if x is not None:\n D_real = self.D(x, dy)\n return D_fake, D_real\n else:\n if return_G_z:\n return D_fake, G_z\n else:\n return D_fake\n # If real data is provided, concatenate it with the Generator's output\n # along the batch dimension for improved efficiency.\n else:\n D_input = torch.cat([G_z, x], 0) if x is not None else G_z\n D_class = torch.cat([gy, dy], 0) if dy is not None else gy\n # Get Discriminator output\n D_out = self.D(D_input, D_class)\n if x is not None:\n return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real\n else:\n if return_G_z:\n return D_out, G_z\n else:\n return D_out\n"
] | [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.AvgPool2d",
"torch.nn.functional.relu",
"torch.set_grad_enabled",
"torch.nn.init.orthogonal_",
"torch.nn.init.normal_",
"torch.split",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
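Illustrative sketch (not part of the dataset rows above): a minimal example of the hierarchical-latent path in the Generator.forward code of the row above. When hier is set, the class embedding is concatenated with the noise, and the same vector feeds both the first linear layer and every conditional-BN block. Shapes are arbitrary; only PyTorch is assumed.

import torch

dim_z, shared_dim, batch = 128, 128, 4
z = torch.randn(batch, dim_z)        # latent noise
y = torch.randn(batch, shared_dim)   # stand-in for G.shared(class_ids)

hier = True
if hier:
    z = torch.cat([y, z], 1)  # (batch, shared_dim + dim_z) goes into the first linear layer
    y = z                     # the same concatenated vector also conditions each ccbn block

print(z.shape)  # torch.Size([4, 256])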
alexandrebatista/mips-spark-optimization | [
"8d049920d602e42b988f1eaa0aca61df1a39ac4e"
] | [
"src/mips.py"
] | [
"from pyspark.ml.clustering import KMeans\nfrom pyspark.ml.evaluation import ClusteringEvaluator\nimport pyspark.sql.functions as F\nimport numpy as np\nfrom heapq import heapify, heappush, heappop, nlargest\nimport threading\nimport logging\nimport time\n\ndef unit_vector(vector):\n return vector / np.linalg.norm(vector)\n\ndef angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\ndef CBound(vec_c, vec_i, ang_tic, ang_tb):\n\tnorma_i = np.linalg.norm(vec_i)\n\ttest = norma_i * np.cos(ang_tic - ang_tb)\n\treturn test if ang_tb < ang_tic else norma_i\n\ndef queryIndex():\n list_li = list(li)\n for i in L[K:]:\n if CBound(vec_c, vec_i, ang_tic, ang_tb) < min_heap:\n break\n elif (vec_u.T * vec_i) > min_heap:\n list_li.append(vec_u.T * i) # add i to H with weight u.T*i\n print(list_li)\n\ndef ParalelMaximus(iterator, centers, itensDataframe):\n theta = -1000\n L = []\n K = 10\n users = []\n\n # Construct Index\n for user in iterator:\n id = user.__getitem__(\"id\")\n userLatentFactors = user.__getitem__(\"features\")\n partition = user.__getitem__(\"prediction\")\n centroid = centers[partition]\n\n users.append((id, userLatentFactors))\n\n angle = angle_between(userLatentFactors, centroid)\n if angle > theta:\n theta = angle\n\n if theta != -1000:\n for index, row in itensDataframe.iterrows():\n itemLatentFactors = row[1]\n angle = angle_between(itemLatentFactors, centroid)\n CB = CBound(centroid, itemLatentFactors, angle, theta)\n LItem = (row[0], CB, itemLatentFactors)\n L.append(LItem)\n\n L = sorted(L, reverse = True, key = lambda x: x[1])\n \n # Query Index\n for user in users:\n id = user[0]\n userLatentFactors = user[1]\n heap = []\n\n for itens in L[:K]:\n itemLatentFactors = itens[2]\n weight = np.dot(userLatentFactors, itemLatentFactors)\n heappush(heap, (weight, itens[0]))\n\n for itens in L[K:]:\n if itens[1] < min(heap):\n break\n else:\n itemLatentFactors = itens[2]\n weight = np.dot(userLatentFactors, itemLatentFactors)\n if weight > min(heap):\n heappush(heap, (weight, itens[0]))\n #print(nlargest(K, heap))\n\ndef SequencialMaximus(threadId, usersDataframe, itensDataframe, centers):\n threadCluster = usersDataframe[usersDataframe.prediction == threadId]\n\n theta = -1000\n L = []\n K = 10\n users = []\n\n # Construct Index\n for index, row in threadCluster.iterrows():\n id = row[\"id\"]\n userLatentFactors = row[\"features\"]\n partition = row[\"prediction\"]\n centroid = centers[partition]\n\n users.append((id, userLatentFactors))\n\n angle = angle_between(userLatentFactors, centroid)\n if angle > theta:\n theta = angle\n\n if theta != -1000:\n for index, row in itensDataframe.iterrows():\n itemLatentFactors = row[1]\n angle = angle_between(itemLatentFactors, centroid)\n CB = CBound(centroid, itemLatentFactors, angle, theta)\n LItem = (row[0], CB, itemLatentFactors)\n L.append(LItem)\n\n L = sorted(L, reverse = True, key = lambda x: x[1])\n \n # Query Index\n for user in users:\n id = user[0]\n userLatentFactors = user[1]\n heap = []\n\n for itens in L[:K]:\n itemLatentFactors = itens[2]\n weight = np.dot(userLatentFactors, itemLatentFactors)\n heappush(heap, (weight, itens[0]))\n\n for itens in L[K:]:\n if itens[1] < min(heap):\n break\n else:\n itemLatentFactors = itens[2]\n weight = np.dot(userLatentFactors, itemLatentFactors)\n if weight > min(heap):\n heappush(heap, (weight, itens[0]))\n #print(nlargest(K, heap))\n\ndef process(usersfactors, itensfactors):\n format = \"%(asctime)s: 
%(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n numberOfClusters = 4\n kmeans = KMeans(k = numberOfClusters, seed=1) # 4 clusters here\n model = kmeans.fit(usersfactors.select('features'))\n\n transformed = model.transform(usersfactors)\n\n # Trains a k-means model.\n #kmeans = KMeans().setK(2).setSeed(1)\n #model = kmeans.fit(dataset)\n\n # Make predictions\n #predictions = model.transform(dataset)\n\n # Shows the result.\n centers = model.clusterCenters()\n\n transformed = transformed.repartition('prediction')\n\n #transformed.select('features').write.csv('numbers')\n\n usersDataframe = transformed.toPandas()\n itensDataframe = itensfactors.toPandas()\n\n # Sequencial version\n threads = list()\n logging.info(\"Multithread Execution: Started!\")\n startingTimeMultithreading = time.time()\n for threadId in range(numberOfClusters):\n logging.info(\"Multithread Execution: create and start thread \" + str(threadId))\n x = threading.Thread(target=SequencialMaximus, args=(threadId, usersDataframe, itensDataframe, centers))\n threads.append(x)\n x.start()\n\n for threadId, thread in enumerate(threads):\n logging.info(\"Multithread Execution: before joining thread \" + str(threadId))\n thread.join()\n logging.info(\"Multithread Execution: thread \" + str(threadId) +\" done\")\n logging.info(\"Multithread Execution: Finished!\")\n endingTimeMultithreading = time.time()\n\n # Parallel version\n logging.info(\"Spark Execution: Started!\")\n startingTimeSpark = time.time()\n transformed.foreachPartition(lambda iterator: ParalelMaximus(iterator, centers, itensDataframe))\n logging.info(\"Spark Execution: Finished!\")\n endingTimeSpark = time.time()\n\n print(\"Time elapsed\")\n print(\"Multithreading Execution: \" + str(endingTimeMultithreading - startingTimeMultithreading) + \"s\")\n print(\"Spark Execution: \" + str(endingTimeSpark - startingTimeSpark) + \"s\")"
] | [
[
"numpy.dot",
"numpy.cos",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
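Illustrative sketch (not part of the dataset rows above): the pruning bound computed by CBound() in src/mips.py of the row above. For a unit-norm user vector within angle ang_tb of the cluster centroid, the inner product with an item i cannot exceed norm(i) * cos(ang_tic - ang_tb). The vectors and angles below are made up; only NumPy is assumed.

import numpy as np

def angle(a, b):
    return np.arccos(np.clip(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)), -1.0, 1.0))

c = np.array([1.0, 0.0])                       # centroid direction
i = np.array([2.0, 1.0])                       # item latent factors
ang_tic, ang_tb = angle(i, c), np.deg2rad(20)  # item-centroid angle, cluster angular radius

bound = np.linalg.norm(i) * np.cos(ang_tic - ang_tb) if ang_tb < ang_tic else np.linalg.norm(i)

# every unit user vector within ang_tb of the centroid respects the bound
for t in np.linspace(-np.deg2rad(20), np.deg2rad(20), 9):
    u = np.array([np.cos(t), np.sin(t)])
    assert np.dot(u, i) <= bound + 1e-9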
gpetretto/PyXtal | [
"8cdd44ccdfbc27bced35b305ec796ce3bcdcfa5e"
] | [
"pyxtal/test_all.py"
] | [
"# python -m unittest pyxtal/test_all.py\nimport unittest\n\nimport numpy as np\nfrom pkg_resources import resource_filename\nfrom pymatgen.core.structure import Molecule\nimport pymatgen.analysis.structure_matcher as sm\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.core.operations import SymmOp\n\nfrom pyxtal import pyxtal\nfrom pyxtal.lattice import Lattice\nfrom pyxtal.symmetry import Group, Wyckoff_position, get_wyckoffs\nfrom pyxtal.wyckoff_site import WP_merge\nfrom pyxtal.XRD import Similarity\nfrom pyxtal.operations import get_inverse\n\ncif_path = resource_filename(\"pyxtal\", \"database/cifs/\")\nl0 = Lattice.from_matrix([[4.08, 0, 0], [0, 9.13, 0], [0, 0, 5.50]])\nl1 = Lattice.from_matrix([[4.08, 0, 0], [0, 9.13, 0], [0, 0, 5.50]])\nl2 = Lattice.from_para(4.08, 9.13, 5.50, 90, 90, 90)\nl3 = Lattice.from_para(4.08, 7.13, 5.50, 90, 38, 90, ltype=\"monoclinic\")\nwp1 = Wyckoff_position.from_group_and_index(36, 0)\nwp2 = Wyckoff_position.from_group_and_index(36, \"4a\")\n\n\nclass TestGroup(unittest.TestCase):\n def test_list_wyckoff_combinations(self):\n g = Group(64)\n a1, _ = g.list_wyckoff_combinations([4, 2])\n self.assertTrue(a1 is None)\n a2, _ = g.list_wyckoff_combinations([4, 8], quick=False) \n self.assertTrue(len(a2) == 8)\n\nclass TestOptLat(unittest.TestCase):\n def test_atomic(self):\n c1 = pyxtal()\n c1.from_seed(cif_path+\"LiCs.cif\", backend='pyxtal')\n pmg1 = c1.to_pymatgen()\n\n c2 = c1.copy()\n c2.optimize_lattice(1)\n pmg2 = c2.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg1, pmg2))\n\n c2.optimize_lattice(1)\n pmg2 = c2.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg1, pmg2))\n \n c3 = pyxtal()\n c3.from_seed(cif_path+\"LiCs.cif\")\n pmg3 = c3.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg1, pmg3))\n\nclass TestWP(unittest.TestCase):\n def test_wp(self):\n symbol = str(wp1.multiplicity) + wp1.letter\n self.assertTrue(symbol == \"8b\")\n symbol = str(wp2.multiplicity) + wp2.letter\n self.assertTrue(symbol == \"4a\")\n\n def test_merge(self):\n pt, wp, _ = WP_merge([0.05, 0.7, 0.24], l1.get_matrix(), wp1, 0.5)\n symbol = str(wp.multiplicity) + wp.letter\n self.assertTrue(symbol == \"4a\")\n pt, wp, _ = WP_merge([0.15, 0.7, 0.24], l1.get_matrix(), wp1, 0.5)\n symbol = str(wp.multiplicity) + wp.letter\n self.assertTrue(symbol == \"8b\")\n\n def test_get_wyckoff(self):\n for i in [1, 2, 229, 230]:\n get_wyckoffs(i)\n get_wyckoffs(i, organized=True)\n\n # to add test from string\nclass TestDof(unittest.TestCase):\n def test_atomic(self):\n s = pyxtal() \n s.from_random(3, 225, ['C'], [8])\n ans = s.get_dof()\n self.assertTrue(s.lattice.dof == 1)\n self.assertTrue(ans == 1)\n\nclass TestMolecular(unittest.TestCase):\n def test_single_specie(self):\n # print(\"test_h2o\")\n struc = pyxtal(molecular=True)\n struc.from_random(3, 36, [\"H2O\"], [8], sites=[[\"8b\"]])\n struc.to_file()\n self.assertTrue(struc.valid)\n\n # test space group\n pmg_struc = struc.to_pymatgen()\n sga = SpacegroupAnalyzer(pmg_struc)\n # print(sga.get_space_group_symbol())\n self.assertTrue(sga.get_space_group_number() >= 36)\n # print(pmg_struc.frac_coords[:3])\n\n # test rotation\n ax = struc.mol_sites[0].orientation.axis\n struc.mol_sites[0].rotate(axis=[1, 0, 0], angle=90)\n pmg_struc = struc.to_pymatgen()\n sga = SpacegroupAnalyzer(pmg_struc)\n pmg_struc.to(\"cif\", \"1.cif\")\n self.assertTrue(sga.get_space_group_symbol() == \"Cmc2_1\")\n # print(pmg_struc.frac_coords[:3])\n\n def test_sites(self):\n struc = 
pyxtal(molecular=True)\n struc.from_random(3, 19, [\"H2O\"], [4])\n pmg_struc = struc.to_pymatgen()\n sga = SpacegroupAnalyzer(pmg_struc)\n self.assertTrue(sga.get_space_group_symbol() == \"P2_12_12_1\")\n\n struc = pyxtal(molecular=True)\n struc.from_random(3, 36, [\"H2O\"], [8], sites=[[\"4a\", \"4a\"]])\n pmg_struc = struc.to_pymatgen()\n sga = SpacegroupAnalyzer(pmg_struc)\n self.assertTrue(sga.get_space_group_symbol() == \"Cmc2_1\")\n\n def test_read(self):\n # test reading structure from external\n struc = pyxtal(molecular=True)\n struc.from_seed(seed=cif_path+\"aspirin.cif\", molecule=\"aspirin\")\n pmg_struc = struc.to_pymatgen()\n sga = SpacegroupAnalyzer(pmg_struc)\n self.assertTrue(sga.get_space_group_symbol() == \"P2_1/c\")\n C = struc.subgroup_once(eps=0, H=4)\n pmg_s2 = C.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg_struc, pmg_s2))\n\n def test_ice(self):\n struc = pyxtal(molecular=True)\n struc.from_seed(seed=cif_path+\"ice.cif\", molecule='H2O')\n N = struc.check_short_distances_by_dict({\"H-H\": 1.0, \"O-O\": 2.0})\n self.assertTrue(N==0)\n\n def test_big_molecule(self):\n # print(\"test_big_molecule\")\n for mol in [\"ROY\", \"aspirin\"]:\n struc = pyxtal(molecular=True)\n struc.from_random(3, 19, [mol], [4], 1.2)\n self.assertTrue(struc.valid)\n pair = struc.check_short_distances()\n if len(pair) > 0:\n print(\"short distances were detected\")\n print(mol)\n print(pair)\n self.assertTrue(len(pair) == 0)\n\n def test_c60(self):\n struc = pyxtal(molecular=True)\n struc.from_random(3, 36, [\"C60\"], [4], 1.0)\n self.assertTrue(struc.valid)\n\n def test_mutiple_species(self):\n Li = Molecule([\"Li\"], [[0.0, 0.0, 0.0]])\n coords = [\n [0.000000, 0.000000, 0.000000],\n [1.200000, 1.200000, -1.200000],\n [1.200000, -1.200000, 1.200000],\n [-1.200000, 1.200000, 1.200000],\n [-1.200000, -1.200000, -1.200000],\n ]\n ps4 = Molecule([\"P\", \"S\", \"S\", \"S\", \"S\"], coords)\n\n for i in range(3):\n struc = pyxtal(molecular=True)\n struc.from_random(3, 10, [Li, ps4], [6, 2], 1.2, conventional=False)\n if struc.valid:\n self.assertTrue(len(struc.to_pymatgen()) == 16)\n\n def test_molecular_2d(self):\n # print(\"test_molecular_2d\")\n struc = pyxtal(molecular=True)\n struc.from_random(2, 20, [\"H2O\"], [4], 1.0, conventional=False)\n cif = struc.to_file()\n self.assertTrue(struc.valid)\n\n def test_molecular_1d(self):\n struc = pyxtal(molecular=True)\n struc.from_random(1, 20, [\"H2O\"], [4], 1.0, conventional=False)\n cif = struc.to_file()\n self.assertTrue(struc.valid)\n # def test_space_groups(self):\n\n def test_preassigned_sites(self):\n sites = [[\"4a\", \"4a\"]]\n struc = pyxtal(molecular=True)\n struc.from_random(3, 36, [\"H2O\"], [8], sites=sites)\n self.assertTrue(struc.valid)\n\nclass TestAtomic3D(unittest.TestCase):\n def test_single_specie(self):\n struc = pyxtal()\n struc.from_random(3, 225, [\"C\"], [4], 1.2, conventional=False)\n struc.to_file()\n self.assertTrue(struc.valid)\n\n def test_mutiple_species(self):\n struc = pyxtal()\n struc.from_random(3, 99, [\"Ba\", \"Ti\", \"O\"], [1, 1, 3], 1.2)\n self.assertTrue(struc.valid)\n\n def test_preassigned_sites(self):\n sites = [[\"1b\"], [\"1b\"], [\"2c\", \"1b\"]]\n struc = pyxtal()\n struc.from_random(3, 99, [\"Ba\", \"Ti\", \"O\"], [1, 1, 3], 1.0, sites=sites)\n self.assertTrue(struc.valid)\n\n struc = pyxtal()\n struc.from_random(3, 225, [\"C\"], [12], 1.0, sites=[[\"4a\", \"8c\"]])\n self.assertTrue(struc.valid)\n\nclass TestAtomic2D(unittest.TestCase):\n def test_single_specie(self):\n struc = 
pyxtal()\n struc.from_random(2, 20, [\"C\"], [4], 1.0, thickness=2.0)\n struc.to_file()\n self.assertTrue(struc.valid)\n\n def test_mutiple_species(self):\n struc = pyxtal()\n struc.from_random(2, 4, [\"Mo\", \"S\"], [2, 4], 1.0)\n self.assertTrue(struc.valid)\n\nclass TestAtomic1D(unittest.TestCase):\n def test_single_specie(self):\n struc = pyxtal()\n struc.from_random(1, 20, [\"C\"], [4], 1.0)\n struc.to_file()\n self.assertTrue(struc.valid)\n\n def test_mutiple_species(self):\n struc = pyxtal()\n struc.from_random(1, 4, [\"Mo\", \"S\"], [2, 4], 1.0)\n self.assertTrue(struc.valid)\n\n\nclass TestCluster(unittest.TestCase):\n def test_multi_sites(self):\n struc = pyxtal()\n struc.from_random(0, 1, [\"C\"], [60], 1.0)\n self.assertTrue(struc.valid)\n\n struc = pyxtal()\n struc.from_random(0, 3, [\"C\"], [60], 1.0)\n self.assertTrue(struc.valid)\n\n def test_single_specie(self):\n struc = pyxtal()\n struc.from_random(0, \"Ih\", [\"C\"], [60], 1.0)\n self.assertTrue(struc.valid)\n\n def test_mutiple_species(self):\n struc = pyxtal()\n struc.from_random(0, 4, [\"Mo\", \"S\"], [2, 4], 1.0)\n self.assertTrue(struc.valid)\n\n\nclass TestLattice(unittest.TestCase):\n def test_para_matrix(self):\n self.assertTrue(np.allclose(l1.matrix, l2.matrix))\n\n def test_swap(self):\n l1.swap_axis(ids=[1, 0, 2])\n abc = l1.get_para()[:3]\n self.assertTrue(abc, np.array([9.13, 4.08, 5.50]))\n\n def test_optimize(self):\n l4, tran, _ = l3.optimize()\n self.assertTrue(abs(l4.beta-1.495907)<1e-4)\n\n def test_setpara(self):\n l0.set_para([5, 5, 5, 90, 90, 90])\n self.assertTrue(l0.a == 5)\n\n\nclass TestSymmetry(unittest.TestCase):\n def test_P21(self):\n strs = [\"x, y, z\", \"-x, y+1/2, -z\"]\n wyc, perm = Wyckoff_position.from_symops(strs)\n self.assertTrue(wyc.number == 4)\n\n def test_Pmn21(self):\n strs = [\"x, y, z\", \"-x+1/2, -y, z+1/2\", \"-x, y, z\", \"x+1/2, -y, z+1/2\"]\n wyc, perm = Wyckoff_position.from_symops(strs)\n self.assertTrue(wyc.number == 31)\n\n def test_P21a(self):\n strs = [\"x, y, z\", \"-x, -y, -z\", \"-x+1/2, y+1/2, -z\", \"x+1/2, -y+1/2, z\"]\n wyc, perm = Wyckoff_position.from_symops(strs)\n self.assertTrue(wyc.number == 14)\n\n def test_P21n(self):\n strs = [\n \"x, y, z\",\n \"-x, -y, -z\",\n \"-x+1/2, y+1/2, -z+1/2\",\n \"x+1/2, -y+1/2, z+1/2\",\n ]\n wyc, perm = Wyckoff_position.from_symops(strs)\n self.assertTrue(wyc.number == 14)\n\nclass TestSubgroup(unittest.TestCase):\n def test_cubic_cubic(self):\n sites = ['8a', '32e']\n numIons = int(sum([int(i[:-1]) for i in sites]))\n C1 = pyxtal()\n C1.from_random(3, 227, ['C'], [numIons], sites=[sites])\n pmg_s1 = C1.to_pymatgen()\n sga1 = SpacegroupAnalyzer(pmg_s1).get_space_group_symbol()\n\n C2s = C1.subgroup(eps=1e-4)\n for C2 in C2s:\n pmg_s2 = C2.to_pymatgen()\n sga2 = SpacegroupAnalyzer(pmg_s2).get_space_group_symbol()\n self.assertTrue(sm.StructureMatcher().fit(pmg_s1, pmg_s2))\n\n C3s = C1.subgroup(permutations={\"C\":\"Si\"}, H=216)\n\n def test_from_seed(self):\n from pymatgen import Lattice, Structure\n coords = [[0, 0, 0], [0.75,0.5,0.75]]\n lattice = Lattice.from_parameters(a=3.84, b=3.84, c=3.84, alpha=120,\n beta=90, gamma=60)\n struct = Structure(lattice, [\"Si\", \"C\"], coords)\n s1 = pyxtal()\n s1.from_seed(struct)\n s2 = s1.subgroup_once(eps=0)\n pmg_s1 = s1.to_pymatgen()\n pmg_s2 = s2.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg_s1, pmg_s2))\n \n pmg_s1 = Structure.from_file(cif_path + \"B28.vasp\")\n struc = pyxtal()\n struc.from_seed(seed=cif_path + \"B28.vasp\")\n pmg_s2 = 
struc.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg_s1, pmg_s2))\n permutation = {\"B\":\"C\"}\n struc.subgroup_once(0.01, None, permutation, max_cell=2) \n\n def test_molecules(self):\n for name in [\"HAHCOI\", \"WEXBOS\", \"MERQIM\", \"LAGNAL\", \"YICMOP\", \"LUFHAW\", \"JAPWIH\"]:\n cif = cif_path + name + \".cif\"\n struc = pyxtal(molecular=True)\n struc.from_seed(seed=cif, molecule=name)\n pmg_struc = struc.to_pymatgen()\n Cs = struc.subgroup(eps=0, max_cell=1)\n for C in Cs:\n pmg_s2 = C.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg_struc, pmg_s2))\n \n def test_special(self):\n cif = cif_path + '191.vasp'\n struc = pyxtal()\n struc.from_seed(seed=cif)\n for i in range(100):\n s = struc.subgroup_once(0.2, None, None, 't+k', 2)\n \nclass TestPXRD(unittest.TestCase):\n def test_similarity(self):\n sites = ['8a']\n C1 = pyxtal()\n C1.from_random(3, 227, ['C'], [8], sites=[['8a']])\n xrd1 = C1.get_XRD()\n C2 = C1.subgroup_once(eps=1e-3)\n xrd2 = C1.get_XRD()\n p1 = xrd1.get_profile()\n p2 = xrd2.get_profile()\n s = Similarity(p1, p2, x_range=[15, 90])\n self.assertTrue( 0.9 <s.S <1.001)\n \n\n C2.apply_perturbation(1e-3, 1e-3)\n xrd3 = C2.get_XRD()\n p3 = xrd3.get_profile()\n s = Similarity(p1, p2, x_range=[15, 90])\n self.assertTrue( 0.95 <s.S <1.001)\n\nclass TestLoad(unittest.TestCase):\n def test_atomic(self):\n s1 = pyxtal()\n s1.from_random(3, 36, ['C', 'Si'], [4, 8])\n s2 = pyxtal()\n s2.load_dict(s1.save_dict())\n pmg_s1 = s1.to_pymatgen()\n pmg_s2 = s2.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg_s1, pmg_s2))\n\n def test_molecular(self):\n s1 = pyxtal(molecular=True)\n s1.from_random(3, 36, ['H2O'], [4])\n s2 = pyxtal()\n s2.load_dict(s1.save_dict())\n pmg_s1 = s1.to_pymatgen()\n pmg_s2 = s2.to_pymatgen()\n self.assertTrue(sm.StructureMatcher().fit(pmg_s1, pmg_s2))\n\nclass Test_operations(unittest.TestCase):\n def test_inverse(self):\n coord0 = [0.35, 0.1, 0.4]\n coords = np.array([\n\t\t\t [0.350, 0.100, 0.400],\n\t\t\t [0.350, 0.100, 0.000],\n\t\t\t [0.350, 0.100, 0.000],\n\t\t\t [0.350, 0.000, 0.667],\n\t\t\t [0.350, 0.000, 0.250],\n\t\t\t [0.350, 0.350, 0.400],\n\t\t\t [0.350, 0.350, 0.500],\n\t\t\t [0.350, 0.350, 0.000],\n\t\t\t [0.350, 0.350, 0.350],\n\t\t\t [0.100, 0.100, 0.100],\n\t\t\t [0.400, 0.400, 0.400],\n\t\t\t [0.350, 0.000, 0.000],\n\t\t\t [0.000, 0.100, 0.400],\n\t\t\t [0.350, 0.000, 0.400],\n\t\t\t ])\n xyzs = ['x,y,z',\n 'x,y,0',\n 'y,x,0',\n 'x,0,2/3',\n '0,x,1/4',\n 'x,x,z',\n 'x,-x,1/2',\n '2x,x,0',\n '-2x,-0.5x,-x+1/4',\n '-2y,-0.5y,-y+1/4',\n '-2z,-0.5z,-z+1/4',\n '0,0,x',\n '-y/2+1/2,-z,0',\n '-z,-x/2+1/2,0',\n ]\n \n for i, xyz in enumerate(xyzs):\n op = SymmOp.from_xyz_string(xyz)\n inv_op = get_inverse(op)\n coord1 = op.operate(coord0)\n coord2 = inv_op.operate(coord1)\n self.assertTrue(np.allclose(coord2, coords[i], rtol=1e-2))\n #strs = \"{:6.3f} {:6.3f} {:6.3f}\".format(*coord0)\n #strs += \" {:12s} \".format(op.as_xyz_string())\n #strs += \"{:6.3f} {:6.3f} {:6.3f}\".format(*coord1)\n #strs += \" {:12s} \".format(inv_op.as_xyz_string())\n #strs += \"{:6.3f} {:6.3f} {:6.3f}\".format(*coord2)\n #print(strs)\n\n def test_swap_wp(self):\n g = Group(38)\n wp = g[4]\n wp1, trans = wp.swap_axis([1,0,2])\n \n g = Group(71)\n wp = g[5]\n wp1, trans = wp.swap_axis([0,2,1])\n wp1, trans = wp.swap_axis([1,2,0])\n wp1, trans = wp.swap_axis([2,1,0])\n\n def test_swap_xtal(self):\n s = pyxtal()\n s.from_seed(cif_path+\"BTO-Amm2.cif\")\n strucs = s.get_alternatives()\n self.assertTrue(len(strucs)==1)\n 
#s.from_seed(cif_path+\"BTO-Amm2.cif\")\n #strucs = s.get_alternatives()\n #self.assertTrue(len(strucs)==5)\n\n# class TestIO(unittest.TestCase):\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
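Illustrative sketch (not part of the dataset rows above): the idea exercised by Test_operations.test_inverse in the row above. For an invertible crystallographic operation, the inverse returned by get_inverse must undo the original op, which for an affine symmetry operation is simply the inverse of its 4x4 matrix. The operation chosen below ('x, -y+1/2, z') is an arbitrary example; only NumPy is assumed.

import numpy as np

rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]], dtype=float)  # x, -y, z
tau = np.array([0.0, 0.5, 0.0])                                  # +1/2 along y

affine = np.eye(4)
affine[:3, :3], affine[:3, 3] = rot, tau
inv = np.linalg.inv(affine)

p = np.array([0.35, 0.10, 0.40, 1.0])   # fractional coordinate in homogeneous form
assert np.allclose(inv @ (affine @ p), p)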
JunhoPark0314/fvcore | [
"50a98a712553e4e2f8fc66466585f89793255077"
] | [
"tests/test_activation_count.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# pyre-ignore-all-errors[2]\n\nimport typing\nimport unittest\nfrom collections import Counter, defaultdict\nfrom typing import Any, Dict, List\n\nimport torch\nimport torch.nn as nn\nfrom fvcore.nn.activation_count import Handle, activation_count\nfrom numpy import prod\n\n\nclass SmallConvNet(nn.Module):\n \"\"\"\n A network with three conv layers. This is used for testing convolution\n layers for activation count.\n \"\"\"\n\n def __init__(self, input_dim: int) -> None:\n super(SmallConvNet, self).__init__()\n conv_dim1 = 8\n conv_dim2 = 4\n conv_dim3 = 2\n self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)\n self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)\n self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n def get_gt_activation(self, x: torch.Tensor) -> int:\n count = 0\n x = self.conv1(x)\n count += prod(list(x.size()))\n x = self.conv2(x)\n count += prod(list(x.size()))\n x = self.conv3(x)\n count += prod(list(x.size()))\n return count\n\n\nclass TestActivationCount(unittest.TestCase):\n \"\"\"\n Unittest for activation_count.\n \"\"\"\n\n def test_conv2d(self) -> None:\n \"\"\"\n Test the activation count for convolutions.\n \"\"\"\n batch_size = 1\n input_dim = 3\n spatial_dim = 32\n x = torch.randn(batch_size, input_dim, spatial_dim, spatial_dim)\n convNet = SmallConvNet(input_dim)\n ac_dict, _ = activation_count(convNet, (x,))\n gt_count = convNet.get_gt_activation(x)\n\n gt_dict = defaultdict(float)\n gt_dict[\"conv\"] = gt_count / 1e6\n self.assertDictEqual(\n gt_dict,\n ac_dict,\n \"ConvNet with 3 layers failed to pass the activation count test.\",\n )\n\n def test_linear(self) -> None:\n \"\"\"\n Test the activation count for fully connected layer.\n \"\"\"\n batch_size = 1\n input_dim = 10\n output_dim = 20\n netLinear = nn.Linear(input_dim, output_dim)\n x = torch.randn(batch_size, input_dim)\n ac_dict, _ = activation_count(netLinear, (x,))\n gt_count = batch_size * output_dim\n gt_dict = defaultdict(float)\n gt_dict[\"addmm\"] = gt_count / 1e6\n self.assertEquals(\n gt_dict, ac_dict, \"FC layer failed to pass the activation count test.\"\n )\n\n def test_supported_ops(self) -> None:\n \"\"\"\n Test the activation count for user provided handles.\n \"\"\"\n\n def dummy_handle(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:\n return Counter({\"conv\": 100})\n\n batch_size = 1\n input_dim = 3\n spatial_dim = 32\n x = torch.randn(batch_size, input_dim, spatial_dim, spatial_dim)\n convNet = SmallConvNet(input_dim)\n sp_ops: Dict[str, Handle] = {\"aten::_convolution\": dummy_handle}\n ac_dict, _ = activation_count(convNet, (x,), sp_ops)\n gt_dict = defaultdict(float)\n conv_layers = 3\n gt_dict[\"conv\"] = 100 * conv_layers / 1e6\n self.assertDictEqual(\n gt_dict,\n ac_dict,\n \"ConvNet with 3 layers failed to pass the activation count test.\",\n )\n"
] | [
[
"torch.nn.Linear",
"torch.randn",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
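Illustrative sketch (not part of the dataset rows above): the ground-truth value test_conv2d expects from activation_count, computed by hand for SmallConvNet with a 1x3x32x32 input (all three convs use 1x1 kernels with strides 1, 2, 2). Pure Python, no extra dependencies.

def conv2d_out_hw(h, w, kernel=1, stride=1, padding=0):
    return ((h + 2 * padding - kernel) // stride + 1,
            (w + 2 * padding - kernel) // stride + 1)

batch, h, w = 1, 32, 32
count = 0
for out_ch, stride in [(8, 1), (4, 2), (2, 2)]:
    h, w = conv2d_out_hw(h, w, stride=stride)
    count += batch * out_ch * h * w   # elements in each conv's output feature map

print(count, count / 1e6)  # 9344 activations -> 0.009344, the expected "conv" entry in millions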
ukky17/invert_MV_pytorch | [
"b6f54bf81f7deb5189213071056e83f0e578a7a6"
] | [
"ff_training.py"
] | [
"import os\nimport time\nimport sys\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torchvision.datasets as dst\nimport torchvision.transforms as tfs\nfrom torch.utils.data import DataLoader\n\nimport model\n\ndef train(dataloader, net):\n net.train()\n total = 0\n correct = 0\n for x, y in dataloader:\n x, y = x.to(device), y.to(device)\n optimizer.zero_grad()\n output = net(x)\n lossv = loss_f(output, y)\n lossv.backward()\n optimizer.step()\n correct += y.eq(torch.max(output.data, 1)[1]).sum().item()\n total += y.numel()\n return correct / total\n\ndef test(dataloader, net):\n net.eval()\n total = 0\n correct = 0\n with torch.no_grad():\n for x, y in dataloader:\n x, y = x.to(device), y.to(device)\n output = net(x)\n correct += y.eq(torch.max(output.data, 1)[1]).sum().item()\n total += y.numel()\n return correct / total\n\nif __name__ == \"__main__\":\n # parameters\n batchSize = 128\n lr = 1e-4\n model_path = 'models/vgg16.pth'\n data_dir = 'data/cifar10/'\n epochs = [80, 20]\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n if not os.path.exists('models'):\n os.makedirs('models')\n\n device = torch.device('cuda')\n\n # model and loss\n net = model.VGG()\n loss_f = nn.CrossEntropyLoss()\n net.to(device)\n loss_f.to(device)\n\n # data\n transform_train = tfs.Compose([tfs.RandomCrop(32, padding=4),\n tfs.RandomHorizontalFlip(),\n tfs.ToTensor()])\n data = dst.CIFAR10(data_dir, download=True, train=True,\n transform=transform_train)\n data_test = dst.CIFAR10(data_dir, download=True, train=False,\n transform=tfs.Compose([tfs.ToTensor()]))\n dataloader = DataLoader(data, batch_size=batchSize, shuffle=True)\n dataloader_test = DataLoader(data_test, batch_size=batchSize, shuffle=False)\n\n count = 0\n for epoch in epochs:\n optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)\n for _ in range(epoch):\n beg = time.time()\n count += 1\n train_acc = train(dataloader, net)\n test_acc = test(dataloader_test, net)\n run_time = time.time() - beg\n print('Epoch {}, Time {:.2f}, Train: {:.5f}, Test: {:.5f}'.\\\n format(count, run_time, train_acc, test_acc))\n sys.stdout.flush()\n\n lr /= 10\n\n torch.save(net.state_dict(), model_path)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
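Illustrative sketch (not part of the dataset rows above): the per-batch accuracy bookkeeping used in train()/test() of ff_training.py in the row above, written with argmax, which returns the same indices as torch.max(output.data, 1)[1]. Logits and labels are made up; only PyTorch is assumed.

import torch

output = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 0.9, 1.5],
                       [1.1, 0.4, 0.2]])   # logits for 3 samples, 3 classes
y = torch.tensor([0, 2, 1])                # ground-truth labels

pred = output.argmax(dim=1)
correct = y.eq(pred).sum().item()          # 2 of the 3 predictions match
total = y.numel()
print(correct / total)                     # 0.666...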
ksteensig/pytorch-batch-ops | [
"12063a470b92945470d09e2d23ff55ae13c85564"
] | [
"parallel_no_gesvda.py"
] | [
"import torch\nimport torch_batch_ops_cpp\nimport math\n\ndef csvd(X):\n size = list(X.size())\n m = size[1]\n n = size[2]\n batch_size = size[0]\n\n p = 20\n k = math.floor(n*0.10)\n l = k+p # estimate a low rank approx that is 10% of with p oversampling\n\n #Phi = torch.randint(0,2,(batch_size, l, m),device='cuda:0',dtype=torch.float32)\n #Y = Phi.matmul(X)\n Y = X[:,:l,:]\n Yt = Y.transpose(1,2)\n\n B = Y.matmul(Yt)\n B = B.add(B.transpose(1,2))\n B.mul_(0.5)\n\n index = torch.range(l-1, 0, -1, dtype=torch.long).to('cuda:0', non_blocking=True)\n \n D,T = torch_batch_ops_cpp.batch_symeig_cpp(B, True, 1e-7, 20)\n D = D.index_select(dim=1, index=index)\n T = T.index_select(dim=1, index=index).transpose(1,2)\n S_ = D[:,:k].pow(-0.5).diag_embed(0, 1, 2)\n\n V_ = Yt.matmul(T[:,:,:k]).matmul(S_)\n U_ = X.matmul(V_)\n\n S,Q = torch_batch_ops_cpp.batch_symeig_cpp(U_.transpose(1,2).matmul(U_), True, 1e-7, 20)\n\n S = S.pow(0.5)\n\n U = U_.matmul(Q).matmul(S.pow(-1).diag_embed(0,1,2))\n S = S.diag_embed(0,1,2)\n\n V = V_.matmul(Q)\n\n return U, S, V\n\n\nimport time\n\nN = 10\nB = 25\n\nX = torch.randn(B, 10000, 784).to('cuda:0')\ntorch.cuda.synchronize()\nU,S,V = csvd(X)\ntorch.cuda.synchronize()\n\nfor _ in range(N):\n X = torch.randn(B, 10000, 784).to('cuda:0')\n torch.cuda.synchronize()\n\n csvd_start = time.time()\n U,S,V = csvd(X)\n torch.cuda.synchronize()\n csvd_end = time.time()\n csvd_time = (csvd_end-csvd_start)\n print(csvd_time)\n"
] | [
[
"torch.randn",
"torch.cuda.synchronize",
"torch.range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
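Illustrative sketch (not part of the dataset rows above): the identity the sketch-based csvd() in the row above relies on: the eigenvalues of the Gram matrix B = Y @ Y.T are the squared singular values of Y, so a symmetric eigendecomposition of the small l x l matrix recovers the sketch's singular spectrum. Matrix sizes are arbitrary; only NumPy is assumed.

import numpy as np

rng = np.random.default_rng(0)
Y = rng.standard_normal((6, 40))             # small sketch block (l x n), like X[:, :l, :] per batch item

B = Y @ Y.T
B = 0.5 * (B + B.T)                          # symmetrize, mirroring the batched code

eigvals = np.sort(np.linalg.eigvalsh(B))[::-1]   # descending eigenvalues
svals = np.linalg.svd(Y, compute_uv=False)       # singular values, already descending

assert np.allclose(eigvals, svals ** 2)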
Icenowy/apicula | [
"cace8bb98f6acc5d847c7da6c6cc086f05337fb0"
] | [
"apycula/tiled_fuzzer.py"
] | [
"import re\nimport os\nimport sys\nimport tempfile\nimport subprocess\nfrom collections import deque, Counter, namedtuple\nfrom itertools import chain, count, zip_longest\nfrom functools import reduce\nfrom random import shuffle, seed\nfrom warnings import warn\nfrom math import factorial\nimport numpy as np\nfrom multiprocessing.dummy import Pool\nimport pickle\nimport json\nfrom shutil import copytree\n\nfrom apycula import codegen\nfrom apycula import bslib\nfrom apycula import pindef\nfrom apycula import fuse_h4x\n#TODO proper API\n#from apycula import dat19_h4x\nfrom apycula import tm_h4x\nfrom apycula import chipdb\n\ngowinhome = os.getenv(\"GOWINHOME\")\nif not gowinhome:\n raise Exception(\"GOWINHOME not set\")\n\n# XXX\n# The indexes of the flag values depend on the device.\n# So far I have not found where it is described in the tables\ndef recode_idx_gw1n1(idx):\n return idx\n\ndef recode_idx_gw1ns_2(idx):\n new_idx = idx + 1\n if idx >= 69:\n new_idx += 3\n if idx >= 80:\n new_idx += 1\n return new_idx\n\ndef recode_idx_gw1ns_4(idx):\n new_idx = idx\n if idx >= 48:\n new_idx -= 1\n if idx >= 55:\n new_idx -= 1\n if idx >= 70:\n new_idx -= 3\n return new_idx\n\ndef recode_idx_gw1n9(idx):\n new_idx = idx\n if idx >= 69:\n new_idx += 3\n return new_idx\n\ndef recode_idx_gw1n4(idx):\n new_idx = idx\n if idx >= 48:\n new_idx -= 1\n if idx >= 55:\n new_idx -= 1\n if idx >= 70:\n new_idx -= 2\n return new_idx\n\n# device = os.getenv(\"DEVICE\")\ndevice = sys.argv[1]\nparams = {\n \"GW1NS-2\": {\n \"package\": \"LQFP144\",\n \"device\": \"GW1NS-2C-LQFP144-5\",\n \"partnumber\": \"GW1NS-UX2CLQ144C5/I4\",\n \"recode_idx\": recode_idx_gw1ns_2,\n },\n \"GW1NS-4\": {\n \"package\": \"QFN48\",\n \"device\": \"GW1NSR-4C-QFN48-7\",\n \"partnumber\": \"GW1NSR-LV4CQN48PC7/I6\",\n \"recode_idx\": recode_idx_gw1ns_4,\n },\n \"GW1N-9\": {\n \"package\": \"PBGA256\",\n \"device\": \"GW1N-9-PBGA256-6\",\n \"partnumber\": \"GW1N-LV9PG256C6/I5\",\n \"recode_idx\": recode_idx_gw1n9,\n },\n \"GW1N-4\": {\n \"package\": \"PBGA256\",\n \"device\": \"GW1N-4-PBGA256-6\",\n \"partnumber\": \"GW1N-LV4PG256C6/I5\",\n \"recode_idx\": recode_idx_gw1n4,\n },\n \"GW1N-1\": {\n \"package\": \"LQFP144\",\n \"device\": \"GW1N-1-LQFP144-6\",\n \"partnumber\": \"GW1N-LV1LQ144C6/I5\",\n \"recode_idx\": recode_idx_gw1n1,\n },\n}[device]\n\nname_idx = 0\ndef make_name(bel, typ):\n global name_idx\n name_idx += 1\n return f\"inst{name_idx}_{bel}_{typ}\"\n\n# one fuzzer\nFuzzer = namedtuple('Fuzzer', [\n 'ttyp',\n 'mod',\n 'cst', # constraints\n 'cfg', # device config\n 'iostd', # io standard\n ])\n\ndffmap = {\n \"DFF\": None,\n \"DFFN\": None,\n \"DFFS\": \"SET\",\n \"DFFR\": \"RESET\",\n \"DFFP\": \"PRESET\",\n \"DFFC\": \"CLEAR\",\n \"DFFNS\": \"SET\",\n \"DFFNR\": \"RESET\",\n \"DFFNP\": \"PRESET\",\n \"DFFNC\": \"CLEAR\",\n}\ndef dff(locations):\n for ttyp in range(12, 18): # for each tile type\n mod = codegen.Module()\n cst = codegen.Constraints()\n try:\n # get all tiles of this type\n # iter causes the loop to not repeat the same locs per cls\n locs = iter(locations[ttyp])\n except KeyError:\n continue\n\n for cls in range(3): # for each cls\n for side in [\"A\", \"B\"]:\n for typ, port in dffmap.items(): # for each bel type\n try:\n loc = next(locs) # get the next unused tile\n except StopIteration:\n yield Fuzzer(ttyp, mod, cst, {}, '')\n locs = iter(locations[ttyp])\n loc = next(locs)\n mod = codegen.Module()\n cst = codegen.Constraints()\n\n lutname = make_name(\"DUMMY\", \"LUT4\")\n lut = 
codegen.Primitive(\"LUT4\", lutname)\n lut.params[\"INIT\"] = \"16'hffff\"\n lut.portmap['F'] = lutname+\"_F\"\n lut.portmap['I0'] = lutname+\"_I0\"\n lut.portmap['I1'] = lutname+\"_I1\"\n lut.portmap['I2'] = lutname+\"_I2\"\n lut.portmap['I3'] = lutname+\"_I3\"\n\n mod.wires.update(lut.portmap.values())\n mod.primitives[lutname] = lut\n name = make_name(\"DFF\", typ)\n dff = codegen.Primitive(typ, name)\n dff.portmap['CLK'] = name+\"_CLK\"\n dff.portmap['D'] = lutname+\"_F\"\n dff.portmap['Q'] = name+\"_Q\"\n if port:\n dff.portmap[port] = name+\"_\"+port\n mod.wires.update(dff.portmap.values())\n mod.primitives[name] = dff\n\n row = loc[0]+1\n col = loc[1]+1\n cst.cells[lutname] = (row, col, cls, side)\n cst.cells[name] = (row, col, cls, side)\n yield Fuzzer(ttyp, mod, cst, {}, '')\n\n# illegal pin-attr combination for device\n_illegal_combo = { (\"IOR6A\", \"SLEW_RATE\") : \"GW1NS-2\",\n (\"IOR6B\", \"SLEW_RATE\") : \"GW1NS-2\"}\n\ndef is_illegal(iostd, pin, attr):\n if _illegal_combo.get((pin, attr)) == device:\n return True\n # GW1N-1, GW1NS-2, GW1N-4 and GW1N-9 allow single resisor only in banks 1/3\n if (attr == \"SINGLE_RESISTOR\") and (pin[2] in \"BT\"):\n return True\n # bottom pins GW1NS-4 (bank 3) support LVCMOS only\n if iostd != '' and device == 'GW1NS-4':\n if pin.startswith('IOB'):\n return not iostd.startswith('LVCMOS')\n return False\n\n# take TBUF == IOBUF - O\niobmap = {\n \"IBUF\": {\"wires\": [\"O\"], \"inputs\": [\"I\"]},\n \"OBUF\": {\"wires\": [\"I\"], \"outputs\": [\"O\"]},\n \"IOBUF\": {\"wires\": [\"I\", \"O\", \"OEN\"], \"inouts\": [\"IO\"]},\n}\n\niostd_open_drain = {\n \"\" ,\n \"LVCMOS33\",\n \"LVCMOS25\",\n \"LVCMOS18\",\n \"LVCMOS15\",\n \"LVCMOS12\",\n }\niostd_histeresis = {\n \"\" ,\n \"LVCMOS33\",\n \"LVCMOS25\",\n \"LVCMOS18\",\n \"LVCMOS15\",\n \"LVCMOS12\",\n \"PCI33\" ,\n }\n\niostandards = [\"\", \"LVCMOS18\", \"LVCMOS33\", \"LVCMOS25\", \"LVCMOS15\",\n \"SSTL25_I\", \"SSTL33_I\", \"SSTL15\", \"HSTL18_I\", \"PCI33\"]\n\nAttrValues = namedtuple('ModeAttr', [\n 'allowed_modes', # allowed modes for the attribute\n 'values', # values of the attribute\n 'table', # special values table\n ])\n\niobattrs = {\n \"IO_TYPE\" : AttrValues([\"IBUF\", \"OBUF\", \"IOBUF\"], [\"\"], None),\n #\"SINGLE_RESISTOR\" : AttrValues([\"IBUF\", \"IOBUF\"], [\"ON\", \"OFF\"], None),\n}\n\ndef tbrl2rc(fse, side, num):\n if side == 'T':\n row = 0\n col = int(num) - 1\n elif side == 'B':\n row = len(fse['header']['grid'][61])-1\n col = int(num) - 1\n elif side == 'L':\n row = int(num) - 1\n col = 0\n elif side == 'R':\n row = int(num) - 1\n col = len(fse['header']['grid'][61][0])-1\n return (row, col)\n\n# get fuse bits from longval table\n# the key is automatically sorted and appended with zeros.\n# If ignore_key_elem is set, the initial elements in the table record keys\n# is ignored when searching.\ndef get_longval(fse, ttyp, table, key, ignore_key_elem = 0):\n bits = set()\n sorted_key = (sorted(key) + [0] * 16)[:16 - ignore_key_elem]\n for rec in fse[ttyp]['longval'][table]:\n k = rec[ignore_key_elem:16]\n if k == sorted_key:\n fuses = [f for f in rec[16:] if f != -1]\n for fuse in fuses:\n bits.update({fuse_h4x.fuse_lookup(fse, ttyp, fuse)})\n break\n return bits\n\n# diff boards have diff key indexes\ndef recode_key(key):\n return set(map(params['recode_idx'], key))\n\n# IOB from tables\n# (code, {option values}, is cmos-like mode, GW1N-4 aliases)\n_iostd_codes = {\n # XXX default LVCMOS18\n \"\" : ( 66, {'4', '8', '12'}, True, {'4': None, '8': 51, '12': 53}),\n 
\"LVCMOS33\" : ( 68, {'4', '8', '12', '16', '24'}, True, {'4': 48, '8': None, '12': 50, '16': 51, '24': 53}),\n \"LVCMOS25\" : ( 67, {'4', '8', '12', '16'}, True, {'4': None, '8': 50, '12': 51, '16': 53}),\n \"LVCMOS18\" : ( 66, {'4', '8', '12'}, True, {'4': None, '8': 51, '12': 53}),\n \"LVCMOS15\" : ( 65, {'4', '8'}, True, {'4': 50, '8': 53}),\n \"LVCMOS12\" : ( 64, {'4', '8'}, True, {'4': 50, '8': 53}),\n \"SSTL25_I\" : ( 71, {'8'}, False, {'8': 50}),\n \"SSTL25_II\" : ( 71, {'8'}, False, {'8': 50}),\n \"SSTL33_I\" : ( -1, {'8'}, False, {'8': None}),\n \"SSTL33_II\" : ( -1, {'8'}, False, {'8': None}),\n \"SSTL18_I\" : ( 72, {'8'}, False, {'8': 51}),\n \"SSTL18_II\" : ( 72, {'8'}, False, {'8': 51}),\n \"SSTL15\" : ( 74, {'8'}, False, {'8': 51}),\n \"HSTL18_I\" : ( 72, {'8'}, False, {'8': 53}),\n \"HSTL18_II\" : ( 72, {'8'}, False, {'8': 53}),\n \"HSTL15_I\" : ( 74, {'8'}, False, {'8': 51}),\n \"PCI33\" : ( 69, {'4', '8'}, False, {'4':48, '8': None}),\n }\n\n# PULL_MODE\n_pin_mode_longval = {'A':23, 'B':24, 'C':40, 'D':41, 'E':42, 'F':43, 'G':44, 'H':45, 'I':46, 'J':47}\n_pull_mode_iob = [\"IBUF\", \"OBUF\", \"IOBUF\"]\n_tbrlre = re.compile(r\"IO([TBRL])(\\d+)\")\n_pull_mode_idx = { 'UP' : -1, 'NONE' : 45, 'KEEPER' : 44, 'DOWN' : 43}\ndef fse_pull_mode(fse, db, pin_locations):\n for ttyp, tiles in pin_locations.items():\n pin_loc = list(tiles.keys())[0]\n side, num = _tbrlre.match(pin_loc).groups()\n row, col = tbrl2rc(fse, side, num)\n bels = {name[-1] for loc in tiles.values() for name in loc}\n for bel_idx in bels:\n bel = db.grid[row][col].bels.setdefault(f\"IOB{bel_idx}\", chipdb.Bel())\n for iostd, b_iostd in bel.iob_flags.items():\n for io_mode in _pull_mode_iob:\n b_mode = b_iostd.setdefault(io_mode, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault('PULL_MODE', chipdb.IOBFlag())\n for opt_name, val in _pull_mode_idx.items():\n if val == -1:\n loc = set()\n else:\n loc = get_longval(fse, ttyp, _pin_mode_longval[bel_idx], recode_key({val}))\n b_attr.options[opt_name] = loc\n\n# LVCMOS12/15/18 fuse\ndef get_12_15_18_bits(fse, ttyp, pin):\n return get_longval(fse, ttyp, _pin_mode_longval[pin], recode_key({66}))\n\n# SLEW_RATE\n_slew_rate_iob = [ \"OBUF\", \"IOBUF\"]\n_slew_rate_idx = { 'SLOW' : -1, 'FAST' : 42}\ndef fse_slew_rate(fse, db, pin_locations):\n for ttyp, tiles in pin_locations.items():\n pin_loc = list(tiles.keys())[0]\n side, num = _tbrlre.match(pin_loc).groups()\n row, col = tbrl2rc(fse, side, num)\n bels = {name[-1] for loc in tiles.values() for name in loc}\n for bel_idx in bels:\n bel = db.grid[row][col].bels.setdefault(f\"IOB{bel_idx}\", chipdb.Bel())\n for iostd, b_iostd in bel.iob_flags.items():\n for io_mode in _slew_rate_iob:\n b_mode = b_iostd.setdefault(io_mode, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault('SLEW_RATE', chipdb.IOBFlag())\n for opt_name, val in _slew_rate_idx.items():\n if val == -1:\n loc = set()\n else:\n loc = get_longval(fse, ttyp, _pin_mode_longval[bel_idx], recode_key({val}))\n b_attr.options[opt_name] = loc\n\n# DRIVE\n_drive_iob = [ \"OBUF\", \"IOBUF\"]\n_drive_idx = {'4': {48}, '8': {50}, '12': {51}, '16': {52}, '24': {54}}\n_drive_key = {56}\ndef fse_drive(fse, db, pin_locations):\n for ttyp, tiles in pin_locations.items():\n pin_loc = list(tiles.keys())[0]\n side, num = _tbrlre.match(pin_loc).groups()\n row, col = tbrl2rc(fse, side, num)\n bels = {name[-1] for loc in tiles.values() for name in loc}\n for bel_idx in bels:\n bel = db.grid[row][col].bels.setdefault(f\"IOB{bel_idx}\", chipdb.Bel())\n for iostd, b_iostd in 
bel.iob_flags.items():\n for io_mode in _drive_iob:\n b_mode = b_iostd.setdefault(io_mode, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault('DRIVE', chipdb.IOBFlag())\n for opt_name, val in _drive_idx.items():\n iostd_key, iostd_vals, iostd_cmos, gw1n4_aliases = _iostd_codes[iostd]\n if opt_name not in iostd_vals:\n continue\n # XXX\n if iostd_key == -1 or (iostd == \"PCI33\" and opt_name == '8'):\n loc = set()\n else:\n if device in ['GW1N-4', 'GW1NS-4']:\n opt_key = gw1n4_aliases[opt_name]\n if opt_key:\n val = _drive_key.union({opt_key})\n loc = get_longval(fse, ttyp, _pin_mode_longval[bel_idx],\n recode_key(val), 1)\n else:\n loc = set()\n else:\n val = {iostd_key}.union(_drive_key)\n if iostd_cmos:\n val = val.union(_drive_idx[opt_name])\n loc = get_longval(fse, ttyp, _pin_mode_longval[bel_idx],\n recode_key(val), 1)\n b_attr.options[opt_name] = loc\n\n# OPEN_DRAIN\n_open_drain_iob = [ \"OBUF\", \"IOBUF\"]\n_open_drain_key = {\"ON\": {55, 70}, \"NOISE\": {55, 72}}\n_open_drain_gw1n4_key = {\"ON\": {49, 54}, \"NOISE\": {51, 54}}\ndef fse_open_drain(fse, db, pin_locations):\n for ttyp, tiles in pin_locations.items():\n pin_loc = list(tiles.keys())[0]\n side, num = _tbrlre.match(pin_loc).groups()\n row, col = tbrl2rc(fse, side, num)\n bels = {name[-1] for loc in tiles.values() for name in loc}\n for bel_idx in bels:\n bel = db.grid[row][col].bels.setdefault(f\"IOB{bel_idx}\", chipdb.Bel())\n for iostd, b_iostd in bel.iob_flags.items():\n if iostd not in iostd_open_drain:\n continue\n # XXX presumably OPEN_DRAIN is another DRIVE mode, strange as it may sound.\n # Three fuses are used: ON=100, i.e. one is set and the other two are cleared,\n # OFF=xxx (xxx != 100)\n # These are the same fuses that are used for DRIVE and in the future you can\n # come up with a smarter way to find them.\n # XXX Below is a very shamanic method of determining the fuses,\n iostd33_key, _, _, gw1n4_aliases = _iostd_codes[\"LVCMOS33\"]\n if device in ['GW1N-4', 'GW1NS-4']:\n cur16ma_key = _drive_key.union({gw1n4_aliases[\"16\"]})\n keys = _open_drain_gw1n4_key\n else:\n cur16ma_key = {iostd33_key}.union(_drive_key).union(_drive_idx[\"16\"])\n keys = _open_drain_key\n # ON fuse is simple\n on_fuse = get_longval(fse, ttyp, _pin_mode_longval[bel_idx],\n recode_key(keys['ON']), 1)\n # the mask to clear is diff between 16mA fuses of LVCMOS33 standard and\n # some key\n cur16ma_fuse = get_longval(fse, ttyp, _pin_mode_longval[bel_idx],\n recode_key(cur16ma_key), 1)\n noise_fuse = get_longval(fse, ttyp, _pin_mode_longval[bel_idx],\n recode_key(keys['NOISE']), 1)\n clear_mask = cur16ma_fuse - noise_fuse - on_fuse;\n for io_mode in _open_drain_iob:\n b_mode = b_iostd.setdefault(io_mode, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault('OPEN_DRAIN', chipdb.IOBFlag())\n # bits of this attribute are the same as the DRIVE bits\n # so make a flag mask here, also never use OFF when encoding, only ON\n b_attr.mask = clear_mask.union(on_fuse)\n b_attr.options[\"OFF\"] = set()\n b_attr.options[\"ON\"] = on_fuse.copy()\n #print(b_attr.options)\n\n# HYSTERESIS\n_hysteresis_iob = [ \"IBUF\", \"IOBUF\"]\n_hysteresis_idx = { 'NONE': -1, 'HIGH': {57, 85}, 'H2L': {58, 85}, 'L2H': {59, 85}}\ndef fse_hysteresis(fse, db, pin_locations):\n for ttyp, tiles in pin_locations.items():\n pin_loc = list(tiles.keys())[0]\n side, num = _tbrlre.match(pin_loc).groups()\n row, col = tbrl2rc(fse, side, num)\n bels = {name[-1] for loc in tiles.values() for name in loc}\n for bel_idx in bels:\n bel = 
db.grid[row][col].bels.setdefault(f\"IOB{bel_idx}\", chipdb.Bel())\n for iostd, b_iostd in bel.iob_flags.items():\n if iostd not in iostd_histeresis:\n continue\n for io_mode in _hysteresis_iob:\n b_mode = b_iostd.setdefault(io_mode, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault('HYSTERESIS', chipdb.IOBFlag())\n for opt_name, val in _hysteresis_idx.items():\n if val == -1:\n loc = set()\n else:\n loc = get_longval(fse, ttyp, _pin_mode_longval[bel_idx],\n recode_key(val), 1)\n b_attr.options[opt_name] = loc\n\n# IOB fuzzer\ndef find_next_loc(pin, locs):\n # find the next location that has pin\n # or make a new module\n for tile, names in locs.items():\n name = tile+pin\n if name in names:\n del locs[tile]\n return name\n return None\n\ndef iob(locations):\n for iostd in iostandards:\n for ttyp, tiles in locations.items(): # for each tile of this type\n locs = tiles.copy()\n mod = codegen.Module()\n cst = codegen.Constraints()\n # get bels in this ttyp\n bels = {name[-1] for loc in tiles.values() for name in loc}\n for pin in bels: # [A, B, C, D, ...]\n for attr, attr_values in iobattrs.items(): # each IOB attribute\n # XXX remove\n if iostd == \"PCI33\" and attr == \"SINGLE_RESISTOR\":\n continue\n attr_vals = attr_values.values\n if attr_vals == None:\n attr_vals = attr_values.table[iostd]\n for attr_val in attr_vals: # each value of the attribute\n for typ, conn in iobmap.items():\n # skip illegal atributesa for mode\n if typ not in attr_values.allowed_modes:\n continue\n # find the next location that has pin\n # or make a new module\n loc = find_next_loc(pin, locs)\n if (loc == None):\n yield Fuzzer(ttyp, mod, cst, {}, iostd)\n locs = tiles.copy()\n mod = codegen.Module()\n cst = codegen.Constraints()\n loc = find_next_loc(pin, locs)\n\n # special pins\n if is_illegal(iostd, loc, attr):\n continue\n name = make_name(\"IOB\", typ)\n iob = codegen.Primitive(typ, name)\n for port in chain.from_iterable(conn.values()):\n iob.portmap[port] = name+\"_\"+port\n\n for direction, wires in conn.items():\n wnames = [name+\"_\"+w for w in wires]\n getattr(mod, direction).update(wnames)\n mod.primitives[name] = iob\n cst.ports[name] = loc\n # complex iob. 
connect OEN and O\n if typ == \"IOBUF\":\n iob.portmap[\"OEN\"] = name + \"_O\"\n if attr_val:\n # port attribute value\n cst.attrs[name] = {attr: attr_val}\n if iostd:\n cst.attrs.setdefault(name, {}).update({\"IO_TYPE\": iostd})\n yield Fuzzer(ttyp, mod, cst, {}, iostd)\n\n# collect all routing bits of the tile\n_route_mem = {}\ndef route_bits(db, row, col):\n mem = _route_mem.get((row, col), None)\n if mem != None:\n return mem\n\n bits = set()\n for w in db.grid[row][col].pips.values():\n for v in w.values():\n bits.update(v)\n _route_mem.setdefault((row, col), bits)\n return bits\n\ndualmode_pins = {'jtag', 'sspi', 'mspi', 'ready', 'done', 'reconfig', 'mode', 'i2c'}\ndef dualmode(ttyp):\n for pin in dualmode_pins:\n mod = codegen.Module()\n cst = codegen.Constraints()\n cfg = {pin: \"0\"}\n # modules with different ttyp can be combined, so in theory it could happen\n # that there is an IOB in the module, which claims the dual-purpose pin.\n # P&R will not be able to place it and the fuzzling result will be misleading.\n # Non-optimal: prohibit combining with anything.\n yield Fuzzer(ttyp, mod, cst, cfg, 'dual_mode_fuzzing')\n\n# read vendor .posp log\n_cst_parser = re.compile(r\"([^ ]+) (?:PLACE|CST)_R(\\d+)C(\\d+)\\[([0-3])\\]\\[([A-Z])\\]\")\n_place_parser = re.compile(r\"([^ ]+) (?:PLACE|CST)_IO([TBLR])(\\d+)\\[([A-Z])\\]\")\ndef read_posp(fname):\n with open(fname, 'r') as f:\n for line in f:\n cst = _cst_parser.match(line)\n place = _place_parser.match(line)\n if cst:\n name, row, col, cls, lut = cst.groups()\n yield \"cst\", name, int(row), int(col), int(cls), lut\n elif place:\n name, side, num, pin = place.groups()\n yield \"place\", name, side, int(num), pin\n elif line.strip() and not line.startswith('//'):\n raise Exception(line)\n\n# Read the packer vendor log to identify problem with primitives/attributes\n# returns dictionary {(primitive name, error code) : [full error text]}\n_err_parser = re.compile(\"(\\w+) +\\(([\\w\\d]+)\\).*'(inst[^\\']+)\\'.*\")\ndef read_err_log(fname):\n errs = {}\n with open(fname, 'r') as f:\n for line in f:\n res = _err_parser.match(line)\n if res:\n line_type, code, name = res.groups()\n text = res.group(0)\n if line_type in [\"Warning\", \"Error\"]:\n errs.setdefault((name, code), []).append(text)\n return errs\n\n# check if the primitive caused the warning/error\ndef primitive_caused_err(name, err_code, log):\n return (name, err_code) in log\n\n# Result of the vendor router-packer run\nPnrResult = namedtuple('PnrResult', [\n 'bitmap', 'hdr', 'ftr',\n 'constrs', # constraints\n 'config', # device config\n 'attrs', # port attributes\n 'errs' # parsed log file\n ])\n\ndef run_pnr(mod, constr, config):\n cfg = codegen.DeviceConfig({\n \"use_jtag_as_gpio\" : config.get('jtag', \"1\"),\n \"use_sspi_as_gpio\" : config.get('sspi', \"1\"),\n \"use_mspi_as_gpio\" : config.get('mspi', \"1\"),\n \"use_ready_as_gpio\" : config.get('ready', \"1\"),\n \"use_done_as_gpio\" : config.get('done', \"1\"),\n \"use_reconfign_as_gpio\" : config.get('reconfig', \"1\"),\n \"use_mode_as_gpio\" : config.get('mode', \"1\"),\n \"use_i2c_as_gpio\" : config.get('i2c', \"1\"),\n \"bit_crc_check\" : \"1\",\n \"bit_compress\" : \"0\",\n \"bit_encrypt\" : \"0\",\n \"bit_security\" : \"1\",\n \"bit_incl_bsram_init\" : \"0\",\n \"loading_rate\" : \"250/100\",\n \"spi_flash_addr\" : \"0x00FFF000\",\n \"bit_format\" : \"txt\",\n \"bg_programming\" : \"off\",\n \"secure_mode\" : \"0\"})\n\n opt = codegen.PnrOptions({\n \"gen_posp\" : \"1\",\n \"gen_io_cst\" : \"1\",\n 
\"gen_ibis\" : \"1\",\n \"ireg_in_iob\" : \"0\",\n \"oreg_in_iob\" : \"0\",\n \"ioreg_in_iob\" : \"0\",\n \"timing_driven\" : \"0\",\n \"cst_warn_to_error\" : \"0\"})\n #\"show_all_warn\" : \"1\",\n\n pnr = codegen.Pnr()\n pnr.device = device\n pnr.partnumber = params['partnumber']\n pnr.opt = opt\n pnr.cfg = cfg\n\n with tempfile.TemporaryDirectory() as tmpdir:\n with open(tmpdir+\"/top.v\", \"w\") as f:\n mod.write(f)\n pnr.netlist = tmpdir+\"/top.v\"\n with open(tmpdir+\"/top.cst\", \"w\") as f:\n constr.write(f)\n pnr.cst = tmpdir+\"/top.cst\"\n with open(tmpdir+\"/run.tcl\", \"w\") as f:\n pnr.write(f)\n\n subprocess.run([gowinhome + \"/IDE/bin/gw_sh\", tmpdir+\"/run.tcl\"], cwd = tmpdir)\n #print(tmpdir); input()\n try:\n return PnrResult(\n *bslib.read_bitstream(tmpdir+\"/impl/pnr/top.fs\"),\n constr,\n config, constr.attrs,\n read_err_log(tmpdir+\"/impl/pnr/top.log\"))\n except FileNotFoundError:\n print(tmpdir)\n input()\n return None\n\n\n# module + constraints + config\nDataForPnr = namedtuple('DataForPnr', ['modmap', 'cstmap', 'cfgmap'])\n\nif __name__ == \"__main__\":\n with open(f\"{gowinhome}/IDE/share/device/{device}/{device}.fse\", 'rb') as f:\n fse = fuse_h4x.readFse(f)\n\n with open(f\"{device}.json\") as f:\n dat = json.load(f)\n\n with open(f\"{gowinhome}/IDE/share/device/{device}/{device}.tm\", 'rb') as f:\n tm = tm_h4x.read_tm(f, device)\n\n db = chipdb.from_fse(fse)\n db.timing = tm\n db.packages, db.pinout, db.pin_bank = chipdb.json_pinout(device)\n\n corners = [\n (0, 0, fse['header']['grid'][61][0][0]),\n (0, db.cols-1, fse['header']['grid'][61][0][-1]),\n (db.rows-1, 0, fse['header']['grid'][61][-1][0]),\n (db.rows-1, db.cols-1, fse['header']['grid'][61][-1][-1]),\n ]\n\n locations = {}\n for row, row_dat in enumerate(fse['header']['grid'][61]):\n for col, typ in enumerate(row_dat):\n locations.setdefault(typ, []).append((row, col))\n\n pin_names = pindef.get_locs(device, params['package'], True)\n edges = {'T': fse['header']['grid'][61][0],\n 'B': fse['header']['grid'][61][-1],\n 'L': [row[0] for row in fse['header']['grid'][61]],\n 'R': [row[-1] for row in fse['header']['grid'][61]]}\n pin_locations = {}\n pin_re = re.compile(r\"IO([TBRL])(\\d+)([A-Z])\")\n for name in pin_names:\n side, num, pin = pin_re.match(name).groups()\n ttyp = edges[side][int(num)-1]\n ttyp_pins = pin_locations.setdefault(ttyp, {})\n ttyp_pins.setdefault(name[:-1], set()).add(name)\n\n # Add fuzzers here\n fuzzers = chain(\n iob(pin_locations),\n dff(locations),\n dualmode(fse['header']['grid'][61][0][0]),\n )\n\n # Only combine modules with the same IO standard\n pnr_data = {}\n for fuzzer in fuzzers:\n pnr_data.setdefault(fuzzer.iostd, DataForPnr({}, {}, {}))\n pnr_data[fuzzer.iostd].modmap.setdefault(fuzzer.ttyp, []).append(fuzzer.mod)\n pnr_data[fuzzer.iostd].cstmap.setdefault(fuzzer.ttyp, []).append(fuzzer.cst)\n pnr_data[fuzzer.iostd].cfgmap.setdefault(fuzzer.ttyp, []).append(fuzzer.cfg)\n\n modules = []\n constrs = []\n configs = []\n for data in pnr_data.values():\n modules += [reduce(lambda a, b: a+b, m, codegen.Module())\n for m in zip_longest(*data.modmap.values(), fillvalue=codegen.Module())]\n constrs += [reduce(lambda a, b: a+b, c, codegen.Constraints())\n for c in zip_longest(*data.cstmap.values(), fillvalue=codegen.Constraints())]\n configs += [reduce(lambda a, b: {**a, **b}, c, {})\n for c in zip_longest(*data.cfgmap.values(), fillvalue={})]\n\n type_re = re.compile(r\"inst\\d+_([A-Z]+)_([A-Z]+)\")\n\n pnr_empty = run_pnr(codegen.Module(), codegen.Constraints(), {})\n 
db.cmd_hdr = pnr_empty.hdr\n db.cmd_ftr = pnr_empty.ftr\n db.template = pnr_empty.bitmap\n\n p = Pool()\n pnr_res = p.imap_unordered(lambda param: run_pnr(*param), zip(modules, constrs, configs), 4)\n for pnr in pnr_res:\n seen = {}\n diff = pnr.bitmap ^ pnr_empty.bitmap\n bm = fuse_h4x.tile_bitmap(fse, diff)\n placement = chain(\n [(\"cst\", name, info) for name, info in pnr.constrs.cells.items()],\n [(\"place\", name, pin_re.match(info).groups()) for name, info in pnr.constrs.ports.items()]\n )\n for cst_type, name, info in placement:\n if primitive_caused_err(name, \"CT1108\", pnr.errs) or \\\n primitive_caused_err(name, \"CT1117\", pnr.errs) or \\\n primitive_caused_err(name, \"PR2016\", pnr.errs) or \\\n primitive_caused_err(name, \"PR2017\", pnr.errs) or \\\n primitive_caused_err(name, \"CT1005\", pnr.errs):\n raise Exception(f\"Placement conflict (PR201[67]):{name} or CT1108/CT1117\")\n\n bel_type, cell_type = type_re.match(name).groups()\n if cst_type == \"cst\":\n row, col, cls, lut = info\n print(name, row, col, cls, lut)\n row = row-1\n col = col-1\n elif cst_type == \"place\":\n side, num, pin = info\n row, col = tbrl2rc(fse, side, num)\n print(name, row, col, side, num, pin)\n\n typ = fse['header']['grid'][61][row][col]\n idx = (row, col, typ)\n\n # verify integrity\n if bel_type not in [\"DUMMY\", \"IOB\"]:\n if (row, col) in seen:\n oldname = seen[(row, col)]\n raise Exception(f\"Location {idx} used by {oldname} and {name}\")\n else:\n seen[(row, col)] = name\n\n tile = bm[idx]\n\n #for bitrow in tile:\n # print(*bitrow, sep='')\n\n rows, cols = np.where(tile==1)\n loc = set(zip(rows, cols))\n #print(cell_type, loc)\n\n if bel_type == \"DUMMY\":\n continue\n elif bel_type == \"DFF\":\n i = ord(lut)-ord(\"A\")\n bel = db.grid[row][col].bels.setdefault(f\"DFF{cls*2+i}\", chipdb.Bel())\n bel.modes[cell_type] = loc\n bel.portmap = {\n # D inputs hardwired to LUT F\n 'Q': f\"Q{cls*2+i}\",\n 'CLK': f\"CLK{cls}\",\n 'LSR': f\"LSR{cls}\", # set/reset\n 'CE': f\"CE{cls}\", # clock enable\n }\n elif bel_type == \"IOB\":\n bel = db.grid[row][col].bels.setdefault(f\"IOB{pin}\", chipdb.Bel())\n bel.lvcmos121518_bits = get_12_15_18_bits(fse, typ, pin)\n pnr_attrs = pnr.attrs.get(name)\n if pnr_attrs:\n # first get iostd\n iostd = pnr_attrs.get(\"IO_TYPE\")\n # default iostd and some attr\n if iostd == None:\n rec_iostd = \"\"\n rec_attr = list(pnr_attrs)[0]\n rec_val = pnr_attrs[rec_attr]\n # add flag record\n b_iostd = bel.iob_flags.setdefault(rec_iostd, {})\n b_mode = b_iostd.setdefault(cell_type, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault(rec_attr, chipdb.IOBFlag())\n b_attr.options[rec_val] = loc\n elif len(pnr_attrs) == 1:\n # only IO_TYPE\n # set mode bits\n b_iostd = bel.iob_flags.setdefault(iostd, {})\n b_mode = b_iostd.setdefault(cell_type, chipdb.IOBMode())\n if cell_type == \"IBUF\" and iostd in {'LVCMOS25', 'LVCMOS33'}:\n loc -= bel.lvcmos121518_bits\n b_mode.encode_bits = loc\n else:\n # IO_TYPE and some attr\n pnr_attrs.pop(iostd, None)\n rec_iostd = iostd\n rec_attr = list(pnr_attrs)[0]\n rec_val = pnr_attrs[rec_attr]\n # add flag record\n b_iostd = bel.iob_flags.setdefault(rec_iostd, {})\n b_mode = b_iostd.setdefault(cell_type, chipdb.IOBMode())\n b_attr = b_mode.flags.setdefault(rec_attr, chipdb.IOBFlag())\n b_attr.options[rec_val] = loc\n else:\n # set mode bits\n b_iostd = bel.iob_flags.setdefault('', {})\n b_mode = b_iostd.setdefault(cell_type, chipdb.IOBMode())\n b_mode.encode_bits = loc\n else:\n raise ValueError(f\"Type {bel_type} not handled\")\n\n # 
corner tiles for bank enable\n print(\"### CORNER TILES ###\")\n for idx in corners:\n row, col, typ = idx\n try:\n tile = bm[idx]\n except KeyError:\n continue\n rows, cols = np.where(tile==1)\n loc = set(zip(rows, cols))\n print(idx, loc)\n\n try:\n flag, = dualmode_pins.intersection(pnr.config)\n bel = db.grid[row][col].bels.setdefault(\"CFG\", chipdb.Bel())\n bel.flags.setdefault(flag.upper(), set()).update(loc)\n except ValueError:\n bel = db.grid[row][col].bels.setdefault(\"BANK\", chipdb.Bel())\n # in one file all iostd are same\n iostd = ''\n if pnr.attrs:\n iostd = pnr.attrs[next(iter(pnr.attrs))].get('IO_TYPE', '')\n if iostd:\n bel.bank_flags[iostd] = loc;\n else:\n bel.modes[\"ENABLE\"] = loc\n\n # Fill the IOB encodings from fse tables\n fse_pull_mode(fse, db, pin_locations)\n fse_slew_rate(fse, db, pin_locations)\n fse_hysteresis(fse, db, pin_locations)\n fse_drive(fse, db, pin_locations)\n\n chipdb.dat_portmap(dat, db)\n chipdb.dat_aliases(dat, db)\n chipdb.diff2flag(db)\n\n # must be after diff2flags in order to make clean mask for OPEN_DRAIN\n fse_open_drain(fse, db, pin_locations)\n chipdb.dff_clean(db)\n\n db.grid[0][0].bels['CFG'].flags['UNK0'] = {(3, 1)}\n db.grid[0][0].bels['CFG'].flags['UNK1'] = {(3, 2)}\n\n # set template dual-mode pins to HW mode\n for pin in dualmode_pins:\n try:\n loc, = db.grid[0][0].bels['CFG'].flags[pin.upper()]\n except KeyError:\n continue\n db.template[loc] = 0\n\n #TODO proper serialization format\n with open(f\"{device}_stage1.pickle\", 'wb') as f:\n pickle.dump(db, f)\n"
] | [
[
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
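The only API extracted for the record above is `numpy.where`; as an illustrative sketch (the toy `tile` array below is an assumption, standing in for a decoded fuse-tile bitmap), this is the idiom the fuzzer uses to turn set bits into `(row, col)` fuse locations:

```python
import numpy as np

# Toy stand-in for a decoded fuse tile: 1 marks a fuse bit set by the P&R run.
tile = np.array([
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
])

# Same idiom as in the record: np.where yields the row and column indices
# of every set bit, which are zipped into a set of (row, col) locations.
rows, cols = np.where(tile == 1)
loc = set(zip(rows, cols))
# loc now holds the fuse coordinates, e.g. (0, 1), (1, 2) and (2, 0)
```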
broccoliboy/LedFx | [
"1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c"
] | [
"ledfx/effects/mel.py"
] | [
"\"\"\"This module implements a Mel Filter Bank.\nIn other words it is a filter bank with triangular shaped bands\narnged on the mel frequency scale.\nAn example ist shown in the following figure:\n.. plot::\n from pylab import plt\n import melbank\n f1, f2 = 1000, 8000\n melmat, (melfreq, fftfreq) = melbank.compute_melmat(6, f1, f2, num_fft_bands=4097)\n fig, ax = plt.subplots(figsize=(8, 3))\n ax.plot(fftfreq, melmat.T)\n ax.grid(True)\n ax.set_ylabel('Weight')\n ax.set_xlabel('Frequency / Hz')\n ax.set_xlim((f1, f2))\n ax2 = ax.twiny()\n ax2.xaxis.set_ticks_position('top')\n ax2.set_xlim((f1, f2))\n ax2.xaxis.set_ticks(melbank.mel_to_hertz(melfreq))\n ax2.xaxis.set_ticklabels(['{:.0f}'.format(mf) for mf in melfreq])\n ax2.set_xlabel('Frequency / mel')\n plt.tight_layout()\n fig, ax = plt.subplots()\n ax.matshow(melmat)\n plt.axis('equal')\n plt.axis('tight')\n plt.title('Mel Matrix')\n plt.tight_layout()\nFunctions\n---------\n\"\"\"\n\nfrom math import log\n\nfrom numpy import abs, arange, linspace, mean, zeros\n\n\ndef hertz_to_mel(freq):\n \"\"\"Returns mel-frequency from linear frequency input.\n Parameter\n ---------\n freq : scalar or ndarray\n Frequency value or array in Hz.\n Returns\n -------\n mel : scalar or ndarray\n Mel-frequency value or ndarray in Mel\n \"\"\"\n # return 2595.0 * log10(1 + (freq / 700.0))\n return 3340.0 * log(1 + (freq / 250.0), 9)\n\n\ndef mel_to_hertz(mel):\n \"\"\"Returns frequency from mel-frequency input.\n Parameter\n ---------\n mel : scalar or ndarray\n Mel-frequency value or ndarray in Mel\n Returns\n -------\n freq : scalar or ndarray\n Frequency value or array in Hz.\n \"\"\"\n # return 700.0 * (10**(mel / 2595.0)) - 700.0\n return 250.0 * (9 ** (mel / 3340.0)) - 250.0\n\n\ndef melfrequencies_mel_filterbank(\n num_bands, freq_min, freq_max, num_fft_bands\n):\n \"\"\"Returns centerfrequencies and band edges for a mel filter bank\n Parameters\n ----------\n num_bands : int\n Number of mel bands.\n freq_min : scalar\n Minimum frequency for the first band.\n freq_max : scalar\n Maximum frequency for the last band.\n num_fft_bands : int\n Number of fft bands.\n Returns\n -------\n center_frequencies_mel : ndarray\n lower_edges_mel : ndarray\n upper_edges_mel : ndarray\n \"\"\"\n\n mel_max = hertz_to_mel(freq_max)\n mel_min = hertz_to_mel(freq_min)\n delta_mel = abs(mel_max - mel_min) / (num_bands + 1.0)\n frequencies_mel = mel_min + delta_mel * arange(0, num_bands + 2)\n lower_edges_mel = frequencies_mel[:-2]\n upper_edges_mel = frequencies_mel[2:]\n center_frequencies_mel = frequencies_mel[1:-1]\n return center_frequencies_mel, lower_edges_mel, upper_edges_mel\n\n\ndef compute_melmat(\n num_mel_bands=12,\n freq_min=64,\n freq_max=8000,\n num_fft_bands=513,\n sample_rate=16000,\n):\n \"\"\"Returns tranformation matrix for mel spectrum.\n Parameters\n ----------\n num_mel_bands : int\n Number of mel bands. Number of rows in melmat.\n Default: 24\n freq_min : scalar\n Minimum frequency for the first band.\n Default: 64\n freq_max : scalar\n Maximum frequency for the last band.\n Default: 8000\n num_fft_bands : int\n Number of fft-frequenc bands. 
This ist NFFT/2+1 !\n number of columns in melmat.\n Default: 513 (this means NFFT=1024)\n sample_rate : scalar\n Sample rate for the signals that will be used.\n Default: 44100\n Returns\n -------\n melmat : ndarray\n Transformation matrix for the mel spectrum.\n Use this with fft spectra of num_fft_bands_bands length\n and multiply the spectrum with the melmat\n this will tranform your fft-spectrum\n to a mel-spectrum.\n frequencies : tuple (ndarray <num_mel_bands>, ndarray <num_fft_bands>)\n Center frequencies of the mel bands, center frequencies of fft spectrum.\n \"\"\"\n (\n center_frequencies_mel,\n lower_edges_mel,\n upper_edges_mel,\n ) = melfrequencies_mel_filterbank(\n num_mel_bands, freq_min, freq_max, num_fft_bands\n )\n\n center_frequencies_hz = mel_to_hertz(center_frequencies_mel)\n lower_edges_hz = mel_to_hertz(lower_edges_mel)\n upper_edges_hz = mel_to_hertz(upper_edges_mel)\n freqs = linspace(0.0, sample_rate / 2.0, num_fft_bands)\n melmat = zeros((num_mel_bands, num_fft_bands))\n\n for imelband, (center, lower, upper) in enumerate(\n zip(center_frequencies_hz, lower_edges_hz, upper_edges_hz)\n ):\n\n left_slope = (freqs >= lower) == (freqs <= center)\n melmat[imelband, left_slope] = (freqs[left_slope] - lower) / (\n center - lower\n )\n\n right_slope = (freqs >= center) == (freqs <= upper)\n melmat[imelband, right_slope] = (upper - freqs[right_slope]) / (\n upper - center\n )\n return (melmat, center_frequencies_hz, freqs)\n\n\ndef compute_melmat_from_range(\n lower_edges_hz,\n upper_edges_hz,\n num_fft_bands=513,\n sample_rate=16000,\n):\n\n melmat = zeros((len(lower_edges_hz), num_fft_bands))\n freqs = linspace(0.0, sample_rate / 2.0, num_fft_bands)\n center_frequencies_hz = mean([lower_edges_hz, upper_edges_hz], axis=0)\n\n for imelband, (lower, center, upper) in enumerate(\n zip(lower_edges_hz, center_frequencies_hz, upper_edges_hz)\n ):\n\n left_slope = (freqs >= lower) == (freqs <= center)\n melmat[imelband, left_slope] = (freqs[left_slope] - lower) / (\n center - lower\n )\n\n right_slope = (freqs >= center) == (freqs <= upper)\n melmat[imelband, right_slope] = (upper - freqs[right_slope]) / (\n upper - center\n )\n return (melmat, center_frequencies_hz, freqs)\n"
] | [
[
"numpy.abs",
"numpy.linspace",
"numpy.arange",
"numpy.mean",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
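As a usage sketch for the mel filter bank module above (the frame length, sample rate and random test signal are assumptions, not part of the source), the returned matrix is multiplied with an FFT magnitude spectrum to obtain one energy value per mel band, as its docstring describes:

```python
import numpy as np
from ledfx.effects.mel import compute_melmat  # module path as listed in the record

NFFT = 1024          # so num_fft_bands = NFFT/2 + 1 = 513, matching the default
SAMPLE_RATE = 16000

melmat, center_freqs_hz, fft_freqs = compute_melmat(
    num_mel_bands=24,
    freq_min=64,
    freq_max=8000,
    num_fft_bands=NFFT // 2 + 1,
    sample_rate=SAMPLE_RATE,
)

# Hypothetical magnitude spectrum of a single audio frame.
frame = np.random.randn(NFFT)
magnitude = np.abs(np.fft.rfft(frame))   # 513 bins

# Triangular weighting: one energy value per mel band.
mel_spectrum = melmat @ magnitude        # shape (24,)
```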
me2190901/Library_Inventory_Management_System | [
"d708aba96f0ff4283edb0bc784e8063aeb2f9ec3"
] | [
"Integrated.py"
] | [
"import spine_segment\nimport glob as gb\nimport text_recognize as tr\nimport pandas as pd\n# from IPython.display import display, HTML\ndef prediction(loc,debug=False):\n df=pd.read_csv(\".\\data1.csv\")\n df.drop_duplicates(subset =\"Title\",\n keep = False, inplace = True)\n if (debug):\n l=[[\"Afewwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww\",1],[\"B\",2]]\n return l\n img_segmentation=spine_segment.get_book_lines(loc)\n l=[]\n total={}\n for img_s in img_segmentation:\n title=tr.text_detection_spines(img_s,False)\n # if(len(title)>0):\n # print(\"Original : \",title) \n dic={}\n line=title.strip().split()\n for word in line:\n a=df[df['Title'].str.contains(word,case=False)][['Title']]\n\n\n for i in a[\"Title\"]:\n if i not in dic:\n dic[i]=0\n else:\n dic[i]=dic[i]+len(word)\n # print(dic)\n try:\n maxtitle = max(zip(dic.values(), dic.keys()))[1]\n b=df.index[df[\"Title\"]==maxtitle]\n b=b.tolist()\n ind=b[0]\n df.iat[ind,1]=df.iat[ind,1]+1\n if(df.iat[ind,2]-df.iat[ind,1]-df.iat[ind,3]>0):\n df.iat[ind,4]=df.iat[ind,2]-df.iat[ind,1]-df.iat[ind,3]\n else:\n df.iat[ind,4]=0\n if(maxtitle in total):\n total[maxtitle]=total[maxtitle]+1\n else:\n total[maxtitle]=1\n # l.append([maxtitle,df.iat[ind,1]])\n # print(\"Pridicted : \",maxtitle)\n except:\n # print(\"No Title Found\")\n print(\"No Title Found\")\n for key in total:\n l.append([key,total[key]])\n df.to_csv(\"data1.csv\", index=False)\n # print(\"------------------------------------\")\n return l\n\ndef search(line,number,debug=False):\n df=pd.read_csv(\".\\data1.csv\")\n df.drop_duplicates(subset =\"Title\",\n keep = False, inplace = True)\n dic={}\n line1=line\n line=list(line.strip().split())\n for word in line:\n # print(word)\n a=df[df['Title'].str.contains(word,case=False)][['Title']]\n for i in a[\"Title\"]:\n if i not in dic:\n dic[i]=0\n else:\n dic[i]=dic[i]+len(word)\n # print(dic)\n try:\n maxtitle = max(zip(dic.values(), dic.keys()))[1]\n b=df.index[df[\"Title\"]==maxtitle]\n if(len(line1)/len(maxtitle)<0.6):\n raise Exception(\"Title not detected correctly\")\n b=b.tolist()\n ind=b[0]\n df.iat[ind,1]=df.iat[ind,1]+number\n if(df.iat[ind,2]-df.iat[ind,1]-df.iat[ind,3]>0):\n df.iat[ind,4]=df.iat[ind,2]-df.iat[ind,1]-df.iat[ind,3]\n else:\n df.iat[ind,4]=0\n if(not debug):\n df.to_csv(\"data1.csv\", index=False)\n # l.append([maxtitle,df.iat[ind,1]])\n # print(\"Pridicted : \",maxtitle)\n except:\n # print(\"No Title Found\")\n # print(line1,number)\n df1 = pd.DataFrame([(line1, number)],\n columns=('Title', 'Checked')\n )\n df3 = pd.concat([df, df1], ignore_index = True)\n df3.reset_index()\n if(not debug):\n df3.to_csv(\"data1.csv\", index=False)\n \n # print(dic)\n # print(\"------------------------------------\")\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
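The record above matches OCR'd spine text against a catalogue by scoring titles with `str.contains`; below is a simplified sketch of that idiom (the toy table, the detected string and the scoring rule are illustrative assumptions — the real script reads `data1.csv` and scores slightly differently):

```python
import pandas as pd

# Toy catalogue standing in for data1.csv.
df = pd.DataFrame({"Title": ["Introduction to Algorithms",
                             "Operating System Concepts",
                             "Computer Networks"]})

detected = "intro algorithms"   # text recognised from a book spine

# Score every title by the total length of detected words it contains
# (case-insensitive substring match), then keep the best-scoring title.
scores = {}
for word in detected.split():
    for title in df[df["Title"].str.contains(word, case=False)]["Title"]:
        scores[title] = scores.get(title, 0) + len(word)

best = max(scores, key=scores.get) if scores else None
print(best)   # Introduction to Algorithms
```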
yzhq97/SCKR | [
"601545db60eac3845e0eeaaae6b0580d4a41d949"
] | [
"modules/vgg16.py"
] | [
"import inspect\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport time\n\nVGG_MEAN = [103.939, 116.779, 123.68]\n\n\nclass Vgg16:\n def __init__(self, vgg16_npy_path):\n self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()\n print(\"vgg16 npy file loaded\")\n\n def build(self, input, format='rgb'):\n \"\"\"\n load variable from npy to build the VGG\n :param input: rgb or bgr image [batch, height, width, 3] values scaled [0, 255]\n :param format: rgb or bgr\n \"\"\"\n\n if format == 'rgb':\n red, green, blue = tf.split(input, 3, 3)\n bgr = tf.concat([\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ], 3)\n elif format == 'bgr':\n bgr = input\n else:\n raise Exception('format can only be rgb or bgr')\n\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n # self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n # self.relu7 = tf.nn.relu(self.fc7)\n\n # self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")\n\n\n def avg_pool(self, bottom, name):\n return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def max_pool(self, bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def conv_layer(self, bottom, name):\n with tf.variable_scope(name):\n filt = self.get_conv_filter(name)\n\n conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')\n\n conv_biases = self.get_bias(name)\n bias = tf.nn.bias_add(conv, conv_biases)\n\n relu = tf.nn.relu(bias)\n return relu\n\n def fc_layer(self, bottom, name):\n with tf.variable_scope(name):\n shape = bottom.get_shape().as_list()\n dim = 1\n for d in shape[1:]:\n dim *= d\n x = tf.reshape(bottom, [-1, dim])\n\n weights = self.get_fc_weight(name)\n biases = self.get_bias(name)\n\n # Fully connected layer. Note that the '+' operation automatically\n # broadcasts the biases.\n fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n\n return fc\n\n def get_conv_filter(self, name):\n return tf.constant(self.data_dict[name][0], name=\"filter\")\n\n def get_bias(self, name):\n return tf.constant(self.data_dict[name][1], name=\"biases\")\n\n def get_fc_weight(self, name):\n return tf.constant(self.data_dict[name][0], name=\"weights\")"
] | [
[
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"tensorflow.nn.avg_pool",
"tensorflow.variable_scope",
"numpy.load",
"tensorflow.split",
"tensorflow.nn.conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
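The VGG16 wrapper above targets TensorFlow 1.x graph mode (the record's version list starts at 1.10); a minimal usage sketch, assuming the pretrained `vgg16.npy` weight file is available locally:

```python
import numpy as np
import tensorflow as tf
from modules.vgg16 import Vgg16   # module path as listed in the record

vgg = Vgg16('vgg16.npy')          # path to the pretrained weights is an assumption

# Placeholder for a batch of 224x224 RGB images scaled to [0, 255].
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg.build(images, format='rgb')

with tf.Session() as sess:
    batch = (np.random.rand(1, 224, 224, 3) * 255.0).astype(np.float32)
    features = sess.run(vgg.relu6, feed_dict={images: batch})
    print(features.shape)         # (1, 4096)
```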
renwenduan/fooltrader | [
"c9ede56d6ce4f952618d14e0ec28479584ad9377"
] | [
"fooltrader/spiders/stock_kdata_spider_ths.py"
] | [
"import json\nimport os\n\nimport pandas as pd\nimport scrapy\nfrom scrapy import Request\nfrom scrapy import signals\n\nfrom fooltrader.api.quote import get_security_list\nfrom fooltrader.consts import TONGHUASHUN_KDATA_HEADER\nfrom fooltrader.contract import data_contract\nfrom fooltrader.contract.files_contract import get_kdata_path, get_trading_dates_path_ths\n\n\nclass StockKDataSpiderTHS(scrapy.Spider):\n name = \"stock_kdata_ths\"\n\n custom_settings = {\n 'DOWNLOAD_DELAY': 5,\n 'CONCURRENT_REQUESTS_PER_DOMAIN': 4,\n\n 'SPIDER_MIDDLEWARES': {\n 'fooltrader.middlewares.FoolErrorMiddleware': 1000,\n },\n 'DOWNLOADER_MIDDLEWARES': {\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': None,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': None,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': None,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,\n 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': None,\n }\n }\n\n def start_requests(self):\n for _, item in get_security_list().iterrows():\n for fuquan in ['hfq', 'bfq']:\n data_path = get_kdata_path(item, fuquan=fuquan, source='ths')\n data_exist = os.path.isfile(data_path)\n if not data_exist or True:\n # get day k data\n if fuquan == 'hfq':\n flag = 2\n else:\n flag = 0\n url = self.get_k_data_url(item['code'], flag)\n yield Request(url=url, headers=TONGHUASHUN_KDATA_HEADER,\n meta={'path': data_path, 'item': item, 'fuquan': fuquan},\n callback=self.download_day_k_data)\n\n else:\n self.logger.info(\"{} kdata existed\".format(item['code']))\n\n def download_day_k_data(self, response):\n path = response.meta['path']\n item = response.meta['item']\n\n trading_dates = []\n price_json = []\n\n try:\n df = pd.DataFrame(columns=data_contract.KDATA_COLUMN)\n\n tmp_str = response.text\n json_str = tmp_str[tmp_str.index('{'):tmp_str.index('}') + 1]\n tmp_json = json.loads(json_str)\n\n # parse the trading dates\n dates = tmp_json['dates'].split(',')\n count = 0\n for year_dates in tmp_json['sortYear']:\n for i in range(year_dates[1]):\n trading_dates.append('{}-{}-{}'.format(year_dates[0], dates[count][0:2], dates[count][2:]))\n count += 1\n\n # parse the kdata\n tmp_price = tmp_json['price'].split(',')\n for i in range(int(len(tmp_price) / 4)):\n low_price = round(int(tmp_price[4 * i]) / 100, 2)\n open_price = round(low_price + int(tmp_price[4 * i + 1]) / 100, 2)\n high_price = round(low_price + int(tmp_price[4 * i + 2]) / 100, 2)\n close_price = round(low_price + int(tmp_price[4 * i + 3]) / 100, 2)\n\n price_json.append({\"low\": low_price,\n \"open\": open_price,\n \"high\": high_price,\n \"close\": close_price})\n\n volumns = tmp_json['volumn'].split(',')\n\n for i in range(int(tmp_json['total'])):\n df.loc[i] = [trading_dates[i], item['code'], price_json[i]['low'], price_json[i]['open'],\n price_json[i]['close'],\n price_json[i]['high'], int(volumns[i]), 0, item['id']]\n\n df.to_csv(path, index=False, )\n except Exception as e:\n self.logger.error('error when getting k data url={} error={}'.format(response.url, e))\n\n if len(trading_dates) > 0:\n try:\n with open(get_trading_dates_path_ths(item), \"w\") as f:\n 
json.dump(trading_dates, f)\n except Exception as e:\n self.logger.error(\n 'error when saving trading dates url={} path={} error={}'.format(response.url, path, e))\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = super(StockKDataSpiderTHS, cls).from_crawler(crawler, *args, **kwargs)\n crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)\n return spider\n\n def spider_closed(self, spider, reason):\n spider.logger.info('Spider closed: %s,%s\\n', spider.name, reason)\n\n def get_k_data_url(self, code, fuquan=0):\n return 'http://d.10jqka.com.cn/v6/line/hs_{}/0{}/all.js'.format(code, fuquan)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
euske/introdl | [
"f6d9da71c7172952e9b5872502293dbb41eb7d93"
] | [
"lec3/plotgrad.py"
] | [
"#!/usr/bin/env python\nimport numpy as np\nfrom PIL import Image\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\nsize = 256\nls = np.linspace(-2,+2,size,False)\nw1 = ls\nw2 = ls.reshape(size,1)[::-1]\nw3 = -2.0\nb = 1.0\n# compute y.\nya = sigmoid(w1*0 + w2*0 + w3*0 + b)\nyb = sigmoid(w1*0 + w2*1 + w3*0 + b)\nyc = sigmoid(w1*1 + w2*0 + w3*1 + b)\n# compute loss.\nL = (ya-1)**2 + (yb-1)**2 + (yc-0)**2\n# scale to 0.0 - 1.0.\n(l0, l1) = (L.min(), L.max())\nL = (L-l0)/(l1-l0)\n# convert to RGB.\na = np.array([L, np.zeros_like(L), 1-L])\n# save image.\na = a.transpose(1,2,0)*255\nimg = Image.fromarray(a.astype(np.uint8), 'RGB')\nimg.save('grad1.png')\n"
] | [
[
"numpy.exp",
"numpy.zeros_like",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wptoux/finvest-tutorial | [
"73fe98dbdb65853170740cf2361b03256316ab01"
] | [
"portfolio_opt.py"
] | [
"import numpy as np\nimport scipy.optimize as sco\nimport math\n\n__all__ = [\n 'efficient_frontier'\n]\n\ndef efficient_frontier(returns):\n '''\n '''\n n = returns.shape[1]\n noa = returns.shape[0]\n \n N = 100\n qs = [10**(5.0 * t/N - 1.0) for t in range(N)]\n \n Sigma = np.cov(returns)\n RT = np.mean(returns,axis=1)\n \n cons = ({'type':'eq','fun':lambda x:np.sum(x)-1})\n bnds = tuple((0,1) for x in range(noa))\n \n rets = []\n risks = []\n weights = []\n \n def markowitz_loss(weights,q):\n wT = weights.flatten()\n w = wT.T\n loss = wT.dot(Sigma).dot(w) - q * RT.dot(w)\n return loss\n \n for q in qs:\n res = sco.minimize(markowitz_loss, noa*[1./noa,], method='SLSQP', bounds=bnds, constraints=cons, args=(q))\n rets.append(RT.dot(res.x.T))\n risks.append(math.sqrt(res.x.T.dot(Sigma).dot(res.x)))\n weights.append(res.x)\n \n rets = np.array(rets)\n risks = np.array(risks)\n return rets,risks,weights"
] | [
[
"numpy.cov",
"numpy.mean",
"scipy.optimize.minimize",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
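A usage sketch for `efficient_frontier` above (the simulated return matrix is an assumption; the function expects one row per asset and one column per observation):

```python
import numpy as np
from portfolio_opt import efficient_frontier   # module as listed in the record

np.random.seed(0)
# 4 assets, 250 daily return observations (rows = assets, columns = observations).
returns = np.random.randn(4, 250) * 0.01 + 0.0005

rets, risks, weights = efficient_frontier(returns)

# One point per risk-aversion parameter q sampled along the frontier.
print(rets.shape, risks.shape, len(weights))   # (100,) (100,) 100
```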
erayon/models | [
"c5ad244e8e1ac263e8dd33533e9c2c0b53763d46"
] | [
"official/benchmark/models/resnet_cifar_main.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Runs a ResNet model on the Cifar-10 dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom absl import flags\nimport tensorflow as tf\nfrom official.benchmark.models import resnet_cifar_model\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import logger\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import keras_utils\nfrom official.vision.image_classification import cifar_preprocessing\nfrom official.vision.image_classification import common\n\n\nLR_SCHEDULE = [ # (multiplier, epoch to start) tuples\n (0.1, 91), (0.01, 136), (0.001, 182)\n]\n\n\ndef learning_rate_schedule(current_epoch,\n current_batch,\n batches_per_epoch,\n batch_size):\n \"\"\"Handles linear scaling rule and LR decay.\n\n Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the\n provided scaling factor.\n\n Args:\n current_epoch: integer, current epoch indexed from 0.\n current_batch: integer, current batch in the current epoch, indexed from 0.\n batches_per_epoch: integer, number of steps in an epoch.\n batch_size: integer, total batch sized.\n\n Returns:\n Adjusted learning rate.\n \"\"\"\n del current_batch, batches_per_epoch # not used\n initial_learning_rate = common.BASE_LEARNING_RATE * batch_size / 128\n learning_rate = initial_learning_rate\n for mult, start_epoch in LR_SCHEDULE:\n if current_epoch >= start_epoch:\n learning_rate = initial_learning_rate * mult\n else:\n break\n return learning_rate\n\n\nclass LearningRateBatchScheduler(tf.keras.callbacks.Callback):\n \"\"\"Callback to update learning rate on every batch (not epoch boundaries).\n\n N.B. 
Only support Keras optimizers, not TF optimizers.\n\n Attributes:\n schedule: a function that takes an epoch index and a batch index as input\n (both integer, indexed from 0) and returns a new learning rate as\n output (float).\n \"\"\"\n\n def __init__(self, schedule, batch_size, steps_per_epoch):\n super(LearningRateBatchScheduler, self).__init__()\n self.schedule = schedule\n self.steps_per_epoch = steps_per_epoch\n self.batch_size = batch_size\n self.epochs = -1\n self.prev_lr = -1\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.model.optimizer, 'learning_rate'):\n raise ValueError('Optimizer must have a \"learning_rate\" attribute.')\n self.epochs += 1\n\n def on_batch_begin(self, batch, logs=None):\n \"\"\"Executes before step begins.\"\"\"\n lr = self.schedule(self.epochs,\n batch,\n self.steps_per_epoch,\n self.batch_size)\n if not isinstance(lr, (float, np.float32, np.float64)):\n raise ValueError('The output of the \"schedule\" function should be float.')\n if lr != self.prev_lr:\n self.model.optimizer.learning_rate = lr # lr should be a float here\n self.prev_lr = lr\n tf.compat.v1.logging.debug(\n 'Epoch %05d Batch %05d: LearningRateBatchScheduler '\n 'change learning rate to %s.', self.epochs, batch, lr)\n\n\ndef run(flags_obj):\n \"\"\"Run ResNet Cifar-10 training and eval loop using native Keras APIs.\n\n Args:\n flags_obj: An object containing parsed flag values.\n\n Raises:\n ValueError: If fp16 is passed as it is not currently supported.\n\n Returns:\n Dictionary of training and eval stats.\n \"\"\"\n keras_utils.set_session_config(\n enable_eager=flags_obj.enable_eager,\n enable_xla=flags_obj.enable_xla)\n\n # Execute flag override logic for better model performance\n if flags_obj.tf_gpu_thread_mode:\n keras_utils.set_gpu_thread_mode_and_count(\n per_gpu_thread_count=flags_obj.per_gpu_thread_count,\n gpu_thread_mode=flags_obj.tf_gpu_thread_mode,\n num_gpus=flags_obj.num_gpus,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads)\n common.set_cudnn_batchnorm_mode()\n\n dtype = flags_core.get_tf_dtype(flags_obj)\n if dtype == 'fp16':\n raise ValueError('dtype fp16 is not supported in Keras. Use the default '\n 'value(fp32).')\n\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n tf.keras.backend.set_image_data_format(data_format)\n\n strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy=flags_obj.distribution_strategy,\n num_gpus=flags_obj.num_gpus,\n all_reduce_alg=flags_obj.all_reduce_alg,\n num_packs=flags_obj.num_packs)\n\n if strategy:\n # flags_obj.enable_get_next_as_optional controls whether enabling\n # get_next_as_optional behavior in DistributedIterator. 
If true, last\n # partial batch can be supported.\n strategy.extended.experimental_enable_get_next_as_optional = (\n flags_obj.enable_get_next_as_optional\n )\n\n strategy_scope = distribution_utils.get_strategy_scope(strategy)\n\n if flags_obj.use_synthetic_data:\n distribution_utils.set_up_synthetic_data()\n input_fn = common.get_synth_input_fn(\n height=cifar_preprocessing.HEIGHT,\n width=cifar_preprocessing.WIDTH,\n num_channels=cifar_preprocessing.NUM_CHANNELS,\n num_classes=cifar_preprocessing.NUM_CLASSES,\n dtype=flags_core.get_tf_dtype(flags_obj),\n drop_remainder=True)\n else:\n distribution_utils.undo_set_up_synthetic_data()\n input_fn = cifar_preprocessing.input_fn\n\n train_input_dataset = input_fn(\n is_training=True,\n data_dir=flags_obj.data_dir,\n batch_size=flags_obj.batch_size,\n num_epochs=flags_obj.train_epochs,\n parse_record_fn=cifar_preprocessing.parse_record,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads,\n dtype=dtype,\n # Setting drop_remainder to avoid the partial batch logic in normalization\n # layer, which triggers tf.where and leads to extra memory copy of input\n # sizes between host and GPU.\n drop_remainder=(not flags_obj.enable_get_next_as_optional))\n\n eval_input_dataset = None\n if not flags_obj.skip_eval:\n eval_input_dataset = input_fn(\n is_training=False,\n data_dir=flags_obj.data_dir,\n batch_size=flags_obj.batch_size,\n num_epochs=flags_obj.train_epochs,\n parse_record_fn=cifar_preprocessing.parse_record)\n\n steps_per_epoch = (\n cifar_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)\n lr_schedule = 0.1\n if flags_obj.use_tensor_lr:\n initial_learning_rate = common.BASE_LEARNING_RATE * flags_obj.batch_size / 128\n lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(\n boundaries=list(p[1] * steps_per_epoch for p in LR_SCHEDULE),\n values=[initial_learning_rate] +\n list(p[0] * initial_learning_rate for p in LR_SCHEDULE))\n\n with strategy_scope:\n optimizer = common.get_optimizer(lr_schedule)\n model = resnet_cifar_model.resnet56(classes=cifar_preprocessing.NUM_CLASSES)\n\n # TODO(b/138957587): Remove when force_v2_in_keras_compile is on longer\n # a valid arg for this model. 
Also remove as a valid flag.\n if flags_obj.force_v2_in_keras_compile is not None:\n model.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=optimizer,\n metrics=(['sparse_categorical_accuracy']\n if flags_obj.report_accuracy_metrics else None),\n run_eagerly=flags_obj.run_eagerly,\n experimental_run_tf_function=flags_obj.force_v2_in_keras_compile)\n else:\n model.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=optimizer,\n metrics=(['sparse_categorical_accuracy']\n if flags_obj.report_accuracy_metrics else None),\n run_eagerly=flags_obj.run_eagerly)\n\n train_epochs = flags_obj.train_epochs\n\n callbacks = common.get_callbacks(steps_per_epoch)\n\n if not flags_obj.use_tensor_lr:\n lr_callback = LearningRateBatchScheduler(\n schedule=learning_rate_schedule,\n batch_size=flags_obj.batch_size,\n steps_per_epoch=steps_per_epoch)\n callbacks.append(lr_callback)\n\n # if mutliple epochs, ignore the train_steps flag.\n if train_epochs <= 1 and flags_obj.train_steps:\n steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)\n train_epochs = 1\n\n num_eval_steps = (cifar_preprocessing.NUM_IMAGES['validation'] //\n flags_obj.batch_size)\n\n validation_data = eval_input_dataset\n if flags_obj.skip_eval:\n if flags_obj.set_learning_phase_to_train:\n # TODO(haoyuzhang): Understand slowdown of setting learning phase when\n # not using distribution strategy.\n tf.keras.backend.set_learning_phase(1)\n num_eval_steps = None\n validation_data = None\n\n if not strategy and flags_obj.explicit_gpu_placement:\n # TODO(b/135607227): Add device scope automatically in Keras training loop\n # when not using distribition strategy.\n no_dist_strat_device = tf.device('/device:GPU:0')\n no_dist_strat_device.__enter__()\n\n history = model.fit(train_input_dataset,\n epochs=train_epochs,\n steps_per_epoch=steps_per_epoch,\n callbacks=callbacks,\n validation_steps=num_eval_steps,\n validation_data=validation_data,\n validation_freq=flags_obj.epochs_between_evals,\n verbose=2)\n eval_output = None\n if not flags_obj.skip_eval:\n eval_output = model.evaluate(eval_input_dataset,\n steps=num_eval_steps,\n verbose=2)\n\n if not strategy and flags_obj.explicit_gpu_placement:\n no_dist_strat_device.__exit__()\n\n stats = common.build_stats(history, eval_output, callbacks)\n return stats\n\n\ndef define_cifar_flags():\n common.define_keras_flags(dynamic_loss_scale=False)\n\n flags_core.set_defaults(data_dir='/tmp/cifar10_data/cifar-10-batches-bin',\n model_dir='/tmp/cifar10_model',\n epochs_between_evals=10,\n batch_size=128)\n\n\ndef main(_):\n with logger.benchmark_context(flags.FLAGS):\n return run(flags.FLAGS)\n\n\nif __name__ == '__main__':\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n define_cifar_flags()\n absl_app.run(main)\n"
] | [
[
"tensorflow.device",
"tensorflow.compat.v1.logging.debug",
"tensorflow.test.is_built_with_cuda",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.keras.backend.set_image_data_format",
"tensorflow.keras.backend.set_learning_phase"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
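The Keras CIFAR-10 record above drives its step decay from `LR_SCHEDULE`; here is a standalone sketch of that schedule (the 0.1 base rate stands in for `common.BASE_LEARNING_RATE` and is an assumption):

```python
LR_SCHEDULE = [(0.1, 91), (0.01, 136), (0.001, 182)]   # (multiplier, start epoch)

def scheduled_lr(current_epoch, batch_size, base_lr=0.1):
    # Linear scaling rule: grow the base rate with the global batch size.
    initial = base_lr * batch_size / 128
    lr = initial
    for mult, start_epoch in LR_SCHEDULE:
        if current_epoch >= start_epoch:
            lr = initial * mult
        else:
            break
    return lr

for epoch in (0, 91, 136, 182):
    print(epoch, scheduled_lr(epoch, batch_size=128))
# roughly 0.1, 0.01, 0.001 and 0.0001 at batch size 128
```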
Radioactive-Waste/c3 | [
"947ed882f94a6eaa43990af2eb4adcf31d19bc1f"
] | [
"c3/optimizers/sensitivity.py"
] | [
"\"\"\"Object that deals with the sensitivity test.\"\"\"\n\nimport os\nimport shutil\nimport pickle\nimport itertools\nimport numpy as np\nimport tensorflow as tf\nfrom c3.optimizers.optimizer import Optimizer\nfrom c3.utils.utils import log_setup\nfrom c3.libraries.estimators import (\n g_LL_prime_combined,\n g_LL_prime,\n neg_loglkh_multinom_norm,\n)\n\n\nclass SET(Optimizer):\n \"\"\"Object that deals with the sensitivity test.\n\n Parameters\n ----------\n dir_path : str\n Filepath to save results\n fom : callable\n Figure of merit\n sampling : str\n Sampling method from the sampling library\n batch_sizes : list\n Number of points to select from each dataset\n sweep_map : list\n Identifiers to be swept\n state_labels : list\n Identifiers for the qubit subspaces\n algorithm : callable\n From the algorithm library\n options : dict\n Options to be passed to the algorithm\n same_dyn : boolean\n ?\n run_name : str\n User specified name for the run, will be used as root folder\n \"\"\"\n\n def __init__(\n self,\n dir_path,\n fom,\n estimator_list,\n sampling,\n batch_sizes,\n pmap,\n state_labels=None,\n sweep_map=None,\n sweep_bounds=None,\n algorithm=None,\n run_name=None,\n same_dyn=False,\n options={},\n ):\n \"\"\"Initiliase.\"\"\"\n super().__init__(pmap=pmap, algorithm=algorithm)\n self.fom = fom\n self.estimator_list = estimator_list\n self.sampling = sampling\n self.batch_sizes = batch_sizes\n self.state_labels = state_labels\n self.sweep_map = sweep_map\n self.opt_map = [sweep_map[0]]\n self.sweep_bounds = sweep_bounds\n self.options = options\n self.inverse = False\n self.learn_data = {}\n self.same_dyn = same_dyn\n self.__dir_path = dir_path\n self.__run_name = run_name\n\n def log_setup(self, dir_path, run_name) -> None:\n \"\"\"\n Create the folders to store data.\n\n Parameters\n ----------\n dir_path : str\n Filepath\n run_name : str\n User specified name for the run\n\n \"\"\"\n dir_path = os.path.abspath(self.__dir_path)\n run_name = self.__run_name\n if run_name is None:\n run_name = \"-\".join(\n [\n \"sensitivity\",\n self.algorithm.__name__,\n self.sampling.__name__,\n self.fom.__name__,\n ]\n )\n self.logdir = log_setup(dir_path, run_name)\n self.logname = \"sensitivity.log\"\n shutil.copy2(self.__real_model_folder, self.logdir)\n\n def read_data(self, datafiles):\n # TODO move common methods of sensitivity and c3 to super class\n \"\"\"\n Open data files and read in experiment results.\n\n Parameters\n ----------\n datafiles : list of str\n List of paths for files that contain learning data.\n \"\"\"\n self.__real_model_folder = os.path.dirname(datafiles.values()[0])\n for target, datafile in datafiles.items():\n with open(datafile, \"rb+\") as file:\n self.learn_data[target] = pickle.load(file)\n\n def select_from_data(self, batch_size):\n \"\"\"\n Select a subset of each dataset to compute the goal function on.\n\n Parameters\n ----------\n batch_size : int\n Number of points to select\n\n Returns\n -------\n list\n Indeces of the selected data points.\n \"\"\"\n learn_from = self.learn_from\n sampling = self.sampling\n indeces = sampling(learn_from, batch_size)\n if self.inverse:\n return list(set(all) - set(indeces))\n else:\n return indeces\n\n def sensitivity(self):\n \"\"\"\n Run the sensitivity analysis.\n\n \"\"\"\n self.nice_print = self.exp.print_parameters\n\n print(\"Initial parameters:\")\n print(self.exp.print_parameters())\n for ii in range(len(self.sweep_map)):\n self.dfname = \"data.dat\"\n self.opt_map = [self.sweep_map[ii]]\n 
self.options[\"bounds\"] = [self.sweep_bounds[ii]]\n print(f\"C3:STATUS:Sweeping {self.opt_map}: {self.sweep_bounds[ii]}\")\n self.log_setup(self.dir_path, \"_\".join(self.opt_map[0]))\n self.start_log()\n print(f\"C3:STATUS:Saving as: {os.path.abspath(self.logdir + self.logname)}\")\n x_init = self.exp.get_parameters(self.opt_map, scaled=False)\n self.init_gateset_params = self.exp.gateset.get_parameters()\n self.init_gateset_opt_map = self.exp.gateset.list_parameters()\n try:\n self.algorithm(\n x_init,\n fun=self.fct_to_min,\n fun_grad=self.fct_to_min_autograd,\n grad_lookup=self.lookup_gradient,\n options=self.options,\n )\n except KeyboardInterrupt:\n pass\n self.exp.set_parameters(x_init, self.opt_map, scaled=False)\n\n # #=== Get the resulting data ======================================\n\n # Xs=np.array(list(learner.data.keys()))\n # Ys=np.array(list(learner.data.values()))\n # Ks=np.argsort(Xs)\n # Xs=Xs[Ks]\n # Ys=Ys[Ks]\n\n def goal_run(self, current_params):\n \"\"\"\n Evaluate the figure of merit for the current model parameters.\n\n Parameters\n ----------\n val : tf.Tensor\n Current model parameters\n\n Returns\n -------\n tf.float64\n Figure of merit\n\n \"\"\"\n exp_values = []\n exp_stds = []\n sim_values = []\n exp_shots = []\n goals = []\n seq_weigths = []\n count = 0\n # TODO: seq per point is not constant. Remove.\n\n for target, data in self.learn_data.items():\n\n self.learn_from = data[\"seqs_grouped_by_param_set\"]\n self.gateset_opt_map = data[\"opt_map\"]\n indeces = self.select_from_data(self.batch_sizes[target])\n\n for ipar in indeces:\n # if count % 100 == 0:\n # print(\"count: \" + str(count))\n\n count += 1\n m = self.learn_from[ipar]\n gateset_params = m[\"params\"]\n gateset_opt_map = self.gateset_opt_map\n m_vals = m[\"results\"]\n m_stds = np.array(m[\"results_std\"])\n m_shots = m[\"shots\"]\n sequences = m[\"seqs\"]\n num_seqs = len(sequences)\n if target == \"all\":\n num_seqs = len(sequences) * 3\n\n self.pmap.set_parameters_scaled(current_params)\n self.pmap.model.update_model()\n\n self.pmap.set_parameters(gateset_params, gateset_opt_map)\n # We find the unique gates used in the sequence and compute\n # only them.\n self.exp.opt_gates = list(set(itertools.chain.from_iterable(sequences)))\n self.exp.get_gates()\n pops = self.exp.evaluate(sequences)\n sim_vals = self.exp.process(\n labels=self.state_labels[target], populations=pops\n )\n\n exp_stds.extend(m_stds)\n exp_shots.extend(m_shots)\n\n if target == \"all\":\n goal = neg_loglkh_multinom_norm(\n m_vals,\n tf.stack(sim_vals),\n tf.Variable(m_stds, dtype=tf.float64),\n tf.Variable(m_shots, dtype=tf.float64),\n )\n else:\n goal = g_LL_prime(\n m_vals,\n tf.stack(sim_vals),\n tf.Variable(m_stds, dtype=tf.float64),\n tf.Variable(m_shots, dtype=tf.float64),\n )\n goals.append(goal.numpy())\n seq_weigths.append(num_seqs)\n sim_values.extend(sim_vals)\n exp_values.extend(m_vals)\n\n with open(self.logdir + self.logname, \"a\") as logfile:\n logfile.write(\n f\"\\n Parameterset {ipar + 1}, #{count} of {len(indeces)}:\\n\"\n f\"{str(self.exp.pmap)}\\n\"\n )\n logfile.write(\n \"Sequence Simulation Experiment Std Shots\"\n \" Diff\\n\"\n )\n\n for iseq in range(len(sequences)):\n m_val = np.array(m_vals[iseq])\n m_std = np.array(m_stds[iseq])\n shots = np.array(m_shots[iseq])\n sim_val = sim_vals[iseq].numpy()\n with open(self.logdir + self.logname, \"a\") as logfile:\n for ii in range(len(sim_val)):\n logfile.write(\n f\"{iseq + 1:8} \"\n f\"{float(sim_val[ii]):8.6f} \"\n f\"{float(m_val[ii]):8.6f} 
\"\n f\"{float(m_std[ii]):8.6f} \"\n f\"{float(shots[0]):8} \"\n f\"{float(m_val[ii]-sim_val[ii]):8.6f}\\n\"\n )\n logfile.flush()\n\n goal = g_LL_prime_combined(goals, seq_weigths)\n\n with open(self.logdir + self.logname, \"a\") as logfile:\n logfile.write(\"\\nFinished batch with \")\n logfile.write(\"{}: {}\\n\".format(self.fom.__name__, goal))\n print(\"{}: {}\".format(self.fom.__name__, goal))\n for est in self.estimator_list:\n val = float(est(exp_values, sim_values, exp_stds, exp_shots).numpy())\n logfile.write(\"{}: {}\\n\".format(est.__name__, val))\n # print(\"{}: {}\".format(est.__name__, val))\n print(\"\")\n logfile.flush()\n\n self.optim_status[\"params\"] = [\n par.numpy().tolist() for par in self.exp.get_parameters(self.opt_map)\n ]\n self.optim_status[\"goal\"] = goal\n self.evaluation += 1\n return goal\n"
] | [
[
"tensorflow.stack",
"numpy.array",
"tensorflow.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"2.7",
"1.4",
"2.6",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |