repo_name stringlengths 8–130 | hexsha sequence | file_path sequence | code sequence | apis sequence |
---|---|---|---|---|
itsraina/keras | [
"5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35"
] | [
"keras/feature_column/dense_features_v2.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A layer that produces a dense `Tensor` based on given `feature_columns`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.feature_column import base_feature_layer as kfc\nfrom keras.feature_column import dense_features\nfrom keras.utils import tf_contextlib\n\n# isort: off\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export(\"keras.layers.DenseFeatures\", v1=[])\nclass DenseFeatures(dense_features.DenseFeatures):\n \"\"\"A layer that produces a dense `Tensor` based on given `feature_columns`.\n\n Generally a single example in training data is described with\n FeatureColumns. At the first layer of the model, this column oriented data\n should be converted to a single `Tensor`.\n\n This layer can be called multiple times with different features.\n\n This is the V2 version of this layer that uses name_scopes to create\n variables instead of variable_scopes. But this approach currently lacks\n support for partitioned variables. In that case, use the V1 version instead.\n\n Example:\n\n ```python\n price = tf.feature_column.numeric_column('price')\n keywords_embedded = tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_hash_bucket(\"keywords\",\n 10000),\n dimensions=16)\n columns = [price, keywords_embedded, ...]\n feature_layer = tf.keras.layers.DenseFeatures(columns)\n\n features = tf.io.parse_example(\n ..., features=tf.feature_column.make_parse_example_spec(columns))\n dense_tensor = feature_layer(features)\n for units in [128, 64, 32]:\n dense_tensor = tf.keras.layers.Dense(units, activation='relu')(\n dense_tensor)\n prediction = tf.keras.layers.Dense(1)(dense_tensor)\n ```\n \"\"\"\n\n def __init__(self, feature_columns, trainable=True, name=None, **kwargs):\n \"\"\"Creates a DenseFeatures object.\n\n Args:\n feature_columns: An iterable containing the FeatureColumns to use as\n inputs to your model. All items should be instances of classes\n derived from `DenseColumn` such as `numeric_column`,\n `embedding_column`, `bucketized_column`, `indicator_column`. 
If you\n have categorical features, you can wrap them with an\n `embedding_column` or `indicator_column`.\n trainable: Boolean, whether the layer's variables will be updated via\n gradient descent during training.\n name: Name to give to the DenseFeatures.\n **kwargs: Keyword arguments to construct a layer.\n\n Raises:\n ValueError: if an item in `feature_columns` is not a `DenseColumn`.\n \"\"\"\n super().__init__(\n feature_columns=feature_columns,\n trainable=trainable,\n name=name,\n **kwargs\n )\n self._state_manager = _StateManagerImplV2(self, self.trainable)\n\n def build(self, _):\n for column in self._feature_columns:\n with tf.name_scope(column.name):\n column.create_state(self._state_manager)\n # We would like to call Layer.build and not _DenseFeaturesHelper.build.\n\n super(kfc._BaseFeaturesLayer, self).build(None)\n\n\nclass _StateManagerImplV2(tf.__internal__.feature_column.StateManager):\n \"\"\"Manages the state of DenseFeatures.\"\"\"\n\n def create_variable(\n self,\n feature_column,\n name,\n shape,\n dtype=None,\n trainable=True,\n use_resource=True,\n initializer=None,\n ):\n if name in self._cols_to_vars_map[feature_column]:\n raise ValueError(\"Variable already exists.\")\n\n # We explicitly track these variables since `name` is not guaranteed to\n # be unique and disable manual tracking that the add_weight call does.\n with no_manual_dependency_tracking_scope(self._layer):\n var = self._layer.add_weight(\n name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n trainable=self._trainable and trainable,\n use_resource=use_resource,\n )\n if isinstance(var, tf.__internal__.tracking.Trackable):\n self._layer._track_trackable(var, feature_column.name + \"/\" + name)\n self._cols_to_vars_map[feature_column][name] = var\n return var\n\n\n@tf_contextlib.contextmanager\ndef no_manual_dependency_tracking_scope(obj):\n \"\"\"A context that disables manual dependency tracking for the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. One can then use this\n context manager to disable the tracking the library method does and do your\n own tracking.\n\n For example:\n\n class TestLayer(tf.keras.Layer):\n def build():\n with no_manual_dependency_tracking_scope(self):\n var = self.add_weight(\"name1\") # Creates a var and doesn't track it\n # We track variable with name `name2`\n self._track_trackable(\"name2\", var)\n\n Args:\n obj: A trackable object.\n\n Yields:\n a scope in which the object doesn't track dependencies manually.\n \"\"\"\n\n previous_value = getattr(obj, \"_manual_tracking\", True)\n obj._manual_tracking = False\n try:\n yield\n finally:\n obj._manual_tracking = previous_value\n"
] | [
[
"tensorflow.compat.v2.name_scope",
"tensorflow.python.util.tf_export.keras_export"
]
] |
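A minimal sketch of the `tf.name_scope` grouping this row records; `DenseFeatures.build` above opens one such scope per feature column. The scope and op names here are hypothetical:

```python
import tensorflow.compat.v2 as tf

# Ops created inside the scope are grouped under "price/..." when traced
# into a graph, mirroring the per-column scoping in DenseFeatures.build.
with tf.name_scope("price"):
    x = tf.constant([[1.0], [2.0]])
    y = tf.square(x)
```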
haribharadwaj/statsmodels | [
"8675b890607fe6f116b1186dcba4c387c5e3778a"
] | [
"statsmodels/regression/feasible_gls.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on Tue Dec 20 20:24:20 2011\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\n\"\"\"\n\nfrom statsmodels.compat.python import range\nimport numpy as np\nimport statsmodels.base.model as base\nfrom statsmodels.regression.linear_model import OLS, GLS, WLS, RegressionResults\n\n\ndef atleast_2dcols(x):\n x = np.asarray(x)\n if x.ndim == 1:\n x = x[:,None]\n return x\n\n\nclass GLSHet2(GLS):\n '''WLS with heteroscedasticity that depends on explanatory variables\n\n note: mixing GLS sigma and weights for heteroscedasticity might not make\n sense\n\n I think rewriting following the pattern of GLSAR is better\n stopping criteria: improve in GLSAR also, e.g. change in rho\n\n '''\n\n\n def __init__(self, endog, exog, exog_var, sigma=None):\n self.exog_var = atleast_2dcols(exog_var)\n super(self.__class__, self).__init__(endog, exog, sigma=sigma)\n\n\n def fit(self, lambd=1.):\n #maybe iterate\n #preliminary estimate\n res_gls = GLS(self.endog, self.exog, sigma=self.sigma).fit()\n res_resid = OLS(res_gls.resid**2, self.exog_var).fit()\n #or log-link\n #res_resid = OLS(np.log(res_gls.resid**2), self.exog_var).fit()\n #here I could use whiten and current instance instead of delegating\n #but this is easier\n #see pattern of GLSAR, calls self.initialize and self.fit\n res_wls = WLS(self.endog, self.exog, weights=1./res_resid.fittedvalues).fit()\n\n res_wls._results.results_residual_regression = res_resid\n return res_wls\n\n\nclass GLSHet(WLS):\n \"\"\"\n A regression model with an estimated heteroscedasticity.\n\n A subclass of WLS, that additionally estimates the weight matrix as a\n function of additional explanatory variables.\n\n Parameters\n ----------\n endog : array_like\n exog : array_like\n exog_var : array_like, 1d or 2d\n regressors, explanatory variables for the variance\n weights : array_like or None\n If weights are given, then they are used in the first step estimation.\n link : link function or None\n If None, then the variance is assumed to be a linear combination of\n the exog_var. If given, then ... not tested yet\n\n *extra attributes*\n\n history : dict\n contains the parameter estimates in both regression for each iteration\n\n result instance has\n\n results_residual_regression : OLS result instance\n result of heteroscedasticity estimation\n\n except for fit_iterative all methods are inherited from WLS.\n\n Notes\n -----\n GLSHet is considered to be experimental.\n\n `fit` is just standard WLS fit for fixed weights\n `fit_iterative` updates the estimate for weights, see its docstring\n\n The two alternative for handling heteroscedasticity in the data are to\n use heteroscedasticity robust standard errors or estimating the\n heteroscedasticity\n Estimating heteroscedasticity and using weighted least squares produces\n smaller confidence intervals for the estimated parameters then the\n heteroscedasticity robust standard errors if the heteroscedasticity is\n correctly specified. If the heteroscedasticity is incorrectly specified\n then the estimated covariance is inconsistent.\n\n Stock and Watson for example argue in favor of using OLS with\n heteroscedasticity robust standard errors instead of GLSHet sind we are\n seldom sure enough about the correct specification (in economics).\n\n GLSHet has asymptotically the same distribution as WLS if the true\n weights are know. 
In both cases the asymptotic distribution of the\n parameter estimates is the normal distribution.\n\n The assumption of the model:\n\n y = X*beta + u,\n with E(u) = 0, E(X*u)=0, var(u_i) = z_i*gamma\n or for vector of all observations Sigma = diag(Z*gamma)\n\n where\n y : endog (nobs)\n X : exog (nobs, k_vars)\n Z : exog_var (nobs, k_vars2)\n beta, gamma estimated parameters\n\n If a link is specified, then the heteroscedasticity is\n\n var(u_i) = link.inverse(z_i*gamma), or\n link(var(u_i)) = z_i*gamma\n\n for example for log-linkg\n var(u_i) = exp(z_i*gamma)\n\n\n Usage : see example ....\n\n TODO: test link option\n\n \"\"\"\n def __init__(self, endog, exog, exog_var=None, weights=None, link=None):\n self.exog_var = atleast_2dcols(exog_var)\n if weights is None:\n weights = np.ones(endog.shape)\n if link is not None:\n self.link = link\n self.linkinv = link.inverse #as defined in families.links\n else:\n self.link = lambda x: x #no transformation\n self.linkinv = lambda x: x\n\n super(self.__class__, self).__init__(endog, exog, weights=weights)\n\n def iterative_fit(self, maxiter=3):\n \"\"\"\n Perform an iterative two-step procedure to estimate a WLS model.\n\n The model is assumed to have heteroscedastic errors.\n The variance is estimated by OLS regression of the link transformed\n squared residuals on Z, i.e.::\n\n link(sigma_i) = x_i*gamma.\n\n Parameters\n ----------\n maxiter : integer, optional\n the number of iterations\n\n Notes\n -----\n maxiter=1: returns the estimated based on given weights\n maxiter=2: performs a second estimation with the updated weights,\n this is 2-step estimation\n maxiter>2: iteratively estimate and update the weights\n\n TODO: possible extension stop iteration if change in parameter\n estimates is smaller than x_tol\n\n Repeated calls to fit_iterative, will do one redundant pinv_wexog\n calculation. Calling fit_iterative(maxiter) ones does not do any\n redundant recalculations (whitening or calculating pinv_wexog).\n\n \"\"\"\n\n import collections\n self.history = collections.defaultdict(list) #not really necessary\n res_resid = None #if maxiter < 2 no updating\n for i in range(maxiter):\n #pinv_wexog is cached\n if hasattr(self, 'pinv_wexog'):\n del self.pinv_wexog\n #self.initialize()\n #print 'wls self',\n results = self.fit()\n self.history['self_params'].append(results.params)\n if not i == maxiter-1: #skip for last iteration, could break instead\n #print 'ols',\n self.results_old = results #for debugging\n #estimate heteroscedasticity\n res_resid = OLS(self.link(results.resid**2), self.exog_var).fit()\n self.history['ols_params'].append(res_resid.params)\n #update weights\n self.weights = 1./self.linkinv(res_resid.fittedvalues)\n self.weights /= self.weights.max() #not required\n self.weights[self.weights < 1e-14] = 1e-14 #clip\n #print 'in iter', i, self.weights.var() #debug, do weights change\n self.initialize()\n\n #note results is the wrapper, results._results is the results instance\n results._results.results_residual_regression = res_resid\n return results\n"
] | [
[
"numpy.ones",
"numpy.asarray"
]
] |
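A hedged sketch of how `GLSHet.iterative_fit` might be exercised; the data-generating process is hypothetical and the import path is the `file_path` recorded in this row:

```python
import numpy as np
from statsmodels.regression.feasible_gls import GLSHet  # path per this row's file_path

rng = np.random.default_rng(0)
n = 200
x = rng.uniform(1.0, 5.0, n)
X = np.column_stack([np.ones(n), x])          # mean regressors
Z = X                                         # variance regressors (exog_var)
u = rng.normal(scale=np.sqrt(0.5 + 0.5 * x))  # variance linear in x
y = X @ np.array([1.0, 2.0]) + u

res = GLSHet(y, X, exog_var=Z).iterative_fit(maxiter=3)
print(res.params)  # beta estimates
print(res._results.results_residual_regression.params)  # gamma estimates
```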
Westlake-AI/openmixup | [
"ea81250819e740dd823e30cb7ce382d14a3c1b91"
] | [
"openmixup/models/heads/mim_head.py"
] | [
"import torch\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule\nfrom torch.nn import functional as F\nfrom mmcv.cnn.utils.weight_init import trunc_normal_init\n\nfrom ..builder import build_loss\nfrom ..registry import HEADS\nfrom .cls_head import ClsHead\nfrom openmixup.utils import print_log\n\n\[email protected]_module\nclass MAEPretrainHead(BaseModule):\n \"\"\"Pre-training head for MAE.\n\n Args:\n norm_pix_loss (bool): Whether or not normalize target.\n Defaults to False.\n patch_size (int): Patch size. Defaults to 16.\n \"\"\"\n\n def __init__(self, norm_pix=False, patch_size=16):\n super(MAEPretrainHead, self).__init__()\n self.norm_pix = norm_pix\n self.patch_size = patch_size\n\n def patchify(self, imgs):\n\n p = self.patch_size\n assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0\n\n h = w = imgs.shape[2] // p\n x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))\n x = torch.einsum('nchpwq->nhwpqc', x)\n x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))\n return x\n\n def forward(self, x, x_rec, mask):\n losses = dict()\n target = self.patchify(x)\n if self.norm_pix:\n mean = target.mean(dim=-1, keepdim=True)\n var = target.var(dim=-1, keepdim=True)\n target = (target - mean) / (var + 1.e-6)**.5\n\n loss = (x_rec - target)**2\n loss = loss.mean(dim=-1)\n\n loss = (loss * mask).sum() / mask.sum()\n losses['loss'] = loss\n return losses\n\n\[email protected]_module()\nclass MAEFinetuneHead(ClsHead):\n \"\"\"Fine-tuning head for MAE.\n\n Args:\n embed_dim (int): The dim of the feature before the classifier head.\n num_classes (int): The total classes. Defaults to 1000.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(MAEFinetuneHead, self).__init__(**kwargs)\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n trunc_normal_init(m, std=2e-5, bias=0)\n\n def forward(self, x):\n \"\"\"\"Get the logits.\"\"\"\n assert isinstance(x, (tuple, list)) and len(x) == 1\n x = x[0]\n return [self.fc(x)]\n\n\[email protected]_module()\nclass MAELinprobeHead(ClsHead):\n \"\"\"Linear probing head for MAE.\n\n Args:\n embed_dim (int): The dim of the feature before the classifier head.\n num_classes (int): The total classes. 
Defaults to 1000.\n \"\"\"\n\n def __init__(self, in_channels=786, **kwargs):\n super(MAELinprobeHead, self).__init__(in_channels=in_channels, **kwargs)\n self.bn = nn.BatchNorm1d(in_channels, affine=False, eps=1e-6)\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n trunc_normal_init(m, std=0.01, bias=0)\n\n def forward(self, x):\n \"\"\"\"Get the logits.\"\"\"\n assert isinstance(x, (tuple, list)) and len(x) == 1\n x = self.bn(x[0])\n return [self.fc(x)]\n\n\[email protected]_module\nclass SimMIMHead(BaseModule):\n \"\"\"Pretrain Head for SimMIM.\n\n Args:\n encoder_in_channels (int): Number of input channels for encoder.\n \"\"\"\n\n def __init__(self, encoder_in_channels=3):\n super(SimMIMHead, self).__init__()\n self.encoder_in_channels = encoder_in_channels\n\n def forward(self, x, x_rec, mask):\n scale_h, scale_w = x.size(2) / mask.size(1), x.size(3) / mask.size(2)\n if scale_h > 1:\n mask = mask.repeat_interleave(int(scale_h), 1).repeat_interleave(\n int(scale_w), 2).unsqueeze(1).contiguous()\n else:\n mask = F.interpolate(mask.type_as(x).unsqueeze(1),\n scale_factor=(scale_h, scale_w), mode=\"nearest\")\n \n loss_rec = F.l1_loss(x_rec, x, reduction='none')\n loss = (loss_rec * mask).sum() / (mask.sum() +\n 1e-5) / self.encoder_in_channels\n losses = dict()\n losses['loss'] = loss\n\n return losses\n\n\[email protected]_module\nclass MIMHead(BaseModule):\n \"\"\"Head for A2MIM training.\n\n Args:\n loss (dict): Config of regression loss.\n encoder_in_channels (int): Number of input channels for encoder.\n unmask_weight (float): Loss weight for unmasked patches.\n fft_weight (float): Loss weight for the fft prediction loss. Default to 0.\n fft_reweight (bool): Whether to use the fft reweight loss. Default to False.\n fft_focal (bool): Whether to adopt the focal fft loss. Default to False.\n fft_unmask_replace (str): Mode to replace (detach) unmask patches for the fft\n loss, in {None, 'target', 'prediction', 'mean', 'mixed',}.\n fft_unmask_weight (float): Loss weight to caculate the fft loss on unmask\n tokens. Default to 0.\n \"\"\"\n\n def __init__(self,\n loss=dict(\n type='RegressionLoss', loss_weight=1.0, mode=\"l1_loss\"),\n encoder_in_channels=3,\n unmask_weight=0,\n fft_weight=0,\n fft_reweight=False,\n fft_focal=False,\n fft_unmask_replace=None,\n fft_unmask_weight=0,\n **kwargs,\n ):\n super(MIMHead, self).__init__()\n self.encoder_in_channels = encoder_in_channels\n self.unmask_weight = unmask_weight\n self.fft_weight = fft_weight\n self.fft_reweight = fft_reweight\n self.fft_focal = fft_focal\n self.fft_unmask_weight = fft_unmask_weight\n self.fft_unmask_replace = fft_unmask_replace\n assert fft_unmask_replace in [None, 'target', 'prediction', 'mean', 'mixed',]\n assert 0 <= unmask_weight <= 1 and 0 <= fft_unmask_weight <= 1\n if self.unmask_weight < 1:\n if fft_unmask_replace is None and fft_weight > 0:\n self.fft_unmask_replace = 'target'\n print_log(\"When using the fft loss, `fft_unmask_replace` should \" + \\\n \"not be None. 
Reset as `fft_unmask_replace='target'`.\")\n \n # spatial loss\n assert loss is None or isinstance(loss, dict)\n if loss is None:\n loss = dict(\n type='RegressionLoss', loss_weight=1.0, mode=\"l1_loss\")\n self.criterion = build_loss(loss)\n # fft loss\n if fft_focal:\n fft_loss = dict(\n type='FocalFrequencyLoss', loss_weight=1.0, alpha=1.0,\n ave_spectrum=True, log_matrix=True, batch_matrix=True)\n else:\n fft_loss = loss\n if loss[\"mode\"] not in [\"l1_loss\", \"mse_loss\", \"focal_l1_loss\", \"focal_mse_loss\",]:\n fft_loss['mode'] = \"l1_loss\"\n self.fft_loss = build_loss(fft_loss)\n\n def forward(self, x, x_rec, mask):\n # upsampling mask\n scale_h, scale_w = x.size(2) / mask.size(1), x.size(3) / mask.size(2)\n if scale_h > 1:\n mask = mask.repeat_interleave(int(scale_h), 1).repeat_interleave(\n int(scale_w), 2).unsqueeze(1).contiguous()\n else:\n mask = F.interpolate(mask.type_as(x).unsqueeze(1),\n scale_factor=(scale_h, scale_w), mode=\"nearest\")\n \n # spatial loss\n if self.unmask_weight > 0.:\n # reweight unmasked patches\n mask_s = mask.clone()\n mask_s = mask_s + (1. - mask_s) * self.unmask_weight\n else:\n mask_s = mask\n loss_rec = self.criterion(x_rec, target=x, reduction_override='none')\n loss_rec = (loss_rec * mask_s).sum() / (mask_s.sum() + 1e-5) / self.encoder_in_channels\n \n # fourier domain loss\n if self.fft_weight > 0:\n # replace unmask patches (with detach)\n x_replace = None\n if self.fft_unmask_replace is not None:\n if self.fft_unmask_replace == 'target':\n x_replace = x.clone()\n elif self.fft_unmask_replace == 'prediction':\n x_replace = x_rec.clone().detach()\n elif self.fft_unmask_replace == 'mean':\n x_replace = x.mean(dim=[2, 3], keepdim=True).expand(x.size())\n elif self.fft_unmask_replace == 'mixed':\n x_replace = 0.5 * x_rec.clone().detach() + 0.5 * x.clone()\n if self.fft_unmask_weight < 1:\n mask_f = mask.clone()\n mask_f = mask_f + (1. - mask_f) * self.fft_unmask_weight\n x_rec = (x_rec * mask_f) + (x_replace * (1. - mask_f)) # replace unmask tokens\n \n # apply fft loss\n if self.fft_focal:\n loss_fft = self.fft_loss(x_rec, x)\n else:\n f_x = torch.fft.fftn(x, dim=(2, 3), norm='ortho')\n f_x_rec = torch.fft.fftn(x_rec, dim=(2, 3), norm='ortho')\n if self.fft_reweight:\n loss_fft = self.fft_loss(f_x_rec, target=f_x, reduction_override='none')\n fft_weight = loss_fft.clone().detach()\n loss_fft = (fft_weight * loss_fft).mean()\n else:\n loss_fft = self.fft_loss(f_x_rec, target=f_x, reduction_override='mean')\n loss_rec += self.fft_weight * loss_fft\n \n losses = dict()\n losses['loss'] = loss_rec\n \n return losses\n"
] | [
[
"torch.einsum",
"torch.fft.fftn",
"torch.nn.functional.l1_loss",
"torch.nn.BatchNorm1d"
]
] |
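The `torch.einsum` call this row lists implements MAE-style patchification; a standalone sketch with hypothetical shapes:

```python
import torch

# (N, 3, H, W) -> (N, num_patches, p*p*3), assuming H == W and H % p == 0,
# the same reshape/einsum/reshape chain as MAEPretrainHead.patchify.
imgs = torch.randn(2, 3, 32, 32)
p = 16
h = w = imgs.shape[2] // p
x = imgs.reshape(2, 3, h, p, w, p)
x = torch.einsum('nchpwq->nhwpqc', x)
patches = x.reshape(2, h * w, p * p * 3)
print(patches.shape)  # torch.Size([2, 4, 768])
```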
fabiansinz/locker | [
"9ca397d0a9aa747552bc43188b07056b87c6e9f0"
] | [
"scripts/fig3_locking_across_frequencies.py"
] | [
"import matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib.collections import PolyCollection\nfrom numpy.fft import fft, fftfreq, fftshift\nfrom locker import mkdir\nfrom locker.analysis import *\nfrom locker.data import *\nfrom scripts.config import params as plot_params, FormatedFigure\n\n\ndef generate_filename(cell, contrast):\n dir = 'figures/figure_locking_across_frequencies/%s/' % (cell['cell_type'],)\n mkdir(dir)\n return dir + '%s_contrast%.2f.pdf' % (cell['cell_id'], contrast)\n\n\ndef gauss(t, m, v):\n return np.exp(-(t - m) ** 2 / 2 / v)\n\n\nclass FigureMechanisms(FormatedFigure):\n def prepare(self):\n sns.set_context('paper')\n sns.set_style('ticks')\n with plt.rc_context(plot_params):\n self.fig = plt.figure(figsize=(7, 5), dpi=400)\n gs = plt.GridSpec(3, 4)\n self.ax = {}\n self.ax['violin'] = self.fig.add_subplot(gs[:3, 3])\n self.ax['spectrum'] = self.fig.add_subplot(gs[:3, :3])\n\n self.gs = gs\n\n @staticmethod\n def format_spectrum(ax):\n ax.set_xlim((0, 1500))\n ax.set_xticks(np.linspace(0, 1500, 7))\n ax.legend(bbox_to_anchor=(1.05, 1), bbox_transform=ax.transAxes, ncol=3, frameon=False)\n sns.despine(ax=ax, left=True, trim=True, offset=0)\n ax.set_yticks([])\n ax.set_ylim((-.5, 9.5))\n ax.set_xlabel('frequency [Hz]')\n ax.text(-0.01, 0.99, 'A', transform=ax.transAxes, fontweight='bold')\n\n @staticmethod\n def format_violin(ax):\n ax.set_xlim((0, 2 * np.pi))\n ax.set_xticks(np.linspace(0, 2 * np.pi, 5))\n ax.set_xticklabels([r'$0$', r'$\\frac{\\pi}{2}$', r'$\\pi$', r'$\\frac{3\\pi}{4}$', r'$2\\pi$'])\n ax.set_ylabel(r'$\\Delta f$ [Hz]')\n ax.set_xlabel('phase')\n for art in ax.get_children():\n if isinstance(art, PolyCollection):\n art.set_edgecolor(None)\n leg = ax.legend(ncol=1, title='PSTH per cycle of', bbox_to_anchor=(1, 0.97), frameon=False)\n\n plt.setp(leg.get_title(), fontsize=leg.get_texts()[0].get_fontsize())\n ax.text(-0.15, 1.01, 'B', transform=ax.transAxes, fontweight='bold', va='top', ha='right')\n sns.despine(ax=ax, trim=True, offset=0)\n\n\n def format_figure(self):\n self.ax['violin'].set_ylim([e / .8 for e in self.ax['spectrum'].get_ylim()])\n for a in self.ax.values():\n a.tick_params(length=3, width=1)\n a.spines['bottom'].set_linewidth(1)\n a.spines['left'].set_linewidth(1)\n\n self.gs.tight_layout(self.fig)\n\n\nif __name__ == \"__main__\":\n f_max = 2000 # Hz\n N = 10\n delta_f = 200\n frequency_restriction = '(delta_f > -319) or (delta_f < -381)'\n runs = Runs()\n for cell in (Cells() & dict(cell_type='p-unit', cell_id=\"2014-12-03-aj\")).fetch(as_dict=True):\n # for cell in (Cells() & dict(cell_type='p-unit')).fetch.as_dict:\n\n unit = cell['cell_type']\n print('Processing', cell['cell_id'])\n\n # for contrast in [5, 10, 20]:\n for contrast in [20]:\n print(\"contrast: %.2f%%\" % (contrast,))\n\n target_trials = SecondOrderSpikeSpectra() * runs & cell & \\\n dict(contrast=contrast, am=0, n_harmonics=0) & frequency_restriction\n if target_trials:\n with FigureMechanisms(filename=generate_filename(cell, contrast=contrast)) as (fig, ax):\n\n # --- plot spectra\n y = [0]\n stim_freq, eod_freq, deltaf_freq = [], [], []\n done = []\n for i, spec in enumerate(sorted(target_trials.fetch(as_dict=True), key=lambda x: x['delta_f'])):\n if spec['delta_f'] in done:\n continue\n else:\n done.append(spec['delta_f'])\n print(u\"\\t\\t\\u0394 f=%.2f\" % spec['delta_f'])\n\n f, v = spec['frequencies'], spec['vector_strengths']\n idx = (f >= 0) & (f <= f_max) & ~np.isnan(v)\n ax['spectrum'].fill_between(f[idx], y[-1] + 0 * f[idx], y[-1] + v[idx], lw=0,\n 
color='k')\n if i == 0:\n ax['spectrum'].plot([20, 20], [8., 8.5], '-', color='k', lw=2,\n solid_capstyle='butt')\n ax['spectrum'].text(40, 8.15, '0.5 vector strength', fontsize=6)\n y.append(y[-1] + .8)\n stim_freq.append(spec['eod'] + spec['delta_f'])\n deltaf_freq.append(spec['delta_f'])\n eod_freq.append(spec['eod'])\n\n ax['spectrum'].plot(eod_freq, y[:-1], '-', alpha=.25, zorder=-10, lw=4, color=colordict['eod'],\n label='EODf')\n ax['spectrum'].plot(stim_freq, y[:-1], '-', alpha=.25, zorder=-10, lw=4,\n color=colordict['stimulus'],\n label='stimulus')\n ax['spectrum'].plot(np.abs(deltaf_freq), y[:-1], '-', alpha=.25, zorder=-10, lw=4,\n color=colordict['delta_f'],\n label=r'$|\\Delta f|$')\n\n # --- plot locking\n PhaseLockingHistogram().violin_plot(ax['violin'], restrictions=target_trials.proj(),\n palette=[colordict['eod'], colordict['stimulus']])\n ax['violin'].legend().set_visible(False)\n\n"
] | [
[
"matplotlib.use"
]
] |
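The only API this row lists is backend selection. A minimal sketch of the pattern (output filename hypothetical): `matplotlib.use('Agg')` must run before `pyplot` is imported, exactly as the script above does on its first two lines.

```python
import matplotlib
matplotlib.use('Agg')      # non-interactive backend; set before importing pyplot
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(7, 5), dpi=400)
ax.plot([0, 1], [0, 1])
fig.savefig('figure.pdf')  # Agg renders straight to file, no display needed
```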
thsis/NIS18 | [
"1f2a7be1ab209fa7c0a25cb8eace744336b07c1f"
] | [
"tests/tests_helpers.py"
] | [
"import numpy as np\nfrom algorithms import helpers\n\n\ndef test_QR(Ntests):\n passed = 0\n critical = 0\n for _ in range(Ntests):\n try:\n n = np.random.randint(2, 11)\n X = np.random.uniform(low=0.0,\n high=100.0,\n size=(n, n))\n Q, R = helpers.qr_factorize(X)\n assert all(np.isclose(Q.dot(R), X).flatten())\n passed += 1\n except AssertionError:\n print(\"AssertionError with:\")\n print(X)\n continue\n except Exception:\n print(\"Other Error with:\")\n print(X)\n critical += 1\n\n print(\"Test Results:\")\n print(\"Passed {} of {} Tests.\".format(passed, Ntests))\n print(\"Failed {} tests.\".format(Ntests-passed-critical))\n print(\"{} tests failed critically\".format(critical))\n if passed == Ntests:\n return True\n else:\n return False\n\n\nassert test_QR(1000)\n"
] | [
[
"numpy.random.uniform",
"numpy.random.randint"
]
] |
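The same property-based check as `test_QR`, sketched against `numpy.linalg.qr` as a reference factorization in place of `helpers.qr_factorize`:

```python
import numpy as np

n = np.random.randint(2, 11)
X = np.random.uniform(low=0.0, high=100.0, size=(n, n))
Q, R = np.linalg.qr(X)

assert np.allclose(Q @ R, X)            # factorization reproduces X
assert np.allclose(Q.T @ Q, np.eye(n))  # Q has orthonormal columns
```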
starasteh/DeepLearning_from_scratch | [
"6ed4685e4da57ad5ea51edf84010f2cc9725a2ba"
] | [
"Layers/LSTM.py"
] | [
"'''\nCreated on January 2020.\n\n@author: Soroosh Tayebi Arasteh <[email protected]>\nhttps://github.com/tayebiarasteh/\n'''\n\nfrom Layers.Base import *\nimport numpy as np\nimport pdb\nfrom Layers import Sigmoid, FullyConnected, TanH\nimport copy\n\n\nclass LSTM(base_layer):\n def __init__(self, input_size, hidden_size, output_size):\n '''\n :input_size: denotes the dimension of the input vector\n :hidden_size: denotes the dimension of the hidden state.\n '''\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.hidden_state = np.zeros((self.hidden_size))\n self.cell_state = np.zeros((self.hidden_size))\n\n # Sets the boolean state representing whether the RNN\n # regards subsequent sequences as a belonging to the same long sequence.\n self._memorize = False\n\n self._optimizer = None\n self._gradient_weights = 0\n\n # The weights are defined as the weights which are involved in calculating the\n # hidden state as a stacked tensor. E.g. if the hidden state is computed with\n # a single Fully Connected layer, which receives a stack of the hidden state\n # and the input tensor, the weights of this particular Fully Connected Layer,\n # are the weights considered to be weights for the whole class.\n self._weights = None\n\n self.sigmoid1 = Sigmoid.Sigmoid()\n self.sigmoid2 = Sigmoid.Sigmoid()\n self.sigmoid3 = Sigmoid.Sigmoid()\n self.sigmoid4 = Sigmoid.Sigmoid()\n self.tanh1 = TanH.TanH()\n self.tanh2 = TanH.TanH()\n self.fully_middle = FullyConnected.FullyConnected(input_size=input_size + hidden_size ,\n output_size=4 * hidden_size)\n self.fully_out = FullyConnected.FullyConnected(input_size=hidden_size, output_size=output_size)\n\n\n def forward(self, input_tensor):\n output_tensor = np.zeros((input_tensor.shape[0], self.output_size))\n\n # activations and inputs for the backward\n self.o = []\n self.i = []\n self.f = []\n self.C_tilda = []\n self.cell_state_b = []\n self.hidden_state_b = []\n self.tanh2_out = []\n self.fully_middle_input = []\n\n if self._memorize == False:\n self.hidden_state = np.zeros((self.hidden_size))\n self.cell_state = np.zeros((self.hidden_size))\n\n self.cell_state_b.append(self.cell_state)\n self.hidden_state_b.append(np.zeros((self.hidden_size + 1)))\n\n # giving inputs sequentially\n for t, batch in enumerate(input_tensor):\n # Concatenation of input and previous hidden state\n X_tilda = np.concatenate([self.hidden_state, batch])\n\n # first fully connected layer\n fully_middle_out = self.fully_middle.forward(X_tilda)\n self.fully_middle_input.append((self.fully_middle.input_tensor))\n\n '''deconcatenating to 4 vectors'''\n # Calculate forget gate\n f = self.sigmoid1.forward(fully_middle_out[:fully_middle_out.shape[0]//4])\n self.f.append(f)\n\n # Calculate input gate\n i = self.sigmoid2.forward(fully_middle_out[fully_middle_out.shape[0]//4:fully_middle_out.shape[0]//2])\n self.i.append(i)\n\n # Calculate candidate\n C_tilda = self.tanh1.forward(fully_middle_out[fully_middle_out.shape[0]//2: 3*fully_middle_out.shape[0]//4])\n self.C_tilda.append(C_tilda)\n\n # Calculate memory state\n self.cell_state = f * self.cell_state + i * C_tilda\n self.cell_state_b.append(self.cell_state)\n\n # Calculate output gate\n o = self.sigmoid3.forward(fully_middle_out[3*fully_middle_out.shape[0]//4:])\n self.o.append(o)\n\n # tanh2 output\n tanh2_out = self.tanh2.forward(self.cell_state)\n self.tanh2_out.append(tanh2_out)\n\n # Calculate hidden state\n self.hidden_state = o * tanh2_out\n\n # Calculate 
logits\n y = self.fully_out.forward(self.hidden_state)\n self.hidden_state_b.append(self.fully_out.input_tensor)\n y = self.sigmoid4.forward(y)\n output_tensor[t] = y\n\n self.output_tensor = output_tensor\n return output_tensor\n\n\n\n def backward(self, error_tensor):\n gradient_input = np.zeros((error_tensor.shape[0], self.input_size))\n\n # initializing the hidden and cell state gradients\n gradient_hidden = np.zeros((error_tensor.shape[0] + 1, self.hidden_size))\n gradient_cell = np.zeros((error_tensor.shape[0] + 1, self.hidden_size))\n gradient_weights_out = 0\n gradient_weights_middle = 0\n\n # giving inputs sequentially\n for t in reversed(range(len(error_tensor))):\n\n # gradient of output w.r.t input\n self.sigmoid4.activation = self.output_tensor[t]\n gradient_out_wrt_in = self.sigmoid4.backward(np.copy(error_tensor)[t])\n self.fully_out.input_tensor = self.hidden_state_b[t]\n gradient_out_wrt_in = self.fully_out.backward(gradient_out_wrt_in)\n gradient_weights_out += self.fully_out.gradient_weights\n\n # gradient summing\n out_hidden = gradient_hidden[t] + gradient_out_wrt_in\n\n # gradient output gate\n o_gradient = np.copy(out_hidden) * self.tanh2_out[t]\n self.sigmoid3.activation = self.o[t]\n o_gradient = self.sigmoid3.backward(o_gradient)\n\n # gradient tanh2\n gradient_out_wrt_in_cell = np.copy(out_hidden) * self.o[t]\n self.tanh2.activation = self.tanh2_out[t]\n gradient_out_wrt_in_cell = self.tanh2.backward(gradient_out_wrt_in_cell)\n\n # gradient summing\n out_cell = gradient_out_wrt_in_cell + gradient_cell[t + 1]\n\n '''gradient of the summation'''\n # gradient candidate\n C_tilda_gradient = np.copy(out_cell) * self.i[t]\n self.tanh1.activation = self.C_tilda[t]\n C_tilda_gradient = self.tanh1.backward(C_tilda_gradient)\n\n # gradient input gate\n i_gradient = np.copy(out_cell) * self.C_tilda[t]\n self.sigmoid2.activation = self.i[t]\n i_gradient = self.sigmoid2.backward(i_gradient)\n\n # gradient cell\n gradient_cell[t] = np.copy(out_cell) * self.f[t]\n\n # gradient forget gate\n f_gradient = np.copy(out_cell) * self.cell_state_b[t]\n self.sigmoid1.activation = self.f[t]\n f_gradient = self.sigmoid1.backward(f_gradient)\n\n # concatenation for the fully connected\n self.fully_middle.input_tensor = self.fully_middle_input[t]\n y = self.fully_middle.backward(np.concatenate([f_gradient, i_gradient, C_tilda_gradient, o_gradient]))\n gradient_weights_middle += self.fully_middle.gradient_weights\n\n gradient_hidden[t - 1] = y[:self.hidden_size]\n gradient_input[t] = y[self.hidden_size:]\n\n if self._optimizer:\n self.fully_out.weights = self._optimizer2.calculate_update(self.fully_out.weights, gradient_weights_out)\n self.fully_middle.weights = self._optimizer.calculate_update(self.fully_middle.weights, gradient_weights_middle)\n\n self.final_gradient_weights = gradient_weights_middle\n return gradient_input\n\n\n def initialize(self, weights_initializer, bias_initializer):\n self.fully_middle.initialize(weights_initializer, bias_initializer)\n self.fully_out.initialize(weights_initializer, bias_initializer)\n\n\n def calculate_regularization_loss(self, layer):\n r_loss = 0\n if hasattr(layer, 'optimizer'):\n if layer.optimizer:\n if layer.optimizer.regularizer:\n r_loss += layer.optimizer.regularizer.norm(layer.weights)\n return r_loss\n\n\n\n '''Properties'''\n\n @property\n def memorize(self):\n return self._memorize\n @memorize.setter\n def memorize(self, value):\n self._memorize = value\n\n @property\n def gradient_weights(self):\n return self.final_gradient_weights\n 
@gradient_weights.setter\n def gradient_weights(self, value):\n self.fully_middle.gradient_weights = value\n @gradient_weights.deleter\n def gradient_weights(self):\n del self.fully_middle.gradient_weights\n\n @property\n def weights(self):\n return self.fully_middle.weights\n @weights.setter\n def weights(self, value):\n self.fully_middle.weights = value\n @weights.deleter\n def weights(self):\n del self.fully_middle.weights\n\n @property\n def optimizer(self):\n return self._optimizer\n @optimizer.setter\n def optimizer(self, value):\n self._optimizer = value\n self._optimizer2 = copy.deepcopy(self._optimizer)\n @optimizer.deleter\n def optimizer(self):\n del self._optimizer"
] | [
[
"numpy.concatenate",
"numpy.zeros",
"numpy.copy"
]
] |
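A numpy sketch of the stacked-gate trick used in `LSTM.forward`: one affine map over the concatenated `[h, x]`, then a four-way split into forget, input, candidate, and output pre-activations (all sizes and values hypothetical):

```python
import numpy as np

hidden_size, input_size = 4, 3
h_prev = np.zeros(hidden_size)
c_prev = np.zeros(hidden_size)
x_t = np.random.randn(input_size)

# One projection for all four gates, like the fully_middle layer above.
W = np.random.randn(4 * hidden_size, hidden_size + input_size)
b = np.random.randn(4 * hidden_size)
z = W @ np.concatenate([h_prev, x_t]) + b

sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
f, i, c_tilde, o = np.split(z, 4)  # "deconcatenating to 4 vectors"
c_t = sigmoid(f) * c_prev + sigmoid(i) * np.tanh(c_tilde)
h_t = sigmoid(o) * np.tanh(c_t)
print(h_t.shape)  # (4,)
```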
LarsChrWiik/Comparing-Machine-Learning-Models | [
"050b1bdb40c1d2e9c15f927e9eb257b4b7aaacbe"
] | [
"main.py"
] | [
"\r\nfrom scipy.io import arff\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.utils import shuffle\r\nfrom ModelScorer import ModelScorer\r\nimport pandas as pd\r\nfrom Plotter import *\r\nimport warnings\r\n#warnings.simplefilter(action='ignore', category=FutureWarning)\r\nwarnings.filterwarnings(\"ignore\")\r\npd.set_option('display.expand_frame_repr', False)\r\n\r\n# Machine Learning Classifiers.\r\n\r\nfrom sklearn.dummy import DummyClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import SVC, LinearSVC\r\nfrom sklearn.svm import OneClassSVM\r\nfrom xgboost import XGBClassifier\r\n\r\n\r\nY_col = 'C'\r\n\r\n\r\ndef read_arff(name):\r\n data = arff.loadarff(name)\r\n df = pd.DataFrame(data[0])\r\n\r\n # Convert target strings to bits.\r\n df[Y_col] = df[Y_col].map(lambda x: 1 if str(x)[2:-1]=='True' else 0)\r\n return df\r\n\r\n\r\ndef score_models():\r\n df = read_arff('dataset.arff')\r\n\r\n # Normalize.\r\n df = df.apply(lambda x: (x - x.min()) /(x.max() - x.min()), axis=0)\r\n\r\n # Unsupervised Learning.\r\n X = df.drop(Y_col, axis=1)\r\n ocsvm = OneClassSVM()\r\n ocsvm.fit(X)\r\n df['Category'] = ocsvm.predict(X)\r\n\r\n # Model Scorer.\r\n scores = []\r\n model_scorer = ModelScorer(df=df, Y_col=Y_col)\r\n scores.append(model_scorer.score_model(clf=DummyClassifier()))\r\n scores.append(model_scorer.score_model(clf=DecisionTreeClassifier()))\r\n scores.append(model_scorer.score_model(clf=RandomForestClassifier(n_estimators=100)))\r\n scores.append(model_scorer.score_model(clf=GradientBoostingClassifier(n_estimators=100)))\r\n scores.append(model_scorer.score_model(clf=XGBClassifier(n_estimators=100)))\r\n scores.append(model_scorer.score_model(clf=SGDClassifier()))\r\n scores.append(model_scorer.score_model(clf=LogisticRegression()))\r\n scores.append(model_scorer.score_model(clf=GaussianNB()))\r\n scores.append(model_scorer.score_model(clf=KNeighborsClassifier()))\r\n scores.append(model_scorer.score_model(clf=BernoulliNB()))\r\n scores.append(model_scorer.score_model(clf=SVC(kernel='linear', degree=5)))\r\n scores.append(model_scorer.score_model(clf = MLPClassifier()))\r\n scores.append(model_scorer.score_model(\r\n clf = MLPClassifier(\r\n activation = 'tanh',\r\n solver = 'lbfgs',\r\n hidden_layer_sizes = 100,\r\n learning_rate_init = 0.001,\r\n max_iter = 100000\r\n ),\r\n name='Tuned MLPClassifier')\r\n )\r\n\r\n df_result = pd.concat(scores).reset_index(drop=True)\r\n df_result = df_result.sort_values([\"accuracy\"], ascending=False)\r\n print(df_result)\r\n\r\n\r\ndef show_feature_importances():\r\n df = read_arff('dataset.arff')\r\n\r\n # Normalize.\r\n df = df.apply(lambda x: (x - x.min()) /(x.max() - x.min()), axis=0)\r\n\r\n X = df.drop(Y_col, axis=1)\r\n Y = df[Y_col]\r\n plot_feature_importance(X, Y)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #score_models()\r\n show_feature_importances()\r\n"
] | [
[
"sklearn.svm.SVC",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.linear_model.SGDClassifier",
"pandas.DataFrame",
"pandas.set_option",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.neural_network.MLPClassifier",
"scipy.io.arff.loadarff",
"pandas.concat",
"sklearn.svm.OneClassSVM",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.dummy.DummyClassifier",
"sklearn.naive_bayes.GaussianNB"
]
] |
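`ModelScorer` itself is not shown in this row; a minimal stand-in sketch that cross-validates a few of the listed classifiers on synthetic data:

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=300, random_state=0)
for clf in (DummyClassifier(strategy="most_frequent"),
            DecisionTreeClassifier(),
            RandomForestClassifier(n_estimators=100)):
    acc = cross_val_score(clf, X, y, cv=5).mean()
    print(f"{type(clf).__name__}: {acc:.3f}")
```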
rs992214/keanu | [
"c75b2a00571a0da93c6b1d5e9f0cbe09aebdde4d"
] | [
"keanu-python/keanu/infer_type.py"
] | [
"from typing import Callable, Dict, Any, Union\n\nimport numpy as np\n\nfrom keanu.vartypes import (numpy_types, tensor_arg_types, runtime_numpy_types, runtime_pandas_types,\n runtime_primitive_types, runtime_bool_types, runtime_int_types, runtime_float_types,\n primitive_types)\nfrom keanu.vertex.base import Vertex\n\n\ndef infer_type_and_execute(value: tensor_arg_types, actions: Dict[type, Callable]) -> Any:\n return actions[get_type_of_value(value)](value)\n\n\ndef get_type_of_value(t: Union[tensor_arg_types, Vertex]) -> type:\n if isinstance(t, Vertex):\n return get_type_of_value(t.get_value())\n if isinstance(t, runtime_numpy_types):\n return __infer_type_from_ndarray(t)\n elif isinstance(t, runtime_pandas_types):\n return __infer_type_from_ndarray(t.values)\n elif isinstance(t, runtime_primitive_types):\n return __infer_type_from_scalar(t)\n else:\n raise NotImplementedError(\n \"Argument t must be either an ndarray or an instance of numbers.Number. Was given {} instead\".format(\n type(t)))\n\n\ndef __infer_type_from_ndarray(ndarray: numpy_types) -> type:\n if np.issubdtype(ndarray.dtype, np.bool_):\n return bool\n elif np.issubdtype(ndarray.dtype, np.integer):\n return int\n elif np.issubdtype(ndarray.dtype, np.floating):\n return float\n else:\n raise NotImplementedError(\"Generic types in an ndarray are not supported. Was given {}\".format(ndarray.dtype))\n\n\ndef __infer_type_from_scalar(scalar: primitive_types) -> type:\n if isinstance(scalar, runtime_bool_types):\n return bool\n elif isinstance(scalar, runtime_int_types):\n return int\n elif isinstance(scalar, runtime_float_types):\n return float\n else:\n raise NotImplementedError(\"Generic types in an ndarray are not supported. Was given {}\".format(type(scalar)))\n"
] | [
[
"numpy.issubdtype"
]
] |
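The `numpy.issubdtype` dispatch above, reduced to a standalone sketch:

```python
import numpy as np

def ndarray_element_type(a: np.ndarray) -> type:
    # Same bool/int/float dispatch as __infer_type_from_ndarray.
    if np.issubdtype(a.dtype, np.bool_):
        return bool
    elif np.issubdtype(a.dtype, np.integer):
        return int
    elif np.issubdtype(a.dtype, np.floating):
        return float
    raise NotImplementedError(a.dtype)

print(ndarray_element_type(np.array([True])))  # <class 'bool'>
print(ndarray_element_type(np.array([1, 2])))  # <class 'int'>
print(ndarray_element_type(np.array([1.5])))   # <class 'float'>
```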
gesa23/ds1hw1 | [
"fe69bcfd311467611a9534bbeaa7705ed95fafdb"
] | [
"main.py"
] | [
"from sklearn.datasets import load_iris\nimport pandas as pd\n\nds = load_iris()\ndf = pd.DataFrame(data= ds[\"data\"], columns=ds[\"feature_names\"])\ntarget_names = [ds.target_names[x] for x in ds.target]\ndf['species'] = target_names\nprint(df)"
] | [
[
"pandas.DataFrame",
"sklearn.datasets.load_iris"
]
] |
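An equivalent sketch using `as_frame` (scikit-learn 0.23+), which avoids building the DataFrame by hand:

```python
from sklearn.datasets import load_iris

ds = load_iris(as_frame=True)
df = ds.frame  # features plus an integer 'target' column
df["species"] = df["target"].map(dict(enumerate(ds.target_names)))
print(df.head())
```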
jacke121/MBMD | [
"2daf5edb4fb40ee652baead4f9332ca00fa111a5"
] | [
"core/target_assigner.py"
] | [
"from object_detection.core.target_assigner import TargetAssigner\nimport tensorflow as tf\nfrom object_detection.core import box_list\n\nclass TargetAssignerExtend(TargetAssigner):\n def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,\n **params):\n \"\"\"Assign classification and regression targets to each anchor.\n The extended version assign 0 weights to negative (0) box regression.\n \n For a given set of anchors and groundtruth detections, match anchors\n to groundtruth_boxes and assign classification and regression targets to\n each anchor as well as weights based on the resulting match (specifying,\n e.g., which anchors should not contribute to training loss).\n \n Anchors that are not matched to anything are given a classification target\n of self._unmatched_cls_target which can be specified via the constructor.\n \n \n Args:\n anchors: a BoxList representing N anchors\n groundtruth_boxes: a BoxList representing M groundtruth boxes\n groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]\n with labels for each of the ground_truth boxes. The subshape\n [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set\n to None, groundtruth_labels assumes a binary problem where all\n ground_truth boxes get a positive label (of 1).\n **params: Additional keyword arguments for specific implementations of\n the Matcher.\n \n Returns:\n cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],\n where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels\n which has shape [num_gt_boxes, d_1, d_2, ... d_k].\n cls_weights: a float32 tensor with shape [num_anchors]\n reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]\n reg_weights: a float32 tensor with shape [num_anchors]\n match: a matcher.Match object encoding the match between anchors and\n groundtruth boxes, with rows corresponding to groundtruth boxes\n and columns corresponding to anchors.\n \n Raises:\n ValueError: if anchors or groundtruth_boxes are not of type\n box_list.BoxList\n \"\"\"\n if not isinstance(anchors, box_list.BoxList):\n raise ValueError('anchors must be an BoxList')\n if not isinstance(groundtruth_boxes, box_list.BoxList):\n raise ValueError('groundtruth_boxes must be an BoxList')\n\n if groundtruth_labels is None:\n groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),\n 0))\n groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)\n shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:],\n tf.shape(self._unmatched_cls_target))\n\n with tf.control_dependencies([shape_assert]):\n match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,\n anchors)\n match = self._matcher.match(match_quality_matrix, **params)\n reg_targets = self._create_regression_targets(anchors,\n groundtruth_boxes,\n match)\n cls_targets = self._create_classification_targets(groundtruth_labels,\n match)\n reg_weights = self._create_regression_weights(match, groundtruth_labels)\n cls_weights = self._create_classification_weights(\n match, self._positive_class_weight, self._negative_class_weight)\n\n num_anchors = anchors.num_boxes_static()\n if num_anchors is not None:\n reg_targets = self._reset_target_shape(reg_targets, num_anchors)\n cls_targets = self._reset_target_shape(cls_targets, num_anchors)\n reg_weights = self._reset_target_shape(reg_weights, num_anchors)\n cls_weights = self._reset_target_shape(cls_weights, num_anchors)\n\n return cls_targets, cls_weights, reg_targets, reg_weights, match\n\n def 
_create_regression_weights(self, match, groundtruth_labels):\n \"\"\"Set regression weight for each anchor.\n \n Only positive anchors are set to contribute to the regression loss, so this\n method returns a weight of 1 for every positive anchor and 0 for every\n negative anchor.\n \n Args:\n match: a matcher.Match object that provides a matching between anchors\n and groundtruth boxes.\n \n Returns:\n reg_weights: a float32 tensor with shape [num_anchors] representing\n regression weights\n \"\"\"\n\n reg_weights = tf.cast(match.matched_column_indicator(), tf.float32)\n\n matched_gt_indices = match.matched_row_indices()\n matched_label = tf.gather(groundtruth_labels, matched_gt_indices)\n matched_is_foreground = tf.cast(matched_label[:,0] <= 0, tf.float32)\n matched_anchor_indices = match.matched_column_indices()\n unmatched_ignored_anchor_indices=match.unmatched_or_ignored_column_indices()\n unmatched_ignored_reg_weights = tf.gather(reg_weights, unmatched_ignored_anchor_indices)\n reg_weights= tf.dynamic_stitch(\n [matched_anchor_indices, unmatched_ignored_anchor_indices],\n [matched_is_foreground, unmatched_ignored_reg_weights])\n return reg_weights\n\n"
] | [
[
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.gather",
"tensorflow.dynamic_stitch",
"tensorflow.control_dependencies"
]
] |
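A standalone sketch of the `tf.dynamic_stitch` pattern in `_create_regression_weights`: values computed for two disjoint index sets are scattered back into one flat weight tensor (all indices and values hypothetical):

```python
import tensorflow as tf

matched_idx = tf.constant([0, 2])          # anchors matched to groundtruth
unmatched_idx = tf.constant([1, 3, 4])     # unmatched or ignored anchors
matched_weights = tf.constant([1.0, 1.0])  # e.g. the foreground indicator
unmatched_weights = tf.zeros([3])

reg_weights = tf.dynamic_stitch([matched_idx, unmatched_idx],
                                [matched_weights, unmatched_weights])
print(reg_weights)  # [1. 0. 1. 0. 0.]
```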
marklr/vqgan-clip-app | [
"23edb7ae6234ab177a91865c02be160151fcf566"
] | [
"diffusion_logic.py"
] | [
"import clip\nimport sys\nimport torch\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as TF\nfrom kornia import augmentation, filters\nfrom torch import nn\nfrom torch.nn import functional as F\nimport math\nimport lpips\nfrom PIL import Image\n\nsys.path.append(\"./guided-diffusion\")\n\nfrom guided_diffusion.script_util import (\n create_model_and_diffusion,\n model_and_diffusion_defaults,\n)\n\nDIFFUSION_METHODS_AND_WEIGHTS = {\n # \"CLIP Guided Diffusion 256x256\",\n \"256x256 HQ Uncond\": \"256x256_diffusion_uncond.pt\",\n \"512x512 HQ Cond\": \"512x512_diffusion.pt\",\n \"512x512 HQ Uncond\": \"512x512_diffusion_uncond_finetune_008100.pt\",\n}\n\n\ndef spherical_dist_loss(x, y):\n x = F.normalize(x, dim=-1)\n y = F.normalize(y, dim=-1)\n return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)\n\n\ndef parse_prompt(prompt):\n vals = prompt.rsplit(\":\", 1)\n vals = vals + [\"\", \"1\"][len(vals) :]\n return vals[0], float(vals[1])\n\n\nclass MakeCutouts(nn.Module):\n def __init__(self, cut_size, cutn, cut_pow=1.0):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.cut_pow = cut_pow\n\n def forward(self, input):\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n cutouts = []\n for _ in range(self.cutn):\n size = int(\n torch.rand([]) ** self.cut_pow * (max_size - min_size) + min_size\n )\n offsetx = torch.randint(0, sideX - size + 1, ())\n offsety = torch.randint(0, sideY - size + 1, ())\n cutout = input[:, :, offsety : offsety + size, offsetx : offsetx + size]\n cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))\n return torch.cat(cutouts)\n\n\ndef tv_loss(input):\n \"\"\"L2 total variation loss, as in Mahendran et al.\"\"\"\n input = F.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])\n\n\ndef range_loss(input):\n return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3])\n\n\nclass CLIPGuidedDiffusion:\n def __init__(\n self,\n prompt: str,\n ckpt: str,\n batch_size: int = 1,\n clip_guidance_scale: float = 1000,\n seed: int = 0,\n num_steps: int = 1000,\n continue_prev_run: bool = True,\n skip_timesteps: int = 0,\n ) -> None:\n\n assert ckpt in DIFFUSION_METHODS_AND_WEIGHTS.keys()\n self.ckpt = ckpt\n print(self.ckpt)\n\n # Default config\n self.model_config = model_and_diffusion_defaults()\n self.model_config.update(\n {\n \"attention_resolutions\": \"32, 16, 8\",\n \"class_cond\": True if ckpt == \"512x512 HQ Cond\" else False,\n \"diffusion_steps\": num_steps,\n \"rescale_timesteps\": True,\n \"timestep_respacing\": str(\n num_steps\n ), # modify this to decrease timesteps\n \"image_size\": 512 if ckpt.startswith(\"512\") else 256,\n \"learn_sigma\": True,\n \"noise_schedule\": \"linear\",\n \"num_channels\": 256,\n \"num_head_channels\": 64,\n \"num_res_blocks\": 2,\n \"resblock_updown\": True,\n \"use_checkpoint\": False,\n \"use_fp16\": True,\n \"use_scale_shift_norm\": True,\n }\n )\n # Split text by \"|\" symbol\n self.prompts = [phrase.strip() for phrase in prompt.split(\"|\")]\n if self.prompts == [\"\"]:\n self.prompts = []\n\n self.image_prompts = [] # TODO\n self.batch_size = batch_size\n\n # Controls how much the image should look like the prompt.\n self.clip_guidance_scale = clip_guidance_scale\n\n # Controls the smoothness of the final output.\n self.tv_scale = 150 # TODO add control 
widget\n\n # Controls how far out of range RGB values are allowed to be.\n self.range_scale = 50 # TODO add control widget\n\n self.cutn = 32 # TODO add control widget\n self.cutn_batches = 2 # TODO add control widget\n self.cut_pow = 0.5 # TODO add control widget\n\n # Removed, repeat batches by triggering a new run\n # self.n_batches = 1\n\n # This enhances the effect of the init image, a good value is 1000.\n self.init_scale = 1000 # TODO add control widget\n\n # This needs to be between approx. 200 and 500 when using an init image.\n # Higher values make the output look more like the init.\n self.skip_timesteps = skip_timesteps # TODO add control widget\n\n self.seed = seed\n self.continue_prev_run = continue_prev_run\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device:\", self.device)\n\n def load_model(\n self,\n model_file_loc=\"assets/256x256_diffusion_uncond.pt\",\n prev_model=None,\n prev_diffusion=None,\n prev_clip_model=None,\n ) -> None:\n if (\n self.continue_prev_run is True\n and prev_model is not None\n and prev_diffusion is not None\n and prev_clip_model is not None\n ):\n self.model = prev_model\n self.diffusion = prev_diffusion\n self.clip_model = prev_clip_model\n\n self.clip_size = self.clip_model.visual.input_resolution\n self.normalize = transforms.Normalize(\n mean=[0.48145466, 0.4578275, 0.40821073],\n std=[0.26862954, 0.26130258, 0.27577711],\n )\n\n else:\n self.model, self.diffusion = create_model_and_diffusion(**self.model_config)\n self.model.load_state_dict(torch.load(model_file_loc, map_location=\"cpu\"))\n self.model.eval().requires_grad_(False).to(self.device)\n\n if self.ckpt == \"512x512 HQ Cond\":\n for name, param in self.model.named_parameters():\n if \"qkv\" in name or \"norm\" in name or \"proj\" in name:\n param.requires_grad_()\n\n if self.model_config[\"use_fp16\"]:\n self.model.convert_to_fp16()\n\n self.clip_model = (\n clip.load(\"ViT-B/16\", jit=False)[0]\n .eval()\n .requires_grad_(False)\n .to(self.device)\n )\n\n self.clip_size = self.clip_model.visual.input_resolution\n self.normalize = transforms.Normalize(\n mean=[0.48145466, 0.4578275, 0.40821073],\n std=[0.26862954, 0.26130258, 0.27577711],\n )\n\n return self.model, self.diffusion, self.clip_model\n\n def cond_fn_conditional(self, x, t, y=None):\n # From 512 HQ notebook using OpenAI's conditional 512x512 model\n # TODO: Merge with cond_fn's cutn_batches\n with torch.enable_grad():\n x = x.detach().requires_grad_()\n n = x.shape[0]\n my_t = torch.ones([n], device=self.device, dtype=torch.long) * self.cur_t\n out = self.diffusion.p_mean_variance(\n self.model, x, my_t, clip_denoised=False, model_kwargs={\"y\": y}\n )\n fac = self.diffusion.sqrt_one_minus_alphas_cumprod[self.cur_t]\n x_in = out[\"pred_xstart\"] * fac + x * (1 - fac)\n clip_in = self.normalize(self.make_cutouts(x_in.add(1).div(2)))\n image_embeds = (\n self.clip_model.encode_image(clip_in).float().view([self.cutn, n, -1])\n )\n dists = spherical_dist_loss(image_embeds, self.target_embeds.unsqueeze(0))\n losses = dists.mean(0)\n tv_losses = tv_loss(x_in)\n loss = (\n losses.sum() * self.clip_guidance_scale\n + tv_losses.sum() * self.tv_scale\n )\n # TODO: Implement init image\n return -torch.autograd.grad(loss, x)[0]\n\n def cond_fn(self, x, t, out, y=None):\n n = x.shape[0]\n fac = self.diffusion.sqrt_one_minus_alphas_cumprod[self.cur_t]\n x_in = out[\"pred_xstart\"] * fac + x * (1 - fac)\n x_in_grad = torch.zeros_like(x_in)\n for i in range(self.cutn_batches):\n 
clip_in = self.normalize(self.make_cutouts(x_in.add(1).div(2)))\n image_embeds = self.clip_model.encode_image(clip_in).float()\n dists = spherical_dist_loss(\n image_embeds.unsqueeze(1), self.target_embeds.unsqueeze(0)\n )\n dists = dists.view([self.cutn, n, -1])\n losses = dists.mul(self.weights).sum(2).mean(0)\n x_in_grad += (\n torch.autograd.grad(losses.sum() * self.clip_guidance_scale, x_in)[0]\n / self.cutn_batches\n )\n tv_losses = tv_loss(x_in)\n range_losses = range_loss(out[\"pred_xstart\"])\n loss = tv_losses.sum() * self.tv_scale + range_losses.sum() * self.range_scale\n if self.init is not None and self.init_scale:\n init_losses = self.lpips_model(x_in, self.init)\n loss = loss + init_losses.sum() * self.init_scale\n x_in_grad += torch.autograd.grad(loss, x_in)[0]\n grad = -torch.autograd.grad(x_in, x, x_in_grad)[0]\n return grad\n\n def model_init(self, init_image: Image.Image = None) -> None:\n if self.seed is not None:\n torch.manual_seed(self.seed)\n\n self.make_cutouts = MakeCutouts(self.clip_size, self.cutn, self.cut_pow)\n self.side_x = self.side_y = self.model_config[\"image_size\"]\n\n self.target_embeds, self.weights = [], []\n\n for prompt in self.prompts:\n txt, weight = parse_prompt(prompt)\n self.target_embeds.append(\n self.clip_model.encode_text(clip.tokenize(txt).to(self.device)).float()\n )\n self.weights.append(weight)\n\n # TODO: Implement image prompt parsing\n # for prompt in self.image_prompts:\n # path, weight = parse_prompt(prompt)\n # img = Image.open(fetch(path)).convert('RGB')\n # img = TF.resize(img, min(side_x, side_y, *img.size), transforms.InterpolationMode.LANCZOS)\n # batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))\n # embed = clip_model.encode_image(normalize(batch)).float()\n # target_embeds.append(embed)\n # weights.extend([weight / cutn] * cutn)\n\n self.target_embeds = torch.cat(self.target_embeds)\n self.weights = torch.tensor(self.weights, device=self.device)\n if self.weights.sum().abs() < 1e-3:\n raise RuntimeError(\"The weights must not sum to 0.\")\n self.weights /= self.weights.sum().abs()\n\n self.init = None\n if init_image is not None:\n self.init = init_image.resize((self.side_x, self.side_y), Image.LANCZOS)\n self.init = (\n TF.to_tensor(self.init).to(self.device).unsqueeze(0).mul(2).sub(1)\n )\n\n # LPIPS not required if init_image not used!\n if self.init is None:\n self.lpips_model = None\n else:\n self.lpips_model = lpips.LPIPS(net=\"vgg\").to(self.device)\n\n if self.model_config[\"timestep_respacing\"].startswith(\"ddim\"):\n sample_fn = self.diffusion.ddim_sample_loop_progressive\n else:\n sample_fn = self.diffusion.p_sample_loop_progressive\n\n self.cur_t = self.diffusion.num_timesteps - self.skip_timesteps - 1\n\n if self.ckpt == \"512x512 HQ Cond\":\n print(\"Using conditional sampling fn\")\n self.samples = sample_fn(\n self.model,\n (self.batch_size, 3, self.side_y, self.side_x),\n clip_denoised=False,\n model_kwargs={\n \"y\": torch.zeros(\n [self.batch_size], device=self.device, dtype=torch.long\n )\n },\n cond_fn=self.cond_fn_conditional,\n progress=True,\n skip_timesteps=self.skip_timesteps,\n init_image=self.init,\n randomize_class=True,\n )\n else:\n print(\"Using unconditional sampling fn\")\n self.samples = sample_fn(\n self.model,\n (self.batch_size, 3, self.side_y, self.side_x),\n clip_denoised=False,\n model_kwargs={},\n cond_fn=self.cond_fn,\n progress=True,\n skip_timesteps=self.skip_timesteps,\n init_image=self.init,\n randomize_class=True,\n cond_fn_with_grad=True,\n )\n\n 
self.samplesgen = enumerate(self.samples)\n\n def iterate(self):\n self.cur_t -= 1\n _, sample = next(self.samplesgen)\n\n ims = []\n for _, image in enumerate(sample[\"pred_xstart\"]):\n im = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))\n ims.append(im)\n\n return ims\n"
] | [
[
"torch.ones",
"torch.autograd.grad",
"torch.randint",
"torch.load",
"torch.nn.functional.normalize",
"torch.nn.functional.pad",
"torch.zeros_like",
"torch.tensor",
"torch.manual_seed",
"torch.rand",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.cuda.is_available",
"torch.enable_grad",
"torch.zeros",
"torch.cat"
]
] |
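A quick check of the `spherical_dist_loss` defined in this row: for unit vectors it evaluates to θ²/2, where θ is the angle between them (test vectors hypothetical):

```python
import torch
import torch.nn.functional as F

def spherical_dist_loss(x, y):
    # Copied from diffusion_logic.py above.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

a = torch.tensor([[1.0, 0.0]])
b = torch.tensor([[0.0, 1.0]])
print(spherical_dist_loss(a, a))  # tensor([0.])
print(spherical_dist_loss(a, b))  # tensor([1.2337]) == (pi/2)**2 / 2
```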
jurreht/cic | [
"95a5e32eeb26da8d18642add2259f164426e1a25"
] | [
"tests/cic_test.py"
] | [
"import os\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\nimport scipy.io\nimport scipy.stats\n\nimport cic\n\n\ndef cases():\n \"\"\"\n Loads all filenames of the pre-calculated test cases.\n \"\"\"\n case_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'cases'\n )\n cases = []\n for dir_path, _, files in os.walk(case_dir):\n cases = cases + [os.path.join(dir_path, f) for f in files]\n return cases\n\n\[email protected]('inpath', cases())\n# Run both serially and in parallel\[email protected]('n_jobs', [None, -1])\ndef test_cic(inpath, n_jobs):\n np.random.seed(323490)\n\n # Load the case\n objs = scipy.io.loadmat(inpath)\n\n y00 = objs['y00'][:, 0]\n y01 = objs['y01'][:, 0]\n y10 = objs['y10'][:, 0]\n y11 = objs['y11'][:, 0]\n\n est_qte, se_qte, est_ate, se_ate = cic.calculate_cic(\n y00, y01, y10, y11, n_bootstraps=499, n_draws=10000,\n moments=[np.mean],\n n_jobs=n_jobs,\n # The original code uses some small (in my view unneccessary)\n # numerical corrections when calculating cdf's and inverse cdf's.\n # Without using them here also there will always be some test\n # cases slightly off.\n use_corrections=True\n )\n est_test = objs['est'][0, 1:10]\n se_test = objs['se'][1, 1:10]\n\n # Test quantile treatment effects\n assert_allclose(est_qte, est_test)\n assert_allclose(se_qte, se_test, atol=5e-2, rtol=1e-3)\n\n # Test average treatment effect\n # It is possible to get closer than an atol of 5e-3 by increasing n_draws\n # above, at the cost of slower tests\n assert_allclose(est_ate[0], objs['est'][0, 0], atol=5e-3)\n assert_allclose(se_ate[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)\n\n\[email protected](\n 'inpath',\n # exp8 and exp10 don't pass without use_corrections, which is only\n # supported for the simple case.\n [c for c in cases() if not ('exp8' in c or 'exp10' in c)])\ndef test_multiple_cic_from_simple_case(inpath):\n np.random.seed(442342234)\n\n # Load the case\n objs = scipy.io.loadmat(inpath)\n\n y00 = objs['y00'][:, 0]\n y01 = objs['y01'][:, 0]\n y10 = objs['y10'][:, 0]\n y11 = objs['y11'][:, 0]\n\n y = np.concatenate([y00, y01, y10, y11])\n g = np.concatenate([np.zeros(y00.shape[0] + y01.shape[0], dtype=np.int_),\n np.ones(y10.shape[0] + y11.shape[0], dtype=np.int_)])\n t = np.concatenate([np.zeros(y00.shape[0], dtype=np.int_),\n np.ones(y01.shape[0], dtype=np.int_),\n np.zeros(y10.shape[0], dtype=np.int_),\n np.ones(y11.shape[0], dtype=np.int_)])\n treat = np.array([[0, 0], [0, 1]], dtype=np.bool_)\n\n model = cic.CICModel(y, g, t, treat, n_bootstraps=499, moments=[np.mean],\n n_draws=10000)\n\n assert np.all(model.treatment_for == np.array([[1, 1]], dtype=np.int_))\n\n est_test = objs['est'][0, 1:10]\n se_test = objs['se'][1, 1:10]\n\n assert_allclose(model.quantile_effect[0], est_test)\n assert_allclose(model.quantile_se[0], se_test, atol=5e-2, rtol=1e-3)\n # Test average treatment effect\n # It is possible to get closer than an atol of 5e-3 by increasing n_draws\n # above, at the cost of slower tests\n assert_allclose(model.moment_effect[0], objs['est'][0, 0], atol=5e-3)\n assert_allclose(model.moment_se[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)\n\n\ndef test_cic_model_no_effect():\n \"\"\"\n Test a 3x3 CIC model where none of the treatments have any effect.\n The test is done by simulating and estimating the model many times\n and checking the coverage of the confidence intervals.\n \"\"\"\n np.random.seed(45354354)\n\n treat = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 1]\n ], dtype=np.bool)\n\n n_trials 
= 250\n n_obs = 1000\n quantiles = np.array([0.1, .3, .5, .7, .9])\n effect_in_ci = np.zeros((3, quantiles.shape[0]), dtype=np.int_)\n for trial_ind in range(n_trials):\n g, t, y = generate_sample(n_obs)\n model = cic.CICModel(y, g, t, treat, quantiles)\n effect_in_ci += (\n (model.quantile_effect - 1.96 * model.quantile_se <= 0) &\n (model.quantile_effect + 1.96 * model.quantile_se >= 0))\n\n coverage = effect_in_ci / n_trials\n assert_allclose(coverage, np.ones_like(coverage) * .95, rtol=5e-2)\n\n\ndef test_cic_model_shift_effect():\n \"\"\"\n Test a 3x3 CIC model where the treatments are linear shifts, but\n different for different groups and times.\n The test is done by simulating and estimating the model many times\n and checking the coverage of the confidence intervals.\n \"\"\"\n np.random.seed(45354354)\n\n treat = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 1]\n ], dtype=np.bool)\n\n n_trials = 250\n n_obs = 1000\n quantiles = np.array([.25, .5, .75])\n moments = [np.mean, np.std]\n quantile_in_ci = np.zeros((3, 3, 3), dtype=np.int_)\n moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)\n for trial_ind in range(n_trials):\n g, t, y = generate_sample(n_obs)\n y[(g == 1) & (t == 2)] += 1\n y[(g == 2) & (t == 1)] -= 1\n y[(g == 2) & (t == 2)] -= 2\n model = cic.CICModel(y, g, t, treat, quantiles, moments)\n\n mean, se = model.treatment_quantile(1, 2)\n quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1) &\n (mean + 1.96 * se >= 1))\n mean, se = model.treatment_quantile(2, 1)\n quantile_in_ci[:, 1] += ((mean - 1.96 * se <= -1) &\n (mean + 1.96 * se >= -1))\n mean, se = model.treatment_quantile(2, 2)\n quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -2) &\n (mean + 1.96 * se >= -2))\n\n mean, se = model.treatment_moment(1, 2)\n moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1) &\n (mean[0] + 1.96 * se[0] >= 1))\n moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 0) &\n (mean[1] + 1.96 * se[1] >= 0))\n mean, se = model.treatment_moment(2, 1)\n moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= -1) &\n (mean[0] + 1.96 * se[0] >= -1))\n moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 0) &\n (mean[1] + 1.96 * se[1] >= 0))\n mean, se = model.treatment_moment(2, 2)\n moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -2) &\n (mean[0] + 1.96 * se[0] >= -2))\n moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= 0) &\n (mean[1] + 1.96 * se[1] >= 0))\n\n quantile_coverage = quantile_in_ci / n_trials\n assert_allclose(quantile_coverage,\n np.ones_like(quantile_coverage) * .95,\n rtol=5e-2)\n moment_coverage = moment_in_ci / n_trials\n assert_allclose(moment_coverage,\n np.ones_like(moment_in_ci) * .95,\n rtol=5e-2)\n\n\ndef test_cic_model_dispersion_effect():\n \"\"\"\n Test a 3x3 CIC model where treatments are multiplying the distribution\n by some number, which differs by group and time.\n The test is done by simulating and estimating the model many times\n and checking the coverage of the confidence intervals.\n \"\"\"\n np.random.seed(45354354)\n\n treat = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 1]\n ], dtype=np.bool)\n\n n_trials = 250\n n_obs = 2000\n quantiles = np.array([.5])\n moments = [np.mean, np.std]\n quantile_in_ci = np.zeros((3, 3, 1), dtype=np.int_)\n moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)\n for trial_ind in range(n_trials):\n g, t, y = generate_sample(n_obs)\n y[(g == 1) & (t == 2)] *= 2\n y[(g == 2) & (t == 1)] *= 3\n y[(g == 2) & (t == 2)] *= .5\n model = cic.CICModel(y, g, t, treat, quantiles, moments)\n\n # Q_{aX}(p) = a Q_X(p) for a quantile 
function Q and a > 0.\n # The median here is 1000, 2 * 1000 = 2000, hence the QTE is 1000\n mean, se = model.treatment_quantile(1, 2)\n quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1000) &\n (mean + 1.96 * se >= 1000))\n # The median here is 0, 3 * 0 = 0, hence the QTE is 0\n mean, se = model.treatment_quantile(2, 1)\n quantile_in_ci[:, 1] += ((mean - 1.96 * se <= 0) &\n (mean + 1.96 * se >= 0))\n # The median here is 1000, .5 * 1000 = 500, hence the QTE is -500\n mean, se = model.treatment_quantile(2, 2)\n quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -500) &\n (mean + 1.96 * se >= -500))\n\n mean, se = model.treatment_moment(1, 2)\n # The mean goes from 1000 to 2000 => ATE = 1000\n moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1000) &\n (mean[0] + 1.96 * se[0] >= 1000))\n # The standard deviation goes from 1 to 2 => TE = 1\n moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 1) &\n (mean[1] + 1.96 * se[1] >= 1))\n mean, se = model.treatment_moment(2, 1)\n # The mean goes from 0 to 0 => ATE = 0\n moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= 0) &\n (mean[0] + 1.96 * se[0] >= 0))\n # The standard deviation goes from 1/3 to 1 => TE = 2/3\n moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 2 / 3) &\n (mean[1] + 1.96 * se[1] >= 2 / 3))\n mean, se = model.treatment_moment(2, 2)\n # The mean goes from 1000 to 500 => ATE = -500\n moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -500) &\n (mean[0] + 1.96 * se[0] >= -500))\n # The standard deviation goes from 1 to .5 => TE = -.5\n moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= -.5) &\n (mean[1] + 1.96 * se[1] >= -.5))\n\n quantile_coverage = quantile_in_ci / n_trials\n assert_allclose(quantile_coverage,\n np.ones_like(quantile_coverage) * .95,\n rtol=5e-2)\n moment_coverage = moment_in_ci / n_trials\n assert_allclose(moment_coverage,\n np.ones_like(moment_in_ci) * .95,\n rtol=5e-2)\n\n\ndef test_test_model_based_on_quantile_valid():\n np.random.seed(3423482)\n\n treat = np.array([\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 1]\n ], dtype=np.bool)\n\n n_trials = 100\n n_obs = 500\n quantiles = np.array([.5])\n reject = 0\n for trial_ind in range(n_trials):\n g, t, y = generate_sample(n_obs)\n # y[(g == 1) & (t == 2)] = 2 * y[(g == 1) & (t == 2)] - 3\n # y[(g == 2) & (t == 1)] = np.exp(y[(g == 2) & (t == 1)])\n # y[(g == 1) & (t == 2)] *= 2\n # y[(g == 2) & (t == 1)] -= 3\n # y[(g == 2) & (t == 2)] += 1\n model = cic.CICModel(y, g, t, treat, quantiles)\n\n test_stat, rank_dist = model.test_model_based_on_quantile(0)\n crit_val = scipy.stats.chi2.ppf(.95, rank_dist)\n # import pdb; pdb.set_trace()\n if test_stat > crit_val:\n reject += 1\n\n reject_prob = reject / n_trials\n # Just check that the rejection probability is not too large.\n # To get reject_prob~0.05 increse n_obs above, but this slows\n # down the test too much.\n assert reject_prob <= 0.05\n\n\ndef test_combine_effects():\n np.random.seed(4545543)\n\n treat = np.array([\n [0, 0, 0],\n [0, 1, 1]\n ], dtype=np.bool)\n g = np.concatenate((np.zeros(3000, dtype=np.int_), np.ones(4000, dtype=np.int_)))\n t = np.concatenate((np.full(1000, 0), np.full(1000, 1), np.full(1000, 2),\n np.full(1000, 0), np.full(1000, 1), np.full(2000, 2)))\n y = np.random.randn(7000)\n y[(g == 1) & (t == 1)] += 1\n y[(g == 1) & (t == 2)] += 2\n model = cic.CICModel(y, g, t, treat, np.array([.5, .6]), [np.mean], n_draws=2000)\n qte_effect, _, moment_effect, _ = model.combine_effects([(1, 1), (1, 2)])\n true_effect = 1 / 3 + 2 * 2 / 3\n assert_allclose(qte_effect, true_effect, rtol=5e-2)\n 
assert_allclose(moment_effect, true_effect, rtol=5e-2)\n\n\ndef generate_sample(n_obs):\n g = np.random.choice(np.arange(3), n_obs)\n t = np.random.choice(np.arange(3), n_obs)\n\n u = np.random.randn(n_obs)\n y = np.empty(n_obs)\n y[t == 0] = u[t == 0]**3\n y[t == 1] = u[t == 1] / 3\n y[t == 2] = u[t == 2] + 1000\n\n return g, t, y\n"
] | [
[
"numpy.ones",
"numpy.empty",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.randn",
"numpy.ones_like",
"numpy.arange",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.concatenate",
"numpy.full"
]
] |
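Every coverage test in the row above follows the same Monte Carlo pattern: simulate many datasets, form a 95% z-interval around each estimate, and check that the interval contains the true effect in roughly 95% of trials. A small helper capturing that pattern (coverage_rate is a hypothetical name, not part of the cic package):

    import numpy as np

    def coverage_rate(estimates, ses, true_value, z=1.96):
        # Fraction of simulated intervals [est - z*se, est + z*se] that
        # cover the true parameter; for a well-calibrated standard error
        # this should land near 0.95.
        estimates = np.asarray(estimates)
        ses = np.asarray(ses)
        in_ci = (estimates - z * ses <= true_value) & (true_value <= estimates + z * ses)
        return in_ci.mean()

With 250 trials, as in the tests, the binomial noise on a 95% coverage rate is about sqrt(0.95 * 0.05 / 250) ≈ 1.4 percentage points, which is why the assertions compare against 0.95 with rtol=5e-2 rather than exactly.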
hannesdm/shap | [
"ae96bef7879f47978c8a436ebf19c2f2747cd887"
] | [
"shap/explainers/_deep/deep_tf.py"
] | [
"import numpy as np\nimport warnings\nfrom .._explainer import Explainer\nfrom packaging import version\nfrom ..tf_utils import _get_session, _get_graph, _get_model_inputs, _get_model_output\nkeras = None\ntf = None\ntf_ops = None\ntf_backprop = None\ntf_execute = None\ntf_gradients_impl = None\n\ndef custom_record_gradient(op_name, inputs, attrs, results):\n \"\"\" This overrides tensorflow.python.eager.backprop._record_gradient.\n\n We need to override _record_gradient in order to get gradient backprop to\n get called for ResourceGather operations. In order to make this work we\n temporarily \"lie\" about the input type to prevent the node from getting\n pruned from the gradient backprop process. We then reset the type directly\n afterwards back to what it was (an integer type).\n \"\"\"\n reset_input = False\n if op_name == \"ResourceGather\" and inputs[1].dtype == tf.int32:\n inputs[1].__dict__[\"_dtype\"] = tf.float32\n reset_input = True\n\n try:\n out = tf_backprop._record_gradient(\"shap_\"+op_name, inputs, attrs, results) # old TF2.0 versions\n except AttributeError:\n out = tf_backprop.record_gradient(\"shap_\"+op_name, inputs, attrs, results)\n\n if reset_input:\n inputs[1].__dict__[\"_dtype\"] = tf.int32\n\n return out\n\nclass TFDeep(Explainer):\n \"\"\"\n Using tf.gradients to implement the backgropagation was\n inspired by the gradient based implementation approach proposed by Ancona et al, ICLR 2018. Note\n that this package does not currently use the reveal-cancel rule for ReLu units proposed in DeepLIFT.\n \"\"\"\n\n def __init__(self, model, data, session=None, learning_phase_flags=None):\n \"\"\" An explainer object for a deep model using a given background dataset.\n\n Note that the complexity of the method scales linearly with the number of background data\n samples. Passing the entire training dataset as `data` will give very accurate expected\n values, but be unreasonably expensive. The variance of the expectation estimates scale by\n roughly 1/sqrt(N) for N background data samples. So 100 samples will give a good estimate,\n and 1000 samples a very good estimate of the expected values.\n\n Parameters\n ----------\n model : tf.keras.Model or (input : [tf.Operation], output : tf.Operation)\n A keras model object or a pair of TensorFlow operations (or a list and an op) that\n specifies the input and output of the model to be explained. Note that SHAP values\n are specific to a single output value, so you get an explanation for each element of\n the output tensor (which must be a flat rank one vector).\n\n data : [numpy.array] or [pandas.DataFrame] or function\n The background dataset to use for integrating out features. DeepExplainer integrates\n over all these samples for each explanation. The data passed here must match the input\n operations given to the model. If a function is supplied, it must be a function that\n takes a particular input example and generates the background dataset for that example\n session : None or tensorflow.Session\n The TensorFlow session that has the model we are explaining. If None is passed then\n we do our best to find the right session, first looking for a keras session, then\n falling back to the default TensorFlow session.\n\n learning_phase_flags : None or list of tensors\n If you have your own custom learning phase flags pass them here. When explaining a prediction\n we need to ensure we are not in training mode, since this changes the behavior of ops like\n batch norm or dropout. 
If None is passed then we look for tensors in the graph that look like\n learning phase flags (this works for Keras models). Note that we assume all the flags should\n have a value of False during predictions (and hence explanations).\n\n \"\"\"\n # try and import keras and tensorflow\n global tf, tf_ops, tf_backprop, tf_execute, tf_gradients_impl\n if tf is None:\n from tensorflow.python.framework import ops as tf_ops # pylint: disable=E0611\n from tensorflow.python.ops import gradients_impl as tf_gradients_impl # pylint: disable=E0611\n from tensorflow.python.eager import backprop as tf_backprop\n from tensorflow.python.eager import execute as tf_execute\n if not hasattr(tf_gradients_impl, \"_IsBackpropagatable\"):\n from tensorflow.python.ops import gradients_util as tf_gradients_impl\n import tensorflow as tf\n if version.parse(tf.__version__) < version.parse(\"1.4.0\"):\n warnings.warn(\"Your TensorFlow version is older than 1.4.0 and not supported.\")\n global keras\n if keras is None:\n try:\n import keras\n warnings.warn(\"keras is no longer supported, please use tf.keras instead.\")\n except:\n pass\n \n if version.parse(tf.__version__) >= version.parse(\"2.4.0\"):\n warnings.warn(\"Your TensorFlow version is newer than 2.4.0 and so graph support has been removed in eager mode and some static graphs may not be supported. See PR #1483 for discussion.\")\n\n # determine the model inputs and outputs\n self.model_inputs = _get_model_inputs(model)\n self.model_output = _get_model_output(model)\n assert type(self.model_output) != list, \"The model output to be explained must be a single tensor!\"\n assert len(self.model_output.shape) < 3, \"The model output must be a vector or a single value!\"\n self.multi_output = True\n if len(self.model_output.shape) == 1:\n self.multi_output = False\n\n if tf.executing_eagerly():\n if type(model) is tuple or type(model) is list:\n assert len(model) == 2, \"When a tuple is passed it must be of the form (inputs, outputs)\"\n from tensorflow.keras import Model\n self.model = Model(model[0], model[1])\n else:\n self.model = model\n\n # check if we have multiple inputs\n self.multi_input = True\n if type(self.model_inputs) != list or len(self.model_inputs) == 1:\n self.multi_input = False\n if type(self.model_inputs) != list:\n self.model_inputs = [self.model_inputs]\n if type(data) != list and (hasattr(data, '__call__')==False):\n data = [data]\n self.data = data\n\n self._vinputs = {} # used to track what op inputs depends on the model inputs\n self.orig_grads = {}\n\n if not tf.executing_eagerly():\n self.session = _get_session(session)\n\n self.graph = _get_graph(self)\n\n # if no learning phase flags were given we go looking for them\n # ...this will catch the one that keras uses\n # we need to find them since we want to make sure learning phase flags are set to False\n if learning_phase_flags is None:\n self.learning_phase_ops = []\n for op in self.graph.get_operations():\n if 'learning_phase' in op.name and op.type == \"Const\" and len(op.outputs[0].shape) == 0:\n if op.outputs[0].dtype == tf.bool:\n self.learning_phase_ops.append(op)\n self.learning_phase_flags = [op.outputs[0] for op in self.learning_phase_ops]\n else:\n self.learning_phase_ops = [t.op for t in learning_phase_flags]\n\n # save the expected output of the model\n # if self.data is a function, set self.expected_value to None\n if (hasattr(self.data, '__call__')):\n self.expected_value = None\n else:\n if self.data[0].shape[0] > 5000:\n warnings.warn(\"You have provided over 5k 
background samples! For better performance consider using a smaller random sample.\")\n if not tf.executing_eagerly():\n self.expected_value = self.run(self.model_output, self.model_inputs, self.data).mean(0)\n else:\n # self.model is already a tf.keras.Model here: an (inputs, outputs)\n # tuple was wrapped into a Model in __init__ above\n self.expected_value = tf.reduce_mean(self.model(self.data), 0)\n\n if not tf.executing_eagerly():\n self._init_between_tensors(self.model_output.op, self.model_inputs)\n\n # make a blank array that will get lazily filled in with the SHAP value computation\n # graphs for each output. Lazy is important since if there are 1000 outputs and we\n # only explain the top 5 it would be a waste to build graphs for the other 995\n if not self.multi_output:\n self.phi_symbolics = [None]\n else:\n noutputs = self.model_output.shape.as_list()[1]\n if noutputs is not None:\n self.phi_symbolics = [None for i in range(noutputs)]\n else:\n raise Exception(\"The model output tensor to be explained cannot have a static shape in dim 1 of None!\")\n\n def _get_model_output(self, model):\n if len(model.layers[-1]._inbound_nodes) == 0:\n if len(model.outputs) > 1:\n warnings.warn(\"Only one model output supported.\")\n return model.outputs[0]\n else:\n return model.layers[-1].output\n\n def _init_between_tensors(self, out_op, model_inputs):\n # find all the operations in the graph between our inputs and outputs\n tensor_blacklist = tensors_blocked_by_false(self.learning_phase_ops) # don't follow learning phase branches\n dependence_breakers = [k for k in op_handlers if op_handlers[k] == break_dependence]\n back_ops = backward_walk_ops(\n [out_op], tensor_blacklist,\n dependence_breakers\n )\n start_ops = []\n for minput in model_inputs:\n for op in minput.consumers():\n start_ops.append(op)\n self.between_ops = forward_walk_ops(\n start_ops,\n tensor_blacklist, dependence_breakers,\n within_ops=back_ops\n )\n\n # note all the tensors that are on the path between the inputs and the output\n self.between_tensors = {}\n for op in self.between_ops:\n for t in op.outputs:\n self.between_tensors[t.name] = True\n for t in model_inputs:\n self.between_tensors[t.name] = True\n\n # save what types are being used\n self.used_types = {}\n for op in self.between_ops:\n self.used_types[op.type] = True\n\n def _variable_inputs(self, op):\n \"\"\" Return which inputs of this operation are variable (i.e.
depend on the model inputs).\n \"\"\"\n if op not in self._vinputs:\n out = np.zeros(len(op.inputs), dtype=np.bool)\n for i,t in enumerate(op.inputs):\n out[i] = t.name in self.between_tensors\n self._vinputs[op] = out\n return self._vinputs[op]\n\n def phi_symbolic(self, i):\n \"\"\" Get the SHAP value computation graph for a given model output.\n \"\"\"\n if self.phi_symbolics[i] is None:\n\n if not tf.executing_eagerly():\n def anon():\n out = self.model_output[:,i] if self.multi_output else self.model_output\n return tf.gradients(out, self.model_inputs)\n\n self.phi_symbolics[i] = self.execute_with_overridden_gradients(anon)\n else:\n @tf.function\n def grad_graph(shap_rAnD):\n phase = tf.keras.backend.learning_phase()\n tf.keras.backend.set_learning_phase(0)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(shap_rAnD)\n out = self.model(shap_rAnD)\n if self.multi_output:\n out = out[:,i]\n\n self._init_between_tensors(out.op, shap_rAnD)\n x_grad = tape.gradient(out, shap_rAnD)\n tf.keras.backend.set_learning_phase(phase)\n return x_grad\n\n self.phi_symbolics[i] = grad_graph\n\n return self.phi_symbolics[i]\n\n def shap_values(self, X, ranked_outputs=None, output_rank_order=\"max\", check_additivity=True):\n # check if we have multiple inputs\n if not self.multi_input:\n if type(X) == list and len(X) != 1:\n assert False, \"Expected a single tensor as model input!\"\n elif type(X) != list:\n X = [X]\n else:\n assert type(X) == list, \"Expected a list of model inputs!\"\n assert len(self.model_inputs) == len(X), \"Number of model inputs (%d) does not match the number given (%d)!\" % (len(self.model_inputs), len(X))\n\n # rank and determine the model outputs that we will explain\n if ranked_outputs is not None and self.multi_output:\n if not tf.executing_eagerly():\n model_output_values = self.run(self.model_output, self.model_inputs, X)\n else:\n model_output_values = self.model(X)\n\n if output_rank_order == \"max\":\n model_output_ranks = np.argsort(-model_output_values)\n elif output_rank_order == \"min\":\n model_output_ranks = np.argsort(model_output_values)\n elif output_rank_order == \"max_abs\":\n model_output_ranks = np.argsort(np.abs(model_output_values))\n else:\n assert False, \"output_rank_order must be max, min, or max_abs!\"\n model_output_ranks = model_output_ranks[:,:ranked_outputs]\n else:\n model_output_ranks = np.tile(np.arange(len(self.phi_symbolics)), (X[0].shape[0], 1))\n\n # compute the attributions\n output_phis = []\n for i in range(model_output_ranks.shape[1]):\n phis = []\n for k in range(len(X)):\n phis.append(np.zeros(X[k].shape))\n for j in range(X[0].shape[0]):\n if (hasattr(self.data, '__call__')):\n bg_data = self.data([X[l][j] for l in range(len(X))])\n if type(bg_data) != list:\n bg_data = [bg_data]\n else:\n bg_data = self.data\n\n # tile the inputs to line up with the background data samples\n tiled_X = [np.tile(X[l][j:j+1], (bg_data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape)-1)])) for l in range(len(X))]\n\n # we use the first sample for the current sample and the rest for the references\n joint_input = [np.concatenate([tiled_X[l], bg_data[l]], 0) for l in range(len(X))]\n\n # run attribution computation graph\n feature_ind = model_output_ranks[j,i]\n sample_phis = self.run(self.phi_symbolic(feature_ind), self.model_inputs, joint_input)\n\n # assign the attributions to the right part of the output arrays\n for l in range(len(X)):\n phis[l][j] = (sample_phis[l][bg_data[l].shape[0]:] * (X[l][j] - 
bg_data[l])).mean(0)\n\n output_phis.append(phis[0] if not self.multi_input else phis)\n\n # check that the SHAP values sum up to the model output\n if check_additivity:\n if not tf.executing_eagerly():\n model_output = self.run(self.model_output, self.model_inputs, X)\n else:\n model_output = self.model(X)\n for l in range(len(self.expected_value)):\n if not self.multi_input:\n diffs = model_output[:, l] - self.expected_value[l] - output_phis[l].sum(axis=tuple(range(1, output_phis[l].ndim)))\n else:\n diffs = model_output[:, l] - self.expected_value[l]\n for i in range(len(output_phis[l])):\n diffs -= output_phis[l][i].sum(axis=tuple(range(1, output_phis[l][i].ndim)))\n assert np.abs(diffs).max() < 1e-2, \"The SHAP explanations do not sum up to the model's output! This is either because of a \" \\\n \"rounding error or because an operator in your computation graph was not fully supported. If \" \\\n \"the sum difference of %f is significant compared the scale of your model outputs please post \" \\\n \"as a github issue, with a reproducable example if possible so we can debug it.\" % np.abs(diffs).max()\n\n if not self.multi_output:\n return output_phis[0]\n elif ranked_outputs is not None:\n return output_phis, model_output_ranks\n else:\n return output_phis\n\n def run(self, out, model_inputs, X):\n \"\"\" Runs the model while also setting the learning phase flags to False.\n \"\"\"\n if not tf.executing_eagerly():\n feed_dict = dict(zip(model_inputs, X))\n for t in self.learning_phase_flags:\n feed_dict[t] = False\n return self.session.run(out, feed_dict)\n else:\n def anon():\n tf_execute.record_gradient = custom_record_gradient\n\n # build inputs that are correctly shaped, typed, and tf-wrapped\n inputs = []\n for i in range(len(X)):\n shape = list(self.model_inputs[i].shape)\n shape[0] = -1\n data = X[i].reshape(shape)\n v = tf.constant(data, dtype=self.model_inputs[i].dtype)\n inputs.append(v)\n final_out = out(inputs)\n try:\n tf_execute.record_gradient = tf_backprop._record_gradient # old TF2 versions\n except AttributeError:\n tf_execute.record_gradient = tf_backprop.record_gradient\n\n\n return final_out\n return self.execute_with_overridden_gradients(anon)\n\n def custom_grad(self, op, *grads):\n \"\"\" Passes a gradient op creation request to the correct handler.\n \"\"\"\n type_name = op.type[5:] if op.type.startswith(\"shap_\") else op.type\n out = op_handlers[type_name](self, op, *grads) # we cut off the shap_ prefex before the lookup\n return out\n\n def execute_with_overridden_gradients(self, f):\n # replace the gradients for all the non-linear activations\n # we do this by hacking our way into the registry (TODO: find a public API for this if it exists)\n reg = tf_ops._gradient_registry._registry\n ops_not_in_registry = ['TensorListReserve']\n # NOTE: location_tag taken from tensorflow source for None type ops\n location_tag = (\"UNKNOWN\", \"UNKNOWN\", \"UNKNOWN\", \"UNKNOWN\", \"UNKNOWN\")\n # TODO: unclear why some ops are not in the registry with TF 2.0 like TensorListReserve\n for non_reg_ops in ops_not_in_registry:\n reg[non_reg_ops] = {'type': None, 'location': location_tag}\n for n in op_handlers:\n if n in reg:\n self.orig_grads[n] = reg[n][\"type\"]\n reg[\"shap_\"+n] = {\n \"type\": self.custom_grad,\n \"location\": reg[n][\"location\"]\n }\n reg[n][\"type\"] = self.custom_grad\n\n # In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped\n # unfortunately that includes the index of embedding layers so we disable that check 
here\n if hasattr(tf_gradients_impl, \"_IsBackpropagatable\"):\n orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable\n tf_gradients_impl._IsBackpropagatable = lambda tensor: True\n\n # define the computation graph for the attribution values using a custom gradient-like computation\n try:\n out = f()\n finally:\n # reinstate the backpropagatable check\n if hasattr(tf_gradients_impl, \"_IsBackpropagatable\"):\n tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable\n\n # restore the original gradient definitions\n for n in op_handlers:\n if n in reg:\n del reg[\"shap_\"+n]\n reg[n][\"type\"] = self.orig_grads[n]\n for non_reg_ops in ops_not_in_registry:\n del reg[non_reg_ops]\n if not tf.executing_eagerly():\n return out\n else:\n return [v.numpy() for v in out]\n\ndef tensors_blocked_by_false(ops):\n \"\"\" Follows a set of ops assuming their value is False and find blocked Switch paths.\n\n This is used to prune away parts of the model graph that are only used during the training\n phase (like dropout, batch norm, etc.).\n \"\"\"\n blocked = []\n def recurse(op):\n if op.type == \"Switch\":\n blocked.append(op.outputs[1]) # the true path is blocked since we assume the ops we trace are False\n else:\n for out in op.outputs:\n for c in out.consumers():\n recurse(c)\n for op in ops:\n recurse(op)\n\n return blocked\n\ndef backward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist):\n found_ops = []\n op_stack = [op for op in start_ops]\n while len(op_stack) > 0:\n op = op_stack.pop()\n if op.type not in op_type_blacklist and op not in found_ops:\n found_ops.append(op)\n for input in op.inputs:\n if input not in tensor_blacklist:\n op_stack.append(input.op)\n return found_ops\n\ndef forward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist, within_ops):\n found_ops = []\n op_stack = [op for op in start_ops]\n while len(op_stack) > 0:\n op = op_stack.pop()\n if op.type not in op_type_blacklist and op in within_ops and op not in found_ops:\n found_ops.append(op)\n for out in op.outputs:\n if out not in tensor_blacklist:\n for c in out.consumers():\n op_stack.append(c)\n return found_ops\n\n\ndef softmax(explainer, op, *grads):\n \"\"\" Just decompose softmax into its components and recurse, we can handle all of them :)\n\n We assume the 'axis' is the last dimension because the TF codebase swaps the 'axis' to\n the last dimension before the softmax op if 'axis' is not already the last dimension.\n We also don't subtract the max before tf.exp for numerical stability since that might\n mess up the attributions and it seems like TensorFlow doesn't define softmax that way\n (according to the docs)\n \"\"\"\n in0 = op.inputs[0]\n in0_max = tf.reduce_max(in0, axis=-1, keepdims=True, name=\"in0_max\")\n in0_centered = in0 - in0_max\n evals = tf.exp(in0_centered, name=\"custom_exp\")\n rsum = tf.reduce_sum(evals, axis=-1, keepdims=True)\n div = evals / rsum\n\n # mark these as in-between the inputs and outputs\n for op in [evals.op, rsum.op, div.op, in0_centered.op]:\n for t in op.outputs:\n if t.name not in explainer.between_tensors:\n explainer.between_tensors[t.name] = False\n\n out = tf.gradients(div, in0_centered, grad_ys=grads[0])[0]\n\n # remove the names we just added\n for op in [evals.op, rsum.op, div.op, in0_centered.op]:\n for t in op.outputs:\n if explainer.between_tensors[t.name] is False:\n del explainer.between_tensors[t.name]\n\n # rescale to account for our shift by in0_max (which we did for numerical stability)\n xin0,rin0 = tf.split(in0, 2)\n 
xin0_centered,rin0_centered = tf.split(in0_centered, 2)\n delta_in0 = xin0 - rin0\n dup0 = [2] + [1 for i in delta_in0.shape[1:]]\n return tf.where(\n tf.tile(tf.abs(delta_in0), dup0) < 1e-6,\n out,\n out * tf.tile((xin0_centered - rin0_centered) / delta_in0, dup0)\n )\n\ndef maxpool(explainer, op, *grads):\n xin0,rin0 = tf.split(op.inputs[0], 2)\n xout,rout = tf.split(op.outputs[0], 2)\n delta_in0 = xin0 - rin0\n dup0 = [2] + [1 for i in delta_in0.shape[1:]]\n cross_max = tf.maximum(xout, rout)\n diffs = tf.concat([cross_max - rout, xout - cross_max], 0)\n if op.type.startswith(\"shap_\"):\n op.type = op.type[5:]\n xmax_pos,rmax_pos = tf.split(explainer.orig_grads[op.type](op, grads[0] * diffs), 2)\n return tf.tile(tf.where(\n tf.abs(delta_in0) < 1e-7,\n tf.zeros_like(delta_in0),\n (xmax_pos + rmax_pos) / delta_in0\n ), dup0)\n\ndef gather(explainer, op, *grads):\n #params = op.inputs[0]\n indices = op.inputs[1]\n #axis = op.inputs[2]\n var = explainer._variable_inputs(op)\n if var[1] and not var[0]:\n assert len(indices.shape) == 2, \"Only scalar indices supported right now in GatherV2!\"\n\n xin1,rin1 = tf.split(tf.cast(op.inputs[1], tf.float32), 2)\n xout,rout = tf.split(op.outputs[0], 2)\n dup_in1 = [2] + [1 for i in xin1.shape[1:]]\n dup_out = [2] + [1 for i in xout.shape[1:]]\n delta_in1_t = tf.tile(xin1 - rin1, dup_in1)\n out_sum = tf.reduce_sum(grads[0] * tf.tile(xout - rout, dup_out), list(range(len(indices.shape), len(grads[0].shape))))\n if op.type == \"ResourceGather\":\n return [None, tf.where(\n tf.abs(delta_in1_t) < 1e-6,\n tf.zeros_like(delta_in1_t),\n out_sum / delta_in1_t\n )]\n return [None, tf.where(\n tf.abs(delta_in1_t) < 1e-6,\n tf.zeros_like(delta_in1_t),\n out_sum / delta_in1_t\n ), None]\n elif var[0] and not var[1]:\n if op.type.startswith(\"shap_\"):\n op.type = op.type[5:]\n return [explainer.orig_grads[op.type](op, grads[0]), None] # linear in this case\n else:\n assert False, \"Axis not yet supported to be varying for gather op!\"\n\ndef linearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):\n def handler(explainer, op, *grads):\n var = explainer._variable_inputs(op)\n if var[input_ind0] and not var[input_ind1]:\n return linearity_1d_handler(input_ind0, explainer, op, *grads)\n elif var[input_ind1] and not var[input_ind0]:\n return linearity_1d_handler(input_ind1, explainer, op, *grads)\n elif var[input_ind0] and var[input_ind1]:\n return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)\n else:\n return [None for _ in op.inputs] # no inputs vary, we must be hidden by a switch function\n return handler\n\ndef nonlinearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):\n def handler(explainer, op, *grads):\n var = explainer._variable_inputs(op)\n if var[input_ind0] and not var[input_ind1]:\n return nonlinearity_1d_handler(input_ind0, explainer, op, *grads)\n elif var[input_ind1] and not var[input_ind0]:\n return nonlinearity_1d_handler(input_ind1, explainer, op, *grads)\n elif var[input_ind0] and var[input_ind1]:\n return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)\n else:\n return [None for _ in op.inputs] # no inputs vary, we must be hidden by a switch function\n return handler\n\ndef nonlinearity_1d(input_ind):\n def handler(explainer, op, *grads):\n return nonlinearity_1d_handler(input_ind, explainer, op, *grads)\n return handler\n\ndef nonlinearity_1d_handler(input_ind, explainer, op, *grads):\n # make sure only the given input varies\n op_inputs = op.inputs\n if op_inputs is 
None:\n op_inputs = op.outputs[0].op.inputs\n\n for i in range(len(op_inputs)):\n if i != input_ind:\n assert not explainer._variable_inputs(op)[i], str(i) + \"th input to \" + op.name + \" cannot vary!\"\n\n xin0, rin0 = tf.split(op_inputs[input_ind], 2)\n xout, rout = tf.split(op.outputs[input_ind], 2)\n delta_in0 = xin0 - rin0\n if delta_in0.shape is None:\n dup0 = [2, 1]\n else:\n dup0 = [2] + [1 for i in delta_in0.shape[1:]]\n out = [None for _ in op_inputs]\n if op.type.startswith(\"shap_\"):\n op.type = op.type[5:]\n orig_grad = explainer.orig_grads[op.type](op, grads[0])\n out[input_ind] = tf.where(\n tf.tile(tf.abs(delta_in0), dup0) < 1e-6,\n orig_grad[input_ind] if len(op_inputs) > 1 else orig_grad,\n grads[0] * tf.tile((xout - rout) / delta_in0, dup0)\n )\n return out\n\ndef nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads):\n assert input_ind0 == 0 and input_ind1 == 1, \"TODO: Can't yet handle double inputs that are not first!\"\n xout,rout = tf.split(op.outputs[0], 2)\n in0 = op.inputs[input_ind0]\n in1 = op.inputs[input_ind1]\n xin0,rin0 = tf.split(in0, 2)\n xin1,rin1 = tf.split(in1, 2)\n delta_in0 = xin0 - rin0\n delta_in1 = xin1 - rin1\n dup0 = [2] + [1 for i in delta_in0.shape[1:]]\n out10 = op_func(xin0, rin1)\n out01 = op_func(rin0, xin1)\n out11,out00 = xout,rout\n out0 = 0.5 * (out11 - out01 + out10 - out00)\n out0 = grads[0] * tf.tile(out0 / delta_in0, dup0)\n out1 = 0.5 * (out11 - out10 + out01 - out00)\n out1 = grads[0] * tf.tile(out1 / delta_in1, dup0)\n\n # Avoid divide by zero nans\n out0 = tf.where(tf.abs(tf.tile(delta_in0, dup0)) < 1e-7, tf.zeros_like(out0), out0)\n out1 = tf.where(tf.abs(tf.tile(delta_in1, dup0)) < 1e-7, tf.zeros_like(out1), out1)\n\n # see if due to broadcasting our gradient shapes don't match our input shapes\n if (np.any(np.array(out1.shape) != np.array(in1.shape))):\n broadcast_index = np.where(np.array(out1.shape) != np.array(in1.shape))[0][0]\n out1 = tf.reduce_sum(out1, axis=broadcast_index, keepdims=True)\n elif (np.any(np.array(out0.shape) != np.array(in0.shape))):\n broadcast_index = np.where(np.array(out0.shape) != np.array(in0.shape))[0][0]\n out0 = tf.reduce_sum(out0, axis=broadcast_index, keepdims=True)\n\n return [out0, out1]\n\ndef linearity_1d(input_ind):\n def handler(explainer, op, *grads):\n return linearity_1d_handler(input_ind, explainer, op, *grads)\n return handler\n\ndef linearity_1d_handler(input_ind, explainer, op, *grads):\n # make sure only the given input varies (negative means only that input cannot vary, and is measured from the end of the list)\n for i in range(len(op.inputs)):\n if i != input_ind:\n assert not explainer._variable_inputs(op)[i], str(i) + \"th input to \" + op.name + \" cannot vary!\"\n if op.type.startswith(\"shap_\"):\n op.type = op.type[5:]\n return explainer.orig_grads[op.type](op, *grads)\n\ndef linearity_with_excluded(input_inds):\n def handler(explainer, op, *grads):\n return linearity_with_excluded_handler(input_inds, explainer, op, *grads)\n return handler\n\ndef linearity_with_excluded_handler(input_inds, explainer, op, *grads):\n # make sure the given inputs don't vary (negative is measured from the end of the list)\n for i in range(len(op.inputs)):\n if i in input_inds or i - len(op.inputs) in input_inds:\n assert not explainer._variable_inputs(op)[i], str(i) + \"th input to \" + op.name + \" cannot vary!\"\n if op.type.startswith(\"shap_\"):\n op.type = op.type[5:]\n return explainer.orig_grads[op.type](op, *grads)\n\ndef passthrough(explainer, op, 
*grads):\n if op.type.startswith(\"shap_\"):\n op.type = op.type[5:]\n return explainer.orig_grads[op.type](op, *grads)\n\ndef break_dependence(explainer, op, *grads):\n \"\"\" This function name is used to break attribution dependence in the graph traversal.\n\n These operation types may be connected above input data values in the graph but their outputs\n don't depend on the input values (for example they just depend on the shape).\n \"\"\"\n return [None for _ in op.inputs]\n\n\nop_handlers = {}\n\n# ops that are always linear\nop_handlers[\"Identity\"] = passthrough\nop_handlers[\"StridedSlice\"] = passthrough\nop_handlers[\"Squeeze\"] = passthrough\nop_handlers[\"ExpandDims\"] = passthrough\nop_handlers[\"Pack\"] = passthrough\nop_handlers[\"BiasAdd\"] = passthrough\nop_handlers[\"Unpack\"] = passthrough\nop_handlers[\"Add\"] = passthrough\nop_handlers[\"Sub\"] = passthrough\nop_handlers[\"Merge\"] = passthrough\nop_handlers[\"Sum\"] = passthrough\nop_handlers[\"Mean\"] = passthrough\nop_handlers[\"Cast\"] = passthrough\nop_handlers[\"Transpose\"] = passthrough\nop_handlers[\"Enter\"] = passthrough\nop_handlers[\"Exit\"] = passthrough\nop_handlers[\"NextIteration\"] = passthrough\nop_handlers[\"Tile\"] = passthrough\nop_handlers[\"TensorArrayScatterV3\"] = passthrough\nop_handlers[\"TensorArrayReadV3\"] = passthrough\nop_handlers[\"TensorArrayWriteV3\"] = passthrough\n\n# NEW\nop_handlers[\"AddV2\"] = passthrough\nop_handlers[\"StatelessWhile\"] = passthrough\nop_handlers[\"TensorListStack\"] = passthrough\nop_handlers[\"TensorListFromTensor\"] = passthrough\n\n\n# ops that don't pass any attributions to their inputs\nop_handlers[\"Shape\"] = break_dependence\nop_handlers[\"RandomUniform\"] = break_dependence\nop_handlers[\"ZerosLike\"] = break_dependence\n#op_handlers[\"StopGradient\"] = break_dependence # this allows us to stop attributions when we want to (like softmax re-centering)\n\n# ops that are linear and only allow a single input to vary\nop_handlers[\"Reshape\"] = linearity_1d(0)\nop_handlers[\"Pad\"] = linearity_1d(0)\nop_handlers[\"ReverseV2\"] = linearity_1d(0)\nop_handlers[\"ConcatV2\"] = linearity_with_excluded([-1])\nop_handlers[\"Conv2D\"] = linearity_1d(0)\nop_handlers[\"Switch\"] = linearity_1d(0)\nop_handlers[\"AvgPool\"] = linearity_1d(0)\nop_handlers[\"FusedBatchNorm\"] = linearity_1d(0)\n\n# ops that are nonlinear and only allow a single input to vary\nop_handlers[\"Relu\"] = nonlinearity_1d(0)\nop_handlers[\"Elu\"] = nonlinearity_1d(0)\nop_handlers[\"Sigmoid\"] = nonlinearity_1d(0)\nop_handlers[\"Tanh\"] = nonlinearity_1d(0)\nop_handlers[\"Softplus\"] = nonlinearity_1d(0)\nop_handlers[\"Exp\"] = nonlinearity_1d(0)\nop_handlers[\"ClipByValue\"] = nonlinearity_1d(0)\nop_handlers[\"Rsqrt\"] = nonlinearity_1d(0)\nop_handlers[\"Square\"] = nonlinearity_1d(0)\nop_handlers[\"Max\"] = nonlinearity_1d(0)\n\n# NEW\nop_handlers[\"Sin\"] = nonlinearity_1d(0)\n\n# ops that are nonlinear and allow two inputs to vary\nop_handlers[\"SquaredDifference\"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: (x - y) * (x - y))\nop_handlers[\"Minimum\"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.minimum(x, y))\nop_handlers[\"Maximum\"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.maximum(x, y))\n\n# ops that allow up to two inputs to vary and are linear when only one input varies\nop_handlers[\"Mul\"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x * y)\nop_handlers[\"RealDiv\"] =
linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x / y)\nop_handlers[\"MatMul\"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.matmul(x, y))\n\n# ops that need their own custom attribution functions\nop_handlers[\"GatherV2\"] = gather\nop_handlers[\"ResourceGather\"] = gather\nop_handlers[\"MaxPool\"] = maxpool\nop_handlers[\"Softmax\"] = softmax\n\n\n# TODO items\n# TensorArrayGatherV3\n# Max\n# TensorArraySizeV3\n# Range\n"
] | [
[
"tensorflow.python.eager.backprop.record_gradient",
"tensorflow.reduce_max",
"numpy.argsort",
"tensorflow.matmul",
"tensorflow.abs",
"tensorflow.keras.backend.learning_phase",
"tensorflow.python.eager.backprop._record_gradient",
"tensorflow.concat",
"tensorflow.executing_eagerly",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.minimum",
"numpy.abs",
"tensorflow.GradientTape",
"tensorflow.constant",
"numpy.zeros",
"tensorflow.zeros_like",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.tile",
"tensorflow.keras.Model",
"tensorflow.exp",
"tensorflow.keras.backend.set_learning_phase",
"numpy.array",
"numpy.concatenate",
"tensorflow.maximum"
]
] |
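The gradient handlers in the row above all reduce to the DeepLIFT-style rescale rule: along the path from inputs to output, a nonlinearity's local gradient is swapped for the secant slope between the actual input and the reference input, except where the two nearly coincide. A standalone sketch of that substitution (the names here are illustrative; the real handlers additionally split and re-tile the doubled batch):

    import tensorflow as tf

    def rescale(delta_in, delta_out, exact_grad, eps=1e-6):
        # Use the finite-difference slope delta_out / delta_in wherever the
        # input actually moved between sample and reference; fall back to
        # the exact gradient where it did not, avoiding a 0/0.
        return tf.where(tf.abs(delta_in) < eps, exact_grad, delta_out / delta_in)

This is also why the explainer runs a doubled batch (the sample stacked on its references) through the graph: the tf.split(..., 2) inside each handler recovers the x and r halves needed to form delta_in and delta_out at every op.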
HustQBW/Single-Object-Localization | [
"3a6bd87cd75543f55eb3eed12b6d09475f05b8fd"
] | [
"train.py"
] | [
"from dataset import tiny_dataset\nfrom bbox_codec import bbox_encode\nfrom resnet50_base import Localization_net2\nfrom torch.utils.data import DataLoader,random_split\nimport torch as t\nimport tqdm\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.nn as nn\nimport torch.optim as optim\nimport argparse\nfrom loss import Loss_for_localization\nfrom evaluate import compute_three_acc\nimport os\ndef parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--lr',help='learning rate',type=float,default=1e-2,dest='lr')\n parser.add_argument('--batch-size',help='batchsize',type=int,default=32,dest='batch_size')\n parser.add_argument('--lr-decay',help='the decay of lr',type=float,default=0.1,dest='lr_decay')\n parser.add_argument('--root',help='root directory of dataset',type=str,\n default=r'E:\\BS_learning\\4_1\\CV_basis\\experiment\\2\\tiny_vid',dest='root')\n parser.add_argument('--weight-decay',help='weight decay of optimizer',type=float,\n default=1e-5,dest='weight_decay')\n parser.add_argument('--epochs',help='set the num of epochs',type=int,default=100)\n parser.add_argument('--log-dir',help='tensorboard log dir',type=str,required=True)\n parser.add_argument('--save-file-name', help='the pth file name', type=str,required=True)\n parser.add_argument('--class-weight',help='the weight of classification of the loss',default=1,type=int)\n parser.add_argument('--regre-weight', help='the weight of regression of the loss', default=2,type=int)\n return parser\n\ndef weight_init(net):\n for name,child in net.named_children():\n if name == 'feature_extraction':\n continue\n\n if isinstance(child,nn.Conv2d):\n nn.init.kaiming_normal_(child.weight)\n if child.bias != None:\n nn.init.zeros_(child.bias)\n\n elif isinstance(child,nn.Linear):\n nn.init.kaiming_normal_(child.weight)\n if child.bias != None:\n nn.init.zeros_(child.bias)\n return net\n\ndef train():\n args = parser().parse_args()\n t.manual_seed(777)\n t.cuda.manual_seed(777)\n\n dataset = tiny_dataset(root=args.root)\n train_set,val_set = random_split(dataset=dataset,lengths=[150*5,30*5],\n generator=t.Generator().manual_seed(777))\n\n train_loader = DataLoader(dataset=train_set,batch_size=args.batch_size,shuffle=True,num_workers=2)\n\n val_loader = DataLoader(dataset=val_set,batch_size=1,shuffle=False,num_workers=0)\n\n print('establish the net ...')\n net = Localization_net2(class_num=5).cuda()\n\n print('initialize the net')\n net = weight_init(net=net)\n high_lr_list = []\n low_lr_list = []\n for name,param in net.named_parameters():\n if 'feature_extraction' in name:\n low_lr_list.append(param)\n else:\n high_lr_list.append(param)\n\n optimizer = optim.SGD([{'params':low_lr_list,'lr':0.1*args.lr},{'params':high_lr_list}],\n lr=args.lr,weight_decay=args.weight_decay,momentum=0.9)\n # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,\n # mode='min', patience=2,factor=args.lr_decay)\n\n\n writer = SummaryWriter(log_dir=args.log_dir,comment='curves_log')\n criterion = Loss_for_localization().cuda()\n\n for i in tqdm.tqdm(range(args.epochs)):\n t_loss = 0.\n tc_acc = 0.\n tr_acc = 0.\n t_acc = 0.\n\n v_loss = 0.\n vc_acc = 0.\n vr_acc = 0.\n v_acc = 0.\n\n print('\\n%dth epoch'%(i+1))\n\n\n if i+1 == args.epochs//4:\n optimizer.param_groups[0]['lr'] *= args.lr_decay\n optimizer.param_groups[1]['lr'] *= args.lr_decay\n\n if i+1 == args.epochs//2:\n optimizer.param_groups[0]['lr'] *= args.lr_decay\n optimizer.param_groups[1]['lr'] *= args.lr_decay\n\n if i+1 == 3*args.epochs//4:\n 
optimizer.param_groups[0]['lr'] *= args.lr_decay\n optimizer.param_groups[1]['lr'] *= args.lr_decay\n\n for item in train_loader:\n tc_acc_num = 0\n tr_acc_num = 0\n t_acc_num = 0\n net.train()\n img = item['img'].cuda()\n label = item['label'].cuda()\n bbox = item['bbox'].cuda()\n objects, scores, locs = net(img)\n\n\n gt = bbox_encode(bbox=bbox,feature_map_size=(4,4),img_size=(128,128)).cuda()\n\n loss = criterion(objects,scores,locs,label,gt,args.regre_weight,0.5,args.class_weight)\n\n t_loss += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n for j in range(img.size()[0]):\n a,b,c = compute_three_acc(objects=objects[j].view(1,*objects[j].size()),\n score=scores[j].view(1,*scores[j].size()), loc=locs[j].view(1,*locs[j].size()),\n label=label[j].view(1,*label[j].size()), bbox=bbox[j].view(1,*bbox[j].size()))\n tc_acc_num += a\n tr_acc_num += b\n t_acc_num += c\n\n tc_acc += tc_acc_num/float(img.size()[0])\n tr_acc += tr_acc_num / float(img.size()[0])\n t_acc += t_acc_num / float(img.size()[0])\n\n\n\n\n net.eval()\n with t.no_grad():\n for item2 in val_loader:\n img = item2['img'].cuda()\n label = item2['label'].cuda()\n bbox = item2['bbox'].cuda()\n objects, scores, locs = net(img)\n class_acc,regression_acc,acc = compute_three_acc(objects=objects,score=scores,\n loc=locs,label=label,bbox=bbox)\n gt = bbox_encode(bbox=bbox, feature_map_size=(4, 4), img_size=(128, 128)).cuda()\n\n vc_acc += class_acc\n vr_acc += regression_acc\n v_acc += acc\n\n loss = criterion(objects, scores, locs,label, gt,args.regre_weight,0.5,args.class_weight)\n v_loss +=loss.item()\n\n v_loss /= len(val_loader)\n vc_acc /= len(val_loader)\n vr_acc /= len(val_loader)\n v_acc /= len(val_loader)\n\n # scheduler.step(v_loss)\n\n print('train_loss: %.5f val_loss : %.5f' % (t_loss/len(train_loader),v_loss))\n\n writer.add_scalar('low_lr_curve', optimizer.param_groups[0][\"lr\"], i + 1)\n writer.add_scalar('high_lr_curve', optimizer.param_groups[1][\"lr\"], i + 1)\n writer.add_scalars('loss', {'Train':t_loss / len(train_loader)}, i+1)\n writer.add_scalars('loss', {'Val':v_loss}, i+1)\n writer.add_scalars('train_acc', {'class_acc': tc_acc/ len(train_loader)}, i + 1)\n writer.add_scalars('train_acc', {'regression_acc': tr_acc/ len(train_loader)}, i + 1)\n writer.add_scalars('train_acc', {'two_task_acc': t_acc/ len(train_loader)}, i + 1)\n writer.add_scalars('val_acc',{'class_acc':vc_acc},i+1)\n writer.add_scalars('val_acc', {'regression_acc': vr_acc}, i + 1)\n writer.add_scalars('val_acc', {'two_task_acc': v_acc}, i + 1)\n\n if optimizer.param_groups[0]['lr'] <= 1e-8:\n break\n t.save(net,os.path.join(args.log_dir,args.save_file_name + 'epoch%d.pth'%i))\n\n\n\n\nif __name__ == '__main__':\n train()"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.init.kaiming_normal_",
"torch.Generator",
"torch.optim.SGD",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.no_grad",
"torch.nn.init.zeros_",
"torch.utils.tensorboard.SummaryWriter"
]
] |
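The schedule hand-rolled in train() above multiplies both parameter groups' learning rates by --lr-decay at 1/4, 1/2 and 3/4 of the epoch budget, so the pretrained backbone always runs at a tenth of the head's current rate. The same schedule can be written with PyTorch's built-in MultiStepLR; a minimal sketch under the script's defaults, with an nn.Linear standing in for Localization_net2:

    import torch.nn as nn
    import torch.optim as optim
    from torch.optim.lr_scheduler import MultiStepLR

    net = nn.Linear(8, 5)  # stand-in for the real localization network
    optimizer = optim.SGD(net.parameters(), lr=1e-2, momentum=0.9, weight_decay=1e-5)

    epochs = 100
    scheduler = MultiStepLR(optimizer,
                            milestones=[epochs // 4, epochs // 2, 3 * epochs // 4],
                            gamma=0.1)  # the script's default --lr-decay

    for epoch in range(epochs):
        # ... one epoch of training and validation would go here ...
        scheduler.step()

Because the decay multiplies every param group by the same factor, the 10:1 ratio between head and backbone rates set up in optim.SGD([...]) is preserved for the whole run.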
Geodan/building-boundary | [
"d0eb88d99743af917568131e8609f481b10e4520"
] | [
"building_boundary/footprint.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Chris Lucas\n\"\"\"\n\nimport math\n\nimport numpy as np\nfrom shapely.geometry import (\n Polygon, MultiPolygon, LineString, MultiLineString, LinearRing\n)\nfrom shapely import wkt\n\nfrom building_boundary import utils\n\n\ndef line_orientations(lines):\n \"\"\"\n Computes the orientations of the lines.\n\n Parameters\n ----------\n lines : list of (2x2) array\n The lines defined by the coordinates two points.\n\n Returns\n -------\n orientations : list of float\n The orientations of the lines in radians from\n 0 to pi (east to west counterclockwise)\n 0 to -pi (east to west clockwise)\n \"\"\"\n orientations = []\n for l in lines:\n dx, dy = l[0] - l[1]\n orientation = math.atan2(dy, dx)\n if not any([np.isclose(orientation, o) for o in orientations]):\n orientations.append(orientation)\n return orientations\n\n\ndef geometry_orientations(geom):\n \"\"\"\n Computes the orientations of the lines of a geometry (Polygon,\n MultiPolygon, LineString, MultiLineString, or LinearRing).\n\n Parameters\n ----------\n geom : Polygon, MultiPolygon, LineString, MultiLineString, or LinearRing\n The geometry\n\n Returns\n -------\n orientations : list of float\n The orientations of the lines of the geometry in radians from\n 0 to pi (east to west counterclockwise)\n 0 to -pi (east to west clockwise)\n \"\"\"\n orientations = []\n if type(geom) == Polygon:\n lines = utils.create_pairs(geom.exterior.coords[:-1])\n orientations = line_orientations(lines)\n elif type(geom) == MultiPolygon:\n for p in geom:\n lines = utils.create_pairs(p.exterior.coords[:-1])\n orientations.extend(line_orientations(lines))\n elif type(geom) == LineString:\n if geom.coords[0] == geom.coords[-1]:\n lines = utils.create_pairs(geom.coords[:-1])\n else:\n lines = list(utils.create_pairs(geom.coords))[:-1]\n orientations = line_orientations(lines)\n elif type(geom) == MultiLineString:\n for l in geom:\n if l.coords[0] == l.coords[-1]:\n lines = utils.create_pairs(l.coords[:-1])\n else:\n lines = list(utils.create_pairs(l.coords))[:-1]\n orientations.extend(line_orientations(lines))\n elif type(geom) == LinearRing:\n lines = utils.create_pairs(geom.coords[:-1])\n orientations = line_orientations(lines)\n else:\n raise TypeError('Invalid geometry type. Expects Polygon, '\n 'MultiPolygon, LineString, MultiLineString, '\n 'or LinearRing.')\n return orientations\n\n\ndef compute_orientations(footprint_wkt):\n \"\"\"\n Computes the orientations of the footprint.\n\n Parameters\n ----------\n footprint_wkt : string\n The footprint geometry defined by a WKT string.\n\n Returns\n -------\n orientations : list of float\n The orientations of the lines of the geometry in radians from\n 0 to pi (east to west counterclockwise)\n 0 to -pi (east to west clockwise)\n \"\"\"\n footprint_geom = wkt.loads(footprint_wkt)\n orientations = geometry_orientations(footprint_geom)\n return orientations\n"
] | [
[
"numpy.isclose"
]
] |
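line_orientations in the row above reduces each edge to an angle with atan2 and keeps an angle only if it is not numerically close (np.isclose) to one already collected, so a rectangle contributes one orientation per distinct edge direction. A hypothetical usage of the module's public entry point; the expected values follow from applying atan2 to each edge of this ring, assuming utils.create_pairs walks consecutive vertex pairs:

    from building_boundary.footprint import compute_orientations

    # Axis-aligned 2x1 rectangle: its four edges point roughly west, south,
    # east and north, so expect values near [pi, -pi/2, 0.0, pi/2], in the
    # order the ring is wound.
    print(compute_orientations('POLYGON ((0 0, 2 0, 2 1, 0 1, 0 0))'))

Note that opposite directions (for example 0 and pi) are kept distinct: the dedup compares raw angles, not angles modulo pi.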
mangoyuan/Unifed-Seg3d | [
"74c82464dbe901cf18e38afb0e1b74cc159a8850"
] | [
"nnunet/training/network_training/network_trainer.py"
] | [
"from _warnings import warn\nimport matplotlib\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom sklearn.model_selection import KFold\nmatplotlib.use(\"agg\")\nfrom time import time, sleep\nimport torch\nimport numpy as np\nfrom torch.optim import lr_scheduler\nimport matplotlib.pyplot as plt\nimport sys\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport torch.backends.cudnn as cudnn\nfrom abc import abstractmethod\nfrom datetime import datetime\n\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\n\nclass NetworkTrainer(object):\n def __init__(self, deterministic=True, fp16=False):\n \"\"\"\n A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such\n as the training loop, tracking of training and validation losses (and the target metric if you implement it)\n Training can be terminated early if the validation loss (or the target metric if implemented) do not improve\n anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth\n results.\n\n What you need to override:\n - __init__\n - initialize\n - run_online_evaluation (optional)\n - finish_online_evaluation (optional)\n - validate\n - predict_test_case\n \"\"\"\n np.random.seed(12345)\n torch.manual_seed(12345)\n torch.cuda.manual_seed_all(12345)\n self.fp16 = fp16\n\n if deterministic:\n cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else:\n cudnn.deterministic = False\n torch.backends.cudnn.benchmark = True\n\n ################# SET THESE IN self.initialize() ###################################\n self.network = None\n self.optimizer = None\n self.lr_scheduler = None\n self.tr_gen = self.val_gen = None\n self.was_initialized = False\n\n ################# SET THESE IN INIT ################################################\n self.output_folder = None\n self.fold = None\n self.loss = None\n self.dataset_directory = None\n\n ################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################\n self.dataset = None # these can be None for inference mode\n self.dataset_tr = self.dataset_val = None # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split\n\n ################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED #####################\n self.patience = 50\n self.val_eval_criterion_alpha = 0.9 # alpha * old + (1-alpha) * new\n # if this is too low then the moving average will be too noisy and the training may terminate early. 
If it is\n # too high the training will take forever\n self.train_loss_MA_alpha = 0.93 # alpha * old + (1-alpha) * new\n self.train_loss_MA_eps = 5e-4 # new MA must be at least this much better (smaller)\n self.save_every = 50\n self.save_latest_only = True\n self.max_num_epochs = 1000\n self.num_batches_per_epoch = 250\n self.num_val_batches_per_epoch = 50\n self.also_val_in_tr_mode = False\n self.lr_threshold = 1e-6 # the network will not terminate training if the lr is still above this threshold\n\n ################# LEAVE THESE ALONE ################################################\n self.val_eval_criterion_MA = None\n self.train_loss_MA = None\n self.best_val_eval_criterion_MA = None\n self.best_MA_tr_loss_for_patience = None\n self.best_epoch_based_on_MA_tr_loss = None\n self.all_tr_losses = []\n self.all_val_losses = []\n self.all_val_losses_tr_mode = []\n self.all_val_eval_metrics = [] # does not have to be used\n self.epoch = 0\n self.log_file = None\n self.deterministic = deterministic\n\n\n @abstractmethod\n def initialize(self, training=True):\n \"\"\"\n create self.output_folder\n\n modify self.output_folder if you are doing cross-validation (one folder per fold)\n\n set self.tr_gen and self.val_gen\n \n set self.network, self.optimizer and self.lr_scheduler\n \n finally set self.was_initialized to True\n :param training:\n :return:\n \"\"\"\n\n @abstractmethod\n def load_dataset(self):\n pass\n\n def do_split(self):\n \"\"\"\n This is a suggestion for if your dataset is a dictionary (my personal standard)\n :return:\n \"\"\"\n splits_file = join(self.dataset_directory, \"splits_final.pkl\")\n\n if not isfile(splits_file):\n self.print_to_log_file(\"Creating new split...\")\n splits = []\n all_keys_sorted = np.sort(list(self.dataset.keys()))\n kfold = KFold(n_splits=5, shuffle=True, random_state=12345)\n for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):\n train_keys = np.array(all_keys_sorted)[train_idx]\n test_keys = np.array(all_keys_sorted)[test_idx]\n splits.append(OrderedDict())\n splits[-1]['train'] = train_keys\n splits[-1]['val'] = test_keys\n save_pickle(splits, splits_file)\n\n splits = load_pickle(splits_file)\n\n if self.fold == \"all\":\n tr_keys = val_keys = list(self.dataset.keys())\n else:\n tr_keys = splits[self.fold]['train']\n val_keys = splits[self.fold]['val']\n\n tr_keys.sort()\n val_keys.sort()\n\n self.dataset_tr = OrderedDict()\n for i in tr_keys:\n self.dataset_tr[i] = self.dataset[i]\n\n self.dataset_val = OrderedDict()\n for i in val_keys:\n self.dataset_val[i] = self.dataset[i]\n\n def plot_progress(self):\n \"\"\"\n Should probably by improved\n :return:\n \"\"\"\n try:\n font = {'weight': 'normal',\n 'size': 18}\n\n matplotlib.rc('font', **font)\n\n fig = plt.figure(figsize=(30, 24))\n ax = fig.add_subplot(111)\n ax2 = ax.twinx()\n\n x_values = list(range(self.epoch + 1))\n\n ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label=\"loss_tr\")\n\n ax.plot(x_values, self.all_val_losses, color='r', ls='-', label=\"loss_val, train=False\")\n\n if len(self.all_val_losses_tr_mode) > 0:\n ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label=\"loss_val, train=True\")\n if len(self.all_val_eval_metrics) == len(self.all_val_losses):\n ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label=\"evaluation metric\")\n\n ax.set_xlabel(\"epoch\")\n ax.set_ylabel(\"loss\")\n ax2.set_ylabel(\"evaluation metric\")\n ax.legend()\n ax2.legend(loc=9)\n\n fig.savefig(join(self.output_folder, 
\"progress.png\"))\n plt.close()\n except IOError:\n self.print_to_log_file(\"failed to plot: \", sys.exc_info())\n\n def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):\n\n timestamp = time()\n dt_object = datetime.fromtimestamp(timestamp)\n\n if add_timestamp:\n args = (\"%s:\" % dt_object, *args)\n\n if self.log_file is None:\n maybe_mkdir_p(self.output_folder)\n timestamp = datetime.now()\n self.log_file = join(self.output_folder, \"training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt\" %\n (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second))\n with open(self.log_file, 'w') as f:\n f.write(\"Starting... \\n\")\n successful = False\n max_attempts = 5\n ctr = 0\n while not successful and ctr < max_attempts:\n try:\n with open(self.log_file, 'a+') as f:\n for a in args:\n f.write(str(a))\n f.write(\" \")\n f.write(\"\\n\")\n successful = True\n except IOError:\n print(\"%s: failed to log: \" % datetime.fromtimestamp(timestamp), sys.exc_info())\n sleep(0.5)\n ctr += 1\n if also_print_to_console:\n print(*args)\n\n def save_checkpoint(self, fname, save_optimizer=True):\n start_time = time()\n state_dict = self.network.state_dict()\n for key in state_dict.keys():\n state_dict[key] = state_dict[key].cpu()\n lr_sched_state_dct = None\n if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):\n lr_sched_state_dct = self.lr_scheduler.state_dict()\n for key in lr_sched_state_dct.keys():\n lr_sched_state_dct[key] = lr_sched_state_dct[key]\n if save_optimizer:\n optimizer_state_dict = self.optimizer.state_dict()\n else:\n optimizer_state_dict = None\n\n self.print_to_log_file(\"saving checkpoint...\")\n torch.save({\n 'epoch': self.epoch + 1,\n 'state_dict': state_dict,\n 'optimizer_state_dict': optimizer_state_dict,\n 'lr_scheduler_state_dict': lr_sched_state_dct,\n 'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode,\n self.all_val_eval_metrics)},\n fname)\n self.print_to_log_file(\"done, saving took %.2f seconds\" % (time() - start_time))\n\n def load_best_checkpoint(self, train=True):\n if self.fold is None:\n raise RuntimeError(\"Cannot load best checkpoint if self.fold is None\")\n self.load_checkpoint(join(self.output_folder, \"model_best.model\"), train=train)\n\n def load_latest_checkpoint(self, train=True):\n if isfile(join(self.output_folder, \"model_final_checkpoint.model\")):\n return self.load_checkpoint(join(self.output_folder, \"model_final_checkpoint.model\"), train=train)\n if isfile(join(self.output_folder, \"model_latest.model\")):\n return self.load_checkpoint(join(self.output_folder, \"model_latest.model\"), train=train)\n all_checkpoints = [i for i in os.listdir(self.output_folder) if i.endswith(\".model\") and i.find(\"_ep_\") != -1]\n if len(all_checkpoints) == 0:\n return self.load_best_checkpoint(train=train)\n corresponding_epochs = [int(i.split(\"_\")[-1].split(\".\")[0]) for i in all_checkpoints]\n checkpoint = all_checkpoints[np.argmax(corresponding_epochs)]\n self.load_checkpoint(join(self.output_folder, checkpoint), train=train)\n\n def load_checkpoint(self, fname, train=True):\n self.print_to_log_file(\"loading checkpoint\", fname, \"train=\", train)\n if not self.was_initialized:\n self.initialize(train)\n saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device()))\n self.load_checkpoint_ram(saved_model, train)\n\n def load_checkpoint_ram(self, saved_model, train=True):\n 
\"\"\"\n used for if the checkpoint is already in ram\n :param saved_model:\n :param train:\n :return:\n \"\"\"\n if not self.was_initialized:\n self.initialize(train)\n\n new_state_dict = OrderedDict()\n curr_state_dict_keys = list(self.network.state_dict().keys())\n # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not\n # match. Use heuristic to make it match\n for k, value in saved_model['state_dict'].items():\n key = k\n if key not in curr_state_dict_keys:\n key = key[7:]\n new_state_dict[key] = value\n self.network.load_state_dict(new_state_dict)\n self.epoch = saved_model['epoch']\n if train:\n optimizer_state_dict = saved_model['optimizer_state_dict']\n if optimizer_state_dict is not None:\n self.optimizer.load_state_dict(optimizer_state_dict)\n if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])\n\n self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = saved_model['plot_stuff']\n\n def _maybe_init_amp(self):\n # we use fp16 for training only, not inference\n if self.fp16:\n if amp is not None:\n self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level=\"O1\")\n else:\n self.print_to_log_file(\"WARNING: FP16 training was requested but nvidia apex is not installed. \"\n \"Install it from https://github.com/NVIDIA/apex\")\n\n def run_training(self):\n torch.cuda.empty_cache()\n\n self._maybe_init_amp()\n\n if cudnn.benchmark and cudnn.deterministic:\n warn(\"torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. \"\n \"But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! 
\"\n \"If you want deterministic then set benchmark=False\")\n\n maybe_mkdir_p(self.output_folder)\n\n if not self.was_initialized:\n self.initialize(True)\n\n while self.epoch < self.max_num_epochs:\n self.print_to_log_file(\"\\nepoch: \", self.epoch)\n epoch_start_time = time()\n train_losses_epoch = []\n\n # train one epoch\n self.network.train()\n for b in range(self.num_batches_per_epoch):\n l = self.run_iteration(self.tr_gen, True)\n train_losses_epoch.append(l)\n\n self.all_tr_losses.append(np.mean(train_losses_epoch))\n self.print_to_log_file(\"train loss : %.4f\" % self.all_tr_losses[-1])\n\n with torch.no_grad():\n # validation with train=False\n self.network.eval()\n val_losses = []\n for b in range(self.num_val_batches_per_epoch):\n l = self.run_iteration(self.val_gen, False, True)\n val_losses.append(l)\n self.all_val_losses.append(np.mean(val_losses))\n self.print_to_log_file(\"val loss (train=False): %.4f\" % self.all_val_losses[-1])\n\n if self.also_val_in_tr_mode:\n self.network.train()\n # validation with train=True\n val_losses = []\n for b in range(self.num_val_batches_per_epoch):\n l = self.run_iteration(self.val_gen, False)\n val_losses.append(l)\n self.all_val_losses_tr_mode.append(np.mean(val_losses))\n self.print_to_log_file(\"val loss (train=True): %.4f\" % self.all_val_losses_tr_mode[-1])\n\n epoch_end_time = time()\n\n self.update_train_loss_MA() # needed for lr scheduler and stopping of training\n\n continue_training = self.on_epoch_end()\n if not continue_training:\n # allows for early stopping\n break\n\n self.epoch += 1\n self.print_to_log_file(\"This epoch took %f s\\n\" % (epoch_end_time-epoch_start_time))\n\n self.save_checkpoint(join(self.output_folder, \"model_final_checkpoint.model\"))\n # now we can delete latest as it will be identical with final\n if isfile(join(self.output_folder, \"model_latest.model\")):\n os.remove(join(self.output_folder, \"model_latest.model\"))\n if isfile(join(self.output_folder, \"model_latest.model.pkl\")):\n os.remove(join(self.output_folder, \"model_latest.model.pkl\"))\n\n def maybe_update_lr(self):\n # maybe update learning rate\n if self.lr_scheduler is not None:\n assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))\n\n if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):\n # lr scheduler is updated with moving average val loss. 
should be more robust\n self.lr_scheduler.step(self.train_loss_MA)\n else:\n self.lr_scheduler.step(self.epoch + 1)\n self.print_to_log_file(\"lr is now (scheduler) %s\" % str(self.optimizer.param_groups[0]['lr']))\n\n def maybe_save_checkpoint(self):\n \"\"\"\n Saves a checkpoint every save_ever epochs.\n :return:\n \"\"\"\n if self.epoch % self.save_every == (self.save_every - 1):\n self.print_to_log_file(\"saving scheduled checkpoint file...\")\n if not self.save_latest_only:\n self.save_checkpoint(join(self.output_folder, \"model_ep_%03.0d.model\" % (self.epoch + 1)))\n self.save_checkpoint(join(self.output_folder, \"model_latest.model\"))\n self.print_to_log_file(\"done\")\n\n def update_eval_criterion_MA(self):\n \"\"\"\n If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping\n (not a minimization, but a maximization of a metric and therefore the - in the latter case)\n :return:\n \"\"\"\n if self.val_eval_criterion_MA is None:\n if len(self.all_val_eval_metrics) == 0:\n self.val_eval_criterion_MA = - self.all_val_losses[-1]\n else:\n self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]\n else:\n if len(self.all_val_eval_metrics) == 0:\n \"\"\"\n We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower \n is better, so we need to negate it. \n \"\"\"\n self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (\n 1 - self.val_eval_criterion_alpha) * \\\n self.all_val_losses[-1]\n else:\n self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (\n 1 - self.val_eval_criterion_alpha) * \\\n self.all_val_eval_metrics[-1]\n\n def manage_patience(self):\n # update patience\n continue_training = True\n if self.patience is not None:\n # if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized,\n # initialize them\n if self.best_MA_tr_loss_for_patience is None:\n self.best_MA_tr_loss_for_patience = self.train_loss_MA\n\n if self.best_epoch_based_on_MA_tr_loss is None:\n self.best_epoch_based_on_MA_tr_loss = self.epoch\n\n if self.best_val_eval_criterion_MA is None:\n self.best_val_eval_criterion_MA = self.val_eval_criterion_MA\n\n # check if the current epoch is the best one according to moving average of validation criterion. If so\n # then save 'best' model\n # Do not use this for validation. This is intended for test set prediction only.\n self.print_to_log_file(\"current best_val_eval_criterion_MA is %.4f0\" % self.best_val_eval_criterion_MA)\n self.print_to_log_file(\"current val_eval_criterion_MA is %.4f\" % self.val_eval_criterion_MA)\n\n if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:\n self.best_val_eval_criterion_MA = self.val_eval_criterion_MA\n self.print_to_log_file(\"saving best epoch checkpoint...\")\n self.save_checkpoint(join(self.output_folder, \"model_best.model\"))\n\n # Now see if the moving average of the train loss has improved. 
If yes then reset patience, else\n # increase patience\n if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:\n self.best_MA_tr_loss_for_patience = self.train_loss_MA\n self.best_epoch_based_on_MA_tr_loss = self.epoch\n self.print_to_log_file(\"New best epoch (train loss MA): %03.4f\" % self.best_MA_tr_loss_for_patience)\n else:\n self.print_to_log_file(\"No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f\" %\n (self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))\n\n # if patience has reached its maximum then finish training (provided lr is low enough)\n if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:\n if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:\n self.print_to_log_file(\"My patience ended, but I believe I need more time (lr > 1e-6)\")\n self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2\n else:\n self.print_to_log_file(\"My patience ended\")\n continue_training = False\n else:\n self.print_to_log_file(\n \"Patience: %d/%d\" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))\n\n return continue_training\n\n def on_epoch_end(self):\n self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_\n # metrics\n\n self.plot_progress()\n\n self.maybe_update_lr()\n\n self.maybe_save_checkpoint()\n\n self.update_eval_criterion_MA()\n\n continue_training = self.manage_patience()\n continue_training = True\n return continue_training\n\n def update_train_loss_MA(self):\n if self.train_loss_MA is None:\n self.train_loss_MA = self.all_tr_losses[-1]\n else:\n self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \\\n self.all_tr_losses[-1]\n\n def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):\n data_dict = next(data_generator)\n data = data_dict['data']\n target = data_dict['target']\n\n if not isinstance(data, torch.Tensor):\n data = torch.from_numpy(data).float()\n if not isinstance(target, torch.Tensor):\n target = torch.from_numpy(target).float()\n\n data = data.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n self.optimizer.zero_grad()\n\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n if run_online_evaluation:\n self.run_online_evaluation(output, target)\n\n del target\n\n if do_backprop:\n if not self.fp16 or amp is None:\n l.backward()\n else:\n with amp.scale_loss(l, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n self.optimizer.step()\n\n return l.detach().cpu().numpy()\n\n def run_online_evaluation(self, *args, **kwargs):\n \"\"\"\n Can be implemented, does not have to\n :param output_torch:\n :param target_npy:\n :return:\n \"\"\"\n pass\n\n def finish_online_evaluation(self):\n \"\"\"\n Can be implemented, does not have to\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def validate(self, *args, **kwargs):\n pass\n\n def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):\n \"\"\"\n stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html\n :param num_iters:\n :param init_value:\n :param final_value:\n :param beta:\n :return:\n \"\"\"\n import math\n self._maybe_init_amp()\n mult = (final_value / init_value) ** (1/num_iters)\n lr = init_value\n self.optimizer.param_groups[0]['lr'] = lr\n avg_loss = 0.\n best_loss = 0.\n losses = []\n log_lrs = []\n\n for batch_num in range(1, num_iters 
+ 1):\n # +1 because this one here is not designed to have negative loss...\n loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1\n\n # Compute the smoothed loss\n avg_loss = beta * avg_loss + (1-beta) * loss\n smoothed_loss = avg_loss / (1 - beta**batch_num)\n\n # Stop if the loss is exploding\n if batch_num > 1 and smoothed_loss > 4 * best_loss:\n break\n\n # Record the best loss\n if smoothed_loss < best_loss or batch_num==1:\n best_loss = smoothed_loss\n\n # Store the values\n losses.append(smoothed_loss)\n log_lrs.append(math.log10(lr))\n\n # Update the lr for the next step\n lr *= mult\n self.optimizer.param_groups[0]['lr'] = lr\n\n import matplotlib.pyplot as plt\n lrs = [10 ** i for i in log_lrs]\n fig = plt.figure()\n plt.xscale('log')\n plt.plot(lrs[10:-5], losses[10:-5])\n plt.savefig(join(self.output_folder, \"lr_finder.png\"))\n plt.close()\n return log_lrs, losses\n"
] | [
[
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"matplotlib.pyplot.figure",
"torch.manual_seed",
"torch.save",
"numpy.random.seed",
"torch.no_grad",
"matplotlib.rc",
"numpy.argmax",
"torch.cuda.current_device",
"matplotlib.pyplot.xscale",
"torch.from_numpy",
"matplotlib.pyplot.close",
"sklearn.model_selection.KFold",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.mean"
]
] |
Pradeep-Gopal/yolo_deer_people_final_project | [
"2337e8cbb88f467a6d19ab9cdb14abbf2ba04bc2"
] | [
"yolov3_tiny_deer_detection/evaluate_mAP.py"
] | [
"\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.saved_model import tag_constants\nfrom yolov3.dataset import Dataset\nfrom yolov3.yolov4 import Create_Yolo\nfrom yolov3.utils import load_yolo_weights, detect_image, image_preprocess, postprocess_boxes, nms, read_class_names\nfrom yolov3.configs import *\nimport shutil\nimport json\nimport time\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif len(gpus) > 0:\n try: tf.config.experimental.set_memory_growth(gpus[0], True)\n except RuntimeError: print(\"RuntimeError in tf.config.experimental.list_physical_devices('GPU')\")\n\n\ndef voc_ap(rec, prec):\n \"\"\"\n --- Official matlab code VOC2012---\n mrec=[0 ; rec ; 1];\n mpre=[0 ; prec ; 0];\n for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n end\n i=find(mrec(2:end)~=mrec(1:end-1))+1;\n ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n rec.insert(0, 0.0) # insert 0.0 at begining of list\n rec.append(1.0) # insert 1.0 at end of list\n mrec = rec[:]\n prec.insert(0, 0.0) # insert 0.0 at begining of list\n prec.append(0.0) # insert 0.0 at end of list\n mpre = prec[:]\n \"\"\"\n This part makes the precision monotonically decreasing\n (goes from the end to the beginning)\n matlab: for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n \"\"\"\n # matlab indexes start in 1 but python in 0, so I have to do:\n # range(start=(len(mpre) - 2), end=0, step=-1)\n # also the python function range excludes the end, resulting in:\n # range(start=(len(mpre) - 2), end=-1, step=-1)\n for i in range(len(mpre)-2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i+1])\n \"\"\"\n This part creates a list of indexes where the recall changes\n matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;\n \"\"\"\n i_list = []\n for i in range(1, len(mrec)):\n if mrec[i] != mrec[i-1]:\n i_list.append(i) # if it was matlab would be i + 1\n \"\"\"\n The Average Precision (AP) is the area under the curve\n (numerical integration)\n matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n ap = 0.0\n for i in i_list:\n ap += ((mrec[i]-mrec[i-1])*mpre[i])\n return ap, mrec, mpre\n\n\ndef get_mAP(Yolo, dataset, score_threshold=0.25, iou_threshold=0.50, TEST_INPUT_SIZE=TEST_INPUT_SIZE):\n MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)\n NUM_CLASS = read_class_names(TRAIN_CLASSES)\n\n ground_truth_dir_path = 'mAP/ground-truth'\n if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)\n\n if not os.path.exists('mAP'): os.mkdir('mAP')\n os.mkdir(ground_truth_dir_path)\n\n print(f'\\ncalculating mAP{int(iou_threshold*100)}...\\n')\n\n gt_counter_per_class = {}\n for index in range(dataset.num_samples):\n ann_dataset = dataset.annotations[index]\n\n original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)\n\n if len(bbox_data_gt) == 0:\n bboxes_gt = []\n classes_gt = []\n else:\n bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]\n ground_truth_path = os.path.join(ground_truth_dir_path, str(index) + '.txt')\n num_bbox_gt = len(bboxes_gt)\n\n bounding_boxes = []\n for i in range(num_bbox_gt):\n class_name = NUM_CLASS[classes_gt[i]]\n xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))\n bbox = xmin + \" \" + ymin + \" \" + xmax + \" \" +ymax\n bounding_boxes.append({\"class_name\":class_name, \"bbox\":bbox, \"used\":False})\n\n # count that object\n if class_name in gt_counter_per_class:\n gt_counter_per_class[class_name] += 1\n else:\n # if class didn't 
exist yet\n gt_counter_per_class[class_name] = 1\n bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\\n'\n with open(f'{ground_truth_dir_path}/{str(index)}_ground_truth.json', 'w') as outfile:\n json.dump(bounding_boxes, outfile)\n\n gt_classes = list(gt_counter_per_class.keys())\n # sort the classes alphabetically\n gt_classes = sorted(gt_classes)\n n_classes = len(gt_classes)\n\n times = []\n json_pred = [[] for i in range(n_classes)]\n for index in range(dataset.num_samples):\n ann_dataset = dataset.annotations[index]\n\n image_name = ann_dataset[0].split('/')[-1]\n original_image, bbox_data_gt = dataset.parse_annotation(ann_dataset, True)\n \n image = image_preprocess(np.copy(original_image), [TEST_INPUT_SIZE, TEST_INPUT_SIZE])\n image_data = image[np.newaxis, ...].astype(np.float32)\n\n t1 = time.time()\n if YOLO_FRAMEWORK == \"tf\":\n pred_bbox = Yolo.predict(image_data)\n elif YOLO_FRAMEWORK == \"trt\":\n batched_input = tf.constant(image_data)\n result = Yolo(batched_input)\n pred_bbox = []\n for key, value in result.items():\n value = value.numpy()\n pred_bbox.append(value)\n \n t2 = time.time()\n \n times.append(t2-t1)\n \n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n pred_bbox = tf.concat(pred_bbox, axis=0)\n\n bboxes = postprocess_boxes(pred_bbox, original_image, TEST_INPUT_SIZE, score_threshold)\n bboxes = nms(bboxes, iou_threshold, method='nms')\n\n for bbox in bboxes:\n coor = np.array(bbox[:4], dtype=np.int32)\n score = bbox[4]\n class_ind = int(bbox[5])\n class_name = NUM_CLASS[class_ind]\n score = '%.4f' % score\n xmin, ymin, xmax, ymax = list(map(str, coor))\n bbox = xmin + \" \" + ymin + \" \" + xmax + \" \" +ymax\n json_pred[gt_classes.index(class_name)].append({\"confidence\": str(score), \"file_id\": str(index), \"bbox\": str(bbox)})\n\n ms = sum(times)/len(times)*1000\n fps = 1000 / ms\n\n for class_name in gt_classes:\n json_pred[gt_classes.index(class_name)].sort(key=lambda x:float(x['confidence']), reverse=True)\n with open(f'{ground_truth_dir_path}/{class_name}_predictions.json', 'w') as outfile:\n json.dump(json_pred[gt_classes.index(class_name)], outfile)\n\n # Calculate the AP for each class\n sum_AP = 0.0\n ap_dictionary = {}\n # open file to store the results\n with open(\"mAP/results.txt\", 'w') as results_file:\n results_file.write(\"# AP and precision/recall per class\\n\")\n count_true_positives = {}\n for class_index, class_name in enumerate(gt_classes):\n count_true_positives[class_name] = 0\n # Load predictions of that class\n predictions_file = f'{ground_truth_dir_path}/{class_name}_predictions.json'\n predictions_data = json.load(open(predictions_file))\n\n # Assign predictions to ground truth objects\n nd = len(predictions_data)\n tp = [0] * nd # creates an array of zeros of size nd\n fp = [0] * nd\n for idx, prediction in enumerate(predictions_data):\n file_id = prediction[\"file_id\"]\n # assign prediction to ground truth object if any\n # open ground-truth with that file_id\n gt_file = f'{ground_truth_dir_path}/{str(file_id)}_ground_truth.json'\n ground_truth_data = json.load(open(gt_file))\n ovmax = -1\n gt_match = -1\n # load prediction bounding-box\n bb = [ float(x) for x in prediction[\"bbox\"].split() ] # bounding box of prediction\n for obj in ground_truth_data:\n # look for a class_name match\n if obj[\"class_name\"] == class_name:\n bbgt = [ float(x) for x in obj[\"bbox\"].split() ] # bounding box of ground truth\n bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), 
min(bb[3],bbgt[3])]\n iw = bi[2] - bi[0] + 1\n ih = bi[3] - bi[1] + 1\n if iw > 0 and ih > 0:\n # compute overlap (IoU) = area of intersection / area of union\n ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]\n + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih\n ov = iw * ih / ua\n if ov > ovmax:\n ovmax = ov\n gt_match = obj\n\n # assign prediction as true positive/don't care/false positive\n if ovmax >= MINOVERLAP:# if ovmax > minimum overlap\n if not bool(gt_match[\"used\"]):\n # true positive\n tp[idx] = 1\n gt_match[\"used\"] = True\n count_true_positives[class_name] += 1\n # update the \".json\" file\n with open(gt_file, 'w') as f:\n f.write(json.dumps(ground_truth_data))\n else:\n # false positive (multiple detection)\n fp[idx] = 1\n else:\n # false positive\n fp[idx] = 1\n\n # compute precision/recall\n cumsum = 0\n for idx, val in enumerate(fp):\n fp[idx] += cumsum\n cumsum += val\n cumsum = 0\n for idx, val in enumerate(tp):\n tp[idx] += cumsum\n cumsum += val\n #print(tp)\n rec = tp[:]\n for idx, val in enumerate(tp):\n rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]\n #print(rec)\n prec = tp[:]\n for idx, val in enumerate(tp):\n prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])\n #print(prec)\n\n ap, mrec, mprec = voc_ap(rec, prec)\n sum_AP += ap\n text = \"{0:.3f}%\".format(ap*100) + \" = \" + class_name + \" AP \" #class_name + \" AP = {0:.2f}%\".format(ap*100)\n\n rounded_prec = [ '%.3f' % elem for elem in prec ]\n rounded_rec = [ '%.3f' % elem for elem in rec ]\n # Write to results.txt\n results_file.write(text + \"\\n Precision: \" + str(rounded_prec) + \"\\n Recall :\" + str(rounded_rec) + \"\\n\\n\")\n\n print(text)\n ap_dictionary[class_name] = ap\n\n results_file.write(\"\\n# mAP of all classes\\n\")\n mAP = sum_AP / n_classes\n\n text = \"mAP = {:.3f}%, {:.2f} FPS\".format(mAP*100, fps)\n results_file.write(text + \"\\n\")\n print(text)\n \n return mAP*100\n\nif __name__ == '__main__': \n if YOLO_FRAMEWORK == \"tf\": # TensorFlow detection\n if YOLO_TYPE == \"yolov4\":\n Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS\n if YOLO_TYPE == \"yolov3\":\n Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS\n\n if YOLO_CUSTOM_WEIGHTS == False:\n yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)\n load_yolo_weights(yolo, Darknet_weights) # use Darknet weights\n else:\n yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)\n yolo.load_weights(f\"./checkpoints/{TRAIN_MODEL_NAME}\") # use custom weights\n \n elif YOLO_FRAMEWORK == \"trt\": # TensorRT detection\n saved_model_loaded = tf.saved_model.load(f\"./checkpoints/{TRAIN_MODEL_NAME}\", tags=[tag_constants.SERVING])\n signature_keys = list(saved_model_loaded.signatures.keys())\n yolo = saved_model_loaded.signatures['serving_default']\n\n testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)\n get_mAP(yolo, testset, score_threshold=0.05, iou_threshold=0.50, TEST_INPUT_SIZE=YOLO_INPUT_SIZE)\n"
] | [
[
"tensorflow.saved_model.load",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.shape",
"numpy.copy",
"tensorflow.concat",
"numpy.array",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.constant"
]
] |
DenisSch/svca | [
"bd029c120ca8310f43311253e4d7ce19bc08350c"
] | [
"svca_limix/limix/core/mean/mean.py"
] | [
"import sys\nfrom limix.core.old.cobj import *\nfrom limix.utils.preprocess import regressOut\nimport numpy as np\n\nimport scipy.linalg as LA\nimport copy\n\ndef compute_X1KX2(Y, D, X1, X2, A1=None, A2=None):\n\n R,C = Y.shape\n if A1 is None:\n nW_A1 = Y.shape[1]\n #A1 = np.eye(Y.shape[1])\t#for now this creates A1 and A2\n else:\n nW_A1 = A1.shape[0]\n\n if A2 is None:\n nW_A2 = Y.shape[1]\n #A2 = np.eye(Y.shape[1])\t#for now this creates A1 and A2\n else:\n nW_A2 = A2.shape[0]\n\n\n nW_X1 = X1.shape[1]\n rows_block = nW_A1 * nW_X1\n\n if 0:#independentX2:\n nW_X2 = 1\n else:\n nW_X2 = X2.shape[1]\n cols_block = nW_A2 * nW_X2\n\n block = np.zeros((rows_block,cols_block))\n\n\n if (R>C) or (A1 is None) or (A2 is None):\n for c in range(C):\n X1D = X1 * D[:,c:c+1]\n X1X2 = X1D.T.dot(X2)\n if (A1 is None) and (A2 is None):\n block[c*X1.shape[1]:(c+1)*X1.shape[1], c*X2.shape[1]:(c+1)*X2.shape[1]] += X1X2\n elif (A1 is None):\n block[c*X1.shape[1]:(c+1)*X1.shape[1],:] += np.kron(A2[:,c:c+1].T,X1X2)\n elif (A2 is None):\n block[:,c*X2.shape[1]:(c+1)*X2.shape[1]] += np.kron(A1[:,c:c+1],X1X2)\n else:\n A1A2 = np.outer(A1[:,c],A2[:,c])\n block += np.kron(A1A2,X1X2)\n else:\n for r in range(R):\n A1D = A1 * D[r:r+1,:]\n A1A2 = A1D.dot(A2.T)\n X1X2 = X1[r,:][:,np.newaxis].dot(X2[r,:][np.newaxis,:])\n block += np.kron(A1A2,X1X2)\n\n return block\n\nclass mean(cObject):\n\n def __init__(self,Y, identity_trick=False):\n \"\"\" init data term \"\"\"\n self.Y = Y\n self.identity_trick=identity_trick\n self.clearFixedEffect()\n\n #########################################\n # Properties\n #########################################\n\n @property\n def A(self):\n return self._A\n\n @property\n def B(self):\n return self._B\n\n @property\n def F(self):\n return self._F\n\n @property\n def A_identity(self):\n return self._A_identity\n\n @property\n def REML_term(self):\n return self._REML_term\n\n @property\n def Y(self):\n return self._Y\n\n @property\n def N(self):\n return self._N\n\n @property\n def P(self):\n return self._P\n\n @property\n def n_fixed_effs(self):\n return self._n_fixed_effs\n\n @property\n def n_terms(self):\n return self._n_terms\n\n @property\n def Lr(self):\n return self._Lr\n\n @property\n def Lc(self):\n return self._Lc\n\n @property\n def d(self):\n return self._d\n\n @property\n def D(self):\n return np.reshape(self.d,(self.N,self.P), order='F')\n\n @property\n def LRLdiag(self):\n return self._LRLdiag\n\n @property\n def LCL(self):\n return self._LCL\n\n #########################################\n # Setters\n #########################################\n def use_identity_trick(self,identity_trick=True):\n self.identity_trick=identity_trick\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n\n def clearFixedEffect(self):\n \"\"\" erase all fixed effects \"\"\"\n self._A = []\n self._F = []\n self._B = []\n self._A_identity = []\n self._REML_term = []\n self._n_terms = 0\n self._n_fixed_effs = 0\n self._n_fixed_effs_REML = 0\n self.indicator = {'term':np.array([]),\n 'row':np.array([]),\n 'col':np.array([])}\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n def addFixedEffect(self,F=None,A=None, REML=True, index=None):\n \"\"\"\n set sample and trait designs\n F: NxK sample 
design\n A: LxP sample design\n REML: REML for this term?\n index: index of which fixed effect to replace. If None, just append.\n \"\"\"\n if F is None: F = np.ones((self.N,1))\n if A is None:\n A = np.eye(self.P)\n A_identity = True\n elif (A.shape == (self.P,self.P)) & (A==np.eye(self.P)).all():\n A_identity = True\n else:\n A_identity = False\n\n assert F.shape[0]==self.N, \"F dimension mismatch\"\n assert A.shape[1]==self.P, \"A dimension mismatch\"\n if index is None or index==self.n_terms:\n self.F.append(F)\n self.A.append(A)\n self.A_identity.append(A_identity)\n self.REML_term.append(REML)\n # build B matrix and indicator\n self.B.append(np.zeros((F.shape[1],A.shape[0])))\n self._n_terms+=1\n self._update_indicator(F.shape[1],A.shape[0])\n elif index >self.n_terms:\n raise Exception(\"index exceeds max index of terms\")\n else:\n self._n_fixed_effs-=self.F[index].shape[1]*self.A[index].shape[0]\n if self.REML_term[index]:\n self._n_fixed_effs_REML-=self.F[index].shape[1]*self.A[index].shape[0]\n self.F[index] = F\n self.A[index] = A\n self.A_identity[index] = A_identity\n self.REML_term[index]=REML\n self.B[index] = np.zeros((F.shape[1],A.shape[0]))\n self._rebuild_indicator()\n\n self._n_fixed_effs+=F.shape[1]*A.shape[0]\n if REML:\n self._n_fixed_effs_REML+=F.shape[1]*A.shape[0]\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n def removeFixedEffect(self, index=None):\n \"\"\"\n set sample and trait designs\n F: NxK sample design\n A: LxP sample design\n REML: REML for this term?\n index: index of which fixed effect to replace. If None, remove last term.\n \"\"\"\n if self._n_terms==0:\n pass\n if index is None or index==(self._n_terms-1):\n\n self._n_terms-=1\n F = self._F.pop() #= self.F[:-1]\n A = self._A.pop() #= self.A[:-1]\n self._A_identity.pop() #= self.A_identity[:-1]\n REML_term = self._REML_term.pop()# = self.REML_term[:-1]\n self._B.pop()# = self.B[:-1]\n self._n_fixed_effs-=F.shape[1]*A.shape[0]\n if REML_term:\n self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]\n\n pass\n elif index >= self.n_terms:\n raise Exception(\"index exceeds max index of terms\")\n else:\n raise NotImplementedError(\"currently only last term can be removed\")\n pass\n self._rebuild_indicator()\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n @Y.setter\n def Y(self,value):\n \"\"\" set phenotype \"\"\"\n self._N,self._P = value.shape\n self._Y = value\n self.clear_cache('Ystar1','Ystar','Yhat','LRLdiag_Yhat',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n @Lr.setter\n def Lr(self,value):\n \"\"\" set row rotation \"\"\"\n assert value.shape[0]==self._N, 'dimension mismatch'\n assert value.shape[1]==self._N, 'dimension mismatch'\n self._Lr = value\n self.clear_cache('Fstar','Ystar1','Ystar','Yhat','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',\n 'beta_grad','Xstar_beta_grad',\n 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n @Lc.setter\n def Lc(self,value):\n \"\"\" set col rotation \"\"\"\n assert value.shape[0]==self._P, 'Lc dimension mismatch'\n assert value.shape[1]==self._P, 'Lc dimension mismatch'\n self._Lc = value\n 
self.clear_cache('Astar','Ystar','Yhat','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n @d.setter\n def d(self,value):\n \"\"\" set anisotropic scaling \"\"\"\n assert value.shape[0]==self._P*self._N, 'd dimension mismatch'\n self._d = value\n self.clear_cache('Yhat','Xhat','Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')\n\n @LRLdiag.setter\n def LRLdiag(self,value):\n \"\"\" set anisotropic scaling \"\"\"\n self._LRLdiag = value\n self.clear_cache('LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',\n 'beta_grad','Xstar_beta_grad')\n\n @LCL.setter\n def LCL(self,value):\n \"\"\" set anisotropic scaling \"\"\"\n self._LCL = value\n self.clear_cache('Areml_grad','beta_grad','Xstar_beta_grad')\n\n #########################################\n # Getters (caching)\n #########################################\n\n @cached\n def Astar(self):\n RV = []\n for term_i in range(self.n_terms):\n RV.append(np.dot(self.A[term_i],self.Lc.T))\n return RV\n\n @cached\n def Fstar(self):\n RV = []\n for term_i in range(self.n_terms):\n RV.append(np.dot(self.Lr,self.F[term_i]))\n return RV\n\n def Ystar1(self):\n return np.dot(self.Lr,self.Y)\n\n @cached\n def Ystar(self):\n return np.dot(self.Ystar1(),self.Lc.T)\n\n @cached\n def Yhat(self):\n return self.D*self.Ystar()\n\n @cached\n def Xstar(self):\n RV = np.zeros((self.N*self.P,self.n_fixed_effs))\n ip = 0\n for i in range(self.n_terms):\n Ki = self.A[i].shape[0]*self.F[i].shape[1]\n RV[:,ip:ip+Ki] = np.kron(self.Astar()[i].T,self.Fstar()[i])\n ip += Ki\n return RV\n\n def var_total(self):\n return (self.Yhat()*self.Ystar()).sum()\n\n\n def var_explained(self):\n XKY = self.compute_XKY(M=self.Yhat())\n beta_hat = self.Areml_solve(XKY)\n return (XKY*beta_hat).sum(), beta_hat\n\n\n @cached\n def Xhat(self):\n RV = self.d[:,np.newaxis]*self.Xstar()\n return RV\n\n @cached\n def Areml(self):\n #A1 = self.XstarT_dot(self.Xhat())\n A2 = self.compute_XKX()\n return A2\n\n @cached\n def Areml_chol(self):\n return LA.cholesky(self.Areml()).T\n\n @cached\n def Areml_REML_chol(self):\n return LA.cholesky(self.Areml()).T\n\n @cached\n def Areml_inv(self):\n return LA.cho_solve((self.Areml_chol(),True),np.eye(self.n_fixed_effs))\n\n #caching bug:\n #@cached\n def beta_hat(self):\n XKY = self.compute_XKY(M=self.Yhat())\n beta_hat = self.Areml_solve(XKY)\n return beta_hat\n\n\n @cached\n def B_hat(self):\n RV = []\n ip = 0\n for term_i in range(self.n_terms):\n RV.append(np.reshape(self.beta_hat()[ip:ip+self.B[term_i].size],self.B[term_i].shape, order='F'))\n ip += self.B[term_i].size\n return RV\n\n @cached\n def LRLdiag_Xhat_tens(self):\n RV = np.reshape(self.Xhat(),(self.N,self.P,self.n_fixed_effs),order='F').copy()\n RV *= self.LRLdiag[:,np.newaxis,np.newaxis]\n return RV\n\n @cached\n def LRLdiag_Yhat(self):\n return self.LRLdiag[:,np.newaxis]*self.Yhat()\n\n @cached\n def Areml_grad(self):\n RV = np.einsum('jpk,lp->jlk',self.LRLdiag_Xhat_tens(),self.LCL)\n RV = RV.reshape((self.N*self.P,self.n_fixed_effs),order='F')\n RV*= self.d[:,np.newaxis]\n RV = -self.XstarT_dot(RV)\n return RV\n\n @cached\n def beta_grad(self):\n RV = np.reshape(np.dot(self.LRLdiag_Yhat(),self.LCL.T),(self.N*self.P),order='F')\n RV *= self.d\n RV = self.XstarT_dot(RV)\n RV += np.dot(self.Areml_grad(),self.beta_hat())\n RV = 
-np.dot(self.Areml_inv(),RV)\n return RV\n\n @cached\n def Xstar_beta_grad(self):\n RV = np.zeros((self.N,self.P))\n ip = 0\n for term_i in range(self.n_terms):\n _Bgrad = np.reshape(self.beta_grad()[ip:ip+self.B[term_i].size],self.B[term_i].shape, order='F')\n RV+=np.dot(self.Fstar()[term_i],np.dot(_Bgrad,self.Astar()[term_i]))\n ip += self.B[term_i].size\n return RV\n\n\n @cached\n def Zstar(self):\n \"\"\" predict the value of the fixed effect \"\"\"\n RV = self.Ystar().copy()\n for term_i in range(self.n_terms):\n if self.identity_trick and self.A_identity[term_i]:\n RV-=np.dot(self.Fstar()[term_i],self.B_hat()[term_i])\n else:\n RV-=np.dot(self.Fstar()[term_i],np.dot(self.B_hat()[term_i],self.Astar()[term_i]))\n self.clear_cache('DLZ')\n return RV\n\n @cached\n def Areml_eigh(self):\n \"\"\"compute the eigenvalue decomposition of Astar\"\"\"\n s,U = LA.eigh(self.Areml(),lower=True)\n i_pos = (s>1e-10)\n s = s[i_pos]\n U = U[:,i_pos]\n return s,U\n\n @cached\n def DLZ(self):\n return self.Zstar()*np.reshape(self.D,(self.N,self.P), order='F')\n\n ###############################################\n # Other getters with no caching, should not they have caching somehow?\n ###############################################\n\n def Areml_solve(self, b):\n try:\n res = LA.cho_solve((self.Areml_chol(),True),b)\n except LA.LinAlgError:\n\n s,U = self.Areml_eigh()\n res = U.T.dot(b)\n res /= s[:,np.newaxis]\n res = U.dot(res)\n\n return res\n\n\n def compute_XKY(self, M=None):\n if M is None:\n M = self.Yhat()\n assert M.shape==(self.N,self.P)\n XKY = np.zeros((self.n_fixed_effs))\n n_weights = 0\n for term in range(self.n_terms):\n if self.identity_trick and self.A_identity[term]:\n XKY_block = compute_XYA(DY=M, X=self.Fstar()[term], A=None)\n else:\n XKY_block = compute_XYA(DY=M, X=self.Fstar()[term], A=self.Astar()[term])\n XKY[n_weights:n_weights + self.A[term].shape[0] * self.F[term].shape[1]] = XKY_block.ravel(order='F')\n n_weights += self.A[term].shape[0] * self.F[term].shape[1]\n return XKY\n\n def compute_XKX(self):\n #n_weights1 = 0\n #\n #for term1 in xrange(self.n_terms):\n # n_weights1+=self.Astar()[term1].shape[0] * self.Fstar()[term1].shape[1]\n #cov_beta = np.zeros((n_weights1,n_weights1))\n cov_beta = np.zeros((self.n_fixed_effs,self.n_fixed_effs))\n n_weights1 = 0\n for term1 in range(self.n_terms):\n if self.identity_trick and self.A_identity[term1]:\n A_term1 = None\n else:\n A_term1 = self.Astar()[term1]\n n_weights2 = n_weights1\n for term2 in range(term1,self.n_terms):\n if self.identity_trick and self.A_identity[term2]:\n A_term2 = None\n else:\n A_term2 = self.Astar()[term2]\n block = compute_X1KX2(Y=self.Ystar(), D=self.D, X1=self.Fstar()[term1], X2=self.Fstar()[term2], A1=A_term1, A2=A_term2)\n cov_beta[n_weights1:n_weights1 + self.A[term1].shape[0] * self.F[term1].shape[1], n_weights2:n_weights2 + self.A[term2].shape[0] * self.F[term2].shape[1]] = block\n if term1!=term2:\n cov_beta[n_weights2:n_weights2 + self.A[term2].shape[0] * self.F[term2].shape[1], n_weights1:n_weights1 + self.A[term1].shape[0] * self.F[term1].shape[1]] = block.T\n\n n_weights2+=self.A[term2].shape[0] * self.F[term2].shape[1]\n\n n_weights1+=self.A[term1].shape[0] * self.F[term1].shape[1]\n return cov_beta\n\n def predict(self):\n \"\"\" predict the value of the fixed effect \"\"\"\n RV = np.zeros((self.N,self.P))\n for term_i in range(self.n_terms):\n RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i]))\n return RV\n\n def evaluate(self):\n \"\"\" predict the value of 
\"\"\"\n RV = -self.predict()\n RV += self.Ystar()\n return RV\n\n def getGradient(self,j):\n \"\"\" get rotated gradient for fixed effect i \"\"\"\n i = int(self.indicator['term'][j])\n r = int(self.indicator['row'][j])\n c = int(self.indicator['col'][j])\n rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])\n return rv\n\n def XstarT_dot(self,M):\n \"\"\" get dot product of Xhat and M \"\"\"\n if 0:\n #TODO: implement this properly\n pass\n else:\n RV = np.dot(self.Xstar().T,M)\n return RV\n\n def getResiduals(self):\n \"\"\" regress out fixed effects and results residuals \"\"\"\n X = np.zeros((self.N*self.P,self.n_fixed_effs))\n ip = 0\n for i in range(self.n_terms):\n Ki = self.A[i].shape[0]*self.F[i].shape[1]\n X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i])\n ip += Ki\n y = np.reshape(self.Y,(self.Y.size,1),order='F')\n RV = regressOut(y,X)\n RV = np.reshape(RV,self.Y.shape,order='F')\n return RV\n\n #########################################\n # Params manipulation\n #########################################\n\n def getParams(self):\n \"\"\" get params \"\"\"\n rv = np.array([])\n if self.n_terms>0:\n rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)])\n return rv\n\n def setParams(self,params):\n \"\"\" set params \"\"\"\n start = 0\n for i in range(self.n_terms):\n n_effects = self.B[i].size\n self.B[i] = np.reshape(params[start:start+n_effects],self.B[i].shape, order='F')\n start += n_effects\n\n #########################################\n # Utility functions\n #########################################\n\n def getDimensions(self):\n \"\"\" get phenotype dimensions \"\"\"\n return self.N,self.P\n\n def _set_toChange(x):\n \"\"\" set variables in list x toChange \"\"\"\n for key in list(x.keys()):\n self.toChange[key] = True\n\n def _update_indicator(self,K,L):\n \"\"\" update the indicator \"\"\"\n _update = {'term': self.n_terms*np.ones((K,L)).T.ravel(),\n 'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(),\n 'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()}\n for key in list(_update.keys()):\n self.indicator[key] = np.concatenate([self.indicator[key],_update[key]])\n\n def _rebuild_indicator(self):\n \"\"\" update the indicator \"\"\"\n indicator = {'term':np.array([]),\n 'row':np.array([]),\n 'col':np.array([])}\n\n for term in range(self.n_terms):\n L = self.A[term].shape[0]\n K = self.F[term].shape[1]\n _update = {'term': (term+1)*np.ones((K,L)).T.ravel(),\n 'row': np.kron(np.arange(K)[:,np.newaxis],np.ones((1,L))).T.ravel(),\n 'col': np.kron(np.ones((K,1)),np.arange(L)[np.newaxis,:]).T.ravel()}\n for key in list(_update.keys()):\n indicator[key] = np.concatenate([indicator[key],_update[key]])\n self.indicator = indicator\n"
] | [
[
"numpy.ones",
"numpy.eye",
"numpy.zeros",
"numpy.kron",
"numpy.concatenate",
"numpy.reshape",
"numpy.arange",
"numpy.array",
"numpy.dot",
"numpy.outer"
]
] |
koba35/retinanet | [
"99820cde438a2fc14e38973437766de6fe6a94a3"
] | [
"losses.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\ndef calc_iou(a, b):\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\n\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n\n ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih\n\n ua = torch.clamp(ua, min=1e-8)\n\n intersection = iw * ih\n\n IoU = intersection / ua\n\n return IoU\n\n\nclass FocalLoss(nn.Module):\n # def __init__(self):\n\n def forward(self, classifications, regressions, anchors, annotations):\n alpha = 0.25\n gamma = 2.0\n batch_size = classifications.shape[0]\n classification_losses = []\n regression_losses = []\n\n anchor = anchors[0, :, :]\n\n anchor_widths = anchor[:, 2] - anchor[:, 0]\n anchor_heights = anchor[:, 3] - anchor[:, 1]\n anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths\n anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights\n\n for j in range(batch_size):\n\n classification = classifications[j, :, :]\n regression = regressions[j, :, :]\n\n bbox_annotation = annotations[j, :, :]\n bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]\n\n if bbox_annotation.shape[0] == 0:\n regression_losses.append(torch.tensor(0).float().cuda())\n classification_losses.append(torch.tensor(0).float().cuda())\n\n continue\n\n classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)\n\n IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # num_anchors x num_annotations\n\n IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1\n\n # import pdb\n # pdb.set_trace()\n\n # compute the loss for classification\n targets = torch.ones(classification.shape) * -1\n targets = targets.cuda()\n\n targets[torch.lt(IoU_max, 0.4), :] = 0\n\n positive_indices = torch.ge(IoU_max, 0.5)\n\n num_positive_anchors = positive_indices.sum()\n\n assigned_annotations = bbox_annotation[IoU_argmax, :]\n\n targets[positive_indices, :] = 0\n targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1\n\n alpha_factor = torch.ones(targets.shape).cuda() * alpha\n\n alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)\n focal_weight = torch.where(torch.eq(targets, 1.), 1. 
- classification, classification)\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))\n\n # cls_loss = focal_weight * torch.pow(bce, gamma)\n cls_loss = focal_weight * bce\n\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())\n\n classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.float(), min=1.0))\n\n # compute the loss for regression\n\n if positive_indices.sum() > 0:\n assigned_annotations = assigned_annotations[positive_indices, :]\n\n anchor_widths_pi = anchor_widths[positive_indices]\n anchor_heights_pi = anchor_heights[positive_indices]\n anchor_ctr_x_pi = anchor_ctr_x[positive_indices]\n anchor_ctr_y_pi = anchor_ctr_y[positive_indices]\n\n gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]\n gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]\n gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths\n gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights\n\n # clip widths to 1\n gt_widths = torch.clamp(gt_widths, min=1)\n gt_heights = torch.clamp(gt_heights, min=1)\n\n targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi\n targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi\n targets_dw = torch.log(gt_widths / anchor_widths_pi)\n targets_dh = torch.log(gt_heights / anchor_heights_pi)\n\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))\n targets = targets.t()\n\n targets = targets / torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()\n\n negative_indices = 1 - positive_indices\n\n regression_diff = torch.abs(targets - regression[positive_indices, :])\n\n regression_loss = torch.where(\n torch.le(regression_diff, 1.0 / 9.0),\n 0.5 * 9.0 * torch.pow(regression_diff, 2),\n regression_diff - 0.5 / 9.0\n )\n regression_losses.append(regression_loss.mean())\n else:\n regression_losses.append(torch.tensor(0).float().cuda())\n\n return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0,\n keepdim=True)\n"
] | [
[
"torch.unsqueeze",
"torch.ones",
"torch.stack",
"torch.le",
"torch.pow",
"torch.Tensor",
"torch.ge",
"torch.lt",
"torch.eq",
"torch.tensor",
"torch.log",
"torch.abs",
"torch.max",
"torch.zeros",
"torch.ne",
"torch.clamp"
]
] |
googleinterns/protein-embedding-retrieval | [
"388563d3206e1486fe5dbcfd8326be6f1185a00e"
] | [
"contextual_lenses/train_utils.py"
] | [
"\"\"\"Train utils\n\nGeneral tools for instantiating and training models.\n\"\"\"\n\nimport flax\nfrom flax import nn\nfrom flax import optim\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\n\nimport jax\nfrom jax import random\nimport jax.nn\nimport jax.numpy as jnp\nfrom jax.config import config\nconfig.enable_omnistaging()\n\nimport tensorflow as tf\n\nimport numpy as np\n\nimport functools\n\nimport copy\n\nfrom google_research.protein_lm import models\n\n\n# Data batching.\ndef create_data_iterator(df,\n input_col,\n output_col,\n batch_size,\n epochs=1,\n buffer_size=None,\n seed=0,\n drop_remainder=False,\n add_outputs=True,\n as_numpy=True):\n \"\"\"Creates iterator of batches of (inputs) or (inputs, outputs).\"\"\"\n\n if buffer_size is None:\n buffer_size = len(df)\n\n inputs = list(df[input_col].values)\n inputs = tf.data.Dataset.from_tensor_slices(inputs)\n\n outputs = df[output_col].values\n outputs = tf.data.Dataset.from_tensor_slices(outputs)\n\n if add_outputs:\n batches = tf.data.Dataset.zip(\n (inputs, outputs)).shuffle(buffer_size=buffer_size,\n seed=seed,\n reshuffle_each_iteration=True)\n else:\n batches = inputs.shuffle(buffer_size=buffer_size,\n seed=seed,\n reshuffle_each_iteration=True)\n\n batches = batches.repeat(epochs).batch(batch_size=batch_size,\n drop_remainder=drop_remainder)\n\n if as_numpy:\n batches = batches.as_numpy_iterator()\n\n return batches\n\n\ndef path_inclusion_filter_fn(path, param, layer):\n \"\"\"Returns whether or not layer name is contained in path.\"\"\"\n\n return layer in path\n\n\ndef create_optimizer(model, learning_rate, weight_decay, layers=None):\n \"\"\"Instantiates Adam multi-optimizer.\"\"\"\n\n if layers is None:\n assert (\n type(learning_rate) == type(weight_decay) == float\n ), 'Specify float values for moded learning rate and weight decay!'\n optimizer_def = optim.Adam(learning_rate=learning_rate,\n weight_decay=weight_decay)\n optimizer = optimizer_def.create(model)\n\n else:\n assert (\n len(learning_rate) == len(weight_decay) == len(layers)\n ), 'Number of specified learning rates, weight decays, and layers must be equal!'\n optimizers = []\n for lr, wd, layer in zip(learning_rate, weight_decay, layers):\n if lr > 0:\n opt = optim.Adam(learning_rate=lr, weight_decay=wd)\n filter_fn = functools.partial(path_inclusion_filter_fn,\n layer=layer)\n traversal = optim.ModelParamTraversal(filter_fn)\n traversal_opt = (traversal, opt)\n optimizers.append(traversal_opt)\n optimizer_def = optim.MultiOptimizer(*optimizers)\n optimizer = optimizer_def.create(model)\n\n return optimizer\n\n\[email protected](jax.jit, static_argnums=(3, 4))\ndef train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs):\n \"\"\"Trains model (optimizer.target) using specified loss function.\"\"\"\n def compute_loss_fn(model, X, Y, loss_fn, loss_fn_kwargs):\n Y_hat = model(X)\n loss = loss_fn(Y, Y_hat, **loss_fn_kwargs)\n return loss\n\n grad_fn = jax.value_and_grad(compute_loss_fn)\n _, grad = grad_fn(optimizer.target, X, Y, loss_fn, loss_fn_kwargs)\n optimizer = optimizer.apply_gradient(grad)\n\n return optimizer\n\n\ndef get_p_train_step():\n \"\"\"Wraps train_step with jax.pmap.\"\"\"\n\n p_train_step = jax.pmap(train_step,\n axis_name='batch',\n static_broadcasted_argnums=(3, 4))\n\n return p_train_step\n\n\ndef train(model,\n train_data,\n loss_fn,\n loss_fn_kwargs,\n learning_rate=1e-4,\n weight_decay=0.1,\n layers=None,\n restore_dir=None,\n save_dir=None,\n use_pmap=False):\n \"\"\"Instantiates optimizer, applies 
train_step/p_train_step over training data.\"\"\"\n\n optimizer = create_optimizer(model,\n learning_rate=learning_rate,\n weight_decay=weight_decay,\n layers=layers)\n\n if restore_dir is not None:\n optimizer = checkpoints.restore_checkpoint(ckpt_dir=restore_dir,\n target=optimizer)\n\n if use_pmap:\n p_train_step = get_p_train_step()\n optimizer = optimizer.replicate()\n\n for batch in iter(train_data):\n X, Y = batch\n X, Y = common_utils.shard(X), common_utils.shard(Y)\n optimizer = p_train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)\n\n optimizer = optimizer.unreplicate()\n\n else:\n for batch in iter(train_data):\n X, Y = batch\n optimizer = train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)\n\n if save_dir is not None:\n state = optimizer.state\n if type(state) == list:\n step = [sub_state.step for sub_state in state]\n else:\n step = state.step\n checkpoints.save_checkpoint(ckpt_dir=save_dir,\n target=optimizer,\n step=step)\n\n return optimizer\n\n\ndef load_params(params,\n encoder_fn_params=None,\n reduce_fn_params=None,\n predict_fn_params=None):\n \"\"\"Updates randomly initialized parameters using loaded parameters.\"\"\"\n\n loaded_params = copy.deepcopy(params)\n fn_names = list(loaded_params.keys())\n\n num_learnable_layers = len([\n params_dict for params_dict in\n [encoder_fn_params, reduce_fn_params, predict_fn_params]\n if params_dict is not None\n ])\n if encoder_fn_params is not None:\n encoder_fn_ind = '_0'\n if reduce_fn_params is not None:\n reduce_fn_ind = '_1'\n predict_fn_ind = '_2'\n else:\n predict_fn_ind = '_1'\n else:\n if reduce_fn_params is not None:\n reduce_fn_ind = '_0'\n predict_fn_ind = '_1'\n else:\n predict_fn_ind = '_0'\n\n assert (len(loaded_params.keys()) >= num_learnable_layers\n ), 'Model encoder and lens architecture incorrectly specified!'\n\n encoder_fn_name = None\n if encoder_fn_params is not None:\n for fn_name in fn_names:\n if encoder_fn_ind in fn_name:\n if encoder_fn_name is not None:\n raise ValueError(\n 'Multiple instances of encoder_fn detected. %s' %\n fn_name)\n encoder_fn_name = fn_name\n loaded_params[encoder_fn_name] = encoder_fn_params\n\n reduce_fn_name = None\n if reduce_fn_params is not None:\n for fn_name in fn_names:\n if reduce_fn_ind in fn_name:\n if reduce_fn_name is not None:\n raise ValueError(\n 'Multiple instances of reduce_fn detected. %s' %\n fn_name)\n reduce_fn_name = fn_name\n loaded_params[reduce_fn_name] = reduce_fn_params\n\n predict_fn_name = None\n if predict_fn_params is not None:\n for fn_name in fn_names:\n if predict_fn_ind in fn_name:\n if predict_fn_name is not None:\n raise ValueError(\n 'Multiple instances of predict_fn detected. 
%s' %\n fn_name)\n predict_fn_name = fn_name\n loaded_params[predict_fn_name] = predict_fn_params\n\n return loaded_params\n\n\nclass RepresentationModel(nn.Module):\n def apply(self,\n x,\n encoder_fn,\n encoder_fn_kwargs,\n reduce_fn,\n reduce_fn_kwargs,\n num_categories,\n output_features,\n output='prediction',\n use_transformer=False,\n padding_mask=None):\n \"\"\"Computes padding mask, encodes indices using embeddings, \n applies lensing operation, predicts scalar value.\n \"\"\"\n\n outputs = dict()\n\n if padding_mask is None:\n padding_mask = jnp.expand_dims(jnp.where(x < num_categories - 1, 1,\n 0),\n axis=2)\n\n if not use_transformer:\n x = encoder_fn(x,\n num_categories=num_categories,\n **encoder_fn_kwargs)\n else:\n x = encoder_fn(x)\n\n rep = reduce_fn(x, padding_mask=padding_mask, **reduce_fn_kwargs)\n\n outputs['embedding'] = rep\n\n out = nn.Dense(rep,\n output_features,\n kernel_init=nn.initializers.xavier_uniform(),\n bias_init=nn.initializers.normal(stddev=1e-6))\n\n outputs['prediction'] = out\n\n return outputs[output]\n\n\ndef create_representation_model(encoder_fn,\n encoder_fn_kwargs,\n reduce_fn,\n reduce_fn_kwargs,\n num_categories,\n output_features,\n output='prediction',\n key=random.PRNGKey(0),\n encoder_fn_params=None,\n reduce_fn_params=None,\n predict_fn_params=None):\n \"\"\"Instantiates a RepresentationModel object.\"\"\"\n\n module = RepresentationModel.partial(encoder_fn=encoder_fn,\n encoder_fn_kwargs=encoder_fn_kwargs,\n reduce_fn=reduce_fn,\n reduce_fn_kwargs=reduce_fn_kwargs,\n num_categories=num_categories,\n output_features=output_features,\n output=output,\n use_transformer=False)\n\n _, initial_params = RepresentationModel.init_by_shape(\n key,\n input_specs=[((1, 1), jnp.float32)],\n encoder_fn=encoder_fn,\n encoder_fn_kwargs=encoder_fn_kwargs,\n reduce_fn=reduce_fn,\n reduce_fn_kwargs=reduce_fn_kwargs,\n num_categories=num_categories,\n output_features=output_features,\n output=output,\n use_transformer=False)\n\n loaded_params = load_params(initial_params, encoder_fn_params,\n reduce_fn_params, predict_fn_params)\n\n model = nn.Model(module, loaded_params)\n\n return model\n\n\ndef create_transformer_representation_model(transformer_kwargs,\n reduce_fn,\n reduce_fn_kwargs,\n num_categories,\n output_features,\n bidirectional=False,\n output='prediction',\n key=random.PRNGKey(0),\n encoder_fn_params=None,\n reduce_fn_params=None,\n predict_fn_params=None):\n \"\"\"Instantiates a RepresentationModel object with Transformer encoder.\"\"\"\n\n if not bidirectional:\n transformer = models.FlaxLM(**transformer_kwargs)\n else:\n transformer = models.FlaxBERT(**transformer_kwargs)\n transformer_optimizer = transformer._optimizer\n transformer_model = models.jax_utils.unreplicate(\n transformer_optimizer.target)\n transformer_encoder = transformer_model.module.partial(\n output_head='output_emb')\n\n module = RepresentationModel.partial(encoder_fn=transformer_encoder,\n encoder_fn_kwargs={},\n reduce_fn=reduce_fn,\n reduce_fn_kwargs=reduce_fn_kwargs,\n num_categories=num_categories,\n output_features=output_features,\n output=output,\n use_transformer=True)\n\n _, initial_params = RepresentationModel.init_by_shape(\n key,\n input_specs=[((1, 1), jnp.float32)],\n encoder_fn=transformer_encoder,\n encoder_fn_kwargs={},\n reduce_fn=reduce_fn,\n reduce_fn_kwargs=reduce_fn_kwargs,\n num_categories=num_categories,\n output_features=output_features,\n output=output,\n use_transformer=True)\n\n loaded_params = load_params(initial_params, 
encoder_fn_params,\n reduce_fn_params, predict_fn_params)\n\n model = nn.Model(module, loaded_params)\n\n return model\n\n\ndef architecture_to_layers(encoder_fn_name, reduce_fn_name):\n\n layers = []\n\n no_trainable_encoder = False\n if encoder_fn_name is None or encoder_fn_name == 'transformer':\n layers.append('Transformer_0')\n elif encoder_fn_name == 'one_hot':\n no_trainable_encoder = True\n elif encoder_fn_name == 'cnn_one_hot':\n layers.append('CNN_0')\n else:\n raise ValueError('Incorrect encoder name specified.')\n\n no_trainable_lens = False\n if reduce_fn_name == 'mean_pool' or reduce_fn_name == 'max_pool':\n no_trainable_lens = True\n elif reduce_fn_name == 'linear_mean_pool' or reduce_fn_name == 'linear_max_pool':\n if no_trainable_encoder:\n layers.append('Dense_0')\n else:\n layers.append('Dense_1')\n elif reduce_fn_name == 'gated_conv':\n if no_trainable_encoder:\n layers.append('GatedConv_0')\n else:\n layers.append('GatedConv_1')\n else:\n raise ValueError('Incorrect lens name specified.')\n\n if no_trainable_encoder:\n if no_trainable_lens:\n layers.append('Dense_0')\n else:\n layers.append('Dense_1')\n else:\n if no_trainable_lens:\n layers.append('Dense_1')\n else:\n layers.append('Dense_2')\n\n trainable_encoder = not no_trainable_encoder\n\n return layers, trainable_encoder\n"
] | [
[
"tensorflow.data.Dataset.zip",
"tensorflow.data.Dataset.from_tensor_slices"
]
] |
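The `use_pmap` branch in the training loop above shards each batch across devices before calling `p_train_step`. A minimal sketch of that reshape, assuming the batch size divides evenly by the device count (mirrors the behavior of flax's `common_utils.shard`; names here are illustrative):

```python
# Hedged sketch of pmap-style batch sharding; not the library implementation.
import numpy as np

def shard(batch: np.ndarray, num_devices: int) -> np.ndarray:
    """Reshape [batch, ...] -> [num_devices, batch // num_devices, ...]."""
    return batch.reshape((num_devices, -1) + batch.shape[1:])

X = np.zeros((8, 16))
print(shard(X, 4).shape)  # (4, 2, 16)
```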
sethmnielsen/mavsim_template_files | [
"453ec4f7d38fc2d1162198b554834b5bdb7de96f"
] | [
"mavsim_python/chap3/mav_dynamics.py"
] | [
"\"\"\"\nmav_dynamics\n - this file implements the dynamic equations of motion for MAV\n - use unit quaternion for the attitude state\n\npart of mavsimPy\n - Beard & McLain, PUP, 2012\n - Update history:\n 12/17/2018 - RWB\n 1/14/2019 - RWB\n\"\"\"\nimport sys\nsys.path.append('..')\nimport numpy as np\n\n# load message types\nfrom message_types.msg_state import msg_state\n\nimport parameters.aerosonde_parameters as MAV\nfrom tools.rotations import Quaternion2Euler\n\nfrom IPython.core.debugger import Pdb\n\nclass mav_dynamics:\n def __init__(self, Ts):\n self.ts_simulation = Ts\n # set initial states based on parameter file\n self.reset_state()\n self.msg_true_state = msg_state()\n\n ###################################\n # public functions\n\n def reset_state(self):\n # _state is the 13x1 internal state of the aircraft that is being propagated:\n # _state = [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r]\n self._state = np.array([[MAV.pn0], # (0)\n [MAV.pe0], # (1)\n [MAV.pd0], # (2)\n [MAV.u0], # (3)\n [MAV.v0], # (4)\n [MAV.w0], # (5)\n [MAV.e0], # (6)\n [MAV.e1], # (7)\n [MAV.e2], # (8)\n [MAV.e3], # (9)\n [MAV.p0], # (10)\n [MAV.q0], # (11)\n [MAV.r0]]) # (12)\n\n\n def update_state(self, forces_moments):\n '''\n\n Integrate the differential equations defining dynamics.\n Inputs are the forces and moments on the aircraft.\n Ts is the time step between function calls.\n '''\n\n # Integrate ODE using Runge-Kutta RK4 algorithm\n time_step = self.ts_simulation\n k1 = self._derivatives(self._state, forces_moments)\n k2 = self._derivatives(self._state + time_step/2.*k1, forces_moments)\n k3 = self._derivatives(self._state + time_step/2.*k2, forces_moments)\n k4 = self._derivatives(self._state + time_step*k3, forces_moments)\n self._state += time_step/6 * (k1 + 2*k2 + 2*k3 + k4)\n\n # normalize the quaternion\n e0 = self._state.item(6)\n e1 = self._state.item(7)\n e2 = self._state.item(8)\n e3 = self._state.item(9)\n normE = np.sqrt(e0**2+e1**2+e2**2+e3**2)\n self._state[6][0] = self._state.item(6)/normE\n self._state[7][0] = self._state.item(7)/normE\n self._state[8][0] = self._state.item(8)/normE\n self._state[9][0] = self._state.item(9)/normE\n\n # update the message class for the true state\n self._update_msg_true_state()\n\n ###################################\n # private functions\n def _derivatives(self, state, forces_moments):\n \"\"\"\n for the dynamics xdot = f(x, u), returns f(x, u)\n \"\"\"\n # extract the states\n pn = state.item(0)\n pe = state.item(1)\n pd = state.item(2)\n u = state.item(3)\n v = state.item(4)\n w = state.item(5)\n e0 = state.item(6)\n e1 = state.item(7)\n e2 = state.item(8)\n e3 = state.item(9)\n p = state.item(10)\n q = state.item(11)\n r = state.item(12)\n # extract forces/moments\n fx = forces_moments.item(0)\n fy = forces_moments.item(1)\n fz = forces_moments.item(2)\n l = forces_moments.item(3)\n m = forces_moments.item(4)\n n = forces_moments.item(5)\n\n # position kinematics\n R_vb = np.array([[e1**2+e0**2-e2**2-e3**2, 2*(e1*e2-e3*e0), 2*(e1*e3+e2*e0)],\n [2*(e1*e2+e3*e0), e2**2+e0**2-e1**2-e3**2, 2*(e2*e3-e1*e0)],\n [2*(e1*e3-e2*e0), 2*(e2*e3+e1*e0), e3**2+e0**2-e1**2-e2**2]])\n pn_dot, pe_dot, pd_dot = R_vb @ np.array([u, v, w])\n\n # position dynamics\n vec_pos = np.array([r*v - q*w, p*w - r*u, q*u - p*v])\n u_dot, v_dot, w_dot = vec_pos + 1/MAV.mass * np.array([fx, fy, fz])\n\n # rotational kinematics\n mat_rot = np.array([[0, -p, -q, -r],\n [p, 0, r, -q],\n [q, -r, 0, p],\n [r, q, -p, 0]])\n e0_dot, e1_dot, e2_dot, e3_dot = 0.5*mat_rot @ 
np.array([e0,e1,e2,e3])\n\n        # rotational dynamics\n        G = MAV.gamma\n        G1 = MAV.gamma1\n        G2 = MAV.gamma2\n        G3 = MAV.gamma3\n        G4 = MAV.gamma4\n        G5 = MAV.gamma5\n        G6 = MAV.gamma6\n        G7 = MAV.gamma7\n        G8 = MAV.gamma8\n\n        vec_rot = np.array([G1*p*q - G2*q*r, G5*p*r - G6*(p**2-r**2), G7*p*q - G1*q*r])\n        vec_rot2 = np.array([G3*l + G4*n, m/MAV.Jy, G4*l + G8*n])\n\n        p_dot, q_dot, r_dot = vec_rot + vec_rot2\n\n        # collect the derivative of the states\n        x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,\n                           e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T\n        return x_dot\n\n    def _update_msg_true_state(self):\n        # update the true state message:\n        phi, theta, psi = Quaternion2Euler(self._state[6:10])\n        self.msg_true_state.pn = self._state.item(0)\n        self.msg_true_state.pe = self._state.item(1)\n        self.msg_true_state.h = -self._state.item(2)\n        self.msg_true_state.phi = phi\n        self.msg_true_state.theta = theta\n        self.msg_true_state.psi = psi\n        self.msg_true_state.p = self._state.item(10)\n        self.msg_true_state.q = self._state.item(11)\n        self.msg_true_state.r = self._state.item(12)\n"
] | [
[
"numpy.array",
"numpy.sqrt"
]
] |
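`update_state` above integrates the equations of motion with a fixed-step fourth-order Runge-Kutta scheme. The same step, isolated and applied to an illustrative scalar ODE so the result can be checked by hand:

```python
# Minimal RK4 step in the same form as update_state (names are illustrative).
import numpy as np

def rk4_step(f, x, u, dt):
    k1 = f(x, u)
    k2 = f(x + dt / 2.0 * k1, u)
    k3 = f(x + dt / 2.0 * k2, u)
    k4 = f(x + dt * k3, u)
    return x + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)

# One step of xdot = -x from x = 1: exact answer is exp(-0.1) ~ 0.904837
x1 = rk4_step(lambda x, u: -x, np.array([1.0]), None, 0.1)
print(x1)
```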
EEmGuzman/orphics | [
"f8f25f9db7c9104dba5cbeaac0b4924bf4f6920e"
] | [
"tests/legacy/test_cross_cov.py"
] | [
"from __future__ import print_function\nfrom orphics import maps,io,cosmology,symcoupling as sc,stats,lensing\nfrom enlib import enmap,bench\nimport numpy as np\nimport os,sys\n\n\n\ncache = True\nhdv = False\ndeg = 5\npx = 1.5\nshape,wcs = maps.rect_geometry(width_deg = deg,px_res_arcmin=px)\nmc = sc.LensingModeCoupling(shape,wcs)\npols = ['TT',\"TE\",'EE','EB','TB']\n\n\ntheory = cosmology.default_theory(lpad=20000)\nnoise_t = 10.0\nnoise_p = 10.0*np.sqrt(2.)\nfwhm = 1.5\nkbeam = maps.gauss_beam(fwhm,mc.modlmap)\nells = np.arange(0,3000,1)\nlbeam = maps.gauss_beam(fwhm,ells)\nntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./kbeam**2.)\nnee = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)\nnbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)\nlntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./lbeam**2.)\nlnee = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)\nlnbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)\n\n\nellmin = 20\nellmax = 3000\nxmask = maps.mask_kspace(shape,wcs,lmin=ellmin,lmax=ellmax)\nymask = xmask\n\nAls = {}\nfor pol in pols:\n with bench.show(\"ALcalc\"):\n AL = mc.AL(pol,xmask,ymask,ntt,nee,nbb,theory=theory,hdv=hdv,cache=cache)\n Als[pol] = AL.copy()\nbin_edges = np.arange(10,2000,40)\n\npl = io.Plotter(yscale='log')\npl.add(ells,theory.gCl('kk',ells),lw=3,color='k')\n\ncrosses = [('TT','EE'),('TT','TE'),('EE','TE'),('EB','TB')]\n\nfor pol1,pol2 in crosses:\n print(pol1,pol2)\n with bench.show(\"ALcalc\"):\n cross = mc.cross(pol1,pol2,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,\n ynoise_t=None,ynoise_e=None,ynoise_b=None,\n cross_xnoise_t=None,cross_ynoise_t=None,\n cross_xnoise_e=None,cross_ynoise_e=None,\n cross_xnoise_b=None,cross_ynoise_b=None,\n theory_norm=None,hdv=hdv,save_expression=\"current\",validate=True,cache=True)\n\n Nlalt = np.abs(mc.NL(Als[pol1],Als[pol2],cross))\n cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)\n pl.add(cents,nkkalt,marker=\"o\",alpha=0.2,label=pol1 + \"x\" + pol2)\npl.legend()\npl.done()\n\nzcrosses = [('TT','TB'),('TT','EB'),('EE','EB'),('EE','TB')]\n\npl = io.Plotter()\n\nfor pol1,pol2 in zcrosses:\n print(pol1,pol2)\n with bench.show(\"ALcalc\"):\n cross = mc.cross(pol1,pol2,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,\n ynoise_t=None,ynoise_e=None,ynoise_b=None,\n cross_xnoise_t=None,cross_ynoise_t=None,\n cross_xnoise_e=None,cross_ynoise_e=None,\n cross_xnoise_b=None,cross_ynoise_b=None,\n theory_norm=None,hdv=hdv,save_expression=\"current\",validate=True,cache=True)\n\n Nlalt = mc.NL(Als[pol1],Als[pol2],cross)\n cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)\n pl.add(cents,nkkalt,marker=\"o\",alpha=0.2,label=pol1 + \"x\" + pol2)\n\npl.legend()\npl.done()\n\nprint(\"nffts : \",mc.nfft,mc.nifft)\n"
] | [
[
"numpy.arange",
"numpy.nan_to_num",
"numpy.sqrt"
]
] |
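`stats.bin_in_annuli` above reduces each 2D noise map to a 1D spectrum by averaging over rings of constant |ell|. A hedged numpy sketch of that kind of annular binning (the real orphics helper may differ in details):

```python
# Annulus binning sketch: average a 2D Fourier-space map over |ell| rings.
import numpy as np

def bin_in_annuli(map2d, modlmap, bin_edges):
    cents = 0.5 * (bin_edges[1:] + bin_edges[:-1])
    binned = np.array([map2d[(modlmap >= lo) & (modlmap < hi)].mean()
                       for lo, hi in zip(bin_edges[:-1], bin_edges[1:])])
    return cents, binned

ly, lx = np.mgrid[-50:50, -50:50]
modlmap = np.hypot(ly, lx)          # |ell| for each Fourier pixel (toy grid)
cents, binned = bin_in_annuli(modlmap**2, modlmap, np.arange(10, 50, 10))
print(cents, binned)
```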
AKSHANSH47/crowdsource-platform2 | [
"a31446d44bc10dca56a0d534cab226947a6bbb4e"
] | [
"fixtures/createJson.py"
] | [
"__author__ = 'Megha'\n# Script to transfer csv containing data about various models to json\n# Input csv file constituting of the model data\n# Output json file representing the csv data as json object\n# Assumes model name to be first line\n# Field names of the model on the second line\n# Data seperated by __DELIM__\n# Example:\n# L01 ModelName: registrationmodel\n# L02 FieldNames: user,activation_key,created_timestamp,last_updated\n# L03 Data: 1,qwer,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z\n# L04 Data: 2,assd,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z\n\nimport numpy as np\nimport pandas as pd\nimport json as json\n\n__MODULE_NAME__ = 7 # Number of lines after which Model Name\n__INPUT_FILE__ = 'meghaWorkerData.csv'\n__OUTPUT_FILE__ = 'meghaWorkerData.json'\n__NEWLINE__ = '\\n'\n__KEY1__ = 0\n__KEY2__ = 0\n__DELIM__ = ','\n__APPEND__ = 'crowdsourcing.'\n__KEY_MODEL__ = 'model'\n__KEY_FIELDS__ = 'fields'\n__KEY_PK__ = 'pk'\n\n\ndef create_dict(input_dict, module, data_collection):\n for key, value in input_dict.items():\n data_dict = {}\n data_dict[__KEY_FIELDS__] = value\n data_dict[__KEY_PK__] = key\n data_dict[__KEY_MODEL__] = __APPEND__ + module\n data_collection.append(data_dict)\n return data_collection\n\n\ndef create_data_json(file):\n in_fp = open(file, 'rb')\n file_lines = in_fp.readlines()\n in_fp.close()\n data_collection = []\n for line_no in range(0, len(file_lines)):\n if line_no % __MODULE_NAME__ == 0:\n columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)\n instance1 = file_lines[line_no + 2].strip(__NEWLINE__).split(__DELIM__)\n instance2 = file_lines[line_no + 3].strip(__NEWLINE__).split(__DELIM__)\n instance3 = file_lines[line_no + 4].strip(__NEWLINE__).split(__DELIM__)\n instance4 = file_lines[line_no + 5].strip(__NEWLINE__).split(__DELIM__)\n instance5 = file_lines[line_no + 6].strip(__NEWLINE__).split(__DELIM__)\n data = np.array([instance1, instance2, instance3, instance4, instance5])\n df = pd.DataFrame(data, columns=columns)\n create_dict(df.transpose().to_dict(), file_lines[line_no].strip(__NEWLINE__), data_collection)\n del (df)\n print(data_collection)\n out_fp = open(__OUTPUT_FILE__, 'wb')\n out_fp.write(json.dumps(data_collection, indent=2))\n out_fp.close()\n\n\nif __name__ == '__main__':\n create_data_json(__INPUT_FILE__)\n"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] |
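The `create_dict`/`create_data_json` pipeline above turns a DataFrame into Django-fixture-style records keyed by `model`, `pk`, and `fields`. The same transformation in miniature, on synthetic data:

```python
# Fixture-dict construction sketch; data and model name are illustrative.
import json
import pandas as pd

df = pd.DataFrame([["1", "qwer"], ["2", "assd"]],
                  columns=["user", "activation_key"])
fixtures = [{"model": "crowdsourcing.registrationmodel", "pk": pk, "fields": fields}
            for pk, fields in df.transpose().to_dict().items()]
print(json.dumps(fixtures, indent=2))
```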
convergence-lab/covid19-detection | [
"6a57e87ec1d8688712e6170a4c3aafb6e113ca73"
] | [
"src/train.py"
] | [
"import toml\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, f1_score, roc_auc_score\nfrom logzero import logger\n\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom model import Model\nfrom data import load_data, CovidChestxrayDataset\n\ndef check_grad(parameters):\n grad = 0\n cnt = 0\n for p in parameters:\n grad += p.grad.norm()\n cnt += 1\n return grad / cnt\n\ndef train():\n with open(\"config.toml\") as f:\n config = toml.load(f)\n\n base_dir = config[\"data\"][\"base_dir\"]\n epochs = config[\"train\"][\"epochs\"]\n batch_size = config[\"train\"][\"batch_size\"]\n lr = config[\"train\"][\"lr\"]\n betas = config[\"train\"][\"betas\"]\n in_filters = config[\"model\"][\"in_filters\"]\n image_size = config[\"model\"][\"image_size\"]\n filters = config[\"model\"][\"filters\"]\n num_classes = config[\"model\"][\"num_classes\"]\n kernel_size = config[\"model\"][\"kernel_size\"]\n padding = config[\"model\"][\"padding\"]\n num_resblocks = config[\"model\"][\"num_resblocks\"]\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n records = load_data(base_dir)\n train_records, test_records = train_test_split(records, test_size=0.2)\n\n train_transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.RandomAffine(10, translate=[0.1, 0.1], shear=0.1),\n transforms.ColorJitter(brightness=0.7, contrast=0.7),\n transforms.ToTensor(),\n transforms.Normalize(0.5, 0.5)\n ])\n\n test_transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize(0.5, 0.5)\n ])\n\n trainset = CovidChestxrayDataset(train_records, base_dir, train_transform)\n testset = CovidChestxrayDataset(test_records, base_dir, test_transform)\n trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)\n testloader = DataLoader(testset, batch_size=1, shuffle=False)\n\n net = Model(in_filters, image_size, filters, kernel_size, padding, num_resblocks, num_classes)\n net.to(device)\n\n criterion = nn.NLLLoss()\n optimizer = optim.AdamW(net.parameters(), lr=lr, betas=betas, weight_decay=1e-2)\n\n for epoch in range(epochs):\n net.train()\n train_loss = 0\n train_targets = []\n train_probs = []\n train_preds = []\n grad = 0\n for batch in trainloader:\n img, label = batch\n train_targets += label.numpy().tolist()\n img, label = img.to(device), label.to(device)\n optimizer.zero_grad()\n pred = net(img)\n loss = criterion(pred, label)\n loss.backward()\n grad += check_grad(net.parameters())\n torch.nn.utils.clip_grad_norm_(net.parameters(), 1)\n optimizer.step()\n train_loss += loss.item()\n train_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()\n train_probs += pred.cpu().detach().numpy()[:, 1].tolist()\n acc = accuracy_score(train_targets, train_preds)\n f1 = f1_score(train_targets, train_preds, average=\"macro\")\n auc = roc_auc_score(train_targets, train_probs)\n logger.info(f\"Epoch {epoch+1} Train loss {train_loss/len(trainloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%, grad {grad/len(trainloader)}\")\n net.eval()\n test_loss = 0\n test_targets = []\n test_preds = []\n test_probs = []\n for batch in testloader:\n img, label = batch\n test_targets += label.numpy().tolist()\n img, label = img.to(device), label.to(device)\n with torch.no_grad():\n pred = net(img)\n loss = criterion(pred, label)\n test_loss += loss.item()\n test_preds += 
pred.cpu().detach().numpy().argmax(axis=1).tolist()\n            test_probs += pred.cpu().detach().numpy()[:, 1].tolist()\n\n        acc = accuracy_score(test_targets, test_preds)\n        f1 = f1_score(test_targets, test_preds, average=\"macro\")\n        auc = roc_auc_score(test_targets, test_probs)\n        logger.info(f\"Epoch {epoch+1} Test loss {test_loss/len(testloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%\")\n    # call state_dict() -- saving the bound method itself would not persist the weights\n    torch.save(net.state_dict(), \"net.pt\")\n\nif __name__ == \"__main__\":\n    train()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.NLLLoss",
"torch.save",
"torch.no_grad",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.roc_auc_score",
"torch.cuda.is_available",
"sklearn.model_selection.train_test_split"
]
] |
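`check_grad` plus `clip_grad_norm_` in the training loop above implement a monitor-then-clip pattern: measure the average gradient norm for logging, then clip before the optimizer step. A minimal standalone sketch:

```python
# Gradient-norm monitoring followed by clipping (toy model, illustrative data).
import torch
from torch import nn

net = nn.Linear(4, 2)
loss = net(torch.randn(8, 4)).sum()
loss.backward()

params = list(net.parameters())
avg_grad = sum(p.grad.norm() for p in params) / len(params)
torch.nn.utils.clip_grad_norm_(params, max_norm=1.0)
print(float(avg_grad))
```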
HLTCHKUST/emotion-dialogue | [
"0d58b339134dd9a2f386948ae474b270a77370f9"
] | [
"baseline/baseline_classifier.py"
] | [
"from utils import constant\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom xgboost import XGBClassifier\n\n\ndef get_classifier(ty=\"LR\", c=1.0, max_depth=5, n_estimators=300, gamma=0):\n if(ty==\"LR\"):\n classifier = LogisticRegression(solver='lbfgs',multi_class='multinomial', C=c)\n elif(ty==\"SVM\"):\n classifier = SVC(kernel='linear')\n elif(ty==\"XGB\"):\n classifier = XGBClassifier(max_depth=max_depth, n_estimators=n_estimators, gamma=gamma, n_jobs=4, tree_method=\"gpu_hist\") ## change later ##\n return classifier"
] | [
[
"sklearn.svm.SVC",
"sklearn.linear_model.LogisticRegression"
]
] |
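A short usage sketch for the `LR` branch of `get_classifier` above, on synthetic data (the `XGB` branch assumes a GPU via `tree_method="gpu_hist"`, so logistic regression is the portable choice here):

```python
# Fitting the LR configuration from get_classifier on synthetic data.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
clf = LogisticRegression(solver="lbfgs", multi_class="multinomial", C=1.0)
clf.fit(X, y)
print("train accuracy:", clf.score(X, y))
```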
dedsec-9/AutoGL | [
"487f2b2f798b9b1363ad5dc100fb410b12222e06"
] | [
"examples/node_classification.py"
] | [
"import yaml\nimport random\nimport torch.backends.cudnn\nimport numpy as np\nfrom autogl.datasets import build_dataset_from_name\nfrom autogl.solver import AutoNodeClassifier\nfrom autogl.module import Acc\nfrom autogl.backend import DependentBackend\n\nif __name__ == \"__main__\":\n\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\n parser = ArgumentParser(\n \"auto node classification\", formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"--dataset\",\n default=\"cora\",\n type=str,\n help=\"dataset to use\",\n choices=[\n \"cora\",\n \"pubmed\",\n \"citeseer\",\n \"coauthor_cs\",\n \"coauthor_physics\",\n \"amazon_computers\",\n \"amazon_photo\",\n ],\n )\n parser.add_argument(\n \"--configs\",\n type=str,\n default=\"../configs/nodeclf_gcn_benchmark_small.yml\",\n help=\"config to use\",\n )\n # following arguments will override parameters in the config file\n parser.add_argument(\"--hpo\", type=str, default=\"tpe\", help=\"hpo methods\")\n parser.add_argument(\n \"--max_eval\", type=int, default=50, help=\"max hpo evaluation times\"\n )\n parser.add_argument(\"--seed\", type=int, default=0, help=\"random seed\")\n parser.add_argument(\"--device\", default=0, type=int, help=\"GPU device\")\n\n args = parser.parse_args()\n if torch.cuda.is_available():\n torch.cuda.set_device(args.device)\n seed = args.seed\n # set random seed\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n dataset = build_dataset_from_name(args.dataset)\n label = dataset[0].nodes.data[\"y\" if DependentBackend.is_pyg() else \"label\"]\n num_classes = len(np.unique(label.numpy()))\n\n configs = yaml.load(open(args.configs, \"r\").read(), Loader=yaml.FullLoader)\n configs[\"hpo\"][\"name\"] = args.hpo\n configs[\"hpo\"][\"max_evals\"] = args.max_eval\n autoClassifier = AutoNodeClassifier.from_config(configs)\n\n # train\n if args.dataset in [\"cora\", \"citeseer\", \"pubmed\"]:\n autoClassifier.fit(dataset, time_limit=3600, evaluation_method=[Acc])\n else:\n autoClassifier.fit(\n dataset,\n time_limit=3600,\n evaluation_method=[Acc],\n seed=seed,\n train_split=20 * num_classes,\n val_split=30 * num_classes,\n balanced=False,\n )\n autoClassifier.get_leaderboard().show()\n acc = autoClassifier.evaluate(metric=\"acc\")\n print(\"test acc: {:.4f}\".format(acc))\n"
] | [
[
"numpy.random.seed"
]
] |
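The seeding block in the script above, condensed into a reusable helper; this is a sketch covering the same surfaces (Python, NumPy, PyTorch, cuDNN determinism):

```python
# Reproducibility helper mirroring the script's seed-setting block.
import random
import numpy as np
import torch
import torch.backends.cudnn

def set_seed(seed: int) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

set_seed(0)
```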
hekaplex/resnet_dl | [
"fc8d4dcc0adffbe22d01d333e6cf5db955f2f011"
] | [
"benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py"
] | [
"#\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (c) 2019 Intel Corporation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n# SPDX-License-Identifier: EPL-2.0\r\n#\r\n\r\n\"\"\"Send simulated image data to tensorflow_model_server loaded with ResNet50 or InceptionV3 model.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport random\r\n\r\nimport grpc\r\nimport numpy as np\r\nimport sys\r\nimport tensorflow as tf\r\nimport tensorflow.compat.v1 as tf_v1\r\nimport time\r\nfrom tensorflow_serving.apis import predict_pb2\r\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\r\n\r\nfrom util import preprocess_image, parse_example_proto\r\n\r\ntf_v1.disable_eager_execution()\r\n\r\ntf_v1.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port')\r\ntf_v1.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use')\r\ntf_v1.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format')\r\ntf_v1.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).')\r\nFLAGS = tf_v1.app.flags.FLAGS\r\n\r\n\r\ndef sample_images(image_size):\r\n \"\"\"Pull a random batch of images from FLAGS.data_dir containing TF record formatted ImageNet validation set\r\n Returns:\r\n ndarray of float32 with shape [FLAGS.batch_size, image_size, image_size, 3]\r\n \"\"\"\r\n\r\n sample_file = random.choice(os.listdir(FLAGS.data_dir))\r\n dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file))\r\n dataset = dataset.map(lambda x: parse_example_proto(x)).shuffle(True).batch(FLAGS.batch_size)\r\n iterator = dataset.make_one_shot_iterator()\r\n next_element = iterator.get_next()\r\n with tf.Session() as sess:\r\n images, labels = sess.run(next_element)\r\n images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images])\r\n\r\n return images\r\n\r\n\r\ndef main(_):\r\n if FLAGS.model == 'resnet50':\r\n image_size = 224\r\n elif FLAGS.model == 'inceptionv3':\r\n image_size = 299\r\n else:\r\n print('Please specify model as either resnet50 or inceptionv3.')\r\n sys.exit(-1)\r\n\r\n channel = grpc.insecure_channel(FLAGS.server)\r\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\r\n i = 0\r\n num_iteration = 40\r\n warm_up_iteration = 10\r\n total_time = 0\r\n for _ in range(num_iteration):\r\n i += 1\r\n if FLAGS.data_dir:\r\n image_np = sample_images(image_size)\r\n else:\r\n image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32)\r\n if FLAGS.model == 'resnet50':\r\n # For ResNet50, rescale to [0, 256]\r\n image_np *= 256.0\r\n elif FLAGS.model == 'inceptionv3':\r\n # For InceptionV3, rescale to [-1, 1]\r\n image_np = (image_np - 0.5) * 2.0\r\n\r\n request = predict_pb2.PredictRequest()\r\n request.model_spec.name = FLAGS.model\r\n request.model_spec.signature_name = 'serving_default'\r\n request.inputs['input'].CopyFrom(\r\n tf.make_tensor_proto(image_np, shape=[FLAGS.batch_size, 
image_size, image_size, 3]))\r\n start_time = time.time()\r\n stub.Predict(request, 10.0) # 10 secs timeout\r\n time_consume = time.time() - start_time\r\n print('Iteration %d: %.3f sec' % (i, time_consume))\r\n if i > warm_up_iteration:\r\n total_time += time_consume\r\n\r\n time_average = total_time / (num_iteration - warm_up_iteration)\r\n print('Average time: %.3f sec' % (time_average))\r\n\r\n print('Batch size = %d' % FLAGS.batch_size)\r\n if (FLAGS.batch_size == 1):\r\n print('Latency: %.3f ms' % (time_average * 1000))\r\n\r\n print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average))\r\n\r\n\r\nif __name__ == '__main__':\r\n tf_v1.app.run()\r\n"
] | [
[
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.make_tensor_proto",
"numpy.random.rand",
"tensorflow.Session",
"tensorflow.compat.v1.app.flags.DEFINE_integer",
"tensorflow.compat.v1.app.flags.DEFINE_string",
"tensorflow.compat.v1.app.run"
]
] |
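The benchmark above discards warm-up iterations before averaging, then derives latency and throughput from the mean iteration time. That arithmetic in isolation, with illustrative timings:

```python
# Warm-up-aware latency/throughput arithmetic from main() (timings are fake).
times = [0.050] * 40           # per-iteration seconds
warm_up_iteration = 10
batch_size = 1

time_average = sum(times[warm_up_iteration:]) / (len(times) - warm_up_iteration)
print("Latency: %.3f ms" % (time_average * 1000))
print("Throughput: %.3f images/sec" % (batch_size / time_average))
```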
arlo-lib/ARLO | [
"159669884044686e36e07bd1cc0948884ed7cc8d"
] | [
"experiments/Scripts for creating plots/sac_performance_over_generations.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n x=np.arange(50)\n \n y=np.array([-59.00138158129509, \n -43.966695525591895, \n -52.5277642686108,\n -32.1793153104166,\n -37.81484603001339,\n -24.97787027415733,\n -20.170115700140766,\n -19.194577812051865,\n -24.267556747544734,\n -18.56846706310683,\n -24.168507205879642,\n -21.613453728913854,\n -19.833679338413056,\n -16.78310378266553,\n -15.692655896866523,\n -15.496178593312704,\n -15.23787215267857,\n -14.754095951096263,\n -12.79724037524585,\n -11.496812508420765,\n -11.593305322673082,\n -12.144980726639616,\n -11.889169042516812,\n -10.983010599192548,\n -10.751331950717917,\n -10.887445777009278,\n -10.94197566653676,\n -10.983575687515879,\n -10.315668585661115,\n -10.200188159394665,\n -10.2623815297516,\n -9.98878690162022,\n -9.664489111145294,\n -9.798550374351311,\n -9.66769644336881,\n -9.114549499466483,\n -9.259332831572362,\n -9.175694376996443,\n -9.415038345909062,\n -9.50191440403006,\n -9.36517394141991,\n -9.244892043097575,\n -9.220243263930586,\n -9.160062939634974,\n -9.293750423507198,\n -9.189954421974406,\n -9.125946744761388,\n -9.182482014624696,\n -9.135265034880312,\n -9.35027383852138])\n \n plt.plot()\n plt.plot(x,y)"
] | [
[
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.array"
]
] |
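A sketch of the same per-generation performance plot with axis labels and an explicit `show()`, using an illustrative stand-in for the recorded SAC returns rather than the hard-coded array:

```python
# Labeled variant of the plot above; y is a synthetic stand-in curve.
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(50)
y = -50.0 * np.exp(-x / 8.0) - 9.0   # illustrative, not the recorded data
plt.plot(x, y)
plt.xlabel("generation")
plt.ylabel("average return")
plt.show()
```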
satishpasumarthi/sagemaker-python-sdk | [
"255a339ae985041ef47e3a80da91b9f54bca17b9"
] | [
"tests/integ/test_ntm.py"
] | [
"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom sagemaker import NTM, NTMModel, Predictor\nfrom sagemaker.amazon.common import read_records\nfrom sagemaker.serverless import ServerlessInferenceConfig\nfrom sagemaker.utils import unique_name_from_base\nfrom tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES\nfrom tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name\nfrom tests.integ.record_set import prepare_record_set_from_local_files\n\n\[email protected]\[email protected](\n reason=\"This test has always failed, but the failure was masked by a bug. \"\n \"This test should be fixed. Details in https://github.com/aws/sagemaker-python-sdk/pull/968\"\n)\ndef test_ntm(sagemaker_session, cpu_instance_type):\n job_name = unique_name_from_base(\"ntm\")\n\n with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):\n data_path = os.path.join(DATA_DIR, \"ntm\")\n data_filename = \"nips-train_1.pbr\"\n\n with open(os.path.join(data_path, data_filename), \"rb\") as f:\n all_records = read_records(f)\n\n # all records must be same\n feature_num = int(all_records[0].features[\"values\"].float32_tensor.shape[0])\n\n ntm = NTM(\n role=\"SageMakerRole\",\n instance_count=1,\n instance_type=cpu_instance_type,\n num_topics=10,\n sagemaker_session=sagemaker_session,\n )\n\n record_set = prepare_record_set_from_local_files(\n data_path, ntm.data_location, len(all_records), feature_num, sagemaker_session\n )\n ntm.fit(records=record_set, job_name=job_name)\n\n with timeout_and_delete_endpoint_by_name(job_name, sagemaker_session):\n model = NTMModel(ntm.model_data, role=\"SageMakerRole\", sagemaker_session=sagemaker_session)\n predictor = model.deploy(1, cpu_instance_type, endpoint_name=job_name)\n\n predict_input = np.random.rand(1, feature_num)\n result = predictor.predict(predict_input)\n\n assert len(result) == 1\n for record in result:\n assert record.label[\"topic_weights\"] is not None\n\n\ndef test_ntm_serverless_inference(sagemaker_session, cpu_instance_type):\n job_name = unique_name_from_base(\"ntm-serverless\")\n\n with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):\n data_path = os.path.join(DATA_DIR, \"ntm\")\n data_filename = \"nips-train_1.pbr\"\n\n with open(os.path.join(data_path, data_filename), \"rb\") as f:\n all_records = read_records(f)\n\n # all records must be same\n feature_num = int(all_records[0].features[\"values\"].float32_tensor.shape[0])\n\n ntm = NTM(\n role=\"SageMakerRole\",\n instance_count=1,\n instance_type=cpu_instance_type,\n num_topics=10,\n sagemaker_session=sagemaker_session,\n )\n\n record_set = prepare_record_set_from_local_files(\n data_path, ntm.data_location, len(all_records), feature_num, sagemaker_session\n )\n ntm.fit(records=record_set, job_name=job_name)\n\n with timeout_and_delete_endpoint_by_name(job_name, sagemaker_session):\n model = NTMModel(ntm.model_data, role=\"SageMakerRole\", 
sagemaker_session=sagemaker_session)\n predictor = model.deploy(\n serverless_inference_config=ServerlessInferenceConfig(), endpoint_name=job_name\n )\n assert isinstance(predictor, Predictor)\n"
] | [
[
"numpy.random.rand"
]
] |
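`ServerlessInferenceConfig()` above is constructed with defaults; it also accepts sizing knobs for memory and concurrency. A sketch with illustrative values (not taken from the test):

```python
# ServerlessInferenceConfig sizing sketch; values are illustrative.
from sagemaker.serverless import ServerlessInferenceConfig

config = ServerlessInferenceConfig(memory_size_in_mb=2048, max_concurrency=5)
```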
johnbachman/deft | [
"3643dd33ba4cb548f7622f24a3b87fbe48e38050"
] | [
"adeft/tests/test_disambiguate.py"
] | [
"import os\nimport uuid\nimport json\nimport shutil\nimport logging\nfrom nose.tools import raises\n\nfrom numpy import array_equal\n\nfrom adeft.modeling.classify import load_model\nfrom adeft.locations import TEST_RESOURCES_PATH\nfrom adeft.disambiguate import AdeftDisambiguator, load_disambiguator\n\nlogger = logging.getLogger(__name__)\n\n# Get test model path so we can write a temporary file here\nTEST_MODEL_PATH = os.path.join(TEST_RESOURCES_PATH, 'test_model')\n# Path to scratch directory to write files to during tests\nSCRATCH_PATH = os.path.join(TEST_RESOURCES_PATH, 'scratch')\n\nexample1 = ('The insulin receptor (IR) is a transmembrane receptor that'\n ' is activated by insulin, IGF-I, IGF-II and belongs to the large'\n ' class of tyrosine kinase receptors')\n\nexample2 = ('The insulin receptor (IR) is a transmembrane receptor that'\n ' is activated by insulin, IGF-I, IGF-II and belongs to the large'\n ' class of tyrosine kinase receptors. Insulin resistance (IR)'\n ' is considered as a pathological condition in which cells fail'\n ' to respond normally to the hormone insulin')\n\nexample3 = ('IR is a transmembrane receptor that is activated by insulin,'\n ' IGF-1, IFG-II and belongs to the large class of tyrosine'\n ' kinase receptors')\n\n\ndef test_load_disambiguator():\n ad = load_disambiguator('IR', path=TEST_MODEL_PATH)\n assert ad.shortforms == ['IR']\n assert hasattr(ad, 'classifier')\n assert hasattr(ad, 'recognizers')\n\n\ndef test_dump_disambiguator():\n ad1 = load_disambiguator('IR', path=TEST_MODEL_PATH)\n tempname = uuid.uuid4().hex\n ad1.dump(tempname, path=SCRATCH_PATH)\n ad2 = load_disambiguator('IR', path=SCRATCH_PATH)\n\n assert ad1.grounding_dict == ad2.grounding_dict\n assert ad1.names == ad2.names\n assert ad1.pos_labels == ad2.pos_labels\n assert (array_equal(ad1.classifier.estimator.named_steps['logit'].coef_,\n ad2.classifier.estimator.named_steps['logit'].coef_))\n assert ad1.info() == ad2.info(), (ad1.info(), ad2.info())\n try:\n shutil.rmtree(os.path.join(SCRATCH_PATH, tempname))\n except Exception:\n logger.warning('Could not clean up temporary folder %s'\n % os.path.join(SCRATCH_PATH, tempname))\n\n\ndef test_disambiguate():\n test_model = load_model(os.path.join(TEST_MODEL_PATH, 'IR',\n 'IR_model.gz'))\n with open(os.path.join(TEST_MODEL_PATH, 'IR',\n 'IR_grounding_dict.json')) as f:\n grounding_dict = json.load(f)\n with open(os.path.join(TEST_MODEL_PATH, 'IR',\n 'IR_names.json')) as f:\n names = json.load(f)\n\n ad = AdeftDisambiguator(test_model, grounding_dict, names)\n # case where there is a unique defining pattern\n disamb1 = ad.disambiguate(example1)\n assert disamb1[0] == 'HGNC:6091'\n assert disamb1[1] == 'INSR'\n assert disamb1[2]['HGNC:6091'] == 1.0\n assert disamb1[2]['MESH:D011839'] == 0.0\n\n # case where there are conflicting defining patterns\n disamb2 = ad.disambiguate(example2)\n preds = disamb2[2]\n nonzero = {key for key, value in preds.items() if value > 0.0}\n assert nonzero == {'HGNC:6091', 'MESH:D007333'}\n\n # case without a defining pattern\n disamb3 = ad.disambiguate(example3)\n assert disamb3[0] == 'HGNC:6091'\n assert disamb3[1] == 'INSR'\n\n\ndef test_modify_groundings():\n \"\"\"Test updating groundings of existing model.\"\"\"\n ad = load_disambiguator('IR', path=TEST_MODEL_PATH)\n ad.modify_groundings(new_groundings={'HGNC:6091': 'UP:P06213'},\n new_names={'HGNC:6091': 'Insulin Receptor'})\n\n assert 'UP:P06213' in ad.pos_labels\n assert 'UP:P06213' in ad.classifier.pos_labels\n assert 'UP:P06213' in 
ad.classifier.estimator.classes_\n assert 'UP:P06213' in ad.names\n assert 'UP:P06213' in ad.grounding_dict['IR'].values()\n assert ad.names['UP:P06213'] == 'Insulin Receptor'\n\n\n@raises(ValueError)\ndef test_modify_groundings_error():\n ad = load_disambiguator('IR', path=TEST_MODEL_PATH)\n ad.modify_groundings(new_groundings={'MESH:D011839': 'HGNC:6091'})\n"
] | [
[
"numpy.array_equal"
]
] |
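`test_dump_disambiguator` above checks a dump/load round trip into a scratch directory. The same pattern in miniature with plain JSON and a temporary directory (the payload is hypothetical):

```python
# Dump/load round-trip pattern, reduced to the essentials.
import json
import os
import tempfile

payload = {"shortforms": ["IR"], "pos_labels": ["HGNC:6091"]}
with tempfile.TemporaryDirectory() as scratch:
    path = os.path.join(scratch, "model.json")
    with open(path, "w") as f:
        json.dump(payload, f)
    with open(path) as f:
        assert json.load(f) == payload
print("round trip ok")
```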
zedian/esm | [
"9d2b50cd96753e8a703ca810e875c9e887047ed9"
] | [
"models.py"
] | [
"from __future__ import print_function\nimport torch\nfrom torch import nn\nimport torch.utils.data as Data\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\nimport collections\nimport math\nimport copy\n\ntorch.manual_seed(1)\nnp.random.seed(1)\n\n\n\nclass BIN_Interaction_Flat(nn.Sequential):\n '''\n Interaction Network with 2D interaction map\n '''\n \n def __init__(self, **config):\n super(BIN_Interaction_Flat, self).__init__()\n self.max_d = config['max_drug_seq']\n self.max_p = config['max_protein_seq']\n self.emb_size = config['emb_size']\n self.dropout_rate = config['dropout_rate']\n \n #densenet\n self.scale_down_ratio = config['scale_down_ratio']\n self.growth_rate = config['growth_rate']\n self.transition_rate = config['transition_rate']\n self.num_dense_blocks = config['num_dense_blocks']\n self.kernal_dense_size = config['kernal_dense_size']\n self.batch_size = config['batch_size']\n self.input_dim_drug = config['input_dim_drug']\n self.input_dim_target = config['input_dim_target']\n self.gpus = torch.cuda.device_count()\n self.n_layer = 2\n #encoder\n self.hidden_size = config['emb_size']\n self.intermediate_size = config['intermediate_size']\n self.num_attention_heads = config['num_attention_heads']\n self.attention_probs_dropout_prob = config['attention_probs_dropout_prob']\n self.hidden_dropout_prob = config['hidden_dropout_prob']\n \n self.flatten_dim = config['flat_dim'] \n \n # specialized embedding with positional one\n self.demb = Embeddings(self.input_dim_drug, self.emb_size, self.max_d, self.dropout_rate)\n self.pemb = Embeddings(self.input_dim_target, self.emb_size, self.max_p, self.dropout_rate)\n self.d_encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)\n self.p_encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)\n \n self.icnn = nn.Conv2d(1, 3, 3, padding = 0)\n \n self.decoder = nn.Sequential(\n nn.Linear(self.flatten_dim, 512),\n nn.ReLU(True),\n \n nn.BatchNorm1d(512),\n nn.Linear(512, 64),\n nn.ReLU(True),\n \n nn.BatchNorm1d(64),\n nn.Linear(64, 32),\n nn.ReLU(True),\n \n #output layer\n nn.Linear(32, 1)\n )\n \n def forward(self, d, p, d_mask, p_mask):\n \n ex_d_mask = d_mask.unsqueeze(1).unsqueeze(2)\n ex_p_mask = p_mask.unsqueeze(1).unsqueeze(2)\n \n ex_d_mask = (1.0 - ex_d_mask) * -10000.0\n ex_p_mask = (1.0 - ex_p_mask) * -10000.0\n \n d_emb = self.demb(d) # batch_size x seq_length x embed_size\n p_emb = self.pemb(p)\n\n # set output_all_encoded_layers be false, to obtain the last layer hidden states only...\n \n d_encoded_layers = self.d_encoder(d_emb.float(), ex_d_mask.float())\n p_encoded_layers = self.p_encoder(p_emb.float(), ex_p_mask.float())\n \n print(\"Drug encoded Layers shape: \", d_encoded_layers.shape)\n print(\"Protein encoded Layers shape: \", p_encoded_layers.shape)\n \n # repeat to have the same tensor size for aggregation \n d_aug = torch.unsqueeze(d_encoded_layers, 2).repeat(1, 1, self.max_p, 1) # repeat along protein size\n p_aug = torch.unsqueeze(p_encoded_layers, 1).repeat(1, self.max_d, 1, 1) # repeat along drug size\n \n print(\"Reshaped drug encoding shape: \", d_aug.shape)\n print(\"Reshaped protein encoding shape: \", p_aug.shape)\n \n i = d_aug * p_aug # interaction\n \n print(\"Interaction shape: \", i.shape)\n# if 
self.gpus != 0:\n# i_v = i.view(int(self.batch_size/self.gpus), -1, self.max_d, self.max_p)\n# else:\n i_v = i.view(self.batch_size, -1, self.max_d, self.max_p)\n print(i_v.shape)\n # batch_size x embed size x max_drug_seq_len x max_protein_seq_len\n i_v = torch.sum(i_v, dim = 1)\n print(i_v.shape)\n i_v = torch.unsqueeze(i_v, 1)\n print(i_v.shape)\n \n i_v = F.dropout(i_v, p = self.dropout_rate) \n \n #f = self.icnn2(self.icnn1(i_v))\n f = self.icnn(i_v)\n \n #print(f.shape)\n \n #f = self.dense_net(f)\n #print(f.shape)\n \n# f = f.view(int(self.batch_size/self.gpus), -1)\n f = f.view(self.batch_size, -1)\n# print(f.shape)\n \n #f_encode = torch.cat((d_encoded_layers[:,-1], p_encoded_layers[:,-1]), dim = 1)\n \n #score = self.decoder(torch.cat((f, f_encode), dim = 1))\n score = self.decoder(f)\n return score \n\nclass Single_Transformer_Embedding(nn.Sequential):\n def __init__(self, **config):\n super(Single_Transformer_Embedding, self).__init__()\n self.max_len = config[\"max_len\"]\n self.emb_size = config[\"emb_size\"]\n self.dropout_rate = config[\"dropout_rate\"]\n \n self.batch_size = config[\"batch_size\"]\n self.input_dim = config[\"input_dim\"]\n self.gpus = torch.cuda.device_count()\n self.n_layer = 2\n \n self.hidden_size = config[\"emb_size\"]\n self.intermediate_size = config[\"intermediate_size\"]\n self.num_attention_heads = config[\"num_attention_heads\"]\n self.attention_dropout = config[\"attention_probs_dropout_prob\"]\n self.hidden_dropout = config[\"hidden_dropout_prob\"]\n self.vocab_size = config['vocab_size']\n self.flatten_dim = config[\"flat_dim\"]\n \n self.emb = Embeddings(self.input_dim, self.emb_size, self.max_len, self.dropout_rate)\n \n self.encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_dropout, self.hidden_dropout)\n \n self.decoder = nn.Sequential(\n nn.Linear(self.hidden_size, self.vocab_size)\n )\n \n def forward(self, x):\n ex_mask = 0. 
* x\n \n emb = self.emb(x) # batch_size x seq_length x embed_size\n # set output_all_encoded_layers be false, to obtain the last layer hidden states only...\n \n encoded_layers = self.encoder(emb.float(), 0.)\n\n# encoded_layers = encoded_layers.view(self.batch_size, -1)\n embedding = self.decoder(encoded_layers)\n return embedding \n \n \nclass BIN_Transformer_Single(nn.Sequential):\n '''\n Simple transformer encoder\n '''\n \n def __init__(self, **config):\n super(BIN_Transformer_Single, self).__init__()\n self.max_d = config['max_drug_seq']\n self.max_p = config['max_protein_seq']\n self.max_len = config['max_len'] # drug and protein concatenated\n\n self.emb_size = config['emb_size']\n self.dropout_rate = config['dropout_rate']\n \n #densenet\n self.scale_down_ratio = config['scale_down_ratio']\n self.growth_rate = config['growth_rate']\n self.transition_rate = config['transition_rate']\n self.num_dense_blocks = config['num_dense_blocks']\n self.kernal_dense_size = config['kernal_dense_size']\n self.batch_size = config['batch_size']\n self.input_dim = config['input_dim']\n self.gpus = torch.cuda.device_count()\n self.n_layer = 2\n #encoder\n self.hidden_size = config['emb_size']\n self.intermediate_size = config['intermediate_size']\n self.num_attention_heads = config['num_attention_heads']\n self.attention_probs_dropout_prob = config['attention_probs_dropout_prob']\n self.hidden_dropout_prob = config['hidden_dropout_prob']\n self.vocab_size = config['vocab_size']\n \n self.flatten_dim = config['flat_dim'] \n \n # specialized embedding with positional one\n self.emb = Embeddings(self.input_dim, self.emb_size, self.max_len, self.dropout_rate)\n \n self.encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)\n \n self.decoder = nn.Sequential(\n nn.Linear(self.flatten_dim, 512),\n nn.ReLU(True),\n \n nn.BatchNorm1d(512),\n nn.Linear(512, 64),\n nn.ReLU(True),\n \n nn.BatchNorm1d(64),\n nn.Linear(64, 32),\n nn.ReLU(True),\n \n #output layer\n nn.Linear(32, 1)\n )\n \n def forward(self, x, mask):\n \n ex_mask = mask.unsqueeze(1).unsqueeze(2)\n \n ex_mask = (1.0 - ex_mask) * -10000.0\n \n emb = self.emb(x) # batch_size x seq_length x embed_size\n # set output_all_encoded_layers be false, to obtain the last layer hidden states only...\n \n encoded_layers = self.encoder(emb.float(), ex_mask.float())\n print(\"Encoder dim: \", encoded_layers.shape)\n \n # repeat to have the same tensor size for aggregation \n# aug = torch.unsqueeze(encoded_layers, 2).repeat(1, 1, self.max_len, 1) # repeat along protein size\n# print(aug.shape)\n #score = self.decoder(torch.cat((f, f_encode), dim = 1))\n encoded_layers = encoded_layers.view(self.batch_size, -1)\n score = self.decoder(encoded_layers)\n return score \n \n# help classes \n \nclass LayerNorm(nn.Module):\n def __init__(self, hidden_size, variance_epsilon=1e-12):\n\n super(LayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(hidden_size))\n self.beta = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = variance_epsilon\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.gamma * x + self.beta\n\n\nclass Embeddings(nn.Module):\n \"\"\"Construct the embeddings from protein/target, position embeddings.\n \"\"\"\n def __init__(self, vocab_size, hidden_size, max_position_size, 
dropout_rate):\n super(Embeddings, self).__init__()\n self.word_embeddings = nn.Embedding(vocab_size, hidden_size)\n self.position_embeddings = nn.Embedding(max_position_size, hidden_size)\n\n self.LayerNorm = LayerNorm(hidden_size)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, input_ids):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n \n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n\n embeddings = words_embeddings + position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n \n\nclass SelfAttention(nn.Module):\n def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):\n super(SelfAttention, self).__init__()\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n self.num_attention_heads = num_attention_heads\n self.attention_head_size = int(hidden_size / num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(hidden_size, self.all_head_size)\n self.key = nn.Linear(hidden_size, self.all_head_size)\n self.value = nn.Linear(hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer\n \n\nclass SelfOutput(nn.Module):\n def __init__(self, hidden_size, hidden_dropout_prob):\n super(SelfOutput, self).__init__()\n self.dense = nn.Linear(hidden_size, hidden_size)\n self.LayerNorm = LayerNorm(hidden_size)\n self.dropout = nn.Dropout(hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states \n \n \nclass Attention(nn.Module):\n def 
__init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):\n super(Attention, self).__init__()\n self.self = SelfAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob)\n self.output = SelfOutput(hidden_size, hidden_dropout_prob)\n\n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n attention_output = self.output(self_output, input_tensor)\n return attention_output \n \nclass Intermediate(nn.Module):\n def __init__(self, hidden_size, intermediate_size):\n super(Intermediate, self).__init__()\n self.dense = nn.Linear(hidden_size, intermediate_size)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = F.relu(hidden_states)\n return hidden_states\n\nclass Output(nn.Module):\n def __init__(self, intermediate_size, hidden_size, hidden_dropout_prob):\n super(Output, self).__init__()\n self.dense = nn.Linear(intermediate_size, hidden_size)\n self.LayerNorm = LayerNorm(hidden_size)\n self.dropout = nn.Dropout(hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\nclass Encoder(nn.Module):\n def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):\n super(Encoder, self).__init__()\n self.attention = Attention(hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob)\n self.intermediate = Intermediate(hidden_size, intermediate_size)\n self.output = Output(intermediate_size, hidden_size, hidden_dropout_prob)\n\n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output \n\nclass LinearEncoder(nn.Module):\n def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_dropout, hidden_dropout):\n super(LinearEncoder, self).__init__()\n attention_head_size = int(hidden_size / num_attention_heads)\n self.attention = LinearMultiHeadAttn(num_attention_heads, hidden_size, attention_head_size, hidden_dropout, attention_dropout)\n \n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n return attention_output\n\n# class DPFPEncoder(nn.Module):\n# def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_dropout, hidden_dropout):\n# super(DPFPEncoder, self).__init__()\n# attention_head_size = int(hidden_size / num_attention_heads)\n# self.attention = DPFPMultiHeadAttn(num_attention_heads, hidden_size, attention_head_size, hidden_dropout, attention_dropout)\n \n# def forward(self, hidden_states, attention_mask):\n# attention_output = self.attention(hidden_states, attention_mask)\n# return attention_output\n\nclass Encoder_MultipleLayers(nn.Module):\n def __init__(self, encoder, n_layer, hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):\n super(Encoder_MultipleLayers, self).__init__()\n layer = encoder(hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(n_layer)]) \n\n def 
forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):\n all_encoder_layers = []\n for layer_module in self.layer:\n hidden_states = layer_module(hidden_states, attention_mask)\n #if output_all_encoded_layers:\n # all_encoder_layers.append(hidden_states)\n #if not output_all_encoded_layers:\n # all_encoder_layers.append(hidden_states)\n return hidden_states\n"
] | [
[
"numpy.random.seed",
"torch.sqrt",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"torch.nn.functional.dropout",
"torch.nn.BatchNorm1d",
"torch.nn.Softmax",
"torch.cuda.device_count",
"torch.arange",
"torch.unsqueeze",
"torch.ones",
"torch.manual_seed",
"torch.sum",
"torch.nn.Linear",
"torch.nn.Embedding",
"torch.nn.functional.relu",
"torch.zeros",
"torch.nn.ReLU",
"torch.matmul"
]
] |
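`SelfAttention` above is standard scaled dot-product attention with a large negative additive mask on ignored positions. Its core, reduced to a few lines for a single head with illustrative shapes:

```python
# Scaled dot-product attention with an additive mask (single head).
import math
import torch

def attention(q, k, v, additive_mask):
    scores = q @ k.transpose(-1, -2) / math.sqrt(q.size(-1))
    probs = torch.softmax(scores + additive_mask, dim=-1)
    return probs @ v

q = k = v = torch.randn(2, 4, 8)   # batch x seq x head_dim
mask = torch.zeros(2, 1, 4)        # 0 = attend, -10000.0 = ignore (as above)
print(attention(q, k, v, mask).shape)  # torch.Size([2, 4, 8])
```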
ibme-qubic/oxasl | [
"e583103f3313aed2890b60190b6ca7b265a46e3c"
] | [
"oxasl/mask.py"
] | [
"\"\"\"\nOXASL - Module to generate a suitable mask for ASL data\n\nCopyright (c) 2008-2020 Univerisity of Oxford\n\"\"\"\nimport numpy as np\nimport scipy as sp\n\nimport fsl.wrappers as fsl\nfrom fsl.data.image import Image\n\nfrom oxasl import reg\nfrom oxasl.reporting import LightboxImage\n\ndef generate_mask(wsp):\n \"\"\"\n For compatibility\n \"\"\"\n run(wsp)\n\ndef run(wsp):\n \"\"\"\n Generate mask for ASL data\n\n - If a ready-made mask image is provided or has already been generated, this is returned\n - If a structural image is provided this will be used. Brain extraction and registration\n will be performed if required\n - If a calibration image is provided, this is used. It is assumed to be in the same space\n as the ASL data\n - If none of the above are present, the ASL data itself is averaged and brain extracted\n to produce the mask\n\n Required workspace attributes\n -----------------------------\n\n Formally there are no required attributes, however at least one image must be provided\n which enables a mask to be generated.\n\n Optional workspace attributes\n -----------------------------\n\n - ``asldata`` : ASL data image\n - ``mask`` : Existing brain mask\n - ``struc`` : Structural image (wholehead)\n - ``struc_brain``: Already brain-extracted structural image\n - ``asl2struc`` : Existring ASL->Structural space transformation matrix\n - ``calib`` : Calibration image\n - ``nativeref`` : ASL registration source image\n \"\"\"\n if wsp.rois is not None and wsp.rois.mask is not None:\n return\n\n wsp.sub(\"rois\")\n wsp.log.write(\"\\nGenerating ASL data mask\\n\")\n\n # Reporting\n page = wsp.report.page(\"mask\")\n page.heading(\"Mask generation\", level=0)\n\n if wsp.mask is not None:\n wsp.rois.mask_src = \"user\"\n mask_source = \"provided by user (assumed to be ASL space): %s\" % wsp.mask.name\n wsp.rois.mask = wsp.mask\n elif wsp.structural is not None and wsp.structural.struc is not None:\n # Preferred option is to use brain extracted structural\n wsp.rois.mask_src = \"struc\"\n page.heading(\"Brain extracted structural image\", level=1)\n page.image(\"struc_brain\", LightboxImage(wsp.structural.brain, bgimage=wsp.structural.struc))\n wsp.rois.mask_struc = wsp.structural.brain_mask\n wsp.rois.mask_asl = reg.change_space(wsp, wsp.structural.brain_mask, \"native\")\n wsp.rois.mask = Image(sp.ndimage.morphology.binary_fill_holes((wsp.rois.mask_asl.data > 0.25)).astype(np.int), header=wsp.rois.mask_asl.header)\n mask_source = \"generated from brain extracting structural image and registering to ASL space\"\n else:\n # Alternatively, use registration image (which will be BETed calibration or mean ASL image)\n wsp.rois.mask_src = \"nativeref\"\n wsp.rois.mask = Image((wsp.reg.nativeref.data != 0).astype(np.int), header=wsp.reg.nativeref.header)\n mask_source = \"generated from brain extracted registration ASL image\"\n\n wsp.log.write(\" - Mask %s\\n\" % mask_source)\n\n page.heading(\"Masked ASL brain image\", level=1)\n page.text(\"Mask was %s\" % mask_source)\n page.text(\"PW ASL image masked by ASL-space mask\")\n\n if wsp.asldata.iaf in (\"diff\", \"tc\", \"ct\"):\n page.image(\"mask_outline\", LightboxImage(wsp.rois.mask, bgimage=wsp.asldata.perf_weighted(), outline=True))\n else:\n page.image(\"mask_outline\", LightboxImage(wsp.rois.mask, bgimage=wsp.asldata.mean(), outline=True))\n"
] | [
[
"scipy.ndimage.morphology.binary_fill_holes"
]
] |
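The mask cleanup in `run()` thresholds the registered brain mask at 0.25 and fills interior holes. A standalone sketch of that step; note current scipy exposes `binary_fill_holes` directly under `scipy.ndimage`, and plain `int` replaces the removed `np.int` alias:

```python
# Threshold-then-fill-holes step from run(), on a toy mask.
import numpy as np
from scipy.ndimage import binary_fill_holes

mask = np.zeros((5, 5))
mask[1:4, 1:4] = 1.0
mask[2, 2] = 0.0                                   # interior hole
filled = binary_fill_holes(mask > 0.25).astype(int)
print(filled)
```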
skyf0cker/Statistical_learning_method | [
"8151f3b8595ac086f08d161dc0cb961946f4b7fc"
] | [
"lh/DecisionTree2.py"
] | [
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Date : 2019-02-03 15:17:08\r\n# @Author : Vophan Lee ([email protected])\r\n# @Link : https://www.jianshu.com/u/3e6114e983ad\r\n\r\nfrom sklearn.datasets import make_classification\r\nimport numpy as np\r\nimport math\r\n\r\n\r\nclass Decision_Tree(object):\r\n \"\"\"\r\n this is a class to build the decision tree\r\n \"\"\"\r\n\r\n feature_list = []\r\n gain_list = []\r\n dim_list = []\r\n index = 0\r\n\r\n def __init__(self):\r\n super(Decision_Tree, self).__init__()\r\n self.features = 5\r\n self.samples = 100\r\n self.data = make_classification(\r\n n_samples=self.samples, n_features=self.features, n_classes=2)\r\n self.empirical_entropy = self.cal_emp_entropy(self.data)\r\n\r\n def cal_emp_entropy(self, data):\r\n \"\"\"\r\n calculate the empirical entropy\r\n \"\"\"\r\n data_0 = []\r\n data_1 = []\r\n for i in enumerate(data[1]):\r\n if i[1] == 0:\r\n data_0.append(data[0][i[0]])\r\n else:\r\n data_1.append(data[0][i[0]])\r\n entropy = 0\r\n for data_ in [data_0, data_1]:\r\n entropy += - \\\r\n (len(data_) / len(data[0])) * \\\r\n math.log2(len(data_) / len(data[0]))\r\n return entropy\r\n\r\n def div_point(self, dim_data):\r\n \"\"\"\r\n decide the divided point of each feature,here we sopposed that dim_data is a continuous dataset\r\n dim_data: tuple\r\n \"\"\"\r\n def dichotomy(dim_data):\r\n div_points = np.zeros((1, self.samples)).reshape(self.samples)\r\n for i in enumerate(dim_data):\r\n if i[0] == len(dim_data) - 1:\r\n break\r\n div_points[i[0]] = (dim_data[i[0] + 1] + i[1]) / 2\r\n return div_points\r\n dim_data = list(dim_data)\r\n dim_data = np.array(dim_data)\r\n dim_data = dim_data[:, dim_data[0].argsort()]\r\n dim_data = tuple(dim_data)\r\n div_points = dichotomy(dim_data[1])\r\n information_gain_list = []\r\n for i in div_points:\r\n div_index = list(div_points).index(i) + 1\r\n front = dim_data[1][:div_index]\r\n behind = dim_data[1][div_index:]\r\n front_flag = dim_data[0][:div_index]\r\n behind_flag = dim_data[0][div_index:]\r\n front_data = (front, front_flag)\r\n behind_data = (behind, behind_flag)\r\n if len(front_data[0]) == 1 or ((front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2):\r\n behind_entropy = self.cal_emp_entropy(behind_data)\r\n information_gain = self.empirical_entropy - \\\r\n (behind_entropy * (len(behind) / len(dim_data[0])))\r\n information_gain_list.append(information_gain)\r\n elif len(behind_data[0]) == 1 or ((behind_data[1] == behind_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2):\r\n front_entropy = self.cal_emp_entropy(front_data)\r\n information_gain = self.empirical_entropy - \\\r\n (front_entropy * (len(front) / len(dim_data[0])))\r\n information_gain_list.append(information_gain)\r\n elif (front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) == len(dim_data[0]) / 2:\r\n\r\n return -1, div_points[int(len(dim_data[0]) / 2 - 1)]\r\n else:\r\n front_entropy = self.cal_emp_entropy(front_data)\r\n behind_entropy = self.cal_emp_entropy(behind_data)\r\n information_gain = self.empirical_entropy - (front_entropy * (len(front) / len(\r\n dim_data[0])) + behind_entropy * (len(behind) / len(dim_data[0])))\r\n information_gain_list.append(information_gain)\r\n max_information_gain = max(information_gain_list)\r\n return max_information_gain, div_points[information_gain_list.index(max_information_gain)]\r\n\r\n def compare_features(self):\r\n \"\"\"\r\n here we choose a maximium information gain among all 
features\r\n \"\"\"\r\n gain_list_tmp = []\r\n point_list = []\r\n for i in range(self.features):\r\n information_gain, div_point = self.div_point((self.data[1], self.data[0].transpose()[i]))\r\n gain_list_tmp.append(information_gain)\r\n point_list.append(div_point)\r\n com_matrix = np.array([\r\n gain_list_tmp,\r\n point_list,\r\n range(self.features)\r\n ])\r\n com_matrix = com_matrix[:, com_matrix[0].argsort()]\r\n Decision_Tree.feature_list = list(com_matrix[1])\r\n Decision_Tree.gain_list = list(com_matrix[0])\r\n Decision_Tree.dim_list = list(com_matrix[2])\r\n\r\n def planet_tree(self, data):\r\n \"\"\"\r\n here is the process of planeting the tree\r\n data: without flag\r\n \"\"\"\r\n feature = Decision_Tree.feature_list[Decision_Tree.index]\r\n dim = Decision_Tree.dim_list[Decision_Tree.index]\r\n Decision_Tree.index += 1\r\n if Decision_Tree.gain_list[Decision_Tree.feature_list.index(feature)] == -1 or Decision_Tree.index >= len(Decision_Tree.feature_list) - 1:\r\n return tree_node([x for x in data.transpose()[int(dim)] if x < feature],\r\n [x for x in data.transpose()[int(dim)] if x > feature],\r\n feature)\r\n else:\r\n return tree_node(self.planet_tree([x for x in data[0] if x < feature]),self.planet_tree([x for x in data[0] if x > feature]), feature)\r\n\r\n\r\nclass tree_node(object):\r\n \"\"\"\r\n this is the node of the decision tree\r\n \"\"\"\r\n\r\n def __init__(self, left, right, data):\r\n self.left=left\r\n self.right=right\r\n self.data=data\r\n"
] | [
[
"numpy.array",
"numpy.zeros",
"sklearn.datasets.make_classification"
]
] |
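
The DecisionTree2.py record above ranks candidate split points by information gain: the empirical entropy of the sample minus the size-weighted entropies of the two halves produced by the split. Note that its cal_emp_entropy would raise a math domain error on a single-class subset (log2 of zero). A minimal self-contained sketch of the same quantity, with toy labels standing in for the record's make_classification data and the pure-node case handled by iterating over observed classes only:

    import math

    def entropy(labels):
        # H(S) = -sum over classes c of p_c * log2(p_c). Iterating over
        # set(labels) keeps every p_c > 0, so a pure node yields 0.0
        # instead of a log2(0) error.
        n = len(labels)
        return -sum((labels.count(c) / n) * math.log2(labels.count(c) / n)
                    for c in set(labels))

    def information_gain(labels, split_index):
        # Gain = H(S) - |L|/|S| * H(L) - |R|/|S| * H(R) for the two halves.
        left, right = labels[:split_index], labels[split_index:]
        n = len(labels)
        return (entropy(labels)
                - (len(left) / n) * entropy(left)
                - (len(right) / n) * entropy(right))

    # A perfect split of a balanced binary sample recovers the full 1.0 bit.
    print(information_gain([0, 0, 0, 1, 1, 1], 3))  # 1.0
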
jun-hyeok/SUP5001-41_Deep-Neural-Networks_2022Spring | [
"95bc0f3a7042debbc388c76d9bd43ad24aba2c88"
] | [
"DNN_HW5/main.py"
] | [
"# %% [markdown]\n# [](https://colab.research.google.com/github/jun-hyeok/SUP5001-41_Deep-Neural-Networks_2022Spring/blob/main/DNN_HW5/main.ipynb)\n\n# %% [markdown]\n# # DNN HW5 : #9\n#\n# 2022.03.23\n# 박준혁\n\n# %%\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n# %% [markdown]\n# Create XOR dataset with torch.FloatTensor\n\n# %%\n# xor dataset\nx = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]])\ny = torch.FloatTensor([[0], [1], [1], [0]])\n\n# %% [markdown]\n# 1. NN model - 10 hidden layer with 4 nodes each\n\n# %%\n# neural network 10 hidden layers with 4 nodes each\nclass NN10(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(2, 4, bias=True)\n self.fc2 = nn.Linear(4, 4)\n self.fc3 = nn.Linear(4, 4)\n self.fc4 = nn.Linear(4, 4)\n self.fc5 = nn.Linear(4, 4)\n self.fc6 = nn.Linear(4, 4)\n self.fc7 = nn.Linear(4, 4)\n self.fc8 = nn.Linear(4, 4)\n self.fc9 = nn.Linear(4, 4)\n self.fc10 = nn.Linear(4, 1)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n x = F.relu(self.fc5(x))\n x = F.relu(self.fc6(x))\n x = F.relu(self.fc7(x))\n x = F.relu(self.fc8(x))\n x = F.relu(self.fc9(x))\n x = F.sigmoid(self.fc10(x))\n return x\n\n\n# %%\nnn10 = NN10()\noptimizer10 = optim.SGD(nn10.parameters(), lr=0.1)\nepochs = 10000\nfor epoch in range(epochs):\n optimizer10.zero_grad()\n y_pred10 = nn10(x)\n ce10 = F.binary_cross_entropy(y_pred10, y)\n ce10.backward()\n optimizer10.step()\n if epoch % 1000 == 0:\n print(\"Epoch: {:4d}/{}\".format(epoch, epochs), end=\" \")\n print(\"Cost: {:.6f}\".format(ce10.item()))\n\n# %% [markdown]\n# 2. NN model - 2 hidden layer with 4 nodes each\n\n# %%\n# neural network 2 hidden layers with 4 nodes each\nclass NN02(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(2, 4, bias=True)\n self.fc2 = nn.Linear(4, 4)\n self.fc3 = nn.Linear(4, 1)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.sigmoid(self.fc3(x))\n return x\n\n\n# %%\nnn02 = NN02()\noptimizer02 = optim.SGD(nn02.parameters(), lr=0.1)\nepochs = 10000\nfor epoch in range(epochs):\n optimizer02.zero_grad()\n y_pred02 = nn02(x)\n ce02 = F.binary_cross_entropy(y_pred02, y)\n ce02.backward()\n optimizer02.step()\n if epoch % 1000 == 0:\n print(\"Epoch: {:4d}/{}\".format(epoch, epochs), end=\" \")\n print(\"Cost: {:.6f}\".format(ce02.item()))\n"
] | [
[
"torch.FloatTensor",
"torch.nn.Linear",
"torch.nn.functional.binary_cross_entropy"
]
] |
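
The DNN_HW5 record trains the same XOR dataset at two depths; with plain SGD and ReLU activations, the 10-layer variant frequently stalls near cost 0.693 (the binary cross-entropy of a constant 0.5 output) while the 2-layer variant tends to converge. A minimal sketch of the shallow setup plus a prediction check, runnable on its own; the seed and the convergence claim in the final comment are illustrative, not guaranteed:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim

    torch.manual_seed(0)
    x = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = torch.FloatTensor([[0], [1], [1], [0]])

    # Same shape as the record's NN02: two 4-node hidden layers, sigmoid head.
    model = nn.Sequential(
        nn.Linear(2, 4), nn.ReLU(),
        nn.Linear(4, 4), nn.ReLU(),
        nn.Linear(4, 1), nn.Sigmoid(),
    )
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    for _ in range(10000):
        optimizer.zero_grad()
        loss = F.binary_cross_entropy(model(x), y)
        loss.backward()
        optimizer.step()

    # If training converged, the thresholded outputs reproduce XOR.
    print((model(x) > 0.5).int().flatten())  # tensor([0, 1, 1, 0]) on success

One caveat when rerunning the record as-is: newer PyTorch releases deprecate F.sigmoid in favor of torch.sigmoid (or the nn.Sigmoid module used in the sketch).
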
Hacky-DH/pytorch | [
"80dc4be615854570aa39a7e36495897d8a040ecc",
"80dc4be615854570aa39a7e36495897d8a040ecc",
"80dc4be615854570aa39a7e36495897d8a040ecc",
"80dc4be615854570aa39a7e36495897d8a040ecc"
] | [
"benchmarks/distributed/ddp/compare/compare_ddp.py",
"caffe2/quantization/server/tanh_dnnlowp_op_test.py",
"caffe2/python/lazy_dyndep_test.py",
"test/test_gen_backend_stubs.py"
] | [
"\"\"\"\nA simple tool to compare the performance of different impls of\nDistributedDataParallel on resnet50, three flavors:\n\n1. DistributedDataParallel, which has a python wrapper and C++ core to do\n gradient distribution and reduction. It's current production version.\n\n2. PythonDDP with async gradient reduction.\n\n3. PythonDDP with synchrous gradient reduction.\n\nExample::\n >>> modify configs in main func\n >>> python compare_ddp.py\n >>> Sample out: compare_ddp_sample.md\n\"\"\"\n\nimport numpy as np\nimport os\nimport pickle\nimport glob\nimport python_ddp\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.models as models\n\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom tabulate import tabulate\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass DDPOption(Enum):\n DDP_CPP_CORE = 1\n PYTHON_DDP_SYNC_REDUCTION = 2\n PYTHON_DDP_ASYNC_REDUCTION = 3\n\nclass LatencyData:\n __slots__ = [\"buffer_size_in_M\", \"ddp_option\", \"rank\", \"metrics\"]\n\n def __init__(self, buffer_size_in_M, ddp_option, rank, metrics):\n self.buffer_size_in_M = buffer_size_in_M\n self.ddp_option = ddp_option\n self.rank = rank\n self.metrics = metrics\n\ndef serialize(buffer_size_in_M, ddp_option, rank, metrics,\n data_dir=\"./tmp\", ext=\"ddpraw\"):\n if not os.path.exists(data_dir):\n print(f'{data_dir} not exist, mkdir {data_dir}')\n os.mkdir(data_dir)\n file_name = \"buffer_size_{}M_rank{}_{}.{}\".format(\n buffer_size_in_M, rank, ddp_option, ext)\n file_path = os.path.join(data_dir, file_name)\n print(\"Writing metrics to file: '{}'\".format(file_path))\n data = LatencyData(buffer_size_in_M, ddp_option, rank, metrics)\n with open(file_path, \"wb\") as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n print(f\"Wrote metrics to '{file_path}''\")\n\ndef load_detailed_metrics(data_dir=\"./tmp\", ext=\"ddpraw\"):\n assert os.path.exists(data_dir)\n file_pattern = os.path.join(data_dir, f\"*.{ext}\")\n files = glob.glob(file_pattern)\n print(\"load_detailed_metrics found {} files\".format(len(files)))\n buffer_size_to_metrics = OrderedDict()\n for file_path in files:\n with open(file_path, \"rb\") as f:\n data = pickle.load(f)\n # Add data to buffer_size_to_metrics\n buffer_size = data.buffer_size_in_M\n if buffer_size not in buffer_size_to_metrics:\n buffer_size_to_metrics[buffer_size] = {}\n metrics = buffer_size_to_metrics.get(buffer_size)\n assert metrics is not None\n metrics[data.ddp_option] = data.metrics\n return buffer_size_to_metrics\n\ndef setup(rank, world_size):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n\n # initialize the process group\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n\ndef create_ddp_model(module, rank, pg, ddp_option, buffer_size_in_M):\n \"\"\"Helper to create DDPModel. 
\"\"\"\n if ddp_option == DDPOption.DDP_CPP_CORE:\n ddp_model = DDP(module, device_ids=[rank],\n process_group=pg,\n bucket_cap_mb=buffer_size_in_M)\n ddp_model._set_static_graph()\n return ddp_model\n elif ddp_option == DDPOption.PYTHON_DDP_SYNC_REDUCTION:\n M = 2 ** 20\n return python_ddp.PythonDDP(module, pg, False, buffer_size=buffer_size_in_M * M)\n elif ddp_option == DDPOption.PYTHON_DDP_ASYNC_REDUCTION:\n M = 2 ** 20\n return python_ddp.PythonDDP(module, pg, True, buffer_size=buffer_size_in_M * M)\n else:\n raise NotImplementedError\n\ndef run_ddp(rank, world_size, epochs, ddp_option, buffer_size_in_M, warmup_iterations=20):\n print(f'Invoked run_ddp rank {rank}')\n assert epochs > warmup_iterations\n\n # Setup\n print(\"setting up ... \")\n setup(rank, world_size)\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n device = torch.device('cuda:%d' % rank)\n print('setup done')\n\n # Create ResNet50 module and wrap in DDP module.\n pg = dist.distributed_c10d._get_default_group()\n model = models.resnet50().to(device)\n ddp_model = create_ddp_model(model, rank, pg, ddp_option, buffer_size_in_M)\n assert ddp_model is not None\n\n loss_fn = nn.MSELoss()\n optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n # Container to hold: event -> list of events in milliseconds\n MODEL_FORWARD = \"forward\"\n MODEL_BACKWARD = \"backward\"\n metrics = {MODEL_FORWARD: [], MODEL_BACKWARD: []}\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n print(f'Epoch {epoch}/{epochs} ...')\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n\n # TODO(bowangbj): Switch to real training set from ImageNet.\n inputs = torch.rand([32, 3, 224, 224], device=device)\n labels = torch.rand([32, 1000], device=device)\n\n # Forward\n start.record()\n outputs = ddp_model(inputs)\n loss = loss_fn(outputs, labels)\n\n end.record()\n torch.cuda.synchronize()\n if epoch >= warmup_iterations:\n metrics[MODEL_FORWARD].append(start.elapsed_time(end))\n\n # Backward\n start.record()\n loss.backward()\n # Reduce all grad, this is needed for non-DDP_CPP_CORE since the hook\n # for all_reduce does not exist yet.\n if ddp_option != DDPOption.DDP_CPP_CORE:\n ddp_model.all_reduce_grads()\n end.record()\n torch.cuda.synchronize()\n if epoch >= warmup_iterations:\n metrics[MODEL_BACKWARD].append(start.elapsed_time(end))\n\n # Optimization\n optimizer.step()\n optimizer.zero_grad()\n\n if rank == 0:\n print(f\"\\nMetrics for GPU {rank}, ddp_option={ddp_option}, buffer_size={buffer_size_in_M}M\")\n print(f\"Skipped {warmup_iterations} CUDA warmpup iterations. \")\n for step, elapsed_milliseconds in metrics.items():\n A = np.array(elapsed_milliseconds)\n print(' {N} iterations, {step}, mean={mean} ms, median={median} ms, p90={p90} ms, p99={p99} ms'.format(\n N=len(A), step=step, mean=np.mean(A),\n median=np.percentile(A, 50), p90=np.percentile(A, 90),\n p99=np.percentile(A, 99)))\n\n # Serialize the raw data to be used to compute summary. 
Didn't choose to\n # maintain a global object holding the metrics b/c mp.spawn tries to\n # fork all the arguments before spawning new process thus it's infeasible\n # save global states in an object.\n serialize(buffer_size_in_M, ddp_option, rank, metrics)\n\ndef append_delta(row_list, base, exp):\n percent = 100 * ((exp - base) / base)\n row_list.append(percent)\n\ndef print_summary(buffer_size_to_metrics):\n # metrics: {ddp_option, Metrics}\n # Metrics: step -> [latency]\n for buffer_size, metrics in buffer_size_to_metrics.items():\n assert DDPOption.DDP_CPP_CORE in metrics.keys()\n baseline = metrics.get(DDPOption.DDP_CPP_CORE)\n print(f\"=== Summary for buffer_size: {buffer_size}M === \")\n for step in baseline.keys():\n # step takes value from [forward, backward]\n # compute latency for each step into a table, each row is looks like\n # [option, mean, diff, mean, diff, p90, diff, p95, diff, p99, diff]\n data = []\n baseline_latencies = baseline.get(step)\n assert baseline_latencies is not None\n A_baseline = np.array(baseline_latencies)\n for ddp_option, exp_metrics in metrics.items():\n exp_latencies = exp_metrics.get(step)\n assert exp_latencies is not None\n A_exp = np.array(exp_latencies)\n # Yield option, mean, p50, p90, p95, p99 and delta.\n row = [ddp_option]\n row.append(np.mean(A_exp))\n append_delta(row, np.mean(A_baseline), np.mean(A_exp))\n for px in [50, 90, 95, 99]:\n base = np.percentile(A_baseline, px)\n exp = np.percentile(A_exp, px)\n row.append(exp)\n append_delta(row, base, exp)\n data.append(row)\n\n # Output buffer_size, step as a table.\n print(tabulate(data,\n headers=[f\"DDP: [{step}]\", \"Mean\", \"delta%\",\n \"mean\", \"delta%\", \"p90\", \"delta%\",\n \"p95\", \"delta%%\", \"p99\", \"delta%\"]))\n print(\"\\n\")\n\ndef main():\n world_size = 2\n epochs = 120\n\n # resnet50 model facts:\n # total_param_count = 161\n # total_elements = 25557032 ~= 24.37M\n # param_max_elements = 2359296 ~= 2.25M\n # Try different bucket sizes.\n buffer_size_in_mbs = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]\n print(\"buffer_size_in_mbs: \" + str(buffer_size_in_mbs))\n for buffer_size_in_M in buffer_size_in_mbs:\n print(\"\\n\\n=== NEW EXPERIMENT: buffer_size={}M, {} epochs, world_size={} ===\".format(\n buffer_size_in_M, epochs, world_size))\n options = [\n DDPOption.DDP_CPP_CORE,\n DDPOption.PYTHON_DDP_ASYNC_REDUCTION,\n DDPOption.PYTHON_DDP_SYNC_REDUCTION\n ]\n for option in options:\n print(\"Measuring option: {} ... \".format(option))\n mp.spawn(run_ddp,\n args=(world_size, epochs, option, buffer_size_in_M),\n nprocs=world_size,\n join=True)\n\n print(\"\\n Generating summaries ... \")\n buffer_size_to_metrics = load_detailed_metrics(data_dir=\"./tmp\", ext=\"ddpraw\")\n print_summary(buffer_size_to_metrics)\n\nif __name__ == \"__main__\" :\n main()\n",
"\n\nimport collections\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\nfrom caffe2.python import core, dyndep, workspace\nfrom hypothesis import given, settings\n\n\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\n\n\nclass DNNLowPTanhOpTest(hu.HypothesisTestCase):\n @given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)\n @settings(max_examples=10, deadline=None)\n def test_dnnlowp_tanh(self, size, is_empty, gc, dc):\n if is_empty:\n size = 0\n\n X = (np.random.rand(size) * 10 - 5).astype(np.float32)\n\n Output = collections.namedtuple(\"Output\", [\"Y\", \"op_type\", \"engine\"])\n outputs = []\n\n op_engine_list = [(\"Tanh\", \"\"), (\"Tanh\", \"DNNLOWP\"), (\"Int8Tanh\", \"DNNLOWP\")]\n\n for op_type, engine in op_engine_list:\n net = core.Net(\"test_net\")\n\n if engine == \"DNNLOWP\":\n quantize = core.CreateOperator(\n \"Quantize\",\n [\"X\"],\n [\"X_q\"],\n engine=engine,\n device_option=gc,\n followed_by=\"Tanh\",\n )\n net.Proto().op.extend([quantize])\n\n tanh = core.CreateOperator(\n op_type,\n [\"X_q\" if engine == \"DNNLOWP\" else \"X\"],\n [\"Y_q\" if engine == \"DNNLOWP\" else \"Y\"],\n engine=engine,\n device_option=gc,\n )\n net.Proto().op.extend([tanh])\n\n if engine == \"DNNLOWP\":\n dequantize = core.CreateOperator(\n \"Dequantize\", [\"Y_q\"], [\"Y\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([dequantize])\n\n self.ws.create_blob(\"X\").feed(X, device_option=gc)\n self.ws.run(net)\n outputs.append(\n Output(Y=self.ws.blobs[\"Y\"].fetch(), op_type=op_type, engine=engine)\n )\n\n for o in outputs:\n np.testing.assert_allclose(o.Y, outputs[0].Y, atol=0.02, rtol=0)\n",
"#!/usr/bin/env python3\n\n\n\n\n\n\nfrom hypothesis import given, settings\nimport hypothesis.strategies as st\nfrom multiprocessing import Process\n\nimport numpy as np\nimport tempfile\nimport shutil\n\nimport caffe2.python.hypothesis_test_util as hu\nimport unittest\n\nop_engine = 'GLOO'\n\nclass TemporaryDirectory:\n def __enter__(self):\n self.tmpdir = tempfile.mkdtemp()\n return self.tmpdir\n\n def __exit__(self, type, value, traceback):\n shutil.rmtree(self.tmpdir)\n\n\ndef allcompare_process(filestore_dir, process_id, data, num_procs):\n from caffe2.python import core, data_parallel_model, workspace, lazy_dyndep\n from caffe2.python.model_helper import ModelHelper\n from caffe2.proto import caffe2_pb2\n lazy_dyndep.RegisterOpsLibrary(\"@/caffe2/caffe2/distributed:file_store_handler_ops\")\n\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"FileStoreHandlerCreate\", [], [\"store_handler\"], path=filestore_dir\n )\n )\n rendezvous = dict(\n kv_handler=\"store_handler\",\n shard_id=process_id,\n num_shards=num_procs,\n engine=op_engine,\n exit_nets=None\n )\n\n model = ModelHelper()\n model._rendezvous = rendezvous\n\n workspace.FeedBlob(\"test_data\", data)\n\n data_parallel_model._RunComparison(\n model, \"test_data\", core.DeviceOption(caffe2_pb2.CPU, 0)\n )\n\n\nclass TestLazyDynDepAllCompare(hu.HypothesisTestCase):\n @given(\n d=st.integers(1, 5), n=st.integers(2, 11), num_procs=st.integers(1, 8)\n )\n @settings(deadline=None)\n def test_allcompare(self, d, n, num_procs):\n dims = []\n for _ in range(d):\n dims.append(np.random.randint(1, high=n))\n test_data = np.random.ranf(size=tuple(dims)).astype(np.float32)\n\n with TemporaryDirectory() as tempdir:\n processes = []\n for idx in range(num_procs):\n process = Process(\n target=allcompare_process,\n args=(tempdir, idx, test_data, num_procs)\n )\n processes.append(process)\n process.start()\n\n while len(processes) > 0:\n process = processes.pop()\n process.join()\n\nclass TestLazyDynDepError(unittest.TestCase):\n def test_errorhandler(self):\n from caffe2.python import core, lazy_dyndep\n import tempfile\n\n with tempfile.NamedTemporaryFile() as f:\n lazy_dyndep.RegisterOpsLibrary(f.name)\n\n def handler(e):\n raise ValueError(\"test\")\n lazy_dyndep.SetErrorHandler(handler)\n with self.assertRaises(ValueError, msg=\"test\"):\n core.RefreshRegisteredOperators()\n\n def test_importaftererror(self):\n from caffe2.python import core, lazy_dyndep\n import tempfile\n\n with tempfile.NamedTemporaryFile() as f:\n lazy_dyndep.RegisterOpsLibrary(f.name)\n\n def handler(e):\n raise ValueError(\"test\")\n lazy_dyndep.SetErrorHandler(handler)\n with self.assertRaises(ValueError):\n core.RefreshRegisteredOperators()\n\n def handlernoop(e):\n raise\n lazy_dyndep.SetErrorHandler(handlernoop)\n lazy_dyndep.RegisterOpsLibrary(\"@/caffe2/caffe2/distributed:file_store_handler_ops\")\n core.RefreshRegisteredOperators()\n\n def test_workspacecreatenet(self):\n from caffe2.python import workspace, lazy_dyndep\n import tempfile\n\n with tempfile.NamedTemporaryFile() as f:\n lazy_dyndep.RegisterOpsLibrary(f.name)\n called = False\n\n def handler(e):\n raise ValueError(\"test\")\n lazy_dyndep.SetErrorHandler(handler)\n with self.assertRaises(ValueError, msg=\"test\"):\n workspace.CreateNet(\"fake\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import os\nimport tempfile\n\nfrom torch.testing._internal.common_utils import TestCase, run_tests\nimport tools.codegen.gen_backend_stubs\n\npath = os.path.dirname(os.path.realpath(__file__))\ngen_backend_stubs_path = os.path.join(path, '../tools/codegen/gen_backend_stubs.py')\n\n# gen_backend_stubs.py is an integration point that is called directly by external backends.\n# The tests here are to confirm that badly formed inputs result in reasonable error messages.\nclass TestGenBackendStubs(TestCase):\n\n def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> str:\n with tempfile.NamedTemporaryFile(mode='w') as fp:\n fp.write(yaml_str)\n fp.flush()\n tools.codegen.gen_backend_stubs.run(fp.name, '', True)\n\n def get_errors_from_gen_backend_stubs(self, yaml_str: str) -> str:\n with tempfile.NamedTemporaryFile(mode='w') as fp:\n fp.write(yaml_str)\n fp.flush()\n try:\n tools.codegen.gen_backend_stubs.run(fp.name, '', True)\n except AssertionError as e:\n # Scrub out the temp file name from any error messages to simplify assertions.\n return str(e).replace(fp.name, '')\n self.fail('Expected gen_backend_stubs to raise an AssertionError, but it did not.')\n\n def test_valid_single_op(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- abs'''\n self.assert_success_from_gen_backend_stubs(yaml_str)\n\n def test_valid_multiple_ops(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- add.Tensor\n- abs'''\n self.assert_success_from_gen_backend_stubs(yaml_str)\n\n def test_valid_zero_ops(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:'''\n self.assert_success_from_gen_backend_stubs(yaml_str)\n\n def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self):\n yaml_str = '''\\\nbackend: BAD_XLA\ncpp_namespace: torch_xla\nsupported:'''\n # External codegen on a yaml file with no operators is effectively a no-op,\n # so there's no reason to parse the backend\n self.assert_success_from_gen_backend_stubs(yaml_str)\n\n def test_valid_with_autograd_ops(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- abs\nautograd:\n- add.Tensor'''\n # External codegen on a yaml file with no operators is effectively a no-op,\n # so there's no reason to parse the backend\n self.assert_success_from_gen_backend_stubs(yaml_str)\n\n def test_missing_backend(self):\n yaml_str = '''\\\ncpp_namespace: torch_xla\nsupported:\n- abs'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''You must provide a value for \"backend\"''')\n\n def test_empty_backend(self):\n yaml_str = '''\\\nbackend:\ncpp_namespace: torch_xla\nsupported:\n- abs'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''You must provide a value for \"backend\"''')\n\n def test_backend_invalid_dispatch_key(self):\n yaml_str = '''\\\nbackend: NOT_XLA\ncpp_namespace: torch_xla\nsupported:\n- abs'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''\\\nunknown dispatch key NOT_XLA\n The provided value for \"backend\" must be a valid DispatchKey, but got NOT_XLA.''') # noqa: B950\n\n def test_missing_cpp_namespace(self):\n yaml_str = '''\\\nbackend: XLA\nsupported:\n- abs'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''You must provide a value for \"cpp_namespace\"''')\n\n def 
test_whitespace_cpp_namespace(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace:\\t\nsupported:\n- abs'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''You must provide a value for \"cpp_namespace\"''')\n\n # supported is a single item (it should be a list)\n def test_nonlist_supported(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported: abs'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''expected \"supported\" to be a list, but got: abs (of type <class 'str'>)''')\n\n # supported contains an op that isn't in native_functions.yaml\n def test_supported_invalid_op(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- abs_BAD'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''Found an invalid operator name: abs_BAD''')\n\n # The backend is valid, but doesn't have a valid autograd key. They can't override autograd kernels in that case.\n # Only using Vulkan here because it has a valid backend key but not an autograd key- if this changes we can update the test.\n def test_backend_has_no_autograd_key_but_provides_entries(self):\n yaml_str = '''\\\nbackend: Vulkan\ncpp_namespace: torch_vulkan\nsupported:\n- add\nautograd:\n- sub'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''Found an invalid operator name: add''') # noqa: B950\n\n # in an operator group, currently all operators must either be registered to the backend or autograd kernel.\n # Here, functional and out mismatch\n def test_backend_autograd_kernel_mismatch_out_functional(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- add.Tensor\nautograd:\n- add.out'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under \"supported\", but add_out is listed under \"autograd\".''') # noqa: B950\n\n # in an operator group, currently all operators must either be registered to the backend or autograd kernel.\n # Here, functional and inplace mismatch\n def test_backend_autograd_kernel_mismatch_functional_inplace(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- add.Tensor\nautograd:\n- add_.Tensor'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! 
add is listed under \"supported\", but add_ is listed under \"autograd\".''') # noqa: B950\n\n # Currently, the same operator can't be listed under both 'supported' and 'autograd', which would\n # involve registering the same kernel to both the XLA and AutogradXLA keys.\n # If we need that functionality in the future, we'll need to augment the codegen.\n def test_op_appears_in_supported_and_autograd_lists(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- add.Tensor\nautograd:\n- add.Tensor'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, '''Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under \"supported\", but add is listed under \"autograd\".''') # noqa: B950\n\n # unrecognized extra yaml key\n def test_unrecognized_key(self):\n yaml_str = '''\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- abs\ninvalid_key: invalid_val'''\n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, ''' contains unexpected keys: invalid_key. Only the following keys are supported: backend, cpp_namespace, extra_headers, supported, autograd''') # noqa: B950\n\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.distributed.distributed_c10d._get_default_group",
"torch.nn.MSELoss",
"torch.multiprocessing.spawn",
"torch.cuda.manual_seed",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.rand",
"torch.cuda.synchronize",
"torch.cuda.Event",
"torch.nn.parallel.DistributedDataParallel",
"numpy.array",
"torch.device",
"numpy.percentile",
"numpy.mean"
],
[
"numpy.random.rand",
"numpy.testing.assert_allclose"
],
[
"numpy.random.randint"
],
[
"torch.testing._internal.common_utils.run_tests"
]
] |
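
compare_ddp.py in the record above summarizes per-step latencies as a mean plus p50/p90/p95/p99 percentiles, each paired with a percent delta against the DDP_CPP_CORE baseline (its append_delta helper). A small sketch of that summary math with hypothetical latency lists; the numbers are made up for illustration:

    import numpy as np

    def summarize(latencies_ms, baseline_ms):
        # Mean and p50/p90/p95/p99, each with a percent delta vs. the
        # baseline, mirroring print_summary/append_delta in the record.
        a, b = np.array(latencies_ms), np.array(baseline_ms)
        row = {"mean": (a.mean(), 100 * (a.mean() - b.mean()) / b.mean())}
        for px in (50, 90, 95, 99):
            exp, base = np.percentile(a, px), np.percentile(b, px)
            row["p%d" % px] = (exp, 100 * (exp - base) / base)
        return row

    baseline = [10.0, 11.0, 12.0, 13.0]   # hypothetical DDP_CPP_CORE latencies
    candidate = [11.0, 12.0, 13.0, 14.0]  # hypothetical PythonDDP latencies
    print(summarize(candidate, baseline))
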
mingxiaoh/chainer-v3 | [
"815ff00f5eaf7944d6e8a75662ff64a2fe046a4d"
] | [
"tests/chainer_tests/functions_tests/connection_tests/test_n_step_lstm.py"
] | [
"import unittest\n\nimport mock\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import functions\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\n\n\ndef sigmoid(x):\n return numpy.tanh(x * 0.5) * 0.5 + 0.5\n\n\ndef _split(inputs, pos):\n return inputs[:pos], inputs[pos:]\n\n\[email protected](*testing.product({\n 'use_cudnn': [True, False],\n}))\nclass TestNStepLSTM(unittest.TestCase):\n\n batches = [3, 2, 1]\n length = len(batches)\n in_size = 3\n out_size = 2\n n_layers = 2\n dropout = 0.0\n\n def setUp(self):\n self.xs = [numpy.random.uniform(-1, 1, (b, self.in_size)).astype('f')\n for b in self.batches]\n h_shape = (self.n_layers, self.batches[0], self.out_size)\n self.cx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)\n self.hx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)\n\n self.ws = []\n self.bs = []\n for i in range(self.n_layers):\n weights = []\n biases = []\n for j in range(8):\n if i == 0 and j < 4:\n w_in = self.in_size\n else:\n w_in = self.out_size\n\n weights.append(numpy.random.uniform(\n -1, 1, (self.out_size, w_in)).astype('f'))\n biases.append(numpy.random.uniform(\n -1, 1, (self.out_size,)).astype('f'))\n self.ws.append(weights)\n self.bs.append(biases)\n\n self.dys = [numpy.random.uniform(-1, 1, (b, self.out_size)).astype('f')\n for b in self.batches]\n self.dcy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)\n self.dhy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)\n\n def check_forward(\n self, h_data, c_data, xs_data, ws_data, bs_data, volatile):\n h = chainer.Variable(h_data, volatile=volatile)\n c = chainer.Variable(c_data, volatile=volatile)\n xs = [chainer.Variable(x, volatile=volatile) for x in xs_data]\n ws = [[chainer.Variable(w, volatile=volatile) for w in ws]\n for ws in ws_data]\n bs = [[chainer.Variable(b, volatile=volatile) for b in bs]\n for bs in bs_data]\n hy, cy, ys = functions.n_step_lstm(\n self.n_layers, self.dropout, h, c, ws, bs, xs,\n use_cudnn=self.use_cudnn)\n\n e_hy = self.hx.copy()\n e_cy = self.cx.copy()\n for ind in range(self.length):\n x = self.xs[ind]\n batch = x.shape[0]\n for layer in range(self.n_layers):\n w = self.ws[layer]\n b = self.bs[layer]\n h_prev = e_hy[layer, :batch]\n c_prev = e_cy[layer, :batch]\n i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])\n f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])\n c_bar = numpy.tanh(\n x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])\n o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])\n e_c = (f * c_prev + i * c_bar)\n e_h = o * numpy.tanh(e_c)\n e_hy[layer, :batch] = e_h\n e_cy[layer, :batch] = e_c\n\n x = e_h\n\n testing.assert_allclose(\n ys[ind].data, x, rtol=1e-4, atol=1e-4)\n\n testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)\n testing.assert_allclose(cy.data, e_cy, rtol=1e-4, atol=1e-4)\n\n def test_forward_cpu(self):\n self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs, False)\n\n def test_forward_cpu_volatile(self):\n self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs, True)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.hx),\n cuda.to_gpu(self.cx),\n [cuda.to_gpu(x) for x in self.xs],\n [[cuda.to_gpu(w) for w in ws] for ws in self.ws],\n [[cuda.to_gpu(b) for b in bs] for bs in self.bs],\n False)\n\n @attr.gpu\n def test_forward_gpu_volatile(self):\n self.check_forward(cuda.to_gpu(self.hx),\n cuda.to_gpu(self.cx),\n [cuda.to_gpu(x) for x 
in self.xs],\n [[cuda.to_gpu(w) for w in ws] for ws in self.ws],\n [[cuda.to_gpu(b) for b in bs] for bs in self.bs],\n True)\n\n def check_backward(self, h_data, c_data, xs_data, ws_data, bs_data,\n dhy_data, dcy_data, dys_data):\n args = tuple([h_data, c_data] + sum(ws_data, []) + sum(bs_data, []) +\n xs_data)\n grads = tuple([dhy_data, dcy_data] + dys_data)\n\n def f(*inputs):\n (hx, cx), inputs = _split(inputs, 2)\n ws = []\n for i in range(self.n_layers):\n weights, inputs = _split(inputs, 8)\n ws.append(weights)\n bs = []\n for i in range(self.n_layers):\n biases, inputs = _split(inputs, 8)\n bs.append(biases)\n xs = inputs\n hy, cy, ys = functions.n_step_lstm(\n self.n_layers, self.dropout, hx, cx, ws, bs, xs)\n return (hy, cy) + ys\n\n gradient_check.check_backward(\n f, args, grads, eps=1e-2, rtol=1e-3, atol=1e-3)\n\n def test_backward_cpu(self):\n self.check_backward(self.hx, self.cx, self.xs, self.ws, self.bs,\n self.dhy, self.dcy, self.dys)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.hx),\n cuda.to_gpu(self.cx),\n [cuda.to_gpu(x) for x in self.xs],\n [[cuda.to_gpu(w) for w in ws] for ws in self.ws],\n [[cuda.to_gpu(b) for b in bs] for bs in self.bs],\n cuda.to_gpu(self.dhy),\n cuda.to_gpu(self.dcy),\n [cuda.to_gpu(dy) for dy in self.dys])\n\n\[email protected](*testing.product({\n 'use_cudnn': [True, False],\n}))\[email protected]\nclass TestNStepLSTMCudnnCall(unittest.TestCase):\n\n batches = [4, 3, 2, 1]\n length = len(batches)\n in_size = 3\n out_size = 4\n n_layers = 2\n dropout = 0.0\n\n def setUp(self):\n self.xs = [cuda.cupy.random.uniform(\n -1, 1, (b, self.in_size)).astype('f')\n for b in self.batches]\n h_shape = (self.n_layers, self.batches[0], self.out_size)\n self.cx = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')\n self.hx = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')\n\n self.ws = []\n self.bs = []\n for i in range(self.n_layers):\n weights = []\n biases = []\n for j in range(8):\n if i == 0 and j < 4:\n w_in = self.in_size\n else:\n w_in = self.out_size\n\n weights.append(cuda.cupy.random.uniform(\n -1, 1, (self.out_size, w_in)).astype('f'))\n biases.append(cuda.cupy.random.uniform(\n -1, 1, (self.out_size,)).astype('f'))\n\n self.ws.append(weights)\n self.bs.append(biases)\n\n self.dys = [cuda.cupy.random.uniform(\n -1, 1, (b, self.out_size)).astype('f')\n for b in self.batches]\n self.dcy = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')\n self.dhy = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')\n self.expect = self.use_cudnn and (\n cuda.cudnn.cudnn.getVersion() >= 5000)\n\n def forward(self, train):\n volatile = not train\n h = chainer.Variable(self.hx, volatile=volatile)\n c = chainer.Variable(self.cx, volatile=volatile)\n xs = [chainer.Variable(x, volatile=volatile) for x in self.xs]\n ws = [[chainer.Variable(w, volatile=volatile) for w in ws]\n for ws in self.ws]\n bs = [[chainer.Variable(b, volatile=volatile) for b in bs]\n for bs in self.bs]\n with chainer.using_config('train', train):\n return functions.n_step_lstm(\n self.n_layers, self.dropout, h, c, ws, bs, xs,\n use_cudnn=self.use_cudnn)\n\n def test_call_cudnn_forward_training(self):\n with mock.patch('cupy.cuda.cudnn.RNNForwardTraining') as func:\n self.forward(True)\n self.assertEqual(func.called, self.expect)\n\n def test_call_cudnn_forward_inference(self):\n with mock.patch('cupy.cuda.cudnn.RNNForwardInference') as func:\n self.forward(False)\n self.assertEqual(func.called, self.expect)\n\n def test_call_cudnn_backward(self):\n 
hy, cy, ys = self.forward(True)\n hy.grad = self.dhy\n with mock.patch('cupy.cuda.cudnn.RNNBackwardWeights') as func:\n hy.backward()\n self.assertEqual(func.called, self.expect)\n\n\ntesting.run_module(__name__, __file__)\n"
] | [
[
"numpy.random.uniform",
"numpy.tanh"
]
] |
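
The chainer test above validates n_step_lstm against a hand-rolled reference recurrence: weights 0-3 act on the input and weights 4-7 on the previous hidden state, ordered input gate, forget gate, cell candidate, output gate. A single-step NumPy sketch of that same recurrence; shapes follow the test's layer-0 convention and the random values are placeholders:

    import numpy as np

    def sigmoid(x):
        return np.tanh(x * 0.5) * 0.5 + 0.5  # same identity the test uses

    def lstm_step(x, h_prev, c_prev, w, b):
        # w[0..3]/b[0..3] act on the input, w[4..7]/b[4..7] on the hidden
        # state, in the test's order: input, forget, cell candidate, output.
        i = sigmoid(x @ w[0].T + h_prev @ w[4].T + b[0] + b[4])
        f = sigmoid(x @ w[1].T + h_prev @ w[5].T + b[1] + b[5])
        c_bar = np.tanh(x @ w[2].T + h_prev @ w[6].T + b[2] + b[6])
        o = sigmoid(x @ w[3].T + h_prev @ w[7].T + b[3] + b[7])
        c = f * c_prev + i * c_bar
        return o * np.tanh(c), c

    rng = np.random.RandomState(0)
    in_size, out_size = 3, 2
    w = [rng.uniform(-1, 1, (out_size, in_size if j < 4 else out_size))
         for j in range(8)]
    b = [rng.uniform(-1, 1, (out_size,)) for j in range(8)]
    h, c = lstm_step(rng.uniform(-1, 1, (1, in_size)),
                     np.zeros((1, out_size)), np.zeros((1, out_size)), w, b)
    print(h.shape, c.shape)  # (1, 2) (1, 2)
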
WuYff/ggnn.pytorch | [
"795bc7fb51876231406d71610aa5ec7ed29865c0"
] | [
"main_live.py"
] | [
"import argparse\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom model_live import GGNN\nfrom utils.train_live import train\nfrom utils.test_live import test\nfrom utils.validation_live import validation\nfrom utils.data.wy_dataset_live import bAbIDataset\nfrom utils.data.dataloader import bAbIDataloader\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--task_id', type=int, default=4, help='bAbI task id')\nparser.add_argument('--question_id', type=int, default=0, help='question types')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\n# parser.add_argument('--batchSize', type=int, default=10, help='input batch size')\nparser.add_argument('--batchSize', type=int, default=10, help='input batch size')\nparser.add_argument('--state_dim', type=int, default=4, help='GGNN hidden state size')\nparser.add_argument('--n_steps', type=int, default=1, help='propogation steps number of GGNN')\n# parser.add_argument('--niter', type=int, default=10, help='number of epochs to train for')\nparser.add_argument('--niter', type=int, default=15, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.01, help='learning rate')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--verbal', action='store_true', help='print training info or not')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--criterion', type=int, default=1)\nparser.add_argument('--choice_steps', type=int, default=2)\nparser.add_argument('--how_many', type=int, default=40)\n\nopt = parser.parse_args()\n\n# todo : shuffle before each epoch, specify the number od n_steps\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\nopt.dataroot = '/home/yiwu/ggnn/wy/ggnn.pytorch/wy_data/live_jfree/lala'\n#opt.dataroot = '/home/yiwu/ggnn/wy/ggnn.pytorch/wy_data/one'\n\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n\ndef main(opt):\n train_dataset = bAbIDataset(opt.dataroot, opt.question_id, \"t\",0,opt.how_many)\n print(\"len(train_dataset)\",len(train_dataset))\n # for i, (adj_matrix, annotation, target) in enumerate(train_dataset, 0):\n # print(\"annotation size\",annotation.shape)\n # print(\"adj_matrix size\",adj_matrix.shape)\n # print(\"target int\",target)\n # break\n \n # for i, (adj_matrix, annotation, target) in enumerate(train_dataloader, 0):\n # print(\"@annotation size\",annotation.shape)\n # print(\"@adj_matrix size\",adj_matrix.shape)\n # print(\"@target size\",target.shape)\n # break\n \n\n validation_dataset = bAbIDataset(opt.dataroot, opt.question_id, \"v\", train_dataset.n_node,opt.how_many)\n validation_dataloader = bAbIDataloader(validation_dataset, batch_size=opt.batchSize, \\\n shuffle=False, num_workers=2)\n print(\"len(validation_dataset)\",len(validation_dataset))\n\n test_dataset = bAbIDataset(opt.dataroot, opt.question_id, \"est\", train_dataset.n_node,opt.how_many)\n test_dataloader = bAbIDataloader(test_dataset, batch_size=opt.batchSize, \\\n shuffle=False, num_workers=2)\n print(\"len(test_dataset)\",len(test_dataset))\n opt.annotation_dim = train_dataset.n_def # for bAbI\n opt.n_edge_types = train_dataset.n_edge_types\n opt.n_node = train_dataset.n_node\n opt.state_dim = opt.annotation_dim\n opt.n_steps = opt.n_node\n \n if opt.choice_steps == 2:\n opt.n_steps = 
round(opt.n_node*0.5)\n elif opt.choice_steps == 3:\n opt.n_steps = opt.n_node*2\n elif opt.choice_steps == 4:\n opt.n_steps = opt.n_node*opt.n_node\n elif opt.choice_steps == 5:\n opt.n_steps = round(opt.n_node*0.3)\n\n net = GGNN(opt)\n net.double()\n \n \n criterion = nn.SmoothL1Loss()\n if opt.criterion == 2:\n criterion = torch.nn.L1Loss()\n elif opt.criterion == 3:\n criterion = torch.nn.MSELoss()\n \n\n if opt.cuda:\n net.cuda()\n criterion.cuda()\n\n optimizer = optim.Adam(net.parameters(), lr=opt.lr)\n print(\"opt\",opt)\n print(net)\n for epoch in range(0, opt.niter):\n train_dataloader = bAbIDataloader(train_dataset, batch_size=opt.batchSize, \\\n shuffle=True, num_workers=4)\n print(\"len(train_dataloader)\",len(train_dataloader))\n train(epoch, train_dataloader, net, criterion, optimizer, opt)\n validation(validation_dataloader, net, criterion, optimizer, opt)\n test(test_dataloader, net, criterion, optimizer, opt)\n\n\nif __name__ == \"__main__\":\n main(opt)\n\n"
] | [
[
"torch.nn.SmoothL1Loss",
"torch.cuda.manual_seed_all",
"torch.nn.MSELoss",
"torch.manual_seed",
"torch.nn.L1Loss"
]
] |
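
main_live.py above derives the number of GGNN propagation steps from --choice_steps and the graph's node count rather than taking it directly from the command line. A sketch of just that mapping, pulled out as a pure function for readability; resolve_n_steps is a name introduced here, not one from the record:

    def resolve_n_steps(choice_steps, n_node):
        # The record's mapping from --choice_steps to GGNN propagation steps.
        if choice_steps == 2:
            return round(n_node * 0.5)
        if choice_steps == 3:
            return n_node * 2
        if choice_steps == 4:
            return n_node * n_node
        if choice_steps == 5:
            return round(n_node * 0.3)
        return n_node  # default: one propagation step per node

    print([resolve_n_steps(c, 10) for c in (1, 2, 3, 4, 5)])  # [10, 5, 20, 100, 3]
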
sbl1996/pytorch-hrvvi-ext | [
"f19abcbedd844a700b2e2596dd817ea80cbb6287"
] | [
"horch/legacy/models/detection/enhance.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom horch.common import tuplify\nfrom horch.models.block import mb_conv_block, MBConv\nfrom horch.models.detection.nasfpn import ReLUConvBN\n\nfrom horch.models.modules import upsample_add, Conv2d, Sequential, Pool2d, upsample_concat\nfrom horch.models.detection.nasfpn import NASFPN\nfrom horch.models.utils import remove_stride_padding\n\n\nclass TopDown(nn.Module):\n def __init__(self, in_channels, f_channels, lite=False, aggregate='add'):\n super().__init__()\n self.aggregate = aggregate\n self.lat = Conv2d(\n in_channels, f_channels, kernel_size=1,\n norm='default')\n channels = f_channels * 2 if aggregate == 'cat' else f_channels\n self.conv = Conv2d(\n channels, f_channels, kernel_size=5 if lite else 3,\n norm='default', act='default', depthwise_separable=lite)\n\n def forward(self, c, p):\n if self.aggregate == 'cat':\n p = upsample_concat(p, self.lat(c))\n else:\n p = upsample_add(p, self.lat(c))\n p = self.conv(p)\n return p\n\n\nclass DeconvTopDown(nn.Module):\n def __init__(self, in_channels1, in_channels2, f_channels, lite=False, aggregate='add'):\n super().__init__()\n self.aggregate = aggregate\n self.lat = Conv2d(\n in_channels1, f_channels, kernel_size=1,\n norm='default')\n self.deconv = Conv2d(in_channels2, f_channels, kernel_size=4, stride=2,\n norm='default', depthwise_separable=lite, transposed=True)\n channels = f_channels * 2 if aggregate == 'cat' else f_channels\n self.conv = Conv2d(\n channels, f_channels, kernel_size=5 if lite else 3,\n norm='default', act='default', depthwise_separable=lite)\n\n def forward(self, c, p):\n if self.aggregate == 'cat':\n p = torch.cat([self.lat(c), self.deconv(p)], dim=1)\n else:\n p = self.lat(c) + self.deconv(p)\n p = self.conv(p)\n return p\n\n\nclass FPNExtraLayers(nn.Module):\n def __init__(self, in_channels, extra_layers=(6, 7), f_channels=None, downsample='conv', lite=False):\n super().__init__()\n self.extra_layers = nn.ModuleList([])\n for _ in extra_layers:\n if downsample == 'conv':\n l = ReLUConvBN(in_channels, f_channels, stride=2, lite=lite)\n elif downsample == 'maxpool':\n l = Pool2d('max', kernel_size=1, stride=2)\n elif downsample == 'avgpool':\n l = Pool2d('avg', kernel_size=1, stride=2)\n else:\n raise ValueError(\"%s as downsampling is invalid.\" % downsample)\n in_channels = f_channels\n self.extra_layers.append(l)\n\n def forward(self, p):\n ps = []\n for l in self.extra_layers:\n p = l(p)\n ps.append(p)\n return tuple(ps)\n\n\nclass BasicBlock(nn.Sequential):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.conv1 = Conv2d(in_channels, out_channels // 2, kernel_size=1,\n norm='default', act='default')\n self.conv2 = Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2,\n norm='default', act='default')\n\n\nclass ExtraLayers(nn.Module):\n def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=0, block=BasicBlock, **kwargs):\n super().__init__()\n f_channels_list = tuplify(f_channels_list, num_extra_layers)\n in_channels_list = list(in_channels_list)\n self.extra_layers = nn.ModuleList([])\n for f_channels in f_channels_list:\n l = block(in_channels_list[-1], f_channels, **kwargs)\n self.extra_layers.append(l)\n in_channels_list.append(f_channels)\n\n for i in range(no_padding, 0):\n remove_stride_padding(self.extra_layers[i])\n\n self.out_channels = in_channels_list\n\n def forward(self, *cs):\n ps = list(cs)\n for l in self.extra_layers:\n 
ps.append(l(ps[-1]))\n return tuple(ps)\n\n\nclass SSDExtraLayers(ExtraLayers):\n def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=-1):\n super().__init__(\n in_channels_list,\n num_extra_layers,\n f_channels_list,\n no_padding,\n BasicBlock\n )\n\n\nclass SSDLiteExtraLayers(ExtraLayers):\n def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=-1, kernel_size=3):\n super().__init__(\n in_channels_list,\n num_extra_layers,\n f_channels_list,\n no_padding,\n mb_conv_block,\n expand_ratio=4,\n kernel_size=kernel_size\n )\n\n\nclass FPN(nn.Module):\n r\"\"\"\n Feature Pyramid Network which enhance features of different levels.\n\n Parameters\n ----------\n in_channels_list : sequence of ints\n Number of input channels of every level, e.g., ``(256,512,1024)``\n f_channels : int\n Number of output channels.\n extra_layers : tuple of ints\n Extra layers to add, e.g., ``(6, 7)``\n lite : bool\n Whether to replace conv3x3 with depthwise seperable conv.\n Default: False\n upsample : str\n Use bilinear upsampling when `interpolate` and ConvTransposed when `deconv`\n Default: `interpolate`\n \"\"\"\n\n def __init__(self, in_channels_list, f_channels=256, extra_layers=(), downsample='conv', lite=False,\n upsample='interpolate', aggregate='add'):\n super().__init__()\n self.lat = Conv2d(in_channels_list[-1], f_channels, kernel_size=1, norm='default')\n self.extra_layers = extra_layers\n if extra_layers:\n self.extras = FPNExtraLayers(f_channels, extra_layers, f_channels, downsample=downsample, lite=lite)\n if upsample == 'deconv':\n self.topdowns = nn.ModuleList([\n DeconvTopDown(c, f_channels, f_channels, lite=lite, aggregate=aggregate)\n for c in in_channels_list[:-1]\n ])\n else:\n self.topdowns = nn.ModuleList([\n TopDown(c, f_channels, lite=lite, aggregate=aggregate)\n for c in in_channels_list[:-1]\n ])\n self.out_channels = [f_channels] * (len(in_channels_list) + len(extra_layers))\n\n def forward(self, *cs):\n p = self.lat(cs[-1])\n ps = (p,)\n if self.extra_layers:\n ps = ps + self.extras(p)\n for c, topdown in zip(reversed(cs[:-1]), reversed(self.topdowns)):\n p = topdown(c, ps[0])\n ps = (p,) + ps\n return ps\n\n\nclass BottomUp(nn.Module):\n def __init__(self, f_channels, lite=False):\n super().__init__()\n self.down = Conv2d(\n f_channels, f_channels, kernel_size=3, stride=2,\n norm='default', act='default', depthwise_separable=lite)\n self.conv = Conv2d(\n f_channels, f_channels, kernel_size=3,\n norm='default', act='default', depthwise_separable=lite)\n\n def forward(self, p, n):\n n = p + self.down(n)\n n = self.conv(n)\n return n\n\n\nclass FPN2(nn.Module):\n r\"\"\"\n Bottom-up path augmentation.\n\n Parameters\n ----------\n in_channels_list : sequence of ints\n Number of input channels of every level, e.g., ``(256,256,256)``\n Notice: they must be the same.\n f_channels : int\n Number of output channels.\n \"\"\"\n\n def __init__(self, in_channels_list, f_channels, lite=False):\n super().__init__()\n assert len(set(in_channels_list)) == 1, \"Input channels of every level must be the same\"\n assert in_channels_list[0] == f_channels, \"Input channels must be the same as `f_channels`\"\n self.bottomups = nn.ModuleList([\n BottomUp(f_channels, lite=lite)\n for _ in in_channels_list[1:]\n ])\n self.out_channels = [f_channels] * len(in_channels_list)\n\n def forward(self, *ps):\n ns = [ps[0]]\n for p, bottomup in zip(ps[1:], self.bottomups):\n n = bottomup(p, ns[-1])\n ns.append(n)\n return 
tuple(ns)\n\n\nclass ContextEnhance(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.lats = nn.ModuleList([\n Conv2d(c, out_channels, kernel_size=1, norm='default')\n for c in in_channels\n ])\n self.lat_glb = Conv2d(in_channels[-1], out_channels, kernel_size=1,\n norm='default')\n\n def forward(self, *cs):\n size = cs[0].size()[2:4]\n p = self.lats[0](cs[0])\n for c, lat in zip(cs[1:], self.lats[1:]):\n p += F.interpolate(lat(c), size=size, mode='bilinear', align_corners=False)\n c_glb = F.adaptive_avg_pool2d(cs[-1], 1)\n p_glb = self.lat_glb(c_glb)\n p += p_glb\n return p\n\n\ndef stacked_fpn(num_stacked, in_channels_list, extra_layers=(), f_channels=256, lite=False, upsample='interpolate'):\n r\"\"\"\n Stacked FPN with alternant top down block and bottom up block.\n\n Parameters\n ----------\n num_stacked : int\n Number of stacked fpns.\n in_channels_list : sequence of ints\n Number of input channels of every level, e.g., ``(128,256,512)``\n extra_layers : tuple of ints\n Extra layers to add, e.g., ``(6, 7)``\n f_channels : int\n Number of feature (output) channels.\n Default: 256\n lite : bool\n Whether to replace conv3x3 with depthwise seperable conv.\n Default: False\n upsample : str\n Use bilinear upsampling if `interpolate` and ConvTransposed if `deconv`\n Default: `interpolate`\n \"\"\"\n assert num_stacked >= 2, \"Use FPN directly if `num_stacked` is smaller than 2.\"\n layers = [FPN(in_channels_list, f_channels, extra_layers, lite=lite, upsample=upsample)]\n for i in range(1, num_stacked):\n if i % 2 == 0:\n layers.append(FPN(layers[-1].out_channels, f_channels, lite=lite, upsample=upsample))\n else:\n layers.append(FPN2(layers[-1].out_channels, f_channels, lite=lite))\n m = Sequential(*layers)\n m.out_channels = m[-1].out_channels\n return m\n\n\ndef stacked_nas_fpn(num_stacked, in_channels_list, extra_layers=(), f_channels=256, lite=False, upsample='interpolate'):\n r\"\"\"\n Stacked FPN with alternant top down block and bottom up block.\n\n Parameters\n ----------\n num_stacked : int\n Number of stacked fpns.\n in_channels_list : sequence of ints\n Number of input channels of every level, e.g., ``(128,256,512)``\n extra_layers : tuple of ints\n Extra layers to add, e.g., ``(6, 7)``\n f_channels : int\n Number of feature (output) channels.\n Default: 256\n lite : bool\n Whether to replace conv3x3 with depthwise seperable conv.\n Default: False\n upsample : str\n Use bilinear upsampling if `interpolate` and ConvTransposed if `deconv`\n Default: `interpolate`\n \"\"\"\n assert num_stacked >= 2, \"Use FPN directly if `num_stacked` is smaller than 2.\"\n layers = [FPN(in_channels_list, f_channels, extra_layers, downsample='maxpool', lite=lite, upsample=upsample)]\n for i in range(1, num_stacked):\n layers.append(NASFPN(f_channels))\n m = Sequential(*layers)\n m.out_channels = m[-1].out_channels\n return m\n\n\nclass IDA(nn.Module):\n def __init__(self, in_channels_list, f_channels, lite=False):\n super().__init__()\n self.num_levels = len(in_channels_list)\n self.topdowns = nn.ModuleList([\n DeconvTopDown(in_channels_list[i], in_channels_list[i + 1], f_channels, lite=lite)\n for i in range(self.num_levels - 1)\n ])\n if self.num_levels > 2:\n self.deep = IDA([f_channels] * (self.num_levels - 1), f_channels)\n\n def forward(self, *xs):\n xs = [\n l(xs[i], xs[i + 1]) for i, l in enumerate(self.topdowns)\n ]\n if self.num_levels > 2:\n return self.deep(*xs)\n else:\n return xs[0]\n\n\nclass IDA2(nn.Module):\n def __init__(self, in_channels, 
lite=False):\n super().__init__()\n self.num_levels = len(in_channels)\n self.topdowns = nn.ModuleList([\n DeconvTopDown(in_channels[i], in_channels[i + 1], in_channels[i + 1], lite=lite)\n for i in range(self.num_levels - 1)\n ])\n if self.num_levels > 2:\n self.deep = IDA2(in_channels[1:], lite=lite)\n\n def forward(self, *xs):\n xs = [\n l(xs[i], xs[i + 1]) for i, l in enumerate(self.topdowns)\n ]\n if self.num_levels > 2:\n return self.deep(*xs)\n else:\n return xs[0]\n\n\nclass YOLOFPN(nn.Module):\n def __init__(self, in_channels_list, f_channels_list=(256, 512, 1024), kernel_size=5):\n super().__init__()\n assert len(in_channels_list) == len(f_channels_list)\n num_levels = len(in_channels_list)\n self.convs = nn.ModuleList([])\n self.lats = nn.ModuleList([])\n self.outs = nn.ModuleList([])\n for i in range(num_levels):\n f_channels = f_channels_list[-(i+1)]\n in_channels = in_channels_list[-(i+1)]\n if i == 0:\n self.convs.append(nn.Sequential(\n MBConv(in_channels, in_channels, f_channels // 4, kernel_size=kernel_size),\n MBConv(f_channels // 4, f_channels, f_channels // 4, kernel_size=kernel_size),\n ))\n else:\n self.lats.append(Conv2d(f_channels_list[-i] // 4, f_channels // 4, kernel_size=1,\n norm='default'))\n self.convs.append(nn.Sequential(\n MBConv(in_channels + f_channels // 4, in_channels + f_channels // 4, f_channels // 4, kernel_size=kernel_size),\n MBConv(f_channels // 4, f_channels, f_channels // 4, kernel_size=kernel_size),\n ))\n self.outs.append(MBConv(f_channels // 4, f_channels, None, kernel_size=kernel_size))\n\n self.out_channels = tuple(f_channels_list)\n\n def forward(self, *cs):\n ps = []\n p1 = self.convs[0](cs[-1])\n p2 = self.outs[0](p1)\n ps.append(p2)\n for lat, conv, out, c in zip(self.lats, self.convs[1:], self.outs[1:], reversed(cs[:-1])):\n c = upsample_concat(lat(p1), c)\n p1 = conv(c)\n p2 = out(p1)\n ps.append(p2)\n return tuple(reversed(ps))\n\n\n"
] | [
[
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.ModuleList"
]
] |
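
The TopDown block in the horch record implements the standard FPN top-down step: a 1x1 lateral convolution on the finer backbone feature, the coarser pyramid level upsampled to match and aggregated (added, by default), then a 3x3 smoothing convolution. A minimal sketch under the assumption that upsample_add means nearest-neighbor upsampling followed by elementwise addition (the horch.models.modules helper itself is not shown in the record); normalization and activation are omitted:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class MiniTopDown(nn.Module):
        # Stripped-down TopDown with aggregate='add': lateral 1x1 conv on the
        # finer feature, upsample the coarser level to match, add, smooth 3x3.
        def __init__(self, in_channels, f_channels):
            super().__init__()
            self.lat = nn.Conv2d(in_channels, f_channels, kernel_size=1)
            self.conv = nn.Conv2d(f_channels, f_channels, kernel_size=3, padding=1)

        def forward(self, c, p):
            p = F.interpolate(p, size=c.shape[2:], mode='nearest') + self.lat(c)
            return self.conv(p)

    c = torch.randn(1, 512, 32, 32)  # finer backbone level
    p = torch.randn(1, 256, 16, 16)  # coarser pyramid level
    print(MiniTopDown(512, 256)(c, p).shape)  # torch.Size([1, 256, 32, 32])
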
dvorotnev/NNEF-Tools | [
"0219a509c34bb5b291bee497cbd658d6a5922171"
] | [
"nnef_tools/io/tf/graphdef/reader.py"
] | [
"# Copyright (c) 2020 The Khronos Group Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom collections import namedtuple\nfrom ....model import *\nfrom ....utils.types import as_str\nfrom .protobuf import *\nimport numpy as np\nimport six\n\n\nFunction = namedtuple('Function', ['name', 'attrs'])\n\n\n_DtypeToNumpy = {\n 'DT_INVALID': None,\n 'DT_RESOURCE': np.dtype([('resource', np.int32)]),\n 'DT_HALF': np.float16,\n 'DT_FLOAT': np.float32,\n 'DT_DOUBLE': np.float64,\n 'DT_INT8': np.int8,\n 'DT_INT16': np.int16,\n 'DT_INT32': np.int32,\n 'DT_INT64': np.int64,\n 'DT_UINT8': np.uint8,\n 'DT_UINT16': np.uint16,\n 'DT_UINT32': np.uint32,\n 'DT_UINT64': np.uint64,\n 'DT_BOOL': np.bool,\n 'DT_STRING': np.str,\n 'DT_COMPLEX64': np.complex64,\n 'DT_COMPLEX128': np.complex128,\n}\n\n\ndef _get_shape(shape_proto):\n return tuple(int(dim.size) if dim.size >= 0 else None for dim in shape_proto.dim) \\\n if not shape_proto.unknown_rank else None\n\n\ndef _get_dtype(dtype_enum):\n dtype = _DtypeToNumpy[DataType.Name(dtype_enum)]\n assert dtype is not None, \"non-numeric dtype '{}' in attribute\".format(DataType.Name(dtype_enum))\n return dtype\n\n\ndef _get_nonempty_items(message, fields):\n for field in fields:\n items = getattr(message, field)\n if len(items):\n return field, items\n\n return None, None\n\n\ndef _get_tensor(tensor_proto):\n shape = _get_shape(tensor_proto.tensor_shape)\n dtype = _get_dtype(tensor_proto.dtype)\n\n if len(tensor_proto.tensor_content):\n data = np.frombuffer(tensor_proto.tensor_content, dtype=dtype).reshape(shape)\n else:\n field, items = _get_nonempty_items(tensor_proto,\n fields=['half_val', 'float_val', 'double_val', 'int_val', 'int64_val',\n 'bool_val', 'string_val', 'uint32_val', 'uint64_val',\n 'resource_handle_val', 'scomplex_val', 'dcomplex_val'])\n assert items is not None, \"tensor items are empty, dtype = {}\".format(dtype)\n\n items = [item for item in items]\n if len(items) == int(np.prod(shape)):\n data = np.array(items, dtype=dtype).reshape(shape)\n else:\n assert len(items) == 1\n data = np.full(shape=shape, dtype=dtype, fill_value=items[0])\n\n return data\n\n\ndef _get_func(name_attrlist_proto):\n return Function(name_attrlist_proto.name, _get_attributes(name_attrlist_proto.attr))\n\n\ndef _get_attribute(field, value):\n if field == 'i' or field == 'f' or field == 'b' or field == 'placeholder':\n return value\n elif field == 's':\n return as_str(value.decode())\n elif field == 'shape':\n return _get_shape(value)\n elif field == 'type':\n return _get_dtype(value)\n elif field == 'tensor':\n return _get_tensor(value)\n elif field == 'func':\n return _get_func(value)\n elif field == 'list':\n field, items = _get_nonempty_items(value, fields=['i', 'f', 'b', 's', 'shape', 'type', 'tensor', 'func'])\n return [_get_attribute(field, item) for item in items] if items is not None else []\n\n assert False\n\n\ndef _get_attributes(attr_map_proto):\n attributes = {}\n for name, value in 
attr_map_proto.items():\n field = value.WhichOneof('value')\n if field is not None:\n value = getattr(value, field)\n attributes[as_str(name)] = _get_attribute(field, value)\n else:\n attributes[as_str(name)] = None\n\n return attributes\n\n\ndef _get_output_name(node_name, idx):\n return node_name + ':' + str(idx) if idx > 0 else node_name\n\n\ndef _has_output_shapes(graph_def):\n return all('_output_shapes' in node.attr and node.attr['_output_shapes'].WhichOneof('value') is not None\n for node in graph_def.node)\n\n\ndef _add_output_shapes(graph_def):\n try:\n import tensorflow.compat.v1 as tf\n except ImportError:\n import tensorflow as tf\n\n tf.import_graph_def(graph_def, name='')\n return tf.get_default_graph().as_graph_def(add_shapes=True)\n\n\ndef _get_dtypes(graph_def):\n try:\n import tensorflow.compat.v1 as tf\n except ImportError:\n import tensorflow as tf\n\n dtypes = {}\n\n graph = tf.Graph()\n with graph.as_default():\n tf.import_graph_def(graph_def, name='')\n for op in graph.get_operations():\n for tensor in op.outputs:\n name = tensor.name[:-2] if tensor.name.endswith(':0') else tensor.name\n dtypes[name] = tensor.dtype.as_numpy_dtype if tensor.dtype != tf.resource else _DtypeToNumpy['DT_RESOURCE'].type\n\n return dtypes\n\n\ndef _get_output_shapes(attr_map_proto):\n value = attr_map_proto['_output_shapes']\n field = value.WhichOneof('value')\n if field is None:\n return None\n\n value = getattr(value, field)\n return _get_attribute(field, value)\n\n\ndef build_graph(graph_def):\n graph = Graph()\n\n dtypes = _get_dtypes(graph_def)\n\n # create tensors\n node_outputs = {}\n for node in graph_def.node:\n output_shapes = _get_output_shapes(node.attr)\n if output_shapes is not None:\n name = as_str(node.name)\n node_outputs[name] = [Tensor(graph, _get_output_name(name, idx), shape=shape, dtype=dtypes.get(name))\n for idx, shape in enumerate(output_shapes)]\n\n tensors = {tensor.name: tensor for outputs in six.itervalues(node_outputs) for tensor in outputs}\n\n # create ops\n for node in graph_def.node:\n attributes = _get_attributes(node.attr)\n inputs = [tensors[name] for name in node.input if not name.startswith('^')]\n outputs = node_outputs[node.name] if node.name in node_outputs else []\n\n Operation(graph,\n type=as_str(node.op),\n name=as_str(node.name),\n inputs=inputs,\n outputs=outputs,\n attribs=attributes)\n\n graph.inputs = [node_outputs[node.name][0] for node in graph_def.node if node.op == 'Placeholder']\n graph.outputs = [output for op in graph.operations if all(len(output.consumers) == 0 for output in op.outputs)\n for output in op.outputs]\n return graph\n\n\ndef _unpack_custom_ops(graph):\n for op in graph.operations:\n if op.type == 'PyFunc':\n op.custom = True\n op.type = op.attribs['token']\n op.attribs = {key[2:-2]: value for key, value in six.iteritems(op.attribs)\n if key.startswith('_$') and key.endswith('$_')}\n\n\ndef read_graphdef(filename, input_shapes, fold_constants):\n graph_def = GraphDef()\n with open(filename, 'rb') as file:\n graph_def.ParseFromString(file.read())\n\n if not _has_output_shapes(graph_def):\n graph_def = _add_output_shapes(graph_def)\n\n if input_shapes is not None:\n from .utils import set_input_shapes\n graph_def = set_input_shapes(graph_def, input_shapes)\n\n if fold_constants:\n from .utils import fold_constant_tensors\n graph_def = fold_constant_tensors(graph_def)\n\n graph = build_graph(graph_def)\n _unpack_custom_ops(graph)\n\n return graph\n\n\nclass Reader(object):\n\n def __init__(self, fold_constants=False):\n 
self._fold_constants = fold_constants\n\n def __call__(self, filename, input_shapes=None):\n return read_graphdef(filename, input_shapes, self._fold_constants)\n"
] | [
[
"numpy.dtype",
"numpy.prod",
"tensorflow.Graph",
"tensorflow.get_default_graph",
"tensorflow.import_graph_def",
"numpy.array",
"numpy.full",
"numpy.frombuffer"
]
] |
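The graph-reader record above centers on `_get_tensor`, which handles the two ways a serialized tensor stores its values: a packed `tensor_content` byte string, or a typed repeated field holding either one value per element or a single value meant to be broadcast over the whole shape. Below is a minimal, self-contained sketch of that decoding logic in plain numpy; the `decode_tensor` helper and its sample inputs are hypothetical stand-ins for the real protobuf message.

```python
import numpy as np

def decode_tensor(shape, dtype, tensor_content=b"", items=()):
    """Illustrative mirror of the decoding fallbacks in _get_tensor above."""
    if tensor_content:
        # Packed bytes: one value per element, row-major order.
        return np.frombuffer(tensor_content, dtype=dtype).reshape(shape)
    items = list(items)
    if len(items) == int(np.prod(shape)):
        return np.array(items, dtype=dtype).reshape(shape)
    # A single stored value represents a constant-filled tensor.
    assert len(items) == 1, "tensor items are empty"
    return np.full(shape=shape, dtype=dtype, fill_value=items[0])

packed = np.arange(6, dtype=np.float32).tobytes()
print(decode_tensor((2, 3), np.float32, tensor_content=packed))
print(decode_tensor((2, 2), np.int32, items=[7]))
```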
dbradf/signal-processing-algorithms | [
"75312e873543f0f89aace75f43ded783395425c5"
] | [
"src/signal_processing_algorithms/gesd.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nGESD based Detect outliers.\n\nGeneralized ESD Test for Outliers\nsee 'GESD<https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm>'\n\"\"\"\nimport collections\n\nfrom typing import List\n\nimport numpy as np\nimport numpy.ma as ma\nimport structlog\n\nfrom scipy.stats import t\n\nLOG = structlog.get_logger()\n\nGesdResult = collections.namedtuple(\n \"GesdResult\",\n [\"count\", \"suspicious_indexes\", \"test_statistics\", \"critical_values\", \"all_z_scores\"],\n)\n\"\"\"\nA named tuple for the results of the GESD algorithm.\n\nThe outliers are in suspicious_indexes[count:].\nThe low confidence outliers are in suspicious_indexes[:count].\n\n:type count: int,\n:type suspicious_indexes: list(int)\n:type test_statistics: list(float)\n:type critical_values: list(float)\n:type all_z_scores: list(float, float)\n:type series: list(float)\n\"\"\"\n\n\ndef gesd(\n data: List[float], max_outliers: int = 10, significance_level: float = 0.05, mad: bool = False\n) -> GesdResult:\n \"\"\"\n Perform a Generalized ESD Test for Outliers.\n\n The generalized ESD(Extreme Studentized Deviate) test (Rosner 1983) is used to detect one or\n more outliers in a univariate data set that follows an approximately normal distribution.\n\n Usage:\n gesd_result = gesd(\n series,\n max_outliers,\n significance_level=significance,\n mad=True)\n\n count = gesd_result.count\n indexes = gesd_result.suspicious_indexes\n\n print(\"outliers indexes {}\".format(indexes[:count])\n print(\"low confidence outliers indexes {}\".format(indexes[count:])\n\n\n If the standard deviation of the series data is zero then the outlier detection will bail out.\n For non-mad this entails a constant series or sub-series so this behaviour makes sense.\n\n In the MAD case, this may mean that the series is constant or that a majority of the series\n are the median value. 
The data should be validated to avoid this issue.\n\n Note: the test_statistics array is signed, this allows determination of the outlier above\n or below the mean.\n\n :param data: The data to test.\n :param max_outliers: Test for up to max outliers.\n :param significance_level: Test for up to max outliers.\n :param mad: Use Median Absolute Deviation.\n :return: The number of outliers, suspicious indexes, test_statistics, critical_values, z_values.\n see 'here<https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm>'\n \"\"\"\n if data is None or np.size(data) == 0:\n raise ValueError(\"No Data ({})\".format(data))\n length = len(data)\n if max_outliers < 1:\n raise ValueError(\n \"max_outliers({max_outliers}) must be >= 1\".format(max_outliers=max_outliers)\n )\n if max_outliers >= len(data):\n raise ValueError(\n \"max_outliers({max_outliers}) <= length({length})\".format(\n length=len(data) if data is not None else None, max_outliers=max_outliers\n )\n )\n if significance_level <= 0.0 or significance_level >= 1.0:\n raise ValueError(\n \"invalid significance_level({significance_level})\".format(\n significance_level=significance_level\n )\n )\n\n series = ma.array(data)\n test_statistics = []\n critical_values = []\n potential_outlier_indices = []\n\n # max outliers must be less than length, the standard deviation and mad of a single entry list\n # are 0 so z score would be nan.\n m_outliers = min(max_outliers, length - 1)\n\n indexes = np.arange(m_outliers, dtype=int)\n for i in indexes:\n LOG.debug(\"iteration\", i=i, mad=mad, series=series)\n if mad:\n # sigma in this case is an estimate of .75 quantile * MAD\n # note : 1.4826 == 1 / Q(.75) == 1 / 0.675\n center = np.ma.median(series)\n sigma = 1.4826 * np.ma.median(np.fabs(series - center))\n else:\n center = series.mean()\n sigma = series.std(ddof=1)\n\n if sigma == 0:\n break\n\n z_scores = (series - center) / sigma\n if i == 0:\n all_z_scores = (series - center) / sigma\n\n LOG.debug(\"calculate\", z_scores=np.array([np.arange(length, dtype=int), z_scores]).T)\n\n max_z_score_index = np.fabs(z_scores).argmax()\n max_z_score = z_scores[max_z_score_index]\n\n # 2 sided test for significance.\n significance_result = 1.0 - significance_level / (2.0 * (length - i))\n\n # Percent point function with df (degrees of freedom).\n percentage_point = t.ppf(significance_result, df=length - i - 2)\n value = (\n (length - i - 1)\n * percentage_point\n / np.sqrt((length - i - 2 + percentage_point ** 2) * (length - i))\n )\n\n # Update results.\n potential_outlier_indices.append(max_z_score_index)\n test_statistics.append(max_z_score)\n critical_values.append(value)\n\n LOG.debug(\n \"iteration complete\",\n z_scores=np.array(\n [\n np.arange(max_outliers, dtype=int),\n test_statistics,\n critical_values,\n np.greater(test_statistics, critical_values),\n ]\n ).T,\n )\n\n # Mask and exclude the selected value from the next iteration.\n series[max_z_score_index] = ma.masked\n\n LOG.debug(\"values calculated\", max_z_scores=test_statistics, lambda_values=critical_values)\n if potential_outlier_indices:\n for number_outliers in range(len(potential_outlier_indices), 0, -1):\n if np.abs(test_statistics[number_outliers - 1]) > critical_values[number_outliers - 1]:\n LOG.debug(\n \"outliers discovered\",\n number_outliers=number_outliers,\n outliers=potential_outlier_indices[0:number_outliers],\n )\n\n return GesdResult(\n number_outliers,\n potential_outlier_indices,\n test_statistics,\n critical_values,\n 
all_z_scores[potential_outlier_indices],\n )\n return GesdResult(\n 0,\n potential_outlier_indices,\n test_statistics,\n critical_values,\n all_z_scores[potential_outlier_indices] if potential_outlier_indices else [],\n )\n"
] | [
[
"numpy.ma.median",
"numpy.fabs",
"scipy.stats.t.ppf",
"numpy.abs",
"numpy.greater",
"numpy.size",
"numpy.ma.array",
"numpy.arange",
"numpy.sqrt"
]
] |
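The `gesd` record above iteratively masks the most extreme z-score (classic or MAD-based) and compares each signed test statistic against the Rosner critical value before deciding how many suspects are true outliers. A short usage sketch on synthetic data, assuming the package is importable under the path shown in the record; it follows the docstring's own usage convention that the first `count` suspicious indexes are the confirmed outliers.

```python
import numpy as np
from signal_processing_algorithms.gesd import gesd  # assumes the package above is installed

rng = np.random.default_rng(1234)
series = rng.normal(loc=50.0, scale=2.0, size=200)
series[[10, 75, 150]] = [80.0, 15.0, 90.0]  # inject three clear outliers

result = gesd(list(series), max_outliers=10, significance_level=0.05, mad=True)
print("outliers:", result.suspicious_indexes[: result.count])
print("low confidence:", result.suspicious_indexes[result.count :])
```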
prjemian/XAnoS | [
"8a70380a88421042feff6f4aa9f5cf1f79ab4efc"
] | [
"Functions/FormFactors/SphericalShell_expDecay.py"
] | [
"####Please do not remove lines below####\nfrom lmfit import Parameters\nimport numpy as np\nimport sys\nimport os\nsys.path.append(os.path.abspath('.'))\nsys.path.append(os.path.abspath('./Functions'))\nsys.path.append(os.path.abspath('./Fortran_rountines'))\n\n####Please do not remove lines above####\n\n####Import your modules below if needed####\nfrom xraydb import XrayDB\n#from pyEQL import chemical_formula\n\n\nclass SphericalShell_expDecay: #Please put the class name same as the function name\n No = 6.023e23 # Avagadro number\n re2= (2.817e-5)**2 # Square of classical electron radius in Angs^2\n def __init__(self, x=0, rmin=0.0, rmax=30.0, Nr=31, Rc=10.0, strho=1.0, tst=2.0, lrho=0.5, lexp=10.0, rhosol=0.0, norm=1.0, bkg=0.0, mpar={}):\n \"\"\"\n Documentation\n x \t: independent variable in the form of a scalar or an array\n Rc \t: Radial distance in Angstroms after which the solvent contribution starts\n strho \t: Concentration of the ions of interest in the stern layer in Molar\n tst \t: Thickness of stern layer in Angstroms\n lrho \t: The maximum concentration of the diffuse layer in Molars\n lexp \t: The decay length of the diffuse layer assuming exponential decay\n rhosol\t: The surrounding bulk density\n norm \t: Density of particles in Moles/Liter\n bkg \t: Constant background\n \"\"\"\n if type(x)==list:\n self.x=np.array(x)\n else:\n self.x=x\n self.rmin=rmin\n self.rmax=rmax\n self.Nr=Nr\n self.Rc=Rc\n self.strho=strho\n self.tst=tst\n self.lrho=lrho\n self.lexp=lexp\n self.rhosol=rhosol\n self.norm=norm\n self.bkg=bkg\n self.__mpar__=mpar #If there is any multivalued parameter\n self.choices={} #If there are choices available for any fixed parameters\n self.__xrdb__=XrayDB()\n self.init_params()\n\n def init_params(self):\n \"\"\"\n Define all the fitting parameters like\n self.param.add('sig',value=0,vary=0)\n \"\"\"\n self.params=Parameters()\n self.params.add('Rc',value=self.Rc,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)\n self.params.add('strho', value=self.strho, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('tst', value=self.tst, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('lrho', value=self.lrho, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('lexp', value=self.lexp, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('norm', value=self.norm, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('bkg', value=self.bkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n\n\n\n def solrho(self, r, Rp=100.0, Rc=12.5, strho=1.0, tst=2.0, lrho=0.5, lexp=10.0, rhosol=0.0):\n \"\"\"\n Calculates the electron density for the distribution of ions as a function of radial distance surrounding a spherical particle\n\n Rp :: Radius of the sphere in Angstroms enclosing the spherical particle\n Rc :: Radial distance in Angstroms after which the solvent contribution starts\n strho :: Concentration of the ions of interest in the stern layer in Molar\n tst :: Thickness of stern layer in Angstroms\n lrho :: The maximum concentration of the diffuse layer in Molars\n lexp :: The decay length of the diffuse layer assuming exponential decay\n rhosol :: The surrounding bulk density\n \"\"\"\n R1=Rc\n R2=Rc+tst\n #integral=np.sum([r1**2*np.exp(-(r1-R2)/lexp) for r1 in np.linspace(R2,Rp,1001)])*(Rp-R2)/1000\n 
integral=lexp*(R2**2*np.exp(-R2/lexp)-Rp**2*np.exp(-Rp/lexp))+2*lexp**2*(R2*np.exp(-R2/lexp)-Rp*np.exp(-Rp/lexp))+2*lexp**3*(np.exp(-Rp/lexp)-np.exp(-R2/lexp))\n rhos=(rhosol*(Rp**3-R1**3)-strho*(R2**3-R1**3)-3*lrho*integral*np.exp(R2/lexp))/(Rp**3-R2**3)\n self.output_params['scaler_parameters']['rho_bulk']=rhos\n stern = np.where(r > R1, strho, 0.0) * np.where(r > R2, 0.0, 1.0)\n diffuse = np.where(r > R2, lrho * np.exp(-(r - R2) / lexp) + rhos, 0.0)\n rho = (stern + diffuse)\n return rho # in Moles/Liter\n\n\n def calc_form(self, q, r, rho):\n \"\"\"\n Calculates the isotropic form factor using the isotropic electron density as a funciton of radial distance\n\n q :: scaler or array of reciprocal reciprocal wave vector in inv. Angstroms at which the form factor needs to be calculated in\n r :: array of radial distances at which the element/ion density in known in Angstroms\n rho :: array of element/ion densities as a function of radial distance in el/Angstroms^3. Note: The electron density should decay to zero at the last radial distance\n \"\"\"\n dr = r[1] - r[0]\n form = np.zeros_like(q)\n rho = (rho - rho[-1])* self.No/1e27 #converting it to moles/Angs^3\n for r1, rho1 in zip(r, rho):\n form = form + 4 * np.pi * r1 * rho1 * np.sin(q * r1) / q\n form = (np.absolute(form) * dr)**2\n return self.re2 * form * 1e-16 * self.No / 1e3 # in cm^-1\n\n def y(self):\n \"\"\"\n Define the function in terms of x to return some value\n \"\"\"\n self.output_params={}\n self.output_params['scaler_parameters']={}\n r=np.linspace(self.rmin, self.rmax, self.Nr)\n strho=self.params['strho'].value\n tst=self.params['tst'].value\n lrho=self.params['lrho'].value\n lexp=self.params['lexp'].value\n #rhosol=self.params['rhosol'].value\n norm=self.params['norm'].value\n bkg=self.params['bkg'].value\n Rc = self.params['Rc'].value\n Rp=(3/(4*np.pi*norm*6.022e23))**(1.0/3.0)*1e9\n rho=self.solrho(r, Rp=Rp, Rc=Rc, strho=strho, tst=tst, lrho=lrho, lexp=lexp, rhosol=self.rhosol)\n self.output_params['Electron_Density']={'x':r,'y':rho}\n self.output_params['scaler_parameters']['Rp']=Rp\n form=norm*self.calc_form(self.x,r,rho)+bkg\n return form\n\nif __name__=='__main__':\n x=np.arange(0.001,1.0,0.1)\n fun=SphericalShell_expDecay(x=x)\n print(fun.y())\n"
] | [
[
"numpy.zeros_like",
"numpy.exp",
"numpy.arange",
"numpy.absolute",
"numpy.array",
"numpy.sin",
"numpy.where",
"numpy.linspace"
]
] |
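In the record above, `calc_form` evaluates the isotropic form factor by summing 4π·r·ρ(r)·sin(qr)/q over a uniform radial grid, i.e. the spherically symmetric Fourier transform F(q) = ∫ 4πr²ρ(r)·sin(qr)/(qr) dr. Here is a stripped-down numpy sketch of that quadrature with the class's unit conversions and bulk-density subtraction omitted; the uniform-sphere profile is a made-up input chosen only because its form factor is well known.

```python
import numpy as np

def isotropic_form_amplitude(q, r, rho):
    """F(q) = integral of 4*pi*r^2*rho(r)*sin(q*r)/(q*r) dr on a uniform grid."""
    dr = r[1] - r[0]
    q = np.atleast_1d(q)[:, None]  # shape (Nq, 1) for broadcasting against r
    # np.sinc(x) = sin(pi*x)/(pi*x), hence sinc(q*r/pi) = sin(q*r)/(q*r)
    integrand = 4.0 * np.pi * r**2 * rho * np.sinc(q * r / np.pi)
    return integrand.sum(axis=1) * dr

r = np.linspace(0.0, 30.0, 301)
rho = np.where(r < 10.0, 1.0, 0.0)  # uniform sphere of radius 10
q = np.linspace(0.01, 0.5, 50)
intensity = isotropic_form_amplitude(q, r, rho) ** 2  # scattered intensity ~ |F(q)|^2
```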
alexandru-dinu/PRML | [
"acd823e098df67abe0306a70225e7539f8edda40"
] | [
"prml/linear/_bayesian_regression.py"
] | [
"import numpy as np\n\nfrom prml.linear._regression import Regression\n\n\nclass BayesianRegression(Regression):\n \"\"\"Bayesian regression model.\n\n w ~ N(w|0, alpha^(-1)I)\n y = X @ w\n t ~ N(t|X @ w, beta^(-1))\n \"\"\"\n\n def __init__(self, alpha: float = 1.0, beta: float = 1.0):\n \"\"\"Initialize bayesian linear regression model.\n\n Parameters\n ----------\n alpha : float, optional\n Precision parameter of the prior, by default 1.\n beta : float, optional\n Precision parameter of the likelihood, by default 1.\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.w_mean = None\n self.w_precision = None\n\n def _is_prior_defined(self) -> bool:\n return self.w_mean is not None and self.w_precision is not None\n\n def _get_prior(self, ndim: int) -> tuple:\n if self._is_prior_defined():\n return self.w_mean, self.w_precision\n else:\n return np.zeros(ndim), self.alpha * np.eye(ndim)\n\n def fit(self, x_train: np.ndarray, y_train: np.ndarray):\n \"\"\"Bayesian update of parameters given training dataset.\n\n Parameters\n ----------\n x_train : np.ndarray\n training data independent variable (N, n_features)\n y_train : np.ndarray\n training data dependent variable\n \"\"\"\n mean_prev, precision_prev = self._get_prior(np.size(x_train, 1))\n\n w_precision = precision_prev + self.beta * x_train.T @ x_train\n w_mean = np.linalg.solve(\n w_precision,\n precision_prev @ mean_prev + self.beta * x_train.T @ y_train,\n )\n self.w_mean = w_mean\n self.w_precision = w_precision\n self.w_cov = np.linalg.inv(self.w_precision)\n\n def predict(\n self,\n x: np.ndarray,\n return_std: bool = False,\n sample_size: int = None,\n ):\n \"\"\"Return mean (and standard deviation) of predictive distribution.\n\n Parameters\n ----------\n x : np.ndarray\n independent variable (N, n_features)\n return_std : bool, optional\n flag to return standard deviation (the default is False)\n sample_size : int, optional\n number of samples to draw from the predictive distribution\n (the default is None, no sampling from the distribution)\n\n Returns\n -------\n y : np.ndarray\n mean of the predictive distribution (N,)\n y_std : np.ndarray\n standard deviation of the predictive distribution (N,)\n y_sample : np.ndarray\n samples from the predictive distribution (N, sample_size)\n \"\"\"\n if sample_size is not None:\n w_sample = np.random.multivariate_normal(\n self.w_mean,\n self.w_cov,\n size=sample_size,\n )\n y_sample = x @ w_sample.T\n return y_sample\n y = x @ self.w_mean\n if return_std:\n y_var = 1 / self.beta + np.sum(x @ self.w_cov * x, axis=1)\n y_std = np.sqrt(y_var)\n return y, y_std\n return y\n"
] | [
[
"numpy.sum",
"numpy.eye",
"numpy.linalg.solve",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.size",
"numpy.random.multivariate_normal",
"numpy.sqrt"
]
] |
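`fit` and `predict` in the record above implement the standard conjugate Gaussian update for linear regression: posterior precision S_N⁻¹ = S_0⁻¹ + β·XᵀX, posterior mean m_N = S_N(S_0⁻¹m_0 + β·Xᵀy), and predictive variance 1/β + xᵀS_N x. A minimal numpy sketch of the same algebra; the synthetic data and all values below are illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
w_true = np.array([1.0, -2.0, 0.5])
y = X @ w_true + rng.normal(scale=0.1, size=50)

alpha, beta = 1.0, 100.0                      # prior / likelihood precision
S0_inv = alpha * np.eye(3)                    # zero-mean isotropic prior
SN_inv = S0_inv + beta * X.T @ X              # posterior precision
mN = np.linalg.solve(SN_inv, beta * X.T @ y)  # posterior mean (m0 = 0)

x_star = np.array([[0.2, 0.1, -0.3]])
y_mean = x_star @ mN                          # predictive mean
y_var = 1.0 / beta + np.sum(x_star @ np.linalg.inv(SN_inv) * x_star, axis=1)
print(y_mean, np.sqrt(y_var))                 # mean and std of the prediction
```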
jnorwood/tensorflow | [
"67ab6c9cebc4cbb2103246a1523d04261bef22d2"
] | [
"tensorflow/python/saved_model/save.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Exports a SavedModel from a Trackable Python object.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\n\nfrom tensorflow.core.framework import versions_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.core.protobuf import saved_model_pb2\nfrom tensorflow.core.protobuf import saved_object_graph_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function as defun\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.saved_model import builder_impl\nfrom tensorflow.python.saved_model import constants\nfrom tensorflow.python.saved_model import function_serialization\nfrom tensorflow.python.saved_model import nested_structure_coder\nfrom tensorflow.python.saved_model import revived_types\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import signature_def_utils\nfrom tensorflow.python.saved_model import signature_serialization\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.saved_model import utils_impl\nfrom tensorflow.python.training.saving import functional_saver\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import graph_view\nfrom tensorflow.python.training.tracking import object_identity\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.tracking import util\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util.tf_export import tf_export\n\n_UNCOPIABLE_DTYPES = frozenset((dtypes.resource, dtypes.variant))\n\n\n# A container for an EagerTensor constant which has been copied to the exported\n# Graph.\n_CapturedConstant = collections.namedtuple(\n \"_CapturedConstant\", [\"eager_tensor\", \"graph_tensor\"])\n\n\nclass _AugmentedGraphView(graph_view.ObjectGraphView):\n \"\"\"An extendable graph which also tracks functions attached to objects.\n\n Extensions through `add_object` appear in the object graph and any checkpoints\n generated from it, even if they are not dependencies of the node they were\n attached to in the saving program. 
For example a `.signatures` attribute is\n added to exported SavedModel root objects without modifying the root object\n itself.\n\n Also tracks functions attached to objects in the graph, through the caching\n `list_functions` method. Enumerating functions only through this method\n ensures that we get a consistent view of functions, even if object attributes\n create new functions every time they are accessed.\n \"\"\"\n\n def __init__(self, root):\n if (not context.executing_eagerly()\n and not ops.inside_function()):\n saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n else:\n saveables_cache = None\n super(_AugmentedGraphView, self).__init__(root, saveables_cache)\n # Object -> (name -> dep)\n self._extra_dependencies = object_identity.ObjectIdentityDictionary()\n self._functions = object_identity.ObjectIdentityDictionary()\n\n def add_object(self, parent_node, name_in_parent, subgraph_root):\n \"\"\"Attach an object to `parent_node`, overriding any existing dependency.\"\"\"\n self._extra_dependencies.setdefault(\n parent_node, {})[name_in_parent] = subgraph_root\n\n def list_dependencies(self, obj):\n \"\"\"Overrides a parent method to include `add_object` objects.\"\"\"\n extra_dependencies = self._extra_dependencies.get(obj, {})\n used_names = set()\n for name, dep in super(_AugmentedGraphView, self).list_dependencies(obj):\n used_names.add(name)\n if name in extra_dependencies:\n yield base.TrackableReference(name, extra_dependencies[name])\n else:\n yield base.TrackableReference(name, dep)\n for name, dep in extra_dependencies.items():\n if name in used_names:\n continue\n yield base.TrackableReference(name, dep)\n\n def list_functions(self, obj):\n obj_functions = self._functions.get(obj, None)\n if obj_functions is None:\n obj_functions = obj._list_functions_for_serialization() # pylint: disable=protected-access\n self._functions[obj] = obj_functions\n return obj_functions\n\n\nclass _SaveableView(object):\n \"\"\"Provides a frozen view over a trackable root.\n\n This class helps creating a single stable view over an object to save. The\n saving code should access properties and functions via this class and not via\n the original object as there are cases where an object construct their\n trackable attributes and functions dynamically per call and will yield\n different objects if invoked more than once.\n\n Changes to the graph, for example adding objects, must happen in\n `checkpoint_view` (an `_AugmentedGraphView`) before the `_SaveableView` is\n constructed. 
Changes after the `_SaveableView` has been constructed will be\n ignored.\n \"\"\"\n\n def __init__(self, checkpoint_view):\n self.checkpoint_view = checkpoint_view\n trackable_objects, node_ids, slot_variables = (\n self.checkpoint_view.objects_ids_and_slot_variables())\n self.nodes = trackable_objects\n self.node_ids = node_ids\n self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()\n self.slot_variables = slot_variables\n self.concrete_functions = []\n\n # Also add `Function`s as nodes.\n nodes_without_functions = list(self.nodes)\n seen_function_names = set()\n for node in nodes_without_functions:\n for function in checkpoint_view.list_functions(node).values():\n if function not in self.node_ids:\n self.node_ids[function] = len(self.nodes)\n self.nodes.append(function)\n if isinstance(function, def_function.Function):\n # Force listing the concrete functions for the side effects:\n # - populate the cache for functions that have an input_signature\n # and have not been called.\n # - force side effects of creation of concrete functions, e.g. create\n # variables on first run.\n concrete_functions = (\n function._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access\n else:\n concrete_functions = [function]\n for concrete_function in concrete_functions:\n if concrete_function.name not in seen_function_names:\n seen_function_names.add(concrete_function.name)\n self.concrete_functions.append(concrete_function)\n\n @property\n def root(self):\n return self.nodes[0]\n\n def fill_object_graph_proto(self, proto):\n \"\"\"Populate the nodes, children and slot_variables of a SavedObjectGraph.\"\"\"\n for node_id, node in enumerate(self.nodes):\n assert self.node_ids[node] == node_id\n object_proto = proto.nodes.add()\n object_proto.slot_variables.extend(self.slot_variables.get(node, ()))\n if isinstance(node, (def_function.Function, defun.ConcreteFunction,\n _CapturedConstant)):\n continue\n for child in self.checkpoint_view.list_dependencies(node):\n child_proto = object_proto.children.add()\n child_proto.node_id = self.node_ids[child.ref]\n child_proto.local_name = child.name\n for local_name, ref_function in (\n self.checkpoint_view.list_functions(node).items()):\n child_proto = object_proto.children.add()\n child_proto.node_id = self.node_ids[ref_function]\n child_proto.local_name = local_name\n\n def map_resources(self):\n \"\"\"Makes new resource handle ops corresponding to existing resource tensors.\n\n Creates resource handle ops in the current default graph, whereas\n `accessible_objects` will be from an eager context. 
Resource mapping adds\n resource handle ops to the main GraphDef of a SavedModel, which allows the\n C++ loader API to interact with variables.\n\n Returns:\n A tuple of (object_map, resource_map, asset_info):\n object_map: A dictionary mapping from object in `accessible_objects` to\n replacement objects created to hold the new resource tensors.\n resource_map: A dictionary mapping from resource tensors extracted from\n `accessible_objects` to newly created resource tensors.\n asset_info: An _AssetInfo tuple describing external assets referenced\n from accessible_objects.\n \"\"\"\n # Only makes sense when adding to the export Graph\n assert not context.executing_eagerly()\n # TODO(allenl): Handle MirroredVariables and other types of variables which\n # may need special casing.\n object_map = object_identity.ObjectIdentityDictionary()\n resource_map = {}\n asset_info = _AssetInfo(\n asset_defs=[],\n asset_initializers_by_resource={},\n asset_filename_map={},\n asset_index={})\n for node_id, obj in enumerate(self.nodes):\n if isinstance(obj, tracking.CapturableResource):\n # pylint: disable=protected-access\n with ops.device(obj._resource_device):\n new_resource = obj._create_resource()\n # pylint: enable=protected-access\n resource_map[obj.resource_handle] = new_resource\n self.captured_tensor_node_ids[obj.resource_handle] = node_id\n elif resource_variable_ops.is_resource_variable(obj):\n new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)\n object_map[obj] = new_variable\n resource_map[obj.handle] = new_variable.handle\n self.captured_tensor_node_ids[obj.handle] = node_id\n elif isinstance(obj, tracking.TrackableAsset):\n _process_asset(obj, asset_info, resource_map)\n self.captured_tensor_node_ids[obj.asset_path] = node_id\n\n for concrete_function in self.concrete_functions:\n for capture in concrete_function.captured_inputs:\n if (tensor_util.is_tensor(capture)\n and capture.dtype not in _UNCOPIABLE_DTYPES\n and capture not in self.captured_tensor_node_ids):\n copied_tensor = constant_op.constant(\n tensor_util.constant_value(capture))\n node_id = len(self.nodes)\n node = _CapturedConstant(\n eager_tensor=capture, graph_tensor=copied_tensor)\n self.nodes.append(node)\n self.node_ids[capture] = node_id\n self.node_ids[node] = node_id\n self.captured_tensor_node_ids[capture] = node_id\n resource_map[capture] = copied_tensor\n\n return object_map, resource_map, asset_info\n\n\ndef _tensor_dict_to_tensorinfo(tensor_dict):\n return {key: utils_impl.build_tensor_info_internal(value)\n for key, value in tensor_dict.items()}\n\n\ndef _map_captures_to_created_tensors(\n original_captures, resource_map):\n \"\"\"Maps eager tensors captured by a function to Graph resources for export.\n\n Args:\n original_captures: A dictionary mapping from tensors captured by the\n function to interior placeholders for those tensors (inside the function\n body).\n resource_map: A dictionary mapping from resource tensors owned by the eager\n context to resource tensors in the exported graph.\n\n Returns:\n A list of stand-in tensors which belong to the exported graph, corresponding\n to the function's captures.\n\n Raises:\n AssertionError: If the function references a resource which is not part of\n `resource_map`.\n \"\"\"\n export_captures = []\n for exterior, interior in original_captures.items():\n mapped_resource = resource_map.get(exterior, None)\n if mapped_resource is None:\n raise AssertionError(\n (\"Tried to export a function which references untracked object {}.\"\n \"TensorFlow 
objects (e.g. tf.Variable) captured by functions must \"\n \"be tracked by assigning them to an attribute of a tracked object \"\n \"or assigned to an attribute of the main object directly.\")\n .format(interior))\n export_captures.append(mapped_resource)\n return export_captures\n\n\ndef _map_function_arguments_to_created_inputs(\n function_arguments, signature_key, function_name):\n \"\"\"Creates exterior placeholders in the exported graph for function arguments.\n\n Functions have two types of inputs: tensors captured from the outside (eager)\n context, and arguments to the function which we expect to receive from the\n user at each call. `_map_captures_to_created_tensors` replaces\n captured tensors with stand-ins (typically these are resource dtype tensors\n associated with variables). `_map_function_inputs_to_created_inputs` runs over\n every argument, creating a new placeholder for each which will belong to the\n exported graph rather than the function body.\n\n Args:\n function_arguments: A list of argument placeholders in the function body.\n signature_key: The name of the signature being exported, for error messages.\n function_name: The name of the function, for error messages.\n\n Returns:\n A tuple of (mapped_inputs, exterior_placeholders)\n mapped_inputs: A list with entries corresponding to `function_arguments`\n containing all of the inputs of the function gathered from the exported\n graph (both captured resources and arguments).\n exterior_argument_placeholders: A dictionary mapping from argument names\n to placeholders in the exported graph, containing the explicit arguments\n to the function which a user is expected to provide.\n\n Raises:\n ValueError: If argument names are not unique.\n \"\"\"\n # `exterior_argument_placeholders` holds placeholders which are outside the\n # function body, directly contained in a MetaGraph of the SavedModel. The\n # function body itself contains nearly identical placeholders used when\n # running the function, but these exterior placeholders allow Session-based\n # APIs to call the function using feeds and fetches which name Tensors in the\n # MetaGraph.\n exterior_argument_placeholders = {}\n mapped_inputs = []\n for placeholder in function_arguments:\n # `export_captures` contains an exhaustive set of captures, so if we don't\n # find the input there then we now know we have an argument.\n user_input_name = compat.as_str_any(\n placeholder.op.get_attr(\"_user_specified_name\"))\n # If the internal placeholders for a function have names which were\n # uniquified by TensorFlow, then a single user-specified argument name\n # must refer to multiple Tensors. The resulting signatures would be\n # confusing to call. Instead, we throw an exception telling the user to\n # specify explicit names.\n if user_input_name != placeholder.op.name:\n # This should be unreachable, since concrete functions may not be\n # generated with non-unique argument names.\n raise ValueError(\n (\"Got non-flat/non-unique argument names for SavedModel \"\n \"signature '{}': more than one argument to '{}' was named '{}'. \"\n \"Signatures have one Tensor per named input, so to have \"\n \"predictable names Python functions used to generate these \"\n \"signatures should avoid *args and Tensors in nested \"\n \"structures unless unique names are specified for each. Use \"\n \"tf.TensorSpec(..., name=...) 
to provide a name for a Tensor \"\n \"input.\")\n .format(signature_key, compat.as_str_any(function_name),\n user_input_name))\n arg_placeholder = array_ops.placeholder(\n shape=placeholder.shape,\n dtype=placeholder.dtype,\n name=\"{}_{}\".format(signature_key, user_input_name))\n exterior_argument_placeholders[user_input_name] = arg_placeholder\n mapped_inputs.append(arg_placeholder)\n return mapped_inputs, exterior_argument_placeholders\n\n\ndef _call_function_with_mapped_captures(function, args, resource_map):\n \"\"\"Calls `function` in the exported graph, using mapped resource captures.\"\"\"\n export_captures = _map_captures_to_created_tensors(\n function.graph.captures, resource_map)\n mapped_inputs = args + export_captures\n # Calls the function quite directly, since we have new captured resource\n # tensors we need to feed in which weren't part of the original function\n # definition.\n # pylint: disable=protected-access\n outputs = function._build_call_outputs(\n function._inference_function.call(context.context(), mapped_inputs))\n return outputs\n\n\ndef _generate_signatures(signature_functions, resource_map):\n \"\"\"Validates and calls `signature_functions` in the default graph.\n\n Args:\n signature_functions: A dictionary mapping string keys to concrete TensorFlow\n functions (e.g. from `signature_serialization.canonicalize_signatures`)\n which will be used to generate SignatureDefs.\n resource_map: A dictionary mapping from resource tensors in the eager\n context to resource tensors in the Graph being exported. This dictionary\n is used to re-bind resources captured by functions to tensors which will\n exist in the SavedModel.\n\n Returns:\n Each function in the `signature_functions` dictionary is called with\n placeholder Tensors, generating a function call operation and output\n Tensors. The placeholder Tensors, the function call operation, and the\n output Tensors from the function call are part of the default Graph.\n\n This function then returns a dictionary with the same structure as\n `signature_functions`, with the concrete functions replaced by SignatureDefs\n implicitly containing information about how to call each function from a\n TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference\n the generated placeholders and Tensor outputs by name.\n\n The caller is expected to include the default Graph set while calling this\n function as a MetaGraph in a SavedModel, including the returned\n SignatureDefs as part of that MetaGraph.\n \"\"\"\n signatures = {}\n for signature_key, function in sorted(signature_functions.items()):\n if function.graph.captures:\n argument_inputs = function.graph.inputs[:-len(function.graph.captures)]\n else:\n argument_inputs = function.graph.inputs\n mapped_inputs, exterior_argument_placeholders = (\n _map_function_arguments_to_created_inputs(\n argument_inputs, signature_key, function.name))\n outputs = _call_function_with_mapped_captures(\n function, mapped_inputs, resource_map)\n signatures[signature_key] = signature_def_utils.build_signature_def(\n _tensor_dict_to_tensorinfo(exterior_argument_placeholders),\n _tensor_dict_to_tensorinfo(outputs),\n method_name=signature_constants.PREDICT_METHOD_NAME)\n return signatures\n\n\ndef _trace_resource_initializers(accessible_objects):\n \"\"\"Create concrete functions from `CapturableResource` objects.\"\"\"\n resource_initializers = []\n\n def _wrap_initializer(obj):\n obj._initialize() # pylint: disable=protected-access\n return constant_op.constant(1.) 
# Dummy control output\n\n def _wrap_obj_initializer(obj):\n return lambda: _wrap_initializer(obj)\n\n for obj in accessible_objects:\n if isinstance(obj, tracking.CapturableResource):\n resource_initializers.append(def_function.function(\n _wrap_obj_initializer(obj),\n # All inputs are captures.\n input_signature=[]).get_concrete_function())\n return resource_initializers\n\n\n_AssetInfo = collections.namedtuple(\n \"_AssetInfo\", [\n # List of AssetFileDef protocol buffers\n \"asset_defs\",\n # Map from asset variable resource Tensors to their init ops\n \"asset_initializers_by_resource\",\n # Map from base asset filenames to full paths\n \"asset_filename_map\",\n # Map from TrackableAsset to index of corresponding AssetFileDef\n \"asset_index\"])\n\n\ndef _process_asset(trackable_asset, asset_info, resource_map):\n \"\"\"Add `trackable_asset` to `asset_info` and `resource_map`.\"\"\"\n original_path_tensor = trackable_asset.asset_path\n original_path = tensor_util.constant_value(original_path_tensor)\n try:\n original_path = str(original_path.astype(str))\n except AttributeError:\n # Already a string rather than a numpy array\n pass\n path = builder_impl.get_asset_filename_to_add(\n asset_filepath=original_path,\n asset_filename_map=asset_info.asset_filename_map)\n # TODO(andresp): Instead of mapping 1-1 between trackable asset\n # and asset in the graph def consider deduping the assets that\n # point to the same file.\n asset_path_initializer = array_ops.placeholder(\n shape=original_path_tensor.shape,\n dtype=dtypes.string,\n name=\"asset_path_initializer\")\n asset_variable = resource_variable_ops.ResourceVariable(\n asset_path_initializer)\n asset_info.asset_filename_map[path] = original_path\n asset_def = meta_graph_pb2.AssetFileDef()\n asset_def.filename = path\n asset_def.tensor_info.name = asset_path_initializer.name\n asset_info.asset_defs.append(asset_def)\n asset_info.asset_initializers_by_resource[original_path_tensor] = (\n asset_variable.initializer)\n asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1\n resource_map[original_path_tensor] = asset_variable\n\n\ndef _fill_meta_graph_def(meta_graph_def, saveable_view, signature_functions):\n \"\"\"Generates a MetaGraph which calls `signature_functions`.\n\n Args:\n meta_graph_def: The MetaGraphDef proto to fill.\n saveable_view: The _SaveableView being exported.\n signature_functions: A dictionary mapping signature keys to concrete\n functions containing signatures to add to the MetaGraph.\n\n Returns:\n An _AssetInfo, which contains information to help creating the SavedModel.\n \"\"\"\n # List objects from the eager context to make sure Optimizers give us the\n # right Graph-dependent variables.\n accessible_objects = saveable_view.nodes\n resource_initializer_functions = _trace_resource_initializers(\n accessible_objects)\n exported_graph = ops.Graph()\n resource_initializer_ops = []\n with exported_graph.as_default():\n object_map, resource_map, asset_info = saveable_view.map_resources()\n for resource_initializer_function in resource_initializer_functions:\n asset_dependencies = []\n for capture in resource_initializer_function.graph.external_captures:\n asset_initializer = asset_info.asset_initializers_by_resource.get(\n capture, None)\n if asset_initializer is not None:\n asset_dependencies.append(asset_initializer)\n with ops.control_dependencies(asset_dependencies):\n resource_initializer_ops.append(\n _call_function_with_mapped_captures(\n resource_initializer_function, [], resource_map))\n 
resource_initializer_ops.extend(\n asset_info.asset_initializers_by_resource.values())\n with ops.control_dependencies(resource_initializer_ops):\n init_op = control_flow_ops.no_op()\n # Add the same op to the main_op collection and to the init_op\n # signature. The collection is for compatibility with older loader APIs;\n # only one will be executed.\n meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(\n init_op.name)\n meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(\n signature_def_utils.op_signature_def(\n init_op, constants.INIT_OP_SIGNATURE_KEY))\n\n # Saving an object-based checkpoint again gathers variables. We need to do the\n # gathering from the eager context so Optimizers save the right set of\n # variables, but want any operations associated with the save/restore to be in\n # the exported graph (thus the `to_graph` argument).\n saver = functional_saver.MultiDeviceSaver(\n saveable_view.checkpoint_view.frozen_saveable_objects(\n object_map=object_map, to_graph=exported_graph))\n\n with exported_graph.as_default():\n signatures = _generate_signatures(signature_functions, resource_map)\n for concrete_function in saveable_view.concrete_functions:\n concrete_function.add_to_graph()\n saver_def = saver.to_proto()\n meta_graph_def.saver_def.CopyFrom(saver_def)\n graph_def = exported_graph.as_graph_def(add_shapes=True)\n\n meta_graph_def.graph_def.CopyFrom(graph_def)\n meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)\n meta_graph_def.meta_info_def.tensorflow_version = versions.__version__\n meta_graph_def.meta_info_def.tensorflow_git_version = (\n versions.__git_version__)\n # We currently always strip default attributes.\n meta_graph_def.meta_info_def.stripped_default_attrs = True\n meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(\n meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))\n meta_graph_def.asset_file_def.extend(asset_info.asset_defs)\n for signature_key, signature in signatures.items():\n meta_graph_def.signature_def[signature_key].CopyFrom(signature)\n meta_graph.strip_graph_default_valued_attrs(meta_graph_def)\n return asset_info, exported_graph\n\n\ndef _serialize_object_graph(saveable_view, asset_file_def_index):\n \"\"\"Save a SavedObjectGraph proto for `root`.\"\"\"\n # SavedObjectGraph is similar to the TrackableObjectGraph proto in the\n # checkpoint. 
It will eventually go into the SavedModel.\n proto = saved_object_graph_pb2.SavedObjectGraph()\n saveable_view.fill_object_graph_proto(proto)\n\n coder = nested_structure_coder.StructureCoder()\n for concrete_function in saveable_view.concrete_functions:\n serialized = function_serialization.serialize_concrete_function(\n concrete_function, saveable_view.captured_tensor_node_ids, coder)\n if serialized is not None:\n proto.concrete_functions[concrete_function.name].CopyFrom(\n serialized)\n\n for obj, obj_proto in zip(saveable_view.nodes, proto.nodes):\n _write_object_proto(obj, obj_proto, asset_file_def_index)\n return proto\n\n\ndef _write_object_proto(obj, proto, asset_file_def_index):\n \"\"\"Saves an object into SavedObject proto.\"\"\"\n if isinstance(obj, tracking.TrackableAsset):\n proto.asset.SetInParent()\n proto.asset.asset_file_def_index = asset_file_def_index[obj]\n elif resource_variable_ops.is_resource_variable(obj):\n proto.variable.SetInParent()\n if not obj.name.endswith(\":0\"):\n raise ValueError(\"Cowardly refusing to save variable %s because of\"\n \" unexpected suffix which won't be restored.\")\n proto.variable.name = meta_graph._op_name(obj.name) # pylint: disable=protected-access\n proto.variable.trainable = obj.trainable\n proto.variable.dtype = obj.dtype.as_datatype_enum\n proto.variable.synchronization = obj.synchronization.value\n proto.variable.aggregation = obj.aggregation.value\n proto.variable.shape.CopyFrom(obj.shape.as_proto())\n elif isinstance(obj, def_function.Function):\n proto.function.CopyFrom(\n function_serialization.serialize_function(obj))\n elif isinstance(obj, defun.ConcreteFunction):\n proto.bare_concrete_function.CopyFrom(\n function_serialization.serialize_bare_concrete_function(obj))\n elif isinstance(obj, _CapturedConstant):\n proto.constant.operation = obj.graph_tensor.op.name\n elif isinstance(obj, tracking.CapturableResource):\n proto.resource.device = obj._resource_device # pylint: disable=protected-access\n else:\n registered_type_proto = revived_types.serialize(obj)\n if registered_type_proto is None:\n # Fallback for types with no matching registration\n registered_type_proto = saved_object_graph_pb2.SavedUserObject(\n identifier=\"_generic_user_object\",\n version=versions_pb2.VersionDef(\n producer=1, min_consumer=1, bad_consumers=[]))\n proto.user_object.CopyFrom(registered_type_proto)\n\n\n@tf_export(\"saved_model.save\",\n v1=[\"saved_model.save\", \"saved_model.experimental.save\"])\ndef save(obj, export_dir, signatures=None):\n # pylint: disable=line-too-long\n \"\"\"Exports the Trackable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).\n\n Example usage:\n\n ```python\n class Adder(tf.Module):\n\n @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])\n def add(self, x):\n return x + x + 1.\n\n to_export = Adder()\n tf.saved_model.save(to_export, '/tmp/adder')\n ```\n\n The resulting SavedModel is then servable with an input named \"x\", its value\n having any shape and dtype float32.\n\n The optional `signatures` argument controls which methods in `obj` will be\n available to programs which consume `SavedModel`s, for example serving\n APIs. 
Python functions may be decorated with\n `@tf.function(input_signature=...)` and passed as signatures directly, or\n lazily with a call to `get_concrete_function` on the method decorated with\n `@tf.function`.\n\n If the `signatures` argument is omitted, `obj` will be searched for\n `@tf.function`-decorated methods. If exactly one `@tf.function` is found, that\n method will be used as the default signature for the SavedModel. This behavior\n is expected to change in the future, when a corresponding\n `tf.saved_model.load` symbol is added. At that point signatures will be\n completely optional, and any `@tf.function` attached to `obj` or its\n dependencies will be exported for use with `load`.\n\n When invoking a signature in an exported SavedModel, `Tensor` arguments are\n identified by name. These names will come from the Python function's argument\n names by default. They may be overridden by specifying a `name=...` argument\n in the corresponding `tf.TensorSpec` object. Explicit naming is required if\n multiple `Tensor`s are passed through a single argument to the Python\n function.\n\n The outputs of functions used as `signatures` must either be flat lists, in\n which case outputs will be numbered, or a dictionary mapping string keys to\n `Tensor`, in which case the keys will be used to name outputs.\n\n Signatures are available in objects returned by `tf.saved_model.load` as a\n `.signatures` attribute. This is a reserved attribute: `tf.saved_model.save`\n on an object with a custom `.signatures` attribute will raise an exception.\n\n Since `tf.keras.Model` objects are also Trackable, this function can be\n used to export Keras models. For example, exporting with a signature\n specified:\n\n ```python\n class Model(tf.keras.Model):\n\n @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])\n def serve(self, serialized):\n ...\n\n m = Model()\n tf.saved_model.save(m, '/tmp/saved_model/')\n ```\n\n Exporting from a function without a fixed signature:\n\n ```python\n class Model(tf.keras.Model):\n\n @tf.function\n def call(self, x):\n ...\n\n m = Model()\n tf.saved_model.save(\n m, '/tmp/saved_model/',\n signatures=m.call.get_concrete_function(\n tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name=\"inp\")))\n ```\n\n `tf.keras.Model` instances constructed from inputs and outputs already have a\n signature and so do not require a `@tf.function` decorator or a `signatures`\n argument. If neither are specified, the model's forward pass is exported.\n\n ```python\n x = input_layer.Input((4,), name=\"x\")\n y = core.Dense(5, name=\"out\")(x)\n model = training.Model(x, y)\n tf.saved_model.save(model, '/tmp/saved_model/')\n # The exported SavedModel takes \"x\" with shape [None, 4] and returns \"out\"\n # with shape [None, 5]\n ```\n\n Variables must be tracked by assigning them to an attribute of a tracked\n object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers\n from `tf.keras.layers`, optimizers from `tf.train`) track their variables\n automatically. This is the same tracking scheme that `tf.train.Checkpoint`\n uses, and an exported `Checkpoint` object may be restored as a training\n checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's\n \"variables/\" subdirectory. Currently variables are the only stateful objects\n supported by `tf.saved_model.save`, but others (e.g. 
tables) will be supported\n in the future.\n\n `tf.function` does not hard-code device annotations from outside the function\n body, instead using the calling context's device. This means for example that\n exporting a model which runs on a GPU and serving it on a CPU will generally\n work, with some exceptions. `tf.device` annotations inside the body of the\n function will be hard-coded in the exported model; this type of annotation is\n discouraged. Device-specific operations, e.g. with \"cuDNN\" in the name or with\n device-specific layouts, may cause issues. Currently a `DistributionStrategy`\n is another exception: active distribution strategies will cause device\n placements to be hard-coded in a function. Exporting a single-device\n computation and importing under a `DistributionStrategy` is not currently\n supported, but may be in the future.\n\n SavedModels exported with `tf.saved_model.save` [strip default-valued\n attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)\n automatically, which removes one source of incompatibilities when the consumer\n of a SavedModel is running an older TensorFlow version than the\n producer. There are however other sources of incompatibilities which are not\n handled automatically, such as when the exported model contains operations\n which the consumer does not have definitions for.\n\n Args:\n obj: A trackable object to export.\n export_dir: A directory in which to write the SavedModel.\n signatures: Optional, either a `tf.function` with an input signature\n specified or the result of `f.get_concrete_function` on a\n `@tf.function`-decorated function `f`, in which case `f` will be used to\n generate a signature for the SavedModel under the default serving\n signature key. `signatures` may also be a dictionary, in which case it\n maps from signature keys to either `tf.function` instances with input\n signatures or concrete functions. The keys of such a dictionary may be\n arbitrary strings, but will typically be from the\n `tf.saved_model.signature_constants` module.\n\n Raises:\n ValueError: If `obj` is not trackable.\n\n @compatibility(eager)\n Not well supported when graph building. From TensorFlow 1.x,\n `tf.compat.v1.enable_eager_execution()` should run first. Calling\n tf.saved_model.save in a loop when graph building from TensorFlow 1.x will\n add new save operations to the default graph each iteration.\n\n May not be called from within a function body.\n @end_compatibility\n \"\"\"\n if ops.inside_function():\n raise AssertionError(\n \"tf.saved_model.save is not supported inside a traced \"\n \"@tf.function. 
Move the call to the outer eagerly-executed \"\n \"context.\")\n # pylint: enable=line-too-long\n if not isinstance(obj, base.Trackable):\n raise ValueError(\n \"Expected a Trackable object for export, got {}.\".format(obj))\n\n checkpoint_graph_view = _AugmentedGraphView(obj)\n if signatures is None:\n signatures = signature_serialization.find_function_to_export(\n checkpoint_graph_view)\n\n signatures = signature_serialization.canonicalize_signatures(signatures)\n signature_serialization.validate_saveable_view(checkpoint_graph_view)\n signature_map = signature_serialization.create_signature_map(signatures)\n checkpoint_graph_view.add_object(\n parent_node=checkpoint_graph_view.root,\n name_in_parent=signature_serialization.SIGNATURE_ATTRIBUTE_NAME,\n subgraph_root=signature_map)\n\n # Use _SaveableView to provide a frozen listing of properties and functions.\n # Note we run this twice since, while constructing the view the first time\n # there can be side effects of creating variables.\n _ = _SaveableView(checkpoint_graph_view)\n saveable_view = _SaveableView(checkpoint_graph_view)\n\n # TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x\n # compatible (no sessions) and share it with this export API rather than\n # making a SavedModel proto and writing it directly.\n saved_model = saved_model_pb2.SavedModel()\n meta_graph_def = saved_model.meta_graphs.add()\n object_saver = util.TrackableSaver(checkpoint_graph_view)\n asset_info, exported_graph = _fill_meta_graph_def(\n meta_graph_def, saveable_view, signatures)\n saved_model.saved_model_schema_version = (\n constants.SAVED_MODEL_SCHEMA_VERSION)\n # So far we've just been generating protocol buffers with no I/O. Now we write\n # the checkpoint, copy assets into the assets directory, and write out the\n # SavedModel proto itself.\n utils_impl.get_or_create_variables_dir(export_dir)\n object_saver.save(utils_impl.get_variables_path(export_dir))\n builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,\n export_dir)\n path = os.path.join(\n compat.as_bytes(export_dir),\n compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))\n object_graph_proto = _serialize_object_graph(\n saveable_view, asset_info.asset_index)\n meta_graph_def.object_graph_def.CopyFrom(object_graph_proto)\n file_io.write_string_to_file(path, saved_model.SerializeToString())\n # Clean reference cycles so repeated export()s don't make work for the garbage\n # collector. Before this point we need to keep references to captured\n # constants in the saved graph.\n ops.dismantle_graph(exported_graph)\n"
] | [
[
"tensorflow.python.saved_model.utils_impl.get_or_create_variables_dir",
"tensorflow.python.saved_model.revived_types.serialize",
"tensorflow.core.protobuf.meta_graph_pb2.AssetFileDef",
"tensorflow.python.saved_model.function_serialization.serialize_function",
"tensorflow.python.saved_model.signature_serialization.canonicalize_signatures",
"tensorflow.python.framework.meta_graph.stripped_op_list_for_graph",
"tensorflow.python.saved_model.signature_serialization.create_signature_map",
"tensorflow.python.saved_model.function_serialization.serialize_concrete_function",
"tensorflow.python.framework.meta_graph._op_name",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.resource_variable_ops.copy_to_graph_uninitialized",
"tensorflow.core.protobuf.saved_model_pb2.SavedModel",
"tensorflow.python.training.tracking.util.TrackableSaver",
"tensorflow.python.saved_model.signature_serialization.find_function_to_export",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.framework.ops.dismantle_graph",
"tensorflow.python.training.tracking.object_identity.ObjectIdentityWeakKeyDictionary",
"tensorflow.python.saved_model.signature_def_utils.op_signature_def",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.ops.device",
"tensorflow.python.saved_model.signature_serialization.validate_saveable_view",
"tensorflow.python.eager.context.context",
"tensorflow.core.protobuf.saved_object_graph_pb2.SavedObjectGraph",
"tensorflow.python.saved_model.builder_impl.get_asset_filename_to_add",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.compat.as_str_any",
"tensorflow.python.saved_model.utils_impl.build_tensor_info_internal",
"tensorflow.python.saved_model.builder_impl.copy_assets_to_destination_dir",
"tensorflow.core.framework.versions_pb2.VersionDef",
"tensorflow.python.saved_model.function_serialization.serialize_bare_concrete_function",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.framework.meta_graph.strip_graph_default_valued_attrs",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.saved_model.utils_impl.get_variables_path",
"tensorflow.python.saved_model.nested_structure_coder.StructureCoder",
"tensorflow.python.framework.ops.inside_function",
"tensorflow.python.training.tracking.object_identity.ObjectIdentityDictionary",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.training.tracking.base.TrackableReference",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.framework.tensor_util.is_tensor"
]
] |
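The `save` record above exports any Trackable object by freezing a view of its variables and functions, tracing signatures into a MetaGraph, and writing the checkpoint, assets, and SavedModel proto to disk. The shortest end-to-end use, taken directly from the function's own docstring example (the `/tmp/adder` path is just the docstring's placeholder):

```python
import tensorflow as tf

class Adder(tf.Module):
    @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
    def add(self, x):
        return x + x + 1.0

to_export = Adder()
# Writes saved_model.pb plus the variables/ and assets/ subdirectories;
# the lone @tf.function becomes the default serving signature.
tf.saved_model.save(to_export, "/tmp/adder")
```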
Jianrong-Lu/MONAI | [
"c319ca8ff31aa980a045f1b913fb2eb22aadb080"
] | [
"monai/transforms/spatial/array.py"
] | [
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for spatial operations\nhttps://github.com/Project-MONAI/MONAI/wiki/MONAI_Design\n\"\"\"\nimport warnings\nfrom copy import deepcopy\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import USE_COMPILED, DtypeLike\nfrom monai.config.type_definitions import NdarrayOrTensor\nfrom monai.data.utils import AFFINE_TOL, compute_shape_offset, reorient_spatial_axes, to_affine_nd, zoom_affine\nfrom monai.networks.layers import AffineTransform, GaussianFilter, grid_pull\nfrom monai.networks.utils import meshgrid_ij, normalize_transform\nfrom monai.transforms.croppad.array import CenterSpatialCrop, Pad\nfrom monai.transforms.transform import Randomizable, RandomizableTransform, ThreadUnsafe, Transform\nfrom monai.transforms.utils import (\n create_control_grid,\n create_grid,\n create_rotate,\n create_scale,\n create_shear,\n create_translate,\n map_spatial_axes,\n)\nfrom monai.transforms.utils_pytorch_numpy_unification import allclose, moveaxis\nfrom monai.utils import (\n GridSampleMode,\n GridSamplePadMode,\n InterpolateMode,\n NumpyPadMode,\n PytorchPadMode,\n ensure_tuple,\n ensure_tuple_rep,\n ensure_tuple_size,\n fall_back_tuple,\n issequenceiterable,\n optional_import,\n pytorch_after,\n)\nfrom monai.utils.deprecate_utils import deprecated_arg\nfrom monai.utils.enums import TransformBackends\nfrom monai.utils.module import look_up_option\nfrom monai.utils.type_conversion import convert_data_type, convert_to_dst_type\n\nnib, has_nib = optional_import(\"nibabel\")\n\n__all__ = [\n \"SpatialResample\",\n \"ResampleToMatch\",\n \"Spacing\",\n \"Orientation\",\n \"Flip\",\n \"GridDistortion\",\n \"Resize\",\n \"Rotate\",\n \"Zoom\",\n \"Rotate90\",\n \"RandRotate90\",\n \"RandRotate\",\n \"RandFlip\",\n \"RandGridDistortion\",\n \"RandAxisFlip\",\n \"RandZoom\",\n \"AffineGrid\",\n \"RandAffineGrid\",\n \"RandDeformGrid\",\n \"Resample\",\n \"Affine\",\n \"RandAffine\",\n \"Rand2DElastic\",\n \"Rand3DElastic\",\n]\n\nRandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]]\n\n\nclass SpatialResample(Transform):\n \"\"\"\n Resample input image from the orientation/spacing defined by ``src_affine`` affine matrix into\n the ones specified by ``dst_affine`` affine matrix.\n\n Internally this transform computes the affine transform matrix from ``src_affine`` to ``dst_affine``,\n by ``xform = linalg.solve(src_affine, dst_affine)``, and call ``monai.transforms.Affine`` with ``xform``.\n \"\"\"\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: DtypeLike = np.float64,\n ):\n \"\"\"\n Args:\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. 
Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If ``None``, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n self.mode = mode\n self.padding_mode = padding_mode\n self.align_corners = align_corners\n self.dtype = dtype\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n src_affine: Optional[NdarrayOrTensor] = None,\n dst_affine: Optional[NdarrayOrTensor] = None,\n spatial_size: Optional[Union[Sequence[int], np.ndarray, int]] = None,\n mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,\n align_corners: Optional[bool] = False,\n dtype: DtypeLike = None,\n ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:\n \"\"\"\n Args:\n img: input image to be resampled. It currently supports channel-first arrays with\n at most three spatial dimensions.\n src_affine: source affine matrix. Defaults to ``None``, which means the identity matrix.\n the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.\n dst_affine: destination affine matrix. Defaults to ``None``, which means the same as `src_affine`.\n the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.\n when `dst_affine` and `spatial_size` are None, the input will be returned without resampling,\n but the data type will be `float32`.\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined,\n the transform will compute a spatial size automatically containing the previous field of view.\n if `spatial_size` is ``-1`` are the transform will use the corresponding input img size.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``self.dtype`` or\n ``np.float64`` (for best precision). 
If ``None``, use the data type of input data.\n To be compatible with other modules, the output data type is always `float32`.\n\n The spatial rank is determined by the smallest among ``img.ndim -1``, ``len(src_affine) - 1``, and ``3``.\n\n When both ``monai.config.USE_COMPILED`` and ``align_corners`` are set to ``True``,\n MONAI's resampling implementation will be used.\n Set `dst_affine` and `spatial_size` to `None` to turn off the resampling step.\n \"\"\"\n if src_affine is None:\n src_affine = np.eye(4, dtype=np.float64)\n spatial_rank = min(len(img.shape) - 1, src_affine.shape[0] - 1, 3)\n if (not isinstance(spatial_size, int) or spatial_size != -1) and spatial_size is not None:\n spatial_rank = min(len(ensure_tuple(spatial_size)), 3) # infer spatial rank based on spatial_size\n src_affine = to_affine_nd(spatial_rank, src_affine)\n dst_affine = to_affine_nd(spatial_rank, dst_affine) if dst_affine is not None else src_affine\n dst_affine, *_ = convert_to_dst_type(dst_affine, dst_affine, dtype=torch.float32)\n\n in_spatial_size = np.asarray(img.shape[1 : spatial_rank + 1])\n if isinstance(spatial_size, int) and (spatial_size == -1): # using the input spatial size\n spatial_size = in_spatial_size\n elif spatial_size is None and spatial_rank > 1: # auto spatial size\n spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine, dst_affine) # type: ignore\n spatial_size = np.asarray(fall_back_tuple(ensure_tuple(spatial_size)[:spatial_rank], in_spatial_size))\n\n if (\n allclose(src_affine, dst_affine, atol=AFFINE_TOL)\n and allclose(spatial_size, in_spatial_size)\n or spatial_rank == 1\n ):\n # no significant change, return original image\n output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)\n return output_data, dst_affine\n\n if has_nib and isinstance(img, np.ndarray):\n spatial_ornt, dst_r = reorient_spatial_axes(img.shape[1 : spatial_rank + 1], src_affine, dst_affine)\n if allclose(dst_r, dst_affine, atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):\n # simple reorientation achieves the desired affine\n spatial_ornt[:, 0] += 1\n spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])\n img_ = nib.orientations.apply_orientation(img, spatial_ornt)\n output_data, *_ = convert_to_dst_type(img_, img, dtype=torch.float32)\n return output_data, dst_affine\n\n try:\n src_affine, *_ = convert_to_dst_type(src_affine, dst_affine)\n if isinstance(src_affine, np.ndarray):\n xform = np.linalg.solve(src_affine, dst_affine)\n else:\n xform = (\n torch.linalg.solve(src_affine, dst_affine)\n if pytorch_after(1, 8, 0)\n else torch.solve(dst_affine, src_affine).solution # type: ignore\n )\n except (np.linalg.LinAlgError, RuntimeError) as e:\n raise ValueError(f\"src affine is not invertible: {src_affine}\") from e\n xform = to_affine_nd(spatial_rank, xform)\n # no resampling if it's identity transform\n if allclose(xform, np.diag(np.ones(len(xform))), atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):\n output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)\n return output_data, dst_affine\n\n _dtype = dtype or self.dtype or img.dtype\n in_spatial_size = in_spatial_size.tolist()\n chns, additional_dims = img.shape[0], img.shape[spatial_rank + 1 :] # beyond three spatial dims\n # resample\n img_ = convert_data_type(img, torch.Tensor, dtype=_dtype)[0]\n xform = convert_to_dst_type(xform, img_)[0]\n align_corners = self.align_corners if align_corners is None else align_corners\n mode = mode or self.mode\n padding_mode = padding_mode or 
self.padding_mode\n if additional_dims:\n xform_shape = [-1] + in_spatial_size\n img_ = img_.reshape(xform_shape)\n if align_corners:\n _t_r = torch.diag(torch.ones(len(xform), dtype=xform.dtype, device=xform.device)) # type: ignore\n for idx, d_dst in enumerate(spatial_size[:spatial_rank]):\n _t_r[idx, -1] = (max(d_dst, 2) - 1.0) / 2.0\n xform = xform @ _t_r\n if not USE_COMPILED:\n _t_l = normalize_transform(\n in_spatial_size, xform.device, xform.dtype, align_corners=True # type: ignore\n )\n xform = _t_l @ xform # type: ignore\n affine_xform = Affine(\n affine=xform, spatial_size=spatial_size, norm_coords=False, image_only=True, dtype=_dtype\n )\n output_data = affine_xform(img_, mode=mode, padding_mode=padding_mode)\n else:\n affine_xform = AffineTransform(\n normalized=False,\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners,\n reverse_indexing=True,\n )\n output_data = affine_xform(img_.unsqueeze(0), theta=xform, spatial_size=spatial_size).squeeze(0)\n if additional_dims:\n full_shape = (chns, *spatial_size, *additional_dims)\n output_data = output_data.reshape(full_shape)\n # output dtype float\n output_data, *_ = convert_to_dst_type(output_data, img, dtype=torch.float32)\n return output_data, dst_affine\n\n\nclass ResampleToMatch(SpatialResample):\n \"\"\"Resample an image to match given meta data. The affine matrix will be aligned,\n and the size of the output image will match.\"\"\"\n\n def __call__( # type: ignore\n self,\n img: NdarrayOrTensor,\n src_meta: Optional[Dict] = None,\n dst_meta: Optional[Dict] = None,\n mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,\n align_corners: Optional[bool] = False,\n dtype: DtypeLike = None,\n ):\n if src_meta is None:\n raise RuntimeError(\"`in_meta` is missing\")\n if dst_meta is None:\n raise RuntimeError(\"`out_meta` is missing\")\n mode = mode or self.mode\n padding_mode = padding_mode or self.padding_mode\n align_corners = self.align_corners if align_corners is None else align_corners\n dtype = dtype or self.dtype\n src_affine = src_meta.get(\"affine\")\n dst_affine = dst_meta.get(\"affine\")\n img, updated_affine = super().__call__(\n img=img,\n src_affine=src_affine,\n dst_affine=dst_affine,\n spatial_size=dst_meta.get(\"spatial_shape\"),\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners,\n dtype=dtype,\n )\n dst_meta = deepcopy(dst_meta)\n dst_meta[\"affine\"] = updated_affine\n return img, dst_meta\n\n\nclass Spacing(Transform):\n \"\"\"\n Resample input image into the specified `pixdim`.\n \"\"\"\n\n backend = SpatialResample.backend\n\n def __init__(\n self,\n pixdim: Union[Sequence[float], float, np.ndarray],\n diagonal: bool = False,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: DtypeLike = np.float64,\n image_only: bool = False,\n ) -> None:\n \"\"\"\n Args:\n pixdim: output voxel spacing. 
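A minimal usage sketch for ``SpatialResample`` defined above; the image, shapes and affines below are hypothetical::\n\n    import numpy as np\n    img = np.zeros((1, 64, 64, 32), dtype=np.float32)  # hypothetical channel-first 3D image\n    src = np.diag([2.0, 2.0, 3.0, 1.0])                # hypothetical 2 x 2 x 3 mm voxel spacing\n    dst = np.eye(4)                                    # 1 mm isotropic target\n    out, out_affine = SpatialResample()(img, src_affine=src, dst_affine=dst)\n    # the output is float32 and its spatial size is computed to cover the input field of view\n\n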
if providing a single number, will use it for the first dimension.\n items of the pixdim sequence map to the spatial dimensions of input image, if length\n of pixdim sequence is longer than image spatial dimensions, will ignore the longer part,\n if shorter, will pad with `1.0`.\n if the components of the `pixdim` are non-positive values, the transform will use the\n corresponding components of the original pixdim, which is computed from the `affine`\n matrix of input image.\n diagonal: whether to resample the input to have a diagonal affine matrix.\n If True, the input data is resampled to the following affine::\n\n np.diag((pixdim_0, pixdim_1, ..., pixdim_n, 1))\n\n This effectively resets the volume to the world coordinate system (RAS+ in nibabel).\n The original orientation, rotation, shearing are not preserved.\n\n If False, this transform preserves the axes orientation, orthogonal rotation and\n translation components from the original affine. This option will not flip/swap axes\n of the original data.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n image_only: return just the image or the image, the old affine and new affine. Default is `False`.\n\n \"\"\"\n self.pixdim = np.array(ensure_tuple(pixdim), dtype=np.float64)\n self.diagonal = diagonal\n self.image_only = image_only\n\n self.sp_resample = SpatialResample(\n mode=look_up_option(mode, GridSampleMode),\n padding_mode=look_up_option(padding_mode, GridSamplePadMode),\n align_corners=align_corners,\n dtype=dtype,\n )\n\n def __call__(\n self,\n data_array: NdarrayOrTensor,\n affine: Optional[NdarrayOrTensor] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n align_corners: Optional[bool] = None,\n dtype: DtypeLike = None,\n output_spatial_shape: Optional[Union[Sequence[int], np.ndarray, int]] = None,\n ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:\n \"\"\"\n Args:\n data_array: in shape (num_channels, H[, W, ...]).\n affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. 
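A minimal usage sketch for ``Spacing`` defined above; the data and affine below are hypothetical::\n\n    import numpy as np\n    spacing = Spacing(pixdim=(1.0, 1.0, 1.0))\n    data = np.zeros((1, 64, 64, 32), dtype=np.float32)  # hypothetical channel-first 3D image\n    affine = np.diag([2.0, 2.0, 3.0, 1.0])              # hypothetical 2 x 2 x 3 mm affine\n    new_data, old_affine, new_affine = spacing(data, affine)\n\n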
Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``self.dtype``.\n If None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n output_spatial_shape: specify the shape of the output data_array. This is typically useful for\n the inverse of `Spacingd` where sometimes we could not compute the exact shape due to the quantization\n error with the affine.\n\n Raises:\n ValueError: When ``data_array`` has no spatial dimensions.\n ValueError: When ``pixdim`` is nonpositive.\n\n Returns:\n data_array (resampled into `self.pixdim`), original affine, current affine.\n\n \"\"\"\n sr = int(data_array.ndim - 1)\n if sr <= 0:\n raise ValueError(\"data_array must have at least one spatial dimension.\")\n if affine is None:\n # default to identity\n affine_np = affine = np.eye(sr + 1, dtype=np.float64)\n affine_ = np.eye(sr + 1, dtype=np.float64)\n else:\n affine_np, *_ = convert_data_type(affine, np.ndarray)\n affine_ = to_affine_nd(sr, affine_np)\n\n out_d = self.pixdim[:sr]\n if out_d.size < sr:\n out_d = np.append(out_d, [1.0] * (sr - out_d.size))\n\n # compute output affine, shape and offset\n new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal)\n output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine)\n new_affine[:sr, -1] = offset[:sr]\n output_data, new_affine = self.sp_resample(\n data_array,\n src_affine=affine,\n dst_affine=new_affine,\n spatial_size=list(output_shape) if output_spatial_shape is None else output_spatial_shape,\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners,\n dtype=dtype,\n )\n new_affine = to_affine_nd(affine_np, new_affine)\n new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)\n\n if self.image_only:\n return output_data\n return output_data, affine, new_affine\n\n\nclass Orientation(Transform):\n \"\"\"\n Change the input image's orientation into the specified based on `axcodes`.\n \"\"\"\n\n backend = [TransformBackends.NUMPY, TransformBackends.TORCH]\n\n def __init__(\n self,\n axcodes: Optional[str] = None,\n as_closest_canonical: bool = False,\n labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip(\"LPI\", \"RAS\")),\n image_only: bool = False,\n ) -> None:\n \"\"\"\n Args:\n axcodes: N elements sequence for spatial ND input's orientation.\n e.g. 
axcodes='RAS' represents 3D orientation:\n (Left, Right), (Posterior, Anterior), (Inferior, Superior).\n default orientation labels options are: 'L' and 'R' for the first dimension,\n 'P' and 'A' for the second, 'I' and 'S' for the third.\n as_closest_canonical: if True, load the image as closest to canonical axis format.\n labels: optional, None or sequence of (2,) sequences\n (2,) sequences are labels for (beginning, end) of output axis.\n Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.\n image_only: if True return only the image volume, otherwise return (image, affine, new_affine).\n\n Raises:\n ValueError: When ``axcodes=None`` and ``as_closest_canonical=True``. Incompatible values.\n\n See Also: `nibabel.orientations.ornt2axcodes`.\n\n \"\"\"\n if axcodes is None and not as_closest_canonical:\n raise ValueError(\"Incompatible values: axcodes=None and as_closest_canonical=True.\")\n if axcodes is not None and as_closest_canonical:\n warnings.warn(\"using as_closest_canonical=True, axcodes ignored.\")\n self.axcodes = axcodes\n self.as_closest_canonical = as_closest_canonical\n self.labels = labels\n self.image_only = image_only\n\n def __call__(\n self, data_array: NdarrayOrTensor, affine: Optional[NdarrayOrTensor] = None\n ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:\n \"\"\"\n original orientation of `data_array` is defined by `affine`.\n\n Args:\n data_array: in shape (num_channels, H[, W, ...]).\n affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.\n\n Raises:\n ValueError: When ``data_array`` has no spatial dimensions.\n ValueError: When ``axcodes`` spatiality differs from ``data_array``.\n\n Returns:\n data_array [reoriented in `self.axcodes`] if `self.image_only`, else\n (data_array [reoriented in `self.axcodes`], original axcodes, current axcodes).\n\n \"\"\"\n spatial_shape = data_array.shape[1:]\n sr = len(spatial_shape)\n if sr <= 0:\n raise ValueError(\"data_array must have at least one spatial dimension.\")\n affine_: np.ndarray\n if affine is None:\n # default to identity\n affine_np = affine = np.eye(sr + 1, dtype=np.float64)\n affine_ = np.eye(sr + 1, dtype=np.float64)\n else:\n affine_np, *_ = convert_data_type(affine, np.ndarray)\n affine_ = to_affine_nd(sr, affine_np)\n\n src = nib.io_orientation(affine_)\n if self.as_closest_canonical:\n spatial_ornt = src\n else:\n if self.axcodes is None:\n raise ValueError(\"Incompatible values: axcodes=None and as_closest_canonical=True.\")\n if sr < len(self.axcodes):\n warnings.warn(\n f\"axcodes ('{self.axcodes}') length is smaller than the number of input spatial dimensions D={sr}.\\n\"\n f\"{self.__class__.__name__}: input spatial shape is {spatial_shape}, num. 
channels is {data_array.shape[0]},\"\n \"please make sure the input is in the channel-first format.\"\n )\n dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)\n if len(dst) < sr:\n raise ValueError(\n f\"axcodes must match data_array spatially, got axcodes={len(self.axcodes)}D data_array={sr}D\"\n )\n spatial_ornt = nib.orientations.ornt_transform(src, dst)\n new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, spatial_shape)\n _is_tensor = isinstance(data_array, torch.Tensor)\n spatial_ornt[:, 0] += 1 # skip channel dim\n spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])\n axes = [ax for ax, flip in enumerate(spatial_ornt[:, 1]) if flip == -1]\n if axes:\n data_array = (\n torch.flip(data_array, dims=axes) if _is_tensor else np.flip(data_array, axis=axes) # type: ignore\n )\n full_transpose = np.arange(len(data_array.shape))\n full_transpose[: len(spatial_ornt)] = np.argsort(spatial_ornt[:, 0])\n if not np.all(full_transpose == np.arange(len(data_array.shape))):\n if _is_tensor:\n data_array = data_array.permute(full_transpose.tolist()) # type: ignore\n else:\n data_array = data_array.transpose(full_transpose) # type: ignore\n out, *_ = convert_to_dst_type(src=data_array, dst=data_array)\n new_affine = to_affine_nd(affine_np, new_affine)\n new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)\n\n if self.image_only:\n return out\n return out, affine, new_affine\n\n\nclass Flip(Transform):\n \"\"\"\n Reverses the order of elements along the given spatial axis. Preserves shape.\n Uses ``np.flip`` in practice. See numpy.flip for additional details:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html.\n\n Args:\n spatial_axis: spatial axes along which to flip over. Default is None.\n The default `axis=None` will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n\n \"\"\"\n\n backend = [TransformBackends.TORCH, TransformBackends.NUMPY]\n\n def __init__(self, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:\n self.spatial_axis = spatial_axis\n\n def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n if isinstance(img, np.ndarray):\n return np.ascontiguousarray(np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis)))\n return torch.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))\n\n\nclass Resize(Transform):\n \"\"\"\n Resize the input image to given spatial size (with scaling, not cropping/padding).\n Implemented using :py:class:`torch.nn.functional.interpolate`.\n\n Args:\n spatial_size: expected shape of spatial dimensions after resize operation.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. 
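Minimal usage sketches for ``Orientation`` and ``Flip`` defined above, assuming `nibabel` is available; the data below is hypothetical::\n\n    import numpy as np\n    data = np.zeros((2, 4, 4, 4), dtype=np.float32)  # hypothetical channel-first 3D image\n    reoriented, affine, new_affine = Orientation(axcodes=\"RAS\")(data, np.eye(4))\n    flipped = Flip(spatial_axis=0)(data)  # reverse the first spatial axis\n\n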
For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n size_mode: should be \"all\" or \"longest\", if \"all\", will use `spatial_size` for all the spatial dims,\n if \"longest\", rescale the image so that only the longest side is equal to specified `spatial_size`,\n which must be an int number in this case, keeping the aspect ratio of the initial image, refer to:\n https://albumentations.ai/docs/api_reference/augmentations/geometric/resize/\n #albumentations.augmentations.geometric.resize.LongestMaxSize.\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``\"area\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n \"\"\"\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n spatial_size: Union[Sequence[int], int],\n size_mode: str = \"all\",\n mode: Union[InterpolateMode, str] = InterpolateMode.AREA,\n align_corners: Optional[bool] = None,\n ) -> None:\n self.size_mode = look_up_option(size_mode, [\"all\", \"longest\"])\n self.spatial_size = spatial_size\n self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)\n self.align_corners = align_corners\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n mode: Optional[Union[InterpolateMode, str]] = None,\n align_corners: Optional[bool] = None,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]).\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. 
Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n\n Raises:\n ValueError: When ``self.spatial_size`` length is less than ``img`` spatial dimensions.\n\n \"\"\"\n img_, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float)\n if self.size_mode == \"all\":\n input_ndim = img_.ndim - 1 # spatial ndim\n output_ndim = len(ensure_tuple(self.spatial_size))\n if output_ndim > input_ndim:\n input_shape = ensure_tuple_size(img_.shape, output_ndim + 1, 1)\n img_ = img_.reshape(input_shape)\n elif output_ndim < input_ndim:\n raise ValueError(\n \"len(spatial_size) must be greater or equal to img spatial dimensions, \"\n f\"got spatial_size={output_ndim} img={input_ndim}.\"\n )\n spatial_size_ = fall_back_tuple(self.spatial_size, img_.shape[1:])\n else: # for the \"longest\" mode\n img_size = img_.shape[1:]\n if not isinstance(self.spatial_size, int):\n raise ValueError(\"spatial_size must be an int number if size_mode is 'longest'.\")\n scale = self.spatial_size / max(img_size)\n spatial_size_ = tuple(int(round(s * scale)) for s in img_size)\n resized = torch.nn.functional.interpolate(\n input=img_.unsqueeze(0),\n size=spatial_size_,\n mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,\n align_corners=self.align_corners if align_corners is None else align_corners,\n )\n out, *_ = convert_to_dst_type(resized.squeeze(0), img)\n return out\n\n\nclass Rotate(Transform, ThreadUnsafe):\n \"\"\"\n Rotates an input image by given angle using :py:class:`monai.networks.layers.AffineTransform`.\n\n Args:\n angle: Rotation angle(s) in radians. should a float for 2D, three floats for 3D.\n keep_size: If it is True, the output shape is kept the same as the input.\n If it is False, the output shape is adapted so that the\n input array is contained completely in the output. Default is True.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Defaults to False.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``np.float32``.\n If None, use the data type of input data. 
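A minimal usage sketch for ``Resize`` defined above; the shapes below are hypothetical::\n\n    import numpy as np\n    img = np.zeros((1, 64, 64), dtype=np.float32)       # hypothetical channel-first 2D image\n    resized = Resize(spatial_size=(32, -1))(img)        # -1 keeps the second spatial dim at 64\n    longest = Resize(spatial_size=32, size_mode=\"longest\")(img)  # longest side scaled to 32\n\n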
To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n angle: Union[Sequence[float], float],\n keep_size: bool = True,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: Union[DtypeLike, torch.dtype] = np.float32,\n ) -> None:\n self.angle = angle\n self.keep_size = keep_size\n self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)\n self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)\n self.align_corners = align_corners\n self.dtype = dtype\n self._rotation_matrix: Optional[NdarrayOrTensor] = None\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n align_corners: Optional[bool] = None,\n dtype: Union[DtypeLike, torch.dtype] = None,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: [chns, H, W] or [chns, H, W, D].\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``self.dtype``.\n If None, use the data type of input data. 
To be compatible with other modules,\n the output data type is always ``np.float32``.\n\n Raises:\n ValueError: When ``img`` spatially is not one of [2D, 3D].\n\n \"\"\"\n _dtype = dtype or self.dtype or img.dtype\n\n img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype)\n\n im_shape = np.asarray(img_t.shape[1:]) # spatial dimensions\n input_ndim = len(im_shape)\n if input_ndim not in (2, 3):\n raise ValueError(f\"Unsupported img dimension: {input_ndim}, available options are [2, 3].\")\n _angle = ensure_tuple_rep(self.angle, 1 if input_ndim == 2 else 3)\n transform = create_rotate(input_ndim, _angle)\n shift = create_translate(input_ndim, ((im_shape - 1) / 2).tolist())\n if self.keep_size:\n output_shape = im_shape\n else:\n corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing=\"ij\")).reshape(\n (len(im_shape), -1)\n )\n corners = transform[:-1, :-1] @ corners # type: ignore\n output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)\n shift_1 = create_translate(input_ndim, (-(output_shape - 1) / 2).tolist())\n transform = shift @ transform @ shift_1\n\n transform_t, *_ = convert_to_dst_type(transform, img_t)\n\n xform = AffineTransform(\n normalized=False,\n mode=look_up_option(mode or self.mode, GridSampleMode),\n padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),\n align_corners=self.align_corners if align_corners is None else align_corners,\n reverse_indexing=True,\n )\n output: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=output_shape).float().squeeze(0)\n self._rotation_matrix = transform\n out: NdarrayOrTensor\n out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype)\n return out\n\n def get_rotation_matrix(self) -> Optional[NdarrayOrTensor]:\n \"\"\"\n Get the most recently applied rotation matrix\n This is not thread-safe.\n \"\"\"\n return self._rotation_matrix\n\n\nclass Zoom(Transform):\n \"\"\"\n Zooms an ND image using :py:class:`torch.nn.functional.interpolate`.\n For details, please see https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html.\n\n Different from :py:class:`monai.transforms.resize`, this transform takes scaling factors\n as input, and provides an option of preserving the input spatial size.\n\n Args:\n zoom: The zoom factor along the spatial axes.\n If a float, zoom is the same for each spatial axis.\n If a sequence, zoom should contain one value for each spatial axis.\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``\"area\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n padding_mode: available modes for numpy array:{``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``,\n ``\"mean\"``, ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n available modes for PyTorch Tensor: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}.\n One of the listed string values or a user supplied function. Defaults to ``\"constant\"``.\n The mode to pad data after zooming.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. 
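A minimal usage sketch for ``Rotate`` defined above; the angle and shape below are hypothetical::\n\n    import numpy as np\n    img = np.zeros((1, 64, 64), dtype=np.float32)       # hypothetical channel-first 2D image\n    rotator = Rotate(angle=np.pi / 4, keep_size=True)   # a 2D input takes a single float angle\n    rotated = rotator(img)\n    matrix = rotator.get_rotation_matrix()              # most recently applied rotation matrix\n\n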
Default: None.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n keep_size: Should keep original size (padding/slicing if needed), default is True.\n kwargs: other arguments for the `np.pad` or `torch.pad` function.\n note that `np.pad` treats channel dimension as the first dimension.\n\n \"\"\"\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n zoom: Union[Sequence[float], float],\n mode: Union[InterpolateMode, str] = InterpolateMode.AREA,\n padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,\n align_corners: Optional[bool] = None,\n keep_size: bool = True,\n **kwargs,\n ) -> None:\n self.zoom = zoom\n self.mode: InterpolateMode = InterpolateMode(mode)\n self.padding_mode = padding_mode\n self.align_corners = align_corners\n self.keep_size = keep_size\n self.kwargs = kwargs\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n mode: Optional[Union[InterpolateMode, str]] = None,\n padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,\n align_corners: Optional[bool] = None,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]).\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n padding_mode: available modes for numpy array:{``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``,\n ``\"mean\"``, ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n available modes for PyTorch Tensor: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}.\n One of the listed string values or a user supplied function. Defaults to ``\"constant\"``.\n The mode to pad data after zooming.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. 
Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n\n \"\"\"\n img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float32)\n\n _zoom = ensure_tuple_rep(self.zoom, img.ndim - 1) # match the spatial image dim\n zoomed: NdarrayOrTensor = torch.nn.functional.interpolate( # type: ignore\n recompute_scale_factor=True,\n input=img_t.unsqueeze(0),\n scale_factor=list(_zoom),\n mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,\n align_corners=self.align_corners if align_corners is None else align_corners,\n )\n zoomed = zoomed.squeeze(0)\n\n if self.keep_size and not np.allclose(img_t.shape, zoomed.shape):\n\n pad_vec = [(0, 0)] * len(img_t.shape)\n slice_vec = [slice(None)] * len(img_t.shape)\n for idx, (od, zd) in enumerate(zip(img_t.shape, zoomed.shape)):\n diff = od - zd\n half = abs(diff) // 2\n if diff > 0: # need padding\n pad_vec[idx] = (half, diff - half)\n elif diff < 0: # need slicing\n slice_vec[idx] = slice(half, half + od)\n\n padder = Pad(pad_vec, padding_mode or self.padding_mode)\n zoomed = padder(zoomed)\n zoomed = zoomed[tuple(slice_vec)]\n\n out, *_ = convert_to_dst_type(zoomed, dst=img)\n return out\n\n\nclass Rotate90(Transform):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by `axes`.\n See np.rot90 for additional details:\n https://numpy.org/doc/stable/reference/generated/numpy.rot90.html.\n\n \"\"\"\n\n backend = [TransformBackends.TORCH, TransformBackends.NUMPY]\n\n def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None:\n \"\"\"\n Args:\n k: number of times to rotate by 90 degrees.\n spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.\n Default: (0, 1), this is the first two axis in spatial dimensions.\n If axis is negative it counts from the last to the first axis.\n \"\"\"\n self.k = k\n spatial_axes_: Tuple[int, int] = ensure_tuple(spatial_axes) # type: ignore\n if len(spatial_axes_) != 2:\n raise ValueError(\"spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.\")\n self.spatial_axes = spatial_axes_\n\n def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n rot90: Callable = torch.rot90 if isinstance(img, torch.Tensor) else np.rot90 # type: ignore\n out: NdarrayOrTensor = rot90(img, self.k, map_spatial_axes(img.ndim, self.spatial_axes))\n out, *_ = convert_data_type(out, dtype=img.dtype)\n return out\n\n\nclass RandRotate90(RandomizableTransform):\n \"\"\"\n With probability `prob`, input arrays are rotated by 90 degrees\n in the plane specified by `spatial_axes`.\n \"\"\"\n\n backend = Rotate90.backend\n\n def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1)) -> None:\n \"\"\"\n Args:\n prob: probability of rotating.\n (Default 0.1, with 10% probability it returns a rotated array)\n max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`, (Default 3).\n spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.\n Default: (0, 1), this is the first two axis in spatial dimensions.\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.max_k = max_k\n self.spatial_axes = spatial_axes\n\n self._rand_k = 0\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self._rand_k = 
self.R.randint(self.max_k) + 1\n\n def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n randomize: whether to execute `randomize()` function first, default to True.\n \"\"\"\n if randomize:\n self.randomize()\n\n if not self._do_transform:\n return img\n\n return Rotate90(self._rand_k, self.spatial_axes)(img)\n\n\nclass RandRotate(RandomizableTransform):\n \"\"\"\n Randomly rotate the input arrays.\n\n Args:\n range_x: Range of rotation angle in radians in the plane defined by the first and second axes.\n If single number, angle is uniformly sampled from (-range_x, range_x).\n range_y: Range of rotation angle in radians in the plane defined by the first and third axes.\n If single number, angle is uniformly sampled from (-range_y, range_y).\n range_z: Range of rotation angle in radians in the plane defined by the second and third axes.\n If single number, angle is uniformly sampled from (-range_z, range_z).\n prob: Probability of rotation.\n keep_size: If it is False, the output shape is adapted so that the\n input array is contained completely in the output.\n If it is True, the output shape is the same as the input. Default is True.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Defaults to False.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``np.float32``.\n If None, use the data type of input data. 
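Minimal usage sketches for ``Zoom`` and ``RandRotate90`` defined above; shapes and probabilities below are hypothetical::\n\n    import numpy as np\n    img = np.zeros((1, 64, 64), dtype=np.float32)  # hypothetical channel-first 2D image\n    zoomed = Zoom(zoom=0.8, keep_size=True)(img)   # shrunk, then padded back to 64 x 64\n    maybe_rotated = RandRotate90(prob=0.5, max_k=3)(img)  # rotated by k*90 degrees with probability 0.5\n\n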
To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n\n backend = Rotate.backend\n\n def __init__(\n self,\n range_x: Union[Tuple[float, float], float] = 0.0,\n range_y: Union[Tuple[float, float], float] = 0.0,\n range_z: Union[Tuple[float, float], float] = 0.0,\n prob: float = 0.1,\n keep_size: bool = True,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: Union[DtypeLike, torch.dtype] = np.float32,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.range_x = ensure_tuple(range_x)\n if len(self.range_x) == 1:\n self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))\n self.range_y = ensure_tuple(range_y)\n if len(self.range_y) == 1:\n self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))\n self.range_z = ensure_tuple(range_z)\n if len(self.range_z) == 1:\n self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))\n\n self.keep_size = keep_size\n self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)\n self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)\n self.align_corners = align_corners\n self.dtype = dtype\n\n self.x = 0.0\n self.y = 0.0\n self.z = 0.0\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])\n self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])\n self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n align_corners: Optional[bool] = None,\n dtype: Union[DtypeLike, torch.dtype] = None,\n randomize: bool = True,\n get_matrix: bool = False,\n ):\n \"\"\"\n Args:\n img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n align_corners: Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. Defaults to ``self.dtype``.\n If None, use the data type of input data. 
To be compatible with other modules,\n the output data type is always ``np.float32``.\n randomize: whether to execute `randomize()` function first, default to True.\n get_matrix: whether to return the rotated image and rotate matrix together, default to False.\n \"\"\"\n if randomize:\n self.randomize()\n\n if not self._do_transform:\n return img\n\n rotator = Rotate(\n angle=self.x if img.ndim == 3 else (self.x, self.y, self.z),\n keep_size=self.keep_size,\n mode=look_up_option(mode or self.mode, GridSampleMode),\n padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),\n align_corners=self.align_corners if align_corners is None else align_corners,\n dtype=dtype or self.dtype or img.dtype,\n )\n img = rotator(img)\n return (img, rotator.get_rotation_matrix()) if get_matrix else img\n\n\nclass RandFlip(RandomizableTransform):\n \"\"\"\n Randomly flips the image along axes. Preserves shape.\n See numpy.flip for additional details.\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html\n\n Args:\n prob: Probability of flipping.\n spatial_axis: Spatial axes along which to flip over. Default is None.\n \"\"\"\n\n backend = Flip.backend\n\n def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:\n RandomizableTransform.__init__(self, prob)\n self.flipper = Flip(spatial_axis=spatial_axis)\n\n def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n randomize: whether to execute `randomize()` function first, default to True.\n \"\"\"\n if randomize:\n self.randomize(None)\n\n if not self._do_transform:\n return img\n\n return self.flipper(img)\n\n\nclass RandAxisFlip(RandomizableTransform):\n \"\"\"\n Randomly select a spatial axis and flip along it.\n See numpy.flip for additional details.\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html\n\n Args:\n prob: Probability of flipping.\n\n \"\"\"\n\n backend = Flip.backend\n\n def __init__(self, prob: float = 0.1) -> None:\n RandomizableTransform.__init__(self, prob)\n self._axis: Optional[int] = None\n\n def randomize(self, data: NdarrayOrTensor) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self._axis = self.R.randint(data.ndim - 1)\n\n def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n randomize: whether to execute `randomize()` function first, default to True.\n \"\"\"\n if randomize:\n self.randomize(data=img)\n\n if not self._do_transform:\n return img\n\n return Flip(spatial_axis=self._axis)(img)\n\n\nclass RandZoom(RandomizableTransform):\n \"\"\"\n Randomly zooms input arrays with given probability within given zoom range.\n\n Args:\n prob: Probability of zooming.\n min_zoom: Min zoom factor. Can be float or sequence same size as image.\n If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims\n to keep the original spatial shape ratio.\n If a sequence, min_zoom should contain one value for each spatial axis.\n If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.\n max_zoom: Max zoom factor. 
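Minimal usage sketches for ``RandRotate`` and ``RandFlip`` defined above; the ranges below are hypothetical::\n\n    import numpy as np\n    img = np.zeros((1, 64, 64), dtype=np.float32)        # hypothetical channel-first 2D image\n    out = RandRotate(range_x=np.pi / 12, prob=0.8)(img)  # a 2D input uses only range_x\n    out = RandFlip(prob=0.5, spatial_axis=0)(out)\n\n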
Can be float or sequence same size as image.\n If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims\n to keep the original spatial shape ratio.\n If a sequence, max_zoom should contain one value for each spatial axis.\n If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``\"area\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n padding_mode: available modes for numpy array:{``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``,\n ``\"mean\"``, ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n available modes for PyTorch Tensor: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}.\n One of the listed string values or a user supplied function. Defaults to ``\"constant\"``.\n The mode to pad data after zooming.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n keep_size: Should keep original size (pad if needed), default is True.\n kwargs: other arguments for the `np.pad` or `torch.pad` function.\n note that `np.pad` treats channel dimension as the first dimension.\n\n \"\"\"\n\n backend = Zoom.backend\n\n def __init__(\n self,\n prob: float = 0.1,\n min_zoom: Union[Sequence[float], float] = 0.9,\n max_zoom: Union[Sequence[float], float] = 1.1,\n mode: Union[InterpolateMode, str] = InterpolateMode.AREA,\n padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,\n align_corners: Optional[bool] = None,\n keep_size: bool = True,\n **kwargs,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.min_zoom = ensure_tuple(min_zoom)\n self.max_zoom = ensure_tuple(max_zoom)\n if len(self.min_zoom) != len(self.max_zoom):\n raise AssertionError(\"min_zoom and max_zoom must have same length.\")\n self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)\n self.padding_mode = padding_mode\n self.align_corners = align_corners\n self.keep_size = keep_size\n self.kwargs = kwargs\n\n self._zoom: Sequence[float] = [1.0]\n\n def randomize(self, img: NdarrayOrTensor) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]\n if len(self._zoom) == 1:\n # to keep the spatial shape ratio, use same random zoom factor for all dims\n self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 1)\n elif len(self._zoom) == 2 and img.ndim > 3:\n # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim\n self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1])\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n mode: Optional[Union[InterpolateMode, str]] = None,\n padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,\n align_corners: Optional[bool] = None,\n randomize: bool = True,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, 
D).\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n padding_mode: available modes for numpy array:{``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``,\n ``\"mean\"``, ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n available modes for PyTorch Tensor: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}.\n One of the listed string values or a user supplied function. Defaults to ``\"constant\"``.\n The mode to pad data after zooming.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html\n randomize: whether to execute `randomize()` function first, default to True.\n\n \"\"\"\n # match the spatial image dim\n if randomize:\n self.randomize(img=img)\n\n if not self._do_transform:\n return img\n\n return Zoom(\n self._zoom,\n keep_size=self.keep_size,\n mode=look_up_option(mode or self.mode, InterpolateMode),\n padding_mode=padding_mode or self.padding_mode,\n align_corners=align_corners or self.align_corners,\n **self.kwargs,\n )(img)\n\n\nclass AffineGrid(Transform):\n \"\"\"\n Affine transforms on the coordinates.\n\n Args:\n rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.\n Defaults to no rotation.\n shear_params: shearing factors for affine matrix, take a 3D affine as example::\n\n [\n [1.0, params[0], params[1], 0.0],\n [params[2], 1.0, params[3], 0.0],\n [params[4], params[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n\n a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.\n translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in\n pixel/voxel relative to the center of the input image. Defaults to no translation.\n scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,\n a tuple of 3 floats for 3D. Defaults to `1.0`.\n dtype: data type for the grid computation. Defaults to ``np.float32``.\n If ``None``, use the data type of input data (if `grid` is provided).\n device: device on which the tensor will be allocated, if a new grid is generated.\n affine: If applied, ignore the params (`rotate_params`, etc.) and use the\n supplied matrix. Should be square with each side = num of image spatial\n dimensions + 1.\n\n .. 
deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n\n backend = [TransformBackends.TORCH, TransformBackends.NUMPY]\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n rotate_params: Optional[Union[Sequence[float], float]] = None,\n shear_params: Optional[Union[Sequence[float], float]] = None,\n translate_params: Optional[Union[Sequence[float], float]] = None,\n scale_params: Optional[Union[Sequence[float], float]] = None,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n dtype: DtypeLike = np.float32,\n affine: Optional[NdarrayOrTensor] = None,\n ) -> None:\n self.rotate_params = rotate_params\n self.shear_params = shear_params\n self.translate_params = translate_params\n self.scale_params = scale_params\n self.device = device\n self.dtype = dtype\n self.affine = affine\n\n def __call__(\n self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None\n ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:\n \"\"\"\n The grid can be initialized with a `spatial_size` parameter, or provided directly as `grid`.\n Therefore, either `spatial_size` or `grid` must be provided.\n When initialising from `spatial_size`, the backend \"torch\" will be used.\n\n Args:\n spatial_size: output grid size.\n grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.\n\n Raises:\n ValueError: When ``grid=None`` and ``spatial_size=None``. Incompatible values.\n\n \"\"\"\n if grid is None: # create grid from spatial_size\n if spatial_size is None:\n raise ValueError(\"Incompatible values: grid=None and spatial_size=None.\")\n grid = create_grid(spatial_size, device=self.device, backend=\"torch\", dtype=self.dtype)\n _b = TransformBackends.TORCH if isinstance(grid, torch.Tensor) else TransformBackends.NUMPY\n _device = grid.device if isinstance(grid, torch.Tensor) else self.device\n affine: NdarrayOrTensor\n if self.affine is None:\n spatial_dims = len(grid.shape) - 1\n affine = (\n torch.eye(spatial_dims + 1, device=_device)\n if _b == TransformBackends.TORCH\n else np.eye(spatial_dims + 1)\n )\n if self.rotate_params:\n affine = affine @ create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b)\n if self.shear_params:\n affine = affine @ create_shear(spatial_dims, self.shear_params, device=_device, backend=_b)\n if self.translate_params:\n affine = affine @ create_translate(spatial_dims, self.translate_params, device=_device, backend=_b)\n if self.scale_params:\n affine = affine @ create_scale(spatial_dims, self.scale_params, device=_device, backend=_b)\n else:\n affine = self.affine\n\n grid, *_ = convert_data_type(grid, torch.Tensor, device=_device, dtype=self.dtype or grid.dtype)\n affine, *_ = convert_to_dst_type(affine, grid)\n\n grid = (affine @ grid.reshape((grid.shape[0], -1))).reshape([-1] + list(grid.shape[1:]))\n return grid, affine\n\n\nclass RandAffineGrid(Randomizable, Transform):\n \"\"\"\n Generate randomised affine grid.\n\n \"\"\"\n\n backend = AffineGrid.backend\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n rotate_range: angle range in radians. 
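A minimal usage sketch for ``AffineGrid`` defined above; the parameters below are hypothetical::\n\n    import numpy as np\n    affine_grid = AffineGrid(rotate_params=np.pi / 6, scale_params=(1.1, 0.9))\n    grid, affine = affine_grid(spatial_size=(64, 64))  # a (3, 64, 64) grid and the applied affine\n\n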
If element `i` is a pair of (min, max) values, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.\n This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be\n in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`\n for dim0 and nothing for the remaining dimensions.\n shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select\n shearing factors(a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for affine matrix,\n take a 3D affine as example::\n\n [\n [1.0, params[0], params[1], 0.0],\n [params[2], 1.0, params[3], 0.0],\n [params[4], params[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n\n translate_range: translate range with format matching `rotate_range`, it defines the range to randomly\n select voxels to translate for every spatial dims.\n scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select\n the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1.0).\n device: device to store the output grid data.\n\n See also:\n - :py:meth:`monai.transforms.utils.create_rotate`\n - :py:meth:`monai.transforms.utils.create_shear`\n - :py:meth:`monai.transforms.utils.create_translate`\n - :py:meth:`monai.transforms.utils.create_scale`\n\n .. deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n self.rotate_range = ensure_tuple(rotate_range)\n self.shear_range = ensure_tuple(shear_range)\n self.translate_range = ensure_tuple(translate_range)\n self.scale_range = ensure_tuple(scale_range)\n\n self.rotate_params: Optional[List[float]] = None\n self.shear_params: Optional[List[float]] = None\n self.translate_params: Optional[List[float]] = None\n self.scale_params: Optional[List[float]] = None\n\n self.device = device\n self.affine: Optional[NdarrayOrTensor] = None\n\n def _get_rand_param(self, param_range, add_scalar: float = 0.0):\n out_param = []\n for f in param_range:\n if issequenceiterable(f):\n if len(f) != 2:\n raise ValueError(\"If giving range as [min,max], should only have two elements per dim.\")\n out_param.append(self.R.uniform(f[0], f[1]) + add_scalar)\n elif f is not None:\n out_param.append(self.R.uniform(-f, f) + add_scalar)\n return out_param\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self.rotate_params = self._get_rand_param(self.rotate_range)\n self.shear_params = self._get_rand_param(self.shear_range)\n self.translate_params = self._get_rand_param(self.translate_range)\n self.scale_params = self._get_rand_param(self.scale_range, 1.0)\n\n def __call__(\n self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n spatial_size: output grid size.\n grid: grid to be transformed. 
Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.\n\n Returns:\n a 2D (3xHxW) or 3D (4xHxWxD) grid.\n \"\"\"\n self.randomize()\n affine_grid = AffineGrid(\n rotate_params=self.rotate_params,\n shear_params=self.shear_params,\n translate_params=self.translate_params,\n scale_params=self.scale_params,\n device=self.device,\n )\n _grid: NdarrayOrTensor\n _grid, self.affine = affine_grid(spatial_size, grid)\n return _grid\n\n def get_transformation_matrix(self) -> Optional[NdarrayOrTensor]:\n \"\"\"Get the most recently applied transformation matrix\"\"\"\n return self.affine\n\n\nclass RandDeformGrid(Randomizable, Transform):\n \"\"\"\n Generate random deformation grid.\n \"\"\"\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n spacing: Union[Sequence[float], float],\n magnitude_range: Tuple[float, float],\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n spacing: spacing of the grid in 2D or 3D.\n e.g., spacing=(1, 1) indicates pixel-wise deformation in 2D,\n spacing=(1, 1, 1) indicates voxel-wise deformation in 3D,\n spacing=(2, 2) indicates deformation field defined on every other pixel in 2D.\n magnitude_range: the random offsets will be generated from\n `uniform[magnitude[0], magnitude[1])`.\n as_tensor_output: whether to output tensor instead of numpy array.\n defaults to True.\n device: device to store the output grid data.\n \"\"\"\n self.spacing = spacing\n self.magnitude = magnitude_range\n\n self.rand_mag = 1.0\n self.as_tensor_output = as_tensor_output\n self.random_offset: np.ndarray\n self.device = device\n\n def randomize(self, grid_size: Sequence[int]) -> None:\n self.random_offset = self.R.normal(size=([len(grid_size)] + list(grid_size))).astype(np.float32, copy=False)\n self.rand_mag = self.R.uniform(self.magnitude[0], self.magnitude[1])\n\n def __call__(self, spatial_size: Sequence[int]):\n \"\"\"\n Args:\n spatial_size: spatial size of the grid.\n \"\"\"\n self.spacing = fall_back_tuple(self.spacing, (1.0,) * len(spatial_size))\n control_grid = create_control_grid(spatial_size, self.spacing, device=self.device, backend=\"torch\")\n self.randomize(control_grid.shape[1:])\n _offset, *_ = convert_to_dst_type(self.rand_mag * self.random_offset, control_grid)\n control_grid[: len(spatial_size)] += _offset\n if not self.as_tensor_output:\n control_grid, *_ = convert_data_type(control_grid, output_type=np.ndarray, dtype=np.float32)\n return control_grid\n\n\nclass Resample(Transform):\n\n backend = [TransformBackends.TORCH]\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n as_tensor_output: bool = True,\n norm_coords: bool = True,\n device: Optional[torch.device] = None,\n dtype: DtypeLike = np.float64,\n ) -> None:\n \"\"\"\n computes output image using values from `img`, locations from `grid` using pytorch.\n supports spatially 2D or 3D (num_channels, H, W[, D]).\n\n Args:\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n norm_coords: whether to normalize the coordinates from `[-(size-1)/2, (size-1)/2]` to\n `[0, size - 1]` (for ``monai/csrc`` implementation) or\n `[-1, 1]` (for torch ``grid_sample`` implementation) to be compatible with the underlying\n resampling API.\n device: device on which the tensor will be allocated.\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If ``None``, use the data type of input data. To be compatible with other modules,\n the output data type is always `float32`.\n\n .. deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)\n self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)\n self.norm_coords = norm_coords\n self.device = device\n self.dtype = dtype\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n grid: Optional[NdarrayOrTensor] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n dtype: DtypeLike = None,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]).\n grid: shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.\n if ``norm_coords`` is True, the grid values must be in `[-(size-1)/2, (size-1)/2]`.\n if ``USE_COMPILED=True`` and ``norm_coords=False``, grid values must be in `[0, size-1]`.\n if ``USE_COMPILED=False`` and ``norm_coords=False``, grid values must be in `[-1, 1]`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n dtype: data type for resampling computation. 
Defaults to ``self.dtype``.\n To be compatible with other modules, the output data type is always `float32`.\n\n See also:\n :py:const:`monai.config.USE_COMPILED`\n \"\"\"\n if grid is None:\n raise ValueError(\"Unknown grid.\")\n _device = img.device if isinstance(img, torch.Tensor) else self.device\n _dtype = dtype or self.dtype or img.dtype\n img_t, *_ = convert_data_type(img, torch.Tensor, device=_device, dtype=_dtype)\n grid_t = convert_to_dst_type(grid, img_t)[0]\n if grid_t is grid: # copy if needed (convert_data_type converts to contiguous)\n grid_t = grid_t.clone(memory_format=torch.contiguous_format)\n sr = min(len(img_t.shape[1:]), 3)\n\n if USE_COMPILED:\n if self.norm_coords:\n for i, dim in enumerate(img_t.shape[1 : 1 + sr]):\n grid_t[i] = (max(dim, 2) / 2.0 - 0.5 + grid_t[i]) / grid_t[-1:]\n grid_t = moveaxis(grid_t[:sr], 0, -1) # type: ignore\n _padding_mode = self.padding_mode if padding_mode is None else padding_mode\n _padding_mode = _padding_mode.value if isinstance(_padding_mode, GridSamplePadMode) else _padding_mode\n bound = 1 if _padding_mode == \"reflection\" else _padding_mode\n _interp_mode = self.mode if mode is None else mode\n _interp_mode = _interp_mode.value if isinstance(_interp_mode, GridSampleMode) else _interp_mode\n if _interp_mode == \"bicubic\":\n interp = 3\n elif _interp_mode == \"bilinear\":\n interp = 1\n else:\n interp = _interp_mode # type: ignore\n out = grid_pull(\n img_t.unsqueeze(0), grid_t.unsqueeze(0), bound=bound, extrapolate=True, interpolation=interp\n )[0]\n else:\n if self.norm_coords:\n for i, dim in enumerate(img_t.shape[1 : 1 + sr]):\n grid_t[i] = 2.0 / (max(2, dim) - 1.0) * grid_t[i] / grid_t[-1:]\n index_ordering: List[int] = list(range(sr - 1, -1, -1))\n grid_t = moveaxis(grid_t[index_ordering], 0, -1) # type: ignore\n out = torch.nn.functional.grid_sample(\n img_t.unsqueeze(0),\n grid_t.unsqueeze(0),\n mode=self.mode.value if mode is None else GridSampleMode(mode).value,\n padding_mode=self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value,\n align_corners=True,\n )[0]\n out_val, *_ = convert_to_dst_type(out, dst=img, dtype=np.float32)\n return out_val\n\n\nclass Affine(Transform):\n \"\"\"\n Transform ``img`` given the affine parameters.\n A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.\n\n \"\"\"\n\n backend = list(set(AffineGrid.backend) & set(Resample.backend))\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n rotate_params: Optional[Union[Sequence[float], float]] = None,\n shear_params: Optional[Union[Sequence[float], float]] = None,\n translate_params: Optional[Union[Sequence[float], float]] = None,\n scale_params: Optional[Union[Sequence[float], float]] = None,\n affine: Optional[NdarrayOrTensor] = None,\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n norm_coords: bool = True,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n dtype: DtypeLike = np.float32,\n image_only: bool = False,\n ) -> None:\n \"\"\"\n The affine transformations are applied in rotate, shear, translate, scale order.\n\n Args:\n rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.\n Defaults to no rotation.\n shear_params: shearing factors for affine matrix, take a 3D affine as example::\n\n [\n 
[1.0, params[0], params[1], 0.0],\n [params[2], 1.0, params[3], 0.0],\n [params[4], params[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n\n a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.\n translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in\n pixel/voxel relative to the center of the input image. Defaults to no translation.\n scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,\n a tuple of 3 floats for 3D. Defaults to `1.0`.\n affine: If applied, ignore the params (`rotate_params`, etc.) and use the\n supplied matrix. Should be square with each side = num of image spatial\n dimensions + 1.\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n norm_coords: whether to normalize the coordinates from `[-(size-1)/2, (size-1)/2]` to\n `[0, size - 1]` or `[-1, 1]` to be compatible with the underlying resampling API.\n If the coordinates are generated by ``monai.transforms.utils.create_grid``\n and the ``affine`` doesn't include the normalization, this argument should be set to ``True``.\n If the output `self.affine_grid` is already normalized, this argument should be set to ``False``.\n device: device on which the tensor will be allocated.\n dtype: data type for resampling computation. Defaults to ``np.float32``.\n If ``None``, use the data type of input data. To be compatible with other modules,\n the output data type is always `float32`.\n image_only: if True return only the image volume, otherwise return (image, affine).\n\n .. 
deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n self.affine_grid = AffineGrid(\n rotate_params=rotate_params,\n shear_params=shear_params,\n translate_params=translate_params,\n scale_params=scale_params,\n affine=affine,\n dtype=dtype,\n device=device,\n )\n self.image_only = image_only\n self.resampler = Resample(norm_coords=norm_coords, device=device, dtype=dtype)\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)\n self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor]]:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]),\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].\n if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n When `USE_COMPILED` is `True`, this argument uses\n ``\"nearest\"``, ``\"bilinear\"``, ``\"bicubic\"`` to indicate 0, 1, 3 order interpolations.\n See also: https://docs.monai.io/en/stable/networks.html#grid-pull\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n \"\"\"\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n grid, affine = self.affine_grid(spatial_size=sp_size)\n ret = self.resampler(img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode)\n\n return ret if self.image_only else (ret, affine)\n\n\nclass RandAffine(RandomizableTransform):\n \"\"\"\n Random affine transform.\n A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.\n\n \"\"\"\n\n backend = Affine.backend\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n prob: float = 0.1,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n cache_grid: bool = False,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n prob: probability of returning a randomized affine grid.\n defaults to 0.1, with 10% chance returns a randomized grid.\n rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.\n This can be altered on a per-dimension basis. 
E.g., `((0,3), 1, ...)`: for dim0, rotation will be\n in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`\n for dim0 and nothing for the remaining dimensions.\n shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select\n shearing factors(a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for affine matrix,\n take a 3D affine as example::\n\n [\n [1.0, params[0], params[1], 0.0],\n [params[2], 1.0, params[3], 0.0],\n [params[4], params[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n\n translate_range: translate range with format matching `rotate_range`, it defines the range to randomly\n select pixel/voxel to translate for every spatial dims.\n scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select\n the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1.0).\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n cache_grid: whether to cache the identity sampling grid.\n If the spatial size is not dynamically defined by input image, enabling this option could\n accelerate the transform.\n device: device on which the tensor will be allocated.\n\n See also:\n - :py:class:`RandAffineGrid` for the random affine parameters configurations.\n - :py:class:`Affine` for the affine transformation parameters configurations.\n\n .. 
deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n\n self.rand_affine_grid = RandAffineGrid(\n rotate_range=rotate_range,\n shear_range=shear_range,\n translate_range=translate_range,\n scale_range=scale_range,\n device=device,\n )\n self.resampler = Resample(device=device)\n\n self.spatial_size = spatial_size\n self.cache_grid = cache_grid\n self._cached_grid = self._init_identity_cache()\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n\n def _init_identity_cache(self):\n \"\"\"\n Create cache of the identity grid if cache_grid=True and spatial_size is known.\n \"\"\"\n if self.spatial_size is None:\n if self.cache_grid:\n warnings.warn(\n \"cache_grid=True is not compatible with the dynamic spatial_size, please specify 'spatial_size'.\"\n )\n return None\n _sp_size = ensure_tuple(self.spatial_size)\n _ndim = len(_sp_size)\n if _sp_size != fall_back_tuple(_sp_size, [1] * _ndim) or _sp_size != fall_back_tuple(_sp_size, [2] * _ndim):\n # dynamic shape because it falls back to different outcomes\n if self.cache_grid:\n warnings.warn(\n \"cache_grid=True is not compatible with the dynamic spatial_size \"\n f\"'spatial_size={self.spatial_size}', please specify 'spatial_size'.\"\n )\n return None\n return create_grid(spatial_size=_sp_size, device=self.rand_affine_grid.device, backend=\"torch\")\n\n def get_identity_grid(self, spatial_size: Sequence[int]):\n \"\"\"\n Return a cached or new identity grid depends on the availability.\n\n Args:\n spatial_size: non-dynamic spatial size\n \"\"\"\n ndim = len(spatial_size)\n if spatial_size != fall_back_tuple(spatial_size, [1] * ndim) or spatial_size != fall_back_tuple(\n spatial_size, [2] * ndim\n ):\n raise RuntimeError(f\"spatial_size should not be dynamic, got {spatial_size}.\")\n return (\n create_grid(spatial_size=spatial_size, device=self.rand_affine_grid.device, backend=\"torch\")\n if self._cached_grid is None\n else self._cached_grid\n )\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"RandAffine\":\n self.rand_affine_grid.set_random_state(seed, state)\n super().set_random_state(seed, state)\n return self\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self.rand_affine_grid.randomize()\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n randomize: bool = True,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]),\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].\n if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n randomize: whether to execute `randomize()` function first, default to True.\n\n \"\"\"\n if randomize:\n self.randomize()\n\n # if not doing transform and spatial size doesn't change, nothing to do\n # except convert to float and device\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n do_resampling = self._do_transform or (sp_size != ensure_tuple(img.shape[1:]))\n if not do_resampling:\n img, *_ = convert_data_type(img, dtype=torch.float32, device=self.resampler.device)\n grid = self.get_identity_grid(sp_size)\n if self._do_transform:\n grid = self.rand_affine_grid(grid=grid)\n out: NdarrayOrTensor = self.resampler(\n img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode\n )\n return out\n\n\nclass Rand2DElastic(RandomizableTransform):\n \"\"\"\n Random elastic deformation and affine in 2D.\n A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.\n\n \"\"\"\n\n backend = Resample.backend\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n spacing: Union[Tuple[float, float], float],\n magnitude_range: Tuple[float, float],\n prob: float = 0.1,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n spatial_size: Optional[Union[Tuple[int, int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n as_tensor_output: bool = False,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n spacing : distance in between the control points.\n magnitude_range: the random offsets will be generated from ``uniform[magnitude[0], magnitude[1])``.\n prob: probability of returning a randomized elastic transform.\n defaults to 0.1, with 10% chance returns a randomized elastic transform,\n otherwise returns a ``spatial_size`` centered area extracted from the input image.\n rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.\n This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be\n in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`\n for dim0 and nothing for the remaining dimensions.\n shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select\n shearing factors(a tuple of 2 floats for 2D) for affine matrix, take a 2D affine as example::\n\n [\n [1.0, params[0], 0.0],\n [params[1], 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n\n translate_range: translate range with format matching `rotate_range`, it defines the range to randomly\n select pixel to translate for every spatial dims.\n scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select\n the scale factor to translate for every spatial dims. 
A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1.0).\n spatial_size: specifying output image spatial size [h, w].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n device: device on which the tensor will be allocated.\n\n See also:\n - :py:class:`RandAffineGrid` for the random affine parameters configurations.\n - :py:class:`Affine` for the affine transformation parameters configurations.\n\n .. deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.deform_grid = RandDeformGrid(\n spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device\n )\n self.rand_affine_grid = RandAffineGrid(\n rotate_range=rotate_range,\n shear_range=shear_range,\n translate_range=translate_range,\n scale_range=scale_range,\n device=device,\n )\n self.resampler = Resample(device=device)\n\n self.device = device\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)\n self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"Rand2DElastic\":\n self.deform_grid.set_random_state(seed, state)\n self.rand_affine_grid.set_random_state(seed, state)\n super().set_random_state(seed, state)\n return self\n\n def randomize(self, spatial_size: Sequence[int]) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self.deform_grid.randomize(spatial_size)\n self.rand_affine_grid.randomize()\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n spatial_size: Optional[Union[Tuple[int, int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n randomize: bool = True,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W),\n spatial_size: specifying output image spatial size [h, w].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n randomize: whether to execute `randomize()` function first, default to True.\n \"\"\"\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n if randomize:\n self.randomize(spatial_size=sp_size)\n\n if self._do_transform:\n grid = self.deform_grid(spatial_size=sp_size)\n grid = self.rand_affine_grid(grid=grid)\n grid = torch.nn.functional.interpolate( # type: ignore\n recompute_scale_factor=True,\n input=grid.unsqueeze(0),\n scale_factor=list(ensure_tuple(self.deform_grid.spacing)),\n mode=InterpolateMode.BICUBIC.value,\n align_corners=False,\n )\n grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])\n else:\n _device = img.device if isinstance(img, torch.Tensor) else self.device\n grid = create_grid(spatial_size=sp_size, device=_device, backend=\"torch\")\n out: NdarrayOrTensor = self.resampler(\n img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode\n )\n return out\n\n\nclass Rand3DElastic(RandomizableTransform):\n \"\"\"\n Random elastic deformation and affine in 3D.\n A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.\n\n \"\"\"\n\n backend = Resample.backend\n\n @deprecated_arg(name=\"as_tensor_output\", since=\"0.6\")\n def __init__(\n self,\n sigma_range: Tuple[float, float],\n magnitude_range: Tuple[float, float],\n prob: float = 0.1,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n as_tensor_output: bool = False,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n sigma_range: a Gaussian kernel with standard deviation sampled from\n ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.\n magnitude_range: the random offsets on the grid will be generated from\n ``uniform[magnitude[0], magnitude[1])``.\n prob: probability of returning a randomized elastic transform.\n defaults to 0.1, with 10% chance returns a randomized elastic transform,\n otherwise returns a ``spatial_size`` centered area extracted from the input image.\n rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.\n This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be\n in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. 
Setting a single value will use `[-x, x]`\n for dim0 and nothing for the remaining dimensions.\n shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select\n shearing factors(a tuple of 6 floats for 3D) for affine matrix, take a 3D affine as example::\n\n [\n [1.0, params[0], params[1], 0.0],\n [params[2], 1.0, params[3], 0.0],\n [params[4], params[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n\n translate_range: translate range with format matching `rotate_range`, it defines the range to randomly\n select voxel to translate for every spatial dims.\n scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select\n the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1.0).\n spatial_size: specifying output image spatial size [h, w, d].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted\n to `(32, 32, 64)` if the third spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n device: device on which the tensor will be allocated.\n\n See also:\n - :py:class:`RandAffineGrid` for the random affine parameters configurations.\n - :py:class:`Affine` for the affine transformation parameters configurations.\n\n .. 
deprecated:: 0.6.0\n ``as_tensor_output`` is deprecated.\n\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.rand_affine_grid = RandAffineGrid(\n rotate_range=rotate_range,\n shear_range=shear_range,\n translate_range=translate_range,\n scale_range=scale_range,\n device=device,\n )\n self.resampler = Resample(device=device)\n\n self.sigma_range = sigma_range\n self.magnitude_range = magnitude_range\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)\n self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)\n self.device = device\n\n self.rand_offset: np.ndarray\n self.magnitude = 1.0\n self.sigma = 1.0\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"Rand3DElastic\":\n self.rand_affine_grid.set_random_state(seed, state)\n super().set_random_state(seed, state)\n return self\n\n def randomize(self, grid_size: Sequence[int]) -> None:\n super().randomize(None)\n if not self._do_transform:\n return None\n self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32, copy=False)\n self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1])\n self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1])\n self.rand_affine_grid.randomize()\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n randomize: bool = True,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W, D),\n spatial_size: specifying spatial 3D output image spatial size [h, w, d].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n randomize: whether to execute `randomize()` function first, default to True.\n \"\"\"\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n if randomize:\n self.randomize(grid_size=sp_size)\n\n _device = img.device if isinstance(img, torch.Tensor) else self.device\n grid = create_grid(spatial_size=sp_size, device=_device, backend=\"torch\")\n if self._do_transform:\n if self.rand_offset is None:\n raise RuntimeError(\"rand_offset is not initialized.\")\n gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=_device)\n offset = torch.as_tensor(self.rand_offset, device=_device).unsqueeze(0)\n grid[:3] += gaussian(offset)[0] * self.magnitude\n grid = self.rand_affine_grid(grid=grid)\n out: NdarrayOrTensor = self.resampler(\n img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode\n )\n return out\n\n\nclass GridDistortion(Transform):\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n num_cells: Union[Tuple[int], int],\n distort_steps: Sequence[Sequence[float]],\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Grid distortion transform. Refer to:\n https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py\n\n Args:\n num_cells: number of grid cells on each dimension.\n distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the\n corresponding dimensions (in the order of H, W[, D]). The length of each tuple equals to `num_cells + 1`.\n Each value in the tuple represents the distort step of the related cell.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n device: device on which the tensor will be allocated.\n\n \"\"\"\n self.resampler = Resample(mode=mode, padding_mode=padding_mode, device=device)\n self.num_cells = num_cells\n self.distort_steps = distort_steps\n self.device = device\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n distort_steps: Optional[Sequence[Sequence]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]).\n distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the\n corresponding dimensions (in the order of H, W[, D]). The length of each tuple equals to `num_cells + 1`.\n Each value in the tuple represents the distort step of the related cell.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n\n \"\"\"\n distort_steps = self.distort_steps if distort_steps is None else distort_steps\n if len(img.shape) != len(distort_steps) + 1:\n raise ValueError(\"the spatial size of `img` does not match with the length of `distort_steps`\")\n\n all_ranges = []\n num_cells = ensure_tuple_rep(self.num_cells, len(img.shape) - 1)\n for dim_idx, dim_size in enumerate(img.shape[1:]):\n dim_distort_steps = distort_steps[dim_idx]\n ranges = torch.zeros(dim_size, dtype=torch.float32)\n cell_size = dim_size // num_cells[dim_idx]\n prev = 0\n for idx in range(num_cells[dim_idx] + 1):\n start = int(idx * cell_size)\n end = start + cell_size\n if end > dim_size:\n end = dim_size\n cur = dim_size\n else:\n cur = prev + cell_size * dim_distort_steps[idx]\n ranges[start:end] = torch.linspace(prev, cur, end - start)\n prev = cur\n ranges = ranges - (dim_size - 1.0) / 2.0\n all_ranges.append(ranges)\n\n coords = meshgrid_ij(*all_ranges)\n grid = torch.stack([*coords, torch.ones_like(coords[0])])\n\n return self.resampler(img, grid=grid, mode=mode, padding_mode=padding_mode) # type: ignore\n\n\nclass RandGridDistortion(RandomizableTransform):\n\n backend = [TransformBackends.TORCH]\n\n def __init__(\n self,\n num_cells: Union[Tuple[int], int] = 5,\n prob: float = 0.1,\n distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Random grid distortion transform. Refer to:\n https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py\n\n Args:\n num_cells: number of grid cells on each dimension.\n prob: probability of returning a randomized grid distortion transform. Defaults to 0.1.\n distort_limit: range to randomly distort.\n If single number, distort_limit is picked from (-distort_limit, distort_limit).\n Defaults to (-0.03, 0.03).\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n device: device on which the tensor will be allocated.\n\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.num_cells = num_cells\n if isinstance(distort_limit, (int, float)):\n self.distort_limit = (min(-distort_limit, distort_limit), max(-distort_limit, distort_limit))\n else:\n self.distort_limit = (min(distort_limit), max(distort_limit))\n self.distort_steps: Sequence[Sequence[float]] = ((1.0,),)\n self.grid_distortion = GridDistortion(\n num_cells=num_cells, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode, device=device\n )\n\n def randomize(self, spatial_shape: Sequence[int]) -> None:\n super().randomize(None)\n if not self._do_transform:\n return\n self.distort_steps = tuple(\n tuple(1.0 + self.R.uniform(low=self.distort_limit[0], high=self.distort_limit[1], size=n_cells + 1))\n for n_cells in ensure_tuple_rep(self.num_cells, len(spatial_shape))\n )\n\n def __call__(\n self,\n img: NdarrayOrTensor,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n randomize: bool = True,\n ) -> NdarrayOrTensor:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]).\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n randomize: whether to shuffle the random factors using `randomize()`, default to True.\n \"\"\"\n if randomize:\n self.randomize(img.shape[1:])\n if not self._do_transform:\n return img\n return self.grid_distortion(img, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode)\n"
] | [
[
"torch.ones_like",
"numpy.eye",
"numpy.allclose",
"numpy.linalg.solve",
"numpy.meshgrid",
"numpy.append",
"torch.flip",
"torch.as_tensor",
"torch.linspace",
"numpy.argsort",
"numpy.asarray",
"numpy.flip",
"torch.zeros",
"torch.eye",
"numpy.array",
"torch.linalg.solve",
"torch.solve"
]
] |
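The core operation in `AffineGrid.__call__` above is a matrix product between a homogeneous coordinate grid and the composed affine. A minimal pure-numpy sketch of that step (this is not MONAI's `create_grid`, which also handles device/dtype; the centering mirrors the `-(size - 1) / 2` convention that `Resample.norm_coords` later undoes):

import numpy as np

H, W = 4, 5
# Centered homogeneous grid of shape (3, H, W): channels are (y, x, 1).
ys, xs = np.meshgrid(np.arange(H) - (H - 1) / 2.0,
                     np.arange(W) - (W - 1) / 2.0, indexing="ij")
grid = np.stack([ys, xs, np.ones_like(xs)])

theta = np.pi / 6  # a 30-degree rotation, as create_rotate would build in 2D
affine = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])

# Same reshape / multiply / reshape as the last lines of AffineGrid.__call__.
new_grid = (affine @ grid.reshape((grid.shape[0], -1))).reshape([-1] + list(grid.shape[1:]))
assert new_grid.shape == (3, H, W)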
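A usage sketch for the deterministic and random affine transforms defined above, assuming a MONAI installation where these classes are exported from `monai.transforms`; array shapes follow the channel-first convention documented in each `__call__`:

import numpy as np
from monai.transforms import Affine, RandAffine

img = np.random.rand(1, 64, 64).astype(np.float32)  # (num_channels, H, W)

# Deterministic: rotate ~15 degrees and scale by 1.1. With the default
# image_only=False, __call__ returns (resampled image, affine matrix).
affine = Affine(rotate_params=np.pi / 12, scale_params=(1.1, 1.1), padding_mode="zeros")
out, mat = affine(img, spatial_size=(64, 64))

# Random: with probability 0.9, sample a rotation from uniform[-pi/18, pi/18)
# (a single value x means [-x, x] for dim0, per the docstring above).
rand_affine = RandAffine(prob=0.9, rotate_range=np.pi / 18, padding_mode="zeros")
rand_affine.set_random_state(seed=0)
out_rand = rand_affine(img, spatial_size=(64, 64))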
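And a worked example of `GridDistortion`'s `distort_steps` contract, with hypothetical step values: each spatial dimension needs `num_cells + 1` steps, where a step above 1.0 stretches the corresponding grid cell and a step below 1.0 compresses it:

import numpy as np
from monai.transforms import GridDistortion  # assumes a MONAI version exporting this class

img = np.zeros((1, 60, 60), dtype=np.float32)  # (num_channels, H, W)
distort_steps = (
    (1.2, 0.9, 1.0, 1.1),  # along H: num_cells + 1 = 4 steps
    (1.0, 1.1, 0.8, 1.2),  # along W
)
out = GridDistortion(num_cells=3, distort_steps=distort_steps)(img)
assert out.shape == img.shape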
seitalab/compass | [
"b08b0b711875e8e049ff07793ffe1446a6c3f144"
] | [
"compass/embedding.py"
] | [
"from sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import AgglomerativeClustering, KMeans\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\nimport pandas\nimport matplotlib.cm as cm\nimport umap\nimport tqdm\nimport scanpy as sc\nimport matplotlib.gridspec as gridspec\nimport networkx as nx\n\nimport numpy\nimport operator\nimport random\nimport pickle\nimport collections\nimport sys\nimport os\n\nclass GeneEmbedding(object):\n\n def __init__(self, embedding_file, context):\n self.vector = []\n self.context = context\n self.embedding_file = embedding_file\n self.embeddings = self.read_embedding(self.embedding_file)\n self.vector = []\n self.genes = []\n for gene in tqdm.tqdm(self.context.expressed_genes):\n if gene in self.embeddings:\n self.vector.append(self.embeddings[gene])\n self.genes.append(gene)\n\n def read_embedding(self, filename):\n embedding = dict()\n lines = open(filename,\"r\").read().splitlines()[1:]\n for line in lines:\n vector = line.split()\n gene = vector.pop(0)\n embedding[gene] = [float(x) for x in vector]\n return embedding\n\n def compute_similarities(self, gene, subset=None):\n print(\"hit\")\n if gene not in self.embeddings:\n return None\n embedding = self.embeddings[gene]\n distances = dict()\n if subset:\n targets = set(list(self.embeddings.keys())).intersection(set(subset))\n else:\n targets = list(self.embeddings.keys())\n for target in targets:\n if target not in self.embeddings:\n continue\n v = self.embeddings[target]\n distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])\n distances[target] = distance\n sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))\n genes = [x[0] for x in sorted_distances]\n distance = [x[1] for x in sorted_distances]\n df = pandas.DataFrame.from_dict({\"Gene\":genes, \"Similarity\":distance})\n return df\n\n def cluster(self, n=12):\n kmeans = KMeans(n_clusters=n)\n kmeans.fit(self.vector)\n clusters = kmeans.labels_\n clusters = zip(self.context.expressed_genes, clusters)\n _clusters = []\n for gene, cluster in clusters:\n _clusters.append(\"G\"+str(cluster))\n return _clusters\n\n def clusters(self, clusters):\n average_vector = dict()\n gene_to_cluster = collections.defaultdict(list)\n matrix = collections.defaultdict(list)\n total_average_vector = []\n for gene, cluster in zip(self.context.expressed_genes, clusters):\n if gene in self.embeddings:\n matrix[cluster].append(self.embeddings[gene])\n gene_to_cluster[cluster].append(gene)\n total_average_vector.append(self.embeddings[gene])\n self.total_average_vector = list(numpy.average(total_average_vector, axis=0))\n for cluster, vectors in matrix.items():\n xvec = list(numpy.average(vectors, axis=0))\n average_vector[cluster] = numpy.subtract(xvec,self.total_average_vector)\n return average_vector, gene_to_cluster\n\n def generate_vector(self, genes):\n vector = []\n for gene, vec in zip(self.genes, self.vector):\n if gene in genes:\n vector.append(vec)\n return list(numpy.median(vector, axis=0))\n\n def cluster_definitions(self, clusters):\n average_vector, gene_to_cluster = self.clusters(clusters)\n similarities = collections.defaultdict(dict)\n for cluster, vector in average_vector.items():\n distances = dict()\n for target in gene_to_cluster[cluster]:\n v = self.embeddings[target]\n distance = float(cosine_similarity(numpy.array(vector).reshape(1, 
-1),numpy.array(v).reshape(1, -1))[0])\n distances[target] = distance\n sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))\n similarities[cluster] = [x[0] for x in sorted_distances if x[0]]\n return similarities\n\n def cluster_definitions_as_df(self, similarities, top_n=20):\n clusters = []\n symbols = []\n for key, genes in similarities.items():\n clusters.append(key)\n symbols.append(\", \".join(genes[:top_n]))\n df = pandas.DataFrame.from_dict({\"Cluster Name\":clusters, \"Top Genes\":symbols})\n return df\n\n def plot(self, clusters, png=None, method=\"TSNE\", labels=[], pcs=None, remove=[]):\n plt.figure(figsize = (8, 8))\n ax = plt.subplot(1,1,1)\n pcs = self.plot_reduction(clusters, ax, labels=labels, method=method, pcs=pcs, remove=remove)\n if png:\n plt.savefig(png)\n plt.close()\n else:\n plt.show()\n return pcs\n\n def plot_reduction(self, clusters, ax, method=\"TSNE\", labels=[], pcs=None, remove=[]):\n if type(pcs) != numpy.ndarray:\n if method == \"TSNE\":\n print(\"Running t-SNE\")\n pca = TSNE(n_components=2, n_jobs=-1, metric=\"cosine\")\n pcs = pca.fit_transform(self.vector)\n pcs = numpy.transpose(pcs)\n print(\"Finished.\")\n else:\n print(\"Running UMAP\")\n trans = umap.UMAP(random_state=42,metric='cosine').fit(self.vector)\n x = trans.embedding_[:, 0]\n y = trans.embedding_[:, 1]\n pcs = [x,y]\n print(\"Finished.\")\n if len(remove) != 0:\n _pcsx = []\n _pcsy = []\n _clusters = []\n \n for x, y, c in zip(pcs[0],pcs[1],clusters):\n if c not in remove:\n _pcsx.append(x)\n _pcsy.append(y)\n _clusters.append(c)\n pcs = []\n pcs.append(_pcsx)\n pcs.append(_pcsy)\n clusters = _clusters\n data = {\"x\":pcs[0],\"y\":pcs[1], \"Cluster\":clusters}\n df = pandas.DataFrame.from_dict(data)\n sns.scatterplot(data=df,x=\"x\", y=\"y\",hue=\"Cluster\", ax=ax)\n plt.xlabel(\"{}-1\".format(method))\n plt.ylabel(\"{}-2\".format(method))\n ax.set_xticks([])\n ax.set_yticks([])\n if len(labels):\n for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):\n if gene in labels:\n ax.text(x+.02, y, str(gene), fontsize=8)\n return pcs\n\n def subtract_vector(self, vector):\n for gene, vec in self.embeddings.items():\n vec = numpy.subtract(vec-vector)\n self.embeddings[gene] = vec\n\n @staticmethod\n def relabel_cluster(similarities, clusters, old_label, new_label):\n genes = similarities[old_label]\n del similarities[old_label]\n similarities[new_label] = genes\n _clusters = []\n for cluster in clusters:\n if cluster == old_label:\n _clusters.append(new_label)\n else:\n _clusters.append(cluster)\n return similarities, _clusters\n\n def plot_similarity_matrix(self, markers, marker_labels=None, png=None):\n cmap = matplotlib.cm.tab20\n if marker_labels:\n marker_colors = {}\n ctypes = []\n for value in marker_labels.values():\n ctypes.append(value)\n ctypes = list(set(ctypes))\n for key, value in marker_labels.items():\n marker_colors[key] = cmap(ctypes.index(value))\n colors = pandas.DataFrame(markers)[0].map(marker_colors)\n similarity_matrix = []\n print(\"Running\")\n markers = set(list(self.embeddings.keys())).intersection(set(markers))\n markers = list(markers)\n for marker in markers:\n print(marker)\n row = []\n res = self.compute_similarities(marker, subset=markers)\n resdict = dict(zip(res[\"Gene\"],res[\"Similarity\"]))\n for gene in markers:\n row.append(resdict[gene])\n similarity_matrix.append(row)\n plt.figure(figsize = (12, 10))\n matrix = numpy.array(similarity_matrix)\n df = 
pandas.DataFrame(matrix,index=markers,columns=markers)\n sns.clustermap(df,figsize=(12,8), dendrogram_ratio=0.1)\n plt.tight_layout()\n if png:\n plt.savefig(\"marker_similarity.png\")\n else:\n plt.show()\n \n def plot_similarity_network(self, markers, marker_labels=None, png=None):\n cmap = matplotlib.cm.tab20\n G = nx.petersen_graph()\n node_color = []\n node_order = []\n node_size = []\n edge_order = []\n edge_color = []\n edge_labels = dict()\n for marker in markers:\n node_order.append(marker)\n if marker_labels:\n ctypes = []\n for value in marker_labels.values():\n ctypes.append(value)\n ctypes = list(set(ctypes))\n node_color.append(ctypes.index(marker_labels[marker]))\n node_size.append(400)\n G.add_node(marker)\n for marker in markers:\n res = self.compute_similarities(marker)\n resdict = dict(zip(res[\"Gene\"],res[\"Similarity\"]))\n i = 0\n for gene, similarity in resdict.items():\n if i > 9: break\n if gene != marker:\n if gene not in G.nodes():\n node_size.append(0)\n G.add_node(gene)\n node_order.append(gene)\n node_color.append(len(set(marker_labels.values())))\n print(marker, gene)\n G.add_edge(marker, gene, weight=similarity)\n edge_color.append(similarity)\n edge_order.append((marker,gene))\n edge_labels[(marker,gene)] = str(round(similarity,2))\n i += 1\n # print(node_color)\n # c = max(nx.connected_components(G), key=len)\n # G = G.subgraph(c).copy()\n for i in range(10):\n G.remove_node(i)\n print(G.nodes())\n print(G.edges())\n fig = plt.figure(figsize=(8,8))\n ax = plt.subplot(1,1,1)\n #pos = nx.nx_agraph.graphviz_layout(G, prog=\"neato\",args=\"-Goverlap=scale\")\n pos = nx.nx_agraph.graphviz_layout(G, prog=\"neato\",args=\"-Goverlap=scale -Elen=5 -Eweight=0.2\")\n #pos = nx.spring_layout(G)\n nx.draw(G,pos,ax=ax, cmap=cmap,nodelist=node_order, node_size=node_size,edgelist=edge_order, node_color=node_color, edge_color=edge_color, edge_vmin=0, edge_vmax=1.0, edge_cmap=plt.cm.Greys, with_labels=True, width=1,font_size=7)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels, font_size=6)\n plt.axis('off')\n plt.tight_layout()\n if png:\n plt.savefig(png)\n else:\n plt.show()\n\nclass CellEmbedding(object):\n\n def __init__(self, context, embed):\n\n cell_to_gene = list(context.cell_to_gene.items())\n self.context = context\n self.embed = embed\n self.expression = context.expression\n self.data = collections.defaultdict(list)\n self.weights = collections.defaultdict(list)\n\n for cell, genes in tqdm.tqdm(cell_to_gene):\n if len(genes) < 2: continue\n if cell in self.expression:\n cell_weights = self.expression[cell]\n for gene in set(genes).intersection(set(embed.embeddings.keys())):\n if gene in cell_weights:\n weight = self.expression[cell][gene]\n if weight > 0:\n self.data[cell].append(embed.embeddings[gene])\n self.weights[cell].append(weight)\n self.matrix = []\n dataset_vector = []\n for cell, vectors in self.data.items():\n weights = self.weights[cell]\n xvec = list(numpy.average(vectors, axis=0, weights=weights))\n self.matrix.append(xvec)\n dataset_vector += vectors\n\n self.dataset_vector = numpy.average(dataset_vector, axis=0)\n _matrix = []\n for vec in self.matrix:\n _matrix.append(numpy.subtract(vec, self.dataset_vector))\n self.matrix = _matrix\n\n def batch_correct(self, column=None, clusters=None):\n if not column or not clusters:\n raise ValueError(\"Must supply batch column and clusters!\")\n column_labels = dict(zip(self.context.cells,self.context.metadata[column]))\n labels = []\n for key in self.data.keys():\n 
labels.append(column_labels[key])\n local_correction = collections.defaultdict(lambda : collections.defaultdict(list))\n correction_vectors = collections.defaultdict(dict)\n for cluster, batch, vec in zip(clusters, labels, self.matrix):\n local_correction[cluster][batch].append(vec)\n for cluster, batches in local_correction.items():\n cluster_vec = []\n batch_keys = list(batches.keys())\n base_batch = batch_keys.pop(0)\n max_distance = 1.0\n cluster_vec = numpy.average(batches[base_batch], axis=0)\n for batch in batch_keys:\n bvec = list(numpy.average(batches[batch], axis=0))\n distance = float(cosine_similarity(numpy.array(bvec).reshape(1, -1),numpy.array(cluster_vec).reshape(1, -1))[0])\n if max_distance > distance:\n max_distance = distance\n offset = numpy.subtract(cluster_vec,bvec)\n bvec = numpy.add(bvec,offset)\n distance = float(cosine_similarity(numpy.array(bvec).reshape(1, -1),numpy.array(cluster_vec).reshape(1, -1))[0])\n correction_vectors[cluster][batch] = offset\n\n self.matrix = []\n self.sample_vector = collections.defaultdict(list)\n i = 0\n self.cell_order = []\n for cell, vectors in self.data.items():\n cluster = clusters[i]\n xvec = list(numpy.average(vectors, axis=0))\n batch = column_labels[cell]\n if cluster in correction_vectors and batch in correction_vectors[cluster]:\n offset = correction_vectors[cluster][batch]\n xvec = numpy.add(xvec,offset)\n self.matrix.append(xvec)\n self.cell_order.append(cell)\n i += 1\n\n def cluster(self, k=12):\n kmeans = KMeans(n_clusters=k)\n kmeans.fit(self.matrix)\n clusters = kmeans.labels_\n _clusters = []\n for cluster in clusters:\n _clusters.append(\"C\"+str(cluster))\n self.clusters = _clusters\n return _clusters\n\n def subtract_vector(self, vector):\n corrected_matrix = []\n for cell_vector in self.matrix:\n corrected_matrix.append(numpy.subtract(cell_vector, vector))\n self.matrix = corrected_matrix\n\n def compute_gene_similarities(self):\n gene_similarities = dict()\n vectors = collections.defaultdict(list)\n for vec, label in zip(self.matrix, self.clusters):\n vectors[label].append(vec)\n for label, vecs in vectors.items():\n distances = dict()\n cell_vector = list(numpy.mean(vecs, axis=0))\n for gene, vector in self.embed.embeddings.items():\n distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])\n distances[gene] = distance\n sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))\n gene_similarities[label] = [x[0] for x in sorted_distances]\n print(label, sorted_distances[:10])\n return gene_similarities\n\n def group_cell_vectors(self, barcode_to_label):\n label_vector = dict()\n labels = []\n for cell, vectors in self.data.items():\n vector = list(numpy.median(vectors, axis=0))\n labels.append(barcode_to_label[cell])\n label_vector[barcode_to_label[cell]] = vector\n for cell, vectors in self.data.items():\n _vectors = []\n for vector in vectors:\n _vectors.append(numpy.subtract(vector, label_vector[barcode_to_label[cell]]))\n vectors = _vectors\n vector = list(numpy.median(vectors, axis=0))\n label_vector[barcode_to_label[cell]] = vector\n return label_vector, labels\n\n def compute_cell_similarities(self, barcode_to_label):\n vectors = dict()\n cell_similarities = dict()\n vectors, labels = self.group_cell_vectors(barcode_to_label)\n for label, vector in vectors.items():\n distances = dict()\n for label2, vector2 in vectors.items():\n xdist = []\n distance = float(cosine_similarity(numpy.array(vector).reshape(1, 
-1),numpy.array(vector2).reshape(1, -1))[0])\n xdist.append(distance)\n distances[label2] = distance\n cell_similarities[label] = distances\n return cell_similarities\n\n def plot_reduction(self, ax, pcs=None, method=\"TSNE\", clusters=None, labels=None):\n if type(pcs) != numpy.ndarray:\n if method == \"TSNE\":\n print(\"Running t-SNE\")\n pca = TSNE(n_components=2, n_jobs=-1, metric=\"cosine\")\n pcs = pca.fit_transform(self.matrix)\n pcs = numpy.transpose(pcs)\n print(\"Finished.\")\n else:\n print(\"Running UMAP\")\n trans = umap.UMAP(random_state=42,metric='cosine').fit(self.matrix)\n x = trans.embedding_[:, 0]\n y = trans.embedding_[:, 1]\n pcs = [x,y]\n print(\"Finished.\")\n data = {\"x\":pcs[0],\"y\":pcs[1],\"Cluster\": clusters}\n df = pandas.DataFrame.from_dict(data)\n sns.scatterplot(data=df,x=\"x\", y=\"y\", hue='Cluster', ax=ax,linewidth=0.1,s=13,alpha=1.0)\n return pcs\n\n def plot(self, png=None, pcs=None, method=\"TSNE\", column=None):\n if column:\n column_labels = dict(zip(self.context.cells,self.context.metadata[column]))\n labels = []\n for key in self.data.keys():\n labels.append(column_labels[key])\n else:\n labels = self.clusters\n plt.figure(figsize = (8, 8))\n ax1 = plt.subplot(1,1,1)\n pcs = self.plot_reduction(ax1, pcs=pcs, clusters=labels, method=method)\n plt.xlabel(\"{}-1\".format(method))\n plt.ylabel(\"{}-2\".format(method))\n ax1.set_xticks([])\n ax1.set_yticks([])\n if png:\n plt.savefig(png)\n plt.close()\n else:\n plt.show()\n return pcs\n\n def plot_distance(self, vector, pcs=None):\n plt.figure(figsize = (8,8))\n ax = plt.subplot(1,1, 1)\n if type(pcs) != numpy.ndarray:\n pca = TSNE(n_components=2)\n pcs = pca.fit_transform(self.matrix)\n pcs = numpy.transpose(pcs)\n distances = []\n dataset_distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(self.dataset_vector).reshape(1, -1))[0])\n for cell_vector in self.matrix:\n distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])\n distances.append(distance-dataset_distance)\n data = {\"x\":pcs[0],\"y\":pcs[1],\"Distance\": distances}\n df = pandas.DataFrame.from_dict(data)\n sns.scatterplot(data=df,x=\"x\", y=\"y\", hue='Distance', ax=ax,linewidth=0.00,s=7,alpha=0.7)\n return pcs\n\n def plot_gene_tsne(self, title, ax, genes, pcs=None):\n expression = [0 for _ in range(len(list(self.data.keys())))]\n for gene in genes:\n for i, cell in enumerate(self.data.keys()):\n if gene in self.expression[cell]:\n expression[i] += self.expression[cell][gene]\n if type(pcs) != numpy.ndarray:\n pca = TSNE(n_components=2)\n pcs = pca.fit_transform(self.matrix)\n pcs = numpy.transpose(pcs)\n data = {\"x\":pcs[0],\"y\":pcs[1],\"Gene Expression\": expression}\n df = pandas.DataFrame.from_dict(data)\n sns.scatterplot(data=df,x=\"x\", y=\"y\", hue='Gene Expression', ax=ax,linewidth=0.00,s=7,alpha=0.7)\n ax.set_title(title,fontsize=16)\n return pcs\n\n def plot_gene_expression(self, genes, pcs=None, png=None):\n plt.figure(figsize = (8,8))\n ax = plt.subplot(1,1, 1)\n pcs = self.plot_gene_tsne(\",\".join(genes[:10]), ax, genes, pcs=pcs)\n ax.set_xticks([])\n ax.set_yticks([])\n if not png:\n plt.show()\n else:\n plt.savefig(png)\n plt.close()\n return pcs\n \n def plot_similarity_matrix(self, vectors, column):\n similarity_matrix = []\n plt.figure(figsize = (12, 10))\n barcode_to_label = dict(zip(cembed.context.metadata.index, cembed.context.metadata[column]))\n ctypes = cembed.group_cell_vectors()\n matrix = []\n clusters = 
list(vectors.keys())\n celltypes = list(ctypes.keys())\n for cluster, genes in vectors.items():\n vector = embed.generate_vector(genes)\n row = []\n for cell in ctypes.keys():\n distance = float(cosine_similarity(numpy.array(ctypes[cell]).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])\n row.append(distance)\n matrix.append(row)\n matrix = numpy.array(matrix)\n df = pandas.DataFrame(matrix,index=clusters,columns=celltypes)\n sns.clustermap(df,figsize=(17,8))\n plt.tight_layout()\n plt.savefig(os.path.join(output_path,\"celltype_similarities_{}.png\".format(sample)))"
] | [
[
"numpy.transpose",
"numpy.mean",
"numpy.subtract",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"numpy.median",
"matplotlib.pyplot.savefig",
"sklearn.cluster.KMeans",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.add",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.average",
"pandas.DataFrame.from_dict"
]
] |
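The `CellEmbedding` constructor in the code cell above builds each cell vector as an expression-weighted average of per-gene vectors, then centers it with a dataset-average vector. A minimal sketch of that computation (the gene names, vectors, and weights below are invented for illustration; only `numpy` is assumed):

```python
import numpy as np

# Toy gene vectors standing in for embed.embeddings; values are made up.
gene_vectors = {
    "CD3D": np.array([0.8, 0.1, 0.0]),
    "CD3E": np.array([0.7, 0.2, 0.1]),
    "LYZ":  np.array([0.0, 0.9, 0.3]),
}

# One cell: the genes it expresses and their positive expression weights.
cell_genes = ["CD3D", "CD3E", "LYZ"]
cell_weights = [5.0, 3.0, 1.0]

vectors = np.stack([gene_vectors[g] for g in cell_genes])

# Expression-weighted average, as in numpy.average(vectors, axis=0, weights=...).
cell_vec = np.average(vectors, axis=0, weights=cell_weights)

# The constructor then subtracts an unweighted average of all contributing
# gene vectors (its "dataset vector") to center the embedding.
dataset_vec = vectors.mean(axis=0)
print(cell_vec - dataset_vec)
```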
azawalich/flair | [
"f0101ab25381aefa586ecb688d4f412d5fab5de3"
] | [
"flair/trainers/language_model_trainer.py"
] | [
"\nimport time\nimport datetime\nimport random\nimport sys\nimport logging\nfrom pathlib import Path\nfrom typing import Union\nfrom torch import cuda\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.optim.sgd import SGD\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\nimport flair\nfrom flair.data import Dictionary\nfrom flair.models import LanguageModel\nfrom flair.optim import *\nfrom flair.training_utils import add_file_handler\nlog = logging.getLogger('flair')\n\n\nclass TextDataset(Dataset):\n\n def __init__(self, path, dictionary, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True, shuffle_lines=True):\n assert path.exists()\n self.files = None\n self.path = path\n self.dictionary = dictionary\n self.split_on_char = split_on_char\n self.forward = forward\n self.random_case_flip = random_case_flip\n self.expand_vocab = expand_vocab\n self.shuffle_lines = shuffle_lines\n if path.is_dir():\n self.files = sorted([f for f in path.iterdir() if f.exists()])\n else:\n self.files = [path]\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index=0):\n return self.charsplit(self.files[index], self.expand_vocab, self.forward, self.split_on_char, self.random_case_flip)\n\n def charsplit(self, path, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True):\n 'Tokenizes a text file on character basis.'\n assert path.exists()\n lines = open(path, 'r', encoding='utf-8').readlines()\n log.info(\n ''.join(['read text file with ', '{}'.format(len(lines)), ' lines']))\n if self.shuffle_lines:\n random.shuffle(lines)\n log.info('shuffled')\n tokens = 0\n for line in lines:\n if split_on_char:\n chars = list(line)\n else:\n chars = line.split()\n tokens += len(chars)\n if expand_vocab:\n for char in chars:\n self.dictionary.add_item(char)\n ids = torch.zeros(tokens, dtype=torch.long)\n if forward:\n token = 0\n for line in lines:\n if random_case_flip:\n line = self.random_casechange(line)\n if split_on_char:\n chars = list(line)\n else:\n chars = line.split()\n for char in chars:\n if (token >= tokens):\n break\n ids[token] = self.dictionary.get_idx_for_item(char)\n token += 1\n else:\n token = (tokens - 1)\n for line in lines:\n if random_case_flip:\n line = self.random_casechange(line)\n if split_on_char:\n chars = list(line)\n else:\n chars = line.split()\n for char in chars:\n if (token >= tokens):\n break\n ids[token] = self.dictionary.get_idx_for_item(char)\n token -= 1\n return ids\n\n @staticmethod\n def random_casechange(line):\n no = random.randint(0, 99)\n if (no is 0):\n line = line.lower()\n if (no is 1):\n line = line.upper()\n return line\n\n def tokenize(self, path):\n 'Tokenizes a text file.'\n assert path.exists()\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n words = (line.split() + ['<eos>'])\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n with open(path, 'r') as f:\n ids = torch.zeros(tokens, dtype=torch.long, device=flair.device)\n token = 0\n for line in f:\n words = (line.split() + ['<eos>'])\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n return ids\n\n\nclass TextCorpus(object):\n\n def __init__(self, path, dictionary, forward=True, character_level=True, random_case_flip=True, shuffle_lines=True):\n self.dictionary = dictionary\n self.forward = forward\n self.split_on_char = character_level\n self.random_case_flip = random_case_flip\n self.shuffle_lines = shuffle_lines\n if (type(path) == str):\n path = 
Path(path)\n self.train = TextDataset((path / 'train'), dictionary, False, self.forward,\n self.split_on_char, self.random_case_flip, shuffle_lines=self.shuffle_lines)\n self.valid = TextDataset((path / 'valid.txt'), dictionary, False, self.forward,\n self.split_on_char, self.random_case_flip, shuffle_lines=False)[0]\n self.test = TextDataset((path / 'test.txt'), dictionary, False, self.forward,\n self.split_on_char, self.random_case_flip, shuffle_lines=False)[0]\n\n\nclass LanguageModelTrainer():\n\n def __init__(self, model, corpus, optimizer=SGD, test_mode=False, epoch=0, split=0, loss=10000, optimizer_state=None):\n self.model = model\n self.optimizer = optimizer\n self.corpus = corpus\n self.test_mode = test_mode\n self.loss_function = torch.nn.CrossEntropyLoss()\n self.log_interval = 100\n self.epoch = epoch\n self.split = split\n self.loss = loss\n self.optimizer_state = optimizer_state\n\n def train(self, base_path, sequence_length, learning_rate=20, mini_batch_size=100, anneal_factor=0.25, patience=10, clip=0.25, max_epochs=1000, checkpoint=False, grow_to_sequence_length=0, num_workers=2, use_amp=False, amp_opt_level='O1', **kwargs):\n if use_amp:\n if (sys.version_info < (3, 0)):\n raise RuntimeError(\n 'Apex currently only supports Python 3. Aborting.')\n if (amp is None):\n raise RuntimeError(\n 'Failed to import apex. Please install apex from https://www.github.com/nvidia/apex to enable mixed-precision training.')\n if (type(base_path) is str):\n base_path = Path(base_path)\n add_file_handler(log, (base_path / 'training.log'))\n number_of_splits = len(self.corpus.train)\n val_data = self._batchify(self.corpus.valid, mini_batch_size)\n base_path.mkdir(parents=True, exist_ok=True)\n loss_txt = (base_path / 'loss.txt')\n savefile = (base_path / 'best-lm.pt')\n try:\n epoch = self.epoch\n best_val_loss = self.loss\n optimizer = self.optimizer(\n self.model.parameters(), lr=learning_rate, **kwargs)\n if (self.optimizer_state is not None):\n optimizer.load_state_dict(self.optimizer_state)\n if isinstance(optimizer, (AdamW, SGDW)):\n scheduler = ReduceLRWDOnPlateau(\n optimizer, verbose=True, factor=anneal_factor, patience=patience)\n else:\n scheduler = ReduceLROnPlateau(\n optimizer, verbose=True, factor=anneal_factor, patience=patience)\n if use_amp:\n (self.model, optimizer) = amp.initialize(\n self.model, optimizer, opt_level=amp_opt_level)\n training_generator = DataLoader(\n self.corpus.train, shuffle=False, num_workers=num_workers)\n for epoch in range(self.epoch, max_epochs):\n epoch_start_time = time.time()\n if (epoch > 0):\n training_generator = DataLoader(\n self.corpus.train, shuffle=True, num_workers=num_workers)\n self.model.save_checkpoint(\n (base_path / ''.join(['epoch_', '{}'.format(epoch), '.pt'])), optimizer, epoch, 0, best_val_loss)\n for (curr_split, train_slice) in enumerate(training_generator, self.split):\n if (sequence_length < grow_to_sequence_length):\n sequence_length += 1\n log.info(\n ''.join(['Sequence length is ', '{}'.format(sequence_length)]))\n split_start_time = time.time()\n curr_split += 1\n train_data = self._batchify(\n train_slice.flatten(), mini_batch_size)\n log.info((('Split %d' % curr_split) +\n '\\t - ({:%H:%M:%S})'.format(datetime.datetime.now())))\n for group in optimizer.param_groups:\n learning_rate = group['lr']\n self.model.train()\n hidden = self.model.init_hidden(mini_batch_size)\n ntokens = len(self.corpus.dictionary)\n total_loss = 0\n start_time = time.time()\n for (batch, i) in enumerate(range(0, (train_data.size(0) - 1), 
sequence_length)):\n (data, targets) = self._get_batch(\n train_data, i, sequence_length)\n if ((not data.is_cuda) and cuda.is_available()):\n log.info(\n ('Batch %d is not on CUDA, training will be very slow' % batch))\n raise Exception('data isnt on cuda')\n self.model.zero_grad()\n optimizer.zero_grad()\n (output, rnn_output, hidden) = self.model.forward(\n data, hidden)\n loss = self.loss_function(\n output.view((- 1), ntokens), targets)\n if use_amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), clip)\n optimizer.step()\n total_loss += loss.data\n hidden = self._repackage_hidden(hidden)\n del loss, output, rnn_output\n if (((batch % self.log_interval) == 0) and (batch > 0)):\n cur_loss = (total_loss.item() / self.log_interval)\n elapsed = (time.time() - start_time)\n log.info('| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(\n curr_split, number_of_splits, batch, (len(train_data) // sequence_length), ((elapsed * 1000) / self.log_interval), cur_loss, math.exp(cur_loss)))\n total_loss = 0\n start_time = time.time()\n log.info(('%d seconds for train split %d' %\n ((time.time() - split_start_time), curr_split)))\n self.model.eval()\n val_loss = self.evaluate(\n val_data, mini_batch_size, sequence_length)\n scheduler.step(val_loss)\n log.info('best loss so far {:5.2f}'.format(best_val_loss))\n log.info(self.model.generate_text())\n if checkpoint:\n self.model.save_checkpoint(\n (base_path / 'checkpoint.pt'), optimizer, epoch, curr_split, best_val_loss)\n if (val_loss < best_val_loss):\n self.model.best_score = best_val_loss\n self.model.save(savefile)\n best_val_loss = val_loss\n log.info(('-' * 89))\n summary = '| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | valid ppl {:8.2f} | learning rate {:3.4f}'.format(\n curr_split, number_of_splits, (epoch + 1), (time.time() - split_start_time), val_loss, math.exp(val_loss), learning_rate)\n with open(loss_txt, 'a') as myfile:\n myfile.write(('%s\\n' % summary))\n log.info(summary)\n log.info(('-' * 89))\n log.info(('Epoch time: %.2f' %\n (time.time() - epoch_start_time)))\n except KeyboardInterrupt:\n log.info(('-' * 89))\n log.info('Exiting from training early')\n test_data = self._batchify(self.corpus.test, mini_batch_size)\n test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)\n summary = 'TEST: valid loss {:5.2f} | valid ppl {:8.2f}'.format(\n test_loss, math.exp(test_loss))\n with open(loss_txt, 'a') as myfile:\n myfile.write(('%s\\n' % summary))\n log.info(summary)\n log.info(('-' * 89))\n\n def evaluate(self, data_source, eval_batch_size, sequence_length):\n self.model.eval()\n with torch.no_grad():\n total_loss = 0\n ntokens = len(self.corpus.dictionary)\n hidden = self.model.init_hidden(eval_batch_size)\n for i in range(0, (data_source.size(0) - 1), sequence_length):\n (data, targets) = self._get_batch(\n data_source, i, sequence_length)\n (prediction, rnn_output, hidden) = self.model.forward(data, hidden)\n output_flat = prediction.view((- 1), ntokens)\n total_loss += (len(data) *\n self.loss_function(output_flat, targets).data)\n hidden = self._repackage_hidden(hidden)\n return (total_loss.item() / len(data_source))\n\n @staticmethod\n def _batchify(data, batch_size):\n nbatch = (data.size(0) // batch_size)\n data = data.narrow(0, 0, (nbatch * batch_size))\n data = data.view(batch_size, (- 1)).t().contiguous()\n 
return data\n\n @staticmethod\n def _get_batch(source, i, sequence_length):\n seq_len = min(sequence_length, ((len(source) - 1) - i))\n data = source[i:(i + seq_len)].clone().detach()\n target = source[(i + 1):((i + 1) + seq_len)\n ].view((- 1)).clone().detach()\n data = data.to(flair.device)\n target = target.to(flair.device)\n return (data, target)\n\n @staticmethod\n def _repackage_hidden(h):\n 'Wraps hidden states in new tensors, to detach them from their history.'\n return tuple((v.clone().detach() for v in h))\n\n @staticmethod\n def load_from_checkpoint(checkpoint_file, corpus, optimizer=SGD):\n checkpoint = LanguageModel.load_checkpoint(checkpoint_file)\n return LanguageModelTrainer(checkpoint['model'], corpus, optimizer, epoch=checkpoint['epoch'], split=checkpoint['split'], loss=checkpoint['loss'], optimizer_state=checkpoint['optimizer_state_dict'])\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
] |
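The `_batchify`/`_get_batch` pair in `LanguageModelTrainer` above is the standard language-model data layout: a flat id sequence is folded into `batch_size` parallel columns, and each step reads a window together with the same window shifted one token ahead as the target. A self-contained sketch of just that reshaping (toy ids; the two functions mirror the static methods above, minus the device transfer):

```python
import torch

def batchify(data: torch.Tensor, batch_size: int) -> torch.Tensor:
    # Drop the tail that doesn't fill a full column, then fold into columns.
    nbatch = data.size(0) // batch_size
    data = data.narrow(0, 0, nbatch * batch_size)
    return data.view(batch_size, -1).t().contiguous()

def get_batch(source: torch.Tensor, i: int, sequence_length: int):
    # Inputs are rows i..i+seq_len; targets are the same rows shifted by one.
    seq_len = min(sequence_length, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].reshape(-1)
    return data, target

ids = torch.arange(26)          # toy "token ids"
batched = batchify(ids, 4)      # shape (6, 4): 6 time steps, 4 streams
data, target = get_batch(batched, 0, 3)
print(batched.shape, data.shape, target.shape)  # (6, 4) (3, 4) (12,)
```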
mecanimatico/codigo_edp | [
"42080a4eb0f604873f9743ff0d0b8afde0735181"
] | [
"17abriledp.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 17 09:57:12 2020\n\n@author: Heber\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n#%% valor exacto d ela derivada\nup = np.cos(1.0)\n\nh = 0.1\nup_aprox = (np.sin(1+h)-np.sin(1))/h\nerror = up - up_aprox\n\nprint (\"Valor aproximado: \",up_aprox)\nprint (\"Valor del error: \",error)\n#%%-----------------------------\n# muestra \n\nlist = [0.1, 0.01, 0.001, 0.0001, 0.00001]\n\naprox_values = []\nerrores_values = []\n\n\n# aproximacion a la segunda derivada \n\nerrores_values2 = []\naprox_values2 = []\nfor h in list:\n aux = (np.sin(1+h) - np.sin(1))/h\n aprox_values.append(aux)\n errores_values.append(up - aux)\n # print(h, up_aprox,error)\n # formula de segundo orden\n aux_2 = (np.sin(1+h)-np.sin(1-h))/(2*h)\n aprox_values2.append(aux_2)\n errores_values2.append(up - aux_2)\n\nplt.loglog(list,errores_values,'o-',list,errores_values2,'o-')\nplt.grid(True)\n#%%---------------------\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nlist = [0.1, 0.01, 0.001, 0.0001]\nvalor_exacto = 6*np.exp(1.0)\nvalor_aprox = []\nvalor_error = []\nfor h in list:\n aux = (np.exp((1+h)**2)-2*np.exp(1.0) + np.exp((1-h)**2))/h**2\n valor_aprox.append(aux)\n aux2 = abs(valor_exacto - aux)\n valor_error.append(aux2) \nplt.grid(True) \nplt.loglog(list,valor_error,'o-')\n\n#list,valor_aprox, 'o-'\n\n\n"
] | [
[
"matplotlib.pyplot.grid",
"numpy.cos",
"numpy.exp",
"matplotlib.pyplot.loglog",
"numpy.sin"
]
] |
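The script above compares a first-order forward difference with a second-order centered difference for f(x) = sin(x) at x = 1, and plots both errors on a loglog scale (slopes of roughly 1 and 2). The same convergence orders can be read off numerically; a small standalone check:

```python
import numpy as np

x = 1.0
exact = np.cos(x)  # derivative of sin at x
for h in [1e-1, 1e-2, 1e-3]:
    fwd = (np.sin(x + h) - np.sin(x)) / h            # O(h) error
    ctr = (np.sin(x + h) - np.sin(x - h)) / (2 * h)  # O(h**2) error
    print(f"h={h:g}  forward={abs(exact - fwd):.2e}  central={abs(exact - ctr):.2e}")
```

Each tenfold reduction in h cuts the forward error by about 10x and the centered error by about 100x, matching the plotted slopes.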
zfrenchee/pandas | [
"d28f9326de26882a9b4dc0bee9dec5c598747190"
] | [
"pandas/tests/indexes/test_category.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport pytest\n\nimport pandas.util.testing as tm\nfrom pandas.core.indexes.api import Index, CategoricalIndex\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom .common import Base\n\nfrom pandas.compat import range, PY3\n\nimport numpy as np\n\nfrom pandas import Categorical, IntervalIndex, compat\nfrom pandas.util.testing import assert_almost_equal\nimport pandas.core.config as cf\nimport pandas as pd\n\nif PY3:\n unicode = lambda x: x\n\n\nclass TestCategoricalIndex(Base):\n _holder = CategoricalIndex\n\n def setup_method(self, method):\n self.indices = dict(catIndex=tm.makeCategoricalIndex(100))\n self.setup_indices()\n\n def create_index(self, categories=None, ordered=False):\n if categories is None:\n categories = list('cab')\n return CategoricalIndex(\n list('aabbca'), categories=categories, ordered=ordered)\n\n def test_construction(self):\n\n ci = self.create_index(categories=list('abcd'))\n categories = ci.categories\n\n result = Index(ci)\n tm.assert_index_equal(result, ci, exact=True)\n assert not result.ordered\n\n result = Index(ci.values)\n tm.assert_index_equal(result, ci, exact=True)\n assert not result.ordered\n\n # empty\n result = CategoricalIndex(categories=categories)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))\n assert not result.ordered\n\n # passing categories\n result = CategoricalIndex(list('aabbca'), categories=categories)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(result.codes,\n np.array([0, 0, 1,\n 1, 2, 0], dtype='int8'))\n\n c = pd.Categorical(list('aabbca'))\n result = CategoricalIndex(c)\n tm.assert_index_equal(result.categories, Index(list('abc')))\n tm.assert_numpy_array_equal(result.codes,\n np.array([0, 0, 1,\n 1, 2, 0], dtype='int8'))\n assert not result.ordered\n\n result = CategoricalIndex(c, categories=categories)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(result.codes,\n np.array([0, 0, 1,\n 1, 2, 0], dtype='int8'))\n assert not result.ordered\n\n ci = CategoricalIndex(c, categories=list('abcd'))\n result = CategoricalIndex(ci)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(result.codes,\n np.array([0, 0, 1,\n 1, 2, 0], dtype='int8'))\n assert not result.ordered\n\n result = CategoricalIndex(ci, categories=list('ab'))\n tm.assert_index_equal(result.categories, Index(list('ab')))\n tm.assert_numpy_array_equal(result.codes,\n np.array([0, 0, 1,\n 1, -1, 0], dtype='int8'))\n assert not result.ordered\n\n result = CategoricalIndex(ci, categories=list('ab'), ordered=True)\n tm.assert_index_equal(result.categories, Index(list('ab')))\n tm.assert_numpy_array_equal(result.codes,\n np.array([0, 0, 1,\n 1, -1, 0], dtype='int8'))\n assert result.ordered\n\n result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True)\n expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True,\n dtype='category')\n tm.assert_index_equal(result, expected, exact=True)\n\n # turn me to an Index\n result = Index(np.array(ci))\n assert isinstance(result, Index)\n assert not isinstance(result, CategoricalIndex)\n\n def test_construction_with_dtype(self):\n\n # specify dtype\n ci = self.create_index(categories=list('abc'))\n\n result = Index(np.array(ci), dtype='category')\n tm.assert_index_equal(result, ci, exact=True)\n\n result = Index(np.array(ci).tolist(), dtype='category')\n 
tm.assert_index_equal(result, ci, exact=True)\n\n # these are generally only equal when the categories are reordered\n ci = self.create_index()\n\n result = Index(\n np.array(ci), dtype='category').reorder_categories(ci.categories)\n tm.assert_index_equal(result, ci, exact=True)\n\n # make sure indexes are handled\n expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],\n ordered=True)\n idx = Index(range(3))\n result = CategoricalIndex(idx, categories=idx, ordered=True)\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_construction_with_categorical_dtype(self):\n # construction with CategoricalDtype\n # GH18109\n data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True\n dtype = CategoricalDtype(categories=cats, ordered=ordered)\n\n result = pd.CategoricalIndex(data, dtype=dtype)\n expected = pd.CategoricalIndex(data, categories=cats,\n ordered=ordered)\n tm.assert_index_equal(result, expected, exact=True)\n\n # error to combine categories or ordered and dtype keywords args\n with pytest.raises(ValueError, match=\"Cannot specify both `dtype` and \"\n \"`categories` or `ordered`.\"):\n pd.CategoricalIndex(data, categories=cats, dtype=dtype)\n with pytest.raises(ValueError, match=\"Cannot specify both `dtype` and \"\n \"`categories` or `ordered`.\"):\n pd.CategoricalIndex(data, ordered=ordered, dtype=dtype)\n\n def test_create_categorical(self):\n # https://github.com/pandas-dev/pandas/pull/17513\n # The public CI constructor doesn't hit this code path with\n # instances of CategoricalIndex, but we still want to test the code\n ci = CategoricalIndex(['a', 'b', 'c'])\n # First ci is self, second ci is data.\n result = CategoricalIndex._create_categorical(ci, ci)\n expected = Categorical(['a', 'b', 'c'])\n tm.assert_categorical_equal(result, expected)\n\n def test_disallow_set_ops(self):\n\n # GH 10039\n # set ops (+/-) raise TypeError\n idx = pd.Index(pd.Categorical(['a', 'b']))\n\n pytest.raises(TypeError, lambda: idx - idx)\n pytest.raises(TypeError, lambda: idx + idx)\n pytest.raises(TypeError, lambda: idx - ['a', 'b'])\n pytest.raises(TypeError, lambda: idx + ['a', 'b'])\n pytest.raises(TypeError, lambda: ['a', 'b'] - idx)\n pytest.raises(TypeError, lambda: ['a', 'b'] + idx)\n\n def test_method_delegation(self):\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))\n result = ci.set_categories(list('cab'))\n tm.assert_index_equal(result, CategoricalIndex(\n list('aabbca'), categories=list('cab')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n result = ci.rename_categories(list('efg'))\n tm.assert_index_equal(result, CategoricalIndex(\n list('ffggef'), categories=list('efg')))\n\n # GH18862 (let rename_categories take callables)\n result = ci.rename_categories(lambda x: x.upper())\n tm.assert_index_equal(result, CategoricalIndex(\n list('AABBCA'), categories=list('CAB')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n result = ci.add_categories(['d'])\n tm.assert_index_equal(result, CategoricalIndex(\n list('aabbca'), categories=list('cabd')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cab'))\n result = ci.remove_categories(['c'])\n tm.assert_index_equal(result, CategoricalIndex(\n list('aabb') + [np.nan] + ['a'], categories=list('ab')))\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))\n result = ci.as_unordered()\n tm.assert_index_equal(result, ci)\n\n ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))\n result = ci.as_ordered()\n 
tm.assert_index_equal(result, CategoricalIndex(\n list('aabbca'), categories=list('cabdef'), ordered=True))\n\n # invalid\n pytest.raises(ValueError, lambda: ci.set_categories(\n list('cab'), inplace=True))\n\n def test_contains(self):\n\n ci = self.create_index(categories=list('cabdef'))\n\n assert 'a' in ci\n assert 'z' not in ci\n assert 'e' not in ci\n assert np.nan not in ci\n\n # assert codes NOT in index\n assert 0 not in ci\n assert 1 not in ci\n\n ci = CategoricalIndex(\n list('aabbca') + [np.nan], categories=list('cabdef'))\n assert np.nan in ci\n\n def test_min_max(self):\n\n ci = self.create_index(ordered=False)\n pytest.raises(TypeError, lambda: ci.min())\n pytest.raises(TypeError, lambda: ci.max())\n\n ci = self.create_index(ordered=True)\n\n assert ci.min() == 'c'\n assert ci.max() == 'b'\n\n def test_map(self):\n ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),\n ordered=True)\n result = ci.map(lambda x: x.lower())\n exp = pd.CategoricalIndex(list('ababc'), categories=list('cba'),\n ordered=True)\n tm.assert_index_equal(result, exp)\n\n ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),\n ordered=False, name='XXX')\n result = ci.map(lambda x: x.lower())\n exp = pd.CategoricalIndex(list('ababc'), categories=list('bac'),\n ordered=False, name='XXX')\n tm.assert_index_equal(result, exp)\n\n # GH 12766: Return an index not an array\n tm.assert_index_equal(ci.map(lambda x: 1),\n Index(np.array([1] * 5, dtype=np.int64),\n name='XXX'))\n\n # change categories dtype\n ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),\n ordered=False)\n\n def f(x):\n return {'A': 10, 'B': 20, 'C': 30}.get(x)\n\n result = ci.map(f)\n exp = pd.CategoricalIndex([10, 20, 10, 20, 30],\n categories=[20, 10, 30],\n ordered=False)\n tm.assert_index_equal(result, exp)\n\n result = ci.map(pd.Series([10, 20, 30], index=['A', 'B', 'C']))\n tm.assert_index_equal(result, exp)\n\n result = ci.map({'A': 10, 'B': 20, 'C': 30})\n tm.assert_index_equal(result, exp)\n\n def test_map_with_categorical_series(self):\n # GH 12756\n a = pd.Index([1, 2, 3, 4])\n b = pd.Series([\"even\", \"odd\", \"even\", \"odd\"],\n dtype=\"category\")\n c = pd.Series([\"even\", \"odd\", \"even\", \"odd\"])\n\n exp = CategoricalIndex([\"odd\", \"even\", \"odd\", np.nan])\n tm.assert_index_equal(a.map(b), exp)\n exp = pd.Index([\"odd\", \"even\", \"odd\", np.nan])\n tm.assert_index_equal(a.map(c), exp)\n\n @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])\n def test_where(self, klass):\n i = self.create_index()\n cond = [True] * len(i)\n expected = i\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n cond = [False] + [True] * (len(i) - 1)\n expected = CategoricalIndex([np.nan] + i[1:].tolist(),\n categories=i.categories)\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n def test_append(self):\n\n ci = self.create_index()\n categories = ci.categories\n\n # append cats with the same categories\n result = ci[:3].append(ci[3:])\n tm.assert_index_equal(result, ci, exact=True)\n\n foos = [ci[:1], ci[1:3], ci[3:]]\n result = foos[0].append(foos[1:])\n tm.assert_index_equal(result, ci, exact=True)\n\n # empty\n result = ci.append([])\n tm.assert_index_equal(result, ci, exact=True)\n\n # appending with different categories or reordered is not ok\n pytest.raises(\n TypeError,\n lambda: ci.append(ci.values.set_categories(list('abcd'))))\n pytest.raises(\n TypeError,\n lambda: ci.append(ci.values.reorder_categories(list('abc'))))\n\n # 
with objects\n result = ci.append(Index(['c', 'a']))\n expected = CategoricalIndex(list('aabbcaca'), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # invalid objects\n pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))\n\n # GH14298 - if base object is not categorical -> coerce to object\n result = Index(['c', 'a']).append(ci)\n expected = Index(list('caaabbca'))\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_insert(self):\n\n ci = self.create_index()\n categories = ci.categories\n\n # test 0th element\n result = ci.insert(0, 'a')\n expected = CategoricalIndex(list('aaabbca'), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # test Nth element that follows Python list behavior\n result = ci.insert(-1, 'a')\n expected = CategoricalIndex(list('aabbcaa'), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # test empty\n result = CategoricalIndex(categories=categories).insert(0, 'a')\n expected = CategoricalIndex(['a'], categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # invalid\n pytest.raises(TypeError, lambda: ci.insert(0, 'd'))\n\n # GH 18295 (test missing)\n expected = CategoricalIndex(['a', np.nan, 'a', 'b', 'c', 'b'])\n for na in (np.nan, pd.NaT, None):\n result = CategoricalIndex(list('aabcb')).insert(1, na)\n tm.assert_index_equal(result, expected)\n\n def test_delete(self):\n\n ci = self.create_index()\n categories = ci.categories\n\n result = ci.delete(0)\n expected = CategoricalIndex(list('abbca'), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n result = ci.delete(-1)\n expected = CategoricalIndex(list('aabbc'), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n with pytest.raises((IndexError, ValueError)):\n # Either depending on NumPy version\n ci.delete(10)\n\n def test_astype(self):\n\n ci = self.create_index()\n result = ci.astype(object)\n tm.assert_index_equal(result, Index(np.array(ci)))\n\n # this IS equal, but not the same class\n assert result.equals(ci)\n assert isinstance(result, Index)\n assert not isinstance(result, CategoricalIndex)\n\n # interval\n ii = IntervalIndex.from_arrays(left=[-0.001, 2.0],\n right=[2, 4],\n closed='right')\n\n ci = CategoricalIndex(Categorical.from_codes(\n [0, 1, -1], categories=ii, ordered=True))\n\n result = ci.astype('interval')\n expected = ii.take([0, 1, -1])\n tm.assert_index_equal(result, expected)\n\n result = IntervalIndex.from_intervals(result.values)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize('name', [None, 'foo'])\n @pytest.mark.parametrize('dtype_ordered', [True, False])\n @pytest.mark.parametrize('index_ordered', [True, False])\n def test_astype_category(self, name, dtype_ordered, index_ordered):\n # GH 18630\n index = self.create_index(ordered=index_ordered)\n if name:\n index = index.rename(name)\n\n # standard categories\n dtype = CategoricalDtype(ordered=dtype_ordered)\n result = index.astype(dtype)\n expected = CategoricalIndex(index.tolist(),\n name=name,\n categories=index.categories,\n ordered=dtype_ordered)\n tm.assert_index_equal(result, expected)\n\n # non-standard categories\n dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered)\n result = index.astype(dtype)\n expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n if dtype_ordered is False:\n # dtype='category' can't specify ordered, so only 
test once\n result = index.astype('category')\n expected = index\n tm.assert_index_equal(result, expected)\n\n def test_reindex_base(self):\n # Determined by cat ordering.\n idx = CategoricalIndex(list(\"cab\"), categories=list(\"cab\"))\n expected = np.arange(len(idx), dtype=np.intp)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with tm.assert_raises_regex(ValueError, \"Invalid fill method\"):\n idx.get_indexer(idx, method=\"invalid\")\n\n def test_reindexing(self):\n np.random.seed(123456789)\n\n ci = self.create_index()\n oidx = Index(np.array(ci))\n\n for n in [1, 2, 5, len(ci)]:\n finder = oidx[np.random.randint(0, len(ci), size=n)]\n expected = oidx.get_indexer_non_unique(finder)[0]\n\n actual = ci.get_indexer(finder)\n tm.assert_numpy_array_equal(expected, actual)\n\n # see gh-17323\n #\n # Even when indexer is equal to the\n # members in the index, we should\n # respect duplicates instead of taking\n # the fast-track path.\n for finder in [list(\"aabbca\"), list(\"aababca\")]:\n expected = oidx.get_indexer_non_unique(finder)[0]\n\n actual = ci.get_indexer(finder)\n tm.assert_numpy_array_equal(expected, actual)\n\n def test_reindex_dtype(self):\n c = CategoricalIndex(['a', 'b', 'c', 'a'])\n res, indexer = c.reindex(['a', 'c'])\n tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)\n tm.assert_numpy_array_equal(indexer,\n np.array([0, 3, 2], dtype=np.intp))\n\n c = CategoricalIndex(['a', 'b', 'c', 'a'])\n res, indexer = c.reindex(Categorical(['a', 'c']))\n\n exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer,\n np.array([0, 3, 2], dtype=np.intp))\n\n c = CategoricalIndex(['a', 'b', 'c', 'a'],\n categories=['a', 'b', 'c', 'd'])\n res, indexer = c.reindex(['a', 'c'])\n exp = Index(['a', 'a', 'c'], dtype='object')\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer,\n np.array([0, 3, 2], dtype=np.intp))\n\n c = CategoricalIndex(['a', 'b', 'c', 'a'],\n categories=['a', 'b', 'c', 'd'])\n res, indexer = c.reindex(Categorical(['a', 'c']))\n exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer,\n np.array([0, 3, 2], dtype=np.intp))\n\n def test_reindex_empty_index(self):\n # See GH16770\n c = CategoricalIndex([])\n res, indexer = c.reindex(['a', 'b'])\n tm.assert_index_equal(res, Index(['a', 'b']), exact=True)\n tm.assert_numpy_array_equal(indexer,\n np.array([-1, -1], dtype=np.intp))\n\n def test_is_monotonic(self):\n c = CategoricalIndex([1, 2, 3])\n assert c.is_monotonic_increasing\n assert not c.is_monotonic_decreasing\n\n c = CategoricalIndex([1, 2, 3], ordered=True)\n assert c.is_monotonic_increasing\n assert not c.is_monotonic_decreasing\n\n c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])\n assert not c.is_monotonic_increasing\n assert c.is_monotonic_decreasing\n\n c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])\n assert not c.is_monotonic_increasing\n assert not c.is_monotonic_decreasing\n\n c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)\n assert not c.is_monotonic_increasing\n assert c.is_monotonic_decreasing\n\n # non lexsorted categories\n categories = [9, 0, 1, 2, 3]\n\n c = CategoricalIndex([9, 0], categories=categories)\n assert c.is_monotonic_increasing\n assert not c.is_monotonic_decreasing\n\n c = CategoricalIndex([0, 1], categories=categories)\n assert 
c.is_monotonic_increasing\n assert not c.is_monotonic_decreasing\n\n def test_duplicates(self):\n\n idx = CategoricalIndex([0, 0, 0], name='foo')\n assert not idx.is_unique\n assert idx.has_duplicates\n\n expected = CategoricalIndex([0], name='foo')\n tm.assert_index_equal(idx.drop_duplicates(), expected)\n tm.assert_index_equal(idx.unique(), expected)\n\n def test_get_indexer(self):\n\n idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))\n idx2 = CategoricalIndex(list('abf'))\n\n for indexer in [idx2, list('abf'), Index(list('abf'))]:\n r1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))\n\n pytest.raises(NotImplementedError,\n lambda: idx2.get_indexer(idx1, method='pad'))\n pytest.raises(NotImplementedError,\n lambda: idx2.get_indexer(idx1, method='backfill'))\n pytest.raises(NotImplementedError,\n lambda: idx2.get_indexer(idx1, method='nearest'))\n\n def test_get_loc(self):\n # GH 12531\n cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))\n idx1 = Index(list('abcde'))\n assert cidx1.get_loc('a') == idx1.get_loc('a')\n assert cidx1.get_loc('e') == idx1.get_loc('e')\n\n for i in [cidx1, idx1]:\n with pytest.raises(KeyError):\n i.get_loc('NOT-EXIST')\n\n # non-unique\n cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))\n idx2 = Index(list('aacded'))\n\n # results in bool array\n res = cidx2.get_loc('d')\n tm.assert_numpy_array_equal(res, idx2.get_loc('d'))\n tm.assert_numpy_array_equal(res, np.array([False, False, False,\n True, False, True]))\n # unique element results in scalar\n res = cidx2.get_loc('e')\n assert res == idx2.get_loc('e')\n assert res == 4\n\n for i in [cidx2, idx2]:\n with pytest.raises(KeyError):\n i.get_loc('NOT-EXIST')\n\n # non-unique, slicable\n cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))\n idx3 = Index(list('aabbb'))\n\n # results in slice\n res = cidx3.get_loc('a')\n assert res == idx3.get_loc('a')\n assert res == slice(0, 2, None)\n\n res = cidx3.get_loc('b')\n assert res == idx3.get_loc('b')\n assert res == slice(2, 5, None)\n\n for i in [cidx3, idx3]:\n with pytest.raises(KeyError):\n i.get_loc('c')\n\n def test_repr_roundtrip(self):\n\n ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)\n str(ci)\n tm.assert_index_equal(eval(repr(ci)), ci, exact=True)\n\n # formatting\n if PY3:\n str(ci)\n else:\n compat.text_type(ci)\n\n # long format\n # this is not reprable\n ci = CategoricalIndex(np.random.randint(0, 5, size=100))\n if PY3:\n str(ci)\n else:\n compat.text_type(ci)\n\n def test_isin(self):\n\n ci = CategoricalIndex(\n list('aabca') + [np.nan], categories=['c', 'a', 'b'])\n tm.assert_numpy_array_equal(\n ci.isin(['c']),\n np.array([False, False, False, True, False, False]))\n tm.assert_numpy_array_equal(\n ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))\n tm.assert_numpy_array_equal(\n ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))\n\n # mismatched categorical -> coerced to ndarray so doesn't matter\n result = ci.isin(ci.set_categories(list('abcdefghi')))\n expected = np.array([True] * 6)\n tm.assert_numpy_array_equal(result, expected)\n\n result = ci.isin(ci.set_categories(list('defghi')))\n expected = np.array([False] * 5 + [True])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_identical(self):\n\n ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)\n ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],\n ordered=True)\n assert ci1.identical(ci1)\n assert 
ci1.identical(ci1.copy())\n assert not ci1.identical(ci2)\n\n def test_ensure_copied_data(self):\n # gh-12309: Check the \"copy\" argument of each\n # Index.__new__ is honored.\n #\n # Must be tested separately from other indexes because\n # self.value is not an ndarray.\n _base = lambda ar: ar if ar.base is None else ar.base\n\n for index in self.indices.values():\n result = CategoricalIndex(index.values, copy=True)\n tm.assert_index_equal(index, result)\n assert _base(index.values) is not _base(result.values)\n\n result = CategoricalIndex(index.values, copy=False)\n assert _base(index.values) is _base(result.values)\n\n def test_equals_categorical(self):\n ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)\n ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],\n ordered=True)\n\n assert ci1.equals(ci1)\n assert not ci1.equals(ci2)\n assert ci1.equals(ci1.astype(object))\n assert ci1.astype(object).equals(ci1)\n\n assert (ci1 == ci1).all()\n assert not (ci1 != ci1).all()\n assert not (ci1 > ci1).all()\n assert not (ci1 < ci1).all()\n assert (ci1 <= ci1).all()\n assert (ci1 >= ci1).all()\n\n assert not (ci1 == 1).all()\n assert (ci1 == Index(['a', 'b'])).all()\n assert (ci1 == ci1.values).all()\n\n # invalid comparisons\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n ci1 == Index(['a', 'b', 'c'])\n pytest.raises(TypeError, lambda: ci1 == ci2)\n pytest.raises(\n TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))\n pytest.raises(\n TypeError,\n lambda: ci1 == Categorical(ci1.values, categories=list('abc')))\n\n # tests\n # make sure that we are testing for category inclusion properly\n ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])\n assert not ci.equals(list('aabca'))\n # Same categories, but different order\n # Unordered\n assert ci.equals(CategoricalIndex(list('aabca')))\n # Ordered\n assert not ci.equals(CategoricalIndex(list('aabca'), ordered=True))\n assert ci.equals(ci.copy())\n\n ci = CategoricalIndex(list('aabca') + [np.nan],\n categories=['c', 'a', 'b'])\n assert not ci.equals(list('aabca'))\n assert not ci.equals(CategoricalIndex(list('aabca')))\n assert ci.equals(ci.copy())\n\n ci = CategoricalIndex(list('aabca') + [np.nan],\n categories=['c', 'a', 'b'])\n assert not ci.equals(list('aabca') + [np.nan])\n assert ci.equals(CategoricalIndex(list('aabca') + [np.nan]))\n assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan],\n ordered=True))\n assert ci.equals(ci.copy())\n\n def test_string_categorical_index_repr(self):\n # short\n idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')\"\"\" # noqa\n assert unicode(idx) == expected\n\n # multiple lines\n idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', 
u'a',\n u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',\n u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],\n categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # truncated\n idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n ...\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',\n u'ccc', u'a',\n ...\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n u'bb', u'ccc'],\n categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # larger categories\n idx = pd.CategoricalIndex(list('abcdefghijklmmo'))\n if PY3:\n expected = u\"\"\"CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'm', 'o'],\n categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',\n u'k', u'l', u'm', u'm', u'o'],\n categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # short\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n assert unicode(idx) == expected\n\n # multiple lines\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',\n u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # truncated\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ',\n ...\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # larger categories\n idx = 
pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',\n 'す', 'せ', 'そ'],\n categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',\n u'さ', u'し', u'す', u'せ', u'そ'],\n categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # Emable Unicode option -----------------------------------------\n with cf.option_context('display.unicode.east_asian_width', True):\n\n # short\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n assert unicode(idx) == expected\n\n # multiple lines\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # truncated\n idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',\n u'いい', u'ううう', u'あ',\n ...\n u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',\n u'ううう', u'あ', u'いい', u'ううう'],\n categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n # larger categories\n idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))\n if PY3:\n expected = u\"\"\"CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',\n 'さ', 'し', 'す', 'せ', 'そ'],\n categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',\n u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],\n categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert unicode(idx) == expected\n\n def test_fillna_categorical(self):\n # GH 11343\n idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')\n # fill by value in categories\n exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')\n tm.assert_index_equal(idx.fillna(1.0), exp)\n\n 
# fill by value not in categories raises ValueError\n with tm.assert_raises_regex(ValueError,\n 'fill value must be in categories'):\n idx.fillna(2.0)\n\n def test_take_fill_value(self):\n # GH 12631\n\n # numeric category\n idx = pd.CategoricalIndex([1, 2, 3], name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.CategoricalIndex([2, 1, 3], name='xxx')\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],\n name='xxx')\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.CategoricalIndex([2, 1, 3], name='xxx')\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # object category\n idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),\n ordered=True, name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),\n ordered=True, name='xxx')\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.CategoricalIndex(['B', 'C', np.nan],\n categories=list('ABC'), ordered=True,\n name='xxx')\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),\n ordered=True, name='xxx')\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_take_fill_value_datetime(self):\n\n # datetime category\n idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n name='xxx')\n idx = pd.CategoricalIndex(idx)\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx')\n expected = pd.CategoricalIndex(expected)\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],\n name='xxx')\n exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])\n expected = pd.CategoricalIndex(expected, categories=exp_cats)\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx')\n expected = pd.CategoricalIndex(expected)\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assert_raises_regex(ValueError, 
msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_take_invalid_kwargs(self):\n idx = pd.CategoricalIndex([1, 2, 3], name='foo')\n indices = [1, 0, -1]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assert_raises_regex(TypeError, msg, idx.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, mode='clip')\n"
] | [
[
"pandas.Series",
"pandas.CategoricalIndex",
"numpy.random.seed",
"pandas.Categorical",
"pandas.util.testing.assert_categorical_equal",
"pandas.util.testing.makeCategoricalIndex",
"pandas.IntervalIndex.from_arrays",
"pandas.core.indexes.api.Index",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.Categorical.from_codes",
"pandas.util.testing.assert_index_equal",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_raises_regex",
"pandas.core.dtypes.dtypes.CategoricalDtype",
"pandas.core.indexes.api.CategoricalIndex",
"pandas.IntervalIndex.from_intervals",
"pandas.core.indexes.api.CategoricalIndex._create_categorical",
"pandas.compat.range",
"pandas.compat.text_type",
"pandas.core.config.option_context",
"numpy.array",
"numpy.random.randint"
]
] |
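For readers skimming the test file above, a few of the behaviors it pins down can be reproduced directly (a short sketch against a recent pandas; the exact indexer dtype may vary by version):

```python
import pandas as pd

# Lookups against a CategoricalIndex return positions, with -1 for labels
# that are not present (here 'f' is outside the categories).
ci = pd.CategoricalIndex(list("abcde"), categories=list("edabc"))
print(ci.get_indexer(["a", "b", "f"]))   # [0 1 -1]

# min/max follow the *category* order when ordered=True, not lexical order.
oci = pd.CategoricalIndex(list("abca"), categories=list("cab"), ordered=True)
print(oci.min(), oci.max())              # c b
```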
adammoody/Megatron-DeepSpeed | [
"972211163608818fe9e5ba821246f18d0a5dc264"
] | [
"megatron/checkpointing.py"
] | [
"# coding=utf-8\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Input/output checkpointing.\"\"\"\n\nimport os\nimport random\nimport sys\nimport numpy as np\n\nimport torch\n\nfrom megatron.global_vars import codecarbon_tracker_flush\nfrom megatron import (get_args,\n mpu,\n print_rank_0,\n update_num_microbatches,\n utils)\n\n_CHECKPOINT_VERSION = None\n\ndef set_checkpoint_version(value):\n global _CHECKPOINT_VERSION\n if _CHECKPOINT_VERSION is not None:\n assert _CHECKPOINT_VERSION == value, \\\n \"checkpoint versions do not match\"\n _CHECKPOINT_VERSION = value\n\ndef get_checkpoint_version():\n global _CHECKPOINT_VERSION\n return _CHECKPOINT_VERSION\n\ndef check_checkpoint_args(checkpoint_args):\n \"\"\"Ensure fixed arguments for a model are the same for the input\n arguments and the one retrieved from checkpoint.\"\"\"\n args = get_args()\n\n def _compare(arg_name, old_arg_name=None):\n if old_arg_name is not None:\n checkpoint_value = getattr(checkpoint_args, old_arg_name)\n else:\n checkpoint_value = getattr(checkpoint_args, arg_name)\n args_value = getattr(args, arg_name)\n error_message = '{} value from checkpoint ({}) is not equal to the ' \\\n 'input argument value ({}).'.format(\n arg_name, checkpoint_value, args_value)\n assert checkpoint_value == args_value, error_message\n\n _compare('num_layers')\n _compare('hidden_size')\n _compare('num_attention_heads')\n _compare('max_position_embeddings')\n _compare('position_embedding_type')\n if args.vocab_file:\n _compare('make_vocab_size_divisible_by')\n _compare('padded_vocab_size')\n _compare('tokenizer_type')\n if get_checkpoint_version() < 3.0:\n _compare('tensor_model_parallel_size',\n old_arg_name='model_parallel_size')\n if get_checkpoint_version() >= 3.0:\n _compare('tensor_model_parallel_size')\n _compare('pipeline_model_parallel_size')\n\n\ndef ensure_directory_exists(filename):\n \"\"\"Build filename's path if it does not already exists.\"\"\"\n dirname = os.path.dirname(filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n\ndef get_checkpoint_name(checkpoints_path, iteration,\n release=False):\n \"\"\"A unified checkpoint name.\"\"\"\n if release:\n directory = 'release'\n else:\n directory = 'iter_{:07d}'.format(iteration)\n # Use both the tensor and pipeline MP rank.\n if mpu.get_pipeline_model_parallel_world_size() == 1:\n return os.path.join(checkpoints_path, directory,\n 'mp_rank_{:02d}'.format(\n mpu.get_tensor_model_parallel_rank()),\n 'model_optim_rng.pt')\n return os.path.join(checkpoints_path, directory,\n 'mp_rank_{:02d}_{:03d}'.format(\n mpu.get_tensor_model_parallel_rank(),\n mpu.get_pipeline_model_parallel_rank()),\n 'model_optim_rng.pt')\n\n\ndef get_checkpoint_tracker_filename(checkpoints_path):\n \"\"\"Tracker file rescords the latest chckpoint during\n training to restart from.\"\"\"\n return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt')\n\n\ndef save_checkpoint(iteration, model, optimizer, 
lr_scheduler):\n \"\"\"Save a model checkpoint.\"\"\"\n args = get_args()\n\n # Only rank zero of the data parallel writes to the disk.\n if not args.deepspeed:\n model = utils.unwrap_model(model)\n\n print_rank_0('saving checkpoint at iteration {:7d} to {}'.format(\n iteration, args.save))\n\n if not torch.distributed.is_initialized() or mpu.get_data_parallel_rank() == 0 \\\n or args.deepspeed:\n\n # Arguments, iteration, and model.\n state_dict = {}\n state_dict['args'] = args\n state_dict['checkpoint_version'] = 3.0\n state_dict['iteration'] = iteration\n\n # DeepSpeed saves the model/optimizer/scheduler\n if not args.deepspeed:\n if len(model) == 1:\n state_dict['model'] = model[0].state_dict_for_save_checkpoint()\n else:\n for i in range(len(model)):\n mpu.set_virtual_pipeline_model_parallel_rank(i)\n state_dict['model%d' % i] = model[i].state_dict_for_save_checkpoint()\n\n # Optimizer stuff.\n if not args.no_save_optim:\n if optimizer is not None:\n state_dict['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n state_dict['lr_scheduler'] = lr_scheduler.state_dict()\n\n # RNG states.\n if not args.no_save_rng:\n state_dict['random_rng_state'] = random.getstate()\n state_dict['np_rng_state'] = np.random.get_state()\n state_dict['torch_rng_state'] = torch.get_rng_state()\n state_dict['cuda_rng_state'] = torch.cuda.get_rng_state()\n state_dict['rng_tracker_states'] \\\n = mpu.get_cuda_rng_tracker().get_states()\n\n # Save.\n checkpoint_name = get_checkpoint_name(args.save, iteration)\n if not args.deepspeed:\n ensure_directory_exists(checkpoint_name)\n torch.save(state_dict, checkpoint_name)\n\n if args.deepspeed:\n # Saving is a collective communication\n checkpoint_name = get_checkpoint_name(args.save, iteration)\n # Trim off the filename and mp_rank_* directory.\n for _ in range(3):\n checkpoint_name = os.path.dirname(checkpoint_name)\n model[0].save_checkpoint(checkpoint_name, client_state=state_dict)\n\n # Wait so everyone is done (necessary)\n if torch.distributed.is_initialized():\n torch.distributed.barrier()\n\n print_rank_0(' successfully saved checkpoint at iteration {:7d} to {}'.format(\n iteration, args.save))\n\n # And update the latest iteration\n if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:\n tracker_filename = get_checkpoint_tracker_filename(args.save)\n with open(tracker_filename, 'w') as f:\n f.write(str(iteration))\n\n # Wait so everyone is done (not necessary)\n if torch.distributed.is_initialized():\n torch.distributed.barrier()\n\n # since the code can be exited or aborted in various places we use the checkpoint saving as\n # a save saving point for the codecarbon tracker. 
If the program doesn't run to its normal\n # end, then only the data since the last saved checkpoint will be lost.\n codecarbon_tracker_flush()\n\ndef _transpose_first_dim(t, num_splits, num_splits_first, model):\n input_shape = t.size()\n # We use a self_attention module but the values extracted aren't\n # specific to self attention so should work for cross attention as well\n while hasattr(model, 'module'):\n model = model.module\n attention_module = model.language_model.encoder.layers[0].self_attention\n hidden_size_per_attention_head = attention_module.hidden_size_per_attention_head\n num_attention_heads_per_partition = attention_module.num_attention_heads_per_partition\n if num_splits_first:\n \"\"\"[num_splits * np * hn, h]\n -->(view) [num_splits, np, hn, h]\n -->(tranpose) [np, num_splits, hn, h]\n -->(view) [np * num_splits * hn, h] \"\"\"\n\n intermediate_shape = \\\n (num_splits, num_attention_heads_per_partition,\n hidden_size_per_attention_head) + input_shape[1:]\n\n t = t.view(*intermediate_shape)\n t = t.transpose(0, 1).contiguous()\n else:\n \"\"\"[np * hn * num_splits, h]\n -->(view) [np, hn, num_splits, h]\n -->(tranpose) [np, num_splits, hn, h]\n -->(view) [np * num_splits * hn, h] \"\"\"\n\n intermediate_shape = \\\n (num_attention_heads_per_partition,\n hidden_size_per_attention_head, num_splits) +\\\n input_shape[1:]\n\n t = t.view(*intermediate_shape)\n t = t.transpose(1, 2).contiguous()\n t = t.view(*input_shape)\n\n return t\n\ndef fix_query_key_value_ordering(model, checkpoint_version):\n \"\"\"Fix up query/key/value matrix ordering if checkpoint\n version is smaller than 2.0\n \"\"\"\n if checkpoint_version < 2.0:\n if isinstance(model, list):\n assert len(model)==1\n model = model[0]\n for name, param in model.named_parameters():\n if name.endswith(('.query_key_value.weight', '.query_key_value.bias')):\n if checkpoint_version == 0:\n fixed_param = _transpose_first_dim(param.data, 3, True, model)\n elif checkpoint_version == 1.0:\n fixed_param = _transpose_first_dim(param.data, 3, False, model)\n else:\n print_rank_0(f\"Invalid checkpoint version {checkpoint_version}.\")\n sys.exit()\n param.data.copy_(fixed_param)\n if name.endswith(('.key_value.weight', '.key_value.bias')):\n if checkpoint_version == 0:\n fixed_param = _transpose_first_dim(param.data, 2, True, model)\n elif checkpoint_version == 1.0:\n fixed_param = _transpose_first_dim(param.data, 2, False, model)\n else:\n print_rank_0(f\"Invalid checkpoint version {checkpoint_version}.\")\n sys.exit()\n param.data.copy_(fixed_param)\n print_rank_0(\" succesfully fixed query-key-values ordering for\"\n \" checkpoint version {}\".format(checkpoint_version))\n\ndef load_checkpoint(model, optimizer, lr_scheduler, load_arg='load', strict=True):\n \"\"\"Load a model checkpoint and return the iteration.\n strict (bool): whether to strictly enforce that the keys in\n :attr:`state_dict` of the checkpoint match the names of\n parameters and buffers in model.\n \"\"\"\n args = get_args()\n load_dir = getattr(args, load_arg)\n\n if args.deepspeed:\n loaded_dir, state_dict = model[0].load_checkpoint(load_dir)\n if loaded_dir is None:\n print_rank_0('WARNING: could not find the metadata file {} '.format(\n load_dir))\n print_rank_0(' will not load any checkpoints and will start from '\n 'random')\n return 0\n release = False\n else:\n model = utils.unwrap_model(model)\n\n # Read the tracker file and set the iteration.\n tracker_filename = get_checkpoint_tracker_filename(load_dir)\n\n # If no tracker file, return iretation 
zero.\n if not os.path.isfile(tracker_filename):\n print_rank_0('WARNING: could not find the metadata file {} '.format(\n tracker_filename))\n print_rank_0(' will not load any checkpoints and will start from '\n 'random')\n return 0\n\n # Otherwise, read the tracker file and either set the iteration or\n # mark it as a release checkpoint.\n iteration = 0\n release = False\n with open(tracker_filename, 'r') as f:\n metastring = f.read().strip()\n try:\n iteration = int(metastring)\n except ValueError:\n release = metastring == 'release'\n if not release:\n print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(\n tracker_filename))\n sys.exit()\n\n assert iteration > 0 or release, 'error parsing metadata file {}'.format(\n tracker_filename)\n\n # Checkpoint.\n checkpoint_name = get_checkpoint_name(load_dir, iteration, release)\n print_rank_0(f' loading checkpoint from {args.load} at iteration {iteration}')\n\n # Load the checkpoint.\n try:\n state_dict = torch.load(checkpoint_name, map_location='cpu')\n except ModuleNotFoundError:\n from megatron.fp16_deprecated import loss_scaler\n # For backward compatibility.\n print_rank_0(' > deserializing using the old code structure ...')\n sys.modules['fp16.loss_scaler'] = sys.modules[\n 'megatron.fp16_deprecated.loss_scaler']\n sys.modules['megatron.fp16.loss_scaler'] = sys.modules[\n 'megatron.fp16_deprecated.loss_scaler']\n state_dict = torch.load(checkpoint_name, map_location='cpu')\n sys.modules.pop('fp16.loss_scaler', None)\n sys.modules.pop('megatron.fp16.loss_scaler', None)\n except BaseException as e:\n print_rank_0('could not load the checkpoint')\n print_rank_0(e)\n sys.exit()\n\n # set checkpoint version\n set_checkpoint_version(state_dict.get('checkpoint_version', 0))\n\n # Set iteration.\n if args.finetune or release:\n iteration = 0\n else:\n try:\n iteration = state_dict['iteration']\n except KeyError:\n try: # Backward compatible with older checkpoints\n iteration = state_dict['total_iters']\n except KeyError:\n print_rank_0('A metadata file exists but unable to load '\n 'iteration from checkpoint {}, exiting'.format(\n checkpoint_name))\n sys.exit()\n\n # Check arguments.\n assert args.consumed_train_samples == 0\n assert args.consumed_valid_samples == 0\n if 'args' in state_dict:\n checkpoint_args = state_dict['args']\n check_checkpoint_args(checkpoint_args)\n args.consumed_train_samples = getattr(checkpoint_args,\n 'consumed_train_samples', 0)\n update_num_microbatches(consumed_samples=args.consumed_train_samples)\n args.consumed_valid_samples = getattr(checkpoint_args,\n 'consumed_valid_samples', 0)\n else:\n print_rank_0('could not find arguments in the checkpoint ...')\n\n # Model.\n if not args.deepspeed:\n if len(model) == 1:\n model[0].load_state_dict(state_dict['model'], strict=strict)\n else:\n for i in range(len(model)):\n mpu.set_virtual_pipeline_model_parallel_rank(i)\n model[i].load_state_dict(state_dict['model%d' % i], strict=strict)\n\n # Fix up query/key/value matrix ordering if needed\n checkpoint_version = get_checkpoint_version()\n print_rank_0(f' checkpoint version {checkpoint_version}')\n fix_query_key_value_ordering(model, checkpoint_version)\n\n # Optimizer.\n if not args.deepspeed:\n if not release and not args.finetune and not args.no_load_optim:\n try:\n if optimizer is not None:\n optimizer.load_state_dict(state_dict['optimizer'])\n if lr_scheduler is not None:\n lr_scheduler.load_state_dict(state_dict['lr_scheduler'])\n except KeyError:\n print_rank_0('Unable to load optimizer from checkpoint 
{}. '\n 'Specify --no-load-optim or --finetune to prevent '\n 'attempting to load the optimizer state, '\n 'exiting ...'.format(checkpoint_name))\n sys.exit()\n\n # rng states.\n if not release and not args.finetune and not args.no_load_rng:\n try:\n random.setstate(state_dict['random_rng_state'])\n np.random.set_state(state_dict['np_rng_state'])\n torch.set_rng_state(state_dict['torch_rng_state'])\n torch.cuda.set_rng_state(state_dict['cuda_rng_state'])\n # Check for empty states array\n if not state_dict['rng_tracker_states']:\n raise KeyError\n mpu.get_cuda_rng_tracker().set_states(\n state_dict['rng_tracker_states'])\n except KeyError:\n print_rank_0('Unable to load rng state from checkpoint {}. '\n 'Specify --no-load-rng or --finetune to prevent '\n 'attempting to load the rng state, '\n 'exiting ...'.format(checkpoint_name))\n sys.exit()\n\n # Some utilities want to load a checkpoint without distributed being initialized\n if torch.distributed.is_initialized():\n torch.distributed.barrier()\n\n print_rank_0(f' successfully loaded checkpoint from {args.load} '\n f'at iteration {iteration}')\n\n return iteration\n\n\ndef load_biencoder_checkpoint(model, only_query_model=False,\n only_context_model=False, custom_load_path=None):\n \"\"\"\n selectively load retrieval models for indexing/retrieving\n from saved checkpoints\n \"\"\"\n\n args = get_args()\n\n model = utils.unwrap_model(model)\n\n load_path = custom_load_path if custom_load_path is not None else args.load\n\n tracker_filename = get_checkpoint_tracker_filename(load_path)\n with open(tracker_filename, 'r') as f:\n iteration = int(f.read().strip())\n\n checkpoint_name = get_checkpoint_name(load_path, iteration, False)\n if mpu.get_data_parallel_rank() == 0:\n print('global rank {} is loading checkpoint {}'.format(\n torch.distributed.get_rank(), checkpoint_name))\n\n state_dict = torch.load(checkpoint_name, map_location='cpu')\n ret_state_dict = state_dict['model']\n\n if only_query_model:\n ret_state_dict.pop('context_model')\n if only_context_model:\n ret_state_dict.pop('query_model')\n\n assert len(model) == 1\n model[0].load_state_dict(ret_state_dict)\n torch.distributed.barrier()\n\n if mpu.get_data_parallel_rank() == 0:\n print(' successfully loaded {}'.format(checkpoint_name))\n\n return model\n"
] | [
[
"numpy.random.set_state",
"torch.get_rng_state",
"torch.load",
"torch.distributed.get_rank",
"numpy.random.get_state",
"torch.cuda.set_rng_state",
"torch.cuda.get_rng_state",
"torch.save",
"torch.distributed.is_initialized",
"torch.distributed.barrier",
"torch.set_rng_state"
]
] |
tycallen/fast-reid | [
"66683fa95bc7d7222659e8db3ac04e5b8e366190"
] | [
"fastreid/layers/cos_softmax.py"
] | [
"# encoding: utf-8\n\"\"\"\n@author: xingyu liao\n@contact: [email protected]\n\"\"\"\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\n\n\nclass CosSoftmax(nn.Module):\n r\"\"\"Implement of large margin cosine distance:\n Args:\n in_feat: size of each input sample\n num_classes: size of each output sample\n \"\"\"\n\n def __init__(self, cfg, in_feat, num_classes):\n super().__init__()\n self.in_features = in_feat\n self._num_classes = num_classes\n self.s = cfg.MODEL.HEADS.SCALE\n self.m = cfg.MODEL.HEADS.MARGIN\n self.weight = Parameter(torch.Tensor(num_classes, in_feat))\n nn.init.xavier_uniform_(self.weight)\n\n def forward(self, features, targets):\n # --------------------------- cos(theta) & phi(theta) ---------------------------\n cosine = F.linear(F.normalize(features), F.normalize(self.weight))\n phi = cosine - self.m\n # --------------------------- convert label to one-hot ---------------------------\n targets = F.one_hot(targets, num_classes=self._num_classes)\n output = (targets * phi) + ((1.0 - targets) * cosine)\n output *= self.s\n\n return output\n\n def extra_repr(self):\n return 'in_features={}, num_classes={}, scale={}, margin={}'.format(\n self.in_feat, self._num_classes, self.s, self.m\n )\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.one_hot",
"torch.Tensor"
]
] |
JinYAnGHe/openvino_training_extensions | [
"a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee",
"a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee"
] | [
"pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py",
"pytorch_toolkit/face_recognition/utils/read_tfboard.py"
] | [
"\"\"\"\n Copyright (c) 2018 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport torch.nn as nn\n\nfrom model.blocks.shared_blocks import SELayer\n\n\nclass InvertedResidual(nn.Module):\n \"\"\"Implementation of the modified Inverted residual block\"\"\"\n def __init__(self, in_channels, out_channels, stride, expand_ratio, outp_size=None):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n self.use_res_connect = self.stride == 1 and in_channels == out_channels\n\n self.inv_block = nn.Sequential(\n nn.Conv2d(in_channels, in_channels * expand_ratio, 1, 1, 0, bias=False),\n nn.BatchNorm2d(in_channels * expand_ratio),\n nn.PReLU(),\n\n nn.Conv2d(in_channels * expand_ratio, in_channels * expand_ratio, 3, stride, 1,\n groups=in_channels * expand_ratio, bias=False),\n nn.BatchNorm2d(in_channels * expand_ratio),\n nn.PReLU(),\n\n nn.Conv2d(in_channels * expand_ratio, out_channels, 1, 1, 0, bias=False),\n nn.BatchNorm2d(out_channels),\n # SELayer(out_channels, 8, nn.PReLU, outp_size)\n )\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.inv_block(x)\n\n return self.inv_block(x)\n",
"from tensorboard.backend.event_processing import event_accumulator\n\nea=event_accumulator.EventAccumulator('../logs_landm/2019_08_04_14_38_LandNet-NoSE/events.out.tfevents.1564900704.Workstation')\nea.Reload()\n# print(ea.scalars.Keys())\n# ea1=event_accumulator.EventAccumulator('./dsm_aug/eval_0/events.out.tfevents.1562407251.Workstation')\n# ea1.Reload()\n# print(ea.scalars.Keys())\n\n# print([(i.step, i.value) for i in class_loss])\nimport matplotlib.pyplot as plt\nfig, axes = plt.subplots(3, 1)\nloss=ea.scalars.Items('Loss/train_loss')\naxes[0].plot([i.step for i in loss],[i.value for i in loss],label='loss', linewidth=1)\naxes[0].set_xlim(0)\naxes[0].set_ylim(0, 0.2)\naxes[0].set_yticks([0.02, 0.1, 0.2])\naxes[0].grid(True, linestyle='-.')\n\navg_error=ea.scalars.Items('Quality/Avg_error')\naxes[1].plot([i.step for i in avg_error],[i.value for i in avg_error],label='avg_error', linewidth=1, color='mediumblue')\naxes[1].set_xlim(0)\naxes[1].set_ylim(0, 0.4)\naxes[1].set_yticks([0.2, 0.04, 0.4])\naxes[1].grid(True, linestyle='-.')\n\nfr=ea.scalars.Items('Quality/Failure_rate')\naxes[2].plot([i.step for i in fr],[i.value for i in fr],label='failure_rate', linewidth=1, color='c')\naxes[2].set_xlim(0)\naxes[2].set_yticks([1,0.5,0.04])\naxes[2].grid(True, linestyle='-.')\naxes[0].set_ylabel(\"Loss\")\naxes[1].set_ylabel(\"NME\")\naxes[2].set_ylabel(\"Failure rate\")\naxes[2].set_xlabel(\"step\")\nplt.show()"
] | [
[
"torch.nn.PReLU",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
jaipradeesh/sagemaker-python-sdk | [
"ef842108ccaa324d2be15978aa678926dd1c21ea"
] | [
"tests/unit/test_amazon_estimator.py"
] | [
"# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport pytest\nfrom mock import Mock, patch, call\n\n# Use PCA as a test implementation of AmazonAlgorithmEstimator\nfrom sagemaker.amazon.pca import PCA\nfrom sagemaker.amazon.amazon_estimator import upload_numpy_to_s3_shards, _build_shards, registry, get_image_uri\n\nCOMMON_ARGS = {'role': 'myrole', 'train_instance_count': 1, 'train_instance_type': 'ml.c4.xlarge'}\n\nREGION = \"us-west-2\"\nBUCKET_NAME = \"Some-Bucket\"\nTIMESTAMP = '2017-11-06-14:14:15.671'\n\n\[email protected]()\ndef sagemaker_session():\n boto_mock = Mock(name='boto_session', region_name=REGION)\n sms = Mock(name='sagemaker_session', boto_session=boto_mock,\n region_name=REGION, config=None, local_mode=False)\n sms.boto_region_name = REGION\n sms.default_bucket = Mock(name='default_bucket', return_value=BUCKET_NAME)\n returned_job_description = {'AlgorithmSpecification': {'TrainingInputMode': 'File',\n 'TrainingImage': registry(\"us-west-2\") + \"/pca:1\"},\n 'ModelArtifacts': {'S3ModelArtifacts': \"s3://some-bucket/model.tar.gz\"},\n 'HyperParameters':\n {'sagemaker_submit_directory': '\"s3://some/sourcedir.tar.gz\"',\n 'checkpoint_path': '\"s3://other/1508872349\"',\n 'sagemaker_program': '\"iris-dnn-classifier.py\"',\n 'sagemaker_enable_cloudwatch_metrics': 'false',\n 'sagemaker_container_log_level': '\"logging.INFO\"',\n 'sagemaker_job_name': '\"neo\"',\n 'training_steps': '100'},\n 'RoleArn': 'arn:aws:iam::366:role/IMRole',\n 'ResourceConfig':\n {'VolumeSizeInGB': 30,\n 'InstanceCount': 1,\n 'InstanceType': 'ml.c4.xlarge'},\n 'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},\n 'TrainingJobName': 'neo',\n 'TrainingJobStatus': 'Completed',\n 'OutputDataConfig': {'KmsKeyId': '',\n 'S3OutputPath': 's3://place/output/neo'},\n 'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'}}\n sms.sagemaker_client.describe_training_job = Mock(name='describe_training_job',\n return_value=returned_job_description)\n return sms\n\n\ndef test_gov_ecr_uri():\n assert get_image_uri('us-gov-west-1', 'kmeans', 'latest') == \\\n '226302683700.dkr.ecr.us-gov-west-1.amazonaws.com/kmeans:latest'\n\n assert get_image_uri('us-iso-east-1', 'kmeans', 'latest') == \\\n '490574956308.dkr.ecr.us-iso-east-1.c2s.ic.gov/kmeans:latest'\n\n\ndef test_init(sagemaker_session):\n pca = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)\n assert pca.num_components == 55\n\n\ndef test_init_all_pca_hyperparameters(sagemaker_session):\n pca = PCA(num_components=55, algorithm_mode='randomized',\n subtract_mean=True, extra_components=33, sagemaker_session=sagemaker_session,\n **COMMON_ARGS)\n assert pca.num_components == 55\n assert pca.algorithm_mode == 'randomized'\n assert pca.extra_components == 33\n\n\ndef test_init_estimator_args(sagemaker_session):\n pca = PCA(num_components=1, train_max_run=1234, sagemaker_session=sagemaker_session,\n 
data_location='s3://some-bucket/some-key/', **COMMON_ARGS)\n assert pca.train_instance_type == COMMON_ARGS['train_instance_type']\n assert pca.train_instance_count == COMMON_ARGS['train_instance_count']\n assert pca.role == COMMON_ARGS['role']\n assert pca.train_max_run == 1234\n assert pca.data_location == 's3://some-bucket/some-key/'\n\n\ndef test_data_location_validation(sagemaker_session):\n pca = PCA(num_components=2, sagemaker_session=sagemaker_session, **COMMON_ARGS)\n with pytest.raises(ValueError):\n pca.data_location = \"nots3://abcd/efgh\"\n\n\ndef test_data_location_does_not_call_default_bucket(sagemaker_session):\n data_location = \"s3://my-bucket/path/\"\n pca = PCA(num_components=2, sagemaker_session=sagemaker_session, data_location=data_location, **COMMON_ARGS)\n assert pca.data_location == data_location\n assert not sagemaker_session.default_bucket.called\n\n\ndef test_prepare_for_training(sagemaker_session):\n pca = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)\n\n train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]\n labels = [99, 85, 87, 2]\n records = pca.record_set(np.array(train), np.array(labels))\n\n pca._prepare_for_training(records, mini_batch_size=1)\n assert pca.feature_dim == 3\n assert pca.mini_batch_size == 1\n\n\ndef test_prepare_for_training_list(sagemaker_session):\n pca = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)\n\n train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]\n labels = [99, 85, 87, 2]\n records = [pca.record_set(np.array(train), np.array(labels))]\n\n pca._prepare_for_training(records, mini_batch_size=1)\n assert pca.feature_dim == 3\n assert pca.mini_batch_size == 1\n\n\ndef test_prepare_for_training_list_no_train_channel(sagemaker_session):\n pca = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)\n\n train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]\n labels = [99, 85, 87, 2]\n records = [pca.record_set(np.array(train), np.array(labels), 'test')]\n\n with pytest.raises(ValueError) as ex:\n pca._prepare_for_training(records, mini_batch_size=1)\n\n assert 'Must provide train channel.' 
in str(ex)\n\n\n@patch('time.strftime', return_value=TIMESTAMP)\ndef test_fit_ndarray(time, sagemaker_session):\n mock_s3 = Mock()\n mock_object = Mock()\n mock_s3.Object = Mock(return_value=mock_object)\n sagemaker_session.boto_session.resource = Mock(return_value=mock_s3)\n kwargs = dict(COMMON_ARGS)\n kwargs['train_instance_count'] = 3\n pca = PCA(num_components=55, sagemaker_session=sagemaker_session,\n data_location='s3://{}/key-prefix/'.format(BUCKET_NAME), **kwargs)\n train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]\n labels = [99, 85, 87, 2]\n pca.fit(pca.record_set(np.array(train), np.array(labels)))\n mock_s3.Object.assert_any_call(\n BUCKET_NAME, 'key-prefix/PCA-2017-11-06-14:14:15.671/matrix_0.pbr'.format(TIMESTAMP))\n mock_s3.Object.assert_any_call(\n BUCKET_NAME, 'key-prefix/PCA-2017-11-06-14:14:15.671/matrix_1.pbr'.format(TIMESTAMP))\n mock_s3.Object.assert_any_call(\n BUCKET_NAME, 'key-prefix/PCA-2017-11-06-14:14:15.671/matrix_2.pbr'.format(TIMESTAMP))\n mock_s3.Object.assert_any_call(\n BUCKET_NAME, 'key-prefix/PCA-2017-11-06-14:14:15.671/.amazon.manifest'.format(TIMESTAMP))\n\n assert mock_object.put.call_count == 4\n\n\ndef test_build_shards():\n array = np.array([1, 2, 3, 4])\n shards = _build_shards(4, array)\n assert shards == [np.array([1]), np.array([2]), np.array([3]), np.array([4])]\n\n shards = _build_shards(3, array)\n for out, expected in zip(shards, map(np.array, [[1], [2], [3, 4]])):\n assert np.array_equal(out, expected)\n\n with pytest.raises(ValueError):\n shards = _build_shards(5, array)\n\n\ndef test_upload_numpy_to_s3_shards():\n mock_s3 = Mock()\n mock_object = Mock()\n mock_s3.Object = Mock(return_value=mock_object)\n array = np.array([[j for j in range(10)] for i in range(10)])\n labels = np.array([i for i in range(10)])\n upload_numpy_to_s3_shards(3, mock_s3, BUCKET_NAME, \"key-prefix\", array, labels)\n mock_s3.Object.assert_has_calls([call(BUCKET_NAME, 'key-prefix/matrix_0.pbr')])\n mock_s3.Object.assert_has_calls([call(BUCKET_NAME, 'key-prefix/matrix_1.pbr')])\n mock_s3.Object.assert_has_calls([call(BUCKET_NAME, 'key-prefix/matrix_2.pbr')])\n"
] | [
[
"numpy.array",
"numpy.array_equal"
]
] |
UmaTaru/run | [
"be29e4d41a4de3dee27cd6796801bfe51382d294"
] | [
"torchMoji/torchmoji/model_def.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\" Model definition functions and weight loading.\n\"\"\"\n\nfrom __future__ import print_function, division, unicode_literals\n\nfrom os.path import exists\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence\n\nfrom torchMoji.torchmoji.lstm import LSTMHardSigmoid\nfrom torchMoji.torchmoji.attlayer import Attention\nfrom torchMoji.torchmoji.global_variables import NB_TOKENS, NB_EMOJI_CLASSES\n\n\ndef torchmoji_feature_encoding(weight_path, return_attention=False):\n \"\"\" Loads the pretrained torchMoji model for extracting features\n from the penultimate feature layer. In this way, it transforms\n the text into its emotional encoding.\n\n # Arguments:\n weight_path: Path to model weights to be loaded.\n return_attention: If true, output will include weight of each input token\n used for the prediction\n\n # Returns:\n Pretrained model for encoding text into feature vectors.\n \"\"\"\n\n model = TorchMoji(nb_classes=None,\n nb_tokens=NB_TOKENS,\n feature_output=True,\n return_attention=return_attention)\n load_specific_weights(model, weight_path, exclude_names=['output_layer'])\n return model\n\n\ndef torchmoji_emojis(weight_path, return_attention=False):\n \"\"\" Loads the pretrained torchMoji model for extracting features\n from the penultimate feature layer. In this way, it transforms\n the text into its emotional encoding.\n\n # Arguments:\n weight_path: Path to model weights to be loaded.\n return_attention: If true, output will include weight of each input token\n used for the prediction\n\n # Returns:\n Pretrained model for encoding text into feature vectors.\n \"\"\"\n\n model = TorchMoji(nb_classes=NB_EMOJI_CLASSES,\n nb_tokens=NB_TOKENS,\n return_attention=return_attention)\n model.load_state_dict(torch.load(weight_path))\n return model\n\n\ndef torchmoji_transfer(nb_classes, weight_path=None, extend_embedding=0,\n embed_dropout_rate=0.1, final_dropout_rate=0.5):\n \"\"\" Loads the pretrained torchMoji model for finetuning/transfer learning.\n Does not load weights for the softmax layer.\n\n Note that if you are planning to use class average F1 for evaluation,\n nb_classes should be set to 2 instead of the actual number of classes\n in the dataset, since binary classification will be performed on each\n class individually.\n\n Note that for the 'new' method, weight_path should be left as None.\n\n # Arguments:\n nb_classes: Number of classes in the dataset.\n weight_path: Path to model weights to be loaded.\n extend_embedding: Number of tokens that have been added to the\n vocabulary on top of NB_TOKENS. 
If this number is larger than 0,\n the embedding layer's dimensions are adjusted accordingly, with the\n additional weights being set to random values.\n embed_dropout_rate: Dropout rate for the embedding layer.\n final_dropout_rate: Dropout rate for the final Softmax layer.\n\n # Returns:\n Model with the given parameters.\n \"\"\"\n\n model = TorchMoji(nb_classes=nb_classes,\n nb_tokens=NB_TOKENS + extend_embedding,\n embed_dropout_rate=embed_dropout_rate,\n final_dropout_rate=final_dropout_rate,\n output_logits=True)\n if weight_path is not None:\n load_specific_weights(model, weight_path,\n exclude_names=['output_layer'],\n extend_embedding=extend_embedding)\n return model\n\n\nclass TorchMoji(nn.Module):\n def __init__(self, nb_classes, nb_tokens, feature_output=False, output_logits=False,\n embed_dropout_rate=0, final_dropout_rate=0, return_attention=False):\n \"\"\"\n torchMoji model.\n IMPORTANT: The model is loaded in evaluation mode by default (self.eval())\n\n # Arguments:\n nb_classes: Number of classes in the dataset.\n nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).\n feature_output: If True the model returns the penultimate\n feature vector rather than Softmax probabilities\n (defaults to False).\n output_logits: If True the model returns logits rather than probabilities\n (defaults to False).\n embed_dropout_rate: Dropout rate for the embedding layer.\n final_dropout_rate: Dropout rate for the final Softmax layer.\n return_attention: If True the model also returns attention weights over the sentence\n (defaults to False).\n \"\"\"\n super(TorchMoji, self).__init__()\n\n embedding_dim = 256\n hidden_size = 512\n attention_size = 4 * hidden_size + embedding_dim\n\n self.feature_output = feature_output\n self.embed_dropout_rate = embed_dropout_rate\n self.final_dropout_rate = final_dropout_rate\n self.return_attention = return_attention\n self.hidden_size = hidden_size\n self.output_logits = output_logits\n self.nb_classes = nb_classes\n\n self.add_module('embed', nn.Embedding(nb_tokens, embedding_dim))\n # dropout2D: embedding channels are dropped out instead of words\n # many exampels in the datasets contain few words that losing one or more words can alter the emotions completely\n self.add_module('embed_dropout', nn.Dropout2d(embed_dropout_rate))\n self.add_module('lstm_0', LSTMHardSigmoid(embedding_dim, hidden_size, batch_first=True, bidirectional=True))\n self.add_module('lstm_1', LSTMHardSigmoid(hidden_size*2, hidden_size, batch_first=True, bidirectional=True))\n self.add_module('attention_layer', Attention(attention_size=attention_size, return_attention=return_attention))\n if not feature_output:\n self.add_module('final_dropout', nn.Dropout(final_dropout_rate))\n if output_logits:\n self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1)))\n else:\n self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1),\n nn.Softmax() if self.nb_classes > 2 else nn.Sigmoid()))\n self.init_weights()\n # Put model in evaluation mode by default\n self.eval()\n\n def init_weights(self):\n \"\"\"\n Here we reproduce Keras default initialization weights for consistency with Keras version\n \"\"\"\n ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param.data for name, param in self.named_parameters() if 'bias' in name)\n 
nn.init.uniform(self.embed.weight.data, a=-0.5, b=0.5)\n for t in ih:\n nn.init.xavier_uniform(t)\n for t in hh:\n nn.init.orthogonal(t)\n for t in b:\n nn.init.constant(t, 0)\n if not self.feature_output:\n nn.init.xavier_uniform(self.output_layer[0].weight.data)\n\n def forward(self, input_seqs):\n \"\"\" Forward pass.\n\n # Arguments:\n input_seqs: Can be one of Numpy array, Torch.LongTensor, Torch.Variable, Torch.PackedSequence.\n\n # Return:\n Same format as input format (except for PackedSequence returned as Variable).\n \"\"\"\n # Check if we have Torch.LongTensor inputs or not Torch.Variable (assume Numpy array in this case), take note to return same format\n return_numpy = False\n return_tensor = False\n if isinstance(input_seqs, (torch.LongTensor, torch.cuda.LongTensor)):\n input_seqs = Variable(input_seqs)\n return_tensor = True\n elif not isinstance(input_seqs, Variable):\n input_seqs = Variable(torch.from_numpy(input_seqs.astype('int64')).long())\n return_numpy = True\n\n # If we don't have a packed inputs, let's pack it\n reorder_output = False\n if not isinstance(input_seqs, PackedSequence):\n ho = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()\n co = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()\n\n # Reorder batch by sequence length\n input_lengths = torch.LongTensor([torch.max(input_seqs[i, :].data.nonzero()) + 1 for i in range(input_seqs.size()[0])])\n input_lengths, perm_idx = input_lengths.sort(0, descending=True)\n input_seqs = input_seqs[perm_idx][:, :input_lengths.max()]\n\n # Pack sequence and work on data tensor to reduce embeddings/dropout computations\n packed_input = pack_padded_sequence(input_seqs, input_lengths.cpu().numpy(), batch_first=True)\n reorder_output = True\n else:\n ho = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()\n co = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()\n input_lengths = input_seqs.batch_sizes\n packed_input = input_seqs\n\n hidden = (Variable(ho, requires_grad=False), Variable(co, requires_grad=False))\n\n # Embed with an activation function to bound the values of the embeddings\n x = self.embed(packed_input.data)\n x = nn.Tanh()(x)\n\n # pyTorch 2D dropout2d operate on axis 1 which is fine for us\n x = self.embed_dropout(x)\n\n # Update packed sequence data for RNN\n packed_input = PackedSequence(x, packed_input.batch_sizes)\n\n # skip-connection from embedding to output eases gradient-flow and allows access to lower-level features\n # ordering of the way the merge is done is important for consistency with the pretrained model\n lstm_0_output, _ = self.lstm_0(packed_input, hidden)\n lstm_1_output, _ = self.lstm_1(lstm_0_output, hidden)\n\n # Update packed sequence data for attention layer\n packed_input = PackedSequence(torch.cat((lstm_1_output.data,\n lstm_0_output.data,\n packed_input.data), dim=1),\n packed_input.batch_sizes)\n\n input_seqs, _ = pad_packed_sequence(packed_input, batch_first=True)\n\n x, att_weights = self.attention_layer(input_seqs, input_lengths)\n\n # output class probabilities or penultimate feature vector\n if not self.feature_output:\n x = self.final_dropout(x)\n outputs = self.output_layer(x)\n else:\n outputs = x\n\n # Reorder output if needed\n if reorder_output:\n reorered = Variable(outputs.data.new(outputs.size()))\n reorered[perm_idx] = outputs\n outputs = reorered\n\n # Adapt return format if needed\n if return_tensor:\n outputs = 
outputs.data\n if return_numpy:\n outputs = outputs.data.numpy()\n\n if self.return_attention:\n return outputs, att_weights\n else:\n return outputs\n\n\ndef load_specific_weights(model, weight_path, exclude_names=[], extend_embedding=0, verbose=True):\n \"\"\" Loads model weights from the given file path, excluding any\n given layers.\n\n # Arguments:\n model: Model whose weights should be loaded.\n weight_path: Path to file containing model weights.\n exclude_names: List of layer names whose weights should not be loaded.\n extend_embedding: Number of new words being added to vocabulary.\n verbose: Verbosity flag.\n\n # Raises:\n ValueError if the file at weight_path does not exist.\n \"\"\"\n if not exists(weight_path):\n raise ValueError('ERROR (load_weights): The weights file at {} does '\n 'not exist. Refer to the README for instructions.'\n .format(weight_path))\n\n if extend_embedding and 'embed' in exclude_names:\n raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '\n 'without loading the embedding weights.')\n\n # Copy only weights from the temporary model that are wanted\n # for the specific task (e.g. the Softmax is often ignored)\n weights = torch.load(weight_path)\n for key, weight in weights.items():\n if any(excluded in key for excluded in exclude_names):\n if verbose:\n print('Ignoring weights for {}'.format(key))\n continue\n\n try:\n model_w = model.state_dict()[key]\n except KeyError:\n raise KeyError(\"Weights had parameters {},\".format(key)\n + \" but could not find this parameters in model.\")\n\n if verbose:\n print('Loading weights for {}'.format(key))\n\n # extend embedding layer to allow new randomly initialized words\n # if requested. Otherwise, just load the weights for the layer.\n if 'embed' in key and extend_embedding > 0:\n weight = torch.cat((weight, model_w[NB_TOKENS:, :]), dim=0)\n if verbose:\n print('Extended vocabulary for embedding layer ' +\n 'from {} to {} tokens.'.format(\n NB_TOKENS, NB_TOKENS + extend_embedding))\n try:\n model_w.copy_(weight)\n except:\n print('While copying the weigths named {}, whose dimensions in the model are'\n ' {} and whose dimensions in the saved file are {}, ...'.format(\n key, model_w.size(), weight.size()))\n raise\n"
] | [
[
"torch.nn.init.uniform",
"torch.nn.init.orthogonal",
"torch.nn.init.xavier_uniform",
"torch.nn.Linear",
"torch.load",
"torch.nn.Dropout2d",
"torch.nn.init.constant",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.utils.rnn.PackedSequence",
"torch.autograd.Variable",
"torch.nn.Embedding",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence"
]
] |
madokast/cctpy | [
"b02c64220ea533a4fc9cad0b882d1be6edadf1c0"
] | [
"cctpy6_r100_wn_change/run.py"
] | [
"# from visdom import Visdom\n\nfrom cctpy import *\nfrom ccpty_cuda import *\nimport time\nimport numpy as np\n\nVIZ_PORT = 8098\n\nga32 = GPU_ACCELERATOR()\n\nmomentum_dispersions = [-0.05, -0.025, 0.0, 0.025, 0.05]\nparticle_number_per_plane_per_dp = 12\n\nparticle_number_per_gantry = len(momentum_dispersions) * particle_number_per_plane_per_dp * 2\n\ndefault_gantry = HUST_SC_GANTRY(\n DL1=0.9007765,\n GAP1=0.4301517,\n GAP2=0.370816,\n qs1_length=0.2340128,\n qs1_aperture_radius=60 * MM,\n qs1_gradient=0.0,\n qs1_second_gradient=0.0,\n qs2_length=0.200139,\n qs2_aperture_radius=60 * MM,\n qs2_gradient=0.0,\n qs2_second_gradient=0.0,\n\n DL2=2.35011,\n GAP3=0.43188,\n qs3_length=0.24379,\n)\ndefault_beamline = default_gantry.create_beamline()\nfirst_bending_length = default_gantry.first_bending_part_length()\nrun_distance = default_beamline.get_length() - first_bending_length\n\nsecond_bending_part_start_point = default_beamline.trajectory.point_at(first_bending_length)\nsecond_bending_part_start_direct = default_beamline.trajectory.direct_at(first_bending_length)\n\nip = ParticleFactory.create_proton_along(\n trajectory=default_beamline.trajectory,\n s=first_bending_length,\n kinetic_MeV=215\n)\n\nip_ran = ParticleFactory.create_proton_along(\n trajectory=default_beamline.trajectory,\n s=default_beamline.get_length(),\n kinetic_MeV=215\n)\n\npps = []\nfor dp in momentum_dispersions:\n pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(\n xMax=3.5 * MM, xpMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp\n ))\n pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(\n yMax=3.5 * MM, ypMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp\n ))\n\ntimes = 1\n\nparams_and_objs = []\n\n\ndef run(params: np.ndarray):\n global times\n start_time = time.time()\n\n gantry_number = params.shape[0]\n\n print(f\"机架数目{gantry_number}\")\n\n beamlines = create_beamlines(gantry_number, params)\n\n print(f\"制作机架用时{time.time() - start_time}\")\n ps = ParticleFactory.create_from_phase_space_particles(\n ip, ip.get_natural_coordinate_system(), pps\n )\n\n print(f\"粒子总数{len(ps) * gantry_number}\")\n\n ps_ran_list = ga32.track_multi_particle_beamlime_for_magnet_with_single_qs(\n bls=beamlines,\n ps=ps,\n distance=run_distance,\n footstep=20 * MM\n )\n\n statistic_x = BaseUtils.Statistic()\n statistic_y = BaseUtils.Statistic()\n statistic_beam_sizes = BaseUtils.Statistic()\n objs: List[List[float]] = []\n for gid in range(gantry_number): # ~120\n ps_ran = ps_ran_list[gid]\n pps_ran = PhaseSpaceParticle.create_from_running_particles(\n ip_ran, ip_ran.get_natural_coordinate_system(), ps_ran\n )\n obj: List[float] = []\n # 对于所有粒子\n for pid in range(0, len(pps_ran), particle_number_per_plane_per_dp):\n # 每 particle_number_per_plane_per_dp 个一组\n for pp in pps_ran[pid:pid + particle_number_per_plane_per_dp]:\n # 统计 x 和 y\n statistic_x.add(pp.x / MM)\n statistic_y.add(pp.y / MM) # mm\n # 分别求束斑\n beam_size_x = (statistic_x.max() - statistic_x.min()) / 2\n beam_size_y = (statistic_y.max() - statistic_y.min()) / 2\n statistic_x.clear()\n statistic_y.clear()\n\n # 只有 x 和 y 中大的我需要\n beam_size = max(beam_size_x, beam_size_y)\n statistic_beam_sizes.add(beam_size) # 用于统计均值\n obj.append(beam_size) # 用于记录每次束斑\n\n # 均值\n beam_size_avg = statistic_beam_sizes.average()\n statistic_beam_sizes.clear()\n objs.append([abs(bs - beam_size_avg) for bs in obj] + [beam_size_avg])\n\n objs_np = np.array(objs)\n\n for gid in 
range(gantry_number):\n param = params[gid]\n obj = objs_np[gid]\n params_and_objs.append(np.concatenate((param, obj)))\n\n np.savetxt(fname='./record/' + str(times) + '.txt', X=params_and_objs)\n try:\n # draw_viz(params_and_objs)\n pass\n except Exception as e:\n print(e)\n pass\n times += 1\n\n print(f\"用时{time.time() - start_time} s\")\n\n return objs_np\n\n\ndef create_beamlines(gantry_number, params):\n return BaseUtils.submit_process_task(\n task=create_beamline,\n param_list=[\n [params[i], second_bending_part_start_point, second_bending_part_start_direct] for i in range(gantry_number)\n ]\n )\n\n\ndef create_beamline(param, second_bending_part_start_point, second_bending_part_start_direct) -> Beamline:\n qs3_g = param[0]\n qs3_sg = param[1]\n\n dicct_tilt_1 = param[2]\n dicct_tilt_2 = param[3]\n dicct_tilt_3 = param[4]\n\n agcct_tilt_0 = param[5]\n agcct_tilt_2 = param[6]\n agcct_tilt_3 = param[7]\n\n dicct_current = param[8]\n agcct_current = param[9]\n\n agcct3_wn = int(param[10])\n agcct4_wn = int(param[11])\n agcct5_wn = int(param[12])\n\n return HUST_SC_GANTRY(\n qs3_gradient=qs3_g,\n qs3_second_gradient=qs3_sg,\n dicct345_tilt_angles=[30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3],\n agcct345_tilt_angles=[agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3],\n dicct345_current=dicct_current,\n agcct345_current=agcct_current,\n agcct3_winding_number=agcct3_wn,\n agcct4_winding_number=agcct4_wn,\n agcct5_winding_number=agcct5_wn,\n agcct3_bending_angle=-67.5 * (agcct3_wn / (agcct3_wn + agcct4_wn + agcct5_wn)),\n agcct4_bending_angle=-67.5 * (agcct4_wn / (agcct3_wn + agcct4_wn + agcct5_wn)),\n agcct5_bending_angle=-67.5 * (agcct5_wn / (agcct3_wn + agcct4_wn + agcct5_wn)),\n\n DL1=0.9007765,\n GAP1=0.4301517,\n GAP2=0.370816,\n qs1_length=0.2340128,\n qs1_aperture_radius=60 * MM,\n qs1_gradient=0.0,\n qs1_second_gradient=0.0,\n qs2_length=0.200139,\n qs2_aperture_radius=60 * MM,\n qs2_gradient=0.0,\n qs2_second_gradient=0.0,\n\n DL2=2.35011,\n GAP3=0.43188,\n qs3_length=0.24379,\n\n agcct345_inner_small_r=92.5 * MM + 17.1 * MM,# 92.5\n agcct345_outer_small_r=108.5 * MM + 17.1 * MM, # 83+15\n dicct345_inner_small_r=124.5 * MM + 17.1 * MM, # 83+30+1\n dicct345_outer_small_r=140.5 * MM + 17.1 * MM, # 83+45 +2\n ).create_second_bending_part(\n start_point=second_bending_part_start_point,\n start_driect=second_bending_part_start_direct\n )\n\n\nwins = [] # 画图窗口\n\n\ndef draw_viz(params_and_objs):\n viz = Visdom(server='Http://127.0.0.1', port=VIZ_PORT)\n assert viz.check_connection()\n\n data = np.array(params_and_objs)\n\n x = np.array(list(range(data.shape[0])))\n\n xd = np.concatenate((x.reshape((-1, 1)), data), axis=1)\n\n # xd 每一列的意义\n # 0 编号 0-34265\n # 12 qs参数\n # 345 / 678 CCT倾斜角参数\n # 9 10 电流\n # 11 12 13 匝数\n # 14 15 16 17 18\n # 19 20 21 22 23 束斑和均值差\n # 24 束斑均值\n\n lables = ['qs-q', 'qs-s',\n 'dicct-t4', 'dicct-t6', 'dicct-t8',\n 'agcct-t2', 'agcct-t6', 'agcct-t8',\n 'dicct-I', 'agcct-I',\n 'agcct-wn0', 'agcct-wn1', 'agcct-wn2',\n 'diff_size1', 'diff_size2', 'diff_size3', 'diff_size4', 'diff_size5',\n 'diff_size6', 'diff_size7', 'diff_size8', 'diff_size9', 'diff_size0',\n 'beam_avg', 'max_diff_size']\n\n for i in range(len(lables)):\n if len(wins) != len(lables):\n if i == len(lables) - 1: # last\n wins.append(viz.scatter(\n X=np.vstack((xd[:, 0], np.max(xd[:, 14:24], axis=1))).T,\n opts={\n 'title': lables[i] + ' vs individual',\n 'xlabel': 'individual',\n 'ylabel': lables[i],\n 'markersize': 2\n }\n ))\n else:\n wins.append(viz.scatter(\n X=np.vstack((xd[:, 0], xd[:, i + 
1])).T,\n opts={\n 'title': lables[i] + ' vs individual',\n 'xlabel': 'individual',\n 'ylabel': lables[i],\n 'markersize': 2\n }\n ))\n else:\n if i == len(lables) - 1: # last\n wins[i] = viz.scatter(\n X=np.vstack((xd[:, 0], np.max(xd[:, 14:24], axis=1))).T,\n win=wins[i],\n opts={\n 'title': lables[i] + ' vs individual',\n 'xlabel': 'individual',\n 'ylabel': lables[i],\n 'markersize': 2\n }\n )\n else:\n viz.scatter(\n X=np.vstack((xd[:, 0], xd[:, i + 1])).T,\n win=wins[i],\n opts={\n 'title': lables[i] + ' vs individual',\n 'xlabel': 'individual',\n 'ylabel': lables[i],\n 'markersize': 2\n }\n )\n"
] | [
[
"numpy.array",
"numpy.concatenate",
"numpy.max",
"numpy.vstack"
]
] |
dl4amc/dds | [
"2d53c74ea1f1452beb2c1c52d3048e4260f22948"
] | [
"subsamplers/cldnn.py"
] | [
"# coding: utf-8\n\n# Import all the things we need ---\n#get_ipython().magic(u'matplotlib inline')\nimport os,random\n#os.environ[\"KERAS_BACKEND\"] = \"theano\"\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n#os.environ[\"THEANO_FLAGS\"] = \"device=gpu%d\"%(1) #disabled because we do not have a hardware GPU\nimport numpy as np\nfrom copy import deepcopy\n#import theano as th\n#import theano.tensor as T\nfrom keras.utils import np_utils\nfrom keras.models import load_model\nimport keras.models as models\nfrom keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D\nfrom keras.regularizers import *\nfrom keras.optimizers import adam\nfrom keras.optimizers import adagrad\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n#import seaborn as sns\nimport cPickle, random, sys, keras\nfrom keras.utils import multi_gpu_model\nfrom keras import backend as K\nK.tensorflow_backend._get_available_gpus()\nimport tensorflow as tf\n\n\n# Dataset setup\nXd = cPickle.load(open(\"../data/RML2016.10b_dict.dat\", 'rb'))\nsnrs, mods = map(lambda j: sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1, 0])\nX = []\nY_snr = []\nlbl = []\nfor snr in snrs:\n for mod in mods:\n X.append(Xd[(mod, snr)])\n for i in range(Xd[(mod, snr)].shape[0]): lbl.append((mod, snr))\n Y_snr = Y_snr + [mod]*6000\nX = np.vstack(X)\nY_snr = np.vstack(Y_snr)\n\n\ndef to_onehot(yy):\n yy1 = np.zeros([len(yy), max(yy) + 1])\n yy1[np.arange(len(yy)), yy] = 1\n return yy1\n\n\n# Use only the train split\nnp.random.seed(2016)\nn_examples = X.shape[0]\nn_train_valid = n_examples // 2\ntrain_valid_idx = np.random.choice(range(0, n_examples), size=n_train_valid, replace=False)\nX_train_valid = X[train_valid_idx]\nn_train = 3 * n_train_valid // 4\ntrain_idx = np.random.choice(range(0, n_train_valid), size=n_train, replace=False)\nX = X_train_valid[train_idx]\nvalid_idx = list(set(range(0, n_train_valid))-set(train_idx))\nX_valid = X_train_valid[valid_idx]\nY_snr = to_onehot(map(lambda x: mods.index(lbl[x][0]), range(X.shape[0])))\n\nprint(\"shape of X\", np.shape(X))\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nnum_samples = 64\nnew_X = []\norig_model = load_model('../models/cldnn_ranker.h5')\nfor eva_iter in range(X.shape[0]//60000):\n snr_data = X[eva_iter*60000:(eva_iter+1)*60000]\n snr_out = Y_snr[eva_iter*60000:(eva_iter+1)*60000]\n snr_acc_list = []\n snr_data_copy = deepcopy(snr_data)\n for idx in range(X.shape[2]):\n snr_data = deepcopy(snr_data_copy)\n snr_data = snr_data.transpose((2, 1, 0))\n new_snr_data = np.append(snr_data[:idx], np.zeros((1, snr_data.shape[1], snr_data.shape[2])), axis=0)\n snr_data = np.append(new_snr_data, snr_data[idx+1:], axis=0)\n snr_data = snr_data.transpose((2, 1, 0))\n score = orig_model.evaluate(snr_data, snr_out, batch_size=60000, verbose=0)\n snr_acc_list.append((idx, score[1]))\n snr_acc_list.sort(key=lambda x: x[1])\n snr_acc_list = snr_acc_list[:num_samples]\n snr_acc_list.sort(key=lambda x: x[0]) \n snr_idxs = [ele[0] for ele in snr_acc_list]\n snr_data = snr_data.transpose((2, 1, 0))\n snr_data = snr_data[snr_idxs]\n snr_data = snr_data.transpose((2, 1, 0))\n new_X = new_X + [snr_data]\n print(eva_iter)\nX = np.vstack(new_X)\nnp.save('../ranker_samples/cldnn/cldnn_'+str(num_samples)+'.npy', X)\n"
] | [
[
"numpy.vstack",
"numpy.append",
"numpy.zeros",
"numpy.random.seed",
"numpy.shape",
"matplotlib.use"
]
] |
ofantomas/rlax | [
"58b3672b2f7ac1a400b3934ae9888c677f39b9e2"
] | [
"rlax/_src/mpo_ops_test.py"
] | [
"# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for mpo_ops.py.\"\"\"\n\nimport functools\nimport math\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport optax\nfrom rlax._src import distributions\nfrom rlax._src import mpo_ops\n\nNUM_SAMPLES = 10\nACTION_DIM = 3\nTIME_DIM = 8\nBATCH_DIM = 100\n\n# NOTE: These are not typical values used for MPO. In the test case, we know the\n# Q function perfectly so we loosen the bound on the mean to zone in to the\n# optimal policy very quickly. Similarly, we maintain a high variance to sample\n# distinct actions to explore and learn from.\n_INIT_TEMPERATURE = 0.2\n_INIT_ALPHA_MEAN = 0.001\n_INIT_ALPHA_COVARIANCE = float(1e6)\n\n_EPSILON_BOUND = 0.01\n_EPSILON_MEAN_BOUND = 10.0\n_EPSILON_COVARIANCE_BOUND = 1e-12\n\n_NUM_ITERATIONS = 5000\n_TARGET_UPDATE_PERIOD = 100\n_RANDOM_SEED = 42\n\n# The offset to ensure initially the policy is not close to 0\n_MEAN_OFFSET = 2.0\n\n# The final action should optimize down to be close to 0.0\n_MAX_ACTION_ERROR = 0.2\n_MAX_KL_ERROR = 1e-6\n\n_DIAGONAL_GAUSSIAN_DIST = distributions.gaussian_diagonal()\n_PROJECTION_OPERATOR = functools.partial(jnp.clip, a_min=1e-10)\n\n\ndef _hk_mock_policy_params(s_tm1):\n \"\"\"Returns mock policy params.\"\"\"\n # Outputs of the network are mu and sigma. 
Both shaped [B, ACTION_DIM].\n pi_out = hk.nets.MLP(\n output_sizes=[2 * ACTION_DIM],\n w_init=hk.initializers.VarianceScaling(1e-3),\n activation=jnp.tanh,\n activate_final=False,\n name='online_policy')(s_tm1)\n pi_mean, pi_cov = jnp.split(pi_out, 2, axis=-1)\n pi_cov = jax.nn.softplus(pi_cov)\n pi_mean = pi_mean + _MEAN_OFFSET\n return {'mean': pi_mean, 'stddev': pi_cov}\n\n\ndef _init_params(key):\n init_fn, _ = hk.transform(_hk_mock_policy_params)\n key_seq = hk.PRNGSequence(key)\n s_tm1 = jax.random.normal(\n next(key_seq), (TIME_DIM, BATCH_DIM, ACTION_DIM), jnp.float32)\n online_params = init_fn(next(key_seq), s_tm1)\n return dict(\n online=online_params,\n target=online_params,\n mpo=dict(\n temperature=_INIT_TEMPERATURE,\n alpha_mean=_INIT_ALPHA_MEAN,\n alpha_covariance=_INIT_ALPHA_COVARIANCE),\n )\n\n\ndef _mock_outputs(online_params, target_params, key, target_name):\n \"\"\"Returns mock network outputs.\"\"\"\n _, policy_params_fn = hk.transform(_hk_mock_policy_params)\n key_seq = hk.PRNGSequence(key)\n\n state_size = ACTION_DIM\n\n # Input state: [TIME_DIM, BATCH_DIM, DIM_STATE]\n s_tm1 = jax.random.normal(\n next(key_seq), (TIME_DIM, BATCH_DIM, state_size), jnp.float32)\n policy_params = policy_params_fn(online_params, None, s_tm1)\n target_policy_params = policy_params_fn(target_params, None, s_tm1)\n\n # Shape for actions: [NUM_SAMPLES, TIME_DIM, BATCH_DIM, ACTION_DIM]\n mean, stddev = target_policy_params['mean'], target_policy_params['stddev']\n mean_repeated = jnp.repeat(\n mean.reshape((1,) + mean.shape), NUM_SAMPLES, axis=0)\n stddev_repeated = jnp.repeat(\n stddev.reshape((1,) + stddev.shape), NUM_SAMPLES, axis=0)\n target_actions = _DIAGONAL_GAUSSIAN_DIST.sample(\n next(key_seq), mean_repeated, stddev_repeated)\n # If the target is advantages then num samples is 1.\n if target_name == 'advantages':\n target_actions = target_actions[0, ...]\n\n # Shape for Q: [NUM_SAMPLES, TIME_DIM, BATCH_DIM]\n # Setting Q = -a_t * tf.transpose(a_t) where a_t = s_t + a.\n # The solution to optimizing this is basically for the policy to output\n # 0 actions thereby minimizing the cost. 
Since this is a convex\n # optimization problem, the algorithm should get to a good solution quickly.\n\n # First compute a_t = s_t + a with shape: [NUM_SAMPLES, TIME_DIM, BATCH_DIM,\n # ACTION_DIM] since action dim is the same as shape dim here and then compute\n # the quadratic form.\n a_t = target_actions + jnp.expand_dims(s_tm1, 0)\n sample_q_values = -jnp.sum(a_t ** 2, axis=-1)\n # Set the advantage to the same as the q value.\n # Shape for advantages: [TIME_DIM, BATCH_DIM]\n advantages = sample_q_values[0, :, :]\n\n return dict(\n pi_params=policy_params,\n target_pi_params=target_policy_params,\n sample_q_values=sample_q_values,\n advantages=advantages,\n target_actions=target_actions,\n )\n\n\ndef get_common_loss_fn_inputs(params, key, target_name):\n out = _mock_outputs(params['online'], params['target'], key, target_name)\n pi_sample_log_probs = _DIAGONAL_GAUSSIAN_DIST.logprob(\n out['target_actions'], out['pi_params']['mean'],\n out['pi_params']['stddev'])\n\n return out, {\n 'sample_log_probs': pi_sample_log_probs,\n target_name: out[target_name],\n 'temperature_constraint': mpo_ops.LagrangePenalty(\n params['mpo']['temperature'], _EPSILON_BOUND)}\n\n\ndef get_decoupled_kl_constraints(out, params, per_dimension):\n \"\"\"Factorises KL for Gaussian.\"\"\"\n kl_mean, kl_covariance = (\n distributions.decoupled_multivariate_normal_kl_divergence(\n out['target_pi_params']['mean'], out['target_pi_params']['stddev'],\n out['pi_params']['mean'], out['pi_params']['stddev'],\n per_dimension=per_dimension))\n alpha_mean = params['mpo']['alpha_mean'] * jnp.ones_like(kl_mean)\n alpha_covariance = params['mpo']['alpha_covariance'] * jnp.ones_like(\n kl_covariance)\n\n return [\n (kl_mean, mpo_ops.LagrangePenalty(\n alpha=alpha_mean, epsilon=_EPSILON_MEAN_BOUND,\n per_dimension=per_dimension)),\n (kl_covariance, mpo_ops.LagrangePenalty(\n alpha=alpha_covariance, epsilon=_EPSILON_COVARIANCE_BOUND,\n per_dimension=per_dimension)),\n ]\n\n\ndef get_coupled_kl_constraints(out, params, per_dimension):\n kl_mean, kl_covariance = (\n distributions.decoupled_multivariate_normal_kl_divergence(\n out['target_pi_params']['mean'], out['target_pi_params']['stddev'],\n out['pi_params']['mean'], out['pi_params']['stddev'],\n per_dimension=per_dimension))\n alpha_mean = params['mpo']['alpha_mean'] * jnp.ones_like(kl_mean)\n return [\n (kl_mean + kl_covariance, mpo_ops.LagrangePenalty(\n alpha=alpha_mean,\n epsilon=_EPSILON_MEAN_BOUND + _EPSILON_COVARIANCE_BOUND,\n per_dimension=per_dimension))\n ]\n\n\ndef vmpo_e_step_without_restarting_or_importance_weights(advantages, **kwargs):\n restarting_weights = jnp.ones_like(advantages)\n importance_weights = jnp.ones_like(advantages)\n return mpo_ops.vmpo_compute_weights_and_temperature_loss(\n advantages=advantages, restarting_weights=restarting_weights,\n importance_weights=importance_weights, **kwargs)\n\n\nclass MPOTest(parameterized.TestCase):\n \"\"\"Tests for the MPO losses.\"\"\"\n\n @parameterized.parameters(\n {'target_name': 'sample_q_values',\n 'loss_fn': mpo_ops.mpo_loss,\n 'get_kl_constraints': get_decoupled_kl_constraints,\n 'per_dimension': False},\n {'target_name': 'advantages',\n 'loss_fn': mpo_ops.vmpo_loss,\n 'get_kl_constraints': get_decoupled_kl_constraints,\n 'per_dimension': False},\n {'target_name': 'sample_q_values',\n 'loss_fn': mpo_ops.mpo_loss,\n 'get_kl_constraints': get_coupled_kl_constraints,\n 'per_dimension': False},\n {'target_name': 'advantages',\n 'loss_fn': mpo_ops.vmpo_loss,\n 'get_kl_constraints': 
get_coupled_kl_constraints,\n 'per_dimension': False},\n {'target_name': 'sample_q_values',\n 'loss_fn': mpo_ops.mpo_loss,\n 'get_kl_constraints': get_decoupled_kl_constraints,\n 'per_dimension': True},\n {'target_name': 'advantages',\n 'loss_fn': mpo_ops.vmpo_loss,\n 'get_kl_constraints': get_decoupled_kl_constraints,\n 'per_dimension': True},\n {'target_name': 'sample_q_values',\n 'loss_fn': mpo_ops.mpo_loss,\n 'get_kl_constraints': get_coupled_kl_constraints,\n 'per_dimension': True},\n {'target_name': 'advantages',\n 'loss_fn': mpo_ops.vmpo_loss,\n 'get_kl_constraints': get_coupled_kl_constraints,\n 'per_dimension': True},\n )\n def test_optimization(\n self, target_name, loss_fn, get_kl_constraints, per_dimension):\n \"\"\"Tests that the policy optimization works correctly.\"\"\"\n\n def _loss(params, key):\n out, loss_fn_inputs = get_common_loss_fn_inputs(params, key, target_name)\n kl_constraints = get_kl_constraints(out, params, per_dimension)\n loss_fn_inputs.update({'kl_constraints': kl_constraints})\n loss, mpo_stats = loss_fn(**loss_fn_inputs)\n loss = jnp.mean(loss)\n temperature_bound = jnp.mean(mpo_stats.normalized_weights * jnp.log(\n mpo_stats.num_samples * mpo_stats.normalized_weights + 1e-8))\n return loss, {'outputs': out, 'temperature_bound': temperature_bound}\n\n key = jax.random.PRNGKey(_RANDOM_SEED)\n grad_fn = jax.jit(jax.grad(_loss, has_aux=True))\n optimizer = optax.adam(1e-3)\n key, new_key = jax.random.split(key)\n params = _init_params(new_key)\n opt_state = optimizer.init((params['online'], params['mpo']))\n\n @jax.jit\n def _update(params_, opt_state_, key_):\n next_key, key_ = jax.random.split(key_)\n grad, stats = grad_fn(params_, key_)\n updates, opt_state_ = optimizer.update(\n (grad['online'], grad['mpo']), opt_state_)\n online_params, mpo_params = optax.apply_updates(\n (params_['online'], params_['mpo']), updates)\n params_['online'] = online_params\n params_['mpo'] = mpo_params\n return params_, opt_state_, stats, next_key\n\n for iter_idx in range(_NUM_ITERATIONS):\n params, opt_state, extra, key = _update(params, opt_state, key)\n if iter_idx % _TARGET_UPDATE_PERIOD == 0:\n params['target'] = params['online']\n\n # Test the bounds are within tolerance.\n key, new_key = jax.random.split(key)\n _, extra = _loss(params, new_key)\n action_mean = jnp.mean(extra['outputs']['pi_params']['mean'])\n # Check action mean is close to 0.\n self.assertBetween(action_mean, -_MAX_ACTION_ERROR, _MAX_ACTION_ERROR)\n\n # Check the temperature are within the bounds.\n self.assertLess(extra['temperature_bound'], _EPSILON_BOUND)\n\n @parameterized.parameters(\n {'e_step_fn': mpo_ops.mpo_compute_weights_and_temperature_loss,\n 'additional_inputs': {},\n # dL/dq == 1 and dL/dt == epsilon (for one sample)\n 'expected_deriv_of_target': [[[1]]],\n 'sample_dimension': True},\n {'e_step_fn': vmpo_e_step_without_restarting_or_importance_weights,\n 'additional_inputs': {'top_k_fraction': 1.0},\n 'expected_deriv_of_target': [[1]],\n 'sample_dimension': False},\n )\n def test_e_step_gradient_computation(\n self, e_step_fn, additional_inputs, expected_deriv_of_target,\n sample_dimension):\n \"\"\"Tests the gradients from the E-step against the analytic ones.\"\"\"\n # Target has shape [NUM_SAMPLES, T, B] => [1, 1, 1]\n target = jnp.array([[3]], jnp.float32)\n if sample_dimension:\n target = jnp.expand_dims(target, axis=0)\n temperature = jnp.array(0.1, jnp.float32)\n def fn(target_, temperature_):\n temperature_constraint = mpo_ops.LagrangePenalty(\n temperature_, 
_EPSILON_BOUND)\n temperature_loss, _, _ = e_step_fn(\n target_, temperature_constraint=temperature_constraint,\n projection_operator=_PROJECTION_OPERATOR,\n **additional_inputs)\n return jnp.mean(temperature_loss)\n grad = jax.grad(fn, argnums=(0, 1))(target, temperature)\n\n np.testing.assert_almost_equal(np.array(grad[0]), np.array(\n expected_deriv_of_target, np.float32), decimal=4)\n self.assertAlmostEqual(grad[1], _EPSILON_BOUND, places=4)\n\n @parameterized.parameters(\n {'e_step_fn': mpo_ops.mpo_compute_weights_and_temperature_loss,\n 'additional_inputs': {},\n 'sample_dimension': True},\n {'e_step_fn': vmpo_e_step_without_restarting_or_importance_weights,\n 'additional_inputs': {'top_k_fraction': 1.0},\n 'sample_dimension': False},\n )\n def test_e_step_stop_gradient(\n self, e_step_fn, additional_inputs, sample_dimension):\n \"\"\"Tests no gradients flow through `weights` in the E-Step.\"\"\"\n # Target has shape [NUM_SAMPLES, T, B] => [1, 1, 1]\n target = jnp.array([[3]], jnp.float32)\n if sample_dimension:\n target = jnp.expand_dims(target, axis=0)\n temperature = 0.1\n # pylint: disable=g-long-lambda\n def mean_weights_fn(target_, temperature_):\n temperature_constraint = mpo_ops.LagrangePenalty(\n temperature_, _EPSILON_BOUND)\n _, weights, _ = e_step_fn(\n target_, temperature_constraint=temperature_constraint,\n projection_operator=_PROJECTION_OPERATOR,\n **additional_inputs)\n return jnp.mean(weights)\n grad = jax.grad(mean_weights_fn, argnums=(0, 1))(target, temperature)\n np.testing.assert_almost_equal(\n np.array(grad[0]), np.zeros_like(grad[0]), decimal=4)\n self.assertAlmostEqual(grad[1], 0., places=4)\n\n def test_kl_constraint_loss_gradients(self):\n \"\"\"Tests the gradients in the `_kl_constraint_loss` method.\"\"\"\n kl = jnp.array(1., jnp.float32)\n alpha = jnp.array(1., jnp.float32)\n _, _, alpha = mpo_ops.kl_constraint_loss(kl, mpo_ops.LagrangePenalty(\n alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False),\n _PROJECTION_OPERATOR)\n\n def alpha_loss_fn(alpha_):\n penalty = mpo_ops.LagrangePenalty(\n alpha=alpha_, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)\n _, alpha_loss, _ = mpo_ops.kl_constraint_loss(\n kl, penalty, _PROJECTION_OPERATOR)\n return alpha_loss\n alpha_gradients = jax.grad(alpha_loss_fn)(alpha)\n actual_alpha_gradients = _EPSILON_MEAN_BOUND - kl\n\n def kl_loss_fn(kl_):\n penalty = mpo_ops.LagrangePenalty(\n alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)\n kl_loss, _, _ = mpo_ops.kl_constraint_loss(\n kl_, penalty, _PROJECTION_OPERATOR)\n return kl_loss\n kl_gradients = jax.grad(kl_loss_fn)(kl)\n actual_kl_gradients = alpha\n\n self.assertAlmostEqual(kl_gradients, actual_kl_gradients)\n self.assertAlmostEqual(alpha_gradients, actual_alpha_gradients)\n\n def test_kl_constraint_loss_stop_gradients(self):\n \"\"\"Tests the stop gradients in the `kl_constraint_loss` function.\n\n The `alpha_loss` term should not affect the KL and the `kl` term should\n not affect `alpha`.\n \"\"\"\n kl = jnp.array(1., jnp.float32)\n alpha = jnp.array(1., jnp.float32)\n _, _, alpha = mpo_ops.kl_constraint_loss(kl, mpo_ops.LagrangePenalty(\n alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False),\n _PROJECTION_OPERATOR)\n\n def kl_loss_fn(alpha_):\n penalty = mpo_ops.LagrangePenalty(\n alpha=alpha_, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)\n kl_loss, _, _ = mpo_ops.kl_constraint_loss(\n kl, penalty, _PROJECTION_OPERATOR)\n return kl_loss\n\n kl_gradients = jax.grad(kl_loss_fn)(alpha)\n\n def alpha_loss_fn(kl_):\n 
penalty = mpo_ops.LagrangePenalty(\n alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)\n _, alpha_loss, _ = mpo_ops.kl_constraint_loss(\n kl_, penalty, _PROJECTION_OPERATOR)\n return alpha_loss\n alpha_gradients = jax.grad(alpha_loss_fn)(kl)\n\n # Test that there are no gradients of KL w.r.t alpha\n self.assertEqual(kl_gradients, 0.)\n\n # Test that there are no gradients of alpha w.r.t kl\n self.assertEqual(alpha_gradients, 0.)\n\n @parameterized.parameters(\n # With restarting weights of 1 (and temperature of 1) the weights should\n # be e^-1, 1, max advantage is 2 and num samples is 2 so temperature loss\n # is log(1 + e^-1) + 2 - log(2) + temperature epsilon\n {'advantages': np.array([[1.0, 2.0]]),\n 'restarting_weights': np.array([[1.0, 1.0]]),\n 'expected_temperature_loss': (math.log(1.0 + math.exp(-1.0)) + 2.0 -\n math.log(2.0) + _EPSILON_BOUND)},\n # With the second restarting weight set to 0 the weights become 1, 0\n # max advantage is 1 and num samples is 1 so temperature loss is\n # log(1) + 1 - log(1) + temperature epsilon\n {'advantages': np.array([[1.0, 2.0]]),\n 'restarting_weights': np.array([[1.0, 0.0]]),\n 'expected_temperature_loss': 1.0 + _EPSILON_BOUND},\n )\n def test_restarting_weights(\n self, advantages, restarting_weights, expected_temperature_loss):\n \"\"\"Test that calculation is correct if restarting weight is set to 0.\"\"\"\n temperature_loss, _, _ = mpo_ops.vmpo_compute_weights_and_temperature_loss(\n advantages, restarting_weights, np.ones_like(restarting_weights),\n mpo_ops.LagrangePenalty(1.0, _EPSILON_BOUND),\n functools.partial(np.clip, a_min=1e-8, a_max=None), 1.0)\n self.assertAlmostEqual(\n temperature_loss, expected_temperature_loss, places=4)\n\n @parameterized.parameters(\n # When the top k fraction is 1.0 all of the weights should be 1\n {'top_k_fraction': 1.0,\n 'scaled_advantages': np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),\n 'expected_top_k_weights': np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])},\n # When the top k fraction is 0.5 it will take the bottom row as these are\n # the highest.\n {'top_k_fraction': 0.5,\n 'scaled_advantages': np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),\n 'expected_top_k_weights': np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}\n )\n def test_top_k_fraction(\n self, top_k_fraction, scaled_advantages, expected_top_k_weights):\n \"\"\"Test that only the top k fraction are used.\"\"\"\n top_k_weights = mpo_ops.get_top_k_weights(\n top_k_fraction, jnp.ones_like(scaled_advantages), scaled_advantages)\n np.testing.assert_allclose(top_k_weights, expected_top_k_weights)\n\n def test_top_k_fraction_too_low(self):\n \"\"\"Test if the top k fraction returns 0 advantages we raise an error.\"\"\"\n with self.assertRaises(ValueError):\n mpo_ops.get_top_k_weights(0.01, jnp.ones((3, 2)), jnp.ones((3, 2)))\n\n @parameterized.parameters(\n # With importance weights of 1 (and temperature of 1) the weights should\n # be e^-1, 1, max advantage is 2 and num samples is 2 so temperature loss\n # is log(1 + e^-1) + 2 - log(2) + temperature epsilon\n {'advantages': np.array([[1.0, 2.0]]),\n 'importance_weights': np.array([[1.0, 1.0]]),\n 'expected_temperature_loss': (math.log(1.0 + math.exp(-1.0)) + 2.0 -\n math.log(2.0) + _EPSILON_BOUND)},\n # If the second importance weight is 0.5 temperature loss becomes\n # log(0.5 + e^-1) + 2 - log(2) + temperature epsilon\n {'advantages': np.array([[1.0, 2.0]]),\n 'importance_weights': np.array([[1.0, 0.5]]),\n 'expected_temperature_loss': (math.log(0.5 + math.exp(-1.0)) + 2.0 -\n 
math.log(2.0) + _EPSILON_BOUND)},\n )\n def test_importance_weights(\n self, advantages, importance_weights, expected_temperature_loss):\n \"\"\"Test that importance weights have the correct effect.\"\"\"\n temperature_loss, _, _ = mpo_ops.vmpo_compute_weights_and_temperature_loss(\n advantages, np.ones_like(importance_weights), importance_weights,\n mpo_ops.LagrangePenalty(1.0, _EPSILON_BOUND),\n functools.partial(np.clip, a_min=1e-8, a_max=None), 1.0)\n self.assertAlmostEqual(\n temperature_loss, expected_temperature_loss, places=4)\n\n @parameterized.parameters({'per_dimension': True}, {'per_dimension': False})\n def test_mpo_input_axis_order_equivalence(self, per_dimension):\n \"\"\"Test loss functions are equivalent regardless of axis order.\"\"\"\n key = jax.random.PRNGKey(_RANDOM_SEED)\n key, new_key = jax.random.split(key)\n params = _init_params(new_key)\n out, mpo_inputs = get_common_loss_fn_inputs(params, key, 'sample_q_values')\n kl_constraints = get_coupled_kl_constraints(out, params,\n per_dimension=per_dimension)\n mpo_inputs.update({'kl_constraints': kl_constraints})\n\n # Original loss fn inputs are [S T B],\n stb_loss, stb_outputs = mpo_ops.mpo_loss(**mpo_inputs)\n mean_stb_loss = jnp.mean(stb_loss)\n\n # Swap axes and try [S B T]\n mpo_inputs.update({\n 'sample_log_probs': jnp.swapaxes(mpo_inputs['sample_log_probs'], 1, 2),\n 'sample_q_values': jnp.swapaxes(mpo_inputs['sample_q_values'], 1, 2),\n 'kl_constraints': [(jnp.swapaxes(kl, 0, 1), mpo_ops.LagrangePenalty(\n alpha=jnp.swapaxes(pen.alpha, 0, 1), epsilon=pen.epsilon,\n per_dimension=pen.per_dimension)) for (kl, pen) in kl_constraints],\n })\n sbt_loss, sbt_outputs = mpo_ops.mpo_loss(**mpo_inputs)\n mean_sbt_loss = jnp.mean(sbt_loss)\n\n # Try [T B S] denoting sample_axis at 2 instead of 0.\n mpo_inputs.update({\n 'sample_log_probs': jnp.swapaxes(mpo_inputs['sample_log_probs'], 0, 2),\n 'sample_q_values': jnp.swapaxes(mpo_inputs['sample_q_values'], 0, 2),\n 'kl_constraints': kl_constraints, # T B\n 'sample_axis': 2\n })\n tbs_loss, tbs_outputs = mpo_ops.mpo_loss(**mpo_inputs)\n mean_tbs_loss = jnp.mean(tbs_loss)\n\n self.assertAlmostEqual(mean_stb_loss, mean_sbt_loss, places=4)\n self.assertAlmostEqual(mean_tbs_loss, mean_sbt_loss, places=4)\n self.assertEqual(tbs_outputs.num_samples, sbt_outputs.num_samples)\n self.assertEqual(tbs_outputs.num_samples, stb_outputs.num_samples)\n\n @parameterized.parameters({'per_dimension': True}, {'per_dimension': False})\n def test_vmpo_input_axis_order_equivalence(self, per_dimension):\n \"\"\"Test loss functions are equivalent regardless of axis order.\"\"\"\n key = jax.random.PRNGKey(_RANDOM_SEED)\n key, new_key = jax.random.split(key)\n params = _init_params(new_key)\n out, vmpo_inputs = get_common_loss_fn_inputs(params, key, 'advantages')\n kl_constraints = get_coupled_kl_constraints(out, params,\n per_dimension=per_dimension)\n vmpo_inputs.update({'kl_constraints': kl_constraints})\n\n # Original loss fn inputs are [T B],\n tb_loss, tb_outputs = mpo_ops.vmpo_loss(**vmpo_inputs)\n mean_tb_loss = jnp.mean(tb_loss)\n\n # Swap axes and try [B T]\n vmpo_inputs.update({\n 'sample_log_probs': jnp.swapaxes(vmpo_inputs['sample_log_probs'], 0, 1),\n 'advantages': jnp.swapaxes(vmpo_inputs['advantages'], 0, 1),\n 'kl_constraints': [(jnp.swapaxes(kl, 0, 1), mpo_ops.LagrangePenalty(\n alpha=jnp.swapaxes(pen.alpha, 0, 1), epsilon=pen.epsilon,\n per_dimension=pen.per_dimension)) for (kl, pen) in kl_constraints],\n })\n bt_loss, bt_outputs = mpo_ops.vmpo_loss(**vmpo_inputs)\n mean_bt_loss = 
jnp.mean(bt_loss)\n\n self.assertAlmostEqual(mean_tb_loss, mean_bt_loss, places=4)\n self.assertEqual(tb_outputs.num_samples, bt_outputs.num_samples)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"numpy.array",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.testing.assert_allclose"
]
] |
wesmith/CSI-Camera | [
"8bcb7c58f3546dbe8c1c81054185d347056b4ff6"
] | [
"modules/ws_dual_camera.py"
] | [
"# ws_dual_camera.py\n# WSmith 12/23/20\n# utilize modified module ws_csi_camera for the camera class\n\nimport cv2\nimport numpy as np\nimport ws_csi_camera as ws\nfrom importlib import reload\n\nreload(ws) # ws is under development\n\ndef display(sensor_mode=ws.S_MODE_3_1280_720_60, \n dispW=ws.DISP_W_M3_M4_one_half, \n dispH=ws.DISP_H_M3_M4_one_half,\n display_fps=True):\n\n # at present, display the picam and a webcam: in the future, display two picams\n\n picam = ws.CSI_Camera(display_fps=display_fps)\n webcam = ws.CSI_Camera(display_fps=display_fps)\n\n # this only needed for the picam\n picam.create_gstreamer_pipeline(sensor_id=0, sensor_mode=sensor_mode, flip_method=0,\n display_height=dispH, display_width=dispW)\n\n picam.open(picam.gstreamer_pipeline)\n webcam.open(1)\n\n picam.start()\n webcam.start()\n\n txt = \"Picam on left: Sensor Mode {}, Display {} x {}\".format(sensor_mode, dispW, dispH)\n cv2.namedWindow(txt, cv2.WINDOW_AUTOSIZE)\n\n while True:\n\n _, imgL = picam.read()\n _, imgR = webcam.read()\n\n imgR = cv2.resize(imgR, (imgL.shape[1], imgL.shape[0]))\n img = np.hstack((imgL, imgR))\n\n cv2.imshow(txt, img)\n\n keyCode = cv2.waitKey(5) & 0xFF\n \n if keyCode == ord('q'):\n break\n\n picam.stop()\n webcam.stop()\n picam.release()\n webcam.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n\n display(sensor_mode=ws.S_MODE_2_1920_1080_30, \n dispW=ws.DISP_W_M2_one_quarter, dispH=ws.DISP_H_M2_one_quarter)\n\n\n"
] | [
[
"numpy.hstack"
]
] |
MitchellAcoustics/MoSQITo | [
"15e45888d08b2932909f50fd6af0ef9d5595a588"
] | [
"mosqito/sq_metrics/tonality/tone_to_noise_ecma/_spectrum_smoothing.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 21 16:44:36 2020\n\n@author: wantysal\n\"\"\"\n# Standard library import\nimport numpy as np\n\n# Local import\nfrom mosqito.sound_level_meter.noct_spectrum._getFrequencies import _getFrequencies\n\n\ndef _spectrum_smoothing(freqs_in, spec, noct, low_freq, high_freq, freqs_out):\n \"\"\"\n Compute smoothed spectrum according to the n-th octave band chosen\n\n Parameters\n ----------\n freqs : numpy.array\n frequency axis\n spec : numpy.array\n spectrum in dB\n noct : integer\n n-th octave-band according to which smooth the spectrum\n low_freq : float\n lowest frequency of the n-th octave bands\n high_freq : float\n highest frequency of the n-th octave bands\n freqs_out : numpy.array\n frequency axis along which the smoothed spectrum is given\n\n Returns\n -------\n smoothed-spectrum : numpy.array\n smoothed spectrum along the given frequency axis\n\n \"\"\"\n\n # n-th octave bands filter\n filter_freqs = _getFrequencies(\n low_freq, high_freq, noct, G=10, fr=1000)[\"f\"]\n filter_freqs[len(filter_freqs) - 1, 2] = high_freq\n filter_freqs[0, 0] = low_freq\n\n # Smoothed spectrum creation\n nb_bands = filter_freqs.shape[0]\n smoothed_spectrum = np.zeros((nb_bands))\n i = 0\n # Each band is considered individually until all of them have been treated\n while nb_bands > 0:\n # Find the index of the spectral components within the frequency bin\n bin_index = np.where(\n (freqs_in >= filter_freqs[i, 0]) & (freqs_in <= filter_freqs[i, 2])\n )[0]\n # If the frequency bin is empty, it is deleted from the list\n if len(bin_index) == 0:\n smoothed_spectrum = np.delete(smoothed_spectrum, i, axis=0)\n filter_freqs = np.delete(filter_freqs, i, axis=0)\n nb_bands -= 1\n\n else:\n # The spectral components within the frequency bin are averaged on an energy basis\n spec_sum = 0\n for j in bin_index:\n spec_sum += 10 ** (spec[j] / 10)\n smoothed_spectrum[i] = 10 * np.log10(spec_sum / len(bin_index))\n nb_bands -= 1\n i += 1\n\n # Pose of the smoothed spectrum on the frequency-axis\n cor = []\n low = []\n high = []\n # Index of the lower, center and higher limit of each frequency bin into the original spectrum\n for i in range(len(filter_freqs)):\n cor.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 1])))\n low.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 0])))\n high.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 2])))\n\n smooth_spec = np.zeros((spec.shape))\n for i in range(filter_freqs.shape[0]):\n smooth_spec[low[i]: high[i]] = smoothed_spectrum[i]\n\n return smooth_spec\n"
] | [
[
"numpy.where",
"numpy.abs",
"numpy.delete",
"numpy.zeros"
]
] |
maltius/tf_blazeface_training | [
"c4c73590f5084fcac56fa1625d227acf45a918ae"
] | [
"predictor.py"
] | [
"import tensorflow as tf\nfrom utils import bbox_utils, data_utils, drawing_utils, io_utils, train_utils, landmark_utils\nimport blazeface\n\nargs = io_utils.handle_args()\nif args.handle_gpu:\n io_utils.handle_gpu_compatibility()\n\nbatch_size = 1\nuse_custom_images = False\ncustom_image_path = \"data/images/\"\nhyper_params = train_utils.get_hyper_params()\nimg_size = hyper_params[\"img_size\"]\n\ndata_types = data_utils.get_data_types()\ndata_shapes = data_utils.get_data_shapes()\npadding_values = data_utils.get_padding_values()\n\nif use_custom_images:\n img_paths = data_utils.get_custom_imgs(custom_image_path)\n total_items = len(img_paths)\n test_data = tf.data.Dataset.from_generator(lambda: data_utils.custom_data_generator(\n img_paths, img_size, img_size), data_types, data_shapes)\nelse:\n test_split = \"train[80%:]\"\n test_data, info = data_utils.get_dataset(\"the300w_lp\", test_split)\n total_items = data_utils.get_total_item_size(info, test_split)\n test_data = test_data.map(lambda x: data_utils.preprocessing(x, img_size, img_size))\n \n# train_split = \"train[:80%]\"\n# val_split = \"train[80%:]\"\n# train_data, info = data_utils.get_dataset(\"the300w_lp\", train_split)\n# val_data, _ = data_utils.get_dataset(\"the300w_lp\", val_split)\n# train_total_items = data_utils.get_total_item_size(info, train_split)\n# val_total_items = data_utils.get_total_item_size(info, val_split)\n# #\n# img_size = hyper_params[\"img_size\"]\n\n# train_data = train_data.map(lambda x : data_utils.preprocessing(x, img_size, img_size, augmentation.apply))\n# val_data = val_data.map(lambda x : data_utils.preprocessing(x, img_size, img_size))\n\n#\n\ntest_data=ds_val\ntest_data = test_data.padded_batch(batch_size, padded_shapes=data_shapes, padding_values=padding_values)\n\n\n\nmodel = blazeface.get_model(hyper_params)\nmodel_path = io_utils.get_model_path()\nmodel.load_weights('D:/Downloads/tf-blazeface-master/trained/blazeface_model_weights_85.h5')\n\n# model.load_weights('C:/Users/altius/Downloads/blazeface80_epochs15_any139.h5')\n\nprior_boxes = bbox_utils.generate_prior_boxes(hyper_params[\"feature_map_shapes\"], hyper_params[\"aspect_ratios\"])\n\nvariances = hyper_params[\"variances\"]\ntotal_landmarks = hyper_params[\"total_landmarks\"]\nlandmark_variances = total_landmarks * variances[0:2]\nvariances += landmark_variances\n\n\nfor image_data in test_data:\n img, lands, coords = image_data\n print(img.shape)\n pass\n \n # ind=0\n # pred_deltas, pred_scores = model.predict_on_batch(img)\n # pred_deltas *= variances\n # #\n # pred_bboxes_and_landmarks = bbox_utils.get_bboxes_and_landmarks_from_deltas(prior_boxes, pred_deltas)\n # pred_bboxes_and_landmarks = tf.clip_by_value(pred_bboxes_and_landmarks, 0, 1)\n # #\n # pred_scores = tf.cast(pred_scores, tf.float32)\n # #\n # weighted_suppressed_data = bbox_utils.weighted_suppression(pred_scores[ind], pred_bboxes_and_landmarks[ind])\n # #\n # weighted_bboxes = weighted_suppressed_data[..., 0:4]\n # weighted_landmarks = weighted_suppressed_data[..., 4:]\n # #\n # denormalized_bboxes = bbox_utils.denormalize_bboxes(weighted_bboxes, img_size, img_size)\n # weighted_landmarks = tf.reshape(weighted_landmarks, (-1, total_landmarks, 2))\n # denormalized_landmarks = landmark_utils.denormalize_landmarks(weighted_landmarks, img_size, img_size)\n # drawing_utils.draw_bboxes_with_landmarks(img[ind], denormalized_bboxes, denormalized_landmarks)\n\n ind=0\n pred_deltas, pred_scores = model.predict_on_batch(img)\n pred_deltas *= variances\n #\n 
pred_bboxes_and_landmarks = bbox_utils.get_bboxes_and_landmarks_from_deltas(prior_boxes, pred_deltas)\n pred_bboxes_and_landmarks = tf.clip_by_value(pred_bboxes_and_landmarks, 0, 1)\n #\n pred_scores = tf.cast(pred_scores, tf.float32)\n #\n weighted_suppressed_data = bbox_utils.weighted_suppression(pred_scores[ind]*10, pred_bboxes_and_landmarks[ind])\n #\n weighted_bboxes = weighted_suppressed_data[..., 0:4]\n weighted_landmarks = weighted_suppressed_data[..., 4:]\n #\n denormalized_bboxes = bbox_utils.denormalize_bboxes(weighted_bboxes, img_size, img_size)\n weighted_landmarks = tf.reshape(weighted_landmarks, (-1, total_landmarks, 2))\n denormalized_landmarks = landmark_utils.denormalize_landmarks(weighted_landmarks, img_size, img_size)\n drawing_utils.draw_bboxes_with_landmarks(img[ind], denormalized_bboxes, denormalized_landmarks)\n \n# for item in weighted_landmarks:\n# print(item)"
] | [
[
"tensorflow.cast",
"tensorflow.clip_by_value",
"tensorflow.reshape"
]
] |
rkiman/astropy | [
"99de28bc0dbfe2ee0bef95b67f5619e03d22cc06"
] | [
"astropy/io/misc/asdf/tags/unit/tests/test_quantity.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\nimport io\nimport pytest\n\nfrom astropy import units\n\nasdf = pytest.importorskip('asdf', minversion='2.0.0')\nfrom asdf.tests import helpers\n\n\ndef roundtrip_quantity(yaml, quantity):\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.AsdfFile.open(buff) as ff:\n assert (ff.tree['quantity'] == quantity).all()\n buff2 = io.BytesIO()\n ff.write_to(buff2)\n\n buff2.seek(0)\n with asdf.AsdfFile.open(buff2) as ff:\n assert (ff.tree['quantity'] == quantity).all()\n\ndef test_value_scalar(tmpdir):\n testval = 2.71828\n testunit = units.kpc\n yaml = \"\"\"\nquantity: !unit/quantity-1.1.0\n value: {}\n unit: {}\n\"\"\".format(testval, testunit)\n\n quantity = units.Quantity(testval, unit=testunit)\n roundtrip_quantity(yaml, quantity)\n\ndef test_value_array(tmpdir):\n testval = [3.14159]\n testunit = units.kg\n yaml = \"\"\"\nquantity: !unit/quantity-1.1.0\n value: !core/ndarray-1.0.0 {}\n unit: {}\n\"\"\".format(testval, testunit)\n\n quantity = units.Quantity(testval, unit=testunit)\n roundtrip_quantity(yaml, quantity)\n\ndef test_value_multiarray(tmpdir):\n testval = [x*2.3081 for x in range(10)]\n testunit = units.ampere\n yaml = \"\"\"\nquantity: !unit/quantity-1.1.0\n value: !core/ndarray-1.0.0 {}\n unit: {}\n\"\"\".format(testval, testunit)\n\n quantity = units.Quantity(testval, unit=testunit)\n roundtrip_quantity(yaml, quantity)\n\ndef test_value_ndarray(tmpdir):\n from numpy import array, float64\n testval = [[1,2,3],[4,5,6]]\n testunit = units.km\n yaml = \"\"\"\nquantity: !unit/quantity-1.1.0\n value: !core/ndarray-1.0.0\n datatype: float64\n data:\n {}\n unit: {}\n\"\"\".format(testval, testunit)\n\n data = array(testval, float64)\n quantity = units.Quantity(data, unit=testunit)\n roundtrip_quantity(yaml, quantity)\n"
] | [
[
"numpy.array"
]
] |
YannCabanes/geomstats | [
"ce3f4bab6cd59c2f071371a46e336086771d0493",
"ce3f4bab6cd59c2f071371a46e336086771d0493"
] | [
"tests/tests_geomstats/test_estimators.py",
"examples/learning_graph_embedding_and_predicting.py"
] | [
"\"\"\"Template unit tests for scikit-learn estimators.\"\"\"\n\nimport pytest\nfrom sklearn.datasets import load_iris\n\nimport geomstats.backend as gs\nimport geomstats.tests\nfrom geomstats.learning._template import (\n TemplateClassifier,\n TemplateEstimator,\n TemplateTransformer,\n)\n\nESTIMATORS = (TemplateClassifier, TemplateEstimator, TemplateTransformer)\n\n\nclass TestEstimators(geomstats.tests.TestCase):\n _multiprocess_can_split_ = True\n\n def setup_method(self):\n self.data = load_iris(return_X_y=True)\n\n @geomstats.tests.np_and_autograd_only\n def test_template_estimator(self):\n est = TemplateEstimator()\n self.assertEqual(est.demo_param, \"demo_param\")\n\n X, y = self.data\n\n est.fit(X, y)\n self.assertTrue(hasattr(est, \"is_fitted_\"))\n\n y_pred = est.predict(X)\n self.assertAllClose(y_pred, gs.ones(gs.shape(X)[0]))\n\n @geomstats.tests.np_and_autograd_only\n def test_template_transformer_error(self):\n X, _ = self.data\n n_samples = gs.shape(X)[0]\n trans = TemplateTransformer()\n trans.fit(X)\n X_diff_size = gs.ones((n_samples, gs.shape(X)[1] + 1))\n with pytest.raises(ValueError):\n trans.transform(X_diff_size)\n\n def test_template_transformer(self):\n X, _ = self.data\n trans = TemplateTransformer()\n self.assertTrue(trans.demo_param == \"demo\")\n\n trans.fit(X)\n self.assertTrue(trans.n_features_ == X.shape[1])\n\n X_trans = trans.transform(X)\n self.assertAllClose(X_trans, gs.sqrt(X))\n\n X_trans = trans.fit_transform(X)\n self.assertAllClose(X_trans, gs.sqrt(X))\n\n @geomstats.tests.np_autograd_and_tf_only\n def test_template_classifier(self):\n X, y = self.data\n clf = TemplateClassifier()\n self.assertTrue(clf.demo_param == \"demo\")\n\n clf.fit(X, y)\n self.assertTrue(hasattr(clf, \"classes_\"))\n self.assertTrue(hasattr(clf, \"X_\"))\n self.assertTrue(hasattr(clf, \"y_\"))\n\n y_pred = clf.predict(X)\n self.assertTrue(y_pred.shape == (X.shape[0],))\n",
"\"\"\"Learning embedding of graph using Poincare Ball Model.\"\"\"\n\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nimport geomstats.backend as gs\nimport geomstats.visualization as visualization\nfrom geomstats.datasets.prepare_graph_data import HyperbolicEmbedding\nfrom geomstats.datasets.utils import load_karate_graph\nfrom geomstats.learning.kmeans import RiemannianKMeans\nfrom geomstats.learning.kmedoids import RiemannianKMedoids\n\n\ndef main():\n \"\"\"Learning Poincaré graph embedding.\n\n Learns Poincaré Ball embedding by using Riemannian\n gradient descent algorithm. Then K-means is applied\n to learn labels of each data sample.\n \"\"\"\n gs.random.seed(1234)\n\n karate_graph = load_karate_graph()\n hyperbolic_embedding = HyperbolicEmbedding(max_epochs=3)\n embeddings = hyperbolic_embedding.embed(karate_graph)\n\n colors = {1: \"b\", 2: \"r\"}\n group_1 = mpatches.Patch(color=colors[1], label=\"Group 1\")\n group_2 = mpatches.Patch(color=colors[2], label=\"Group 2\")\n\n circle = visualization.PoincareDisk(point_type=\"ball\")\n\n _, ax = plt.subplots(figsize=(8, 8))\n ax.axes.xaxis.set_visible(False)\n ax.axes.yaxis.set_visible(False)\n circle.set_ax(ax)\n circle.draw(ax=ax)\n for i_embedding, embedding in enumerate(embeddings):\n x_coords = embedding[0]\n y_coords = embedding[1]\n pt_id = i_embedding\n plt.scatter(x_coords, y_coords, c=colors[karate_graph.labels[pt_id][0]], s=150)\n ax.annotate(pt_id, (x_coords, y_coords))\n\n plt.tick_params(which=\"both\")\n plt.title(\"Poincare Ball Embedding of the Karate Club Network\")\n plt.legend(handles=[group_1, group_2])\n plt.show()\n\n n_clusters = 2\n\n kmeans = RiemannianKMeans(\n metric=hyperbolic_embedding.manifold.metric,\n n_clusters=n_clusters,\n init=\"random\",\n )\n\n centroids = kmeans.fit(X=embeddings)\n labels = kmeans.predict(X=embeddings)\n\n colors = [\"g\", \"c\", \"m\"]\n circle = visualization.PoincareDisk(point_type=\"ball\")\n _, ax2 = plt.subplots(figsize=(8, 8))\n circle.set_ax(ax2)\n circle.draw(ax=ax2)\n ax2.axes.xaxis.set_visible(False)\n ax2.axes.yaxis.set_visible(False)\n group_1_predicted = mpatches.Patch(color=colors[0], label=\"Predicted Group 1\")\n group_2_predicted = mpatches.Patch(color=colors[1], label=\"Predicted Group 2\")\n group_centroids = mpatches.Patch(color=colors[2], label=\"Cluster centroids\")\n\n for _ in range(n_clusters):\n for i_embedding, embedding in enumerate(embeddings):\n x_coords = embedding[0]\n y_coords = embedding[1]\n pt_id = i_embedding\n if labels[i_embedding] == 0:\n color = colors[0]\n else:\n color = colors[1]\n plt.scatter(x_coords, y_coords, c=color, s=150)\n ax2.annotate(pt_id, (x_coords, y_coords))\n\n for _, centroid in enumerate(centroids):\n x_coords = centroid[0]\n y_coords = centroid[1]\n plt.scatter(\n x_coords,\n y_coords,\n c=colors[2],\n marker=\"*\",\n s=150,\n )\n\n plt.title(\"K-means applied to Karate club embedding\")\n plt.legend(handles=[group_1_predicted, group_2_predicted, group_centroids])\n plt.show()\n\n kmedoid = RiemannianKMedoids(\n metric=hyperbolic_embedding.manifold.metric,\n n_clusters=n_clusters,\n init=\"random\",\n n_jobs=2,\n )\n\n centroids = kmedoid.fit(data=embeddings, max_iter=100)\n labels = kmedoid.predict(data=embeddings)\n\n colors = [\"g\", \"c\", \"m\"]\n circle = visualization.PoincareDisk(point_type=\"ball\")\n _, ax2 = plt.subplots(figsize=(8, 8))\n circle.set_ax(ax2)\n circle.draw(ax=ax2)\n ax2.axes.xaxis.set_visible(False)\n ax2.axes.yaxis.set_visible(False)\n group_1_predicted = 
mpatches.Patch(color=colors[0], label=\"Predicted Group 1\")\n group_2_predicted = mpatches.Patch(color=colors[1], label=\"Predicted Group 2\")\n group_centroids = mpatches.Patch(color=colors[2], label=\"Cluster centroids\")\n\n for _ in range(n_clusters):\n for i_embedding, embedding in enumerate(embeddings):\n x_coords = embedding[0]\n y_coords = embedding[1]\n pt_id = i_embedding\n if labels[i_embedding] == 0:\n color = colors[0]\n else:\n color = colors[1]\n plt.scatter(x_coords, y_coords, c=color, s=150)\n ax2.annotate(pt_id, (x_coords, y_coords))\n\n for _, centroid in enumerate(centroids):\n x_coords = centroid[0]\n y_coords = centroid[1]\n plt.scatter(\n x_coords,\n y_coords,\n c=colors[2],\n marker=\"*\",\n s=150,\n )\n\n plt.title(\"K-Medoids applied to Karate club embedding\")\n plt.legend(handles=[group_1_predicted, group_2_predicted, group_centroids])\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.datasets.load_iris"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.patches.Patch",
"matplotlib.pyplot.scatter"
]
] |
MuhammadEzzatHBK/CyclopeptideSequencing | [
"cd07045169758478b4845a54d5710bd329a836ca"
] | [
"test/testing.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 16 09:31:53 2021\n\n@author: Muhammad Ayman Ezzat \n Youmna Magdy Abdullah\n\"\"\"\nfrom algorithms import branch_and_bound\nimport timeit\nimport pandas as pd\n\n''' Accuracy Testing '''\nLabSpectrum = [97, 97, 99, 101, 103, 196, 198, 198, 200, 202, 295,\n 297, 299, 299, 301, 394, 396, 398, 400, 400, 497]\n\nLabResults = sorted(['PVCPT', 'PTPVC', 'PTPCV', 'PCVPT', 'VPTPC',\n 'VCPTP', 'TPVCP', 'TPCVP', 'CPTPV', 'CVPTP'])\n\nAssignmentResults = branch_and_bound(LabSpectrum)\n\nprint('Input: ', LabSpectrum)\nprint('Provided Lab Results: ', *LabResults)\nprint('Our Assignment Results: ', *AssignmentResults)\nprint('Are they identical? ', LabResults == AssignmentResults)\n\n''' Perforamnce Testing '''\ntime_taken = []\n\nfor i in range(500):\n start = timeit.timeit()\n branch_and_bound(LabSpectrum)\n end = timeit.timeit()\n time_taken.append(abs(end - start))\n\ndata = {'duration' : time_taken}\nDataFrame = pd.DataFrame(data)\nDataFrame.to_csv('test_data.csv')"
] | [
[
"pandas.DataFrame"
]
] |
Vizards8/pytorch-spine-segmentation | [
"588b7e7b09c5a370e337e2f12614df69d177ccaa"
] | [
"utils/metrics.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport math\nimport scipy.spatial\nimport scipy.ndimage.morphology\n\n\"\"\"\nTrue Positive (真正, TP)预测为正的正样本\nTrue Negative(真负 , TN)预测为负的负样本 \nFalse Positive (假正, FP)预测为正的负样本\nFalse Negative(假负 , FN)预测为负的正样本\n\"\"\"\n\n\ndef metrics(predict, label, out_class):\n \"\"\"Calculate the required metrics\n pred = label = [BS, class_num, H, W]\n \"\"\"\n IOU_list = []\n Dice_list = []\n false_positive_rate_list = []\n false_negative_rate_list = []\n acc = []\n for i in range(1, out_class):\n N = label.size(0)\n # indices = []\n # # 根据batch_size筛去全0label,有标签才计算评价指标\n # for j in range(N):\n # gt_true = torch.sum(label[j, i, :, :])\n # if gt_true:\n # indice.append(j)\n #\n # if indices:\n Dice_list.append(diceCoeffv2(predict[:, i, :, :], label[:, i, :, :]))\n IOU_list.append(IOU(predict[:, i, :, :], label[:, i, :, :]))\n FP_FN_rate_list = FP_FN_rate(predict[:, i, :, :], label[:, i, :, :])\n false_positive_rate_list.append(FP_FN_rate_list[0])\n false_negative_rate_list.append(FP_FN_rate_list[1])\n # accu = pixel_accuracy(predict[indices, i, :, :], label[indices, i, :, :])\n # if accu > 0.9:\n # print(f'slice id:{i}, acc:{accu}')\n acc.append(pixel_accuracy(predict[:, i, :, :], label[:, i, :, :]))\n # return mean(IOU_list), mean(Dice_list), mean(acc), mean(false_positive_rate_list), mean(false_negative_rate_list)\n return mean(IOU_list), Dice_list, mean(acc), mean(false_positive_rate_list), mean(false_negative_rate_list)\n\n\ndef mean(list):\n \"\"\"计算平均值\"\"\"\n if not len(list):\n return 0\n return sum(list) / len(list)\n\n\ndef mean_class(list):\n \"\"\"分别计算每个class平均值,返回list\"\"\"\n res = []\n for i in list:\n if not len(i):\n print('Warning class missing!')\n res.append(0)\n else:\n res.append(mean(i).item())\n return res\n\n\ndef batch_pix_accuracy(predict, target):\n \"\"\"Batch Pixel Accuracy\n Args:\n predict: input 4D tensor\n target: label 3D tensor\n \"\"\"\n _, predict = torch.max(predict, 1)\n predict = predict.cpu().numpy() + 1\n target = target.cpu().numpy() + 1\n pixel_labeled = np.sum(target > 0)\n pixel_correct = np.sum((predict == target) * (target > 0))\n assert pixel_correct <= pixel_labeled, \\\n \"Correct area should be smaller than Labeled\"\n return pixel_correct, pixel_labeled\n\n\ndef batch_intersection_union(predict, target, nclass):\n \"\"\"Batch Intersection of Union\n Args:\n predict: input 4D tensor\n target: label 3D tensor\n nclass: number of categories (int)\n \"\"\"\n _, predict = torch.max(predict, 1)\n mini = 1\n maxi = nclass\n nbins = nclass\n predict = predict.cpu().numpy() + 1\n target = target.cpu().numpy() + 1\n\n predict = predict * (target > 0).astype(predict.dtype)\n intersection = predict * (predict == target)\n # areas of intersection and union\n area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))\n area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))\n area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))\n area_union = area_pred + area_lab - area_inter\n assert (area_inter <= area_union).all(), \\\n \"Intersection area should be smaller than Union area\"\n return area_inter, area_union\n\n\ndef intersection_and_union(im_pred, im_lab, num_class):\n im_pred = np.asarray(im_pred)\n im_lab = np.asarray(im_lab)\n # Remove classes from unlabeled pixels in gt image.\n im_pred = im_pred * (im_lab > 0)\n # Compute area intersection:\n intersection = im_pred * (im_pred == im_lab)\n area_inter, _ = np.histogram(intersection, bins=num_class - 1,\n 
range=(1, num_class - 1))\n # Compute area union:\n area_pred, _ = np.histogram(im_pred, bins=num_class - 1,\n range=(1, num_class - 1))\n area_lab, _ = np.histogram(im_lab, bins=num_class - 1,\n range=(1, num_class - 1))\n area_union = area_pred + area_lab - area_inter\n return area_inter, area_union\n\n\ndef diceCoeff(pred, gt, smooth=1e-5, ):\n r\"\"\" computational formula:\n dice = (2 * (pred ∩ gt)) / |pred| + |gt|\n |pred|:pred中的元素和\n \"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n intersection = (pred_flat * gt_flat).sum(1)\n unionset = pred_flat.sum(1) + gt_flat.sum(1)\n score = (2 * intersection + smooth) / (unionset + smooth)\n\n return score.sum() / N\n\n\ndef diceFlat(pred, gt, smooth=1e-5):\n intersection = ((pred * gt).sum()).item()\n\n unionset = (pred.sum() + gt.sum()).item()\n score = (2 * intersection + smooth) / (unionset + smooth)\n return score\n\n\ndef diceCoeffv2(pred, gt, eps=1e-5):\n r\"\"\" computational formula:\n dice = (2 * tp) / (2 * tp + fp + fn)\n \"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum(gt_flat * pred_flat, dim=1)\n fp = torch.sum(pred_flat, dim=1) - tp\n fn = torch.sum(gt_flat, dim=1) - tp\n score = (2 * tp + eps) / (2 * tp + fp + fn + eps)\n return score.sum() / N\n\n\ndef IOU(pred, gt, eps=1e-5):\n r\"\"\" computational formula:\n IOU = pred ∩ gt / pred ∪ gt\n IOU = tp / (tp + fp + fn)\n \"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)\n tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)\n score = (tp + eps) / (tp + fp + fn + eps)\n\n return score.sum() / N\n\n\ndef FP_FN_rate(pred, gt, eps=1e-5):\n r\"\"\"computational formula:\n False_Positive_rate = fp / (fp + tn)\n False_Negtive_rate = fn / (fn + tp)\n \"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)\n tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)\n\n false_positive_rate = fp / (fp + tn + eps)\n false_negtive_rate = fn / (fn + tp + eps)\n return false_positive_rate.sum() / N, false_negtive_rate.sum() / N\n\n\ndef pixel_accuracy(pred, gt, eps=1e-5):\n \"\"\"TP / (TP + FN)\"\"\"\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)\n\n score = (tp.float() + eps) / ((tp + fn).float() + eps)\n # if score < 0.01:\n # print(\n # f'score:{score.item()}, gt:{torch.sum(gt_flat, dim=1).item()}, pred:{torch.sum(pred_flat, dim=1).item()}, tp:{tp.item()}, fn:{fn.item()}')\n return score.sum() / N\n\n\ndef diceCoeffv3(pred, gt, eps=1e-5):\n r\"\"\" computational formula:\n dice = (2 * tp) / (2 * tp + fp + fn)\n \"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)\n # 转为float,以防long类型之间相除结果为0\n score = (2 * tp + eps).float() / (2 * tp + fp + fn + eps).float()\n\n return score.sum() / N\n\n\ndef jaccard(pred, gt, eps=1e-5):\n \"\"\"TP 
/ (TP + FP + FN)\"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0))\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0))\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0))\n\n score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)\n return score.sum() / N\n\n\ndef jaccardFlat(pred, gt, eps=1e-5):\n pred_flat = pred.squeeze()\n gt_flat = gt.squeeze()\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0))\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0))\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0))\n score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)\n return score\n\n\ndef jaccardv2(pred, gt, eps=1e-5):\n \"\"\"TP / (TP + FP + FN)\"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum(gt_flat * pred_flat, dim=1)\n fp = torch.sum(pred_flat, dim=1) - tp\n fn = torch.sum(gt_flat, dim=1) - tp\n\n score = (tp + eps).float() / (tp + fp + fn + eps).float()\n return score.sum() / N\n\n\ndef tversky(pred, gt, eps=1e-5, alpha=0.7):\n \"\"\"TP / (TP + (1-alpha) * FP + alpha * FN)\"\"\"\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum(gt_flat * pred_flat, dim=1)\n fp = torch.sum(pred_flat, dim=1) - tp\n fn = torch.sum(gt_flat, dim=1) - tp\n score = (tp + eps) / (tp + (1 - alpha) * fp + alpha * fn + eps)\n return score.sum() / N\n\n\ndef accuracy(pred, gt, eps=1e-5):\n \"\"\"(TP + TN) / (TP + FP + FN + TN)\"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)\n tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)\n fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)\n\n score = ((tp + tn).float() + eps) / ((tp + fp + tn + fn).float() + eps)\n return score.sum() / N\n\n\ndef precision(pred, gt, eps=1e-5):\n \"\"\"TP / (TP + FP)\"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0))\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0))\n\n score = (tp.float() + eps) / ((tp + fp).float() + eps)\n\n return score.sum() / N\n\n\ndef specificity(pred, gt, eps=1e-5):\n \"\"\"TN / (TN + FP)\"\"\"\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0))\n tn = torch.sum((pred_flat == 0) * (gt_flat == 0))\n\n score = (tn.float() + eps) / ((fp + tn).float() + eps)\n\n return score.sum() / N\n\n\nif __name__ == '__main__':\n # shape = torch.Size([2, 3, 4, 4])\n # 模拟batch_size = 2\n '''\n 1 0 0= bladder\n 0 1 0 = tumor\n 0 0 1= background \n '''\n pred = torch.Tensor([[\n [[0, 1, 0, 0],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]],\n [[1, 0, 1, 1],\n [0, 1, 1, 0],\n [0, 0, 0, 0],\n [1, 0, 0, 1]]]\n ])\n\n gt = torch.Tensor([[\n [[0, 1, 1, 0],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 1, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]],\n [[1, 0, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 0, 0],\n [1, 0, 0, 1]]]\n ])\n\n dice1 = diceCoeff(pred[:, 0:1, :], gt[:, 0:1, :])\n dice2 = jaccard(pred[:, 0:1, :], gt[:, 0:1, :])\n dice3 = diceCoeffv3(pred[:, 0:1, :], gt[:, 0:1, :])\n print(dice1, dice2, dice3)\n"
] | [
[
"torch.sum",
"numpy.sum",
"numpy.histogram",
"numpy.asarray",
"torch.max",
"torch.Tensor"
]
] |
AIM3-RUC/VideoIC | [
"ea324938e839a679324f42161d195f5bef3db26f"
] | [
"src/MML-CG/train.py"
] | [
"'''\n Re-organize the MMIG model\n 2021-09-20\n'''\n\nimport os\nimport sys\nimport time\nimport json\nimport logging\nimport argparse\n\nimport torch\nimport torch.optim as Optim\nfrom torch.autograd import Variable\n\nimport utils\nimport modules\nimport dataset\nimport metrics\n\n\n# set gpu\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1,2,3'\n\nparser = argparse.ArgumentParser(description='train.py')\n# set model parameters\nparser.add_argument('-n_emb', type=int, default=512, help='Embedding size')\nparser.add_argument('-n_hidden', type=int, default=512, help='Hidden size')\nparser.add_argument('-n_head', type=int, default=8, help='Number of head')\nparser.add_argument('-n_block', type=int, default=6, help=\"Number of block\") \n\nparser.add_argument('-max_len', type=int, default=20, help=\"Limited length for text\")\nparser.add_argument('-time_range', type=int, default=5, help='Time range')\nparser.add_argument('-max_cnum', type=int, default=15, help=\"Max comments each second\")\nparser.add_argument('-beam_size', type=int, default=1, help=\"Bean size\") # 1 means greedy search, which is the same with our paper implement\n\n# training setting\nparser.add_argument('-batch_size', type=int, default=32, help='Batch size')\nparser.add_argument('-epoch', type=int, default=100, help='Number of epoch')\nparser.add_argument('-dropout', type=float, default=0.2, help='Dropout rate')\nparser.add_argument('-lr', type=float, default=1e-3, help=\"Learning rate\")\nparser.add_argument('-weight_decay', type=float, default=0.001, help=\"Learning rate\")\nparser.add_argument('-early_stop', type=float, default=20, help=\"Early Stop\")\n\n# data path\nparser.add_argument('-data_path', type=str, default=None, help='dict and image path')\nparser.add_argument('-out_path', type=str, default=None, help='out path')\nparser.add_argument('-outfile', type=str, default='out.json', help='outfile for generation')\nparser.add_argument('-restore', type=str, default=None, help=\"Restoring model path\")\nparser.add_argument('-mode', type=str, default=None)\nargs = parser.parse_args()\n\n# set random seed\ntorch.manual_seed(116)\ntorch.cuda.manual_seed(116)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# log file\nif args.mode == 'train':\n if not os.path.exists(args.out_path):\n os.mkdir(args.out_path)\n logger.addHandler(logging.FileHandler(os.path.join(args.out_path, 'log'), \"w\"))\n \n# load img\nimages = utils.load_images(args.data_path)\n\n# load vocabs\nvocabs, rev_vocabs = utils.load_vocabs(args.data_path)\n#logger.info('Load vocabs file ' + str(len(vocabs)))\n\ndef get_dataset(data_path, images, is_train, set_name):\n return dataset.Dataset(data_path = data_path,\n vocabs = vocabs,\n rev_vocabs=rev_vocabs,\n images = images,\n left_time_range = args.time_range,\n right_time_range = args.time_range,\n max_len = args.max_len,\n max_cnum = args.max_cnum,\n is_train = is_train,\n set_name = set_name)\n \ndef get_dataloader(dataset, batch_size, is_train):\n return torch.utils.data.DataLoader(dataset = dataset,\n batch_size = batch_size,\n shuffle = is_train)\n \ndef save_model(path, model):\n model_state_dict = model.state_dict()\n torch.save(model_state_dict, path)\n\n\ndef train():\n # load dataset\n train_set = get_dataset(data_path = os.path.join(args.data_path, 'train.json'),\n images = images,\n is_train = True)\n valid_set = get_dataset(data_path = os.path.join(args.data_path, 'dev.json'),\n images = images,\n is_train = False)\n train_batch = get_dataloader(dataset = train_set,\n 
batch_size = args.batch_size,\n is_train = True)\n \n model = modules.Model(n_embs = args.n_emb,\n n_hidden = args.n_hidden,\n n_head = args.n_head,\n n_block = args.n_block,\n max_len = args.max_len,\n dropout = args.dropout,\n vocab_size = len(vocabs),\n left_range = args.time_range,\n right_range = args.time_range)\n \n if args.restore is not None:\n model_dict = torch.load(args.restore)\n model.load_state_dict(model_dict)\n \n model.cuda()\n optim = Optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr = args.lr, weight_decay=args.weight_decay)\n best_score = -100000\n early_stop_count = 0\n \n for i in range(args.epoch):\n model.train()\n report_loss, start_time, n_batches = 0, time.time(), 0\n \n for batch in train_batch:\n model.zero_grad()\n V, S, Y = batch\n \n # V: video feature\n V = Variable(V).cuda()\n # S: Surrounding comments\n S = Variable(S).cuda()\n # Y: Ground truth\n Y = Variable(Y).cuda()\n \n multi_gpu_loss = model(V, S, Y)\n loss = torch.sum(multi_gpu_loss)\n loss.backward()\n optim.step()\n \n report_loss += torch.mean(multi_gpu_loss).item()\n n_batches += 1\n \n # report loss\n print('\\nEpoch: %d, report_loss: %.3f, time: %.2f'\n % (i+1, report_loss / n_batches, time.time() - start_time))\n logger.info('\\nEpoch '+str(i) + ', report_loss: '+str(report_loss/n_batches) + ' , time: ' + str(time.time() - start_time))\n \n # eval\n score = eval(model, valid_set)\n if score > best_score:\n best_score = score\n print('Best score ', best_score)\n save_model(os.path.join(args.out_path, 'best_checkpoint.pt'), model)\n logger.info('Evaluation score ' + str(score) + ', Best score ' + str(best_score))\n early_stop_count = 0\n else:\n early_stop_count += 1\n save_model(os.path.join(args.out_path, 'checkpoint.pt'), model)\n print('Evaluation score ', score, '. Best score ', best_score, '. Early stop count ', early_stop_count)\n if early_stop_count == args.early_stop:\n sys.exit()\n return 0\n\ndef eval(model, valid_set):\n print('Start Evaluation ... ')\n start_time = time.time()\n model.eval()\n valid_batch = get_dataloader(valid_set, args.batch_size, is_train=False)\n loss = 0\n total_batch = 0\n with torch.no_grad():\n for batch in valid_batch:\n V, S, Y = batch\n V = Variable(V).cuda()\n S = Variable(S).cuda()\n Y = Variable(Y).cuda()\n loss += torch.mean(model(V, S, Y)).item()\n total_batch += 1\n loss = loss / total_batch\n print('Loss: ', loss)\n print(\"evaluting time:\", time.time() - start_time)\n return -loss\n\ndef test_generation():\n # build model\n test_set = get_dataset(data_path = os.path.join(args.data_path, 'test.json'),\n images = images,\n is_train = False,\n set_name = 'test')\n \n model = modules.Model(n_embs = args.n_emb,\n n_hidden = args.n_hidden,\n n_head = args.n_head,\n n_block = args.n_block,\n max_len = args.max_len,\n dropout = args.dropout,\n vocab_size = len(vocabs),\n left_range = args.time_range,\n right_range = args.time_range)\n \n if args.restore is not None:\n model_dict = torch.load(args.restore)\n model.load_state_dict({k.replace('module.', ''):v for k,v in model_dict.items()})\n else:\n print('Error! 
Fail to load model for test mode')\n sys.exit()\n \n model.cuda()\n model.eval()\n \n with torch.no_grad():\n with open(args.outfile, 'w') as fout:\n for i in range(len(test_set)):\n data = test_set.get_data(i)\n V = data['video_feature']\n S = data['context_feature']\n V = Variable(V).cuda()\n S = Variable(S).cuda()\n comment_ids = model.generate(V, S, BOS_token=vocabs['<BOS>'], EOS_token=vocabs['<EOS>'], beam_size=args.beam_size).data.tolist()\n comment = transform(comment_ids[0])\n for key in data:\n print(key)\n sample = {'video_time': data['video_time'],\n 'context': data['context'],\n 'comment': data['comment'],\n 'candidate': data['candidate'],\n 'generation': comment}\n term = json.dumps(sample, ensure_ascii=False)\n fout.write(str(term)+'\\n')\n\ndef transform(ids):\n sentences = []\n for wid in ids:\n if wid == vocabs['<BOS>']:\n continue\n if wid == vocabs['<EOS>']:\n break\n sentences.append(rev_vocabs[wid])\n return sentences\n\ndef test_ranking():\n # build model\n test_set = get_dataset(data_path = os.path.join(args.data_path, 'test.json'),\n images = images,\n is_train = False,\n set_name = 'test')\n \n model = modules.Model(n_embs = args.n_emb,\n n_hidden = args.n_hidden,\n n_head = args.n_head,\n n_block = args.n_block,\n max_len = args.max_len,\n dropout = args.dropout,\n vocab_size = len(vocabs),\n left_range = args.time_range,\n right_range = args.time_range)\n \n if args.restore is not None:\n model_dict = torch.load(args.restore)\n model.load_state_dict({k.replace('module.', ''):v for k,v in model_dict.items()})\n else:\n print('Error! Fail to load model for test mode')\n sys.exit()\n \n model.cuda()\n model.eval()\n \n predictions, references = [], []\n \n with torch.no_grad():\n for i in range(len(test_set)):\n data = test_set.get_data(i)\n V = Variable(data['video_feature']).cuda()\n S = Variable(data['context_feature']).cuda()\n C = Variable(torch.stack(data['candidate_feature'])).cuda()\n comment_ids = model.ranking(V, S, C).data\n \n candidate = []\n comments = list(data['candidate'].keys())\n for id in comment_ids:\n candidate.append(comments[id])\n predictions.append(candidate)\n references.append(data['candidate'])\n\n recall_1 = metrics.recall(predictions, references, 1)\n recall_5 = metrics.recall(predictions, references, 5)\n recall_10 = metrics.recall(predictions, references, 10)\n mr = metrics.mean_rank(predictions, references)\n mrr = metrics.mean_reciprocal_rank(predictions, references)\n print('Report ranking result')\n print('Recall 1: ', recall_1)\n print('Recall 5: ', recall_5)\n print('Recall 10: ', recall_10)\n print('MR: ', mr)\n print('MRR: ', mrr)\n\n\nif __name__ == '__main__':\n if args.mode == 'train':\n print('-----------Train Mode-----------')\n train()\n elif args.mode == 'generate':\n print('-----------Generation Mode-----------')\n test_generation()\n elif args.mode == 'ranking':\n print('-----------Ranking Mode-----------')\n test_ranking()\n else:\n print('Wrong Mode')"
] | [
[
"torch.sum",
"torch.utils.data.DataLoader",
"torch.stack",
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.save",
"torch.no_grad",
"torch.autograd.Variable",
"torch.mean"
]
] |
Caged/splineworks | [
"0fad1e98ba6928f6ffeef0018a4d52696a38cce2"
] | [
"sandworks/generators/splines.py"
] | [
"from numpy import pi\nfrom numpy import array\nfrom numpy import linspace\nfrom numpy import arange\nfrom numpy import zeros\nfrom numpy import column_stack\nfrom numpy import array\nfrom time import time\nfrom math import radians\n\nimport cairocffi as cairo\nfrom sand import Sand\nfrom ..lib.sand_spline import SandSpline\nfrom ..lib.helpers import hex_to_rgb_decimal, SimpleLinearScale\n\n\ndef guide_iterator(x, y):\n while True:\n yield array([[x, y]])\n\n\ndef make_vertical_surface(sand, gamma, canvas_width, canvas_height, flipped_height):\n \"\"\"\n Make a vertical image\n \"\"\"\n sand.write_to_surface(gamma)\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, canvas_width, canvas_height)\n context = cairo.Context(surface)\n context.rotate(radians(90))\n context.translate(0, -flipped_height)\n context.scale(1.0, 1.0)\n context.set_source_surface(sand.sur, 0, 0)\n context.paint()\n\n return surface\n\n\ndef generate(args):\n # Number of lines\n line_count = args.lines\n\n width = args.width\n height = args.height\n\n if args.dir == 'vertical':\n width = args.height\n height = args.width\n\n xscale = SimpleLinearScale(domain=array([0, width]), range=array([0, 1]))\n yscale = SimpleLinearScale(domain=array([0, height]), range=array([0, 1]))\n\n # Margin as a pixel value of total size. Convert that margin to a number between 0..1\n # representing the percentage of total pixel size\n margin = args.margin\n margin_x = xscale(margin)\n margin_y = yscale(margin)\n\n # Output PNG gamma\n gamma = 1.5\n\n # What frame to write out\n save_frame = args.save_every\n\n # TODO: Step. Appears to be jitter multiplier for points along the spline\n # Causes the sand to be more \"windswept\" towards the later points\n step = 0.0000003 * 0.15\n\n # The number of points along the spline. More points means a denser-looking spline.\n point_count = 1000\n\n # Convert colors to RGB decimal\n sand_color = hex_to_rgb_decimal(args.color)\n bg_color = hex_to_rgb_decimal(args.bg_color)\n\n # Set alpha\n sand_color.append(0.001)\n bg_color.append(1)\n\n sand = Sand(width, height)\n sand.set_rgba(sand_color)\n sand.set_bg(bg_color)\n\n splines = []\n\n # For each y column\n for index, ypos in enumerate(linspace(margin_y, 1.0 - margin_y, line_count)):\n # TODO: point_number? Appears to affect the tightness of the wave noise. That is, higher\n # values like 500 appear to produce more nodes in each spline, resulting in more noise\n # detail.\n pnum = 4 + index\n guide = guide_iterator(0.5, ypos)\n\n x = linspace(-1, 1.0, pnum) * (1.0 - 2 * margin_x) * 0.5\n y = zeros(pnum, 'float')\n path = column_stack([x, y])\n scale = arange(pnum).astype('float') * step\n\n spline = SandSpline(guide, path, point_count, scale)\n splines.append(spline)\n\n j = 0\n while True:\n for s in splines:\n start = time()\n xy = next(s)\n sand.paint_dots(xy)\n if j is not 0 and not j % (save_frame * line_count):\n frame_number = int(j / save_frame)\n file_name = '{}/{}-{}.png'.format(\n args.out_dir,\n int(time()),\n frame_number)\n\n if args.dir == 'vertical':\n surface = make_vertical_surface(sand, gamma, args.width, args.height, height)\n surface.write_to_png(file_name)\n else:\n sand.write_to_png(file_name, gamma)\n\n print('Saved frame {} in {}'.format(frame_number, time() - start))\n\n j += 1\n"
] | [
[
"numpy.zeros",
"numpy.column_stack",
"numpy.arange",
"numpy.array",
"numpy.linspace"
]
] |
devchai123/Paddle-Lite | [
"eea59b66f61bb2acad471010c9526eeec43a15ca"
] | [
"lite/tests/unittest_py/op/test_layer_norm_op.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\nimport hypothesis.strategies as st\nimport argparse\nimport numpy as np\nfrom functools import partial\n\n\nclass TestLayerNormOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n self.enable_testing_on_place(\n TargetType.X86,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 2])\n self.enable_testing_on_place(\n TargetType.ARM,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 2, 4])\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n return True\n\n def sample_program_configs(self, draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=64), min_size=4, max_size=4))\n epsilon = draw(st.floats(min_value=0.0001, max_value=0.0005))\n begin_norm_axis = draw(st.sampled_from([1, 2]))\n\n def generate_input(*args, **kwargs):\n return np.random.random(in_shape).astype(np.float32)\n\n channel_dim = 1\n for dim in range(begin_norm_axis, 4):\n channel_dim = channel_dim * in_shape[dim]\n\n def generate_scale(*args, **kwargs):\n return np.random.random([channel_dim]).astype(np.float32)\n\n def generate_bias(*args, **kwargs):\n return np.random.random([channel_dim]).astype(np.float32)\n\n run_op = OpConfig(\n type=\"layer_norm\",\n inputs={\n \"X\": [\"input_data\"],\n \"Scale\": [\"scale_data\"],\n \"Bias\": [\"bias_data\"]\n },\n outputs={\n \"Y\": [\"output_data\"],\n \"Mean\": [\"mean_data\"],\n \"Variance\": [\"var_data\"],\n },\n attrs={\"epsilon\": epsilon,\n \"begin_norm_axis\": begin_norm_axis})\n program_config = ProgramConfig(\n ops=[run_op],\n weights={},\n inputs={\n \"input_data\": TensorConfig(data_gen=partial(generate_input)),\n \"scale_data\": TensorConfig(data_gen=partial(generate_scale)),\n \"bias_data\": TensorConfig(data_gen=partial(generate_bias)),\n },\n outputs=[\"output_data\", \"mean_data\", \"var_data\"])\n return program_config\n\n def sample_predictor_configs(self):\n return self.get_predictor_configs(), [\"layer_norm\"], (5e-5, 5e-5)\n\n def add_ignore_pass_case(self):\n pass\n\n def test(self, *args, **kwargs):\n self.run_and_statis(quant=False, max_examples=25)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n"
] | [
[
"numpy.random.random"
]
] |
camponogaraviera/qutip | [
"1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f"
] | [
"qutip/cy/br_codegen.py"
] | [
"import os\nimport numpy as np\nimport qutip.settings as qset\nfrom qutip.interpolate import Cubic_Spline\n_cython_path = os.path.dirname(os.path.abspath(__file__)).replace(\"\\\\\", \"/\")\n_include_string = \"'\"+_cython_path+\"/complex_math.pxi'\"\n__all__ = ['BR_Codegen']\n\n\nclass BR_Codegen(object):\n \"\"\"\n Class for generating Bloch-Redfield time-dependent code\n at runtime.\n \"\"\"\n def __init__(self, h_terms=None, h_td_terms=None, h_obj=None,\n c_terms=None, c_td_terms=None, c_obj=None,\n a_terms=None, a_td_terms=None,\n spline_count=[0,0],\n coupled_ops=[],\n coupled_lengths=[],\n coupled_spectra=[],\n config=None, sparse=False,\n use_secular=None,\n sec_cutoff=0.1,\n args=None,\n use_openmp=False,\n omp_thresh=None,\n omp_threads=None,\n atol=None):\n try:\n import cython\n except (ImportError, ModuleNotFoundError):\n raise ModuleNotFoundError(\"Cython is needed for \"\n \"time-depdendent brmesolve\")\n import sys\n import os\n sys.path.append(os.getcwd())\n\n # Hamiltonian time-depdendent pieces\n self.h_terms = h_terms # number of H pieces\n self.h_td_terms = h_td_terms\n self.h_obj = h_obj\n # Collapse operator time-depdendent pieces\n self.c_terms = c_terms # number of C pieces\n self.c_td_terms = c_td_terms\n self.c_obj = c_obj\n # BR operator time-depdendent pieces\n self.a_terms = a_terms # number of A pieces\n self.a_td_terms = a_td_terms\n self.spline_count = spline_count\n self.use_secular = int(use_secular)\n self.sec_cutoff = sec_cutoff\n self.args = args\n self.sparse = sparse\n self.spline = 0\n # Code generator properties\n self.code = [] # strings to be written to file\n self.level = 0 # indent level\n self.config = config\n if atol is None:\n self.atol = qset.atol\n else:\n self.atol = atol\n\n self.use_openmp = use_openmp\n self.omp_thresh = omp_thresh\n self.omp_threads = omp_threads\n\n self.coupled_ops = coupled_ops\n self.coupled_lengths = coupled_lengths\n self.coupled_spectra = coupled_spectra\n\n def write(self, string):\n \"\"\"write lines of code to self.code\"\"\"\n self.code.append(\" \" * self.level + string + \"\\n\")\n\n def file(self, filename):\n \"\"\"open file called filename for writing\"\"\"\n self.file = open(filename, \"w\")\n\n def generate(self, filename=\"rhs.pyx\"):\n \"\"\"generate the file\"\"\"\n for line in cython_preamble(self.use_openmp)+self.aop_td_funcs():\n self.write(line)\n\n # write function for Hamiltonian terms (there is always\n # be at least one term)\n for line in cython_checks() + self.ODE_func_header():\n self.write(line)\n self.indent()\n #Reset spline count\n self.spline = 0\n for line in self.func_vars()+self.ham_add_and_eigsolve()+ \\\n self.br_matvec_terms()+[\"\\n\"]:\n self.write(line)\n\n for line in self.func_end():\n self.write(line)\n self.dedent()\n\n self.file(filename)\n self.file.writelines(self.code)\n self.file.close()\n self.config.cgen_num += 1\n\n def indent(self):\n \"\"\"increase indention level by one\"\"\"\n self.level += 1\n\n def dedent(self):\n \"\"\"decrease indention level by one\"\"\"\n if self.level == 0:\n raise SyntaxError(\"Error in code generator\")\n self.level -= 1\n\n\n def _get_arg_str(self, args):\n if len(args) == 0:\n return ''\n\n ret = ''\n for name, value in self.args.items():\n if isinstance(value, np.ndarray):\n ret += \",\\n np.ndarray[np.%s_t, ndim=1] %s\" % \\\n (value.dtype.name, name)\n else:\n if isinstance(value, (int, np.int32, np.int64)):\n kind = 'int'\n elif isinstance(value, (float, np.float32, np.float64)):\n kind = 'float'\n elif isinstance(value, 
(complex, np.complex128)):\n kind = 'complex'\n #kind = type(value).__name__\n ret += \",\\n \" + kind + \" \" + name\n return ret\n\n\n def ODE_func_header(self):\n \"\"\"Creates function header for time-dependent ODE RHS.\"\"\"\n func_name = \"def cy_td_ode_rhs(\"\n # strings for time and vector variables\n input_vars = (\"\\n double t\" +\n \",\\n complex[::1] vec\")\n for k in range(self.h_terms):\n input_vars += (\",\\n \" +\n \"complex[::1,:] H%d\" % k)\n\n #Add array for each Cubic_Spline H term\n for htd in self.h_td_terms:\n if isinstance(htd, Cubic_Spline):\n if not htd.is_complex:\n input_vars += (\",\\n \" +\n \"double[::1] spline%d\" % self.spline)\n else:\n input_vars += (\",\\n \" +\n \"complex[::1] spline%d\" % self.spline)\n self.spline += 1\n\n\n for k in range(self.c_terms):\n input_vars += (\",\\n \" +\n \"complex[::1,:] C%d\" % k)\n\n #Add array for each Cubic_Spline c_op term\n for ctd in self.c_td_terms:\n if isinstance(ctd, Cubic_Spline):\n if not ctd.is_complex:\n input_vars += (\",\\n \" +\n \"double[::1] spline%d\" % self.spline)\n else:\n input_vars += (\",\\n \" +\n \"complex[::1] spline%d\" % self.spline)\n self.spline += 1\n\n\n #Add coupled a_op terms\n for _a in self.a_td_terms:\n if isinstance(_a, Cubic_Spline):\n if not _a.is_complex:\n input_vars += (\",\\n \" +\n \"double[::1] spline%d\" % self.spline)\n else:\n input_vars += (\",\\n \" +\n \"complex[::1] spline%d\" % self.spline)\n self.spline += 1\n\n\n #Add a_op terms\n for k in range(self.a_terms):\n input_vars += (\",\\n \" +\n \"complex[::1,:] A%d\" % k)\n\n\n input_vars += (\",\\n unsigned int nrows\")\n input_vars += self._get_arg_str(self.args)\n\n func_end = \"):\"\n return [func_name + input_vars + func_end]\n\n def func_vars(self):\n \"\"\"Writes the variables and their types & spmv parts\"\"\"\n func_vars = [\"\", \"cdef double complex * \" +\n 'out = <complex *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(complex))']\n func_vars.append(\" \")\n return func_vars\n\n\n def aop_td_funcs(self):\n aop_func_str=[]\n spline_val = self.spline_count[0]\n coupled_val = 0\n kk = 0\n while kk < self.a_terms:\n if kk not in self.coupled_ops:\n aa = self.a_td_terms[kk]\n if isinstance(aa, str):\n aop_func_str += [\"cdef complex spectral{0}(double w, double t): return {1}\".format(kk, aa)]\n elif isinstance(aa, tuple):\n if isinstance(aa[0],str):\n str0 = aa[0]\n elif isinstance(aa[0],Cubic_Spline):\n if not aa[0].is_complex:\n aop_func_str += [\"cdef double[::1] spline{0} = np.array(\".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+\",dtype=float)\"]\n str0 = \"interp(w, %s, %s, spline%s)\" % (aa[0].a, aa[0].b, spline_val)\n else:\n aop_func_str += [\"cdef complex[::1] spline{0} = np.array(\".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+\",dtype=complex)\"]\n str0 = \"zinterp(w, %s, %s, spline%s)\" % (aa[0].a, aa[0].b, spline_val)\n spline_val += 1\n else:\n raise Exception('Error parsing tuple.')\n\n if isinstance(aa[1],str):\n str1 = aa[1]\n elif isinstance(aa[1],Cubic_Spline):\n if not aa[1].is_complex:\n aop_func_str += [\"cdef double[::1] spline{0} = np.array(\".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+\",dtype=float)\"]\n str1 = \"interp(t, %s, %s, spline%s)\" % (aa[1].a, aa[1].b, spline_val)\n else:\n aop_func_str += [\"cdef complex[::1] spline{0} = np.array(\".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+\",dtype=complex)\"]\n str1 = \"zinterp(t, %s, %s, spline%s)\" % (aa[1].a, 
aa[1].b, spline_val)\n spline_val += 1\n else:\n raise Exception('Error parsing tuple.')\n\n aop_func_str += [\"cdef complex spectral{0}(double w, double t): return ({1})*({2})\".format(kk, str0, str1)]\n else:\n raise Exception('Invalid a_td_term.')\n kk += 1\n else:\n aa = self.coupled_spectra[coupled_val]\n if isinstance(aa, str):\n aop_func_str += [\"cdef complex spectral{0}(double w, double t): return {1}\".format(kk, aa)]\n elif isinstance(aa, Cubic_Spline):\n if not aa[1].is_complex:\n aop_func_str += [\"cdef double[::1] spline{0} = np.array(\".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+\",dtype=float)\"]\n str1 = \"interp(t, %s, %s, spline%s)\" % (aa[1].a, aa[1].b, spline_val)\n else:\n aop_func_str += [\"cdef complex[::1] spline{0} = np.array(\".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+\",dtype=complex)\"]\n str1 = \"zinterp(t, %s, %s, spline%s)\" % (aa[1].a, aa[1].b, spline_val)\n spline_val += 1\n aop_func_str += [\"cdef complex spectral{0}(double w, double t): return {1}\".format(kk, str1)]\n kk += self.coupled_lengths[coupled_val]\n coupled_val += 1\n\n return aop_func_str\n\n\n def ham_add_and_eigsolve(self):\n ham_str = []\n #allocate initial zero-Hamiltonian and eigenvector array in Fortran-order\n ham_str += ['cdef complex[::1, :] H = farray_alloc(nrows)']\n ham_str += ['cdef complex[::1, :] evecs = farray_alloc(nrows)']\n #allocate double array for eigenvalues\n ham_str += ['cdef double * eigvals = <double *>PyDataMem_NEW_ZEROED(nrows,sizeof(double))']\n for kk in range(self.h_terms):\n if isinstance(self.h_td_terms[kk], Cubic_Spline):\n S = self.h_td_terms[kk]\n if not S.is_complex:\n td_str = \"interp(t, %s, %s, spline%s)\" % (S.a, S.b, self.spline)\n else:\n td_str = \"zinterp(t, %s, %s, spline%s)\" % (S.a, S.b, self.spline)\n ham_str += [\"dense_add_mult(H, H{0}, {1})\".format(kk,td_str)]\n self.spline += 1\n else:\n ham_str += [\"dense_add_mult(H, H{0}, {1})\".format(kk,self.h_td_terms[kk])]\n #Do the eigensolving\n ham_str += [\"ZHEEVR(H, eigvals, evecs, nrows)\"]\n #Free H as it is no longer needed\n ham_str += [\"PyDataMem_FREE(&H[0,0])\"]\n\n return ham_str\n\n def br_matvec_terms(self):\n br_str = []\n # Transform vector eigenbasis\n br_str += [\"cdef double complex * eig_vec = vec_to_eigbasis(vec, evecs, nrows)\"]\n # Do the diagonal liouvillian matvec\n br_str += [\"diag_liou_mult(eigvals, eig_vec, out, nrows)\"]\n # Do the cop_term matvec for each c_term\n for kk in range(self.c_terms):\n if isinstance(self.c_td_terms[kk], Cubic_Spline):\n S = self.c_td_terms[kk]\n if not S.is_complex:\n td_str = \"interp(t, %s, %s, spline%s)\" % (S.a, S.b, self.spline)\n else:\n td_str = \"zinterp(t, %s, %s, spline%s)\" % (S.a, S.b, self.spline)\n if self.use_openmp:\n\n br_str += [\"cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})\".format(kk,\n td_str, self.omp_thresh, self.omp_threads, self.atol)]\n else:\n br_str += [\"cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})\".format(kk, td_str, self.atol)]\n self.spline += 1\n else:\n if self.use_openmp:\n br_str += [\"cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})\".format(kk,\n self.c_td_terms[kk], self.omp_thresh, self.omp_threads, self.atol)]\n else:\n br_str += [\"cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})\".format(kk, self.c_td_terms[kk], self.atol)]\n\n if self.a_terms != 0:\n #Calculate skew and dw_min terms\n br_str += [\"cdef double[:,::1] skew = 
<double[:nrows,:nrows]><double *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(double))\"]\n br_str += [\"cdef double dw_min = skew_and_dwmin(eigvals, skew, nrows)\"]\n\n #Compute BR term matvec\n kk = 0\n coupled_val = 0\n while kk < self.a_terms:\n if kk not in self.coupled_ops:\n if self.use_openmp:\n br_str += [\"br_term_mult_openmp(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})\".format(kk,\n self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]\n else:\n br_str += [\"br_term_mult(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})\".format(kk, self.use_secular, self.sec_cutoff, self.atol)]\n kk += 1\n else:\n br_str += ['cdef complex[::1, :] Ac{0} = farray_alloc(nrows)'.format(kk)]\n for nn in range(self.coupled_lengths[coupled_val]):\n if isinstance(self.a_td_terms[kk+nn], str):\n br_str += [\"dense_add_mult(Ac{0}, A{1}, {2})\".format(kk,kk+nn,self.a_td_terms[kk+nn])]\n elif isinstance(self.a_td_terms[kk+nn], Cubic_Spline):\n S = self.a_td_terms[kk+nn]\n if not S.is_complex:\n td_str = \"interp(t, %s, %s, spline%s)\" % (S.a, S.b, self.spline)\n else:\n td_str = \"zinterp(t, %s, %s, spline%s)\" % (S.a, S.b, self.spline)\n br_str += [\"dense_add_mult(Ac{0}, A{1}, {2})\".format(kk,kk+nn,td_str)]\n else:\n raise Exception('Invalid time-dependence fot a_op.')\n\n if self.use_openmp:\n br_str += [\"br_term_mult_openmp(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})\".format(kk,\n self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]\n else:\n br_str += [\"br_term_mult(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})\".format(kk, self.use_secular, self.sec_cutoff, self.atol)]\n\n br_str += [\"PyDataMem_FREE(&Ac{0}[0,0])\".format(kk)]\n kk += self.coupled_lengths[coupled_val]\n coupled_val += 1\n return br_str\n\n\n def func_end(self):\n end_str = []\n #Transform out vector back to fock basis\n end_str += [\"cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = vec_to_fockbasis(out, evecs, nrows)\"]\n #Free everything at end\n if self.a_terms != 0:\n end_str += [\"PyDataMem_FREE(&skew[0,0])\"]\n end_str += [\"PyDataMem_FREE(&evecs[0,0])\"]\n end_str += [\"PyDataMem_FREE(eigvals)\"]\n end_str += [\"PyDataMem_FREE(eig_vec)\"]\n end_str += [\"PyDataMem_FREE(out)\"]\n end_str += [\"return arr_out\"]\n return end_str\n\n\n\ndef cython_preamble(use_omp=False):\n if use_omp:\n call_str = \"from qutip.cy.openmp.br_omp cimport (cop_super_mult_openmp, br_term_mult_openmp)\"\n else:\n call_str = \"from qutip.cy.brtools cimport (cop_super_mult, br_term_mult)\"\n \"\"\"\n Returns list of code segments for Cython preamble.\n \"\"\"\n return [\"\"\"#!python\n#cython: language_level=3\n# This file is generated automatically by QuTiP.\n# (C) 2011 and later, QuSTaR\nimport numpy as np\ncimport numpy as np\ncimport cython\nnp.import_array()\ncdef extern from \"numpy/arrayobject.h\" nogil:\n void PyDataMem_NEW_ZEROED(size_t size, size_t elsize)\n void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)\n void PyDataMem_FREE(void * ptr)\nfrom qutip.cy.interpolate cimport interp, zinterp\nfrom qutip.cy.math cimport erf, zerf\ncdef double pi = 3.14159265358979323\nfrom qutip.cy.brtools cimport (dense_add_mult, ZHEEVR, dense_to_eigbasis,\n vec_to_eigbasis, vec_to_fockbasis, skew_and_dwmin,\n diag_liou_mult, spec_func, farray_alloc)\n\"\"\"\n+call_str+\n\"\"\"\ninclude \"\"\"+_include_string+\"\"\"\n\"\"\"]\n\n\n\ndef 
cython_checks():\n \"\"\"\n List of strings that turn off Cython checks.\n \"\"\"\n return [\"\"\"\[email protected](True)\[email protected](False)\[email protected](False)\"\"\"]\n"
] | [
[
"numpy.array2string"
]
] |
yarikoptic/fitlins | [
"ee7e06330b9cdd5a9b812d51eb545daa84b0d066"
] | [
"fitlins/interfaces/bids.py"
] | [
"import os\nfrom functools import reduce\nfrom pathlib import Path\nfrom gzip import GzipFile\nimport json\nimport shutil\nimport numpy as np\nimport nibabel as nb\n\nfrom collections import defaultdict\n\nfrom nipype import logging\nfrom nipype.utils.filemanip import makedirs, copyfile\nfrom nipype.interfaces.base import (\n BaseInterfaceInputSpec, TraitedSpec, SimpleInterface,\n InputMultiPath, OutputMultiPath, File, Directory,\n traits, isdefined\n )\nfrom nipype.interfaces.io import IOBase\n\nfrom ..utils import dict_intersection, snake_to_camel\n\niflogger = logging.getLogger('nipype.interface')\n\n\ndef bids_split_filename(fname):\n \"\"\"Split a filename into parts: path, base filename, and extension\n\n Respects multi-part file types used in BIDS standard and draft extensions\n\n Largely copied from nipype.utils.filemanip.split_filename\n\n Parameters\n ----------\n fname : str\n file or path name\n\n Returns\n -------\n pth : str\n path of fname\n fname : str\n basename of filename, without extension\n ext : str\n file extension of fname\n \"\"\"\n special_extensions = [\n \".R.surf.gii\", \".L.surf.gii\",\n \".R.func.gii\", \".L.func.gii\",\n \".nii.gz\", \".tsv.gz\",\n ]\n\n pth = os.path.dirname(fname)\n fname = os.path.basename(fname)\n\n for special_ext in special_extensions:\n if fname.lower().endswith(special_ext.lower()):\n ext_len = len(special_ext)\n ext = fname[-ext_len:]\n fname = fname[:-ext_len]\n break\n else:\n fname, ext = os.path.splitext(fname)\n\n return pth, fname, ext\n\n\ndef _ensure_model(model):\n model = getattr(model, 'filename', model)\n\n if isinstance(model, str):\n if os.path.exists(model):\n with open(model) as fobj:\n model = json.load(fobj)\n else:\n model = json.loads(model)\n return model\n\n\nclass ModelSpecLoaderInputSpec(BaseInterfaceInputSpec):\n bids_dir = Directory(exists=True,\n mandatory=True,\n desc='BIDS dataset root directory')\n model = traits.Either('default', InputMultiPath(File(exists=True)),\n desc='Model filename')\n selectors = traits.Dict(desc='Limit models to those with matching inputs')\n\n\nclass ModelSpecLoaderOutputSpec(TraitedSpec):\n model_spec = OutputMultiPath(traits.Dict())\n\n\nclass ModelSpecLoader(SimpleInterface):\n input_spec = ModelSpecLoaderInputSpec\n output_spec = ModelSpecLoaderOutputSpec\n\n def _run_interface(self, runtime):\n import bids\n from bids.analysis import auto_model\n models = self.inputs.model\n if not isinstance(models, list):\n layout = bids.BIDSLayout(self.inputs.bids_dir)\n\n if not isdefined(models):\n models = layout.get(type='model')\n if not models:\n raise ValueError(\"No models found\")\n elif models == 'default':\n models = auto_model(layout)\n\n models = [_ensure_model(m) for m in models]\n\n if self.inputs.selectors:\n # This is almost certainly incorrect\n models = [model for model in models\n if all(val in model['input'].get(key, [val])\n for key, val in self.inputs.selectors.items())]\n\n self._results['model_spec'] = models\n\n return runtime\n\n\nIMPUTATION_SNIPPET = \"\"\"\\\n<div class=\"warning\">\n The following confounds had NaN values for the first volume: {}.\n The mean of non-zero values for the remaining entries was imputed.\n If another strategy is desired, it must be explicitly specified in\n the model.\n</div>\n\"\"\"\n\n\nclass LoadBIDSModelInputSpec(BaseInterfaceInputSpec):\n bids_dir = Directory(exists=True,\n mandatory=True,\n desc='BIDS dataset root directory')\n preproc_dir = Directory(exists=True,\n desc='Optional preprocessed files directory')\n model = 
traits.Dict(desc='Model specification', mandatory=True)\n selectors = traits.Dict(desc='Limit collected sessions', usedefault=True)\n include_pattern = InputMultiPath(\n traits.Str, xor=['exclude_pattern'],\n desc='Patterns to select sub-directories of BIDS root')\n exclude_pattern = InputMultiPath(\n traits.Str, xor=['include_pattern'],\n desc='Patterns to ignore sub-directories of BIDS root')\n\n\nclass LoadBIDSModelOutputSpec(TraitedSpec):\n session_info = traits.List(traits.Dict())\n contrast_info = traits.List(traits.List(File()))\n contrast_indices = traits.List(traits.List(traits.List(traits.Dict)))\n entities = traits.List(traits.List(traits.Dict()))\n warnings = traits.List(File)\n\n\nclass LoadBIDSModel(SimpleInterface):\n input_spec = LoadBIDSModelInputSpec\n output_spec = LoadBIDSModelOutputSpec\n\n def _run_interface(self, runtime):\n import bids\n bids.config.set_options(loop_preproc=True)\n include = self.inputs.include_pattern\n exclude = self.inputs.exclude_pattern\n if not isdefined(include):\n include = None\n if not isdefined(exclude):\n exclude = None\n\n paths = [(self.inputs.bids_dir, 'bids')]\n if isdefined(self.inputs.preproc_dir):\n paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))\n layout = bids.BIDSLayout(paths, include=include, exclude=exclude)\n\n selectors = self.inputs.selectors\n\n analysis = bids.Analysis(model=self.inputs.model, layout=layout)\n analysis.setup(drop_na=False, **selectors)\n self._load_level1(runtime, analysis)\n self._load_higher_level(runtime, analysis)\n\n # Debug - remove, eventually\n runtime.analysis = analysis\n\n return runtime\n\n def _load_level1(self, runtime, analysis):\n block = analysis.blocks[0]\n block_subdir = Path(runtime.cwd) / block.level\n block_subdir.mkdir(parents=True, exist_ok=True)\n\n entities = []\n session_info = []\n contrast_indices = []\n contrast_info = []\n warnings = []\n for paradigm, _, ents in block.get_design_matrix(\n block.model['HRF_variables'], mode='sparse', force=True):\n info = {}\n\n space = analysis.layout.get_spaces(type='preproc',\n extensions=['.nii', '.nii.gz'])[0]\n preproc_files = analysis.layout.get(type='preproc',\n extensions=['.nii', '.nii.gz'],\n space=space,\n **ents)\n if len(preproc_files) != 1:\n raise ValueError('Too many BOLD files found')\n\n fname = preproc_files[0].filename\n\n # Required field in seconds\n TR = analysis.layout.get_metadata(fname, type='bold',\n full_search=True)['RepetitionTime']\n dense_vars = set(block.model['variables']) - set(block.model['HRF_variables'])\n\n _, confounds, _ = block.get_design_matrix(dense_vars,\n mode='dense',\n force=True,\n sampling_rate=1/TR,\n **ents)[0]\n\n ent_string = '_'.join('{}-{}'.format(key, val)\n for key, val in ents.items())\n\n events_file = block_subdir / '{}_events.h5'.format(ent_string)\n paradigm.to_hdf(events_file, key='events')\n\n imputed = []\n if confounds is not None:\n # Note that FMRIPREP includes CosineXX columns to accompany\n # t/aCompCor\n # We may want to add criteria to include HPF columns that are not\n # explicitly listed in the model\n names = [col for col in confounds.columns\n if col.startswith('NonSteadyStateOutlier') or\n col in block.model['variables']]\n confounds = confounds[names]\n\n # These confounds are defined pairwise with the current volume\n # and its predecessor, and thus may be undefined (have value\n # NaN) at the first volume.\n # In these cases, we impute the mean non-zero value, for the\n # expected NaN only.\n # Any other NaNs must be handled by an explicit 
transform in\n # the BIDS model.\n for imputable in ('FramewiseDisplacement',\n 'stdDVARS', 'non-stdDVARS',\n 'vx-wisestdDVARS'):\n if imputable in confounds.columns:\n vals = confounds[imputable].values\n if not np.isnan(vals[0]):\n continue\n\n # Impute the mean non-zero, non-NaN value\n confounds[imputable][0] = np.nanmean(vals[vals != 0])\n imputed.append(imputable)\n\n if np.isnan(confounds.values).any():\n iflogger.warning('Unexpected NaNs found in confounds; '\n 'regression may fail.')\n\n confounds_file = block_subdir / '{}_confounds.h5'.format(ent_string)\n confounds.to_hdf(confounds_file, key='confounds')\n\n else:\n confounds_file = None\n\n info['events'] = str(events_file)\n info['confounds'] = str(confounds_file)\n info['repetition_time'] = TR\n\n # Transpose so each contrast gets a row of data instead of column\n contrasts, index, _ = block.get_contrasts(**ents)[0]\n\n contrast_type_map = defaultdict(lambda: 'T')\n contrast_type_map.update({contrast['name']: contrast['type']\n for contrast in block.contrasts})\n contrast_type_list = [contrast_type_map[contrast]\n for contrast in contrasts.columns]\n\n contrasts = contrasts.T\n # Add test indicator column\n contrasts['type'] = contrast_type_list\n\n contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string)\n contrasts_file.parent.mkdir(parents=True, exist_ok=True)\n contrasts.to_hdf(contrasts_file, key='contrasts')\n\n warning_file = block_subdir / '{}_warning.html'.format(ent_string)\n with warning_file.open('w') as fobj:\n if imputed:\n fobj.write(IMPUTATION_SNIPPET.format(', '.join(imputed)))\n\n entities.append(ents)\n session_info.append(info)\n contrast_indices.append(index.to_dict('records'))\n contrast_info.append(str(contrasts_file))\n warnings.append(str(warning_file))\n\n self._results['session_info'] = session_info\n self._results['warnings'] = warnings\n self._results.setdefault('entities', []).append(entities)\n self._results.setdefault('contrast_indices', []).append(contrast_indices)\n self._results.setdefault('contrast_info', []).append(contrast_info)\n\n def _load_higher_level(self, runtime, analysis):\n cwd = Path(runtime.cwd)\n for block in analysis.blocks[1:]:\n block_subdir = cwd / block.level\n block_subdir.mkdir(parents=True, exist_ok=True)\n\n entities = []\n contrast_indices = []\n contrast_info = []\n for contrasts, index, ents in block.get_contrasts():\n if contrasts.empty:\n continue\n\n # The contrast index is the name of the input contrasts,\n # which will very frequently be non-unique\n # Hence, add the contrast to the index (table of entities)\n # and switch to a matching numeric index\n index['contrast'] = contrasts.index\n contrasts.index = index.index\n\n contrast_type_map = defaultdict(lambda: 'T')\n contrast_type_map.update({contrast['name']: contrast['type']\n for contrast in block.contrasts})\n contrast_type_list = [contrast_type_map[contrast]\n for contrast in contrasts.columns]\n\n indices = index.to_dict('records')\n\n # Entities for a given contrast matrix include the intersection of\n # entities of inputs, e.g., if this level is within-subject, the\n # subject should persist\n out_ents = reduce(dict_intersection, indices)\n # Explicit entities take precedence over derived\n out_ents.update(ents)\n # Input-level contrasts will be overridden by the current level\n out_ents.pop('contrast', None)\n\n ent_string = '_'.join('{}-{}'.format(key, val)\n for key, val in out_ents.items())\n\n # Transpose so each contrast gets a row of data instead of column\n contrasts = 
contrasts.T\n # Add test indicator column\n contrasts['type'] = contrast_type_list\n\n contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string)\n contrasts_file.parent.mkdir(parents=True, exist_ok=True)\n contrasts.to_hdf(contrasts_file, key='contrasts')\n\n entities.append(out_ents)\n contrast_indices.append(indices)\n contrast_info.append(str(contrasts_file))\n\n self._results['entities'].append(entities)\n self._results['contrast_info'].append(contrast_info)\n self._results['contrast_indices'].append(contrast_indices)\n\n\nclass BIDSSelectInputSpec(BaseInterfaceInputSpec):\n bids_dir = Directory(exists=True,\n mandatory=True,\n desc='BIDS dataset root directories')\n preproc_dir = Directory(exists=True,\n desc='Optional preprocessed files directory')\n entities = InputMultiPath(traits.Dict(), mandatory=True)\n selectors = traits.Dict(desc='Additional selectors to be applied',\n usedefault=True)\n\n\nclass BIDSSelectOutputSpec(TraitedSpec):\n bold_files = OutputMultiPath(File)\n mask_files = OutputMultiPath(traits.Either(File, None))\n entities = OutputMultiPath(traits.Dict)\n\n\nclass BIDSSelect(SimpleInterface):\n input_spec = BIDSSelectInputSpec\n output_spec = BIDSSelectOutputSpec\n\n def _run_interface(self, runtime):\n import bids\n paths = [(self.inputs.bids_dir, 'bids')]\n if isdefined(self.inputs.preproc_dir):\n paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))\n layout = bids.BIDSLayout(paths)\n\n bold_files = []\n mask_files = []\n entities = []\n for ents in self.inputs.entities:\n selectors = {**self.inputs.selectors, **ents}\n bold_file = layout.get(extensions=['.nii', '.nii.gz'], **selectors)\n\n if len(bold_file) == 0:\n raise FileNotFoundError(\n \"Could not find BOLD file in {} with entities {}\"\n \"\".format(self.inputs.bids_dir, selectors))\n elif len(bold_file) > 1:\n raise ValueError(\n \"Non-unique BOLD file in {} with entities {}.\\n\"\n \"Matches:\\n\\t{}\"\n \"\".format(self.inputs.bids_dir, selectors,\n \"\\n\\t\".join(\n '{} ({})'.format(\n f.filename,\n layout.files[f.filename].entities)\n for f in bold_file)))\n\n # Select exactly matching mask file (may be over-cautious)\n bold_ents = layout.parse_file_entities(\n bold_file[0].filename)\n bold_ents['type'] = 'brainmask'\n mask_file = layout.get(extensions=['.nii', '.nii.gz'], **bold_ents)\n bold_ents.pop('type')\n\n bold_files.append(bold_file[0].filename)\n mask_files.append(mask_file[0].filename if mask_file else None)\n entities.append(bold_ents)\n\n self._results['bold_files'] = bold_files\n self._results['mask_files'] = mask_files\n self._results['entities'] = entities\n\n return runtime\n\n\ndef _copy_or_convert(in_file, out_file):\n in_ext = bids_split_filename(in_file)[2]\n out_ext = bids_split_filename(out_file)[2]\n\n # Copy if filename matches\n if in_ext == out_ext:\n copyfile(in_file, out_file, copy=True, use_hardlink=True)\n return\n\n # gzip/gunzip if it's easy\n if in_ext == out_ext + '.gz' or in_ext + '.gz' == out_ext:\n read_open = GzipFile if in_ext.endswith('.gz') else open\n write_open = GzipFile if out_ext.endswith('.gz') else open\n with read_open(in_file, mode='rb') as in_fobj:\n with write_open(out_file, mode='wb') as out_fobj:\n shutil.copyfileobj(in_fobj, out_fobj)\n return\n\n # Let nibabel take a shot\n try:\n nb.save(nb.load(in_file), out_file)\n except Exception:\n pass\n else:\n return\n\n raise RuntimeError(\"Cannot convert {} to {}\".format(in_ext, out_ext))\n\n\nclass BIDSDataSinkInputSpec(BaseInterfaceInputSpec):\n base_directory = Directory(\n 
mandatory=True,\n desc='Path to BIDS (or derivatives) root directory')\n in_file = InputMultiPath(File(exists=True), mandatory=True)\n entities = InputMultiPath(traits.Dict, usedefault=True,\n desc='Per-file entities to include in filename')\n fixed_entities = traits.Dict(usedefault=True,\n desc='Entities to include in all filenames')\n path_patterns = InputMultiPath(\n traits.Str, desc='BIDS path patterns describing format of file names')\n\n\nclass BIDSDataSinkOutputSpec(TraitedSpec):\n out_file = OutputMultiPath(File, desc='output file')\n\n\nclass BIDSDataSink(IOBase):\n input_spec = BIDSDataSinkInputSpec\n output_spec = BIDSDataSinkOutputSpec\n\n _always_run = True\n\n def _list_outputs(self):\n import bids\n base_dir = self.inputs.base_directory\n\n os.makedirs(base_dir, exist_ok=True)\n\n layout = bids.BIDSLayout(base_dir)\n path_patterns = self.inputs.path_patterns\n if not isdefined(path_patterns):\n path_patterns = None\n\n out_files = []\n for entities, in_file in zip(self.inputs.entities,\n self.inputs.in_file):\n ents = {**self.inputs.fixed_entities}\n ents.update(entities)\n\n ents = {k: snake_to_camel(str(v)) for k, v in ents.items()}\n\n out_fname = os.path.join(\n base_dir, layout.build_path(ents, path_patterns))\n makedirs(os.path.dirname(out_fname), exist_ok=True)\n\n _copy_or_convert(in_file, out_fname)\n out_files.append(out_fname)\n\n return {'out_file': out_files}\n"
] | [
[
"numpy.nanmean",
"numpy.isnan"
]
] |
jtchilders/deephyper | [
"06f9653599757a69fa5720820f4de3a1f154b081"
] | [
"deephyper/search/nas/model/space/keras_search_space.py"
] | [
"from collections.abc import Iterable\nfrom functools import reduce\n\nimport networkx as nx\nfrom tensorflow import keras\nfrom tensorflow.python.keras.utils.vis_utils import model_to_dot\n\nfrom deephyper.core.exceptions.nas.space import (InputShapeOfWrongType,\n NodeAlreadyAdded,\n StructureHasACycle,\n WrongOutputShape,\n WrongSequenceToSetOperations)\nfrom deephyper.search.nas.model.space import NxSearchSpace\nfrom deephyper.search.nas.model.space.node import (ConstantNode, Node,\n VariableNode)\nfrom deephyper.search.nas.model.space.op.basic import Tensor\nfrom deephyper.search.nas.model.space.op.merge import Concatenate\nfrom deephyper.search.nas.model.space.op.op1d import Identity\n\n\nclass KSearchSpace(NxSearchSpace):\n \"\"\"A KSearchSpace represents a search space of neural networks.\n\n >>> from tensorflow.keras.utils import plot_model\n >>> from deephyper.search.nas.model.space import KSearchSpace\n >>> from deephyper.search.nas.model.space.node import VariableNode, ConstantNode\n >>> from deephyper.search.nas.model.space.op.op1d import Dense\n >>> struct = KSearchSpace((5, ), (1, ))\n >>> vnode = VariableNode()\n >>> struct.connect(struct.input_nodes[0], vnode)\n >>> vnode.add_op(Dense(10))\n >>> vnode.add_op(Dense(20))\n >>> output_node = ConstantNode(op=Dense(1))\n >>> struct.connect(vnode, output_node)\n >>> struct.set_ops([0])\n >>> model = struct.create_model()\n\n Args:\n input_shape (list(tuple(int))): list of shapes of all inputs.\n output_shape (tuple(int)): shape of output.\n\n Raises:\n InputShapeOfWrongType: [description]\n \"\"\"\n\n def __init__(self, input_shape, output_shape, *args, **kwargs):\n\n super().__init__()\n\n if type(input_shape) is tuple:\n # we have only one input tensor here\n op = Tensor(keras.layers.Input(input_shape, name=\"input_0\"))\n self.input_nodes = [ConstantNode(op=op, name='Input_0')]\n\n elif type(input_shape) is list and all(map(lambda x: type(x) is tuple, input_shape)):\n # we have a list of input tensors here\n self.input_nodes = list()\n for i in range(len(input_shape)):\n op = Tensor(keras.layers.Input(\n input_shape[i], name=f\"input_{i}\"))\n inode = ConstantNode(op=op, name=f'Input_{i}')\n self.input_nodes.append(inode)\n else:\n raise InputShapeOfWrongType(input_shape)\n\n for node in self.input_nodes:\n self.graph.add_node(node)\n\n self.output_shape = output_shape\n self.output_node = None\n\n self._model = None\n\n @property\n def depth(self):\n if self._model is None:\n raise RuntimeError(\n \"Can't compute depth of model without creating a model.\")\n return len(self.longest_path)\n\n @property\n def longest_path(self):\n if self._model is None:\n raise RuntimeError(\n \"Can't compute longest path of model without creating a model.\")\n nx_graph = nx.drawing.nx_pydot.from_pydot(model_to_dot(self._model))\n return nx.algorithms.dag.dag_longest_path(nx_graph)\n\n\n def set_ops(self, indexes):\n \"\"\"Set the operations for each node of each cell of the search_space.\n\n Args:\n indexes (list): element of list can be float in [0, 1] or int.\n\n Raises:\n WrongSequenceToSetOperations: raised when 'indexes' is of a wrong length.\n \"\"\"\n if len(indexes) != len(list(self.variable_nodes)):\n raise WrongSequenceToSetOperations(\n indexes, list(self.variable_nodes))\n\n for op_i, node in zip(indexes, self.variable_nodes):\n node.set_op(op_i)\n\n output_nodes = self.get_output_nodes()\n\n self.output_node = self.set_output_node(self.graph, output_nodes)\n\n def set_output_node(self, graph, output_nodes):\n \"\"\"Set the output 
node of the search_space.\n\n Args:\n graph (nx.DiGraph): graph of the search_space.\n output_nodes (Node): nodes of the current search_space without successors.\n\n Returns:\n Node: output node of the search_space.\n \"\"\"\n if len(output_nodes) == 1:\n node = ConstantNode(op=Identity(), name='Structure_Output')\n graph.add_node(node)\n graph.add_edge(output_nodes[0], node)\n else:\n node = ConstantNode(name='Structure_Output')\n op = Concatenate(self, output_nodes)\n node.set_op(op=op)\n return node\n\n def create_model(self):\n \"\"\"Create the tensors corresponding to the search_space.\n\n Returns:\n A keras.Model for the current search_space with the corresponding set of operations.\n \"\"\"\n\n output_tensor = self.create_tensor_aux(self.graph, self.output_node)\n if output_tensor.get_shape()[1:] != self.output_shape:\n raise WrongOutputShape(output_tensor, self.output_shape)\n\n input_tensors = [inode._tensor for inode in self.input_nodes]\n\n self._model = keras.Model(inputs=input_tensors, outputs=output_tensor)\n\n return keras.Model(inputs=input_tensors, outputs=output_tensor)\n"
] | [
[
"tensorflow.python.keras.utils.vis_utils.model_to_dot",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input"
]
] |
leon-liangwu/PillarsRNN | [
"b6e7d64af4e2819098ae9a87a9dd676ee8288874"
] | [
"display3d/msic.py"
] | [
"from __future__ import division, print_function\nimport numpy as np\n\nfrom shapely.geometry import Polygon\nimport cv2\n\nfrom collections import defaultdict\n\nfrom kitti import Calibration\n\n\ndef camera_to_lidar(points, r_rect, velo2cam):\n points_shape = list(points.shape[0:-1])\n if points.shape[-1] == 3:\n points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)\n lidar_points = np.dot(points, np.linalg.inv(np.dot(r_rect, velo2cam).T))\n return lidar_points[..., :3]\n\n\ndef lidar_to_camera(points, r_rect, velo2cam):\n points_shape = list(points.shape[:-1])\n if points.shape[-1] == 3:\n points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)\n camera_points = np.dot(points, np.dot(r_rect, velo2cam).T)\n return camera_points[..., :3]\n\n\ndef box_lidar_to_camera(data, r_rect, velo2cam):\n xyz_lidar = data[:, 0:3]\n w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]\n r = data[:, 6:7]\n xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)\n return np.concatenate([xyz, l, h, w, r], axis=1)\n\n\ndef box_camera_to_lidar(data, r_rect, velo2cam):\n xyz = data[:, 0:3]\n l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]\n r = data[:, 6:7]\n xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)\n return np.concatenate([xyz_lidar, w, l, h, r], axis=1)\n\n\ndef cuboid_to_corners(cuboid):\n (cls_id, x, y, z, w, l, h, theta) = cuboid\n theta = (theta + np.pi / 2) # (theta + np.pi / 2)\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n centre_x = x\n centre_y = y\n\n rear_left_x = centre_x - l / 2 * cos_t - w / 2 * sin_t\n rear_left_y = centre_y - l / 2 * sin_t + w / 2 * cos_t\n rear_right_x = centre_x - l / 2 * cos_t + w / 2 * sin_t\n rear_right_y = centre_y - l / 2 * sin_t - w / 2 * cos_t\n front_right_x = centre_x + l / 2 * cos_t + w / 2 * sin_t\n front_right_y = centre_y + l / 2 * sin_t - w / 2 * cos_t\n front_left_x = centre_x + l / 2 * cos_t - w / 2 * sin_t\n front_left_y = centre_y + l / 2 * sin_t + w / 2 * cos_t\n corners = np.array([rear_left_x, rear_left_y, rear_right_x, rear_right_y,\n front_right_x, front_right_y, front_left_x, front_left_y]).reshape((4, 2))\n return corners\n\n\ndef get_corners_list(reg_list):\n corners_list = []\n for reg in reg_list:\n (prob, w, l, h, centre_x, centre_y, z, theta) = reg\n\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n\n rear_left_x = centre_x - l / 2 * cos_t - w / 2 * sin_t\n rear_left_y = centre_y - l / 2 * sin_t + w / 2 * cos_t\n rear_right_x = centre_x - l / 2 * cos_t + w / 2 * sin_t\n rear_right_y = centre_y - l / 2 * sin_t - w / 2 * cos_t\n front_right_x = centre_x + l / 2 * cos_t + w / 2 * sin_t\n front_right_y = centre_y + l / 2 * sin_t - w / 2 * cos_t\n front_left_x = centre_x + l / 2 * cos_t - w / 2 * sin_t\n front_left_y = centre_y + l / 2 * sin_t + w / 2 * cos_t\n corners = np.array([rear_left_x, rear_left_y, rear_right_x, rear_right_y,\n front_right_x, front_right_y, front_left_x, front_left_y]).reshape((4, 2))\n\n corners_list.append(corners)\n\n return corners_list\n\n\n\ndef roty(t):\n ''' Rotation about the y-axis. '''\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\n\ndef rotz(t):\n ''' Rotation about the z-axis. 
'''\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n\ndef get_corners_3d(reg_list):\n corners_list = []\n for reg in reg_list:\n (prob, w, l, h, centre_x, centre_y, z, theta) = reg\n\n R = rotz(-theta-np.pi/2)\n\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]\n y_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]\n z_corners = [0, 0, 0, 0, h, h, h, h]\n # z_corners = [-h/2, -h/2, -h/2, -h/2, h/2, h/2, h/2, h/2]\n\n corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))\n # print corners_3d.shape\n corners_3d[0, :] = corners_3d[0, :] + centre_x\n corners_3d[1, :] = corners_3d[1, :] + centre_y\n corners_3d[2, :] = corners_3d[2, :] + z\n\n corners_3d = corners_3d.transpose(1, 0)\n\n corners_list.append(corners_3d)\n\n corners_list = np.array(corners_list)\n\n return corners_list\n\n\ndef decode_output_box3d(prediction, rpn_mode=False, anchors=None):\n reg_list, cls_list = get_reg_list_rpn(prediction, anchors)\n corners_3d = get_corners_3d(reg_list)\n # corners_list = get_corners_list(reg_list)\n return corners_3d, reg_list, cls_list\n\n\ndef get_det_info(prediction, bev_data, img_path, rpn_mode=False, anchors=None):\n if not rpn_mode:\n reg_list, cls_list = get_reg_list(prediction)\n else:\n reg_list, cls_list = get_reg_list_rpn(prediction, anchors)\n\n calib_path = img_path.replace('velodyne', 'calib')\n calib_path = calib_path.replace('.bin', '.txt')\n calib = Calibration(calib_path)\n\n reg_list[:, [5, 6, 4]] = calib.project_velo_to_rect(reg_list[:, 4:7])\n reg_list[:, 5] *= -1\n\n corners_list = get_corners_list(reg_list)\n prob_list = []\n\n for i in range(len(reg_list)):\n prob_list.append(reg_list[i][0])\n return corners_list, reg_list, prob_list, cls_list\n\n\n\ndef convert_format(boxes_array):\n \"\"\"\n\n :param array: an array of shape [# bboxs, 4, 2]\n :return: a shapely.geometry.Polygon object\n \"\"\"\n\n polygons = [Polygon([(box[i, 0], box[i, 1]) for i in range(4)]) for box in boxes_array]\n return np.array(polygons)\n\n\ndef compute_iou(box1, box2):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: a polygon\n boxes: a vector of polygons\n Note: the areas are passed in rather than calculated here for\n efficiency. Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n iou = box1.intersection(box2).area / box1.union(box2).area\n\n return iou\n\n\n\n\ndef merge_mini_batch(batch_list, _unused=False):\n batch_size = len(batch_list)\n example_merged = defaultdict(list)\n for example in batch_list:\n for k, v in example.items():\n example_merged[k].append(v)\n ret = {}\n for key, elems in example_merged.items():\n if key in ['pillar']:\n print('pillar shape', elems[0].shape)\n ret[key] = np.concatenate(elems, axis=0)\n elif key == 'coords':\n coors = []\n for i, coor in enumerate(elems):\n print('coor shape', coor.shape)\n coor_pad = np.pad(\n coor, ((0, 0), (1, 0)),\n mode='constant',\n constant_values=i)\n coors.append(coor_pad)\n ret[key] = np.concatenate(coors, axis=0)\n else:\n ret[key] = np.stack(elems, axis=0)\n\n return ret\n"
] | [
[
"numpy.vstack",
"numpy.ones",
"numpy.dot",
"numpy.stack",
"numpy.cos",
"numpy.pad",
"numpy.array",
"numpy.sin",
"numpy.concatenate"
]
] |
cregouby/FARM | [
"552bc07acffbce4f1f84d926c040fdd17b4ddeb3"
] | [
"farm/file_utils.py"
] | [
"\"\"\"\nUtilities for working with the local dataset cache.\nThis file is adapted from the AllenNLP library at https://github.com/allenai/allennlp\nCopyright by the AllenNLP authors.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport fnmatch\nimport json\nimport logging\nimport os\nimport shutil\nimport sys\nimport tempfile\nfrom functools import wraps\nfrom hashlib import sha256\nfrom io import open\n\nimport boto3\nimport numpy as np\nimport requests\nfrom botocore.exceptions import ClientError\nfrom dotmap import DotMap\nfrom tqdm import tqdm\n\ntry:\n from torch.hub import _get_torch_home\n\n torch_cache_home = _get_torch_home()\nexcept ImportError:\n torch_cache_home = os.path.expanduser(\n os.getenv(\n \"TORCH_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"torch\")\n )\n )\ndefault_cache_path = os.path.join(torch_cache_home, \"farm\")\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\ntry:\n from pathlib import Path\n\n FARM_CACHE = Path(os.getenv(\"FARM_CACHE\", default_cache_path))\nexcept (AttributeError, ImportError):\n FARM_CACHE = os.getenv(\"FARM_CACHE\", default_cache_path)\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\ndef url_to_filename(url, etag=None):\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode(\"utf-8\")\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode(\"utf-8\")\n etag_hash = sha256(etag_bytes)\n filename += \".\" + etag_hash.hexdigest()\n\n return filename\n\n\ndef filename_to_url(filename, cache_dir=None):\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = FARM_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise EnvironmentError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + \".json\"\n if not os.path.exists(meta_path):\n raise EnvironmentError(\"file {} not found\".format(meta_path))\n\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"]\n\n return url, etag\n\n\ndef cached_path(url_or_filename, cache_dir=None):\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. 
If it's already a local path,\n make sure the file exists and then return the path.\n \"\"\"\n if cache_dir is None:\n cache_dir = FARM_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\n \"unable to parse {} as a URL or as a local path\".format(url_or_filename)\n )\n\n\ndef split_s3_path(url):\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path\n\n\ndef s3_request(func):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise EnvironmentError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper\n\n\n@s3_request\ndef s3_etag(url):\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag\n\n\n@s3_request\ndef s3_get(url, temp_file):\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)\n\n\ndef http_get(url, temp_file, proxies=None):\n req = requests.get(url, stream=True, proxies=proxies)\n content_length = req.headers.get(\"Content-Length\")\n total = int(content_length) if content_length is not None else None\n progress = tqdm(unit=\"B\", total=total)\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n\ndef get_from_cache(url, cache_dir=None):\n \"\"\"\n Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. 
Then return the path to the cached file.\n \"\"\"\n if cache_dir is None:\n cache_dir = FARM_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n # Get eTag to add to filename, if it exists.\n if url.startswith(\"s3://\"):\n etag = s3_etag(url)\n else:\n try:\n response = requests.head(url, allow_redirects=True)\n if response.status_code != 200:\n etag = None\n else:\n etag = response.headers.get(\"ETag\")\n except EnvironmentError:\n etag = None\n\n if sys.version_info[0] == 2 and etag is not None:\n etag = etag.decode(\"utf-8\")\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n # If we don't have a connection (etag is None) and can't identify the file\n # try to get the last downloaded one\n if not os.path.exists(cache_path) and etag is None:\n matching_files = fnmatch.filter(os.listdir(cache_dir), filename + \".*\")\n matching_files = list(filter(lambda s: not s.endswith(\".json\"), matching_files))\n if matching_files:\n cache_path = os.path.join(cache_dir, matching_files[-1])\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3://\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, \"wb\") as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {\"url\": url, \"etag\": etag}\n meta_path = cache_path + \".json\"\n with open(meta_path, \"w\") as meta_file:\n output_string = json.dumps(meta)\n if sys.version_info[0] == 2 and isinstance(output_string, str):\n output_string = unicode(\n output_string, \"utf-8\"\n ) # The beauty of python 2\n meta_file.write(output_string)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path\n\n\ndef read_set_from_file(filename):\n \"\"\"\n Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.\n \"\"\"\n collection = set()\n with open(filename, \"r\", encoding=\"utf-8\") as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection\n\n\ndef get_file_extension(path, dot=True, lower=True):\n ext = os.path.splitext(path)[1]\n ext = ext if dot else ext[1:]\n return ext.lower() if lower else ext\n\n\ndef read_config(path, flattend=False):\n if path:\n with open(path) as json_data_file:\n conf_args = json.load(json_data_file)\n else:\n raise ValueError(\"No config provided for classifier\")\n\n def getArgValue(arg):\n if \"value\" not in arg:\n logger.error(\n \"Only depth 2 config files supported. 
Failed to convert: %s\" % str(arg)\n )\n return arg[\"value\"] if (arg[\"value\"] is not None) else arg[\"default\"]\n\n # flatten last part of config, take either value or default as value\n for gk, gv in conf_args.items():\n for k, v in gv.items():\n if isinstance(getArgValue(v), dict):\n logger.error(\"Config is too deeply nested, at %s\" % str(v))\n conf_args[gk][k] = getArgValue(v)\n\n # DotMap for making nested dictionary accessible through dot notation\n flat_args = dict(\n conf_args[\"general\"],\n **conf_args[\"task\"],\n **conf_args[\"parameter\"],\n **conf_args[\"logging\"],\n )\n if flattend:\n args = DotMap(flat_args, _dynamic=False)\n else:\n args = DotMap(conf_args, _dynamic=False)\n\n return args\n\n\ndef unnestConfig(config, flattened=False):\n \"\"\"\n This function creates a list of config files for evaluating parameters with different values. If a config parameter\n is of type list this list is iterated over and a config object without lists is returned. Can handle lists inside any\n number of parameters.\n\n Can handle shallow or nested (one level) configs\n \"\"\"\n nestedKeys = []\n nestedVals = []\n if flattened:\n for k, v in config.items():\n if isinstance(v, list):\n if k != \"layer_dims\": # exclude layer dims, since it is already a list\n nestedKeys.append(k)\n nestedVals.append(v)\n else:\n for gk, gv in config.items():\n if(gk != \"task\"):\n for k, v in gv.items():\n if isinstance(v, list):\n if isinstance(v, list):\n if (\n k != \"layer_dims\"\n ): # exclude layer dims, since it is already a list\n nestedKeys.append([gk, k])\n nestedVals.append(v)\n elif isinstance(v, dict):\n logger.error(\"Config too deep!\")\n\n if len(nestedKeys) == 0:\n unnestedConfig = [config]\n else:\n if flattened:\n logger.info(\"Nested config at parameters: %s\" % (\", \".join(nestedKeys)))\n else:\n logger.info(\n \"Nested config at parameters: %s\"\n % (\", \".join(\".\".join(x) for x in nestedKeys))\n )\n unnestedConfig = []\n mesh = np.meshgrid(\n *nestedVals\n ) # get all combinations, each dimension corresponds to one parameter type\n # flatten mesh into shape: [num_parameters, num_combinations] so we can iterate in 2d over any paramter combinations\n mesh = [x.flatten() for x in mesh]\n\n # loop over all combinations\n for i in range(len(mesh[0])):\n tempconfig = config.copy()\n for j, k in enumerate(nestedKeys):\n if isinstance(k, str):\n tempconfig[k] = mesh[j][\n i\n ] # get ith val of correct param value and overwrite original config\n elif len(k) == 2:\n tempconfig[k[0]][k[1]] = mesh[j][i] # set nested dictionary keys\n else:\n logger.error(\"Config too deep!\")\n unnestedConfig.append(tempconfig)\n\n return unnestedConfig\n"
] | [
[
"torch.hub._get_torch_home",
"numpy.meshgrid"
]
] |
rubenrtorrado/NLP | [
"2ba6f153e428227fcf6f27080bdd0183d395ef64"
] | [
"alpha-zero-general_one_step/MCTS_Bleu.py"
] | [
"import math\nimport numpy as np\nEPS = 1e-8\n\nclass MCTS():\n    \"\"\"\n    This class handles the MCTS tree.\n    \"\"\"\n\n    def __init__(self, game, nnet, args):\n        self.game = game\n        self.nnet = nnet\n        self.args = args\n        self.Qsa = {}  # stores Q values for s,a (as defined in the paper)\n        self.Nsa = {}  # stores #times edge s,a was visited\n        self.Ns = {}   # stores #times board s was visited\n        self.Ps = {}   # stores initial policy (returned by neural net)\n\n        self.Es = {}   # stores game.getGameEnded ended for board s\n        self.Vs = {}   # stores game.getValidMoves for board s\n\n    def getActionProb(self, canonicalBoard, temp=1):\n        \"\"\"\n        This function performs numMCTSSims simulations of MCTS starting from\n        canonicalBoard.\n\n        Returns:\n            probs: a policy vector where the probability of the ith action is\n                   proportional to Nsa[(s,a)]**(1./temp)\n        \"\"\"\n        for i in range(self.args.numMCTSSims):\n            self.search(canonicalBoard)\n\n        s = self.game.stringRepresentation(canonicalBoard)\n        counts = [self.Nsa[(s,a)] if (s,a) in self.Nsa else 0 for a in range(self.game.getActionSize())]\n\n        if temp==0:\n            bestA = np.argmax(counts)\n            probs = [0]*len(counts)\n            probs[bestA]=1\n            return probs\n\n        counts = [x**(1./temp) for x in counts]\n        probs = [x/float(sum(counts)) for x in counts]\n        return probs\n\n\n    def search(self, canonicalBoard):\n        \"\"\"\n        This function performs one iteration of MCTS. It is recursively called\n        until a leaf node is found. The action chosen at each node is one that\n        has the maximum upper confidence bound as in the paper.\n\n        Once a leaf node is found, the neural network is called to return an\n        initial policy P and a value v for the state. This value is propagated\n        up the search path. In case the leaf node is a terminal state, the\n        outcome is propagated up the search path. The values of Ns, Nsa, Qsa are\n        updated.\n\n        NOTE: the return values are the negative of the value of the current\n        state. This is done since v is in [-1,1] and if v is the value of a\n        state for the current player, then its value is -v for the other player.\n\n        Returns:\n            v: the negative of the value of the current canonicalBoard\n        \"\"\"\n\n        s = self.game.stringRepresentation(canonicalBoard)\n\n        if s not in self.Es:\n            self.Es[s] = self.game.getGameEnded_BLEU(canonicalBoard, 1)\n        if self.Es[s]!=0:\n            # terminal node\n\n            #test=self.Es[s]\n\n            return self.Es[s]\n\n        if s not in self.Ps:\n            # leaf node\n            self.Ps[s], v = self.nnet.predict(canonicalBoard)\n            valids = self.game.getValidMoves(canonicalBoard, 1)\n            self.Ps[s] = self.Ps[s]*valids  # masking invalid moves\n            #Ruben\n            self.Ps[s]=self.Ps[s].T\n            sum_Ps_s = np.sum(self.Ps[s])\n            if sum_Ps_s > 0:\n                self.Ps[s] /= sum_Ps_s  # renormalize\n            else:\n                # if all valid moves were masked make all valid moves equally probable\n\n                # NB! All valid moves may be masked if either your NNet architecture is insufficient or you've got overfitting or something else.\n                # If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.\n                print(\"All valid moves were masked, do workaround.\")\n                self.Ps[s] = self.Ps[s] + valids\n                self.Ps[s] /= np.sum(self.Ps[s])\n\n            self.Vs[s] = valids\n            self.Ns[s] = 0\n            return v  # -v\n\n        valids = self.Vs[s]\n        cur_best = -float('inf')\n        best_act = -1\n\n        # pick the action with the highest upper confidence bound\n        for a in range(self.game.getActionSize()):\n            if valids[a]:\n                if (s,a) in self.Qsa:\n                    u = self.Qsa[(s,a)] + self.args.cpuct*self.Ps[s][a]*math.sqrt(self.Ns[s])/(1+self.Nsa[(s,a)])\n                else:\n                    u = self.args.cpuct*self.Ps[s][a]*math.sqrt(self.Ns[s] + EPS)  # Q = 0 ?\n\n                if u > cur_best:\n                    cur_best = u\n                    best_act = a\n\n        a = best_act\n        next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)\n        next_s = self.game.getCanonicalForm(next_s, next_player)\n\n        v = self.search(next_s)\n\n        if (s,a) in self.Qsa:\n            self.Qsa[(s,a)] = (self.Nsa[(s,a)]*self.Qsa[(s,a)] + v)/(self.Nsa[(s,a)]+1)\n            self.Nsa[(s,a)] += 1\n\n        else:\n            self.Qsa[(s,a)] = v\n            self.Nsa[(s,a)] = 1\n\n        self.Ns[s] += 1\n        return v  # -v\n"
] | [
[
"numpy.sum",
"numpy.argmax"
]
] |
schr476/EXARL | [
"7f4596bd8b3d7960aaf52bc677ceac4f37029834"
] | [
"exarl/candlelib/uq_utils.py"
] | [
"from __future__ import absolute_import\n\nimport numpy as np\nfrom scipy.stats import pearsonr, spearmanr\nfrom scipy import signal\nfrom scipy.interpolate import InterpolatedUnivariateSpline\n\n\ndef generate_index_distribution(numTrain, numTest, numValidation, params):\n    \"\"\" Generates a vector of indices to partition the data for training.\n        NO CHECKING IS DONE: it is assumed that the data could be partitioned\n        in the specified blocks and that the block indices describe a coherent\n        partition.\n\n        Parameters\n        ----------\n        numTrain : int\n            Number of training data points\n        numTest : int\n            Number of testing data points\n        numValidation : int\n            Number of validation data points (may be zero)\n        params : dictionary with parameters\n            Contains the keywords that control the behavior of the function\n            (uq_train_fr, uq_valid_fr, uq_test_fr for fraction specification,\n            uq_train_vec, uq_valid_vec, uq_test_vec for block list specification, and\n            uq_train_bks, uq_valid_bks, uq_test_bks for block number specification)\n\n        Return\n        ----------\n        indexTrain : int numpy array\n            Indices for data in training\n        indexValidation : int numpy array\n            Indices for data in validation (if any)\n        indexTest : int numpy array\n            Indices for data in testing (if merging)\n    \"\"\"\n    if all(k in params for k in ('uq_train_fr', 'uq_valid_fr', 'uq_test_fr')):\n        # specification by fraction\n        print(\"Computing UQ cross-validation - Distributing by FRACTION\")\n        return generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params)\n    elif all(k in params for k in ('uq_train_vec', 'uq_valid_vec', 'uq_test_vec')):\n        # specification by block list\n        print(\"Computing UQ cross-validation - Distributing by BLOCK LIST\")\n        return generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params)\n    elif all(k in params for k in ('uq_train_bks', 'uq_valid_bks', 'uq_test_bks')):\n        # specification by block size\n        print(\"Computing UQ cross-validation - Distributing by BLOCK NUMBER\")\n        return generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params)\n    else:\n        print(\"ERROR !! No consistent UQ parameter specification found !! ... exiting \")\n        raise KeyError(\"No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)\")\n\n\ndef generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params):\n    \"\"\" Generates a vector of indices to partition the data for training.\n        It checks that the fractions provided are in (0, 1) and add up to 1.\n\n        Parameters\n        ----------\n        numTrain : int\n            Number of training data points\n        numTest : int\n            Number of testing data points\n        numValidation : int\n            Number of validation data points (may be zero)\n        params : dictionary with parameters\n            Contains the keywords that control the behavior of the function\n            (uq_train_fr, uq_valid_fr, uq_test_fr)\n\n        Return\n        ----------\n        indexTrain : int numpy array\n            Indices for data in training\n        indexValidation : int numpy array\n            Indices for data in validation (if any)\n        indexTest : int numpy array\n            Indices for data in testing (if merging)\n    \"\"\"\n\n    tol = 1e-7\n\n    # Extract required parameters\n    fractionTrain = params['uq_train_fr']\n    fractionValidation = params['uq_valid_fr']\n    fractionTest = params['uq_test_fr']\n\n    if (fractionTrain < 0.) or (fractionTrain > 1.):\n        raise ValueError('uq_train_fr is not in (0, 1) range. uq_train_fr: ', fractionTrain)\n    if (fractionValidation < 0.) or (fractionValidation > 1.):\n        raise ValueError('uq_valid_fr is not in (0, 1) range. uq_valid_fr: ', fractionValidation)\n    if (fractionTest < 0.) or (fractionTest > 1.):\n        raise ValueError('uq_test_fr is not in (0, 1) range. uq_test_fr: ', fractionTest)\n\n    fractionSum = fractionTrain + fractionValidation + fractionTest\n    # if (fractionSum > 1.) or (fractionSum < 1.):\n    if abs(fractionSum - 1.) > tol:\n        raise ValueError(\n            'Specified UQ fractions (uq_train_fr, uq_valid_fr, uq_test_fr) do not add up to 1. No cross-validation partition is computed ! sum:',\n            fractionSum)\n\n    # Determine data size and block size\n    if fractionTest > 0:\n        # Use all data and re-distribute the partitions\n        numData = numTrain + numValidation + numTest\n    else:\n        # Preserve test partition\n        numData = numTrain + numValidation\n\n    sizeTraining = int(np.round(numData * fractionTrain))\n    sizeValidation = int(np.round(numData * fractionValidation))\n\n    # Fill partition indices\n    # Fill train partition\n    Folds = np.arange(numData)\n    np.random.shuffle(Folds)\n    indexTrain = Folds[:sizeTraining]\n    # Fill validation partition\n    indexValidation = None\n    if fractionValidation > 0:\n        indexValidation = Folds[sizeTraining:sizeTraining + sizeValidation]\n    # Fill test partition\n    indexTest = None\n    if fractionTest > 0:\n        indexTest = Folds[sizeTraining + sizeValidation:]\n\n    return indexTrain, indexValidation, indexTest\n\n\ndef generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params):\n    \"\"\" Generates a vector of indices to partition the data for training.\n        NO CHECKING IS DONE: it is assumed that the data could be partitioned\n        in the specified block quantities and that the block quantities describe a\n        coherent partition.\n\n        Parameters\n        ----------\n        numTrain : int\n            Number of training data points\n        numTest : int\n            Number of testing data points\n        numValidation : int\n            Number of validation data points (may be zero)\n        params : dictionary with parameters\n            Contains the keywords that control the behavior of the function\n            (uq_train_bks, uq_valid_bks, uq_test_bks)\n\n        Return\n        ----------\n        indexTrain : int numpy array\n            Indices for data in training\n        indexValidation : int numpy array\n            Indices for data in validation (if any)\n        indexTest : int numpy array\n            Indices for data in testing (if merging)\n    \"\"\"\n\n    # Extract required parameters\n    numBlocksTrain = params['uq_train_bks']\n    numBlocksValidation = params['uq_valid_bks']\n    numBlocksTest = params['uq_test_bks']\n    numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest\n\n    # Determine data size and block size\n    if numBlocksTest > 0:\n        # Use all data and re-distribute the partitions\n        numData = numTrain + numValidation + numTest\n    else:\n        # Preserve test partition\n        numData = numTrain + numValidation\n\n    blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal  # integer division with rounding\n    remainder = numData - blockSize * numBlocksTotal\n    if remainder != 0:\n        print(\"Warning ! Requested partition does not distribute data evenly between blocks. 
\"\n \"Testing (if specified) or Validation (if specified) will use different block size.\")\n\n sizeTraining = numBlocksTrain * blockSize\n sizeValidation = numBlocksValidation * blockSize\n\n # Fill partition indices\n # Fill train partition\n Folds = np.arange(numData)\n np.random.shuffle(Folds)\n indexTrain = Folds[:sizeTraining]\n # Fill validation partition\n indexValidation = None\n if numBlocksValidation > 0:\n indexValidation = Folds[sizeTraining:sizeTraining + sizeValidation]\n # Fill test partition\n indexTest = None\n if numBlocksTest > 0:\n indexTest = Folds[sizeTraining + sizeValidation:]\n\n return indexTrain, indexValidation, indexTest\n\n\ndef generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params):\n \"\"\" Generates a vector of indices to partition the data for training.\n NO CHECKING IS DONE: it is assumed that the data could be partitioned\n in the specified list of blocks and that the block indices describe a\n coherent partition.\n\n Parameters\n ----------\n numTrain : int\n Number of training data points\n numTest : int\n Number of testing data points\n numValidation : int\n Number of validation data points (may be zero)\n params : dictionary with parameters\n Contains the keywords that control the behavior of the function\n (uq_train_vec, uq_valid_vec, uq_test_vec)\n\n Return\n ----------\n indexTrain : int numpy array\n Indices for data in training\n indexValidation : int numpy array\n Indices for data in validation (if any)\n indexTest : int numpy array\n Indices for data in testing (if merging)\n \"\"\"\n\n # Extract required parameters\n blocksTrain = params['uq_train_vec']\n blocksValidation = params['uq_valid_vec']\n blocksTest = params['uq_test_vec']\n\n # Determine data size and block size\n numBlocksTrain = len(blocksTrain)\n numBlocksValidation = len(blocksValidation)\n numBlocksTest = len(blocksTest)\n numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest\n\n if numBlocksTest > 0:\n # Use all data and re-distribute the partitions\n numData = numTrain + numValidation + numTest\n else:\n # Preserve test partition\n numData = numTrain + numValidation\n\n blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal # integer division with rounding\n remainder = numData - blockSize * numBlocksTotal\n if remainder != 0:\n print(\"Warning ! Requested partition does not distribute data evenly between blocks. \"\n \"Last block will have different size.\")\n if remainder < 0:\n remainder = 0\n\n # Fill partition indices\n # Fill train partition\n maxSizeTrain = blockSize * numBlocksTrain + remainder\n indexTrain = fill_array(blocksTrain, maxSizeTrain, numData, numBlocksTotal, blockSize)\n # Fill validation partition\n indexValidation = None\n if numBlocksValidation > 0:\n maxSizeValidation = blockSize * numBlocksValidation + remainder\n indexValidation = fill_array(blocksValidation, maxSizeValidation, numData, numBlocksTotal, blockSize)\n # Fill test partition\n indexTest = None\n if numBlocksTest > 0:\n maxSizeTest = blockSize * numBlocksTest + remainder\n indexTest = fill_array(blocksTest, maxSizeTest, numData, numBlocksTotal, blockSize)\n\n return indexTrain, indexValidation, indexTest\n\n\ndef compute_limits(numdata, numblocks, blocksize, blockn):\n \"\"\" Generates the limit of indices corresponding to a\n specific block. 
It takes into account the non-exact\n divisibility of numdata into numblocks letting the\n last block to take the extra chunk.\n\n Parameters\n ----------\n numdata : int\n Total number of data points to distribute\n numblocks : int\n Total number of blocks to distribute into\n blocksize : int\n Size of data per block\n blockn : int\n Index of block, from 0 to numblocks-1\n\n Return\n ----------\n start : int\n Position to start assigning indices\n end : int\n One beyond position to stop assigning indices\n \"\"\"\n start = blockn * blocksize\n end = start + blocksize\n if blockn == (numblocks - 1): # last block gets the extra\n end = numdata\n\n return start, end\n\n\ndef fill_array(blocklist, maxsize, numdata, numblocks, blocksize):\n \"\"\" Fills a new array of integers with the indices corresponding\n to the specified block structure.\n\n Parameters\n ----------\n blocklist : list\n List of integers describes the block indices that\n go into the array\n maxsize : int\n Maximum possible length for the partition (the size of the\n common block size plus the remainder, if any).\n numdata : int\n Total number of data points to distribute\n numblocks : int\n Total number of blocks to distribute into\n blocksize : int\n Size of data per block\n\n Return\n ----------\n indexArray : int numpy array\n Indices for specific data partition. Resizes the array\n to the correct length.\n \"\"\"\n\n indexArray = np.zeros(maxsize, np.int)\n\n offset = 0\n for i in blocklist:\n start, end = compute_limits(numdata, numblocks, blocksize, i)\n length = end - start\n indexArray[offset:offset + length] = np.arange(start, end)\n offset += length\n\n return indexArray[:offset]\n\n\n# UTILS for COMPUTATION OF EMPIRICAL CALIBRATION\n\ndef compute_statistics_homoscedastic(df_data,\n col_true=0,\n col_pred=6,\n col_std_pred=7,\n ):\n \"\"\" Extracts ground truth, mean prediction, error and\n standard deviation of prediction from inference\n data frame. The latter includes the statistics\n over all the inference realizations.\n\n Parameters\n ----------\n df_data : pandas data frame\n Data frame generated by current CANDLE inference\n experiments. Indices are hard coded to agree with\n current CANDLE version. (The inference file usually\n has the name: <model>_pred.tsv).\n col_true : integer\n Index of the column in the data frame where the true\n value is stored (Default: 0, index in current CANDLE format).\n col_pred : integer\n Index of the column in the data frame where the predicted\n value is stored (Default: 6, index in current CANDLE format).\n col_std_pred : integer\n Index of the column in the data frame where the standard\n deviation of the predicted values is stored (Default: 7,\n index in current CANDLE format).\n\n Return\n ----------\n Ytrue : numpy array\n Array with true (observed) values\n Ypred : numpy array\n Array with predicted values.\n yerror : numpy array\n Array with errors computed (observed - predicted).\n sigma : numpy array\n Array with standard deviations learned with deep learning\n model. 
For homoscedastic inference this corresponds to the\n std value computed from prediction (and is equal to the\n following returned variable).\n Ypred_std : numpy array\n Array with standard deviations computed from regular\n (homoscedastic) inference.\n pred_name : string\n Name of data column or quantity predicted (as extracted\n from the data frame using the col_true index).\n \"\"\"\n\n Ytrue = df_data.iloc[:, col_true].values\n print('Ytrue shape: ', Ytrue.shape)\n pred_name = df_data.columns[col_true]\n Ypred = df_data.iloc[:, col_pred].values\n print('Ypred shape: ', Ypred.shape)\n Ypred_std = df_data.iloc[:, col_std_pred].values\n print('Ypred_std shape: ', Ypred_std.shape)\n yerror = Ytrue - Ypred\n print('yerror shape: ', yerror.shape)\n sigma = Ypred_std # std\n MSE = np.mean((Ytrue - Ypred)**2)\n print('MSE: ', MSE)\n MSE_STD = np.std((Ytrue - Ypred)**2)\n print('MSE_STD: ', MSE_STD)\n # p-value 'not entirely reliable, reasonable for datasets > 500'\n spearman_cc, pval = spearmanr(Ytrue, Ypred)\n print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))\n\n return Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name\n\n\ndef compute_statistics_homoscedastic_all(df_data,\n col_true=4,\n col_pred_start=6\n ):\n \"\"\" Extracts ground truth, mean prediction, error and\n standard deviation of prediction from inference\n data frame. The latter includes all the individual\n inference realizations.\n\n Parameters\n ----------\n df_data : pandas data frame\n Data frame generated by current CANDLE inference\n experiments. Indices are hard coded to agree with\n current CANDLE version. (The inference file usually\n has the name: <model>.predicted_INFER.tsv).\n col_true : integer\n Index of the column in the data frame where the true\n value is stored (Default: 4, index in current HOM format).\n col_pred_start : integer\n Index of the column in the data frame where the first predicted\n value is stored. All the predicted values during inference\n are stored (Default: 6 index, in current HOM format).\n\n Return\n ----------\n Ytrue : numpy array\n Array with true (observed) values\n Ypred : numpy array\n Array with predicted values.\n yerror : numpy array\n Array with errors computed (observed - predicted).\n sigma : numpy array\n Array with standard deviations learned with deep learning\n model. 
For homoscedastic inference this corresponds to the\n std value computed from prediction (and is equal to the\n following returned variable).\n Ypred_std : numpy array\n Array with standard deviations computed from regular\n (homoscedastic) inference.\n pred_name : string\n Name of data column or quantity predicted (as extracted\n from the data frame using the col_true index).\n \"\"\"\n\n Ytrue = df_data.iloc[:, col_true].values\n print('Ytrue shape: ', Ytrue.shape)\n pred_name = df_data.columns[col_true]\n Ypred_mean_ = np.mean(df_data.iloc[:, col_pred_start:], axis=1)\n Ypred_mean = Ypred_mean_.values\n print('Ypred_mean shape: ', Ypred_mean.shape)\n Ypred_std_ = np.std(df_data.iloc[:, col_pred_start:], axis=1)\n Ypred_std = Ypred_std_.values\n print('Ypred_std shape: ', Ypred_std.shape)\n yerror = Ytrue - Ypred_mean\n print('yerror shape: ', yerror.shape)\n sigma = Ypred_std # std\n MSE = np.mean((Ytrue - Ypred_mean)**2)\n print('MSE: ', MSE)\n MSE_STD = np.std((Ytrue - Ypred_mean)**2)\n print('MSE_STD: ', MSE_STD)\n # p-value 'not entirely reliable, reasonable for datasets > 500'\n spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)\n print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))\n\n return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name\n\n\ndef compute_statistics_heteroscedastic(df_data,\n col_true=4,\n col_pred_start=6,\n col_std_pred_start=7,\n ):\n \"\"\" Extracts ground truth, mean prediction, error, standard\n deviation of prediction and predicted (learned) standard\n deviation from inference data frame. The latter includes\n all the individual inference realizations.\n\n Parameters\n ----------\n df_data : pandas data frame\n Data frame generated by current heteroscedastic inference\n experiments. Indices are hard coded to agree with\n current version. (The inference file usually\n has the name: <model>.predicted_INFER_HET.tsv).\n col_true : integer\n Index of the column in the data frame where the true\n value is stored (Default: 4, index in current HET format).\n col_pred_start : integer\n Index of the column in the data frame where the first predicted\n value is stored. All the predicted values during inference\n are stored and are interspaced with standard deviation\n predictions (Default: 6 index, step 2, in current HET format).\n col_std_pred_start : integer\n Index of the column in the data frame where the first predicted\n standard deviation value is stored. All the predicted values\n during inference are stored and are interspaced with predictions\n (Default: 7 index, step 2, in current HET format).\n\n Return\n ----------\n Ytrue : numpy array\n Array with true (observed) values\n Ypred : numpy array\n Array with predicted values.\n yerror : numpy array\n Array with errors computed (observed - predicted).\n sigma : numpy array\n Array with standard deviations learned with deep learning\n model. 
For homoscedastic inference this corresponds to the\n std value computed from prediction (and is equal to the\n following returned variable).\n Ypred_std : numpy array\n Array with standard deviations computed from regular\n (homoscedastic) inference.\n pred_name : string\n Name of data column or quantity predicted (as extracted\n from the data frame using the col_true index).\n \"\"\"\n\n Ytrue = df_data.iloc[:, col_true].values\n print('Ytrue shape: ', Ytrue.shape)\n pred_name = df_data.columns[col_true]\n Ypred_mean_ = np.mean(df_data.iloc[:, col_pred_start::2], axis=1)\n Ypred_mean = Ypred_mean_.values\n print('Ypred shape: ', Ypred_mean.shape)\n Ypred_std_ = np.std(df_data.iloc[:, col_pred_start::2], axis=1)\n Ypred_std = Ypred_std_.values\n print('Ypred_std shape: ', Ypred_std.shape)\n yerror = Ytrue - Ypred_mean\n print('yerror shape: ', yerror.shape)\n s_ = df_data.iloc[:, col_std_pred_start::2]\n s_mean = np.mean(s_, axis=1)\n var = np.exp(s_mean.values) # variance\n sigma = np.sqrt(var) # std\n print('sigma shape: ', sigma.shape)\n MSE = np.mean((Ytrue - Ypred_mean)**2)\n print('MSE: ', MSE)\n MSE_STD = np.std((Ytrue - Ypred_mean)**2)\n print('MSE_STD: ', MSE_STD)\n # p-value 'not entirely reliable, reasonable for datasets > 500'\n spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)\n print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))\n\n return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name\n\n\ndef compute_statistics_quantile(df_data,\n sigma_divisor=2.56,\n col_true=4,\n col_pred_start=6\n ):\n \"\"\" Extracts ground truth, 50th percentile mean prediction,\n low percentile and high percentile mean prediction\n (usually 10th percentile and 90th percentile respectively),\n error (using 50th percentile), standard deviation of\n prediction (using 50th percentile) and predicted (learned)\n standard deviation from interdecile range in inference data frame.\n The latter includes all the individual inference realizations.\n\n Parameters\n ----------\n df_data : pandas data frame\n Data frame generated by current quantile inference\n experiments. Indices are hard coded to agree with\n current version. (The inference file usually\n has the name: <model>.predicted_INFER_QTL.tsv).\n sigma_divisor : float\n Divisor to convert from the intercedile range to the corresponding\n standard deviation for a Gaussian distribution.\n (Default: 2.56, consistent with an interdecile range computed from\n the difference between the 90th and 10th percentiles).\n col_true : integer\n Index of the column in the data frame where the true\n value is stored (Default: 4, index in current QTL format).\n col_pred_start : integer\n Index of the column in the data frame where the first predicted\n value is stored. All the predicted values during inference\n are stored and are interspaced with other percentile\n predictions (Default: 6 index, step 3, in current QTL format).\n\n Return\n ----------\n Ytrue : numpy array\n Array with true (observed) values\n Ypred : numpy array\n Array with predicted values (based on the 50th percentile).\n yerror : numpy array\n Array with errors computed (observed - predicted).\n sigma : numpy array\n Array with standard deviations learned with deep learning\n model. 
This corresponds to the interdecile range divided\n by the sigma divisor.\n Ypred_std : numpy array\n Array with standard deviations computed from regular\n (homoscedastic) inference.\n pred_name : string\n Name of data column or quantity predicted (as extracted\n from the data frame using the col_true index).\n Ypred_Lp_mean : numpy array\n Array with predicted values of the lower percentile\n (usually the 10th percentile).\n Ypred_Hp_mean : numpy array\n Array with predicted values of the higher percentile\n (usually the 90th percentile).\n \"\"\"\n\n Ytrue = df_data.iloc[:, col_true].values\n print('Ytrue shape: ', Ytrue.shape)\n pred_name = df_data.columns[col_true]\n Ypred_50q_mean = np.mean(df_data.iloc[:, col_pred_start::3], axis=1)\n Ypred_mean = Ypred_50q_mean.values\n print('Ypred shape: ', Ypred_mean.shape)\n Ypred_Lp_mean_ = np.mean(df_data.iloc[:, col_pred_start + 1::3], axis=1)\n Ypred_Hp_mean_ = np.mean(df_data.iloc[:, col_pred_start + 2::3], axis=1)\n Ypred_Lp_mean = Ypred_Lp_mean_.values\n Ypred_Hp_mean = Ypred_Hp_mean_.values\n interdecile_range = Ypred_Hp_mean - Ypred_Lp_mean\n sigma = interdecile_range / sigma_divisor\n print('sigma shape: ', sigma.shape)\n yerror = Ytrue - Ypred_mean\n print('yerror shape: ', yerror.shape)\n Ypred_std_ = np.std(df_data.iloc[:, col_pred_start::3], axis=1)\n Ypred_std = Ypred_std_.values\n print('Ypred_std shape: ', Ypred_std.shape)\n MSE = np.mean((Ytrue - Ypred_mean)**2)\n print('MSE: ', MSE)\n MSE_STD = np.std((Ytrue - Ypred_mean)**2)\n print('MSE_STD: ', MSE_STD)\n # p-value 'not entirely reliable, reasonable for datasets > 500'\n spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)\n print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))\n\n return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name, Ypred_Lp_mean, Ypred_Hp_mean\n\n\ndef split_data_for_empirical_calibration(Ytrue, Ypred, sigma, cal_split=0.8):\n \"\"\"\n Extracts a portion of the arrays provided for the computation\n of the calibration and reserves the remainder portion\n for testing.\n\n Parameters\n ----------\n Ytrue : numpy array\n Array with true (observed) values\n Ypred : numpy array\n Array with predicted values.\n sigma : numpy array\n Array with standard deviations learned with deep learning\n model (or std value computed from prediction if homoscedastic\n inference).\n cal_split : float\n Split of data to use for estimating the calibration relationship.\n It is assumed that it will be a value in (0, 1).\n (Default: use 80% of predictions to generate empirical\n calibration).\n\n Return\n ----------\n index_perm_total : numpy array\n Random permutation of the array indices. 
The first 'num_cal'\n of the indices correspond to the samples that are used for\n calibration, while the remainder are the samples reserved\n for calibration testing.\n pSigma_cal : numpy array\n Part of the input sigma array to use for calibration.\n pSigma_test : numpy array\n Part of the input sigma array to reserve for testing.\n pPred_cal : numpy array\n Part of the input Ypred array to use for calibration.\n pPred_test : numpy array\n Part of the input Ypred array to reserve for testing.\n true_cal : numpy array\n Part of the input Ytrue array to use for calibration.\n true_test : numpy array\n Part of the input Ytrue array to reserve for testing.\n \"\"\"\n\n # shuffle data for calibration\n num_pred_total = sigma.shape[0]\n num_cal = np.int(num_pred_total * cal_split)\n index_perm_total = np.random.permutation(range(num_pred_total))\n\n # Permute data\n pSigma_perm_all = sigma[index_perm_total]\n pPred_perm_all = Ypred[index_perm_total]\n true_perm_all = Ytrue[index_perm_total]\n\n # Split in calibration and testing\n pSigma_cal = pSigma_perm_all[:num_cal]\n pSigma_test = pSigma_perm_all[num_cal:]\n pPred_cal = pPred_perm_all[:num_cal]\n pPred_test = pPred_perm_all[num_cal:]\n true_cal = true_perm_all[:num_cal]\n true_test = true_perm_all[num_cal:]\n\n print('Size of calibration set: ', true_cal.shape)\n print('Size of test set: ', true_test.shape)\n\n return index_perm_total, pSigma_cal, pSigma_test, pPred_cal, pPred_test, true_cal, true_test\n\n\ndef compute_empirical_calibration(pSigma_cal, pPred_cal, true_cal, bins, coverage_percentile):\n \"\"\" Use the arrays provided to estimate an empirical mapping\n between standard deviation and absolute value of error,\n both of which have been observed during inference. Since\n most of the times the raw statistics per bin are very noisy,\n a smoothing step (based on scipy's savgol filter) is performed.\n\n Parameters\n ----------\n pSigma_cal : numpy array\n Part of the standard deviations array to use for calibration.\n pPred_cal : numpy array\n Part of the predictions array to use for calibration.\n true_cal : numpy array\n Part of the true (observed) values array to use for calibration.\n bins : int\n Number of bins to split the range of standard deviations\n included in pSigma_cal array.\n coverage_percentile : float\n Value to use for estimating coverage when evaluating the percentiles\n of the observed absolute value of errors.\n\n Return\n ----------\n mean_sigma : numpy array\n Array with the mean standard deviations computed per bin.\n min_sigma : numpy array\n Array with the minimum standard deviations computed per bin.\n max_sigma : numpy array\n Array with the maximum standard deviations computed per bin.\n error_thresholds : numpy array\n Thresholds of the errors computed to attain a certain\n error coverage per bin.\n err_err : numpy array\n Error bars in errors (one standard deviation for a binomial\n distribution estimated by bin vs. the other bins) for the\n calibration error.\n error_thresholds_smooth : numpy array\n Thresholds of the errors computed to attain a certain\n error coverage per bin after a smoothed operation is applied\n to the frequently noisy bin-based estimations.\n sigma_start_index : non-negative integer\n Index in the mean_sigma array that defines the start of\n the valid empirical calibration interval (i.e. 
index to\n the smallest std for which a meaningful error mapping\n is obtained).\n sigma_end_index : non-negative integer\n Index in the mean_sigma array that defines the end of\n the valid empirical calibration interval (i.e. index to\n the largest std for which a meaningful error mapping\n is obtained).\n s_interpolate : scipy.interpolate python object\n A python object from scipy.interpolate that computes a\n univariate spline (InterpolatedUnivariateSpline) constructed\n to express the mapping from standard deviation to error. This\n spline is generated during the computational empirical\n calibration procedure.\n \"\"\"\n\n index_sigma_cal = np.argsort(pSigma_cal)\n pSigma_cal_ordered_ = pSigma_cal[index_sigma_cal]\n Er_vect_cal_ = np.abs(true_cal - pPred_cal)\n Er_vect_cal_orderedSigma_ = Er_vect_cal_[index_sigma_cal]\n\n minL_sigma = np.min(pSigma_cal_ordered_)\n maxL_sigma = np.max(pSigma_cal_ordered_)\n print('Complete Sigma range --> Min: %f, Max: %f' % (minL_sigma, maxL_sigma))\n\n # Bin statistics for error and sigma\n mean_sigma, min_sigma, max_sigma, error_thresholds, err_err = bining_for_calibration(pSigma_cal_ordered_,\n minL_sigma,\n maxL_sigma,\n Er_vect_cal_orderedSigma_,\n bins,\n coverage_percentile)\n\n # smooth error function\n # scipy.signal.savgol_filter(x, window_length, polyorder,\n # deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)\n # error_thresholds_smooth = signal.savgol_filter(error_thresholds, 5, 1)\n error_thresholds_smooth = signal.savgol_filter(error_thresholds, 5, 1, mode='nearest')\n\n # Build Interpolant over smooth plot (this will become the calibration function)\n s_interpolate = InterpolatedUnivariateSpline(mean_sigma, error_thresholds_smooth)\n # Determine limits of calibration (i.e. monotonicity range)\n sigma_start_index, sigma_end_index = computation_of_valid_calibration_interval(error_thresholds, error_thresholds_smooth, err_err)\n\n print('Range of valid sigma: %.6f --> %.6f' % (mean_sigma[sigma_start_index], mean_sigma[sigma_end_index]))\n\n return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err, error_thresholds_smooth, sigma_start_index, sigma_end_index, s_interpolate\n\n\ndef bining_for_calibration(pSigma_cal_ordered_, minL_sigma,\n maxL_sigma, Er_vect_cal_orderedSigma_,\n bins, coverage_percentile):\n \"\"\" Bin the values of the standard deviations observed during\n inference and estimate a specified coverage percentile\n in the absolute error (observed during inference as well).\n Bins that have less than 50 samples are merged until they\n surpass this threshold.\n\n Parameters\n ----------\n pSigma_cal_ordered_ : numpy array\n Array of standard deviations ordered in ascending way.\n minL_sigma : float\n Minimum value of standard deviations included in\n pSigma_cal_ordered_ array.\n maxL_sigma : numpy array\n Maximum value of standard deviations included in\n pSigma_cal_ordered_ array.\n Er_vect_cal_orderedSigma_ : numpy array\n Array ob absolute value of errors corresponding with\n the array of ordered standard deviations.\n bins : int\n Number of bins to split the range of standard deviations\n included in pSigma_cal_ordered_ array.\n coverage_percentile : float\n Value to use for estimating coverage when evaluating the percentiles\n of the observed absolute value of errors.\n\n Return\n ----------\n mean_sigma : numpy array\n Array with the mean standard deviations computed per bin.\n min_sigma : numpy array\n Array with the minimum standard deviations computed per bin.\n max_sigma : numpy array\n Array with the 
maximum standard deviations computed per bin.\n error_thresholds : numpy array\n Thresholds of the errors computed to attain a certain\n error coverage per bin.\n err_err : numpy array\n Error bars in errors (one standard deviation for a binomial\n distribution estimated by bin vs. the other bins) for the\n calibration error.\n \"\"\"\n\n # thresholds = np.logspace(np.log10(minL_sigma), np.log10(maxL_sigma), num=bins)\n thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)\n classes = np.digitize(pSigma_cal_ordered_, thresholds)\n Nbin = np.zeros(bins + 1)\n for i in range(bins + 1):\n indices = (classes == i)\n Nbin[i] = indices.sum()\n\n # Repair bins\n new_thresholds_l = []\n new_nbins_l = []\n sumN = 0\n for i in range(Nbin.shape[0]):\n sumN += Nbin[i]\n if sumN > 50:\n if i > (thresholds.shape[0] - 1):\n new_thresholds_l.append(thresholds[-1])\n else:\n new_thresholds_l.append(thresholds[i])\n new_nbins_l.append(sumN)\n sumN = 0\n new_thresholds = np.array(new_thresholds_l)\n new_nbins = np.array(new_nbins_l)\n new_thresholds[-1] = thresholds[-1]\n new_nbins[-1] += sumN\n\n #\n classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])\n error_thresholds = -1. * np.ones(new_nbins.shape[0])\n mean_sigma = -1. * np.ones(new_nbins.shape[0])\n min_sigma = -1. * np.ones(new_nbins.shape[0])\n max_sigma = -1. * np.ones(new_nbins.shape[0])\n err_err = -1. * np.ones(new_nbins.shape[0])\n Ncal = pSigma_cal_ordered_.shape[0]\n for i in range(error_thresholds.shape[0]):\n indices = (classes == i)\n n_aux = indices.sum()\n assert n_aux == new_nbins[i]\n print('Points in bin %d: %d' % (i, n_aux))\n mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])\n min_sigma[i] = np.min(pSigma_cal_ordered_[indices])\n max_sigma[i] = np.max(pSigma_cal_ordered_[indices])\n error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)\n err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]\n\n return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err\n\n\ndef computation_of_valid_calibration_interval(error_thresholds, error_thresholds_smooth, err_err):\n \"\"\" Function that estimates the empirical range in which a\n monotonic relation is observed between standard deviation\n and coverage of absolute value of error. Since the\n statistics computed per bin are relatively noisy, the\n application of a greedy criterion (e.g. guarantee a\n monotonically increasing relationship) does not yield\n good results. Therefore, a softer version is constructed\n based on the satisfaction of certain criteria depending\n on: the values of the error coverage computed per bin,\n a smoothed version of them and the associated error\n estimated (based on one standard deviation for a binomial\n distribution estimated by bin vs. 
the other bins).\n A minimal validation requiring the end idex to be\n largest than the starting index is performed before\n the function return.\n\n Current criteria:\n - the smoothed errors are inside the error bars AND\n they are almost increasing (a small tolerance is\n allowed, so a small wobbliness in the smoother\n values is permitted).\n OR\n - both the raw values for the bins (with a small tolerance)\n are increasing, AND the smoothed value is greater than the\n raw value.\n OR\n - the current smoothed value is greater than the previous AND\n the smoothed values for the next been are inside the error\n bars.\n\n Parameters\n ----------\n error_thresholds : numpy array\n Thresholds of the errors computed to attain a certain\n error coverage per bin.\n error_thresholds_smooth : numpy array\n Thresholds of the errors computed to attain a certain\n error coverage per bin after a smoothed operation is applied\n to the frequently noisy bin-based estimations.\n err_err : numpy array\n Error bars in errors (one standard deviation for a binomial\n distribution estimated by bin vs. the other bins) for the\n calibration error.\n\n Return\n ----------\n sigma_start_index : non-negative integer\n Index estimated in the mean_sigma array corresponding to\n the value that defines the start of the valid empirical\n calibration interval (i.e. index to the smallest std for\n which a meaningful error mapping is obtained, according\n to the criteria explained before).\n sigma_end_index : non-negative integer\n Index estimated in the mean_sigma array corresponding to\n the value that defines the end of the valid empirical\n calibration interval (i.e. index to the largest std for\n which a meaningful error mapping is obtained, according\n to the criteria explained before).\n \"\"\"\n\n # Computation of the calibration interval\n limitH = error_thresholds + err_err\n limitL = error_thresholds - err_err\n\n # search for starting point\n for i in range(err_err.shape[0]):\n if ((error_thresholds_smooth[i] >= limitL[i]) and\n (error_thresholds_smooth[i] <= limitH[i])): # Ask if the current is in the interval\n sigma_start_index = i\n break\n sigma_end_index = sigma_start_index - 1\n\n restart = max(1, sigma_start_index)\n for i in range(restart, err_err.shape[0] - 1):\n if (((error_thresholds_smooth[i] >= limitL[i]) and\n (error_thresholds_smooth[i] <= limitH[i]) and\n ((error_thresholds_smooth[i] * 1.005 > error_thresholds_smooth[i - 1]) or\n ((error_thresholds[i] * 1.01 > error_thresholds[i - 1]) and\n (error_thresholds_smooth[i] > error_thresholds[i])))) # Ask if the current is in the interval with slightly increasing trend\n or # Ask if the current is greater than the previous and the next is in the interval\n ((error_thresholds_smooth[i] > error_thresholds_smooth[i - 1]) and\n ((error_thresholds_smooth[i + 1] >= limitL[i + 1]) and\n (error_thresholds_smooth[i + 1] <= limitH[i + 1])))):\n\n sigma_end_index = i\n else: # Finalize search for monotonic range\n if (sigma_end_index - sigma_start_index) > 4:\n break\n else: # Reset indices\n sigma_start_index = i + 1\n sigma_end_index = i\n\n print('Range of valid sigma indices (inclusive): %d --> %d' % (sigma_start_index, sigma_end_index))\n\n assert (sigma_end_index > sigma_start_index)\n\n return sigma_start_index, sigma_end_index\n\n\ndef applying_calibration(pSigma_test, pPred_test, true_test, s_interpolate, minL_sigma_auto, maxL_sigma_auto):\n \"\"\" Use the empirical mapping between standard deviation and\n absolute value of error estimated during 
calibration (i.e.\n apply the univariate spline computed) to estimate the error\n for the part of the standard deviation array that was reserved\n for testing the empirical calibration. The resulting error array\n (yp_test) should overestimate the true observed error (eabs_red).\n All the computations are restricted to the valid calibration\n interval: [minL_sigma_auto, maxL_sigma_auto].\n\n Parameters\n ----------\n pSigma_test : numpy array\n Part of the standard deviations array to use for calibration testing.\n pPred_test : numpy array\n Part of the predictions array to use for calibration testing.\n true_test : numpy array\n Part of the true (observed) values array to use for calibration testing.\n s_interpolate : scipy.interpolate python object\n A python object from scipy.interpolate that computes a\n univariate spline (InterpolatedUnivariateSpline) expressing\n the mapping from standard deviation to error. This\n spline is generated during the computational empirical\n calibration procedure.\n minL_sigma_auto : float\n Starting value of the valid empirical calibration interval\n (i.e. smallest std for which a meaningful error mapping\n is obtained).\n maxL_sigma_auto : float\n Ending value of the valid empirical calibration interval\n (i.e. largest std for which a meaningful error mapping\n is obtained).\n\n Return\n ----------\n index_sigma_range_test : numpy array\n Indices of the pSigma_test array that are included in the\n valid calibration interval, given by:\n [minL_sigma_auto, maxL_sigma_auto].\n xp_test : numpy array\n Array with the mean standard deviations in the calibration\n testing array.\n yp_test : numpy array\n Mapping of the given standard deviation to error computed\n from the interpolation spline constructed by empirical\n calibration.\n eabs_red : numpy array\n Array with the observed absolute errors in the part of the testing\n array for which the observed standard deviations are in the\n valid interval of calibration.\n \"\"\"\n\n # Filter to appropriate range\n index_sigma_range_test = (pSigma_test >= minL_sigma_auto) & (pSigma_test < maxL_sigma_auto)\n xp_test = pSigma_test[index_sigma_range_test]\n yp_test = s_interpolate(xp_test)\n Er_vect_ = true_test - pPred_test\n eabs_ = np.abs(Er_vect_)\n eabs_red = eabs_[index_sigma_range_test]\n\n return index_sigma_range_test, xp_test, yp_test, eabs_red\n\n\ndef overprediction_check(yp_test, eabs_red):\n \"\"\" Compute the percentage of overestimated absolute error\n predictions for the arrays reserved for calibration testing\n and whose corresponding standard deviations are included\n in the valid calibration interval.\n\n Parameters\n ----------\n yp_test : numpy array\n Mapping of the standard deviation to error computed\n from the interpolation spline constructed by empirical\n calibration.\n eabs_red : numpy array\n Array with the observed absolute errors in the part of the testing\n array for which the observed standard deviations are in the\n valid interval of calibration.\n \"\"\"\n\n over_pred_error_index = (yp_test >= eabs_red)\n percentage_over_predicted = (over_pred_error_index.sum() / yp_test.shape[0])\n print(\"percentage over predicted: \", percentage_over_predicted)\n"
] | [
[
"numpy.ones",
"numpy.argsort",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.abs",
"numpy.round",
"numpy.linspace",
"numpy.mean",
"numpy.int",
"numpy.sqrt",
"numpy.zeros",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.std",
"numpy.percentile",
"scipy.signal.savgol_filter",
"numpy.random.shuffle",
"numpy.exp",
"numpy.array",
"scipy.stats.spearmanr",
"numpy.digitize"
]
] |
ibenemerito88/openBF_workshop | [
"a63a6fbd1ef8528890fb1072730124e054875008"
] | [
"Workshop/Part3/part3_sol.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\nimport reslast\n\n\nplt.close(\"all\")\n\n# Symmetric network\nq,a,p,u,c,n,s = reslast.resu(\"network\")\n# Non-symmetric network\nqn,an,pn,un,cn,nn,sn = reslast.resu(\"networknonsym\")\n\n\n\n\n\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.close"
]
] |
AsaphLightricks/3DDFA | [
"7630986c0286cd2c85b5dfd14ae6e8322e4ba605"
] | [
"utils/cython/setup.py"
] | [
"'''\npython setup.py build_ext -i\nto compile\n'''\n\n# setup.py\nfrom distutils.core import setup, Extension\n# from Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nimport numpy\n\nsetup(\n name='mesh_core_cython',\n cmdclass={'build_ext': build_ext},\n ext_modules=[Extension(\"mesh_core_cython\",\n sources=[\"mesh_core_cython.pyx\", \"mesh_core.cpp\"],\n language='c++',\n include_dirs=[numpy.get_include()], extra_compile_args=['-std=c++11', '-D_hypot=hypot', '-stdlib=libc++'])],\n\n)\n"
] | [
[
"numpy.get_include"
]
] |
britta-wstnr/mne-python | [
"b69afd1ff3337ac84f219b26c53537a5c8ceb1b9",
"33146156f2660f122ecc04fa0d5b3fd3c34b549e"
] | [
"mne/io/pick.py",
"tutorials/plot_brainstorm_auditory.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom copy import deepcopy\nimport re\n\nimport numpy as np\n\nfrom .constants import FIFF\nfrom ..utils import logger, verbose\nfrom ..externals.six import string_types\n\n\ndef channel_type(info, idx):\n \"\"\"Get channel type.\n\n Parameters\n ----------\n info : dict\n Measurement info\n idx : int\n Index of channel\n\n Returns\n -------\n type : 'grad' | 'mag' | 'eeg' | 'stim' | 'eog' | 'emg' | 'ecg'\n 'ref_meg' | 'resp' | 'exci' | 'ias' | 'syst' | 'misc'\n 'seeg' | 'bio' | 'chpi' | 'dipole' | 'gof' | 'ecog' | 'hbo' | 'hbr'\n Type of channel\n \"\"\"\n kind = info['chs'][idx]['kind']\n if kind == FIFF.FIFFV_MEG_CH:\n if info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T_M:\n return 'grad'\n elif info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T:\n return 'mag'\n elif kind == FIFF.FIFFV_REF_MEG_CH:\n return 'ref_meg'\n elif kind == FIFF.FIFFV_EEG_CH:\n return 'eeg'\n elif kind == FIFF.FIFFV_STIM_CH:\n return 'stim'\n elif kind == FIFF.FIFFV_EOG_CH:\n return 'eog'\n elif kind == FIFF.FIFFV_EMG_CH:\n return 'emg'\n elif kind == FIFF.FIFFV_ECG_CH:\n return 'ecg'\n elif kind == FIFF.FIFFV_RESP_CH:\n return 'resp'\n elif kind == FIFF.FIFFV_MISC_CH:\n return 'misc'\n elif kind == FIFF.FIFFV_EXCI_CH:\n return 'exci'\n elif kind == FIFF.FIFFV_IAS_CH:\n return 'ias'\n elif kind == FIFF.FIFFV_SYST_CH:\n return 'syst'\n elif kind == FIFF.FIFFV_SEEG_CH:\n return 'seeg'\n elif kind == FIFF.FIFFV_BIO_CH:\n return 'bio'\n elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,\n FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,\n FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,\n FIFF.FIFFV_HPI_MOV]:\n return 'chpi' # channels relative to head position monitoring\n elif kind == FIFF.FIFFV_DIPOLE_WAVE:\n return 'dipole'\n elif kind == FIFF.FIFFV_GOODNESS_FIT:\n return 'gof'\n elif kind == FIFF.FIFFV_ECOG_CH:\n return 'ecog'\n elif kind == FIFF.FIFFV_FNIRS_CH:\n if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO:\n return 'hbo'\n elif info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR:\n return 'hbr'\n raise Exception('Unknown channel type')\n\n\ndef pick_channels(ch_names, include, exclude=[]):\n \"\"\"Pick channels by names.\n\n Returns the indices of the good channels in ch_names.\n\n Parameters\n ----------\n ch_names : list of string\n List of channels.\n include : list of string\n List of channels to include (if empty include all available).\n\n .. note:: This is to be treated as a set. 
The order of this list\n is not used or maintained in ``sel``.\n\n exclude : list of string\n List of channels to exclude (if empty do not exclude any channel).\n Defaults to [].\n\n See Also\n --------\n pick_channels_regexp, pick_types\n\n Returns\n -------\n sel : array of int\n Indices of good channels.\n \"\"\"\n if len(np.unique(ch_names)) != len(ch_names):\n raise RuntimeError('ch_names is not a unique list, picking is unsafe')\n _check_excludes_includes(include)\n _check_excludes_includes(exclude)\n if not isinstance(include, set):\n include = set(include)\n if not isinstance(exclude, set):\n exclude = set(exclude)\n\n sel = []\n for k, name in enumerate(ch_names):\n if (len(include) == 0 or name in include) and name not in exclude:\n sel.append(k)\n return np.array(sel, int)\n\n\ndef pick_channels_regexp(ch_names, regexp):\n \"\"\"Pick channels using regular expression.\n\n Returns the indices of the good channels in ch_names.\n\n Parameters\n ----------\n ch_names : list of string\n List of channels\n\n regexp : string\n The regular expression. See python standard module for regular\n expressions.\n\n Returns\n -------\n sel : array of int\n Indices of good channels.\n\n See Also\n --------\n pick_channels\n\n Examples\n --------\n >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')\n [0]\n >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *')\n [0, 1, 2]\n \"\"\"\n r = re.compile(regexp)\n return [k for k, name in enumerate(ch_names) if r.match(name)]\n\n\ndef _triage_meg_pick(ch, meg):\n \"\"\"Triage an MEG pick type.\"\"\"\n if meg is True:\n return True\n elif ch['unit'] == FIFF.FIFF_UNIT_T_M:\n if meg == 'grad':\n return True\n elif meg == 'planar1' and ch['ch_name'].endswith('2'):\n return True\n elif meg == 'planar2' and ch['ch_name'].endswith('3'):\n return True\n elif (meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T):\n return True\n return False\n\n\ndef _triage_fnirs_pick(ch, fnirs):\n \"\"\"Triage an fNIRS pick type.\"\"\"\n if fnirs is True:\n return True\n elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and fnirs == 'hbo':\n return True\n elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and fnirs == 'hbr':\n return True\n return False\n\n\ndef _check_meg_type(meg, allow_auto=False):\n \"\"\"Ensure a valid meg type.\"\"\"\n if isinstance(meg, string_types):\n allowed_types = ['grad', 'mag', 'planar1', 'planar2']\n allowed_types += ['auto'] if allow_auto else []\n if meg not in allowed_types:\n raise ValueError('meg value must be one of %s or bool, not %s'\n % (allowed_types, meg))\n\n\ndef pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,\n emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,\n exci=False, ias=False, syst=False, seeg=False, dipole=False,\n gof=False, bio=False, ecog=False, fnirs=False, include=(),\n exclude='bads', selection=None):\n \"\"\"Pick channels by type and names.\n\n Parameters\n ----------\n info : dict\n The measurement info.\n meg : bool | str\n If True include all MEG channels. If False include None\n If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select\n only magnetometers, all gradiometers, or a specific type of\n gradiometer.\n eeg : bool\n If True include EEG channels.\n stim : bool\n If True include stimulus channels.\n eog : bool\n If True include EOG channels.\n ecg : bool\n If True include ECG channels.\n emg : bool\n If True include EMG channels.\n ref_meg: bool | str\n If True include CTF / 4D reference channels. 
If 'auto', the reference\n        channels are only included if compensations are present. Can also be\n        the string options from `meg`.\n    misc : bool\n        If True include miscellaneous analog channels.\n    resp : bool\n        If True include response-trigger channel. For some MEG systems this\n        is separate from the stim channel.\n    chpi : bool\n        If True include continuous HPI coil channels.\n    exci : bool\n        Flux excitation channel used to be a stimulus channel.\n    ias : bool\n        Internal Active Shielding data (maybe on Triux only).\n    syst : bool\n        System status channel information (on Triux systems only).\n    seeg : bool\n        Stereotactic EEG channels.\n    dipole : bool\n        Dipole time course channels.\n    gof : bool\n        Dipole goodness of fit channels.\n    bio : bool\n        Bio channels.\n    ecog : bool\n        Electrocorticography channels.\n    fnirs : bool | str\n        Functional near-infrared spectroscopy channels. If True include all\n        fNIRS channels. If False (default) include none. If string it can be\n        'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to\n        include channels measuring deoxyhemoglobin).\n    include : list of string\n        List of additional channels to include. If empty do not include any.\n    exclude : list of string | str\n        List of channels to exclude. If 'bads' (default), exclude channels\n        in ``info['bads']``.\n    selection : list of string\n        Restrict sensor channels (MEG, EEG) to this list of channel names.\n\n    Returns\n    -------\n    sel : array of int\n        Indices of good channels.\n    \"\"\"\n    # NOTE: Changes to this function's signature should also be changed in\n    # PickChannelsMixin\n    from .meas_info import Info\n    if not isinstance(info, Info):\n        raise TypeError('info must be an instance of Info, not %s'\n                        % type(info))\n    info._check_consistency()\n    nchan = info['nchan']\n    pick = np.zeros(nchan, dtype=np.bool)\n\n    if exclude is None:\n        raise ValueError('exclude must be a list of strings or \"bads\"')\n    elif exclude == 'bads':\n        exclude = info.get('bads', [])\n    elif not isinstance(exclude, (list, tuple)):\n        raise ValueError('exclude must either be \"bads\" or a list of strings.'\n                         ' If only one channel is to be excluded, use '\n                         '[ch_name] instead of passing ch_name.')\n\n    _check_meg_type(ref_meg, allow_auto=True)\n    _check_meg_type(meg)\n    if isinstance(ref_meg, string_types) and ref_meg == 'auto':\n        ref_meg = ('comps' in info and info['comps'] is not None and\n                   len(info['comps']) > 0)\n\n    for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci,\n                  ias, syst, seeg, dipole, gof, bio, ecog):\n        if not isinstance(param, bool):\n            w = ('Parameters for all channel types (with the exception '\n                 'of \"meg\", \"ref_meg\" and \"fnirs\") must be of type bool, '\n                 'not {0}.')\n            raise ValueError(w.format(type(param)))\n\n    for k in range(nchan):\n        kind = info['chs'][k]['kind']\n        # XXX eventually we should de-duplicate this with channel_type!\n        if kind == FIFF.FIFFV_MEG_CH and meg:\n            pick[k] = _triage_meg_pick(info['chs'][k], meg)\n        elif kind == FIFF.FIFFV_EEG_CH and eeg:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_STIM_CH and stim:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_EOG_CH and eog:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_ECG_CH and ecg:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_EMG_CH and emg:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_MISC_CH and misc:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_REF_MEG_CH and ref_meg:\n            pick[k] = _triage_meg_pick(info['chs'][k], ref_meg)\n        elif kind == FIFF.FIFFV_RESP_CH and resp:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_SYST_CH and syst:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_SEEG_CH and seeg:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_IAS_CH and ias:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_EXCI_CH and exci:\n            pick[k] = True\n        elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,\n                      FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,\n                      FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,\n                      FIFF.FIFFV_HPI_MOV] and chpi:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_DIPOLE_WAVE and dipole:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_GOODNESS_FIT and gof:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_BIO_CH and bio:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_ECOG_CH and ecog:\n            pick[k] = True\n        elif kind == FIFF.FIFFV_FNIRS_CH:\n            pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs)\n\n    # restrict channels to selection if provided\n    if selection is not None:\n        # the selection only restricts these types of channels\n        sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,\n                    FIFF.FIFFV_EEG_CH]\n        for k in np.where(pick)[0]:\n            if (info['chs'][k]['kind'] in sel_kind and\n                    info['ch_names'][k] not in selection):\n                pick[k] = False\n\n    myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]\n    myinclude += include\n\n    if len(myinclude) == 0:\n        sel = np.array([], int)\n    else:\n        sel = pick_channels(info['ch_names'], myinclude, exclude)\n\n    return sel\n\n\ndef pick_info(info, sel=(), copy=True):\n    \"\"\"Restrict an info structure to a selection of channels.\n\n    Parameters\n    ----------\n    info : dict\n        Info structure from evoked or raw data.\n    sel : list of int | None\n        Indices of channels to include.\n    copy : bool\n        If copy is False, info is modified inplace.\n\n    Returns\n    -------\n    res : dict\n        Info structure restricted to a selection of channels.\n    \"\"\"\n    info._check_consistency()\n    info = info.copy() if copy else info\n    if sel is None:\n        return info\n    elif len(sel) == 0:\n        raise ValueError('No channels match the selection.')\n\n    info['chs'] = [info['chs'][k] for k in sel]\n    info._update_redundant()\n    info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]\n\n    comps = deepcopy(info['comps'])\n    for c in comps:\n        row_idx = [k for k, n in enumerate(c['data']['row_names'])\n                   if n in info['ch_names']]\n        row_names = [c['data']['row_names'][i] for i in row_idx]\n        rowcals = c['rowcals'][row_idx]\n        c['rowcals'] = rowcals\n        c['data']['nrow'] = len(row_names)\n        c['data']['row_names'] = row_names\n        c['data']['data'] = c['data']['data'][row_idx]\n    info['comps'] = comps\n    info._check_consistency()\n    return info\n\n\ndef _has_kit_refs(info, picks):\n    \"\"\"Determine if KIT ref channels are chosen.\n\n    This is currently only used by make_forward_solution, which cannot\n    run when KIT reference channels are included.\n    \"\"\"\n    for p in picks:\n        if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG:\n            return True\n    return False\n\n\ndef pick_channels_evoked(orig, include=[], exclude='bads'):\n    \"\"\"Pick channels from evoked data.\n\n    Parameters\n    ----------\n    orig : Evoked object\n        One evoked dataset.\n    include : list of string, (optional)\n        List of channels to include (if empty, include all available).\n    exclude : list of string | str\n        List of channels to exclude. If empty do not exclude any (default).\n        If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'.\n\n    Returns\n    -------\n    res : instance of Evoked\n        Evoked data restricted to selected channels. 
If include and\n exclude are empty it returns orig without copy.\n \"\"\"\n if len(include) == 0 and len(exclude) == 0:\n return orig\n\n exclude = _check_excludes_includes(exclude, info=orig.info,\n allow_bads=True)\n sel = pick_channels(orig.info['ch_names'], include=include,\n exclude=exclude)\n\n if len(sel) == 0:\n raise ValueError('Warning : No channels match the selection.')\n\n res = deepcopy(orig)\n #\n # Modify the measurement info\n #\n res.info = pick_info(res.info, sel)\n #\n # Create the reduced data set\n #\n res.data = res.data[sel, :]\n\n return res\n\n\n@verbose\ndef pick_channels_forward(orig, include=[], exclude=[], verbose=None):\n \"\"\"Pick channels from forward operator.\n\n Parameters\n ----------\n orig : dict\n A forward solution.\n include : list of string\n List of channels to include (if empty, include all available).\n Defaults to [].\n exclude : list of string | 'bads'\n Channels to exclude (if empty, do not exclude any). Defaults to [].\n If 'bads', then exclude bad channels in orig.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n res : dict\n Forward solution restricted to selected channels. If include and\n exclude are empty it returns orig without copy.\n \"\"\"\n orig['info']._check_consistency()\n if len(include) == 0 and len(exclude) == 0:\n return orig\n exclude = _check_excludes_includes(exclude,\n info=orig['info'], allow_bads=True)\n\n # Allow for possibility of channel ordering in forward solution being\n # different from that of the M/EEG file it is based on.\n sel_sol = pick_channels(orig['sol']['row_names'], include=include,\n exclude=exclude)\n sel_info = pick_channels(orig['info']['ch_names'], include=include,\n exclude=exclude)\n\n fwd = deepcopy(orig)\n\n # Check that forward solution and original data file agree on #channels\n if len(sel_sol) != len(sel_info):\n raise ValueError('Forward solution and functional data appear to '\n 'have different channel names, please check.')\n\n # Do we have something?\n nuse = len(sel_sol)\n if nuse == 0:\n raise ValueError('Nothing remains after picking')\n\n logger.info(' %d out of %d channels remain after picking'\n % (nuse, fwd['nchan']))\n\n # Pick the correct rows of the forward operator using sel_sol\n fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]\n fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]\n fwd['sol']['nrow'] = nuse\n\n ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]\n fwd['nchan'] = nuse\n fwd['sol']['row_names'] = ch_names\n\n # Pick the appropriate channel names from the info-dict using sel_info\n fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]\n fwd['info']._update_redundant()\n fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]\n\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]\n fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]\n fwd['sol_grad']['nrow'] = nuse\n fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]\n for k in sel_sol]\n\n return fwd\n\n\ndef pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False,\n ecog=False, include=[], exclude=[]):\n \"\"\"Pick by channel type and names from a forward operator.\n\n Parameters\n ----------\n orig : dict\n A forward solution\n meg : bool or string\n If True include all MEG channels. 
If False include None\n If string it can be 'mag' or 'grad' to select only gradiometers\n or magnetometers.\n eeg : bool\n If True include EEG channels\n ref_meg : bool\n If True include CTF / 4D reference channels\n seeg : bool\n If True include stereotactic EEG channels\n ecog : bool\n If True include electrocorticography channels\n include : list of string\n List of additional channels to include. If empty do not include any.\n exclude : list of string | str\n List of channels to exclude. If empty do not exclude any (default).\n If 'bads', exclude channels in orig['info']['bads'].\n\n Returns\n -------\n res : dict\n Forward solution restricted to selected channel types.\n \"\"\"\n info = orig['info']\n sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog,\n include=include, exclude=exclude)\n if len(sel) == 0:\n raise ValueError('No valid channels found')\n include_ch_names = [info['ch_names'][k] for k in sel]\n\n return pick_channels_forward(orig, include_ch_names)\n\n\ndef channel_indices_by_type(info):\n \"\"\"Get indices of channels by type.\"\"\"\n idx = dict((key, list()) for key in _PICK_TYPES_KEYS if\n key not in ('meg', 'fnirs'))\n idx.update(mag=list(), grad=list(), hbo=list(), hbr=list())\n for k, ch in enumerate(info['chs']):\n for key in idx.keys():\n if channel_type(info, k) == key:\n idx[key].append(k)\n\n return idx\n\n\ndef pick_channels_cov(orig, include=[], exclude='bads'):\n \"\"\"Pick channels from covariance matrix.\n\n Parameters\n ----------\n orig : Covariance\n A covariance.\n include : list of string, (optional)\n List of channels to include (if empty, include all available).\n exclude : list of string, (optional) | 'bads'\n Channels to exclude (if empty, do not exclude any). Defaults to 'bads'.\n\n Returns\n -------\n res : dict\n Covariance solution restricted to selected channels.\n \"\"\"\n from ..cov import Covariance\n exclude = orig['bads'] if exclude == 'bads' else exclude\n sel = pick_channels(orig['names'], include=include, exclude=exclude)\n data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel]\n names = [orig['names'][k] for k in sel]\n bads = [name for name in orig['bads'] if name in orig['names']]\n res = Covariance(\n data=data, names=names, bads=bads, projs=deepcopy(orig['projs']),\n nfree=orig['nfree'], eig=None, eigvec=None,\n method=orig.get('method', None), loglik=orig.get('loglik', None))\n return res\n\n\ndef _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'):\n \"\"\"Get data channel indices as separate list of tuples.\n\n Parameters\n ----------\n info : instance of mne.measuerment_info.Info\n The info.\n meg_combined : bool\n Whether to return combined picks for grad and mag.\n ref_meg : bool\n If True include CTF / 4D reference channels\n exclude : list of string | str\n List of channels to exclude. 
If 'bads' (default), exclude channels\n in info['bads'].\n\n Returns\n -------\n picks_list : list of tuples\n The list of tuples of picks and the type string.\n \"\"\"\n from ..channels.channels import _contains_ch_type\n picks_list = []\n has_mag, has_grad, has_eeg = [_contains_ch_type(info, k)\n for k in ('mag', 'grad', 'eeg')]\n if has_mag and (meg_combined is not True or not has_grad):\n picks_list.append(\n ('mag', pick_types(info, meg='mag', eeg=False, stim=False,\n ref_meg=ref_meg, exclude=exclude))\n )\n if has_grad and (meg_combined is not True or not has_mag):\n picks_list.append(\n ('grad', pick_types(info, meg='grad', eeg=False, stim=False,\n ref_meg=ref_meg, exclude=exclude))\n )\n if has_mag and has_grad and meg_combined is True:\n picks_list.append(\n ('meg', pick_types(info, meg=True, eeg=False, stim=False,\n ref_meg=ref_meg, exclude=exclude))\n )\n if has_eeg:\n picks_list.append(\n ('eeg', pick_types(info, meg=False, eeg=True, stim=False,\n ref_meg=ref_meg, exclude=exclude))\n )\n return picks_list\n\n\ndef _check_excludes_includes(chs, info=None, allow_bads=False):\n \"\"\"Ensure that inputs to exclude/include are list-like or \"bads\".\n\n Parameters\n ----------\n chs : any input, should be list, tuple, string\n The channels passed to include or exclude.\n allow_bads : bool\n Allow the user to supply \"bads\" as a string for auto exclusion.\n\n Returns\n -------\n chs : list\n Channels to be excluded/excluded. If allow_bads, and chs==\"bads\",\n this will be the bad channels found in 'info'.\n \"\"\"\n from .meas_info import Info\n if not isinstance(chs, (list, tuple, np.ndarray)):\n if allow_bads is True:\n if not isinstance(info, Info):\n raise ValueError('Supply an info object if allow_bads is true')\n elif chs != 'bads':\n raise ValueError('If chs is a string, it must be \"bads\"')\n else:\n chs = info['bads']\n else:\n raise ValueError(\n 'include/exclude must be list, tuple, ndarray, or \"bads\". ' +\n 'You provided type {0}'.format(type(chs)))\n return chs\n\n\n_PICK_TYPES_DATA_DICT = dict(\n meg=True, eeg=True, stim=False, eog=False, ecg=False, emg=False,\n misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False,\n seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True)\n_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT.keys()) + ['ref_meg'])\n_DATA_CH_TYPES_SPLIT = ['mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr']\n\n# Valid data types, ordered for consistency, used in viz/evoked.\n_VALID_CHANNEL_TYPES = ['eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg',\n 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr',\n 'misc']\n\n\ndef _pick_data_channels(info, exclude='bads', with_ref_meg=True):\n \"\"\"Pick only data channels.\"\"\"\n return pick_types(info, ref_meg=with_ref_meg, include=[], exclude=exclude,\n selection=None, **_PICK_TYPES_DATA_DICT)\n\n\ndef _pick_aux_channels(info, exclude='bads'):\n \"\"\"Pick only auxiliary channels.\n\n Corresponds to EOG, ECG, EMG and BIO\n \"\"\"\n return pick_types(info, meg=False, eog=True, ecg=True, emg=True, bio=True,\n ref_meg=False, exclude=exclude)\n\n\ndef _pick_data_or_ica(info):\n \"\"\"Pick only data or ICA channels.\"\"\"\n ch_names = [c['ch_name'] for c in info['chs']]\n if 'ICA ' in ','.join(ch_names):\n picks = pick_types(info, exclude=[], misc=True)\n else:\n picks = _pick_data_channels(info, exclude=[], with_ref_meg=True)\n return picks\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n====================================\nBrainstorm auditory tutorial dataset\n====================================\n\nHere we compute the evoked from raw for the auditory Brainstorm\ntutorial dataset. For comparison, see [1]_ and:\n\n http://neuroimage.usc.edu/brainstorm/Tutorials/Auditory\n\nExperiment:\n\n - One subject, 2 acquisition runs 6 minutes each.\n - Each run contains 200 regular beeps and 40 easy deviant beeps.\n - Random ISI: between 0.7s and 1.7s seconds, uniformly distributed.\n - Button pressed when detecting a deviant with the right index finger.\n\nThe specifications of this dataset were discussed initially on the\n`FieldTrip bug tracker <http://bugzilla.fcdonders.nl/show_bug.cgi?id=2300>`_.\n\nReferences\n----------\n.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716\n\"\"\"\n\n# Authors: Mainak Jas <[email protected]>\n# Eric Larson <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport pandas as pd\nimport numpy as np\n\nimport mne\nfrom mne import combine_evoked\nfrom mne.minimum_norm import apply_inverse\nfrom mne.datasets.brainstorm import bst_auditory\nfrom mne.io import read_raw_ctf\nfrom mne.filter import notch_filter, filter_data\n\nprint(__doc__)\n\n###############################################################################\n# To reduce memory consumption and running time, some of the steps are\n# precomputed. To run everything from scratch change this to False. With\n# ``use_precomputed = False`` running time of this script can be several\n# minutes even on a fast computer.\nuse_precomputed = True\n\n###############################################################################\n# The data was collected with a CTF 275 system at 2400 Hz and low-pass\n# filtered at 600 Hz. Here the data and empty room data files are read to\n# construct instances of :class:`mne.io.Raw`.\ndata_path = bst_auditory.data_path()\n\nsubject = 'bst_auditory'\nsubjects_dir = op.join(data_path, 'subjects')\n\nraw_fname1 = op.join(data_path, 'MEG', 'bst_auditory',\n 'S01_AEF_20131218_01.ds')\nraw_fname2 = op.join(data_path, 'MEG', 'bst_auditory',\n 'S01_AEF_20131218_02.ds')\nerm_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'S01_Noise_20131218_01.ds')\n\n###############################################################################\n# In the memory saving mode we use ``preload=False`` and use the memory\n# efficient IO which loads the data on demand. 
However, filtering and some\n# other functions require the data to be preloaded in the memory.\npreload = not use_precomputed\nraw = read_raw_ctf(raw_fname1, preload=preload)\nn_times_run1 = raw.n_times\nmne.io.concatenate_raws([raw, read_raw_ctf(raw_fname2, preload=preload)])\nraw_erm = read_raw_ctf(erm_fname, preload=preload)\n\n###############################################################################\n# Data channel array consisted of 274 MEG axial gradiometers, 26 MEG reference\n# sensors and 2 EEG electrodes (Cz and Pz).\n# In addition:\n#\n# - 1 stim channel for marking presentation times for the stimuli\n# - 1 audio channel for the sent signal\n# - 1 response channel for recording the button presses\n# - 1 ECG bipolar\n# - 2 EOG bipolar (vertical and horizontal)\n# - 12 head tracking channels\n# - 20 unused channels\n#\n# The head tracking channels and the unused channels are marked as misc\n# channels. Here we define the EOG and ECG channels.\nraw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})\nif not use_precomputed:\n # Leave out the two EEG channels for easier computation of forward.\n raw.pick_types(meg=True, eeg=False, stim=True, misc=True, eog=True,\n ecg=True)\n\n###############################################################################\n# For noise reduction, a set of bad segments have been identified and stored\n# in csv files. The bad segments are later used to reject epochs that overlap\n# with them.\n# The file for the second run also contains some saccades. The saccades are\n# removed by using SSP. We use pandas to read the data from the csv files. You\n# can also view the files with your favorite text editor.\n\nannotations_df = pd.DataFrame()\noffset = n_times_run1\nfor idx in [1, 2]:\n csv_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'events_bad_0%s.csv' % idx)\n df = pd.read_csv(csv_fname, header=None,\n names=['onset', 'duration', 'id', 'label'])\n print('Events from run {0}:'.format(idx))\n print(df)\n\n df['onset'] += offset * (idx - 1)\n annotations_df = pd.concat([annotations_df, df], axis=0)\n\nsaccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)\n\n# Conversion from samples to times:\nonsets = annotations_df['onset'].values / raw.info['sfreq']\ndurations = annotations_df['duration'].values / raw.info['sfreq']\ndescriptions = annotations_df['label'].values\n\nannotations = mne.Annotations(onsets, durations, descriptions)\nraw.annotations = annotations\ndel onsets, durations, descriptions\n\n###############################################################################\n# Here we compute the saccade and EOG projectors for magnetometers and add\n# them to the raw data. The projectors are added to both runs.\nsaccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,\n reject_by_annotation=False)\n\nprojs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,\n desc_prefix='saccade')\nif use_precomputed:\n proj_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'bst_auditory-eog-proj.fif')\n projs_eog = mne.read_proj(proj_fname)[0]\nelse:\n projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),\n n_mag=1, n_eeg=0)\nraw.add_proj(projs_saccade)\nraw.add_proj(projs_eog)\ndel saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory\n\n###############################################################################\n# Visually inspect the effects of projections. Click on 'proj' button at the\n# bottom right corner to toggle the projectors on/off. 
EOG events can be\n# plotted by adding the event list as a keyword argument. As the bad segments\n# and saccades were added as annotations to the raw data, they are plotted as\n# well.\nraw.plot(block=True)\n\n###############################################################################\n# Typical preprocessing step is the removal of power line artifact (50 Hz or\n# 60 Hz). Here we notch filter the data at 60, 120 and 180 to remove the\n# original 60 Hz artifact and the harmonics. The power spectra are plotted\n# before and after the filtering to show the effect. The drop after 600 Hz\n# appears because the data was filtered during the acquisition. In memory\n# saving mode we do the filtering at evoked stage, which is not something you\n# usually would do.\nif not use_precomputed:\n meg_picks = mne.pick_types(raw.info, meg=True, eeg=False)\n raw.plot_psd(tmax=np.inf, picks=meg_picks)\n notches = np.arange(60, 181, 60)\n raw.notch_filter(notches)\n raw.plot_psd(tmax=np.inf, picks=meg_picks)\n\n###############################################################################\n# We also lowpass filter the data at 100 Hz to remove the hf components.\nif not use_precomputed:\n raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',\n phase='zero-double', fir_design='firwin2')\n\n###############################################################################\n# Epoching and averaging.\n# First some parameters are defined and events extracted from the stimulus\n# channel (UPPT001). The rejection thresholds are defined as peak-to-peak\n# values and are in T / m for gradiometers, T for magnetometers and\n# V for EOG and EEG channels.\ntmin, tmax = -0.1, 0.5\nevent_id = dict(standard=1, deviant=2)\nreject = dict(mag=4e-12, eog=250e-6)\n# find events\nevents = mne.find_events(raw, stim_channel='UPPT001')\n\n###############################################################################\n# The event timing is adjusted by comparing the trigger times on detected\n# sound onsets on channel UADC001-4408.\nsound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]\nonsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]\nmin_diff = int(0.5 * raw.info['sfreq'])\ndiffs = np.concatenate([[min_diff + 1], np.diff(onsets)])\nonsets = onsets[diffs > min_diff]\nassert len(onsets) == len(events)\ndiffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']\nprint('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'\n % (np.mean(diffs), np.std(diffs)))\nevents[:, 0] = onsets\ndel sound_data, diffs\n\n###############################################################################\n# We mark a set of bad channels that seem noisier than others. This can also\n# be done interactively with ``raw.plot`` by clicking the channel name\n# (or the line). The marked channels are added as bad when the browser window\n# is closed.\nraw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']\n\n###############################################################################\n# The epochs (trials) are created for MEG channels. First we find the picks\n# for MEG and EOG channels. Then the epochs are constructed using these picks.\n# The epochs overlapping with annotated bad segments are also rejected by\n# default. 
To turn off rejection by bad segments (as was done earlier with\n# saccades) you can use keyword ``reject_by_annotation=False``.\npicks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,\n exclude='bads')\n\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=reject, preload=False,\n proj=True)\n\n###############################################################################\n# We only use first 40 good epochs from each run. Since we first drop the bad\n# epochs, the indices of the epochs are no longer same as in the original\n# epochs collection. Investigation of the event timings reveals that first\n# epoch from the second run corresponds to index 182.\nepochs.drop_bad()\nepochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],\n epochs['standard'][182:222]])\nepochs_standard.load_data() # Resampling to save memory.\nepochs_standard.resample(600, npad='auto')\nepochs_deviant = epochs['deviant'].load_data()\nepochs_deviant.resample(600, npad='auto')\ndel epochs, picks\n\n###############################################################################\n# The averages for each conditions are computed.\nevoked_std = epochs_standard.average()\nevoked_dev = epochs_deviant.average()\ndel epochs_standard, epochs_deviant\n\n###############################################################################\n# Typical preprocessing step is the removal of power line artifact (50 Hz or\n# 60 Hz). Here we notch filter the data at 60, 120 and 180 to remove the\n# original 60 Hz artifact and the harmonics. Normally this would be done to\n# raw data (with :func:`mne.io.Raw.filter`), but to reduce memory consumption\n# of this tutorial, we do it at evoked stage.\nif use_precomputed:\n sfreq = evoked_std.info['sfreq']\n notches = [60, 120, 180]\n for evoked in (evoked_std, evoked_dev):\n evoked.data[:] = notch_filter(evoked.data, sfreq, notches)\n evoked.data[:] = filter_data(evoked.data, sfreq, l_freq=None,\n h_freq=100.)\n\n###############################################################################\n# Here we plot the ERF of standard and deviant conditions. In both conditions\n# we can see the P50 and N100 responses. The mismatch negativity is visible\n# only in the deviant condition around 100-200 ms. P200 is also visible around\n# 170 ms in both conditions but much stronger in the standard condition. P300\n# is visible in deviant condition only (decision making in preparation of the\n# button press). You can view the topographies from a certain time span by\n# painting an area with clicking and holding the left mouse button.\nevoked_std.plot(window_title='Standard', gfp=True)\nevoked_dev.plot(window_title='Deviant', gfp=True)\n\n\n###############################################################################\n# Show activations as topography figures.\ntimes = np.arange(0.05, 0.301, 0.025)\nevoked_std.plot_topomap(times=times, title='Standard')\nevoked_dev.plot_topomap(times=times, title='Deviant')\n\n###############################################################################\n# We can see the MMN effect more clearly by looking at the difference between\n# the two conditions. 
P50 and N100 are no longer visible, but MMN/P200 and\n# P300 are emphasised.\nevoked_difference = combine_evoked([evoked_dev, -evoked_std], weights='equal')\nevoked_difference.plot(window_title='Difference', gfp=True)\n\n###############################################################################\n# Source estimation.\n# We compute the noise covariance matrix from the empty room measurement\n# and use it for the other runs.\nreject = dict(mag=4e-12)\ncov = mne.compute_raw_covariance(raw_erm, reject=reject)\ncov.plot(raw_erm.info)\ndel raw_erm\n\n###############################################################################\n# The transformation is read from a file. More information about coregistering\n# the data, see :ref:`ch_interactive_analysis` or\n# :func:`mne.gui.coregistration`.\ntrans_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'bst_auditory-trans.fif')\ntrans = mne.read_trans(trans_fname)\n\n###############################################################################\n# To save time and memory, the forward solution is read from a file. Set\n# ``use_precomputed=False`` in the beginning of this script to build the\n# forward solution from scratch. The head surfaces for constructing a BEM\n# solution are read from a file. Since the data only contains MEG channels, we\n# only need the inner skull surface for making the forward solution. For more\n# information: :ref:`CHDBBCEJ`, :func:`mne.setup_source_space`,\n# :ref:`create_bem_model`, :func:`mne.bem.make_watershed_bem`.\nif use_precomputed:\n fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',\n 'bst_auditory-meg-oct-6-fwd.fif')\n fwd = mne.read_forward_solution(fwd_fname)\nelse:\n src = mne.setup_source_space(subject, spacing='ico4',\n subjects_dir=subjects_dir, overwrite=True)\n model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],\n subjects_dir=subjects_dir)\n bem = mne.make_bem_solution(model)\n fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,\n bem=bem)\n\ninv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\ndel fwd\n\n###############################################################################\n# The sources are computed using dSPM method and plotted on an inflated brain\n# surface. For interactive controls over the image, use keyword\n# ``time_viewer=True``.\n# Standard condition.\nstc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')\nbrain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,\n surface='inflated', time_viewer=False, hemi='lh',\n initial_time=0.1, time_unit='s')\ndel stc_standard, brain\n\n###############################################################################\n# Deviant condition.\nstc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')\nbrain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,\n surface='inflated', time_viewer=False, hemi='lh',\n initial_time=0.1, time_unit='s')\ndel stc_deviant, brain\n\n###############################################################################\n# Difference.\nstc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')\nbrain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,\n surface='inflated', time_viewer=False, hemi='lh',\n initial_time=0.15, time_unit='s')\n"
] | [
[
"numpy.array",
"numpy.where",
"numpy.unique",
"numpy.zeros"
],
[
"numpy.diff",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.abs",
"numpy.arange",
"pandas.concat",
"numpy.std",
"numpy.mean"
]
] |
michaelyeah7/magics_mbrl | [
"7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08"
] | [
"gym-rbdl/gym_rbdl/envs/real_pendulum.py"
] | [
"import gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\n\n\nclass PendulumEnv(gym.Env):\n metadata = {\"render.modes\": [\"human\", \"rgb_array\"], \"video.frames_per_second\": 30}\n\n def __init__(self, g=10.0):\n self.max_speed = 8\n self.max_torque = 2.0\n self.dt = 0.05\n self.g = g\n self.m = 1.0\n self.l = 1.0\n self.viewer = None\n\n high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)\n self.action_space = spaces.Box(\n low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32\n )\n self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, u):\n th, thdot = self.state # th := theta\n\n g = self.g\n m = self.m\n l = self.l\n dt = self.dt\n\n u = np.clip(u, -self.max_torque, self.max_torque)[0]\n self.last_u = u # for rendering\n # costs = angle_normalize(th) ** 2 + 0.1 * thdot ** 2 + 0.001 * (u ** 2)\n y = np.cos(th)\n x = np.sin(th)\n costs = y + .1 * np.abs(x) + 0.1 * thdot ** 2 + 0.001 * (u ** 2)\n\n newthdot = (\n thdot\n + (-3 * g / (2 * l) * np.sin(th + np.pi) + 3.0 / (m * l ** 2) * u) * dt\n )\n newth = th + newthdot * dt\n newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)\n\n self.state = np.array([newth, newthdot])\n return self._get_obs(), -costs, False, {}\n\n def reset(self):\n high = np.array([np.pi, 1])\n self.state = self.np_random.uniform(low=-high, high=high)\n self.last_u = None\n return self._get_obs()\n\n def _get_obs(self):\n theta, thetadot = self.state\n return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)\n\n def render(self, mode=\"human\"):\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n\n self.viewer = rendering.Viewer(500, 500)\n self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)\n rod = rendering.make_capsule(1, 0.2)\n rod.set_color(0.8, 0.3, 0.3)\n self.pole_transform = rendering.Transform()\n rod.add_attr(self.pole_transform)\n self.viewer.add_geom(rod)\n axle = rendering.make_circle(0.05)\n axle.set_color(0, 0, 0)\n self.viewer.add_geom(axle)\n fname = path.join(path.dirname(__file__), \"assets/clockwise.png\")\n self.img = rendering.Image(fname, 1.0, 1.0)\n self.imgtrans = rendering.Transform()\n self.img.add_attr(self.imgtrans)\n\n self.viewer.add_onetime(self.img)\n self.pole_transform.set_rotation(self.state[0] + np.pi / 2)\n if self.last_u is not None:\n self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)\n\n return self.viewer.render(return_rgb_array=mode == \"rgb_array\")\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n\n\ndef angle_normalize(x):\n return ((x + np.pi) % (2 * np.pi)) - np.pi\n"
] | [
[
"numpy.abs",
"numpy.cos",
"numpy.clip",
"numpy.array",
"numpy.sin"
]
] |
ZeroDesigner/quantum-gan | [
"76b12fe1be25ac2a5e75fdc472947a08d7065c50"
] | [
"utils.py"
] | [
"from sklearn.metrics import classification_report as sk_classification_report\nfrom sklearn.metrics import confusion_matrix\n\nimport pickle\nimport gzip\nfrom rdkit import DataStructs\nfrom rdkit import Chem\nfrom rdkit.Chem import QED\nfrom rdkit.Chem import Crippen\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Draw\n\nimport math\nimport numpy as np\n\nNP_model = pickle.load(gzip.open('data/NP_score.pkl.gz'))\nSA_model = {i[j]: float(i[0]) for i in pickle.load(gzip.open('data/SA_score.pkl.gz')) for j in range(1, len(i))}\n\n\nclass MolecularMetrics(object):\n\n @staticmethod\n def _avoid_sanitization_error(op):\n try:\n return op()\n except ValueError:\n return None\n\n @staticmethod\n def remap(x, x_min, x_max):\n return (x - x_min) / (x_max - x_min)\n\n @staticmethod\n def valid_lambda(x):\n return x is not None and Chem.MolToSmiles(x) != ''\n\n @staticmethod\n def valid_lambda_special(x):\n s = Chem.MolToSmiles(x) if x is not None else ''\n return x is not None and '*' not in s and '.' not in s and s != ''\n\n @staticmethod\n def valid_scores(mols):\n return np.array(list(map(MolecularMetrics.valid_lambda_special, mols)), dtype=np.float32)\n\n @staticmethod\n def valid_filter(mols):\n return list(filter(MolecularMetrics.valid_lambda, mols))\n\n @staticmethod\n def valid_total_score(mols):\n return np.array(list(map(MolecularMetrics.valid_lambda, mols)), dtype=np.float32).mean()\n\n @staticmethod\n def novel_scores(mols, data):\n return np.array(\n list(map(lambda x: MolecularMetrics.valid_lambda(x) and Chem.MolToSmiles(x) not in data.smiles, mols)))\n\n @staticmethod\n def novel_filter(mols, data):\n return list(filter(lambda x: MolecularMetrics.valid_lambda(x) and Chem.MolToSmiles(x) not in data.smiles, mols))\n\n @staticmethod\n def novel_total_score(mols, data):\n return MolecularMetrics.novel_scores(MolecularMetrics.valid_filter(mols), data).mean()\n\n @staticmethod\n def unique_scores(mols):\n smiles = list(map(lambda x: Chem.MolToSmiles(x) if MolecularMetrics.valid_lambda(x) else '', mols))\n return np.clip(\n 0.75 + np.array(list(map(lambda x: 1 / smiles.count(x) if x != '' else 0, smiles)), dtype=np.float32), 0, 1)\n\n @staticmethod\n def unique_total_score(mols):\n v = MolecularMetrics.valid_filter(mols)\n s = set(map(lambda x: Chem.MolToSmiles(x), v))\n return 0 if len(v) == 0 else len(s) / len(v)\n\n # @staticmethod\n # def novel_and_unique_total_score(mols, data):\n # return ((MolecularMetrics.unique_scores(mols) == 1).astype(float) * MolecularMetrics.novel_scores(mols,\n # data)).sum()\n #\n # @staticmethod\n # def reconstruction_scores(data, model, session, sample=False):\n #\n # m0, _, _, a, x, _, f, _, _ = data.next_validation_batch()\n # feed_dict = {model.edges_labels: a, model.nodes_labels: x, model.node_features: f, model.training: False}\n #\n # try:\n # feed_dict.update({model.variational: False})\n # except AttributeError:\n # pass\n #\n # n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [\n # model.nodes_argmax, model.edges_argmax], feed_dict=feed_dict)\n #\n # n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n #\n # m1 = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]\n #\n # return np.mean([float(Chem.MolToSmiles(m0_) == Chem.MolToSmiles(m1_)) if m1_ is not None else 0\n # for m0_, m1_ in zip(m0, m1)])\n\n @staticmethod\n def natural_product_scores(mols, norm=False):\n\n # calculating the score\n scores = [sum(NP_model.get(bit, 0)\n for bit in 
Chem.rdMolDescriptors.GetMorganFingerprint(mol,\n 2).GetNonzeroElements()) / float(\n mol.GetNumAtoms()) if mol is not None else None\n for mol in mols]\n\n # preventing score explosion for exotic molecules\n scores = list(map(lambda score: score if score is None else (\n 4 + math.log10(score - 4 + 1) if score > 4 else (\n -4 - math.log10(-4 - score + 1) if score < -4 else score)), scores))\n\n scores = np.array(list(map(lambda x: -4 if x is None else x, scores)))\n scores = np.clip(MolecularMetrics.remap(scores, -3, 1), 0.0, 1.0) if norm else scores\n\n return scores\n\n @staticmethod\n def quantitative_estimation_druglikeness_scores(mols, norm=False):\n return np.array(list(map(lambda x: 0 if x is None else x, [\n MolecularMetrics._avoid_sanitization_error(lambda: QED.qed(mol)) if mol is not None else None for mol in\n mols])))\n\n @staticmethod\n def water_octanol_partition_coefficient_scores(mols, norm=False):\n scores = [MolecularMetrics._avoid_sanitization_error(lambda: Crippen.MolLogP(mol)) if mol is not None else None\n for mol in mols]\n scores = np.array(list(map(lambda x: -3 if x is None else x, scores)))\n scores = np.clip(MolecularMetrics.remap(scores, -2.12178879609, 6.0429063424), 0.0, 1.0) if norm else scores\n\n return scores\n\n @staticmethod\n def _compute_SAS(mol):\n fp = Chem.rdMolDescriptors.GetMorganFingerprint(mol, 2)\n fps = fp.GetNonzeroElements()\n score1 = 0.\n nf = 0\n # for bitId, v in fps.items():\n for bitId, v in fps.items():\n nf += v\n sfp = bitId\n score1 += SA_model.get(sfp, -4) * v\n score1 /= nf\n\n # features score\n nAtoms = mol.GetNumAtoms()\n nChiralCenters = len(Chem.FindMolChiralCenters(\n mol, includeUnassigned=True))\n ri = mol.GetRingInfo()\n nSpiro = Chem.rdMolDescriptors.CalcNumSpiroAtoms(mol)\n nBridgeheads = Chem.rdMolDescriptors.CalcNumBridgeheadAtoms(mol)\n nMacrocycles = 0\n for x in ri.AtomRings():\n if len(x) > 8:\n nMacrocycles += 1\n\n sizePenalty = nAtoms ** 1.005 - nAtoms\n stereoPenalty = math.log10(nChiralCenters + 1)\n spiroPenalty = math.log10(nSpiro + 1)\n bridgePenalty = math.log10(nBridgeheads + 1)\n macrocyclePenalty = 0.\n\n # ---------------------------------------\n # This differs from the paper, which defines:\n # macrocyclePenalty = math.log10(nMacrocycles+1)\n # This form generates better results when 2 or more macrocycles are present\n if nMacrocycles > 0:\n macrocyclePenalty = math.log10(2)\n\n score2 = 0. - sizePenalty - stereoPenalty - \\\n spiroPenalty - bridgePenalty - macrocyclePenalty\n\n # correction for the fingerprint density\n # not in the original publication, added in version 1.1\n # to make highly symmetrical molecules easier to synthetise\n score3 = 0.\n if nAtoms > len(fps):\n score3 = math.log(float(nAtoms) / len(fps)) * .5\n\n sascore = score1 + score2 + score3\n\n # need to transform \"raw\" value into scale between 1 and 10\n min = -4.0\n max = 2.5\n sascore = 11. - (sascore - min + 1) / (max - min) * 9.\n # smooth the 10-end\n if sascore > 8.:\n sascore = 8. + math.log(sascore + 1. 
- 9.)\n if sascore > 10.:\n sascore = 10.0\n elif sascore < 1.:\n sascore = 1.0\n\n return sascore\n\n @staticmethod\n def synthetic_accessibility_score_scores(mols, norm=False):\n scores = [MolecularMetrics._compute_SAS(mol) if mol is not None else None for mol in mols]\n scores = np.array(list(map(lambda x: 10 if x is None else x, scores)))\n scores = np.clip(MolecularMetrics.remap(scores, 5, 1.5), 0.0, 1.0) if norm else scores\n\n return scores\n\n @staticmethod\n def diversity_scores(mols, data):\n rand_mols = np.random.choice(data.data, 100)\n fps = [Chem.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048) for mol in rand_mols]\n\n scores = np.array(\n list(map(lambda x: MolecularMetrics.__compute_diversity(x, fps) if x is not None else 0, mols)))\n scores = np.clip(MolecularMetrics.remap(scores, 0.9, 0.945), 0.0, 1.0)\n\n return scores\n\n @staticmethod\n def __compute_diversity(mol, fps):\n ref_fps = Chem.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 4, nBits=2048)\n dist = DataStructs.BulkTanimotoSimilarity(ref_fps, fps, returnDistance=True)\n score = np.mean(dist)\n return score\n\n @staticmethod\n def drugcandidate_scores(mols, data):\n\n scores = (MolecularMetrics.constant_bump(\n MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=True), 0.210,\n 0.945) + MolecularMetrics.synthetic_accessibility_score_scores(mols,\n norm=True) + MolecularMetrics.novel_scores(\n mols, data) + (1 - MolecularMetrics.novel_scores(mols, data)) * 0.3) / 4\n\n return scores\n\n @staticmethod\n def constant_bump(x, x_low, x_high, decay=0.025):\n return np.select(condlist=[x <= x_low, x >= x_high],\n choicelist=[np.exp(- (x - x_low) ** 2 / decay),\n np.exp(- (x - x_high) ** 2 / decay)],\n default=np.ones_like(x))\n\ndef mols2grid_image(mols, molsPerRow):\n mols = [e if e is not None else Chem.RWMol() for e in mols]\n\n for mol in mols:\n AllChem.Compute2DCoords(mol)\n\n return Draw.MolsToGridImage(mols, molsPerRow=molsPerRow, subImgSize=(150, 150))\n\n\ndef classification_report(data, model, session, sample=False):\n _, _, _, a, x, _, f, _, _ = data.next_validation_batch()\n\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [\n model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,\n model.node_features: f, model.training: False,\n model.variational: False})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n\n y_true = e.flatten()\n y_pred = a.flatten()\n target_names = [str(Chem.rdchem.BondType.values[int(e)]) for e in data.bond_decoder_m.values()]\n\n print('######## Classification Report ########\\n')\n print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),\n target_names=target_names))\n\n print('######## Confusion Matrix ########\\n')\n print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))\n\n y_true = n.flatten()\n y_pred = x.flatten()\n target_names = [Chem.Atom(e).GetSymbol() for e in data.atom_decoder_m.values()]\n\n print('######## Classification Report ########\\n')\n print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),\n target_names=target_names))\n\n print('\\n######## Confusion Matrix ########\\n')\n print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))\n\n\ndef reconstructions(data, model, session, batch_dim=10, sample=False):\n m0, _, _, a, x, _, f, _, _ = data.next_train_batch(batch_dim)\n\n n, e = session.run([model.nodes_gumbel_argmax, 
model.edges_gumbel_argmax] if sample else [\n model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,\n model.node_features: f, model.training: False,\n model.variational: False})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n\n m1 = np.array([e if e is not None else Chem.RWMol() for e in [data.matrices2mol(n_, e_, strict=True)\n for n_, e_ in zip(n, e)]])\n\n mols = np.vstack((m0, m1)).T.flatten()\n\n return mols\n\n\ndef samples(data, model, session, embeddings, sample=False):\n n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [\n model.nodes_argmax, model.edges_argmax], feed_dict={\n model.embeddings: embeddings, model.training: False})\n n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)\n\n mols = [data.matrices2mol(n_, e_, strict=True) for n_, e_ in zip(n, e)]\n\n return mols\n\n\ndef all_scores(mols, data, norm=False, reconstruction=False):\n m0 = {k: list(filter(lambda e: e is not None, v)) for k, v in {\n 'NP score': MolecularMetrics.natural_product_scores(mols, norm=norm),\n 'QED score': MolecularMetrics.quantitative_estimation_druglikeness_scores(mols),\n 'logP score': MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=norm),\n 'SA score': MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=norm),\n 'diversity score': MolecularMetrics.diversity_scores(mols, data),\n 'drugcandidate score': MolecularMetrics.drugcandidate_scores(mols, data)}.items()}\n\n m1 = {'valid score': MolecularMetrics.valid_total_score(mols) * 100,\n 'unique score': MolecularMetrics.unique_total_score(mols) * 100,\n 'novel score': MolecularMetrics.novel_total_score(mols, data) * 100}\n\n return m0, m1\n"
] | [
[
"numpy.vstack",
"numpy.ones_like",
"numpy.random.choice",
"numpy.argmax",
"numpy.exp",
"numpy.mean"
]
] |
akaszynski/keepa | [
"ffc35edc2f7a4601408b0f0a22a8856be88dcb3e"
] | [
"keepa/interface.py"
] | [
"\"\"\"Interface module to download Amazon product and history data from\nkeepa.com\n\"\"\"\n\nimport requests\nimport asyncio\nimport datetime\nimport json\nimport logging\nimport time\nfrom functools import wraps\n\nimport aiohttp\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom keepa.query_keys import DEAL_REQUEST_KEYS, PRODUCT_REQUEST_KEYS\n\n\ndef is_documented_by(original):\n \"\"\"Avoid copying the documentation\"\"\"\n\n def wrapper(target):\n target.__doc__ = original.__doc__\n return target\n\n return wrapper\n\n\nlog = logging.getLogger(__name__)\nlog.setLevel('DEBUG')\n\n# hardcoded ordinal time from\nKEEPA_ST_ORDINAL = np.datetime64('2011-01-01')\n\n# Request limit\nREQUEST_LIMIT = 100\n\n# Status code dictionary/key\nSCODES = {'400': 'REQUEST_REJECTED',\n '402': 'PAYMENT_REQUIRED',\n '405': 'METHOD_NOT_ALLOWED',\n '429': 'NOT_ENOUGH_TOKEN'}\n\n# domain codes\n# Valid values: [ 1: com | 2: co.uk | 3: de | 4: fr | 5:\n# co.jp | 6: ca | 7: cn | 8: it | 9: es | 10: in | 11: com.mx ]\nDCODES = ['RESERVED', 'US', 'GB', 'DE', 'FR', 'JP', 'CA', 'CN', 'IT', 'ES',\n 'IN', 'MX']\n\n# csv indices. used when parsing csv and stats fields.\n# https://github.com/keepacom/api_backend\n # see api_backend/src/main/java/com/keepa/api/backend/structs/Product.java\n # [index in csv, key name, isfloat(is price or rating)]\ncsv_indices = [[0, 'AMAZON', True],\n [1, 'NEW', True],\n [2, 'USED', True],\n [3, 'SALES', False],\n [4, 'LISTPRICE', True],\n [5, 'COLLECTIBLE', True],\n [6, 'REFURBISHED', True],\n [7, 'NEW_FBM_SHIPPING', True],\n [8, 'LIGHTNING_DEAL', True],\n [9, 'WAREHOUSE', True],\n [10, 'NEW_FBA', True],\n [11, 'COUNT_NEW', False],\n [12, 'COUNT_USED', False],\n [13, 'COUNT_REFURBISHED', False],\n [14, 'CollectableOffers', False],\n [15, 'EXTRA_INFO_UPDATES', False],\n [16, 'RATING', True],\n [17, 'COUNT_REVIEWS', False],\n [18, 'BUY_BOX_SHIPPING', True],\n [19, 'USED_NEW_SHIPPING', True],\n [20, 'USED_VERY_GOOD_SHIPPING', True],\n [21, 'USED_GOOD_SHIPPING', True],\n [22, 'USED_ACCEPTABLE_SHIPPING', True],\n [23, 'COLLECTIBLE_NEW_SHIPPING', True],\n [24, 'COLLECTIBLE_VERY_GOOD_SHIPPING', True],\n [25, 'COLLECTIBLE_GOOD_SHIPPING', True],\n [26, 'COLLECTIBLE_ACCEPTABLE_SHIPPING', True],\n [27, 'REFURBISHED_SHIPPING', True],\n [28, 'EBAY_NEW_SHIPPING', True],\n [29, 'EBAY_USED_SHIPPING', True],\n [30, 'TRADE_IN', True],\n [31, 'RENT', False]]\n\n\ndef _parse_stats(stats, to_datetime):\n \"\"\"Parses *numeric* stats object. There is no need to parse strings or list of strings.\\n\n Keepa stats object response documentation: https://keepa.com/#!discuss/t/statistics-object/1308\"\"\"\n\n stats_keys_parse_not_required = {\n 'buyBoxSellerId',\n 'sellerIdsLowestFBA',\n 'sellerIdsLowestFBM',\n 'buyBoxShippingCountry',\n 'buyBoxAvailabilityMessage',\n }\n stats_parsed = {}\n\n for stat_key, stat_value in stats.items():\n if stat_key in stats_keys_parse_not_required:\n stat_value = None\n\n elif isinstance(stat_value, int) and stat_value < 0: # -1 or -2 means not exist. 
0 doesn't mean not exist.\n stat_value = None\n\n if stat_value is not None:\n if stat_key == 'lastOffersUpdate':\n stats_parsed[stat_key] = keepa_minutes_to_time([stat_value], to_datetime)[0]\n elif isinstance(stat_value, list) and len(stat_value) > 0:\n stat_value_dict = {}\n convert_time_in_value_pair = any(map(lambda v: v is not None and isinstance(v, list), stat_value))\n\n for ind, key, isfloat in csv_indices:\n stat_value_item = stat_value[ind] if ind < len(stat_value) else None\n\n def normalize_value(v):\n if v < 0:\n return None\n\n if isfloat:\n v = float(v) / 100\n if key == 'RATING':\n v = v * 10\n\n return v\n\n if stat_value_item is not None:\n if convert_time_in_value_pair:\n stat_value_time, stat_value_item = stat_value_item\n stat_value_item = normalize_value(stat_value_item)\n if stat_value_item is not None:\n stat_value_time = keepa_minutes_to_time([stat_value_time], to_datetime)[0]\n stat_value_item = (stat_value_time, stat_value_item)\n else:\n stat_value_item = normalize_value(stat_value_item)\n\n if stat_value_item is not None:\n stat_value_dict[key] = stat_value_item\n\n if len(stat_value_dict) > 0:\n stats_parsed[stat_key] = stat_value_dict\n else:\n stats_parsed[stat_key] = stat_value\n\n return stats_parsed\n\n\n_seller_time_data_keys = ['trackedSince', 'lastUpdate']\n\ndef _parse_seller(seller_raw_response, to_datetime):\n sellers = list(seller_raw_response.values())\n for seller in sellers:\n\n def convert_time_data(key):\n date_val = seller.get(key, None)\n if date_val is not None:\n return (key, keepa_minutes_to_time([date_val], to_datetime)[0])\n else:\n return None\n\n seller.update(filter(lambda p: p is not None, map(convert_time_data, _seller_time_data_keys)))\n\n return dict(map(lambda seller: (seller['sellerId'], seller), sellers))\n\n\ndef parse_csv(csv, to_datetime=True, out_of_stock_as_nan=True):\n \"\"\"Parses csv list from keepa into a python dictionary.\n\n Parameters\n ----------\n csv : list\n csv list from keepa\n\n to_datetime : bool, optional\n Modifies numpy minutes to datetime.datetime values.\n Default True.\n\n out_of_stock_as_nan : bool, optional\n When True, prices are NAN when price category is out of stock.\n When False, prices are -0.01\n Default True\n\n Returns\n -------\n product_data : dict\n Dictionary containing the following fields with timestamps:\n\n AMAZON: Amazon price history\n\n NEW: Marketplace/3rd party New price history - Amazon is\n considered to be part of the marketplace as well, so if\n Amazon has the overall lowest new (!) price, the\n marketplace new price in the corresponding time interval\n will be identical to the Amazon price (except if there is\n only one marketplace offer). Shipping and Handling costs\n not included!\n\n USED: Marketplace/3rd party Used price history\n\n SALES: Sales Rank history. Not every product has a Sales Rank.\n\n LISTPRICE: List Price history\n\n 5 COLLECTIBLE: Collectible Price history\n\n 6 REFURBISHED: Refurbished Price history\n\n 7 NEW_FBM_SHIPPING: 3rd party (not including Amazon) New price\n history including shipping costs, only fulfilled by\n merchant (FBM).\n\n 8 LIGHTNING_DEAL: 3rd party (not including Amazon) New price\n history including shipping costs, only fulfilled by\n merchant (FBM).\n\n 9 WAREHOUSE: Amazon Warehouse Deals price history. 
Mostly of\n used condition, rarely new.\n\n 10 NEW_FBA: Price history of the lowest 3rd party (not\n including Amazon/Warehouse) New offer that is fulfilled\n by Amazon\n\n 11 COUNT_NEW: New offer count history\n\n 12 COUNT_USED: Used offer count history\n\n 13 COUNT_REFURBISHED: Refurbished offer count history\n\n 14 COUNT_COLLECTIBLE: Collectible offer count history\n\n 16 RATING: The product's rating history. A rating is an\n integer from 0 to 50 (e.g. 45 = 4.5 stars)\n\n 17 COUNT_REVIEWS: The product's review count history.\n\n 18 BUY_BOX_SHIPPING: The price history of the buy box. If no\n offer qualified for the buy box the price has the value\n -1. Including shipping costs. The ``buybox`` parameter\n must be True for this field to be in the data.\n\n 19 USED_NEW_SHIPPING: \"Used - Like New\" price history\n including shipping costs.\n\n 20 USED_VERY_GOOD_SHIPPING: \"Used - Very Good\" price history\n including shipping costs.\n\n 21 USED_GOOD_SHIPPING: \"Used - Good\" price history including\n shipping costs.\n\n 22 USED_ACCEPTABLE_SHIPPING: \"Used - Acceptable\" price history\n including shipping costs.\n\n 23 COLLECTIBLE_NEW_SHIPPING: \"Collectible - Like New\" price\n history including shipping costs.\n\n 24 COLLECTIBLE_VERY_GOOD_SHIPPING: \"Collectible - Very Good\"\n price history including shipping costs.\n\n 25 COLLECTIBLE_GOOD_SHIPPING: \"Collectible - Good\" price\n history including shipping costs.\n\n 26 COLLECTIBLE_ACCEPTABLE_SHIPPING: \"Collectible - Acceptable\"\n price history including shipping costs.\n\n 27 REFURBISHED_SHIPPING: Refurbished price history including\n shipping costs.\n\n 30 TRADE_IN: The trade in price history. Amazon trade-in is\n not available for every locale.\n\n 31 RENT: Rental price history. Requires use of the rental\n and offers parameter. Amazon Rental is only available\n for Amazon US.\n\n Notes\n -----\n Negative prices\n\n \"\"\"\n product_data = {}\n\n for ind, key, isfloat in csv_indices:\n if csv[ind]: # Check if entry it exists\n if 'SHIPPING' in key: # shipping price is included\n # Data goes [time0, value0, shipping0, time1, value1,\n # shipping1, ...]\n times = csv[ind][::3]\n values = np.array(csv[ind][1::3])\n values += np.array(csv[ind][2::3])\n else:\n # Data goes [time0, value0, time1, value1, ...]\n times = csv[ind][::2]\n values = np.array(csv[ind][1::2])\n\n # Convert to float price if applicable\n if isfloat:\n nan_mask = values < 0\n values = values.astype(np.float)/100\n if out_of_stock_as_nan:\n values[nan_mask] = np.nan\n\n if key == 'RATING':\n values *= 10\n\n timeval = keepa_minutes_to_time(times, to_datetime)\n\n product_data['%s_time' % key] = timeval\n product_data[key] = values\n\n # combine time and value into a data frame using time as index\n product_data['df_%s' % key] = pd.DataFrame({'value': values}, index=timeval)\n\n return product_data\n\n\ndef format_items(items):\n \"\"\" Checks if the input items are valid and formats them \"\"\"\n if isinstance(items, list) or isinstance(items, np.ndarray):\n return np.unique(items)\n elif isinstance(items, str):\n return np.asarray([items])\n\n\nclass Keepa():\n \"\"\"Support a synchronous Python interface to keepa server.\n\n Initializes API with access key. Access key can be obtained by\n signing up for a reoccurring or one time plan at:\n https://keepa.com/#!api\n\n Parameters\n ----------\n accesskey : str\n 64 character access key string.\n\n timeout : float, optional\n Default timeout when issuing any request. 
This is not a time\n limit on the entire response download; rather, an exception is\n raised if the server has not issued a response for timeout\n seconds. Setting this to 0 disables the timeout, but will\n cause any request to hang indefiantly should keepa.com be down\n\n Examples\n --------\n Create the api object\n\n >>> import keepa\n >>> mykey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\n >>> api = keepa.Keepa(mykey)\n\n Request data from two ASINs\n\n >>> products = api.query(['0439064872', '1426208081'])\n\n Print item details\n\n >>> print('Item 1')\n >>> print('\\t ASIN: {:s}'.format(products[0]['asin']))\n >>> print('\\t Title: {:s}'.format(products[0]['title']))\n\n Print item price\n\n >>> usedprice = products[0]['data']['MarketplaceUsed']\n >>> usedtimes = products[0]['data']['MarketplaceUsed_time']\n >>> print('\\t Used price: ${:.2f}'.format(usedprice[-1]))\n >>> print('\\t as of: {:s}'.format(str(usedtimes[-1])))\n \"\"\"\n\n def __init__(self, accesskey, timeout=10):\n self.accesskey = accesskey\n self.status = None\n self.tokens_left = 0\n self._timeout = timeout\n\n # Store user's available tokens\n log.info('Connecting to keepa using key ending in %s', accesskey[-6:])\n self.update_status()\n log.info('%d tokens remain', self.tokens_left)\n\n @property\n def time_to_refill(self):\n \"\"\" Returns the time to refill in seconds \"\"\"\n # Get current timestamp in milliseconds from UNIX epoch\n now = int(time.time() * 1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n\n # wait plus one second fudge factor\n timetorefil = timeatrefile - now + 1000\n if timetorefil < 0:\n timetorefil = 0\n\n # Account for negative tokens left\n if self.tokens_left < 0:\n timetorefil += (abs(self.tokens_left) / self.status['refillRate']) * 60000\n\n # Return value in seconds\n return timetorefil / 1000.0\n\n def update_status(self):\n \"\"\" Updates available tokens \"\"\"\n self.status = self._request('token', {'key': self.accesskey}, wait=False)\n\n def wait_for_tokens(self):\n \"\"\"Checks any remaining tokens and waits if none are available. \"\"\"\n self.update_status()\n\n # Wait if no tokens available\n if self.tokens_left <= 0:\n tdelay = self.time_to_refill\n log.warning('Waiting %.0f seconds for additional tokens' % tdelay)\n time.sleep(tdelay)\n self.update_status()\n\n def query(self, items, stats=None, domain='US', history=True,\n offers=None, update=None, to_datetime=True,\n rating=False, out_of_stock_as_nan=True, stock=False,\n product_code_is_asin=True, progress_bar=True, buybox=False,\n wait=True, days=None, only_live_offers=None, raw=False):\n \"\"\"Performs a product query of a list, array, or single ASIN.\n\n Returns a list of product data with one entry for each\n product.\n\n Parameters\n ----------\n items : str, list, np.ndarray\n A list, array, or single asin, UPC, EAN, or ISBN-13\n identifying a product. ASINs should be 10 characters and\n match a product on Amazon. Items not matching Amazon\n product or duplicate Items will return no data. When\n using non-ASIN items, set product_code_is_asin to False\n\n stats : int or date, optional\n No extra token cost. If specified the product object will\n have a stats field with quick access to current prices,\n min/max prices and the weighted mean values. 
If the offers\n parameter was used it will also provide stock counts and\n buy box information.\n\n You can provide the stats parameter in two forms:\n\n Last x days (positive integer value): calculates the stats\n of the last x days, where x is the value of the stats\n parameter. Interval: You can provide a date range for the\n stats calculation. You can specify the range via two\n timestamps (unix epoch time milliseconds) or two date\n strings (ISO8601, with or without time in UTC).\n\n domain : str, optional\n One of the following Amazon domains: RESERVED, US, GB, DE,\n FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.\n\n offers : int, optional\n Adds available offers to product data. Default 0. Must\n be between 20 and 100.\n\n update : int, optional\n if data is older than the input integer, keepa will\n update their database and return live data. If set to 0\n (live data), request may cost an additional token.\n Default None\n\n history : bool, optional\n When set to True includes the price, sales, and offer\n history of a product. Set to False to reduce request time\n if data is not required. Default True\n\n rating : bool, optional\n When set to to True, includes the existing RATING and\n COUNT_REVIEWS history of the csv field. Default False\n\n to_datetime : bool, optional\n Modifies numpy minutes to datetime.datetime values.\n Default True.\n\n out_of_stock_as_nan : bool, optional\n When True, prices are NAN when price category is out of\n stock. When False, prices are -0.01 Default True\n\n stock : bool, optional\n Can only be used if the offers parameter is also True. If\n True, the stock will be collected for all retrieved live\n offers. Note: We can only determine stock up 10 qty. Stock\n retrieval takes additional time, expect the request to\n take longer. Existing stock history will be included\n whether or not the stock parameter is used.\n\n product_code_is_asin : bool, optional\n The type of product code you are requesting. True when\n product code is an ASIN, an Amazon standard identification\n number, or 'code', for UPC, EAN, or ISBN-13 codes.\n\n progress_bar : bool, optional\n Display a progress bar using ``tqdm``. Defaults to\n ``True``.\n\n buybox : bool, optional\n Additional token cost: 2 per product). When true the\n product and statistics object will include all available\n buy box related data:\n\n - current price, price history, and statistical values\n - buyBoxSellerIdHistory\n - all buy box fields in the statistics object\n\n The buybox parameter\n does not trigger a fresh data collection. If the offers\n parameter is used the buybox parameter is ignored, as the\n offers parameter also provides access to all buy box\n related data. To access the statistics object the stats\n parameter is required.\n\n wait : bool, optional\n Wait available token before doing effective query,\n Defaults to ``True``.\n\n only_live_offers : bool, optional\n If set to True, the product object will only include live\n marketplace offers (when used in combination with the\n offers parameter). If you do not need historical offers\n use this to have them removed from the response. This can\n improve processing time and considerably decrease the size\n of the response. Default None\n\n days : int, optional\n Any positive integer value. If specified and has positive\n value X the product object will limit all historical data\n to the recent X days. This includes the csv,\n buyBoxSellerIdHistory, salesRanks, offers and\n offers.offerCSV fields. 
If you do not need old historical\n            data use this to have it removed from the response. This\n            can improve processing time and considerably decrease the\n            size of the response. The parameter does not use calendar\n            days - so 1 day equals the last 24 hours. The oldest data\n            point of each field may have a date value which is out of\n            the specified range. This means the value of the field has\n            not changed since that date and is still active. Default\n            ``None``\n\n        raw : bool, optional\n            When ``True``, return the raw request response. This is\n            only available in the non-async class.\n\n        Returns\n        -------\n        list\n            List of products when ``raw=False``. Each product\n            within the list is a dictionary. The keys of each item\n            may vary, so see the keys within each product for further\n            details.\n\n            Each product should contain at a minimum a \"data\" key\n            containing a formatted dictionary. For the available\n            fields see the notes section.\n\n            When ``raw=True``, a list of unparsed responses is\n            returned as :class:`requests.models.Response`.\n\n            See: https://keepa.com/#!discuss/t/product-object/116\n\n        Notes\n        -----\n        The following are the data fields of a product dictionary:\n\n        AMAZON\n            Amazon price history\n\n        NEW\n            Marketplace/3rd party New price history - Amazon is\n            considered to be part of the marketplace as well, so if\n            Amazon has the overall lowest new (!) price, the\n            marketplace new price in the corresponding time interval\n            will be identical to the Amazon price (except if there is\n            only one marketplace offer). Shipping and Handling costs\n            not included!\n\n        USED\n            Marketplace/3rd party Used price history\n\n        SALES\n            Sales Rank history. Not every product has a Sales Rank.\n\n        LISTPRICE\n            List Price history\n\n        COLLECTIBLE\n            Collectible Price history\n\n        REFURBISHED\n            Refurbished Price history\n\n        NEW_FBM_SHIPPING\n            3rd party (not including Amazon) New price history\n            including shipping costs, only fulfilled by merchant\n            (FBM).\n\n        LIGHTNING_DEAL\n            Lightning Deal price history including shipping costs.\n\n        WAREHOUSE\n            Amazon Warehouse Deals price history. Mostly of used\n            condition, rarely new.\n\n        NEW_FBA\n            Price history of the lowest 3rd party (not including\n            Amazon/Warehouse) New offer that is fulfilled by Amazon\n\n        COUNT_NEW\n            New offer count history\n\n        COUNT_USED\n            Used offer count history\n\n        COUNT_REFURBISHED\n            Refurbished offer count history\n\n        COUNT_COLLECTIBLE\n            Collectible offer count history\n\n        RATING\n            The product's rating history. A rating is an integer from\n            0 to 50 (e.g. 45 = 4.5 stars)\n\n        COUNT_REVIEWS\n            The product's review count history.\n\n        BUY_BOX_SHIPPING\n            The price history of the buy box, including shipping\n            costs. If no offer qualified for the buy box the price\n            has the value -1. The ``buybox`` parameter must be True\n            for this field to be in the data.
\n\n        USED_NEW_SHIPPING\n            \"Used - Like New\" price history including shipping costs.\n\n        USED_VERY_GOOD_SHIPPING\n            \"Used - Very Good\" price history including shipping costs.\n\n        USED_GOOD_SHIPPING\n            \"Used - Good\" price history including shipping costs.\n\n        USED_ACCEPTABLE_SHIPPING\n            \"Used - Acceptable\" price history including shipping costs.\n\n        COLLECTIBLE_NEW_SHIPPING\n            \"Collectible - Like New\" price history including shipping\n            costs.\n\n        COLLECTIBLE_VERY_GOOD_SHIPPING\n            \"Collectible - Very Good\" price history including shipping\n            costs.\n\n        COLLECTIBLE_GOOD_SHIPPING\n            \"Collectible - Good\" price history including shipping\n            costs.\n\n        COLLECTIBLE_ACCEPTABLE_SHIPPING\n            \"Collectible - Acceptable\" price history including\n            shipping costs.\n\n        REFURBISHED_SHIPPING\n            Refurbished price history including shipping costs.\n\n        TRADE_IN\n            The trade in price history. Amazon trade-in is not\n            available for every locale.
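\n\n        Examples\n        --------\n        A minimal sketch of a query requesting offers; the parsed\n        field names follow the list above (here ``AMAZON``, with a\n        matching ``AMAZON_time`` array), and availability depends on\n        the product:\n\n        >>> products = api.query('1426208081', offers=20)\n        >>> product = products[0]\n        >>> prices = product['data']['AMAZON']\n        >>> times = product['data']['AMAZON_time']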
\n        \"\"\"\n        # Format items into numpy array\n        try:\n            items = format_items(items)\n        except BaseException:\n            raise Exception('Invalid product codes input')\n        assert len(items), 'No valid product codes'\n\n        nitems = len(items)\n        if nitems == 1:\n            log.debug('Executing single product query')\n        else:\n            log.debug('Executing %d item product query', nitems)\n\n        # check offer input\n        if offers:\n            if not isinstance(offers, int):\n                raise TypeError('Parameter \"offers\" must be an integer')\n\n            if offers > 100 or offers < 20:\n                raise ValueError('Parameter \"offers\" must be between 20 and 100')\n\n        # Report time to completion\n        tcomplete = float(nitems - self.tokens_left) / self.status['refillRate'] - (\n            60000 - self.status['refillIn']) / 60000.0\n        if tcomplete < 0.0:\n            tcomplete = 0.5\n        log.debug('Estimated time to complete %d request(s) is %.2f minutes',\n                  nitems, tcomplete)\n        log.debug('\\twith a refill rate of %d token(s) per minute',\n                  self.status['refillRate'])\n\n        # product list\n        products = []\n\n        pbar = None\n        if progress_bar:\n            pbar = tqdm(total=nitems)\n\n        # Number of requests is dependent on the number of items and\n        # request limit. Use available tokens first\n        idx = 0  # or number complete\n        while idx < nitems:\n            nrequest = nitems - idx\n\n            # cap request\n            if nrequest > REQUEST_LIMIT:\n                nrequest = REQUEST_LIMIT\n\n            # request from keepa and increment current position\n            item_request = items[idx:idx + nrequest]\n            response = self._product_query(\n                item_request,\n                product_code_is_asin,\n                stats=stats,\n                domain=domain, stock=stock,\n                offers=offers, update=update,\n                history=history, rating=rating,\n                to_datetime=to_datetime,\n                out_of_stock_as_nan=out_of_stock_as_nan,\n                buybox=buybox,\n                wait=wait,\n                days=days,\n                only_live_offers=only_live_offers,\n                raw=raw,\n            )\n            idx += nrequest\n            if raw:\n                products.append(response)\n            else:\n                products.extend(response['products'])\n\n            if pbar is not None:\n                pbar.update(nrequest)\n\n        return products\n\n    def _product_query(self, items, product_code_is_asin=True, **kwargs):\n        \"\"\"Sends query to keepa server and returns parsed JSON result.\n\n        Parameters\n        ----------\n        items : np.ndarray\n            Array of between 1 and 100 ASINs or product codes. If the\n            items are UPC, EAN, or ISBN-13 codes,\n            ``product_code_is_asin`` must be False.\n\n        product_code_is_asin : bool, optional\n            Interpret product codes as ASINs only. When False, the\n            items are sent to keepa using the ``code`` key instead.\n\n        stats : int or date format\n            Set the stats time range used when computing product\n            statistics such as the sales rank.\n\n        domain : str\n            One of the following Amazon domains:\n            RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX\n\n        offers : int, optional\n            Adds product offers to product data.\n\n        update : int, optional\n            If data is older than the input integer, keepa will update\n            their database and return live data. If set to 0 (live\n            data), then request may cost an additional token.\n\n        history : bool, optional\n            When set to True includes the price, sales, and offer\n            history of a product. Set to False to reduce request time\n            if data is not required.\n\n        Returns\n        -------\n        products : list\n            List of products. Length equal to number of successful\n            ASINs.\n\n        refillIn : float\n            Time in milliseconds to the next refill of tokens.\n\n        refillRate : float\n            Number of tokens refilled per minute\n\n        timestamp : float\n            Timestamp of the server response, in milliseconds from the\n            Unix epoch.\n\n        tokensLeft : int\n            Remaining tokens\n\n        tz : int\n            Timezone. 0 is UTC\n\n        \"\"\"\n        # ASINs convert to comma joined string\n        assert len(items) <= 100\n\n        if product_code_is_asin:\n            kwargs['asin'] = ','.join(items)\n        else:\n            kwargs['code'] = ','.join(items)\n\n        kwargs['key'] = self.accesskey\n        kwargs['domain'] = DCODES.index(kwargs['domain'])\n\n        # Convert bool values to 0 and 1.\n        kwargs['stock'] = int(kwargs['stock'])\n        kwargs['history'] = int(kwargs['history'])\n        kwargs['rating'] = int(kwargs['rating'])\n        kwargs['buybox'] = int(kwargs['buybox'])\n\n        if kwargs['update'] is None:\n            del kwargs['update']\n        else:\n            kwargs['update'] = int(kwargs['update'])\n\n        if kwargs['offers'] is None:\n            del kwargs['offers']\n        else:\n            kwargs['offers'] = int(kwargs['offers'])\n\n        if kwargs['only_live_offers'] is None:\n            del kwargs['only_live_offers']\n        else:\n            # Keepa's API expects 'only-live-offers' rather than the\n            # snake_case name used throughout this interface.\n            kwargs['only-live-offers'] = int(kwargs.pop('only_live_offers'))\n\n        if kwargs['days'] is None:\n            del kwargs['days']\n        else:\n            assert kwargs['days'] > 0\n\n        if kwargs['stats'] is None:\n            del kwargs['stats']\n\n        out_of_stock_as_nan = kwargs.pop('out_of_stock_as_nan', True)\n        to_datetime = kwargs.pop('to_datetime', True)\n\n        # Query and replace csv with parsed data if history enabled\n        wait = kwargs.pop('wait', None)\n        raw_response = kwargs.pop('raw', False)\n        response = self._request('product', kwargs, wait=wait,\n                                 raw_response=raw_response)\n\n        if kwargs['history'] and not raw_response:\n            for product in response['products']:\n                if product['csv']:  # if data exists\n                    product['data'] = parse_csv(product['csv'],\n                                                to_datetime,\n                                                out_of_stock_as_nan)\n\n        if kwargs.get('stats', None) and not raw_response:\n            for product in response['products']:\n                stats = product.get('stats', None)\n                if stats:\n                    product['stats_parsed'] = _parse_stats(stats, to_datetime)\n\n        return response\n\n    def best_sellers_query(self, category, rank_avg_range=0, domain='US', wait=True):\n        \"\"\"\n        Retrieve an ASIN list of the most popular products based on\n        sales in a specific category or product group. See\n        \"search_for_categories\" for information on how to get a\n        category.\n\n        Root category lists (e.g. \"Home & Kitchen\") or product group\n        lists contain up to 100,000 ASINs.\n\n        Sub-category lists (e.g. 
\"Home Entertainment Furniture\")\n contain up to 3,000 ASINs. As we only have access to the\n product's primary sales rank and not the ones of all\n categories it is listed in, the sub-category lists are created\n by us based on the product's primary sales rank and do not\n reflect the actual ordering on Amazon.\n\n Lists are ordered, starting with the best selling product.\n\n Lists are updated daily. If a product does not have an\n accessible sales rank it will not be included in the\n lists. This in particular affects many products in the\n Clothing and Sports & Outdoors categories.\n\n We can not correctly identify the sales rank reference\n category in all cases, so some products may be misplaced.\n\n Parameters\n ----------\n category : str\n The category node id of the category you want to request\n the best sellers list for. You can find category node ids\n via the category search \"search_for_categories\"\n\n domain : str\n Amazon locale you want to access. Must be one of the following\n RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX\n Default US\n\n wait : bool, optional\n Wait available token before doing effective query.\n Defaults to ``True``.\n\n Returns\n -------\n best_sellers : list\n List of best seller ASINs\n \"\"\"\n assert domain in DCODES, 'Invalid domain code'\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'category': category,\n 'range': rank_avg_range}\n\n response = self._request('bestsellers', payload, wait=wait)\n if 'bestSellersList' in response:\n return response['bestSellersList']['asinList']\n else: # pragma: no cover\n log.info('Best sellers search results not yet available')\n\n def search_for_categories(self, searchterm, domain='US', wait=True):\n \"\"\"Searches for categories from Amazon.\n\n Parameters\n ----------\n searchterm : str\n Input search term.\n\n wait : bool, optional\n Wait available token before doing effective query.\n Defaults to ``True``.\n\n Returns\n -------\n categories : list\n The response contains a categories list with all matching\n categories.\n\n Examples\n --------\n Print all categories from science\n\n >>> categories = api.search_for_categories('science')\n >>> for cat_id in categories:\n >>> print(cat_id, categories[cat_id]['name'])\n\n \"\"\"\n assert domain in DCODES, 'Invalid domain code'\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'type': 'category',\n 'term': searchterm}\n\n response = self._request('search', payload, wait=wait)\n if response['categories'] == {}: # pragma no cover\n raise Exception('Categories search results not yet available ' +\n 'or no search terms found.')\n else:\n return response['categories']\n\n def category_lookup(self, category_id, domain='US',\n include_parents=0, wait=True):\n \"\"\"\n Return root categories given a categoryId.\n\n Parameters\n ----------\n category_id : int\n ID for specific category or 0 to return a list of root\n categories.\n\n domain : str\n Amazon locale you want to access. 
\n        \"\"\"\n        assert domain in DCODES, 'Invalid domain code'\n\n        payload = {'key': self.accesskey,\n                   'domain': DCODES.index(domain),\n                   'category': category,\n                   'range': rank_avg_range}\n\n        response = self._request('bestsellers', payload, wait=wait)\n        if 'bestSellersList' in response:\n            return response['bestSellersList']['asinList']\n        else:  # pragma: no cover\n            log.info('Best sellers search results not yet available')\n\n    def search_for_categories(self, searchterm, domain='US', wait=True):\n        \"\"\"Searches for categories from Amazon.\n\n        Parameters\n        ----------\n        searchterm : str\n            Input search term.\n\n        domain : str, optional\n            Amazon locale you want to access. Must be one of the\n            following RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES,\n            IN, MX. Default US\n\n        wait : bool, optional\n            Wait for an available token before sending the query.\n            Defaults to ``True``.\n\n        Returns\n        -------\n        categories : list\n            The response contains a categories list with all matching\n            categories.\n\n        Examples\n        --------\n        Print all categories from science\n\n        >>> categories = api.search_for_categories('science')\n        >>> for cat_id in categories:\n        >>>    print(cat_id, categories[cat_id]['name'])\n\n        \"\"\"\n        assert domain in DCODES, 'Invalid domain code'\n\n        payload = {'key': self.accesskey,\n                   'domain': DCODES.index(domain),\n                   'type': 'category',\n                   'term': searchterm}\n\n        response = self._request('search', payload, wait=wait)\n        if response['categories'] == {}:  # pragma: no cover\n            raise Exception('Categories search results not yet available ' +\n                            'or no search terms found.')\n        else:\n            return response['categories']\n\n    def category_lookup(self, category_id, domain='US',\n                        include_parents=0, wait=True):\n        \"\"\"\n        Return root categories given a categoryId.\n\n        Parameters\n        ----------\n        category_id : int\n            ID for specific category or 0 to return a list of root\n            categories.\n\n        domain : str\n            Amazon locale you want to access. Must be one of the following\n            RESERVED, US, GB, DE, FR, JP, CA, CN, IT, ES, IN, MX\n            Default US\n\n        include_parents : int\n            When set to 1, includes the parent categories of the\n            looked-up category. Default 0.\n\n        wait : bool, optional\n            Wait for an available token before sending the query.\n            Defaults to ``True``.\n\n        Returns\n        -------\n        categories : list\n            Output format is the same as search_for_categories.\n\n        Examples\n        --------\n        Use 0 to return all root categories\n\n        >>> categories = api.category_lookup(0)\n\n        Print all root categories\n\n        >>> for cat_id in categories:\n        >>>    print(cat_id, categories[cat_id]['name'])\n        \"\"\"\n        assert domain in DCODES, 'Invalid domain code'\n\n        payload = {'key': self.accesskey,\n                   'domain': DCODES.index(domain),\n                   'category': category_id,\n                   'parents': include_parents}\n\n        response = self._request('category', payload, wait=wait)\n        if response['categories'] == {}:  # pragma: no cover\n            raise Exception('Category lookup results not yet available or no ' +\n                            'match found.')\n        else:\n            return response['categories']\n\n    def seller_query(self, seller_id, domain='US', to_datetime=True,\n                     storefront=False, update=None, wait=True):\n        \"\"\"Receives seller information for a given seller id. If a\n        seller is not found no tokens will be consumed.\n\n        Token cost: 1 per requested seller\n\n        Parameters\n        ----------\n        seller_id : str or list\n            The seller id of the merchant you want to request. For\n            batch requests, you may submit a list of up to 100\n            seller_ids. The seller id can also be found on Amazon on\n            seller profile pages in the seller parameter of the URL as\n            well as in the offers results from a product query.\n\n        domain : str, optional\n            One of the following Amazon domains: RESERVED, US, GB, DE,\n            FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.\n\n        to_datetime : bool, optional\n            Modifies numpy minutes to datetime.datetime values in the\n            parsed seller data. Default True.\n\n        storefront : bool, optional\n            If specified the seller object will contain additional\n            information about what items the seller is listing on Amazon.\n            This includes a list of ASINs as well as the total amount of\n            items the seller has listed. The following seller object\n            fields will be set if data is available: asinList,\n            asinListLastSeen, totalStorefrontAsinsCSV. If no data is\n            available no additional tokens will be consumed. The ASIN\n            list can contain up to 100,000 items. As using the storefront\n            parameter does not trigger any new collection it does not\n            increase the processing time of the request, though the\n            response may be much bigger in size. The total storefront\n            ASIN count will not be updated, only historical data will\n            be provided (when available).\n\n        update : int, optional\n            Positive integer value. If the last live data collection from\n            the Amazon storefront page is older than update hours force a\n            new collection. Use this parameter in conjunction with the\n            storefront parameter. Token cost will only be applied if a new\n            collection is triggered.\n\n            Using this parameter you can achieve the following:\n\n            - Retrieve data from Amazon: a storefront ASIN list\n              containing up to 2,400 ASINs, in addition to all ASINs\n              already collected through our database.\n            - Force a refresh: Always retrieve live data with the\n              value 0.\n            - Retrieve the total number of listings of this seller:\n              the totalStorefrontAsinsCSV field of the seller object\n              will be updated.\n\n        wait : bool, optional\n            Wait for an available token before sending the query.\n            Defaults to ``True``.\n\n        Returns\n        -------\n        seller_info : dict\n            Dictionary containing one entry per input ``seller_id``.\n\n        Examples\n        --------\n        >>> seller_info = api.seller_query('A2L77EE7U53NWQ', 'US')
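\n\n        A sketch including storefront data (the extra fields are only\n        present when keepa has storefront data available):\n\n        >>> seller_info = api.seller_query('A2L77EE7U53NWQ', 'US',\n                                           storefront=True)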
\n\n        Notes\n        -----\n        Seller data is not available for Amazon China.\n        \"\"\"\n        if isinstance(seller_id, list):\n            if len(seller_id) > 100:\n                err_str = 'seller_id can contain at maximum 100 sellers'\n                raise RuntimeError(err_str)\n            seller = ','.join(seller_id)\n        else:\n            seller = seller_id\n\n        payload = {'key': self.accesskey,\n                   'domain': DCODES.index(domain),\n                   'seller': seller}\n\n        if storefront:\n            payload['storefront'] = int(storefront)\n        if update:\n            payload['update'] = update\n\n        response = self._request('seller', payload, wait=wait)\n        return _parse_seller(response['sellers'], to_datetime)\n\n    def product_finder(self, product_parms, domain='US', wait=True):\n        \"\"\"Query the keepa product database to find products matching\n        your criteria. Almost all product fields can be searched for\n        and sorted by.\n\n        Parameters\n        ----------\n        product_parms : dict\n            Dictionary containing one or more of the following keys:\n\n            - ``'author': str``\n            - ``'availabilityAmazon': int``\n            - ``'avg180_AMAZON_lte': int``\n            - ``'avg180_AMAZON_gte': int``\n            - ``'avg180_BUY_BOX_SHIPPING_lte': int``\n            - ``'avg180_BUY_BOX_SHIPPING_gte': int``\n            - ``'avg180_COLLECTIBLE_lte': int``\n            - ``'avg180_COLLECTIBLE_gte': int``\n            - ``'avg180_COUNT_COLLECTIBLE_lte': int``\n            - ``'avg180_COUNT_COLLECTIBLE_gte': int``\n            - ``'avg180_COUNT_NEW_lte': int``\n            - ``'avg180_COUNT_NEW_gte': int``\n            - ``'avg180_COUNT_REFURBISHED_lte': int``\n            - ``'avg180_COUNT_REFURBISHED_gte': int``\n            - ``'avg180_COUNT_REVIEWS_lte': int``\n            - ``'avg180_COUNT_REVIEWS_gte': int``\n            - ``'avg180_COUNT_USED_lte': int``\n            - ``'avg180_COUNT_USED_gte': int``\n            - ``'avg180_EBAY_NEW_SHIPPING_lte': int``\n            - ``'avg180_EBAY_NEW_SHIPPING_gte': int``\n            - ``'avg180_EBAY_USED_SHIPPING_lte': int``\n            - ``'avg180_EBAY_USED_SHIPPING_gte': int``\n            - ``'avg180_LIGHTNING_DEAL_lte': int``\n            - ``'avg180_LIGHTNING_DEAL_gte': int``\n            - ``'avg180_LISTPRICE_lte': int``\n            - ``'avg180_LISTPRICE_gte': int``\n            - ``'avg180_NEW_lte': int``\n            - ``'avg180_NEW_gte': int``\n            - ``'avg180_NEW_FBA_lte': int``\n            - ``'avg180_NEW_FBA_gte': int``\n            - ``'avg180_NEW_FBM_SHIPPING_lte': int``\n            - ``'avg180_NEW_FBM_SHIPPING_gte': int``\n            - ``'avg180_RATING_lte': int``\n            - ``'avg180_RATING_gte': int``\n            - ``'avg180_REFURBISHED_lte': int``\n            - ``'avg180_REFURBISHED_gte': int``\n            - ``'avg180_REFURBISHED_SHIPPING_lte': int``\n            - ``'avg180_REFURBISHED_SHIPPING_gte': int``\n            - ``'avg180_RENT_lte': int``\n            - ``'avg180_RENT_gte': int``\n            - ``'avg180_SALES_lte': int``\n            - ``'avg180_SALES_gte': int``\n            - ``'avg180_TRADE_IN_lte': int``\n            - ``'avg180_TRADE_IN_gte': int``\n            - ``'avg180_USED_lte': int``\n            - ``'avg180_USED_gte': int``\n            - ``'avg180_USED_ACCEPTABLE_SHIPPING_lte': int``\n            - ``'avg180_USED_ACCEPTABLE_SHIPPING_gte': int``\n            - 
``'avg180_USED_GOOD_SHIPPING_lte': int``\n - ``'avg180_USED_GOOD_SHIPPING_gte': int``\n - ``'avg180_USED_NEW_SHIPPING_lte': int``\n - ``'avg180_USED_NEW_SHIPPING_gte': int``\n - ``'avg180_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'avg180_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'avg180_WAREHOUSE_lte': int``\n - ``'avg180_WAREHOUSE_gte': int``\n - ``'avg1_AMAZON_lte': int``\n - ``'avg1_AMAZON_gte': int``\n - ``'avg1_BUY_BOX_SHIPPING_lte': int``\n - ``'avg1_BUY_BOX_SHIPPING_gte': int``\n - ``'avg1_COLLECTIBLE_lte': int``\n - ``'avg1_COLLECTIBLE_gte': int``\n - ``'avg1_COUNT_COLLECTIBLE_lte': int``\n - ``'avg1_COUNT_COLLECTIBLE_gte': int``\n - ``'avg1_COUNT_NEW_lte': int``\n - ``'avg1_COUNT_NEW_gte': int``\n - ``'avg1_COUNT_REFURBISHED_lte': int``\n - ``'avg1_COUNT_REFURBISHED_gte': int``\n - ``'avg1_COUNT_REVIEWS_lte': int``\n - ``'avg1_COUNT_REVIEWS_gte': int``\n - ``'avg1_COUNT_USED_lte': int``\n - ``'avg1_COUNT_USED_gte': int``\n - ``'avg1_EBAY_NEW_SHIPPING_lte': int``\n - ``'avg1_EBAY_NEW_SHIPPING_gte': int``\n - ``'avg1_EBAY_USED_SHIPPING_lte': int``\n - ``'avg1_EBAY_USED_SHIPPING_gte': int``\n - ``'avg1_LIGHTNING_DEAL_lte': int``\n - ``'avg1_LIGHTNING_DEAL_gte': int``\n - ``'avg1_LISTPRICE_lte': int``\n - ``'avg1_LISTPRICE_gte': int``\n - ``'avg1_NEW_lte': int``\n - ``'avg1_NEW_gte': int``\n - ``'avg1_NEW_FBA_lte': int``\n - ``'avg1_NEW_FBA_gte': int``\n - ``'avg1_NEW_FBM_SHIPPING_lte': int``\n - ``'avg1_NEW_FBM_SHIPPING_gte': int``\n - ``'avg1_RATING_lte': int``\n - ``'avg1_RATING_gte': int``\n - ``'avg1_REFURBISHED_lte': int``\n - ``'avg1_REFURBISHED_gte': int``\n - ``'avg1_REFURBISHED_SHIPPING_lte': int``\n - ``'avg1_REFURBISHED_SHIPPING_gte': int``\n - ``'avg1_RENT_lte': int``\n - ``'avg1_RENT_gte': int``\n - ``'avg1_SALES_lte': int``\n - ``'avg1_SALES_lte': int``\n - ``'avg1_SALES_gte': int``\n - ``'avg1_TRADE_IN_lte': int``\n - ``'avg1_TRADE_IN_gte': int``\n - ``'avg1_USED_lte': int``\n - ``'avg1_USED_gte': int``\n - ``'avg1_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'avg1_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'avg1_USED_GOOD_SHIPPING_lte': int``\n - ``'avg1_USED_GOOD_SHIPPING_gte': int``\n - ``'avg1_USED_NEW_SHIPPING_lte': int``\n - ``'avg1_USED_NEW_SHIPPING_gte': int``\n - ``'avg1_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'avg1_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'avg1_WAREHOUSE_lte': int``\n - ``'avg1_WAREHOUSE_gte': int``\n - ``'avg30_AMAZON_lte': int``\n - ``'avg30_AMAZON_gte': int``\n - ``'avg30_BUY_BOX_SHIPPING_lte': int``\n - ``'avg30_BUY_BOX_SHIPPING_gte': int``\n - ``'avg30_COLLECTIBLE_lte': int``\n - ``'avg30_COLLECTIBLE_gte': int``\n - ``'avg30_COUNT_COLLECTIBLE_lte': int``\n - ``'avg30_COUNT_COLLECTIBLE_gte': int``\n - ``'avg30_COUNT_NEW_lte': int``\n - ``'avg30_COUNT_NEW_gte': int``\n - ``'avg30_COUNT_REFURBISHED_lte': int``\n - ``'avg30_COUNT_REFURBISHED_gte': int``\n - ``'avg30_COUNT_REVIEWS_lte': int``\n - ``'avg30_COUNT_REVIEWS_gte': int``\n - ``'avg30_COUNT_USED_lte': int``\n - ``'avg30_COUNT_USED_gte': int``\n - ``'avg30_EBAY_NEW_SHIPPING_lte': int``\n - ``'avg30_EBAY_NEW_SHIPPING_gte': int``\n - ``'avg30_EBAY_USED_SHIPPING_lte': int``\n - ``'avg30_EBAY_USED_SHIPPING_gte': int``\n - ``'avg30_LIGHTNING_DEAL_lte': int``\n - ``'avg30_LIGHTNING_DEAL_gte': int``\n - ``'avg30_LISTPRICE_lte': int``\n - ``'avg30_LISTPRICE_gte': int``\n - ``'avg30_NEW_lte': int``\n - ``'avg30_NEW_gte': int``\n - ``'avg30_NEW_FBA_lte': int``\n - ``'avg30_NEW_FBA_gte': int``\n - ``'avg30_NEW_FBM_SHIPPING_lte': int``\n - ``'avg30_NEW_FBM_SHIPPING_gte': int``\n - 
``'avg30_RATING_lte': int``\n - ``'avg30_RATING_gte': int``\n - ``'avg30_REFURBISHED_lte': int``\n - ``'avg30_REFURBISHED_gte': int``\n - ``'avg30_REFURBISHED_SHIPPING_lte': int``\n - ``'avg30_REFURBISHED_SHIPPING_gte': int``\n - ``'avg30_RENT_lte': int``\n - ``'avg30_RENT_gte': int``\n - ``'avg30_SALES_lte': int``\n - ``'avg30_SALES_gte': int``\n - ``'avg30_TRADE_IN_lte': int``\n - ``'avg30_TRADE_IN_gte': int``\n - ``'avg30_USED_lte': int``\n - ``'avg30_USED_gte': int``\n - ``'avg30_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'avg30_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'avg30_USED_GOOD_SHIPPING_lte': int``\n - ``'avg30_USED_GOOD_SHIPPING_gte': int``\n - ``'avg30_USED_NEW_SHIPPING_lte': int``\n - ``'avg30_USED_NEW_SHIPPING_gte': int``\n - ``'avg30_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'avg30_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'avg30_WAREHOUSE_lte': int``\n - ``'avg30_WAREHOUSE_gte': int``\n - ``'avg7_AMAZON_lte': int``\n - ``'avg7_AMAZON_gte': int``\n - ``'avg7_BUY_BOX_SHIPPING_lte': int``\n - ``'avg7_BUY_BOX_SHIPPING_gte': int``\n - ``'avg7_COLLECTIBLE_lte': int``\n - ``'avg7_COLLECTIBLE_gte': int``\n - ``'avg7_COUNT_COLLECTIBLE_lte': int``\n - ``'avg7_COUNT_COLLECTIBLE_gte': int``\n - ``'avg7_COUNT_NEW_lte': int``\n - ``'avg7_COUNT_NEW_gte': int``\n - ``'avg7_COUNT_REFURBISHED_lte': int``\n - ``'avg7_COUNT_REFURBISHED_gte': int``\n - ``'avg7_COUNT_REVIEWS_lte': int``\n - ``'avg7_COUNT_REVIEWS_gte': int``\n - ``'avg7_COUNT_USED_lte': int``\n - ``'avg7_COUNT_USED_gte': int``\n - ``'avg7_EBAY_NEW_SHIPPING_lte': int``\n - ``'avg7_EBAY_NEW_SHIPPING_gte': int``\n - ``'avg7_EBAY_USED_SHIPPING_lte': int``\n - ``'avg7_EBAY_USED_SHIPPING_gte': int``\n - ``'avg7_LIGHTNING_DEAL_lte': int``\n - ``'avg7_LIGHTNING_DEAL_gte': int``\n - ``'avg7_LISTPRICE_lte': int``\n - ``'avg7_LISTPRICE_gte': int``\n - ``'avg7_NEW_lte': int``\n - ``'avg7_NEW_gte': int``\n - ``'avg7_NEW_FBA_lte': int``\n - ``'avg7_NEW_FBA_gte': int``\n - ``'avg7_NEW_FBM_SHIPPING_lte': int``\n - ``'avg7_NEW_FBM_SHIPPING_gte': int``\n - ``'avg7_RATING_lte': int``\n - ``'avg7_RATING_gte': int``\n - ``'avg7_REFURBISHED_lte': int``\n - ``'avg7_REFURBISHED_gte': int``\n - ``'avg7_REFURBISHED_SHIPPING_lte': int``\n - ``'avg7_REFURBISHED_SHIPPING_gte': int``\n - ``'avg7_RENT_lte': int``\n - ``'avg7_RENT_gte': int``\n - ``'avg7_SALES_lte': int``\n - ``'avg7_SALES_gte': int``\n - ``'avg7_TRADE_IN_lte': int``\n - ``'avg7_TRADE_IN_gte': int``\n - ``'avg7_USED_lte': int``\n - ``'avg7_USED_gte': int``\n - ``'avg7_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'avg7_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'avg7_USED_GOOD_SHIPPING_lte': int``\n - ``'avg7_USED_GOOD_SHIPPING_gte': int``\n - ``'avg7_USED_NEW_SHIPPING_lte': int``\n - ``'avg7_USED_NEW_SHIPPING_gte': int``\n - ``'avg7_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'avg7_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'avg7_WAREHOUSE_lte': int``\n - ``'avg7_WAREHOUSE_gte': int``\n - ``'avg90_AMAZON_lte': int``\n - ``'avg90_AMAZON_gte': int``\n - ``'avg90_BUY_BOX_SHIPPING_lte': int``\n - ``'avg90_BUY_BOX_SHIPPING_gte': int``\n - ``'avg90_COLLECTIBLE_lte': int``\n - ``'avg90_COLLECTIBLE_gte': int``\n - ``'avg90_COUNT_COLLECTIBLE_lte': int``\n - ``'avg90_COUNT_COLLECTIBLE_gte': int``\n - ``'avg90_COUNT_NEW_lte': int``\n - ``'avg90_COUNT_NEW_gte': int``\n - ``'avg90_COUNT_REFURBISHED_lte': int``\n - ``'avg90_COUNT_REFURBISHED_gte': int``\n - ``'avg90_COUNT_REVIEWS_lte': int``\n - ``'avg90_COUNT_REVIEWS_gte': int``\n - ``'avg90_COUNT_USED_lte': int``\n - ``'avg90_COUNT_USED_gte': int``\n - 
``'avg90_EBAY_NEW_SHIPPING_lte': int``\n - ``'avg90_EBAY_NEW_SHIPPING_gte': int``\n - ``'avg90_EBAY_USED_SHIPPING_lte': int``\n - ``'avg90_EBAY_USED_SHIPPING_gte': int``\n - ``'avg90_LIGHTNING_DEAL_lte': int``\n - ``'avg90_LIGHTNING_DEAL_gte': int``\n - ``'avg90_LISTPRICE_lte': int``\n - ``'avg90_LISTPRICE_gte': int``\n - ``'avg90_NEW_lte': int``\n - ``'avg90_NEW_gte': int``\n - ``'avg90_NEW_FBA_lte': int``\n - ``'avg90_NEW_FBA_gte': int``\n - ``'avg90_NEW_FBM_SHIPPING_lte': int``\n - ``'avg90_NEW_FBM_SHIPPING_gte': int``\n - ``'avg90_RATING_lte': int``\n - ``'avg90_RATING_gte': int``\n - ``'avg90_REFURBISHED_lte': int``\n - ``'avg90_REFURBISHED_gte': int``\n - ``'avg90_REFURBISHED_SHIPPING_lte': int``\n - ``'avg90_REFURBISHED_SHIPPING_gte': int``\n - ``'avg90_RENT_lte': int``\n - ``'avg90_RENT_gte': int``\n - ``'avg90_SALES_lte': int``\n - ``'avg90_SALES_gte': int``\n - ``'avg90_TRADE_IN_lte': int``\n - ``'avg90_TRADE_IN_gte': int``\n - ``'avg90_USED_lte': int``\n - ``'avg90_USED_gte': int``\n - ``'avg90_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'avg90_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'avg90_USED_GOOD_SHIPPING_lte': int``\n - ``'avg90_USED_GOOD_SHIPPING_gte': int``\n - ``'avg90_USED_NEW_SHIPPING_lte': int``\n - ``'avg90_USED_NEW_SHIPPING_gte': int``\n - ``'avg90_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'avg90_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'avg90_WAREHOUSE_lte': int``\n - ``'avg90_WAREHOUSE_gte': int``\n - ``'backInStock_AMAZON': bool``\n - ``'backInStock_BUY_BOX_SHIPPING': bool``\n - ``'backInStock_COLLECTIBLE': bool``\n - ``'backInStock_COUNT_COLLECTIBLE': bool``\n - ``'backInStock_COUNT_NEW': bool``\n - ``'backInStock_COUNT_REFURBISHED': bool``\n - ``'backInStock_COUNT_REVIEWS': bool``\n - ``'backInStock_COUNT_USED': bool``\n - ``'backInStock_EBAY_NEW_SHIPPING': bool``\n - ``'backInStock_EBAY_USED_SHIPPING': bool``\n - ``'backInStock_LIGHTNING_DEAL': bool``\n - ``'backInStock_LISTPRICE': bool``\n - ``'backInStock_NEW': bool``\n - ``'backInStock_NEW_FBA': bool``\n - ``'backInStock_NEW_FBM_SHIPPING': bool``\n - ``'backInStock_RATING': bool``\n - ``'backInStock_REFURBISHED': bool``\n - ``'backInStock_REFURBISHED_SHIPPING': bool``\n - ``'backInStock_RENT': bool``\n - ``'backInStock_SALES': bool``\n - ``'backInStock_TRADE_IN': bool``\n - ``'backInStock_USED': bool``\n - ``'backInStock_USED_ACCEPTABLE_SHIPPING': bool``\n - ``'backInStock_USED_GOOD_SHIPPING': bool``\n - ``'backInStock_USED_NEW_SHIPPING': bool``\n - ``'backInStock_USED_VERY_GOOD_SHIPPING': bool``\n - ``'backInStock_WAREHOUSE': bool``\n - ``'binding': str``\n - ``'brand': str``\n - ``'buyBoxSellerId': str``\n - ``'color': str``\n - ``'couponOneTimeAbsolute_lte': int``\n - ``'couponOneTimeAbsolute_gte': int``\n - ``'couponOneTimePercent_lte': int``\n - ``'couponOneTimePercent_gte': int``\n - ``'couponSNSAbsolute_lte': int``\n - ``'couponSNSAbsolute_gte': int``\n - ``'couponSNSPercent_lte': int``\n - ``'couponSNSPercent_gte': int``\n - ``'current_AMAZON_lte': int``\n - ``'current_AMAZON_gte': int``\n - ``'current_BUY_BOX_SHIPPING_lte': int``\n - ``'current_BUY_BOX_SHIPPING_gte': int``\n - ``'current_COLLECTIBLE_lte': int``\n - ``'current_COLLECTIBLE_gte': int``\n - ``'current_COUNT_COLLECTIBLE_lte': int``\n - ``'current_COUNT_COLLECTIBLE_gte': int``\n - ``'current_COUNT_NEW_lte': int``\n - ``'current_COUNT_NEW_gte': int``\n - ``'current_COUNT_REFURBISHED_lte': int``\n - ``'current_COUNT_REFURBISHED_gte': int``\n - ``'current_COUNT_REVIEWS_lte': int``\n - ``'current_COUNT_REVIEWS_gte': int``\n - 
``'current_COUNT_USED_lte': int``\n - ``'current_COUNT_USED_gte': int``\n - ``'current_EBAY_NEW_SHIPPING_lte': int``\n - ``'current_EBAY_NEW_SHIPPING_gte': int``\n - ``'current_EBAY_USED_SHIPPING_lte': int``\n - ``'current_EBAY_USED_SHIPPING_gte': int``\n - ``'current_LIGHTNING_DEAL_lte': int``\n - ``'current_LIGHTNING_DEAL_gte': int``\n - ``'current_LISTPRICE_lte': int``\n - ``'current_LISTPRICE_gte': int``\n - ``'current_NEW_lte': int``\n - ``'current_NEW_gte': int``\n - ``'current_NEW_FBA_lte': int``\n - ``'current_NEW_FBA_gte': int``\n - ``'current_NEW_FBM_SHIPPING_lte': int``\n - ``'current_NEW_FBM_SHIPPING_gte': int``\n - ``'current_RATING_lte': int``\n - ``'current_RATING_gte': int``\n - ``'current_REFURBISHED_lte': int``\n - ``'current_REFURBISHED_gte': int``\n - ``'current_REFURBISHED_SHIPPING_lte': int``\n - ``'current_REFURBISHED_SHIPPING_gte': int``\n - ``'current_RENT_lte': int``\n - ``'current_RENT_gte': int``\n - ``'current_SALES_lte': int``\n - ``'current_SALES_gte': int``\n - ``'current_TRADE_IN_lte': int``\n - ``'current_TRADE_IN_gte': int``\n - ``'current_USED_lte': int``\n - ``'current_USED_gte': int``\n - ``'current_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'current_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'current_USED_GOOD_SHIPPING_lte': int``\n - ``'current_USED_GOOD_SHIPPING_gte': int``\n - ``'current_USED_NEW_SHIPPING_lte': int``\n - ``'current_USED_NEW_SHIPPING_gte': int``\n - ``'current_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'current_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'current_WAREHOUSE_lte': int``\n - ``'current_WAREHOUSE_gte': int``\n - ``'delta1_AMAZON_lte': int``\n - ``'delta1_AMAZON_gte': int``\n - ``'delta1_BUY_BOX_SHIPPING_lte': int``\n - ``'delta1_BUY_BOX_SHIPPING_gte': int``\n - ``'delta1_COLLECTIBLE_lte': int``\n - ``'delta1_COLLECTIBLE_gte': int``\n - ``'delta1_COUNT_COLLECTIBLE_lte': int``\n - ``'delta1_COUNT_COLLECTIBLE_gte': int``\n - ``'delta1_COUNT_NEW_lte': int``\n - ``'delta1_COUNT_NEW_gte': int``\n - ``'delta1_COUNT_REFURBISHED_lte': int``\n - ``'delta1_COUNT_REFURBISHED_gte': int``\n - ``'delta1_COUNT_REVIEWS_lte': int``\n - ``'delta1_COUNT_REVIEWS_gte': int``\n - ``'delta1_COUNT_USED_lte': int``\n - ``'delta1_COUNT_USED_gte': int``\n - ``'delta1_EBAY_NEW_SHIPPING_lte': int``\n - ``'delta1_EBAY_NEW_SHIPPING_gte': int``\n - ``'delta1_EBAY_USED_SHIPPING_lte': int``\n - ``'delta1_EBAY_USED_SHIPPING_gte': int``\n - ``'delta1_LIGHTNING_DEAL_lte': int``\n - ``'delta1_LIGHTNING_DEAL_gte': int``\n - ``'delta1_LISTPRICE_lte': int``\n - ``'delta1_LISTPRICE_gte': int``\n - ``'delta1_NEW_lte': int``\n - ``'delta1_NEW_gte': int``\n - ``'delta1_NEW_FBA_lte': int``\n - ``'delta1_NEW_FBA_gte': int``\n - ``'delta1_NEW_FBM_SHIPPING_lte': int``\n - ``'delta1_NEW_FBM_SHIPPING_gte': int``\n - ``'delta1_RATING_lte': int``\n - ``'delta1_RATING_gte': int``\n - ``'delta1_REFURBISHED_lte': int``\n - ``'delta1_REFURBISHED_gte': int``\n - ``'delta1_REFURBISHED_SHIPPING_lte': int``\n - ``'delta1_REFURBISHED_SHIPPING_gte': int``\n - ``'delta1_RENT_lte': int``\n - ``'delta1_RENT_gte': int``\n - ``'delta1_SALES_lte': int``\n - ``'delta1_SALES_gte': int``\n - ``'delta1_TRADE_IN_lte': int``\n - ``'delta1_TRADE_IN_gte': int``\n - ``'delta1_USED_lte': int``\n - ``'delta1_USED_gte': int``\n - ``'delta1_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'delta1_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'delta1_USED_GOOD_SHIPPING_lte': int``\n - ``'delta1_USED_GOOD_SHIPPING_gte': int``\n - ``'delta1_USED_NEW_SHIPPING_lte': int``\n - ``'delta1_USED_NEW_SHIPPING_gte': int``\n 
- ``'delta1_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'delta1_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'delta1_WAREHOUSE_lte': int``\n - ``'delta1_WAREHOUSE_gte': int``\n - ``'delta30_AMAZON_lte': int``\n - ``'delta30_AMAZON_gte': int``\n - ``'delta30_BUY_BOX_SHIPPING_lte': int``\n - ``'delta30_BUY_BOX_SHIPPING_gte': int``\n - ``'delta30_COLLECTIBLE_lte': int``\n - ``'delta30_COLLECTIBLE_gte': int``\n - ``'delta30_COUNT_COLLECTIBLE_lte': int``\n - ``'delta30_COUNT_COLLECTIBLE_gte': int``\n - ``'delta30_COUNT_NEW_lte': int``\n - ``'delta30_COUNT_NEW_gte': int``\n - ``'delta30_COUNT_REFURBISHED_lte': int``\n - ``'delta30_COUNT_REFURBISHED_gte': int``\n - ``'delta30_COUNT_REVIEWS_lte': int``\n - ``'delta30_COUNT_REVIEWS_gte': int``\n - ``'delta30_COUNT_USED_lte': int``\n - ``'delta30_COUNT_USED_gte': int``\n - ``'delta30_EBAY_NEW_SHIPPING_lte': int``\n - ``'delta30_EBAY_NEW_SHIPPING_gte': int``\n - ``'delta30_EBAY_USED_SHIPPING_lte': int``\n - ``'delta30_EBAY_USED_SHIPPING_gte': int``\n - ``'delta30_LIGHTNING_DEAL_lte': int``\n - ``'delta30_LIGHTNING_DEAL_gte': int``\n - ``'delta30_LISTPRICE_lte': int``\n - ``'delta30_LISTPRICE_gte': int``\n - ``'delta30_NEW_lte': int``\n - ``'delta30_NEW_gte': int``\n - ``'delta30_NEW_FBA_lte': int``\n - ``'delta30_NEW_FBA_gte': int``\n - ``'delta30_NEW_FBM_SHIPPING_lte': int``\n - ``'delta30_NEW_FBM_SHIPPING_gte': int``\n - ``'delta30_RATING_lte': int``\n - ``'delta30_RATING_gte': int``\n - ``'delta30_REFURBISHED_lte': int``\n - ``'delta30_REFURBISHED_gte': int``\n - ``'delta30_REFURBISHED_SHIPPING_lte': int``\n - ``'delta30_REFURBISHED_SHIPPING_gte': int``\n - ``'delta30_RENT_lte': int``\n - ``'delta30_RENT_gte': int``\n - ``'delta30_SALES_lte': int``\n - ``'delta30_SALES_gte': int``\n - ``'delta30_TRADE_IN_lte': int``\n - ``'delta30_TRADE_IN_gte': int``\n - ``'delta30_USED_lte': int``\n - ``'delta30_USED_gte': int``\n - ``'delta30_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'delta30_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'delta30_USED_GOOD_SHIPPING_lte': int``\n - ``'delta30_USED_GOOD_SHIPPING_gte': int``\n - ``'delta30_USED_NEW_SHIPPING_lte': int``\n - ``'delta30_USED_NEW_SHIPPING_gte': int``\n - ``'delta30_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'delta30_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'delta30_WAREHOUSE_lte': int``\n - ``'delta30_WAREHOUSE_gte': int``\n - ``'delta7_AMAZON_lte': int``\n - ``'delta7_AMAZON_gte': int``\n - ``'delta7_BUY_BOX_SHIPPING_lte': int``\n - ``'delta7_BUY_BOX_SHIPPING_gte': int``\n - ``'delta7_COLLECTIBLE_lte': int``\n - ``'delta7_COLLECTIBLE_gte': int``\n - ``'delta7_COUNT_COLLECTIBLE_lte': int``\n - ``'delta7_COUNT_COLLECTIBLE_gte': int``\n - ``'delta7_COUNT_NEW_lte': int``\n - ``'delta7_COUNT_NEW_gte': int``\n - ``'delta7_COUNT_REFURBISHED_lte': int``\n - ``'delta7_COUNT_REFURBISHED_gte': int``\n - ``'delta7_COUNT_REVIEWS_lte': int``\n - ``'delta7_COUNT_REVIEWS_gte': int``\n - ``'delta7_COUNT_USED_lte': int``\n - ``'delta7_COUNT_USED_gte': int``\n - ``'delta7_EBAY_NEW_SHIPPING_lte': int``\n - ``'delta7_EBAY_NEW_SHIPPING_gte': int``\n - ``'delta7_EBAY_USED_SHIPPING_lte': int``\n - ``'delta7_EBAY_USED_SHIPPING_gte': int``\n - ``'delta7_LIGHTNING_DEAL_lte': int``\n - ``'delta7_LIGHTNING_DEAL_gte': int``\n - ``'delta7_LISTPRICE_lte': int``\n - ``'delta7_LISTPRICE_gte': int``\n - ``'delta7_NEW_lte': int``\n - ``'delta7_NEW_gte': int``\n - ``'delta7_NEW_FBA_lte': int``\n - ``'delta7_NEW_FBA_gte': int``\n - ``'delta7_NEW_FBM_SHIPPING_lte': int``\n - ``'delta7_NEW_FBM_SHIPPING_gte': int``\n - ``'delta7_RATING_lte': 
int``\n - ``'delta7_RATING_gte': int``\n - ``'delta7_REFURBISHED_lte': int``\n - ``'delta7_REFURBISHED_gte': int``\n - ``'delta7_REFURBISHED_SHIPPING_lte': int``\n - ``'delta7_REFURBISHED_SHIPPING_gte': int``\n - ``'delta7_RENT_lte': int``\n - ``'delta7_RENT_gte': int``\n - ``'delta7_SALES_lte': int``\n - ``'delta7_SALES_gte': int``\n - ``'delta7_TRADE_IN_lte': int``\n - ``'delta7_TRADE_IN_gte': int``\n - ``'delta7_USED_lte': int``\n - ``'delta7_USED_gte': int``\n - ``'delta7_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'delta7_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'delta7_USED_GOOD_SHIPPING_lte': int``\n - ``'delta7_USED_GOOD_SHIPPING_gte': int``\n - ``'delta7_USED_NEW_SHIPPING_lte': int``\n - ``'delta7_USED_NEW_SHIPPING_gte': int``\n - ``'delta7_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'delta7_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'delta7_WAREHOUSE_lte': int``\n - ``'delta7_WAREHOUSE_gte': int``\n - ``'delta90_AMAZON_lte': int``\n - ``'delta90_AMAZON_gte': int``\n - ``'delta90_BUY_BOX_SHIPPING_lte': int``\n - ``'delta90_BUY_BOX_SHIPPING_gte': int``\n - ``'delta90_COLLECTIBLE_lte': int``\n - ``'delta90_COLLECTIBLE_gte': int``\n - ``'delta90_COUNT_COLLECTIBLE_lte': int``\n - ``'delta90_COUNT_COLLECTIBLE_gte': int``\n - ``'delta90_COUNT_NEW_lte': int``\n - ``'delta90_COUNT_NEW_gte': int``\n - ``'delta90_COUNT_REFURBISHED_lte': int``\n - ``'delta90_COUNT_REFURBISHED_gte': int``\n - ``'delta90_COUNT_REVIEWS_lte': int``\n - ``'delta90_COUNT_REVIEWS_gte': int``\n - ``'delta90_COUNT_USED_lte': int``\n - ``'delta90_COUNT_USED_gte': int``\n - ``'delta90_EBAY_NEW_SHIPPING_lte': int``\n - ``'delta90_EBAY_NEW_SHIPPING_gte': int``\n - ``'delta90_EBAY_USED_SHIPPING_lte': int``\n - ``'delta90_EBAY_USED_SHIPPING_gte': int``\n - ``'delta90_LIGHTNING_DEAL_lte': int``\n - ``'delta90_LIGHTNING_DEAL_gte': int``\n - ``'delta90_LISTPRICE_lte': int``\n - ``'delta90_LISTPRICE_gte': int``\n - ``'delta90_NEW_lte': int``\n - ``'delta90_NEW_gte': int``\n - ``'delta90_NEW_FBA_lte': int``\n - ``'delta90_NEW_FBA_gte': int``\n - ``'delta90_NEW_FBM_SHIPPING_lte': int``\n - ``'delta90_NEW_FBM_SHIPPING_gte': int``\n - ``'delta90_RATING_lte': int``\n - ``'delta90_RATING_gte': int``\n - ``'delta90_REFURBISHED_lte': int``\n - ``'delta90_REFURBISHED_gte': int``\n - ``'delta90_REFURBISHED_SHIPPING_lte': int``\n - ``'delta90_REFURBISHED_SHIPPING_gte': int``\n - ``'delta90_RENT_lte': int``\n - ``'delta90_RENT_gte': int``\n - ``'delta90_SALES_lte': int``\n - ``'delta90_SALES_gte': int``\n - ``'delta90_TRADE_IN_lte': int``\n - ``'delta90_TRADE_IN_gte': int``\n - ``'delta90_USED_lte': int``\n - ``'delta90_USED_gte': int``\n - ``'delta90_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'delta90_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'delta90_USED_GOOD_SHIPPING_lte': int``\n - ``'delta90_USED_GOOD_SHIPPING_gte': int``\n - ``'delta90_USED_NEW_SHIPPING_lte': int``\n - ``'delta90_USED_NEW_SHIPPING_gte': int``\n - ``'delta90_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'delta90_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'delta90_WAREHOUSE_lte': int``\n - ``'delta90_WAREHOUSE_gte': int``\n - ``'deltaLast_AMAZON_lte': int``\n - ``'deltaLast_AMAZON_gte': int``\n - ``'deltaLast_BUY_BOX_SHIPPING_lte': int``\n - ``'deltaLast_BUY_BOX_SHIPPING_gte': int``\n - ``'deltaLast_COLLECTIBLE_lte': int``\n - ``'deltaLast_COLLECTIBLE_gte': int``\n - ``'deltaLast_COUNT_COLLECTIBLE_lte': int``\n - ``'deltaLast_COUNT_COLLECTIBLE_gte': int``\n - ``'deltaLast_COUNT_NEW_lte': int``\n - ``'deltaLast_COUNT_NEW_gte': int``\n - ``'deltaLast_COUNT_REFURBISHED_lte': 
int``\n - ``'deltaLast_COUNT_REFURBISHED_gte': int``\n - ``'deltaLast_COUNT_REVIEWS_lte': int``\n - ``'deltaLast_COUNT_REVIEWS_gte': int``\n - ``'deltaLast_COUNT_USED_lte': int``\n - ``'deltaLast_COUNT_USED_gte': int``\n - ``'deltaLast_EBAY_NEW_SHIPPING_lte': int``\n - ``'deltaLast_EBAY_NEW_SHIPPING_gte': int``\n - ``'deltaLast_EBAY_USED_SHIPPING_lte': int``\n - ``'deltaLast_EBAY_USED_SHIPPING_gte': int``\n - ``'deltaLast_LIGHTNING_DEAL_lte': int``\n - ``'deltaLast_LIGHTNING_DEAL_gte': int``\n - ``'deltaLast_LISTPRICE_lte': int``\n - ``'deltaLast_LISTPRICE_gte': int``\n - ``'deltaLast_NEW_lte': int``\n - ``'deltaLast_NEW_gte': int``\n - ``'deltaLast_NEW_FBA_lte': int``\n - ``'deltaLast_NEW_FBA_gte': int``\n - ``'deltaLast_NEW_FBM_SHIPPING_lte': int``\n - ``'deltaLast_NEW_FBM_SHIPPING_gte': int``\n - ``'deltaLast_RATING_lte': int``\n - ``'deltaLast_RATING_gte': int``\n - ``'deltaLast_REFURBISHED_lte': int``\n - ``'deltaLast_REFURBISHED_gte': int``\n - ``'deltaLast_REFURBISHED_SHIPPING_lte': int``\n - ``'deltaLast_REFURBISHED_SHIPPING_gte': int``\n - ``'deltaLast_RENT_lte': int``\n - ``'deltaLast_RENT_gte': int``\n - ``'deltaLast_SALES_lte': int``\n - ``'deltaLast_SALES_gte': int``\n - ``'deltaLast_TRADE_IN_lte': int``\n - ``'deltaLast_TRADE_IN_gte': int``\n - ``'deltaLast_USED_lte': int``\n - ``'deltaLast_USED_gte': int``\n - ``'deltaLast_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'deltaLast_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'deltaLast_USED_GOOD_SHIPPING_lte': int``\n - ``'deltaLast_USED_GOOD_SHIPPING_gte': int``\n - ``'deltaLast_USED_NEW_SHIPPING_lte': int``\n - ``'deltaLast_USED_NEW_SHIPPING_gte': int``\n - ``'deltaLast_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'deltaLast_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'deltaLast_WAREHOUSE_lte': int``\n - ``'deltaLast_WAREHOUSE_gte': int``\n - ``'deltaPercent1_AMAZON_lte': int``\n - ``'deltaPercent1_AMAZON_gte': int``\n - ``'deltaPercent1_BUY_BOX_SHIPPING_lte': int``\n - ``'deltaPercent1_BUY_BOX_SHIPPING_gte': int``\n - ``'deltaPercent1_COLLECTIBLE_lte': int``\n - ``'deltaPercent1_COLLECTIBLE_gte': int``\n - ``'deltaPercent1_COUNT_COLLECTIBLE_lte': int``\n - ``'deltaPercent1_COUNT_COLLECTIBLE_gte': int``\n - ``'deltaPercent1_COUNT_NEW_lte': int``\n - ``'deltaPercent1_COUNT_NEW_gte': int``\n - ``'deltaPercent1_COUNT_REFURBISHED_lte': int``\n - ``'deltaPercent1_COUNT_REFURBISHED_gte': int``\n - ``'deltaPercent1_COUNT_REVIEWS_lte': int``\n - ``'deltaPercent1_COUNT_REVIEWS_gte': int``\n - ``'deltaPercent1_COUNT_USED_lte': int``\n - ``'deltaPercent1_COUNT_USED_gte': int``\n - ``'deltaPercent1_EBAY_NEW_SHIPPING_lte': int``\n - ``'deltaPercent1_EBAY_NEW_SHIPPING_gte': int``\n - ``'deltaPercent1_EBAY_USED_SHIPPING_lte': int``\n - ``'deltaPercent1_EBAY_USED_SHIPPING_gte': int``\n - ``'deltaPercent1_LIGHTNING_DEAL_lte': int``\n - ``'deltaPercent1_LIGHTNING_DEAL_gte': int``\n - ``'deltaPercent1_LISTPRICE_lte': int``\n - ``'deltaPercent1_LISTPRICE_gte': int``\n - ``'deltaPercent1_NEW_lte': int``\n - ``'deltaPercent1_NEW_gte': int``\n - ``'deltaPercent1_NEW_FBA_lte': int``\n - ``'deltaPercent1_NEW_FBA_gte': int``\n - ``'deltaPercent1_NEW_FBM_SHIPPING_lte': int``\n - ``'deltaPercent1_NEW_FBM_SHIPPING_gte': int``\n - ``'deltaPercent1_RATING_lte': int``\n - ``'deltaPercent1_RATING_gte': int``\n - ``'deltaPercent1_REFURBISHED_lte': int``\n - ``'deltaPercent1_REFURBISHED_gte': int``\n - ``'deltaPercent1_REFURBISHED_SHIPPING_lte': int``\n - ``'deltaPercent1_REFURBISHED_SHIPPING_gte': int``\n - ``'deltaPercent1_RENT_lte': int``\n - 
``'deltaPercent1_RENT_gte': int``\n - ``'deltaPercent1_SALES_lte': int``\n - ``'deltaPercent1_SALES_gte': int``\n - ``'deltaPercent1_TRADE_IN_lte': int``\n - ``'deltaPercent1_TRADE_IN_gte': int``\n - ``'deltaPercent1_USED_lte': int``\n - ``'deltaPercent1_USED_gte': int``\n - ``'deltaPercent1_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'deltaPercent1_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'deltaPercent1_USED_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent1_USED_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent1_USED_NEW_SHIPPING_lte': int``\n - ``'deltaPercent1_USED_NEW_SHIPPING_gte': int``\n - ``'deltaPercent1_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent1_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent1_WAREHOUSE_lte': int``\n - ``'deltaPercent1_WAREHOUSE_gte': int``\n - ``'deltaPercent30_AMAZON_lte': int``\n - ``'deltaPercent30_AMAZON_gte': int``\n - ``'deltaPercent30_BUY_BOX_SHIPPING_lte': int``\n - ``'deltaPercent30_BUY_BOX_SHIPPING_gte': int``\n - ``'deltaPercent30_COLLECTIBLE_lte': int``\n - ``'deltaPercent30_COLLECTIBLE_gte': int``\n - ``'deltaPercent30_COUNT_COLLECTIBLE_lte': int``\n - ``'deltaPercent30_COUNT_COLLECTIBLE_gte': int``\n - ``'deltaPercent30_COUNT_NEW_lte': int``\n - ``'deltaPercent30_COUNT_NEW_gte': int``\n - ``'deltaPercent30_COUNT_REFURBISHED_lte': int``\n - ``'deltaPercent30_COUNT_REFURBISHED_gte': int``\n - ``'deltaPercent30_COUNT_REVIEWS_lte': int``\n - ``'deltaPercent30_COUNT_REVIEWS_gte': int``\n - ``'deltaPercent30_COUNT_USED_lte': int``\n - ``'deltaPercent30_COUNT_USED_gte': int``\n - ``'deltaPercent30_EBAY_NEW_SHIPPING_lte': int``\n - ``'deltaPercent30_EBAY_NEW_SHIPPING_gte': int``\n - ``'deltaPercent30_EBAY_USED_SHIPPING_lte': int``\n - ``'deltaPercent30_EBAY_USED_SHIPPING_gte': int``\n - ``'deltaPercent30_LIGHTNING_DEAL_lte': int``\n - ``'deltaPercent30_LIGHTNING_DEAL_gte': int``\n - ``'deltaPercent30_LISTPRICE_lte': int``\n - ``'deltaPercent30_LISTPRICE_gte': int``\n - ``'deltaPercent30_NEW_lte': int``\n - ``'deltaPercent30_NEW_gte': int``\n - ``'deltaPercent30_NEW_FBA_lte': int``\n - ``'deltaPercent30_NEW_FBA_gte': int``\n - ``'deltaPercent30_NEW_FBM_SHIPPING_lte': int``\n - ``'deltaPercent30_NEW_FBM_SHIPPING_gte': int``\n - ``'deltaPercent30_RATING_lte': int``\n - ``'deltaPercent30_RATING_gte': int``\n - ``'deltaPercent30_REFURBISHED_lte': int``\n - ``'deltaPercent30_REFURBISHED_gte': int``\n - ``'deltaPercent30_REFURBISHED_SHIPPING_lte': int``\n - ``'deltaPercent30_REFURBISHED_SHIPPING_gte': int``\n - ``'deltaPercent30_RENT_lte': int``\n - ``'deltaPercent30_RENT_gte': int``\n - ``'deltaPercent30_SALES_lte': int``\n - ``'deltaPercent30_SALES_gte': int``\n - ``'deltaPercent30_TRADE_IN_lte': int``\n - ``'deltaPercent30_TRADE_IN_gte': int``\n - ``'deltaPercent30_USED_lte': int``\n - ``'deltaPercent30_USED_gte': int``\n - ``'deltaPercent30_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'deltaPercent30_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'deltaPercent30_USED_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent30_USED_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent30_USED_NEW_SHIPPING_lte': int``\n - ``'deltaPercent30_USED_NEW_SHIPPING_gte': int``\n - ``'deltaPercent30_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent30_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent30_WAREHOUSE_lte': int``\n - ``'deltaPercent30_WAREHOUSE_gte': int``\n - ``'deltaPercent7_AMAZON_lte': int``\n - ``'deltaPercent7_AMAZON_gte': int``\n - ``'deltaPercent7_BUY_BOX_SHIPPING_lte': int``\n - ``'deltaPercent7_BUY_BOX_SHIPPING_gte': int``\n - 
``'deltaPercent7_COLLECTIBLE_lte': int``\n - ``'deltaPercent7_COLLECTIBLE_gte': int``\n - ``'deltaPercent7_COUNT_COLLECTIBLE_lte': int``\n - ``'deltaPercent7_COUNT_COLLECTIBLE_gte': int``\n - ``'deltaPercent7_COUNT_NEW_lte': int``\n - ``'deltaPercent7_COUNT_NEW_gte': int``\n - ``'deltaPercent7_COUNT_REFURBISHED_lte': int``\n - ``'deltaPercent7_COUNT_REFURBISHED_gte': int``\n - ``'deltaPercent7_COUNT_REVIEWS_lte': int``\n - ``'deltaPercent7_COUNT_REVIEWS_gte': int``\n - ``'deltaPercent7_COUNT_USED_lte': int``\n - ``'deltaPercent7_COUNT_USED_gte': int``\n - ``'deltaPercent7_EBAY_NEW_SHIPPING_lte': int``\n - ``'deltaPercent7_EBAY_NEW_SHIPPING_gte': int``\n - ``'deltaPercent7_EBAY_USED_SHIPPING_lte': int``\n - ``'deltaPercent7_EBAY_USED_SHIPPING_gte': int``\n - ``'deltaPercent7_LIGHTNING_DEAL_lte': int``\n - ``'deltaPercent7_LIGHTNING_DEAL_gte': int``\n - ``'deltaPercent7_LISTPRICE_lte': int``\n - ``'deltaPercent7_LISTPRICE_gte': int``\n - ``'deltaPercent7_NEW_lte': int``\n - ``'deltaPercent7_NEW_gte': int``\n - ``'deltaPercent7_NEW_FBA_lte': int``\n - ``'deltaPercent7_NEW_FBA_gte': int``\n - ``'deltaPercent7_NEW_FBM_SHIPPING_lte': int``\n - ``'deltaPercent7_NEW_FBM_SHIPPING_gte': int``\n - ``'deltaPercent7_RATING_lte': int``\n - ``'deltaPercent7_RATING_gte': int``\n - ``'deltaPercent7_REFURBISHED_lte': int``\n - ``'deltaPercent7_REFURBISHED_gte': int``\n - ``'deltaPercent7_REFURBISHED_SHIPPING_lte': int``\n - ``'deltaPercent7_REFURBISHED_SHIPPING_gte': int``\n - ``'deltaPercent7_RENT_lte': int``\n - ``'deltaPercent7_RENT_gte': int``\n - ``'deltaPercent7_SALES_lte': int``\n - ``'deltaPercent7_SALES_gte': int``\n - ``'deltaPercent7_TRADE_IN_lte': int``\n - ``'deltaPercent7_TRADE_IN_gte': int``\n - ``'deltaPercent7_USED_lte': int``\n - ``'deltaPercent7_USED_gte': int``\n - ``'deltaPercent7_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'deltaPercent7_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'deltaPercent7_USED_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent7_USED_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent7_USED_NEW_SHIPPING_lte': int``\n - ``'deltaPercent7_USED_NEW_SHIPPING_gte': int``\n - ``'deltaPercent7_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent7_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent7_WAREHOUSE_lte': int``\n - ``'deltaPercent7_WAREHOUSE_gte': int``\n - ``'deltaPercent90_AMAZON_lte': int``\n - ``'deltaPercent90_AMAZON_gte': int``\n - ``'deltaPercent90_BUY_BOX_SHIPPING_lte': int``\n - ``'deltaPercent90_BUY_BOX_SHIPPING_gte': int``\n - ``'deltaPercent90_COLLECTIBLE_lte': int``\n - ``'deltaPercent90_COLLECTIBLE_gte': int``\n - ``'deltaPercent90_COUNT_COLLECTIBLE_lte': int``\n - ``'deltaPercent90_COUNT_COLLECTIBLE_gte': int``\n - ``'deltaPercent90_COUNT_NEW_lte': int``\n - ``'deltaPercent90_COUNT_NEW_gte': int``\n - ``'deltaPercent90_COUNT_REFURBISHED_lte': int``\n - ``'deltaPercent90_COUNT_REFURBISHED_gte': int``\n - ``'deltaPercent90_COUNT_REVIEWS_lte': int``\n - ``'deltaPercent90_COUNT_REVIEWS_gte': int``\n - ``'deltaPercent90_COUNT_USED_lte': int``\n - ``'deltaPercent90_COUNT_USED_gte': int``\n - ``'deltaPercent90_EBAY_NEW_SHIPPING_lte': int``\n - ``'deltaPercent90_EBAY_NEW_SHIPPING_gte': int``\n - ``'deltaPercent90_EBAY_USED_SHIPPING_lte': int``\n - ``'deltaPercent90_EBAY_USED_SHIPPING_gte': int``\n - ``'deltaPercent90_LIGHTNING_DEAL_lte': int``\n - ``'deltaPercent90_LIGHTNING_DEAL_gte': int``\n - ``'deltaPercent90_LISTPRICE_lte': int``\n - ``'deltaPercent90_LISTPRICE_gte': int``\n - ``'deltaPercent90_NEW_lte': int``\n - ``'deltaPercent90_NEW_gte': int``\n - 
``'deltaPercent90_NEW_FBA_lte': int``\n - ``'deltaPercent90_NEW_FBA_gte': int``\n - ``'deltaPercent90_NEW_FBM_SHIPPING_lte': int``\n - ``'deltaPercent90_NEW_FBM_SHIPPING_gte': int``\n - ``'deltaPercent90_RATING_lte': int``\n - ``'deltaPercent90_RATING_gte': int``\n - ``'deltaPercent90_REFURBISHED_lte': int``\n - ``'deltaPercent90_REFURBISHED_gte': int``\n - ``'deltaPercent90_REFURBISHED_SHIPPING_lte': int``\n - ``'deltaPercent90_REFURBISHED_SHIPPING_gte': int``\n - ``'deltaPercent90_RENT_lte': int``\n - ``'deltaPercent90_RENT_gte': int``\n - ``'deltaPercent90_SALES_lte': int``\n - ``'deltaPercent90_SALES_gte': int``\n - ``'deltaPercent90_TRADE_IN_lte': int``\n - ``'deltaPercent90_TRADE_IN_gte': int``\n - ``'deltaPercent90_USED_lte': int``\n - ``'deltaPercent90_USED_gte': int``\n - ``'deltaPercent90_USED_ACCEPTABLE_SHIPPING_lte': int``\n - ``'deltaPercent90_USED_ACCEPTABLE_SHIPPING_gte': int``\n - ``'deltaPercent90_USED_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent90_USED_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent90_USED_NEW_SHIPPING_lte': int``\n - ``'deltaPercent90_USED_NEW_SHIPPING_gte': int``\n - ``'deltaPercent90_USED_VERY_GOOD_SHIPPING_lte': int``\n - ``'deltaPercent90_USED_VERY_GOOD_SHIPPING_gte': int``\n - ``'deltaPercent90_WAREHOUSE_lte': int``\n - ``'deltaPercent90_WAREHOUSE_gte': int``\n - ``'department': str``\n - ``'edition': str``\n - ``'fbaFees_lte': int``\n - ``'fbaFees_gte': int``\n - ``'format': str``\n - ``'genre': str``\n - ``'hasParentASIN': bool``\n - ``'hasReviews': bool``\n - ``'hazardousMaterialType_lte': int``\n - ``'hazardousMaterialType_gte': int``\n - ``'isAdultProduct': bool``\n - ``'isEligibleForSuperSaverShipping': bool``\n - ``'isEligibleForTradeIn': bool``\n - ``'isHighestOffer': bool``\n - ``'isHighest_AMAZON': bool``\n - ``'isHighest_BUY_BOX_SHIPPING': bool``\n - ``'isHighest_COLLECTIBLE': bool``\n - ``'isHighest_COUNT_COLLECTIBLE': bool``\n - ``'isHighest_COUNT_NEW': bool``\n - ``'isHighest_COUNT_REFURBISHED': bool``\n - ``'isHighest_COUNT_REVIEWS': bool``\n - ``'isHighest_COUNT_USED': bool``\n - ``'isHighest_EBAY_NEW_SHIPPING': bool``\n - ``'isHighest_EBAY_USED_SHIPPING': bool``\n - ``'isHighest_LIGHTNING_DEAL': bool``\n - ``'isHighest_LISTPRICE': bool``\n - ``'isHighest_NEW': bool``\n - ``'isHighest_NEW_FBA': bool``\n - ``'isHighest_NEW_FBM_SHIPPING': bool``\n - ``'isHighest_RATING': bool``\n - ``'isHighest_REFURBISHED': bool``\n - ``'isHighest_REFURBISHED_SHIPPING': bool``\n - ``'isHighest_RENT': bool``\n - ``'isHighest_SALES': bool``\n - ``'isHighest_TRADE_IN': bool``\n - ``'isHighest_USED': bool``\n - ``'isHighest_USED_ACCEPTABLE_SHIPPING': bool``\n - ``'isHighest_USED_GOOD_SHIPPING': bool``\n - ``'isHighest_USED_NEW_SHIPPING': bool``\n - ``'isHighest_USED_VERY_GOOD_SHIPPING': bool``\n - ``'isHighest_WAREHOUSE': bool``\n - ``'isLowestOffer': bool``\n - ``'isLowest_AMAZON': bool``\n - ``'isLowest_BUY_BOX_SHIPPING': bool``\n - ``'isLowest_COLLECTIBLE': bool``\n - ``'isLowest_COUNT_COLLECTIBLE': bool``\n - ``'isLowest_COUNT_NEW': bool``\n - ``'isLowest_COUNT_REFURBISHED': bool``\n - ``'isLowest_COUNT_REVIEWS': bool``\n - ``'isLowest_COUNT_USED': bool``\n - ``'isLowest_EBAY_NEW_SHIPPING': bool``\n - ``'isLowest_EBAY_USED_SHIPPING': bool``\n - ``'isLowest_LIGHTNING_DEAL': bool``\n - ``'isLowest_LISTPRICE': bool``\n - ``'isLowest_NEW': bool``\n - ``'isLowest_NEW_FBA': bool``\n - ``'isLowest_NEW_FBM_SHIPPING': bool``\n - ``'isLowest_RATING': bool``\n - ``'isLowest_REFURBISHED': bool``\n - ``'isLowest_REFURBISHED_SHIPPING': bool``\n - ``'isLowest_RENT': 
bool``\n            - ``'isLowest_SALES': bool``\n            - ``'isLowest_TRADE_IN': bool``\n            - ``'isLowest_USED': bool``\n            - ``'isLowest_USED_ACCEPTABLE_SHIPPING': bool``\n            - ``'isLowest_USED_GOOD_SHIPPING': bool``\n            - ``'isLowest_USED_NEW_SHIPPING': bool``\n            - ``'isLowest_USED_VERY_GOOD_SHIPPING': bool``\n            - ``'isLowest_WAREHOUSE': bool``\n            - ``'isPrimeExclusive': bool``\n            - ``'isSNS': bool``\n            - ``'label': str``\n            - ``'languages': str``\n            - ``'lastOffersUpdate_lte': int``\n            - ``'lastOffersUpdate_gte': int``\n            - ``'lastPriceChange_lte': int``\n            - ``'lastPriceChange_gte': int``\n            - ``'lastRatingUpdate_lte': int``\n            - ``'lastRatingUpdate_gte': int``\n            - ``'lastUpdate_lte': int``\n            - ``'lastUpdate_gte': int``\n            - ``'lightningEnd_lte': int``\n            - ``'lightningEnd_gte': int``\n            - ``'lightningStart_lte': int``\n            - ``'lightningStart_gte': int``\n            - ``'listedSince_lte': int``\n            - ``'listedSince_gte': int``\n            - ``'manufacturer': str``\n            - ``'model': str``\n            - ``'newPriceIsMAP': bool``\n            - ``'nextUpdate_lte': int``\n            - ``'nextUpdate_gte': int``\n            - ``'numberOfItems_lte': int``\n            - ``'numberOfItems_gte': int``\n            - ``'numberOfPages_lte': int``\n            - ``'numberOfPages_gte': int``\n            - ``'numberOfTrackings_lte': int``\n            - ``'numberOfTrackings_gte': int``\n            - ``'offerCountFBA_lte': int``\n            - ``'offerCountFBA_gte': int``\n            - ``'offerCountFBM_lte': int``\n            - ``'offerCountFBM_gte': int``\n            - ``'outOfStockPercentageInInterval_lte': int``\n            - ``'outOfStockPercentageInInterval_gte': int``\n            - ``'packageDimension_lte': int``\n            - ``'packageDimension_gte': int``\n            - ``'packageHeight_lte': int``\n            - ``'packageHeight_gte': int``\n            - ``'packageLength_lte': int``\n            - ``'packageLength_gte': int``\n            - ``'packageQuantity_lte': int``\n            - ``'packageQuantity_gte': int``\n            - ``'packageWeight_lte': int``\n            - ``'packageWeight_gte': int``\n            - ``'packageWidth_lte': int``\n            - ``'packageWidth_gte': int``\n            - ``'partNumber': str``\n            - ``'platform': str``\n            - ``'productGroup': str``\n            - ``'productType': int``\n            - ``'promotions': int``\n            - ``'publicationDate_lte': int``\n            - ``'publicationDate_gte': int``\n            - ``'publisher': str``\n            - ``'releaseDate_lte': int``\n            - ``'releaseDate_gte': int``\n            - ``'rootCategory': int``\n            - ``'sellerIds': str``\n            - ``'sellerIdsLowestFBA': str``\n            - ``'sellerIdsLowestFBM': str``\n            - ``'size': str``\n            - ``'salesRankDrops180_lte': int``\n            - ``'salesRankDrops180_gte': int``\n            - ``'salesRankDrops90_lte': int``\n            - ``'salesRankDrops90_gte': int``\n            - ``'salesRankDrops30_lte': int``\n            - ``'salesRankDrops30_gte': int``\n            - ``'stockAmazon_lte': int``\n            - ``'stockAmazon_gte': int``\n            - ``'stockBuyBox_lte': int``\n            - ``'stockBuyBox_gte': int``\n            - ``'studio': str``\n            - ``'title': str``\n            - ``'title_flag': str``\n            - ``'trackingSince_lte': int``\n            - ``'trackingSince_gte': int``\n            - ``'type': str``\n            - ``'mpn': str``\n            - ``'outOfStockPercentage90_lte': int``\n            - ``'outOfStockPercentage90_gte': int``\n            - ``'categories_include': int``\n            - ``'categories_exclude': int``\n\n        domain : str, optional\n            One of the following Amazon domains: RESERVED, US, GB, DE,\n            FR, JP, CA, CN, IT, ES, IN, MX. Defaults to US.\n\n        wait : bool, optional\n            Wait for an available token before sending the query.\n            Defaults to ``True``.\n\n        Returns\n        -------\n        asins : list\n            List of ASINs matching the product criteria.\n\n        Examples\n        --------\n        Query for all of Jim Butcher's books\n\n        >>> import keepa\n        >>> api = keepa.Keepa('ENTER_ACTUAL_KEY_HERE')\n        >>> product_parms = {'author': 'jim butcher'}\n        >>> products = api.product_finder(product_parms)
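\n\n        A sketch filtering on current price (keepa prices are given\n        in the marketplace's smallest currency unit, e.g. cents for\n        the US):\n\n        >>> product_parms = {'author': 'jim butcher',\n                             'current_NEW_lte': 1000}\n        >>> products = api.product_finder(product_parms)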
RuntimeError('Invalid key \"%s\"' % key)\n\n # verify json type\n key_type = PRODUCT_REQUEST_KEYS[key]\n product_parms[key] = key_type(product_parms[key])\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'selection': json.dumps(product_parms)}\n\n response = self._request('query', payload, wait=wait)\n return response['asinList']\n\n def deals(self, deal_parms, domain='US', wait=True):\n \"\"\"Query the Keepa API for product deals.\n\n You can find products that recently changed and match your\n search criteria. A single request will return a maximum of\n 150 deals. Try out the deals page to first get accustomed to\n the options:\n https://keepa.com/#!deals\n\n For more details please visit:\n https://keepa.com/#!discuss/t/browsing-deals/338\n\n Parameters\n ----------\n deal_parms : dict\n Dictionary containing one or more of the following keys:\n\n - ``\"page\"``: int\n - ``\"domainId\"``: int\n - ``\"excludeCategories\"``: list\n - ``\"includeCategories\"``: list\n - ``\"priceTypes\"``: list\n - ``\"deltaRange\"``: list\n - ``\"deltaPercentRange\"``: list\n - ``\"deltaLastRange\"``: list\n - ``\"salesRankRange\"``: list\n - ``\"currentRange\"``: list\n - ``\"minRating\"``: int\n - ``\"isLowest\"``: bool\n - ``\"isLowestOffer\"``: bool\n - ``\"isOutOfStock\"``: bool\n - ``\"titleSearch\"``: String\n - ``\"isRangeEnabled\"``: bool\n - ``\"isFilterEnabled\"``: bool\n - ``\"hasReviews\"``: bool\n - ``\"filterErotic\"``: bool\n - ``\"sortType\"``: int\n - ``\"dateRange\"``: int\n\n domain : str, optional\n One of the following Amazon domains: RESERVED, US, GB, DE,\n FR, JP, CA, CN, IT, ES, IN, MX Defaults to US.\n\n wait : bool, optional\n Wait available token before doing effective query, Defaults to ``True``.\n\n Examples\n --------\n >>> import keepa\n >>> api = keepa.AsyncKeepa('ENTER_YOUR_KEY_HERE')\n >>> deal_parms = {\"page\": 0,\n \"domainId\": 1,\n \"excludeCategories\": [1064954, 11091801],\n \"includeCategories\": [16310101]}\n >>> deals = api.deals(deal_parms)\n >>> print(deals[:5])\n ['B00U20FN1Y', 'B078HR932T', 'B00L88ERK2',\n 'B07G5TDMZ7', 'B00GYMQAM0']\n \"\"\"\n # verify valid keys\n for key in deal_parms:\n if key not in DEAL_REQUEST_KEYS:\n raise RuntimeError('Invalid key \"%s\"' % key)\n\n # verify json type\n key_type = DEAL_REQUEST_KEYS[key]\n deal_parms[key] = key_type(deal_parms[key])\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'selection': json.dumps(deal_parms)}\n\n response = self._request('query', payload, wait=wait)\n return response['asinList']\n\n def _request(self, request_type, payload, wait=True, raw_response=False):\n \"\"\"Queries keepa api server. \n\n Parses raw response from keepa into a json format. 
Handles\n errors and waits for available tokens if allowed.\n \"\"\"\n if wait:\n self.wait_for_tokens()\n\n while True:\n raw = requests.get(f'https://api.keepa.com/{request_type}/?', payload,\n timeout=self._timeout)\n status_code = str(raw.status_code)\n if status_code != '200':\n if status_code in SCODES:\n if status_code == '429' and wait:\n print('Response from server: %s' % SCODES[status_code])\n self.wait_for_tokens()\n continue\n else:\n raise Exception(SCODES[status_code])\n else:\n raise Exception('REQUEST_FAILED')\n break\n\n response = raw.json()\n\n if 'tokensConsumed' in response:\n log.debug('%d tokens consumed', response['tokensConsumed'])\n\n if 'error' in response:\n if response['error']:\n raise Exception(response['error']['message'])\n\n # always update tokens\n self.tokens_left = response['tokensLeft']\n\n if raw_response:\n return raw\n return response\n\n\nclass AsyncKeepa():\n \"\"\"Class to support an asynchronous Python interface to keepa server.\n\n Initializes API with access key. Access key can be obtained by\n signing up for a recurring or one time plan at:\n https://keepa.com/#!api\n\n Parameters\n ----------\n accesskey : str\n 64 character access key string.\n\n timeout : float, optional\n Default timeout when issuing any request. This is not a time\n limit on the entire response download; rather, an exception is\n raised if the server has not issued a response for timeout\n seconds. Setting this to 0 disables the timeout, but will\n cause any request to hang indefinitely should keepa.com be down\n\n Examples\n --------\n Create the api object\n\n >>> import keepa\n >>> mykey = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\n >>> api = await keepa.AsyncKeepa.create(mykey)\n\n Request data from two ASINs\n\n >>> products = await api.query(['0439064872', '1426208081'])\n\n Print item details\n\n >>> print('Item 1')\n >>> print('\\t ASIN: {:s}'.format(products[0]['asin']))\n >>> print('\\t Title: {:s}'.format(products[0]['title']))\n\n Print item price\n\n >>> usedprice = products[0]['data']['MarketplaceUsed']\n >>> usedtimes = products[0]['data']['MarketplaceUsed_time']\n >>> print('\\t Used price: ${:.2f}'.format(usedprice[-1]))\n >>> print('\\t as of: {:s}'.format(str(usedtimes[-1])))\n \"\"\"\n\n @classmethod\n async def create(cls, accesskey, timeout=10):\n self = AsyncKeepa()\n self.accesskey = accesskey\n self.status = None\n self.tokens_left = 0\n self._timeout = timeout\n\n # Store user's available tokens\n log.info('Connecting to keepa using key ending in %s', accesskey[-6:])\n await self.update_status()\n log.info('%d tokens remain', self.tokens_left)\n return self\n\n @property\n def time_to_refill(self):\n \"\"\" Returns the time to refill in seconds \"\"\"\n # Get current timestamp in milliseconds from UNIX epoch\n now = int(time.time() * 1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n\n # wait plus one second fudge factor\n timetorefil = timeatrefile - now + 1000\n if timetorefil < 0:\n timetorefil = 0\n\n # Account for negative tokens left\n if self.tokens_left < 0:\n timetorefil += (abs(self.tokens_left) / self.status['refillRate']) * 60000\n\n # Return value in seconds\n return timetorefil / 1000.0\n\n async def update_status(self):\n \"\"\" Updates available tokens \"\"\"\n self.status = await self._request('token', {'key': self.accesskey}, wait=False)\n\n async def wait_for_tokens(self):\n \"\"\"Checks any remaining tokens and waits if none are available. \"\"\"\n await self.update_status()\n\n # Wait if no tokens available\n if self.tokens_left <= 0:\n tdelay = self.time_to_refill\n log.warning('Waiting %.0f seconds for additional tokens' % tdelay)\n await asyncio.sleep(tdelay)\n await self.update_status()\n\n @is_documented_by(Keepa.query)\n async def query(self, items, stats=None, domain='US', history=True,\n offers=None, update=None, to_datetime=True,\n rating=False, out_of_stock_as_nan=True, stock=False,\n product_code_is_asin=True, progress_bar=True, buybox=False,\n wait=True, days=None, only_live_offers=None, raw=False):\n if raw:\n raise ValueError('Raw response is only available in the non-async class')\n\n # Format items into numpy array\n try:\n items = format_items(items)\n except BaseException:\n raise Exception('Invalid product codes input')\n assert len(items), 'No valid product codes'\n\n nitems = len(items)\n if nitems == 1:\n log.debug('Executing single product query')\n else:\n log.debug('Executing %d item product query', nitems)\n\n # check offer input\n if offers:\n if not isinstance(offers, int):\n raise TypeError('Parameter \"offers\" must be an integer')\n\n if offers > 100 or offers < 20:\n raise ValueError('Parameter \"offers\" must be between 20 and 100')\n\n # Report time to completion\n tcomplete = float(nitems - self.tokens_left) / self.status['refillRate'] - (\n 60000 - self.status['refillIn']) / 60000.0\n if tcomplete < 0.0:\n tcomplete = 0.5\n log.debug('Estimated time to complete %d request(s) is %.2f minutes',\n nitems, tcomplete)\n log.debug('\\twith a refill rate of %d token(s) per minute',\n self.status['refillRate'])\n\n # product list\n products = []\n\n pbar = None\n if progress_bar:\n pbar = tqdm(total=nitems)\n\n # Number of requests is dependent on the number of items and\n # request limit.
Use available tokens first\n idx = 0 # or number complete\n while idx < nitems:\n nrequest = nitems - idx\n\n # cap request\n if nrequest > REQUEST_LIMIT:\n nrequest = REQUEST_LIMIT\n\n # request from keepa and increment current position\n item_request = items[idx:idx + nrequest]\n response = await self._product_query(\n item_request,\n product_code_is_asin,\n stats=stats,\n domain=domain, stock=stock,\n offers=offers, update=update,\n history=history, rating=rating,\n to_datetime=to_datetime,\n out_of_stock_as_nan=out_of_stock_as_nan,\n buybox=buybox,\n wait=wait,\n days=days,\n only_live_offers=only_live_offers,\n )\n idx += nrequest\n products.extend(response['products'])\n\n if pbar is not None:\n pbar.update(nrequest)\n\n return products\n\n @is_documented_by(Keepa._product_query)\n async def _product_query(self, items, product_code_is_asin=True, **kwargs):\n # ASINs convert to comma joined string\n assert len(items) <= 100\n\n if product_code_is_asin:\n kwargs['asin'] = ','.join(items)\n else:\n kwargs['code'] = ','.join(items)\n\n kwargs['key'] = self.accesskey\n kwargs['domain'] = DCODES.index(kwargs['domain'])\n\n # Convert bool values to 0 and 1.\n kwargs['stock'] = int(kwargs['stock'])\n kwargs['history'] = int(kwargs['history'])\n kwargs['rating'] = int(kwargs['rating'])\n kwargs['buybox'] = int(kwargs['buybox'])\n\n if kwargs['update'] is None:\n del kwargs['update']\n else:\n kwargs['update'] = int(kwargs['update'])\n\n if kwargs['offers'] is None:\n del kwargs['offers']\n else:\n kwargs['offers'] = int(kwargs['offers'])\n\n if kwargs['only_live_offers'] is None:\n del kwargs['only_live_offers']\n else:\n kwargs['only-live-offers'] = int(kwargs.pop('only_live_offers'))\n # Keepa's param actually doesn't use snake_case.\n # I believe using snake case throughout the Keepa interface is better.\n\n if kwargs['days'] is None:\n del kwargs['days']\n else:\n assert kwargs['days'] > 0\n\n if kwargs['stats'] is None:\n del kwargs['stats']\n\n out_of_stock_as_nan = kwargs.pop('out_of_stock_as_nan', True)\n to_datetime = kwargs.pop('to_datetime', True)\n\n # Query and replace csv with parsed data if history enabled\n wait = kwargs.get(\"wait\")\n kwargs.pop(\"wait\", None)\n response = await self._request('product', kwargs, wait=wait)\n if kwargs['history']:\n for product in response['products']:\n if product['csv']: # if data exists\n product['data'] = parse_csv(product['csv'],\n to_datetime,\n out_of_stock_as_nan)\n\n if kwargs.get('stats', None):\n for product in response['products']:\n stats = product.get('stats', None)\n if stats:\n product['stats_parsed'] = _parse_stats(stats, to_datetime)\n\n return response\n\n @is_documented_by(Keepa.best_sellers_query)\n async def best_sellers_query(self, category, rank_avg_range=0,\n domain='US', wait=True):\n assert domain in DCODES, 'Invalid domain code'\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'category': category,\n 'range': rank_avg_range}\n\n response = await self._request('bestsellers', payload, wait=wait)\n if 'bestSellersList' in response:\n return response['bestSellersList']['asinList']\n else: # pragma: no cover\n log.info('Best sellers search results not yet available')\n\n @is_documented_by(Keepa.search_for_categories)\n async def search_for_categories(self, searchterm, domain='US', wait=True):\n assert domain in DCODES, 'Invalid domain code'\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'type': 'category',\n 'term': searchterm}\n\n response = await 
self._request('search', payload, wait=wait)\n if response['categories'] == {}: # pragma no cover\n raise Exception('Categories search results not yet available ' +\n 'or no search terms found.')\n else:\n return response['categories']\n\n @is_documented_by(Keepa.category_lookup)\n async def category_lookup(self, category_id, domain='US',\n include_parents=0, wait=True):\n assert domain in DCODES, 'Invalid domain code'\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'category': category_id,\n 'parents': include_parents}\n\n response = await self._request('category', payload, wait=wait)\n if response['categories'] == {}: # pragma no cover\n raise Exception('Category lookup results not yet available or no' +\n 'match found.')\n else:\n return response['categories']\n\n @is_documented_by(Keepa.seller_query)\n async def seller_query(self, seller_id, domain='US', to_datetime=True, \n storefront=False, update=None, wait=True):\n if isinstance(seller_id, list):\n if len(seller_id) > 100:\n err_str = 'seller_id can contain at maximum 100 sellers'\n raise RuntimeError(err_str)\n seller = ','.join(seller_id)\n else:\n seller = seller_id\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'seller': seller}\n\n if storefront:\n payload[\"storefront\"] = int(storefront)\n if update:\n payload[\"update\"] = update\n\n response = await self._request('seller', payload, wait=wait)\n return _parse_seller(response['sellers'], to_datetime)\n\n @is_documented_by(Keepa.product_finder)\n async def product_finder(self, product_parms, domain='US', wait=True):\n # verify valid keys\n for key in product_parms:\n if key not in PRODUCT_REQUEST_KEYS:\n raise RuntimeError('Invalid key \"%s\"' % key)\n\n # verify json type\n key_type = PRODUCT_REQUEST_KEYS[key]\n product_parms[key] = key_type(product_parms[key])\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'selection': json.dumps(product_parms)}\n\n response = await self._request('query', payload, wait=wait)\n return response['asinList']\n\n @is_documented_by(Keepa.deals)\n async def deals(self, deal_parms, domain='US', wait=True):\n # verify valid keys\n for key in deal_parms:\n if key not in DEAL_REQUEST_KEYS:\n raise RuntimeError('Invalid key \"%s\"' % key)\n\n # verify json type\n key_type = DEAL_REQUEST_KEYS[key]\n deal_parms[key] = key_type(deal_parms[key])\n\n payload = {'key': self.accesskey,\n 'domain': DCODES.index(domain),\n 'selection': json.dumps(deal_parms)}\n\n response = await self._request('query', payload, wait=wait)\n return response['asinList']\n\n async def _request(self, request_type, payload, wait=True):\n \"\"\"Queries keepa api server. Parses raw response from keepa\n into a json format. 
Handles errors and waits for available\n tokens if allowed.\n \"\"\"\n\n while True:\n async with aiohttp.ClientSession() as session:\n async with session.get(\n f'https://api.keepa.com/{request_type}/?', params=payload,\n timeout=self._timeout\n ) as raw:\n status_code = str(raw.status)\n if status_code != '200':\n if status_code in SCODES:\n if status_code == '429' and wait:\n await self.wait_for_tokens()\n continue\n else:\n raise Exception(SCODES[status_code])\n else:\n raise Exception('REQUEST_FAILED')\n\n response = await raw.json()\n\n if 'error' in response:\n if response['error']:\n raise Exception(response['error']['message'])\n\n # always update tokens\n self.tokens_left = response['tokensLeft']\n return response\n break\n\n\ndef convert_offer_history(csv, to_datetime=True):\n \"\"\"Converts an offer history to human readable values.\n\n Parameters\n ----------\n csv : list\n Offer list csv obtained from ``['offerCSV']``\n\n to_datetime : bool, optional\n Modifies ``numpy`` minutes to ``datetime.datetime`` values.\n Default ``True``.\n\n Returns\n -------\n times : numpy.ndarray\n List of time values for an offer history.\n\n prices : numpy.ndarray\n Price (including shipping) of an offer for each time at an\n index of times.\n\n \"\"\"\n # convert these values to numpy arrays\n times = csv[::3]\n values = np.array(csv[1::3])\n values += np.array(csv[2::3]) # add in shipping\n\n # convert to dollars and datetimes\n times = keepa_minutes_to_time(times, to_datetime)\n prices = values/100.0\n return times, prices\n\n\ndef keepa_minutes_to_time(minutes, to_datetime=True):\n \"\"\"Accepts an array or list of minutes and converts it to a numpy\n datetime array. Assumes that keepa time is from keepa minutes\n from ordinal.\n \"\"\"\n\n # Convert to timedelta64 and shift\n dt = np.array(minutes, dtype='timedelta64[m]')\n dt = dt + KEEPA_ST_ORDINAL # shift from ordinal\n\n # Convert to datetime if requested\n if to_datetime:\n return dt.astype(datetime.datetime)\n else:\n return dt\n\n\ndef run_and_get(coro):\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n task = loop.create_task(coro)\n loop.run_until_complete(task)\n return task.result()\n"
] | [
[
"pandas.DataFrame",
"numpy.asarray",
"numpy.datetime64",
"numpy.array",
"numpy.unique"
]
] |
se-hwan/MIT_Driverless | [
"05e416fb26f968300826f0deb0953be9afb22bfe"
] | [
"mpc/kmpc_casadi/utility/casadi-example_pack-v3.4.4/python/vdp_collocation2.py"
] | [
"#\n# This file is part of CasADi.\n#\n# CasADi -- A symbolic framework for dynamic optimization.\n# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,\n# K.U. Leuven. All rights reserved.\n# Copyright (C) 2011-2014 Greg Horn\n#\n# CasADi is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or (at your option) any later version.\n#\n# CasADi is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with CasADi; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n#\n#\n# -*- coding: utf-8 -*-\nfrom casadi import *\nfrom casadi.tools import *\n\nimport numpy as NP\nimport matplotlib.pyplot as plt\n\nnk = 20 # Control discretization\ntf = 10.0 # End time\n\n# Declare variables (use scalar graph)\nt = SX.sym(\"t\") # time\nu = SX.sym(\"u\") # control\n\nstates = struct_symSX([\n entry('x',shape=2), # vdp oscillator states\n entry('L') # helper state: Langrange integrand\n ])\n\n# Create a structure for the right hand side\nrhs = struct_SX(states)\nx = states['x']\nrhs[\"x\"] = vertcat((1 - x[1]*x[1])*x[0] - x[1] + u, x[0])\nrhs[\"L\"] = x[0]*x[0] + x[1]*x[1] + u*u\n\n# ODE right hand side function\nf = Function('f', [t,states,u],[rhs])\n\n# Objective function (meyer term)\nm = Function('m', [t,states,u],[states[\"L\"]])\n\n# Control bounds\nu_min = -0.75\nu_max = 1.0\nu_init = 0.0\n\nu_lb = NP.array([u_min])\nu_ub = NP.array([u_max])\nu_init = NP.array([u_init])\n\n# State bounds and initial guess\nx_min = [-inf, -inf, -inf]\nx_max = [ inf, inf, inf]\nxi_min = [ 0.0, 1.0, 0.0]\nxi_max = [ 0.0, 1.0, 0.0]\nxf_min = [ 0.0, 0.0, -inf]\nxf_max = [ 0.0, 0.0, inf]\nx_init = [ 0.0, 0.0, 0.0]\n\n# Dimensions\nnx = 3\nnu = 1\n\n# Choose collocation points\ntau_root = [0] + collocation_points(3,\"radau\")\n\n# Degree of interpolating polynomial\nd = len(tau_root)-1\n\n# Size of the finite elements\nh = tf/nk\n\n# Coefficients of the collocation equation\nC = NP.zeros((d+1,d+1))\n\n# Coefficients of the continuity equation\nD = NP.zeros(d+1)\n\n# Dimensionless time inside one control interval\ntau = SX.sym(\"tau\")\n\n# All collocation time points\nT = NP.zeros((nk,d+1))\nfor k in range(nk):\n for j in range(d+1):\n T[k,j] = h*(k + tau_root[j])\n\n# For all collocation points\nfor j in range(d+1):\n # Construct Lagrange polynomials to get the polynomial basis at the collocation point\n L = 1\n for r in range(d+1):\n if r != j:\n L *= (tau-tau_root[r])/(tau_root[j]-tau_root[r])\n\n # Evaluate the polynomial at the final time to get the coefficients of the continuity equation\n lfcn = Function('lfcn', [tau],[L])\n D[j] = lfcn(1.0)\n\n # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity equation\n tfcn = Function('tfcn', [tau],[tangent(L,tau)])\n for r in range(d+1):\n C[j,r] = tfcn(tau_root[r])\n\n# Structure holding NLP variables\nV = struct_symMX([\n (\n entry(\"X\",repeat=[nk+1,d+1],struct=states),\n entry(\"U\",repeat=[nk],shape=nu)\n )\n ])\n\nvars_lb = V()\nvars_ub = V()\nvars_init = V()\n\n# Set states and its bounds\nvars_init[\"X\",:,:] = 
repeated(repeated(x_init))\nvars_lb[\"X\",:,:] = repeated(repeated(x_min))\nvars_ub[\"X\",:,:] = repeated(repeated(x_max))\n\n# Set controls and its bounds\nvars_init[\"U\",:] = repeated(u_init)\nvars_lb[\"U\",:] = repeated(u_min)\nvars_ub[\"U\",:] = repeated(u_max)\n\n# State at initial time\nvars_lb[\"X\",0,0] = xi_min\nvars_ub[\"X\",0,0] = xi_max\n\n# State at end time\nvars_lb[\"X\",-1,0] = xf_min\nvars_ub[\"X\",-1,0] = xf_max\n\n# Constraint function for the NLP\ng = []\nlbg = []\nubg = []\n\n# For all finite elements\nfor k in range(nk):\n\n # For all collocation points\n for j in range(1,d+1):\n\n # Get an expression for the state derivative at the collocation point\n xp_jk = 0\n for r in range (d+1):\n xp_jk += C[r,j]*V[\"X\",k,r]\n\n # Add collocation equations to the NLP\n fk = f(T[k][j], V[\"X\",k,j], V[\"U\",k])\n g.append(h*fk - xp_jk)\n lbg.append(NP.zeros(nx)) # equality constraints\n ubg.append(NP.zeros(nx)) # equality constraints\n\n # Get an expression for the state at the end of the finite element\n xf_k = 0\n for r in range(d+1):\n xf_k += D[r]*V[\"X\",k,r]\n\n # Add continuity equation to NLP\n g.append(V[\"X\",k+1,0] - xf_k)\n lbg.append(NP.zeros(nx))\n ubg.append(NP.zeros(nx))\n\n# Concatenate constraints\ng = vertcat(*g)\n\n# Objective function\nf = m(T[nk-1][d],V[\"X\",nk,0],V[\"U\",nk-1])\n\n# NLP\nnlp = {'x':V, 'f':f, 'g':g}\n\n## ----\n## SOLVE THE NLP\n## ----\n\n# Set options\nopts = {}\nopts[\"expand\"] = True\n#opts[\"ipopt.max_iter\"] = 4\nopts[\"ipopt.linear_solver\"] = 'ma27'\n\n# Allocate an NLP solver\nsolver = nlpsol(\"solver\", \"ipopt\", nlp, opts)\narg = {}\n\n# Initial condition\narg[\"x0\"] = vars_init\n\n# Bounds on x\narg[\"lbx\"] = vars_lb\narg[\"ubx\"] = vars_ub\n\n# Bounds on g\narg[\"lbg\"] = NP.concatenate(lbg)\narg[\"ubg\"] = NP.concatenate(ubg)\n\n# Solve the problem\nres = solver(**arg)\n\n# Print the optimal cost\nprint(\"optimal cost: \", float(res[\"f\"]))\n\n# Retrieve the solution\nopt = V(res[\"x\"])\n\n# Get values at the beginning of each finite element\nx0_opt = opt[\"X\",:,0,\"x\",0]\nx1_opt = opt[\"X\",:,0,\"x\",1]\nx2_opt = opt[\"X\",:,0,\"L\"]\n\nu_opt = opt[\"U\",:,0]\n\ntgrid = NP.linspace(0,tf,nk+1)\ntgrid_u = NP.linspace(0,tf,nk)\n\n# Plot the results\nplt.figure(1)\nplt.clf()\nplt.plot(tgrid,x0_opt,'--')\nplt.plot(tgrid,x1_opt,'-.')\nplt.step(tgrid_u,u_opt,'-')\nplt.title(\"Van der Pol optimization\")\nplt.xlabel('time')\nplt.legend(['x[0] trajectory','x[1] trajectory','u trajectory'])\nplt.grid()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.step",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
ghiffaryr/grplot | [
"43ea08febac4ffecbce0a6a3d679850f5013aa28"
] | [
"grplot/features/plot/treemaps.py"
] | [
"# Squarified Treemap Layout\n# Implements algorithm from Bruls, Huizing, van Wijk, \"Squarified Treemaps\" and Laserson with some modifications\n# (but not using their pseudocode)\n\n\n# INTERNAL FUNCTIONS not meant to be used by the user\n\n\ndef pad_rectangle(rect):\n if rect[\"dx\"] > 2:\n rect[\"x\"] += 1\n rect[\"dx\"] -= 2\n if rect[\"dy\"] > 2:\n rect[\"y\"] += 1\n rect[\"dy\"] -= 2\n\n\ndef layoutrow(sizes, x, y, dx, dy):\n # generate rects for each size in sizes\n # dx >= dy\n # they will fill up height dy, and width will be determined by their area\n # sizes should be pre-normalized wrt dx * dy (i.e., they should be same units)\n covered_area = sum(sizes)\n width = covered_area / dy\n rects = []\n for size in sizes:\n rects.append({\"x\": x, \"y\": y, \"dx\": width, \"dy\": size / width})\n y += size / width\n return rects\n\n\ndef layoutcol(sizes, x, y, dx, dy):\n # generate rects for each size in sizes\n # dx < dy\n # they will fill up width dx, and height will be determined by their area\n # sizes should be pre-normalized wrt dx * dy (i.e., they should be same units)\n covered_area = sum(sizes)\n height = covered_area / dx\n rects = []\n for size in sizes:\n rects.append({\"x\": x, \"y\": y, \"dx\": size / height, \"dy\": height})\n x += size / height\n return rects\n\n\ndef layout(sizes, x, y, dx, dy):\n return (\n layoutrow(sizes, x, y, dx, dy) if dx >= dy else layoutcol(sizes, x, y, dx, dy)\n )\n\n\ndef leftoverrow(sizes, x, y, dx, dy):\n # compute remaining area when dx >= dy\n covered_area = sum(sizes)\n width = covered_area / dy\n leftover_x = x + width\n leftover_y = y\n leftover_dx = dx - width\n leftover_dy = dy\n return (leftover_x, leftover_y, leftover_dx, leftover_dy)\n\n\ndef leftovercol(sizes, x, y, dx, dy):\n # compute remaining area when dx >= dy\n covered_area = sum(sizes)\n height = covered_area / dx\n leftover_x = x\n leftover_y = y + height\n leftover_dx = dx\n leftover_dy = dy - height\n return (leftover_x, leftover_y, leftover_dx, leftover_dy)\n\n\ndef leftover(sizes, x, y, dx, dy):\n return (\n leftoverrow(sizes, x, y, dx, dy)\n if dx >= dy\n else leftovercol(sizes, x, y, dx, dy)\n )\n\n\ndef worst_ratio(sizes, x, y, dx, dy):\n return max(\n [\n max(rect[\"dx\"] / rect[\"dy\"], rect[\"dy\"] / rect[\"dx\"])\n for rect in layout(sizes, x, y, dx, dy)\n ]\n )\n\n\n# PUBLIC API\n\n\ndef squarify(sizes, x, y, dx, dy):\n \"\"\"Compute treemap rectangles.\n Given a set of values, computes a treemap layout in the specified geometry\n using an algorithm based on Bruls, Huizing, van Wijk, \"Squarified Treemaps\".\n See README for example usage.\n Parameters\n ----------\n sizes : list-like of numeric values\n The set of values to compute a treemap for. `sizes` must be positive\n values sorted in descending order and they should be normalized to the\n total area (i.e., `dx * dy == sum(sizes)`)\n x, y : numeric\n The coordinates of the \"origin\".\n dx, dy : numeric\n The full width (`dx`) and height (`dy`) of the treemap.\n Returns\n -------\n list[dict]\n Each dict in the returned list represents a single rectangle in the\n treemap. 
The order corresponds to the input order.\n \"\"\"\n sizes = list(map(float, sizes))\n\n if len(sizes) == 0:\n return []\n\n if len(sizes) == 1:\n return layout(sizes, x, y, dx, dy)\n\n # figure out where 'split' should be\n i = 1\n while i < len(sizes) and worst_ratio(sizes[:i], x, y, dx, dy) >= worst_ratio(\n sizes[: (i + 1)], x, y, dx, dy\n ):\n i += 1\n current = sizes[:i]\n remaining = sizes[i:]\n\n (leftover_x, leftover_y, leftover_dx, leftover_dy) = leftover(current, x, y, dx, dy)\n return layout(current, x, y, dx, dy) + squarify(\n remaining, leftover_x, leftover_y, leftover_dx, leftover_dy\n )\n\n\ndef padded_squarify(sizes, x, y, dx, dy):\n \"\"\"Compute padded treemap rectangles.\n See `squarify` docstring for details. The only difference is that the\n returned rectangles have been \"padded\" to allow for a visible border.\n \"\"\"\n rects = squarify(sizes, x, y, dx, dy)\n for rect in rects:\n pad_rectangle(rect)\n return rects\n\n\ndef normalize_sizes(sizes, dx, dy):\n \"\"\"Normalize list of values.\n Normalizes a list of numeric values so that `sum(sizes) == dx * dy`.\n Parameters\n ----------\n sizes : list-like of numeric values\n Input list of numeric values to normalize.\n dx, dy : numeric\n The dimensions of the full rectangle to normalize total values to.\n Returns\n -------\n list[numeric]\n The normalized values.\n \"\"\"\n total_size = sum(sizes)\n total_area = dx * dy\n sizes = map(float, sizes)\n sizes = map(lambda size: size * total_area / total_size, sizes)\n return list(sizes)\n\n\ndef plot(\n sizes,\n norm_x=100,\n norm_y=100,\n color=None,\n label=None,\n value=None,\n ax=None,\n pad=False,\n bar_kwargs=None,\n text_kwargs=None,\n **kwargs\n):\n \"\"\"Plotting with Matplotlib.\n Parameters\n ----------\n sizes\n input for squarify\n norm_x, norm_y\n x and y values for normalization\n color\n color string or list-like (see Matplotlib documentation for details)\n label\n list-like used as label text\n value\n list-like used as value text (in most cases identical with sizes argument)\n ax\n Matplotlib Axes instance\n pad\n draw rectangles with a small gap between them\n bar_kwargs : dict\n keyword arguments passed to matplotlib.Axes.bar\n text_kwargs : dict\n keyword arguments passed to matplotlib.Axes.text\n **kwargs\n Any additional kwargs are merged into `bar_kwargs`. 
Explicitly provided\n kwargs here will take precedence.\n Returns\n -------\n matplotlib.axes.Axes\n Matplotlib Axes\n \"\"\"\n\n import matplotlib.pyplot as plt\n from math import ceil\n\n if ax is None:\n ax = plt.gca()\n\n if color is None:\n color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n if len(sizes) > len(color_cycle):\n color_cycle = color_cycle * ceil(len(color_cycle)/len(sizes))\n color = color_cycle[:len(sizes)]\n\n if bar_kwargs is None:\n bar_kwargs = {}\n if text_kwargs is None:\n text_kwargs = {}\n if len(kwargs) > 0:\n bar_kwargs.update(kwargs)\n\n normed = normalize_sizes(sizes, norm_x, norm_y)\n\n if pad:\n rects = padded_squarify(normed, 0, 0, norm_x, norm_y)\n else:\n rects = squarify(normed, 0, 0, norm_x, norm_y)\n\n x = [rect[\"x\"] for rect in rects]\n y = [rect[\"y\"] for rect in rects]\n dx = [rect[\"dx\"] for rect in rects]\n dy = [rect[\"dy\"] for rect in rects]\n\n ax.bar(\n x, dy, width=dx, bottom=y, color=color, label=label, align=\"edge\", **bar_kwargs\n )\n\n if value is not None:\n va = \"center\" if label is None else \"top\"\n\n for v, r in zip(value, rects):\n x, y, dx, dy = r[\"x\"], r[\"y\"], r[\"dx\"], r[\"dy\"]\n ax.text(x + dx / 2, y + dy / 2, v, va=va, ha=\"center\", **text_kwargs)\n\n if label is not None:\n va = \"center\" if value is None else \"bottom\"\n for l, r in zip(label, rects):\n x, y, dx, dy = r[\"x\"], r[\"y\"], r[\"dx\"], r[\"dy\"]\n ax.text(x + dx / 2, y + dy / 2, l, va=va, ha=\"center\", **text_kwargs)\n\n ax.set_xlim(0, norm_x)\n ax.set_ylim(0, norm_y)\n ax.axis('off')\n\n return ax"
] | [
[
"matplotlib.pyplot.gca"
]
] |
bkktimber/gluon-nlp | [
"205acce13a83b30eabd7a638e4773e7a4f91059a"
] | [
"tests/unittest/test_sampler.py"
] | [
"import pytest\nimport numpy as np\nfrom mxnet.gluon import data\nimport gluonnlp as nlp\nfrom gluonnlp.data import sampler as s\n\n\nN = 1000\ndef test_sorted_sampler():\n dataset = data.SimpleDataset([np.random.normal(0, 1, (np.random.randint(10, 100), 1, 1))\n for _ in range(N)])\n gt_sample_id = sorted(range(len(dataset)), key=lambda i: dataset[i].shape, reverse=True)\n sample_ret = list(s.SortedSampler([ele.shape[0] for ele in dataset]))\n for lhs, rhs in zip(gt_sample_id, sample_ret):\n assert lhs == rhs\n\[email protected]('seq_lengths', [[np.random.randint(10, 100) for _ in range(N)],\n [(np.random.randint(10, 100), np.random.randint(10, 100))\n for _ in range(N)]])\[email protected]('ratio', [0.0, 0.5])\[email protected]('shuffle', [False, True])\[email protected]('num_buckets', [1, 10, 100, 5000])\[email protected]('bucket_scheme', [s.ConstWidthBucket(),\n s.LinearWidthBucket(),\n s.ExpWidthBucket()])\[email protected]('use_average_length', [False, True])\[email protected]('num_shards', range(4))\ndef test_fixed_bucket_sampler(seq_lengths, ratio, shuffle, num_buckets, bucket_scheme,\n use_average_length, num_shards):\n sampler = s.FixedBucketSampler(seq_lengths,\n batch_size=8,\n num_buckets=num_buckets,\n ratio=ratio, shuffle=shuffle,\n use_average_length=use_average_length,\n bucket_scheme=bucket_scheme,\n num_shards=num_shards)\n print(sampler.stats())\n total_sampled_ids = []\n for batch_sample_ids in sampler:\n if num_shards > 0:\n assert len(batch_sample_ids) == num_shards\n else:\n total_sampled_ids.extend(batch_sample_ids)\n if num_shards == 0:\n assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N\n\[email protected]('bucket_keys', [[1, 5, 10, 100], [10, 100], [200]])\[email protected]('ratio', [0.0, 0.5])\[email protected]('shuffle', [False, True])\ndef test_fixed_bucket_sampler_with_single_key(bucket_keys, ratio, shuffle):\n seq_lengths = [np.random.randint(10, 100) for _ in range(N)]\n sampler = s.FixedBucketSampler(seq_lengths, batch_size=8, num_buckets=None,\n bucket_keys=bucket_keys, ratio=ratio, shuffle=shuffle)\n print(sampler.stats())\n total_sampled_ids = []\n for batch_sample_ids in sampler:\n total_sampled_ids.extend(batch_sample_ids)\n assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N\n\[email protected]('bucket_keys', [[(1, 1), (5, 10), (10, 20), (20, 10), (100, 100)],\n [(20, 20), (30, 15), (100, 100)],\n [(100, 200)]])\[email protected]('ratio', [0.0, 0.5])\[email protected]('shuffle', [False, True])\ndef test_fixed_bucket_sampler_with_single_key(bucket_keys, ratio, shuffle):\n seq_lengths = [(np.random.randint(10, 100), np.random.randint(10, 100)) for _ in range(N)]\n sampler = s.FixedBucketSampler(seq_lengths, batch_size=8, num_buckets=None,\n bucket_keys=bucket_keys, ratio=ratio, shuffle=shuffle)\n print(sampler.stats())\n total_sampled_ids = []\n for batch_sample_ids in sampler:\n total_sampled_ids.extend(batch_sample_ids)\n assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N\n\n\ndef test_fixed_bucket_sampler_compactness():\n samples = list(\n s.FixedBucketSampler(\n np.arange(16, 32), 8, num_buckets=2,\n bucket_scheme=nlp.data.ConstWidthBucket()))\n assert len(samples) == 2\n\n\[email protected]('seq_lengths', [[np.random.randint(10, 100) for _ in range(N)],\n [(np.random.randint(10, 100), np.random.randint(10, 100))\n for _ in range(N)]])\[email protected]('mult', [10, 100])\[email protected]('batch_size', [5, 7])\[email protected]('shuffle', [False, True])\ndef test_sorted_bucket_sampler(seq_lengths, 
mult, batch_size, shuffle):\n sampler = s.SortedBucketSampler(sort_keys=seq_lengths,\n batch_size=batch_size,\n mult=mult, shuffle=shuffle)\n total_sampled_ids = []\n for batch_sample_ids in sampler:\n total_sampled_ids.extend(batch_sample_ids)\n assert len(set(total_sampled_ids)) == len(total_sampled_ids) == N\n"
] | [
[
"numpy.arange",
"numpy.random.randint"
]
] |
jun2tong/bnp-anomaly | [
"c7fa106b5bb29ed6688a3d91e3f302a0a130b896"
] | [
"tests/endtoend/TestRealRandomGroupXData.py"
] | [
"import numpy as np\nimport unittest\nfrom collections import OrderedDict\n\nimport bnpy\nfrom AbstractEndToEndTest import AbstractEndToEndTest\n\n\nclass TestEndToEnd(AbstractEndToEndTest):\n __test__ = True\n\n def setUp(self):\n \"\"\" Create the dataset\n \"\"\"\n rng = np.random.RandomState(0)\n X = rng.rand(100, 2)\n doc_range = [0, 20, 40, 50, 100]\n self.Data = bnpy.data.GroupXData(X=X, doc_range=doc_range)\n\n self.possibleAllocModelNames = [\"FiniteMixtureModel\",\n \"FiniteTopicModel\",\n \"HDPTopicModel\",\n ]\n self.possibleObsModelNames = [\"Gauss\",\n \"DiagGauss\",\n \"ZeroMeanGauss\",\n ]\n self.possibleInitNames = [\"randexamples\",\n \"randexamplesbydist\",\n ]\n\n self.possibleLearnAlgsForAllocModel = dict(\n FiniteMixtureModel=[\"EM\", \"VB\", \"soVB\", \"moVB\"],\n FiniteTopicModel=[\"VB\", \"soVB\", \"moVB\"],\n HDPTopicModel=[\"VB\", \"soVB\", \"moVB\"],\n )\n\n def nextAllocKwArgsForVB(self):\n for aName in self.possibleAllocModelNames:\n kwargs = OrderedDict()\n kwargs['name'] = aName\n if aName == 'FiniteMixtureModel':\n for gamma in [0.1, 1.0, 9.9]:\n kwargs['gamma'] = gamma\n yield kwargs\n elif aName == 'DPMixtureModel':\n for gamma0 in [1.0, 9.9]:\n kwargs['gamma0'] = gamma0\n yield kwargs\n elif aName == 'FiniteTopicModel':\n for alpha in [0.1, 0.5, 22]:\n kwargs['alpha'] = alpha\n yield kwargs\n elif aName == 'HDPTopicModel':\n for alpha in [0.1, 0.5]:\n for gamma in [1.0, 5.0]:\n kwargs['gamma'] = gamma\n yield kwargs\n\n def nextObsKwArgsForVB(self, aName):\n for oName in self.possibleObsModelNames:\n for sF in [0.5, 1.0, 5.0]:\n for ECovMat in ['eye', 'covdata']:\n kwargs = OrderedDict()\n kwargs['name'] = oName\n kwargs['ECovMat'] = ECovMat\n kwargs['sF'] = sF\n yield kwargs\n\n def nextInitKwArgs(self, aName, oName):\n for iName in self.possibleInitNames:\n for K in [5, 10]:\n kwargs = OrderedDict()\n kwargs['initname'] = iName\n kwargs['K'] = K\n yield kwargs\n"
] | [
[
"numpy.random.RandomState"
]
] |
BBN-E/nlplingo | [
"32ff17b1320937faa3d3ebe727032f4b3e7a353d"
] | [
"nlplingo/nn/extractor.py"
] | [
"import codecs\nimport json\nimport os\n\nimport numpy as np\nfrom nlplingo.nn.sequence_model import SequenceXLMRBase, SequenceXLMRCustom\nfrom nlplingo.nn.spanpair_model import SpanPairModelEmbedded\nfrom nlplingo.tasks.entitycoref.feature import EntityCorefFeatureGenerator\nfrom nlplingo.tasks.entitycoref.generator import EntityCorefExampleGenerator\nfrom nlplingo.tasks.eventcoref.feature import EventCorefFeatureGenerator\nfrom nlplingo.tasks.eventcoref.generator import EventCorefExampleGenerator\nfrom nlplingo.tasks.eventpair.feature import EventPairFeatureGenerator\nfrom nlplingo.tasks.eventpair.generator import EventPairExampleGenerator\nfrom nlplingo.tasks.eventframe.feature import EventFramePairFeatureGenerator\nfrom nlplingo.tasks.eventframe.generator import EventFramePairExampleGenerator\n\nfrom keras.models import load_model as keras_load_model\nfrom keras.models import Model as KerasModel\n\nfrom nlplingo.tasks.eventargument.feature import EventArgumentFeatureGenerator\nfrom nlplingo.tasks.eventargument.generator import EventArgumentExampleGenerator\nfrom nlplingo.tasks.eventrelation.feature import EventEventRelationFeatureGenerator\nfrom nlplingo.tasks.eventrelation.generator import EventEventRelationExampleGenerator\nfrom nlplingo.tasks.entityrelation.feature import EntityRelationFeatureGenerator\nfrom nlplingo.tasks.entityrelation.generator import EntityRelationExampleGenerator\n\nfrom nlplingo.tasks.event_domain import EventDomain\nfrom nlplingo.tasks.eventtrigger.feature import EventTriggerFeatureGenerator\nfrom nlplingo.tasks.eventtrigger.generator import EventTriggerExampleGenerator\n\nfrom nlplingo.nn.argument_model import CNNArgumentModel\nfrom nlplingo.nn.argument_model import GNNArgumentModel\nfrom nlplingo.nn.argument_model import MultiLayerArgumentModelEmbedded, WithinSentenceArgumentModel\nfrom nlplingo.nn.extraction_model import ExtractionModel\nfrom nlplingo.nn.keras_models.common import keras_custom_objects\nfrom nlplingo.nn.trigger_model import CNNTriggerModel\nfrom nlplingo.nn.trigger_model import MultiLayerTriggerModelEmbedded\nfrom nlplingo.nn.eventpair_model import EventPairModelEmbeddedTrigger\nfrom nlplingo.nn.event_event_relation_model import MultiLayerEventEventRelationModel, WithinSentenceEER, EventEventRelationStackedOpenNREModel, WithinSentenceEERGeneral\nfrom nlplingo.nn.entity_entity_relation_model import MultiLayerEntityRelationModelEmbedded, WithinSentenceEntityRelationModel\nfrom nlplingo.nn.eventframepair_model import EventFramePairModelEmbedded\n\nfrom nlplingo.nn.hyperparameters import HyperParameters\nfrom nlplingo.tasks.common.feature.feature_setting import FeatureSetting\n\nimport logging\n\nfrom nlplingo.tasks.sequence.feature import SequenceFeatureGenerator\nfrom nlplingo.tasks.sequence.generator import SequenceExampleGenerator\n\nlogger = logging.getLogger(__name__)\n\n\nclass Extractor(object):\n trigger_model_table = {\n 'event-trigger_cnn': CNNTriggerModel,\n 'event-trigger_cnn-embedded': CNNTriggerModel,\n 'event-trigger_multilayer-embedded': MultiLayerTriggerModelEmbedded,\n 'event-trigger_sentence-embedded': MultiLayerTriggerModelEmbedded,\n }\n\n argument_model_table = {\n 'event-argument_cnn': CNNArgumentModel,\n 'event-argument_cnn-embedded': CNNArgumentModel,\n 'event-argument_gnn': GNNArgumentModel,\n 'event-argument_multilayer-embedded': MultiLayerArgumentModelEmbedded,\n 'event-argument_bert-mention': WithinSentenceArgumentModel\n }\n\n eer_model_table = {\n 'event-event-relation_multilayer': 
MultiLayerEventEventRelationModel,\n 'event-event-relation_multilayer-embedded': MultiLayerEventEventRelationModel,\n 'event-event-relation_two_models_with_postprocessing': EventEventRelationStackedOpenNREModel,\n 'event-event-relation_cnn-embedded': WithinSentenceEER, # This exists for legacy reasons\n 'event-event-relation_within-sentence': WithinSentenceEER,\n 'event-event-relation_general_decode-embedded': WithinSentenceEERGeneral\n }\n\n entity_relation_model_table = {\n 'entity-entity-relation_multilayer-embedded': MultiLayerEntityRelationModelEmbedded,\n 'entity-entity-relation_bert-mention': WithinSentenceEntityRelationModel\n }\n\n eventpair_model_table = {\n 'event-pair_embedded': SpanPairModelEmbedded,\n 'event-pair_embedded_trigger': EventPairModelEmbeddedTrigger\n }\n\n eventframepair_model_table = {\n 'event-framepair_embedded': EventFramePairModelEmbedded\n }\n\n entitycoref_model_table = {\n 'entitycoref_embedded': SpanPairModelEmbedded\n }\n\n eventcoref_model_table = {\n 'eventcoref_embedded': SpanPairModelEmbedded\n }\n\n sequence_model_table = {\n 'sequence_xlmr-base': SequenceXLMRBase,\n 'sequence_xlmr-custom': SequenceXLMRCustom\n }\n\n\n def __init__(self, params, extractor_params, embeddings, load_from_file=False):\n \"\"\"\n :type params: dict # general parameters\n :type extractor_params: dict # specific to this extractor\n :type embeddings: dict[str : nlplingo.embeddings.word_embeddings.WordEmbedding]\n \"\"\"\n\n self.extractor_params = extractor_params\n self.extractor_name = extractor_params.get('extractor_name', None)\n\n self.task = extractor_params.get('task', None)\n self.engine = extractor_params.get('engine', None)\n\n self.model_type = extractor_params['model_type']\n \"\"\":type: str\"\"\"\n\n self.domain = EventDomain.read_domain_ontology_file(extractor_params['domain_ontology'],\n domain_name=extractor_params.get('domain_name', 'general'))\n \"\"\":type: nlplingo.tasks.event_domain.EventDomain\"\"\"\n self.domain.build_prior(extractor_params.get('ontology_yaml'))\n\n self.model_file = extractor_params['model_file']\n \"\"\":type: str\"\"\"\n\n self.class_thresholds_path = extractor_params.get('class_thresholds')\n \"\"\":type: str\"\"\"\n\n self.class_thresholds_global = float(\n extractor_params.get('class_thresholds_global', -1.0))\n \"\"\":type: float\"\"\"\n\n self.use_trigger_safelist = extractor_params.get('trigger.use_safelist', False)\n\n if 'engine' not in extractor_params or (('engine' in extractor_params) and (extractor_params['engine'] == 'keras')):\n self.hyper_parameters = HyperParameters(extractor_params['hyper-parameters'], load_from_file)\n elif extractor_params['engine'] == 'pytorch':\n self.hyper_parameters = HyperParameters(extractor_params['hyper-parameters'], load_from_file)\n # elif extractor_params['engine'] == 'transformers':\n # pass\n else:\n raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))\n\n \"\"\":type: nlplingo.nn.extractor.HyperParameters\"\"\"\n\n self.feature_setting = FeatureSetting(self.extractor_params['features'])\n\n self.extraction_model = None\n if self.model_type in self.trigger_model_table:\n self.extraction_model = self.trigger_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting)\n elif self.model_type in self.argument_model_table:\n self.extraction_model = self.argument_model_table[self.model_type](params, extractor_params, self.domain,\n embeddings, self.hyper_parameters,\n 
self.feature_setting)\n elif self.model_type in self.eventpair_model_table:\n self.extraction_model = self.eventpair_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting) # TODO: fix this model\n elif self.model_type in self.eer_model_table:\n self.extraction_model = self.eer_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting)\n elif self.model_type in self.entity_relation_model_table:\n self.extraction_model = self.entity_relation_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting)\n elif self.model_type in self.eventframepair_model_table:\n self.extraction_model = self.eventframepair_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting) # TODO: fix this model\n elif self.model_type in self.entitycoref_model_table:\n self.extraction_model = self.entitycoref_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting)\n elif self.model_type in self.eventcoref_model_table:\n self.extraction_model = self.eventcoref_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters,\n self.feature_setting)\n elif self.model_type in self.sequence_model_table:\n if self.task == 'event-trigger':\n self.domain.create_sequence_types(self.domain.event_types)\n elif self.task == 'event-argument':\n self.domain.create_sequence_types(self.domain.event_roles)\n elif self.task == 'ner':\n self.domain.create_sequence_types(self.domain.entity_types)\n\n self.extraction_model = self.sequence_model_table[self.model_type](params, extractor_params, self.domain, embeddings,\n self.hyper_parameters, self.feature_setting)\n elif self.model_type.startswith('oregon'): # TODO hack, until YS has time to properly integrate after BETTER eval\n pass\n else:\n raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))\n\n \"\"\":type: nlplingo.nn.event_model.ExtractionModel\"\"\"\n # TODO: extend this to support EventEventRelation models\n if load_from_file:\n logging.info('Loading previously trained model')\n\n if extractor_params.get('engine', None) == 'keras':\n self.load_keras()\n if extractor_params.get('engine', None) is None: # TODO use framework\n self.load_keras()\n elif extractor_params['engine'] == 'pytorch':\n pass\n # elif extractor_params['engine'] == 'transformers':\n # pass\n else:\n raise Exception(\n 'Only Keras or PyTorch engines are supported.')\n\n #if ('engine' in extractor_params) and (extractor_params['engine'] == 'pytorch'):\n # if load_from_file or self.extraction_model.hyper_params.load:\n # pass\n \"\"\"\n self.extraction_model.hyper_params.num_class = self.extraction_model.num_output\n if self.extraction_model.word_embeddings is not None:\n trainer = self.extraction_model.model(self.extraction_model.extractor_params, self.extraction_model.hyper_params.dict, self.extraction_model.optimizer,\n feature_names=self.extraction_model.features.feature_strings, emb_matrix=self.extraction_model.word_embeddings)\n else: # frozen, external embedding case\n if self.extraction_model.embeddings_vector_size is not None:\n self.extraction_model.hyper_params.dict['emb_dim'] = self.extraction_model.embeddings_vector_size\n trainer = 
self.extraction_model.model(self.extraction_model.extractor_params, self.extraction_model.hyper_params.dict, self.extraction_model.optimizer,\n feature_names=self.extraction_model.features.feature_strings)\n\n if self.model_file:\n trainer.load(self.model_file)\n self.extraction_model.trained_model = trainer\n \"\"\"\n\n self.feature_generator = None # feature generator\n self.example_generator = None # example generator\n\n # TODO this should really be renamed as task instead of model_type\n if self.model_type.startswith('event-trigger_'):\n self.feature_generator = EventTriggerFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting, self.domain)\n self.example_generator = EventTriggerExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('event-argument_'):\n self.feature_generator = EventArgumentFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)\n self.example_generator = EventArgumentExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('event-pair_'):\n self.feature_generator = EventPairFeatureGenerator(extractor_params)\n self.example_generator = EventPairExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('event-event-relation_'):\n self.feature_generator = EventEventRelationFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)\n self.example_generator = EventEventRelationExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('entity-entity-relation_'):\n self.feature_generator = EntityRelationFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)\n self.example_generator = EntityRelationExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('event-framepair_'):\n self.feature_generator = EventFramePairFeatureGenerator(extractor_params)\n self.example_generator = EventFramePairExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('entitycoref_'):\n self.feature_generator = EntityCorefFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)\n self.example_generator = EntityCorefExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('eventcoref_'):\n self.feature_generator = EventCorefFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting)\n self.example_generator = EventCorefExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n elif self.model_type.startswith('oregon'): # TODO hack, until YS has time to properly integrate after BETTER eval\n pass\n elif self.model_type.startswith('sequence_'):\n self.feature_generator = SequenceFeatureGenerator(extractor_params, self.hyper_parameters, self.feature_setting, self.extraction_model.tokenizer, self.domain)\n self.example_generator = SequenceExampleGenerator(self.domain, params, extractor_params,\n self.hyper_parameters)\n else:\n raise RuntimeError('Extractor model type: {} not implemented.'.format(self.model_type))\n\n self.extraction_model_last_layer = None\n \"\"\":type: keras.models.Model\"\"\"\n self.emit_vectors = extractor_params.get('output_vectors', False)\n\n self.class_thresholds = None\n # load saved thresholds from file\n 
self._build_threshold_vector()\n\n # use a global threshold value if they were not loaded\n if self.class_thresholds is None:\n logging.info('Using global threshold override for {}'.format(\n self.extractor_name))\n\n # use defaults, if no global override given in extractor parameters\n if self.class_thresholds_global < 0.0:\n logging.info('Using default thresholds for {}'.format(\n self.extractor_name))\n self.class_thresholds_global = 0.5\n number_of_classes = len(self.domain.event_types.keys())\n\n logging.info('- global threshold ={}'.format(self.class_thresholds_global))\n self.class_thresholds = np.asarray(\n [self.class_thresholds_global] * number_of_classes)\n\n def _build_threshold_vector(self):\n path = self.class_thresholds_path\n if path is not None and os.path.isfile(str(path)):\n if path.endswith('.npz'):\n self.class_thresholds = np.load(str(path))['thresholds']\n print('Loaded saved thresholds from NPZ for {}'.format(\n self.extractor_name))\n elif path.endswith('.json'):\n number_of_classes = len(self.domain.event_types.keys())\n self.class_thresholds = np.asarray([0.5] * number_of_classes)\n with codecs.open(path, 'r', encoding='utf8') as fh:\n thresholds_json = json.load(fh)\n for label, threshold in thresholds_json.items():\n try:\n index = self.domain.get_event_type_index(label)\n self.class_thresholds[index] = float(threshold)\n except ValueError as e:\n print('The following error occurred while loading '\n 'thresholds from json and will be ignored:\\n'\n '{}'.format(e))\n print('Loaded saved thresholds from JSON for {}'.format(\n self.extractor_name))\n\n def make_last_layer_model(self):\n\n if self.extraction_model_last_layer is not None:\n print(\"Last layer of model has already been built\")\n return\n\n keras_model = self.extraction_model\n if type(keras_model) is not KerasModel:\n keras_model = keras_model.model\n \"\"\":type: keras.models.Model\"\"\"\n print(\"Original model:\")\n try:\n print(keras_model.summary())\n except TypeError:\n print(\"Keras encountered an error when trying to print the model \"\n \"summary. Skipping this printout...\")\n\n self.extraction_model_last_layer = KerasModel(\n inputs=keras_model.input,\n outputs=keras_model.layers[-2].output)\n\n print(\"Copy model:\")\n try:\n print(self.extraction_model_last_layer.summary())\n except TypeError:\n print(\"Keras encountered an error when trying to print the copy's \"\n \"summary. Skipping this printout...\")\n\n def get_embeddings(self, examples, data_list):\n ret = []\n self.make_last_layer_model()\n vectors = self.extraction_model_last_layer.predict(data_list)\n for i, ex in enumerate(examples):\n output_vector = vectors[i, :]\n ret.append(output_vector)\n return ret\n\n def load_keras(self):\n try:\n trained_keras_model = keras_load_model(self.model_file)\n except ValueError:\n custom_objects = keras_custom_objects\n trained_keras_model = keras_load_model(self.model_file, custom_objects)\n\n weights = trained_keras_model.get_weights()\n new_weights = []\n for i, w in enumerate(weights):\n pretrained = self.extraction_model.layers.pretrained_embeddings\n using_pretrained = pretrained is not None\n if using_pretrained and i > 1 and w.shape[0] == pretrained.shape[0]:\n # TODO retrain models to avoid this hack\n pass\n else:\n new_weights.append(w)\n weights = new_weights\n # for i, w in enumerate(weights):\n # print(i, w.shape\n\n\n self.extraction_model.model.set_weights(weights)\n\n"
] | [
[
"numpy.asarray"
]
] |
jacobbieker/NUR_Handin2 | [
"6e620b23191edaec4452d29eac90ec37ced0c038"
] | [
"one.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom one_a import one_a\nfrom one_b import one_b\nfrom one_c import one_c\nfrom one_d import one_d\nfrom one_e import one_e\n\n\ndef random_generator(seed, m=2 ** 64 - 1, a=2349543, c=913842, a1=21, a2=35, a3=4, a4=4294957665):\n \"\"\"\n Generates psuedorandom numbers with a combination of (M)LCC, 64 bit shift, and MWC\n :param seed: Seed to use\n :param m: Determines period of the MLCC\n :param a: For the MLCC\n :param c: For the MLCC\n :param a1: For the first bit shift\n :param a2: For the second bit shift\n :param a3: For the third bit shift\n :param a4: For the MWC\n :return:\n \"\"\"\n\n # First linear congruential generator\n # While true, so the generator never stops making new numbers\n # This is used to make sure teh XOR shift is 64 bit\n bit_64 = 0xffffffffffffffff\n while True:\n # This is MLCC part\n generated_number = (a * seed + c) % m\n # Now bit shift\n generated_number = generated_number ^ (generated_number >> a1) & bit_64\n generated_number = generated_number ^ (generated_number << a2) & bit_64\n generated_number = generated_number ^ (generated_number >> a3) & bit_64\n\n # Now MWC part\n mwc_out = a4 * (generated_number & (2 ** 32 - 1)) + (generated_number >> 32)\n\n seed = mwc_out # set the seed to a new number, so a different number generated next time\n mwc_out = mwc_out / m\n\n if mwc_out > 1.:\n # Have to make it between 1 and 0, so mod 1. makes sure its between 0 and 1 now\n close_to_final = mwc_out % 1.\n else:\n close_to_final = mwc_out\n\n yield close_to_final\n\n\ndef all_one(rand_gen):\n one_a(rand_gen)\n plt.cla()\n one_b(rand_gen)\n plt.cla()\n one_c(rand_gen)\n plt.cla()\n one_d(rand_gen)\n plt.cla()\n one_e(rand_gen)\n plt.cla()\n"
] | [
[
"matplotlib.pyplot.cla"
]
] |
manish-pra/trcopo | [
"df8730f07ef554970c7a0aa653cc42d4886948ec"
] | [
"others/maddpg/utils/noise.py"
] | [
"import numpy as np\n\n\n# from https://github.com/songrotek/DDPG/blob/master/ou_noise.py\nclass OUNoise:\n def __init__(self, action_dimension, scale=0.1, mu=0, theta=0.15, sigma=1):#sigma=0.2\n self.action_dimension = action_dimension\n self.scale = scale\n self.mu = mu\n self.theta = theta\n self.sigma = sigma\n self.state = np.ones(self.action_dimension) * self.mu\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.action_dimension) * self.mu\n\n def noise(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))\n self.state = x + dx\n return self.state * self.scale\n"
] | [
[
"numpy.ones"
]
] |
cyc/estimator | [
"742a07296c8f584150bb02f97be7207130ded5fd"
] | [
"tensorflow_estimator/python/estimator/tpu/tpu_estimator_signals_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TPU Estimator Signalling Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import test\nfrom tensorflow_estimator.python.estimator.tpu import tpu_estimator\n\n\ndef make_input_fn(num_samples):\n a = np.linspace(0, 100.0, num=num_samples)\n b = np.reshape(np.array(a, dtype=np.float32), (len(a), 1))\n\n def input_fn(params):\n batch_size = params['batch_size']\n da1 = dataset_ops.Dataset.from_tensor_slices(a)\n da2 = dataset_ops.Dataset.from_tensor_slices(b)\n\n dataset = dataset_ops.Dataset.zip((da1, da2))\n dataset = dataset.map(lambda fa, fb: {'a': fa, 'b': fb})\n dataset = dataset.batch(batch_size)\n return dataset\n return input_fn, (a, b)\n\n\ndef make_input_fn_with_labels(num_samples):\n a = np.linspace(0, 100.0, num=num_samples)\n b = np.reshape(np.array(a, dtype=np.float32), (len(a), 1))\n\n def input_fn(params):\n batch_size = params['batch_size']\n da1 = dataset_ops.Dataset.from_tensor_slices(a)\n da2 = dataset_ops.Dataset.from_tensor_slices(b)\n\n dataset = dataset_ops.Dataset.zip((da1, da2))\n dataset = dataset.map(lambda fa, fb: ({'a': fa}, fb))\n dataset = dataset.batch(batch_size)\n return dataset\n return input_fn, (a, b)\n\n\nclass TPUEstimatorStoppingSignalsTest(test.TestCase):\n\n def test_normal_output_without_signals(self):\n num_samples = 4\n batch_size = 2\n\n params = {'batch_size': batch_size}\n input_fn, (a, b) = make_input_fn(num_samples=num_samples)\n\n with ops.Graph().as_default():\n dataset = input_fn(params)\n features = dataset_ops.make_one_shot_iterator(dataset).get_next()\n\n # With tf.data.Dataset.batch, the batch is None, i.e., dynamic shape.\n self.assertIsNone(features['a'].shape.as_list()[0])\n\n with session.Session() as sess:\n result = sess.run(features)\n self.assertAllEqual(a[:batch_size], result['a'])\n self.assertAllEqual(b[:batch_size], result['b'])\n\n # This run should work as num_samples / batch_size = 2.\n result = sess.run(features)\n self.assertAllEqual(a[batch_size:num_samples], result['a'])\n self.assertAllEqual(b[batch_size:num_samples], result['b'])\n\n with self.assertRaises(errors.OutOfRangeError):\n # Given num_samples and batch_size, this run should fail.\n sess.run(features)\n\n def test_output_with_stopping_signals(self):\n num_samples = 4\n batch_size = 2\n\n params = {'batch_size': batch_size}\n input_fn, (a, b) = make_input_fn(num_samples=num_samples)\n\n with ops.Graph().as_default():\n dataset = input_fn(params)\n inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size)\n dataset_initializer = 
inputs.dataset_initializer()\n features, _ = inputs.features_and_labels()\n signals = inputs.signals()\n\n # With tf.data.Dataset.batch, the batch is None, i.e., dynamic shape.\n self.assertIsNone(features['a'].shape.as_list()[0])\n\n with session.Session() as sess:\n sess.run(dataset_initializer)\n\n result, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual(a[:batch_size], result['a'])\n self.assertAllEqual(b[:batch_size], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n # This run should work as num_samples / batch_size = 2.\n result, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual(a[batch_size:num_samples], result['a'])\n self.assertAllEqual(b[batch_size:num_samples], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n # This run should work, *but* see STOP ('1') as signals\n _, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(features)\n\n\nclass TPUEstimatorStoppingSignalsWithPaddingTest(test.TestCase):\n\n def test_num_samples_divisible_by_batch_size(self):\n num_samples = 4\n batch_size = 2\n\n params = {'batch_size': batch_size}\n input_fn, (a, b) = make_input_fn(num_samples=num_samples)\n\n with ops.Graph().as_default():\n dataset = input_fn(params)\n inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,\n add_padding=True)\n dataset_initializer = inputs.dataset_initializer()\n features, _ = inputs.features_and_labels()\n signals = inputs.signals()\n\n # With padding, all shapes are static now.\n self.assertEqual(batch_size, features['a'].shape.as_list()[0])\n\n with session.Session() as sess:\n sess.run(dataset_initializer)\n\n result, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual(a[:batch_size], result['a'])\n self.assertAllEqual(b[:batch_size], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([0.] * batch_size,\n evaluated_signals['padding_mask'])\n\n # This run should work as num_samples / batch_size = 2.\n result, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual(a[batch_size:num_samples], result['a'])\n self.assertAllEqual(b[batch_size:num_samples], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([0.] 
* batch_size,\n evaluated_signals['padding_mask'])\n\n # This run should work, *but* see STOP ('1') as signals\n _, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(features)\n\n def test_num_samples_not_divisible_by_batch_size(self):\n num_samples = 5\n batch_size = 2\n\n params = {'batch_size': batch_size}\n input_fn, (a, b) = make_input_fn_with_labels(num_samples=num_samples)\n\n with ops.Graph().as_default():\n dataset = input_fn(params)\n inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,\n add_padding=True)\n dataset_initializer = inputs.dataset_initializer()\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n # With padding, all shapes are static.\n self.assertEqual(batch_size, features['a'].shape.as_list()[0])\n\n with session.Session() as sess:\n sess.run(dataset_initializer)\n\n evaluated_features, evaluated_labels, evaluated_signals = (\n sess.run([features, labels, signals]))\n self.assertAllEqual(a[:batch_size], evaluated_features['a'])\n self.assertAllEqual(b[:batch_size], evaluated_labels)\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([0.] * batch_size,\n evaluated_signals['padding_mask'])\n\n # This run should work as num_samples / batch_size >= 2.\n evaluated_features, evaluated_labels, evaluated_signals = (\n sess.run([features, labels, signals]))\n self.assertAllEqual(a[batch_size:2*batch_size], evaluated_features['a'])\n self.assertAllEqual(b[batch_size:2*batch_size], evaluated_labels)\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([0.] * batch_size,\n evaluated_signals['padding_mask'])\n\n # This is the final partial batch.\n evaluated_features, evaluated_labels, evaluated_signals = (\n sess.run([features, labels, signals]))\n real_batch_size = num_samples % batch_size\n\n # Assert the real part.\n self.assertAllEqual(a[2*batch_size:num_samples],\n evaluated_features['a'][:real_batch_size])\n self.assertAllEqual(b[2*batch_size:num_samples],\n evaluated_labels[:real_batch_size])\n # Assert the padded part.\n self.assertAllEqual([0.0] * (batch_size - real_batch_size),\n evaluated_features['a'][real_batch_size:])\n self.assertAllEqual([[0.0]] * (batch_size - real_batch_size),\n evaluated_labels[real_batch_size:])\n\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n padding = ([.0] * real_batch_size\n + [1.] 
* (batch_size - real_batch_size))\n self.assertAllEqual(padding, evaluated_signals['padding_mask'])\n\n # This run should work, *but* see STOP ('1') as signals\n _, evaluated_signals = sess.run([features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(features)\n\n def test_slice(self):\n num_samples = 3\n batch_size = 2\n\n params = {'batch_size': batch_size}\n input_fn, (a, b) = make_input_fn(num_samples=num_samples)\n\n with ops.Graph().as_default():\n dataset = input_fn(params)\n inputs = tpu_estimator._InputsWithStoppingSignals(dataset, batch_size,\n add_padding=True)\n dataset_initializer = inputs.dataset_initializer()\n features, _ = inputs.features_and_labels()\n signals = inputs.signals()\n\n sliced_features = (\n tpu_estimator._PaddingSignals.slice_tensor_or_dict(\n features, signals))\n\n with session.Session() as sess:\n sess.run(dataset_initializer)\n\n result, evaluated_signals = sess.run([sliced_features, signals])\n self.assertAllEqual(a[:batch_size], result['a'])\n self.assertAllEqual(b[:batch_size], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n # This is the final partial batch.\n result, evaluated_signals = sess.run([sliced_features, signals])\n self.assertEqual(1, len(result['a']))\n self.assertAllEqual(a[batch_size:num_samples], result['a'])\n self.assertAllEqual(b[batch_size:num_samples], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n # This run should work, *but* see STOP ('1') as signals\n _, evaluated_signals = sess.run([sliced_features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(sliced_features)\n\n def test_slice_with_multi_invocations_per_step(self):\n num_samples = 3\n batch_size = 2\n\n params = {'batch_size': batch_size}\n input_fn, (a, b) = make_input_fn(num_samples=num_samples)\n\n with ops.Graph().as_default():\n dataset = input_fn(params)\n inputs = tpu_estimator._InputsWithStoppingSignals(\n dataset, batch_size, add_padding=True, num_invocations_per_step=2)\n dataset_initializer = inputs.dataset_initializer()\n features, _ = inputs.features_and_labels()\n signals = inputs.signals()\n\n sliced_features = (\n tpu_estimator._PaddingSignals.slice_tensor_or_dict(features, signals))\n\n with session.Session() as sess:\n sess.run(dataset_initializer)\n\n result, evaluated_signals = sess.run([sliced_features, signals])\n self.assertAllEqual(a[:batch_size], result['a'])\n self.assertAllEqual(b[:batch_size], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n # This is the final partial batch.\n result, evaluated_signals = sess.run([sliced_features, signals])\n self.assertEqual(1, len(result['a']))\n self.assertAllEqual(a[batch_size:num_samples], result['a'])\n self.assertAllEqual(b[batch_size:num_samples], result['b'])\n self.assertAllEqual([[0.]] * batch_size, evaluated_signals['stopping'])\n\n # We should see 3 continuous batches with STOP ('1') as signals and all\n # of them have mask 1.\n _, evaluated_signals = sess.run([sliced_features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([1.] 
* batch_size,\n evaluated_signals['padding_mask'])\n\n _, evaluated_signals = sess.run([sliced_features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([1.] * batch_size,\n evaluated_signals['padding_mask'])\n\n _, evaluated_signals = sess.run([sliced_features, signals])\n self.assertAllEqual([[1.]] * batch_size, evaluated_signals['stopping'])\n self.assertAllEqual([1.] * batch_size,\n evaluated_signals['padding_mask'])\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(sliced_features)\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.platform.test.main",
"tensorflow.python.client.session.Session",
"numpy.array",
"numpy.linspace",
"tensorflow.python.data.ops.dataset_ops.Dataset.zip"
]
] |
kavigupta/program_synthesis | [
"0b04b1d3b63954ba3d404a8d96c4da18667a1b02"
] | [
"program_synthesis/algolisp/dataset/evaluation.py"
] | [
"import numpy as np\n\nfrom program_synthesis.algolisp.tools import bleu\nfrom program_synthesis.algolisp.dataset import executor\n\n\ndef is_same_code(example, res):\n correct = False\n if hasattr(res, 'code_sequence'):\n if res.code_sequence is not None:\n correct = res.code_sequence == example.code_sequence\n elif res.code_tree is not None:\n correct = res.code_tree == example.code_tree\n else:\n correct = res == example.code_sequence\n return correct\n\n\ndef compute_bleu(example, res):\n try:\n if hasattr(res, 'code_sequence'):\n if res.code_sequence is not None:\n score = bleu.compute_bleu([example.code_sequence], [res.code_sequence])\n else:\n score = bleu.compute_bleu([example.code_sequence], [res])\n return np.asscalar(score)\n except ZeroDivisionError:\n return 0.0\n\n\ndef get_stats_from_code(args):\n res, example, executor_ = args\n if len(example.tests) == 0:\n return None\n if executor_ is not None:\n stats = executor.evaluate_code(\n res.code_tree if res.code_tree else res.code_sequence, example.schema.args, example.tests,\n executor_)\n stats['exact-code-match'] = is_same_code(example, res)\n stats['correct-program'] = int(stats['tests-executed'] == stats['tests-passed'])\n else:\n stats = {'tests-executed': 0, 'tests-passed': 0, 'result-none': 0, 'syntax-error': 0,\n 'runtime-exception': 0, 'exceptions': []}\n stats['correct-program'] = stats['exact-code-match'] = is_same_code(example, res)\n stats['bleu'] = compute_bleu(example, res)\n stats['example'] = example.to_dict()\n stats['res'] = res.to_dict() if hasattr(res, 'to_dict') else res\n return stats\n\n\ndef run_inference(dataset, model, executor_):\n \"\"\"Runs inference of given model on eval set, and executes resulting code.\n\n Args:\n dataset: Dataset, iterable of CodeExample to evaluate on.\n model: Model that runs the inference.\n executor: executor class from executor.py.\n \"\"\"\n for batch in dataset:\n results = model.inference(batch)\n for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):\n if stats is not None:\n yield stats\n return\n\n\ndef compute_metrics(all_stats):\n tests_num = 0\n programs_num = 0\n bleu_acc = 0.0\n correct_program_acc = 0\n # Almost correct programs are those that were executed on more than one test and passed at least 50% tests.\n almost_correct_program_acc = 0\n exact_code_match_acc = 0\n syntax_error_acc = 0\n runtime_exception_acc = 0\n other_exception_acc = 0\n for stats in all_stats:\n tests_num += stats['tests-executed']\n programs_num += 1\n bleu_acc += stats['bleu']\n correct_program_acc += stats['correct-program']\n if (stats['correct-program'] != 0 or\n stats['tests-executed'] > 1 and stats['tests-passed']/stats['tests-executed'] >= 0.5):\n almost_correct_program_acc += 1\n exact_code_match_acc += stats['exact-code-match']\n syntax_error_acc += stats['syntax-error']\n runtime_exception_acc += stats['runtime-exception']\n other_exception_acc += len(stats['exceptions'])\n\n return {'bleu': (bleu_acc/programs_num) if programs_num else 0.0,\n 'accuracy': (correct_program_acc/programs_num) if programs_num else 0.0,\n '50p_accuracy': (almost_correct_program_acc/programs_num) if programs_num else 0.0,\n 'exact_match_accuracy': (exact_code_match_acc/programs_num) if programs_num else 0.0,\n 'syntax_error_freq': (syntax_error_acc/tests_num) if tests_num else 0.0,\n 'runtime_exception_freq': (runtime_exception_acc/tests_num) if tests_num else 0.0,\n 'other_exception_freq': (other_exception_acc/tests_num) if tests_num else 0.0,\n 
'programs_num': programs_num,\n 'tests_num': tests_num,\n 'correct_program_num': correct_program_acc,\n 'almost_correct_program_num': almost_correct_program_acc,\n 'exact_code_match_num': exact_code_match_acc,\n }\n"
] | [
[
"numpy.asscalar"
]
] |
oliverkinch/dtu_mlops | [
"ce3a1f8f02ee95105b7b907735c39ad082321a4b"
] | [
"s2_organisation_and_version_control/exercise_files/typing_exercise_solution.py"
] | [
"from typing import Callable, Tuple, Union, Optional, List\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass Network(nn.Module):\n def __init__(self, input_size: int, output_size: int, hidden_layers: List[int], drop_p: float = 0.5) -> None:\n ''' Builds a feedforward network with arbitrary hidden layers.\n \n Arguments\n ---------\n input_size: integer, size of the input layer\n output_size: integer, size of the output layer\n hidden_layers: list of integers, the sizes of the hidden layers\n \n '''\n super().__init__()\n # Input to a hidden layer\n self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n \n # Add a variable number of more hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n \n self.output = nn.Linear(hidden_layers[-1], output_size)\n \n self.dropout = nn.Dropout(p=drop_p)\n \n def forward(self, x: torch.Tensor) -> torch.Tensor:\n ''' Forward pass through the network, returns the output logits '''\n \n for each in self.hidden_layers:\n x = F.relu(each(x))\n x = self.dropout(x)\n x = self.output(x)\n \n return F.log_softmax(x, dim=1)\n\n\ndef validation(\n model: nn.Module, \n testloader: torch.utils.data.DataLoader, \n criterion: Union[Callable, nn.Module]\n) -> Tuple[float, float]:\n accuracy = 0\n test_loss = 0\n for images, labels in testloader:\n\n images = images.resize_(images.size()[0], 784)\n\n output = model.forward(images)\n test_loss += criterion(output, labels).item()\n\n ## Calculating the accuracy \n # Model's output is log-softmax, take exponential to get the probabilities\n ps = torch.exp(output)\n # Class with highest probability is our predicted class, compare with true label\n equality = (labels.data == ps.max(1)[1])\n # Accuracy is number of correct predictions divided by all predictions, just take the mean\n accuracy += equality.type_as(torch.FloatTensor()).mean().item()\n\n return test_loss, accuracy\n\n\ndef train(\n model: nn.Module, \n trainloader: torch.utils.data.DataLoader, \n testloader: torch.utils.data.DataLoader, \n criterion: Union[Callable, nn.Module], \n optimizer: Optional[torch.optim.Optimizer] = None, \n epochs: int = 5, \n print_every: int = 40,\n) -> None:\n if optimizer is None:\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n steps = 0\n running_loss = 0\n for e in range(epochs):\n # Model in training mode, dropout is on\n model.train()\n for images, labels in trainloader:\n steps += 1\n \n # Flatten images into a 784 long vector\n images.resize_(images.size()[0], 784)\n \n optimizer.zero_grad()\n \n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Model in inference mode, dropout is off\n model.eval()\n \n # Turn off gradients for validation, will speed up inference\n with torch.no_grad():\n test_loss, accuracy = validation(model, testloader, criterion)\n \n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(running_loss/print_every),\n \"Test Loss: {:.3f}.. \".format(test_loss/len(testloader)),\n \"Test Accuracy: {:.3f}\".format(accuracy/len(testloader)))\n \n running_loss = 0\n \n # Make sure dropout and grads are on for training\n model.train()\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.FloatTensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.exp",
"torch.nn.Dropout"
]
] |
yuyiming/mars | [
"5e6990d1ea022444dd646c56697e596ef5d7e747"
] | [
"mars/services/subtask/tests/test_service.py"
] | [
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport time\n\nimport numpy as np\nimport pytest\n\nfrom .... import oscar as mo\nfrom .... import tensor as mt\nfrom .... import remote as mr\nfrom ....core.graph import TileableGraph, TileableGraphBuilder, ChunkGraphBuilder\n\nfrom ....resource import Resource\nfrom ....utils import Timer\nfrom ... import start_services, stop_services, NodeRole\nfrom ...meta import MetaAPI\nfrom ...session import SessionAPI\nfrom ...storage import MockStorageAPI\nfrom ...task import new_task_id\nfrom ...task.supervisor.manager import TaskManagerActor\nfrom .. import SubtaskAPI, Subtask, SubtaskResult\n\n\nclass FakeTaskManager(TaskManagerActor):\n def set_subtask_result(self, subtask_result: SubtaskResult):\n return\n\n\ndef _gen_subtask(t, session_id):\n graph = TileableGraph([t.data])\n next(TileableGraphBuilder(graph).build())\n\n chunk_graph = next(ChunkGraphBuilder(graph, fuse_enabled=False).build())\n subtask = Subtask(new_task_id(), session_id, new_task_id(), chunk_graph)\n\n return subtask\n\n\[email protected]\nasync def actor_pools():\n async def start_pool(is_worker: bool):\n if is_worker:\n kw = dict(\n n_process=2,\n labels=[\"main\"] + [\"numa-0\"] * 2,\n subprocess_start_method=\"spawn\",\n )\n else:\n kw = dict(n_process=0, subprocess_start_method=\"spawn\")\n pool = await mo.create_actor_pool(\"127.0.0.1\", **kw)\n await pool.start()\n return pool\n\n sv_pool, worker_pool = await asyncio.gather(start_pool(False), start_pool(True))\n yield sv_pool, worker_pool\n await asyncio.gather(sv_pool.stop(), worker_pool.stop())\n\n\[email protected]\nasync def test_subtask_service(actor_pools):\n sv_pool, worker_pool = actor_pools\n\n config = {\n \"services\": [\n \"cluster\",\n \"session\",\n \"meta\",\n \"lifecycle\",\n \"scheduling\",\n \"subtask\",\n \"task\",\n \"mutable\",\n ],\n \"cluster\": {\n \"backend\": \"fixed\",\n \"lookup_address\": sv_pool.external_address,\n \"resource\": {\"numa-0\": Resource(num_cpus=2)},\n },\n \"meta\": {\"store\": \"dict\"},\n \"scheduling\": {},\n \"subtask\": {},\n }\n await start_services(NodeRole.SUPERVISOR, config, address=sv_pool.external_address)\n await start_services(NodeRole.WORKER, config, address=worker_pool.external_address)\n\n session_id = \"test_session\"\n session_api = await SessionAPI.create(sv_pool.external_address)\n await session_api.create_session(session_id)\n ref = await mo.actor_ref(\n FakeTaskManager.gen_uid(session_id), address=sv_pool.external_address\n )\n await mo.destroy_actor(ref)\n await mo.create_actor(\n FakeTaskManager,\n session_id,\n uid=FakeTaskManager.gen_uid(session_id),\n address=sv_pool.external_address,\n )\n\n subtask_api = await SubtaskAPI.create(worker_pool.external_address)\n # create mock meta and storage APIs\n meta_api = await MetaAPI.create(session_id, sv_pool.external_address)\n storage_api = await MockStorageAPI.create(session_id, worker_pool.external_address)\n\n a = mt.ones((10, 10), 
chunk_size=10)\n b = a + 1\n\n subtask = _gen_subtask(b, session_id)\n assert \"TensorAdd\" in repr(subtask)\n await subtask_api.run_subtask_in_slot(\"numa-0\", 0, subtask)\n\n # check storage\n expected = np.ones((10, 10)) + 1\n result_key = subtask.chunk_graph.results[0].key\n result = await storage_api.get(result_key)\n np.testing.assert_array_equal(expected, result)\n\n # check meta\n chunk_meta = await meta_api.get_chunk_meta(result_key)\n assert chunk_meta is not None\n assert chunk_meta[\"bands\"][0] == (worker_pool.external_address, \"numa-0\")\n\n def sleep(timeout: int):\n time.sleep(timeout)\n return timeout\n\n b = mr.spawn(sleep, 1)\n\n subtask2 = _gen_subtask(b, session_id)\n asyncio.create_task(subtask_api.run_subtask_in_slot(\"numa-0\", 0, subtask2))\n await asyncio.sleep(0.2)\n with Timer() as timer:\n # normal cancel by cancel asyncio Task\n await asyncio.wait_for(\n subtask_api.cancel_subtask_in_slot(\"numa-0\", 0), timeout=2\n )\n # need 1 sec to reach timeout, then killing actor and wait for auto recovering\n # the time would not be over 5 sec\n assert timer.duration < 2\n\n await MockStorageAPI.cleanup(worker_pool.external_address)\n await stop_services(NodeRole.WORKER, config, address=worker_pool.external_address)\n await stop_services(NodeRole.SUPERVISOR, config, address=sv_pool.external_address)\n"
] | [
[
"numpy.ones",
"numpy.testing.assert_array_equal"
]
] |
emorynlp/stem-cell-hypothesis | [
"48a628093d93d653865fbac6409d179cddd99293"
] | [
"elit/components/srl/span_rank/span_ranking_srl_model.py"
] | [
"from typing import Dict\n\nfrom alnlp.modules.feedforward import FeedForward\nfrom alnlp.modules.time_distributed import TimeDistributed\n\nfrom .highway_variational_lstm import *\nimport torch\nfrom alnlp.modules import util\n\nfrom ...parsers.biaffine.biaffine import Biaffine\n\n\ndef initializer_1d(input_tensor, initializer):\n assert len(input_tensor.size()) == 1\n input_tensor = input_tensor.view(-1, 1)\n input_tensor = initializer(input_tensor)\n return input_tensor.view(-1)\n\n\nclass SpanRankingSRLDecoder(nn.Module):\n\n def __init__(self, context_layer_output_dim, label_space_size, config) -> None:\n super().__init__()\n self.config = config\n self.label_space_size = label_space_size\n self.dropout = float(config.dropout)\n self.use_gold_predicates = config.use_gold_predicates\n # span width feature embedding\n self.span_width_embedding = nn.Embedding(self.config.max_arg_width, self.config.span_width_feature_size)\n # self.context_projective_layer = nn.Linear(2 * self.lstm_hidden_size, self.config.num_attention_heads)\n # span scores\n self.span_emb_size = 3 * context_layer_output_dim + self.config.span_width_feature_size\n self.arg_unary_score_layers = nn.ModuleList([nn.Linear(self.span_emb_size, self.config.ffnn_size) if i == 0\n else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i\n in range(self.config.ffnn_depth)]) # [,150]\n self.arg_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])\n self.arg_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)\n # predicate scores\n self.pred_unary_score_layers = nn.ModuleList(\n [nn.Linear(context_layer_output_dim, self.config.ffnn_size) if i == 0\n else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i\n in range(self.config.ffnn_depth)]) # [,150]\n self.pred_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])\n self.pred_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)\n # srl scores\n self.srl_unary_score_input_size = self.span_emb_size + context_layer_output_dim\n self.srl_unary_score_layers = nn.ModuleList([nn.Linear(self.srl_unary_score_input_size, self.config.ffnn_size)\n if i == 0 else nn.Linear(self.config.ffnn_size,\n self.config.ffnn_size)\n for i in range(self.config.ffnn_depth)])\n self.srl_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])\n self.srl_unary_score_projection = nn.Linear(self.config.ffnn_size, self.label_space_size - 1)\n if config.use_biaffine:\n self.predicate_scale = TimeDistributed(FeedForward(context_layer_output_dim, 1, self.span_emb_size, 'ReLU'))\n self.biaffine = Biaffine(self.span_emb_size, self.label_space_size - 1)\n self.loss_reduction = config.loss_reduction\n self.reset_parameters()\n\n def reset_parameters(self):\n init.xavier_uniform_(self.span_width_embedding.weight)\n # init.xavier_uniform_(self.context_projective_layer.weight)\n # initializer_1d(self.context_projective_layer.bias, init.xavier_uniform_)\n\n for layer in self.arg_unary_score_layers:\n init.xavier_uniform_(layer.weight)\n initializer_1d(layer.bias, init.xavier_uniform_)\n init.xavier_uniform_(self.arg_unary_score_projection.weight)\n initializer_1d(self.arg_unary_score_projection.bias, init.xavier_uniform_)\n\n for layer in self.pred_unary_score_layers:\n init.xavier_uniform_(layer.weight)\n initializer_1d(layer.bias, init.xavier_uniform_)\n init.xavier_uniform_(self.pred_unary_score_projection.weight)\n 
initializer_1d(self.pred_unary_score_projection.bias, init.xavier_uniform_)\n\n for layer in self.srl_unary_score_layers:\n init.xavier_uniform_(layer.weight)\n initializer_1d(layer.bias, init.xavier_uniform_)\n init.xavier_uniform_(self.srl_unary_score_projection.weight)\n initializer_1d(self.srl_unary_score_projection.bias, init.xavier_uniform_)\n return None\n\n def forward(self, hidden_states, batch, mask=None):\n gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = SpanRankingSRLModel.unpack(\n batch, mask=mask, training=self.training)\n return self.decode(hidden_states, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,\n gold_predicates)\n\n @staticmethod\n def get_candidate_spans(sent_lengths: torch.Tensor, max_sent_length, max_arg_width):\n num_sentences = len(sent_lengths)\n device = sent_lengths.device\n candidate_starts = torch.arange(0, max_sent_length, device=device).expand(num_sentences, max_arg_width, -1)\n candidate_width = torch.arange(0, max_arg_width, device=device).view(1, -1, 1)\n candidate_ends = candidate_starts + candidate_width\n\n candidate_starts = candidate_starts.contiguous().view(num_sentences, max_sent_length * max_arg_width)\n candidate_ends = candidate_ends.contiguous().view(num_sentences, max_sent_length * max_arg_width)\n actual_sent_lengths = sent_lengths.view(-1, 1).expand(-1, max_sent_length * max_arg_width)\n candidate_mask = candidate_ends < actual_sent_lengths\n\n candidate_starts = candidate_starts * candidate_mask\n candidate_ends = candidate_ends * candidate_mask\n return candidate_starts, candidate_ends, candidate_mask\n\n @staticmethod\n def exclusive_cumsum(input: torch.Tensor, exclusive=True):\n \"\"\"\n\n Args:\n input: input is the sentence lengths tensor.\n exclusive: exclude the last sentence length (Default value = True)\n input(torch.Tensor :): \n input: torch.Tensor: \n\n Returns:\n\n \n \"\"\"\n assert exclusive is True\n if exclusive is True:\n exclusive_sent_lengths = input.new_zeros(1, dtype=torch.long)\n result = torch.cumsum(torch.cat([exclusive_sent_lengths, input], 0)[:-1], 0).view(-1, 1)\n else:\n result = torch.cumsum(input, 0).view(-1, 1)\n return result\n\n def flatten_emb(self, emb):\n num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]\n assert len(emb.size()) == 3\n flatted_emb = emb.contiguous().view(num_sentences * max_sentence_length, -1)\n return flatted_emb\n\n def flatten_emb_in_sentence(self, emb, batch_sentences_mask):\n num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]\n flatted_emb = self.flatten_emb(emb)\n return flatted_emb[batch_sentences_mask.reshape(num_sentences * max_sentence_length)]\n\n def get_span_emb(self, flatted_context_emb, flatted_candidate_starts, flatted_candidate_ends,\n config, dropout=0.0):\n batch_word_num = flatted_context_emb.size()[0]\n # gather slices from embeddings according to indices\n span_start_emb = flatted_context_emb[flatted_candidate_starts]\n span_end_emb = flatted_context_emb[flatted_candidate_ends]\n span_emb_feature_list = [span_start_emb, span_end_emb] # store the span vector representations for span rep.\n\n span_width = 1 + flatted_candidate_ends - flatted_candidate_starts # [num_spans], generate the span width\n max_arg_width = config.max_arg_width\n\n # get the span width feature emb\n span_width_index = span_width - 1\n span_width_emb = self.span_width_embedding(span_width_index)\n span_width_emb = F.dropout(span_width_emb, dropout, self.training)\n 
span_emb_feature_list.append(span_width_emb)\n\n \"\"\"head features\"\"\"\n cpu_flatted_candidte_starts = flatted_candidate_starts\n span_indices = torch.arange(0, max_arg_width, device=flatted_context_emb.device).view(1, -1) + \\\n cpu_flatted_candidte_starts.view(-1, 1) # For all the i, where i in [begin, ..i, end] for span\n # reset the position index to the batch_word_num index with index - 1\n span_indices = torch.clamp(span_indices, max=batch_word_num - 1)\n num_spans, spans_width = span_indices.size()[0], span_indices.size()[1]\n flatted_span_indices = span_indices.view(-1) # so Huge!!!, column is the span?\n # if torch.cuda.is_available():\n flatted_span_indices = flatted_span_indices\n span_text_emb = flatted_context_emb.index_select(0, flatted_span_indices).view(num_spans, spans_width, -1)\n span_indices_mask = util.lengths_to_mask(span_width, max_len=max_arg_width)\n # project context output to num head\n # head_scores = self.context_projective_layer.forward(flatted_context_emb)\n # get span attention\n # span_attention = head_scores.index_select(0, flatted_span_indices).view(num_spans, spans_width)\n # span_attention = torch.add(span_attention, expanded_span_indices_log_mask).unsqueeze(2) # control the span len\n # span_attention = F.softmax(span_attention, dim=1)\n span_text_emb = span_text_emb * span_indices_mask.unsqueeze(2).expand(-1, -1, span_text_emb.size()[-1])\n span_head_emb = torch.mean(span_text_emb, 1)\n span_emb_feature_list.append(span_head_emb)\n\n span_emb = torch.cat(span_emb_feature_list, 1)\n return span_emb, None, span_text_emb, span_indices, span_indices_mask\n\n def get_arg_unary_scores(self, span_emb):\n \"\"\"Compute span score with FFNN(span embedding)\n\n Args:\n span_emb: tensor of [num_sentences, num_spans, emb_size]\n config: param dropout:\n num_labels: param name:\n\n Returns:\n\n \n \"\"\"\n input = span_emb\n for i, ffnn in enumerate(self.arg_unary_score_layers):\n input = F.relu(ffnn.forward(input))\n input = self.arg_dropout_layers[i].forward(input)\n output = self.arg_unary_score_projection.forward(input)\n return output\n\n def get_pred_unary_scores(self, span_emb):\n input = span_emb\n for i, ffnn in enumerate(self.pred_unary_score_layers):\n input = F.relu(ffnn.forward(input))\n input = self.pred_dropout_layers[i].forward(input)\n output = self.pred_unary_score_projection.forward(input)\n return output\n\n def extract_spans(self, candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length,\n sort_spans, enforce_non_crossing):\n \"\"\"extract the topk span indices\n\n Args:\n candidate_scores: param candidate_starts:\n candidate_ends: param topk: [num_sentences]\n max_sentence_length: param sort_spans:\n enforce_non_crossing: return: indices [num_sentences, max_num_predictions]\n candidate_starts: \n topk: \n sort_spans: \n\n Returns:\n\n \n \"\"\"\n # num_sentences = candidate_scores.size()[0]\n # num_input_spans = candidate_scores.size()[1]\n max_num_output_spans = int(torch.max(topk))\n indices = [score.topk(k)[1] for score, k in zip(candidate_scores, topk)]\n output_span_indices_tensor = [F.pad(item, [0, max_num_output_spans - item.size()[0]], value=item[-1])\n for item in indices]\n output_span_indices_tensor = torch.stack(output_span_indices_tensor)\n return output_span_indices_tensor\n\n def batch_index_select(self, emb, indices):\n num_sentences = emb.size()[0]\n max_sent_length = emb.size()[1]\n flatten_emb = self.flatten_emb(emb)\n offset = (torch.arange(0, num_sentences, device=emb.device) * 
max_sent_length).unsqueeze(1)\n return torch.index_select(flatten_emb, 0, (indices + offset).view(-1)) \\\n .view(indices.size()[0], indices.size()[1], -1)\n\n def get_batch_topk(self, candidate_starts: torch.Tensor, candidate_ends, candidate_scores, topk_ratio, text_len,\n max_sentence_length, sort_spans=False, enforce_non_crossing=True):\n num_sentences = candidate_starts.size()[0]\n max_sentence_length = candidate_starts.size()[1]\n\n topk = torch.floor(text_len.to(torch.float) * topk_ratio).to(torch.long)\n topk = torch.max(topk, torch.ones(num_sentences, device=candidate_starts.device, dtype=torch.long))\n\n # this part should be implemented with C++\n predicted_indices = self.extract_spans(candidate_scores, candidate_starts, candidate_ends, topk,\n max_sentence_length, sort_spans, enforce_non_crossing)\n predicted_starts = torch.gather(candidate_starts, 1, predicted_indices)\n predicted_ends = torch.gather(candidate_ends, 1, predicted_indices)\n predicted_scores = torch.gather(candidate_scores, 1, predicted_indices)\n return predicted_starts, predicted_ends, predicted_scores, topk, predicted_indices\n\n def get_dense_span_labels(self, span_starts, span_ends, span_labels, max_sentence_length,\n span_parents=None):\n num_sentences = span_starts.size()[0]\n max_spans_num = span_starts.size()[1]\n\n # span_starts = span_starts + 1 - (span_labels > 0).to(torch.long)\n span_starts[(span_labels == 0) & (span_starts < max_sentence_length - 1)] += 1 # make start > end\n sentence_indices = torch.arange(0, num_sentences, device=span_starts.device).unsqueeze(1).expand(-1,\n max_spans_num)\n\n sparse_indices = torch.cat([sentence_indices.unsqueeze(2), span_starts.unsqueeze(2), span_ends.unsqueeze(2)],\n dim=2)\n if span_parents is not None: # semantic span predicate offset\n sparse_indices = torch.cat([sparse_indices, span_parents.unsqueeze(2)], 2)\n\n rank = 3 if span_parents is None else 4\n dense_labels = torch.sparse.LongTensor(sparse_indices.view(num_sentences * max_spans_num, rank).t(),\n span_labels.view(-1),\n torch.Size([num_sentences] + [max_sentence_length] * (rank - 1))) \\\n .to_dense()\n return dense_labels\n\n @staticmethod\n def gather_4d(params, indices):\n assert len(params.size()) == 4 and len(indices) == 4\n indices_a, indices_b, indices_c, indices_d = indices\n result = params[indices_a, indices_b, indices_c, indices_d]\n return result\n\n def get_srl_labels(self,\n arg_starts,\n arg_ends,\n predicates,\n gold_predicates,\n gold_arg_starts,\n gold_arg_ends,\n gold_arg_labels,\n max_sentence_length\n ):\n num_sentences = arg_starts.size()[0]\n max_arg_num = arg_starts.size()[1]\n max_pred_num = predicates.size()[1]\n\n sentence_indices_2d = torch.arange(0, num_sentences, device=arg_starts.device).unsqueeze(1).unsqueeze(2).expand(\n -1, max_arg_num, max_pred_num)\n expanded_arg_starts = arg_starts.unsqueeze(2).expand(-1, -1, max_pred_num)\n expanded_arg_ends = arg_ends.unsqueeze(2).expand(-1, -1, max_pred_num)\n expanded_predicates = predicates.unsqueeze(1).expand(-1, max_arg_num, -1)\n\n dense_srl_labels = self.get_dense_span_labels(gold_arg_starts,\n gold_arg_ends,\n gold_arg_labels,\n max_sentence_length, span_parents=gold_predicates) # ans\n srl_labels = self.gather_4d(dense_srl_labels,\n [sentence_indices_2d, expanded_arg_starts, expanded_arg_ends, expanded_predicates])\n return srl_labels\n\n def get_srl_unary_scores(self, span_emb):\n input = span_emb\n for i, ffnn in enumerate(self.srl_unary_score_layers):\n input = F.relu(ffnn.forward(input))\n input = 
self.srl_dropout_layers[i].forward(input)\n output = self.srl_unary_score_projection.forward(input)\n return output\n\n def get_srl_scores(self, arg_emb, pred_emb, arg_scores, pred_scores, num_labels, config, dropout):\n num_sentences = arg_emb.size()[0]\n num_args = arg_emb.size()[1] # [batch_size, max_arg_num, arg_emb_size]\n num_preds = pred_emb.size()[1] # [batch_size, max_pred_num, pred_emb_size]\n\n unsqueezed_arg_emb = arg_emb.unsqueeze(2)\n unsqueezed_pred_emb = pred_emb.unsqueeze(1)\n expanded_arg_emb = unsqueezed_arg_emb.expand(-1, -1, num_preds, -1)\n expanded_pred_emb = unsqueezed_pred_emb.expand(-1, num_args, -1, -1)\n pair_emb_list = [expanded_arg_emb, expanded_pred_emb]\n pair_emb = torch.cat(pair_emb_list, 3) # concatenate the argument emb and pre emb\n pair_emb_size = pair_emb.size()[3]\n flat_pair_emb = pair_emb.view(num_sentences * num_args * num_preds, pair_emb_size)\n # get unary scores\n flat_srl_scores = self.get_srl_unary_scores(flat_pair_emb)\n srl_scores = flat_srl_scores.view(num_sentences, num_args, num_preds, -1)\n if self.config.use_biaffine:\n srl_scores += self.biaffine(arg_emb, self.predicate_scale(pred_emb)).permute([0, 2, 3, 1])\n unsqueezed_arg_scores, unsqueezed_pred_scores = \\\n arg_scores.unsqueeze(2).unsqueeze(3), pred_scores.unsqueeze(1).unsqueeze(3)\n srl_scores = srl_scores + unsqueezed_arg_scores + unsqueezed_pred_scores\n dummy_scores = torch.zeros([num_sentences, num_args, num_preds, 1], device=arg_emb.device)\n srl_scores = torch.cat([dummy_scores, srl_scores], 3)\n return srl_scores\n\n def get_srl_softmax_loss(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds):\n srl_loss_mask = self.get_srl_loss_mask(srl_scores, num_predicted_args, num_predicted_preds)\n\n loss = torch.nn.functional.cross_entropy(srl_scores[srl_loss_mask], srl_labels[srl_loss_mask],\n reduction=self.loss_reduction)\n return loss, srl_loss_mask\n\n def get_srl_loss_mask(self, srl_scores, num_predicted_args, num_predicted_preds):\n max_num_arg = srl_scores.size()[1]\n max_num_pred = srl_scores.size()[2]\n # num_predicted_args, 1D tensor; max_num_arg: a int variable means the gold ans's max arg number\n args_mask = util.lengths_to_mask(num_predicted_args, max_num_arg)\n pred_mask = util.lengths_to_mask(num_predicted_preds, max_num_pred)\n srl_loss_mask = args_mask.unsqueeze(2) & pred_mask.unsqueeze(1)\n return srl_loss_mask\n\n def decode(self, contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,\n gold_predicates):\n num_sentences, max_sent_length = masks.size()\n device = sent_lengths.device\n \"\"\"generate candidate spans with argument pruning\"\"\"\n # candidate_starts [num_sentences, max_sent_length * max_arg_width]\n candidate_starts, candidate_ends, candidate_mask = self.get_candidate_spans(\n sent_lengths, max_sent_length, self.config.max_arg_width)\n flatted_candidate_mask = candidate_mask.view(-1)\n batch_word_offset = self.exclusive_cumsum(sent_lengths) # get the word offset in a batch\n # choose the flatted_candidate_starts with the actual existing positions, i.e. exclude the illegal starts\n flatted_candidate_starts = candidate_starts + batch_word_offset\n flatted_candidate_starts = flatted_candidate_starts.view(-1)[flatted_candidate_mask].to(torch.long)\n flatted_candidate_ends = candidate_ends + batch_word_offset\n flatted_candidate_ends = flatted_candidate_ends.view(-1)[flatted_candidate_mask].to(torch.long)\n # flatten the lstm output according to the sentence mask, i.e. 
exclude the illegal (padding) lstm output\n flatted_context_output = self.flatten_emb_in_sentence(contextualized_embeddings, masks)\n \"\"\"generate the span embedding\"\"\"\n candidate_span_emb, head_scores, span_head_emb, head_indices, head_indices_log_mask = self.get_span_emb(\n flatted_context_output, flatted_candidate_starts, flatted_candidate_ends,\n self.config, dropout=self.dropout)\n \"\"\"Get the span ids\"\"\"\n candidate_span_number = candidate_span_emb.size()[0]\n max_candidate_spans_num_per_sentence = candidate_mask.size()[1]\n sparse_indices = candidate_mask.nonzero(as_tuple=False)\n sparse_values = torch.arange(0, candidate_span_number, device=device)\n candidate_span_ids = torch.sparse.FloatTensor(sparse_indices.t(), sparse_values,\n torch.Size([num_sentences,\n max_candidate_spans_num_per_sentence])).to_dense()\n spans_log_mask = torch.log(candidate_mask.to(torch.float))\n predict_dict = {\"candidate_starts\": candidate_starts, \"candidate_ends\": candidate_ends,\n 'candidate_arg_mask': candidate_mask, \"head_scores\": head_scores}\n \"\"\"Get unary scores and topk of candidate argument spans.\"\"\"\n flatted_candidate_arg_scores = self.get_arg_unary_scores(candidate_span_emb)\n candidate_arg_scores = flatted_candidate_arg_scores.index_select(0, candidate_span_ids.view(-1)) \\\n .view(candidate_span_ids.size()[0], candidate_span_ids.size()[1])\n candidate_arg_scores = candidate_arg_scores + spans_log_mask\n arg_starts, arg_ends, arg_scores, num_args, top_arg_indices = \\\n self.get_batch_topk(candidate_starts, candidate_ends, candidate_arg_scores,\n self.config.argument_ratio, sent_lengths, max_sent_length,\n sort_spans=False, enforce_non_crossing=False)\n \"\"\"Get the candidate predicate\"\"\"\n candidate_pred_ids = torch.arange(0, max_sent_length, device=device).unsqueeze(0).expand(num_sentences, -1)\n candidate_pred_emb = contextualized_embeddings\n candidate_pred_scores = self.get_pred_unary_scores(candidate_pred_emb)\n candidate_pred_scores = candidate_pred_scores + torch.log(masks.to(torch.float).unsqueeze(2))\n candidate_pred_scores = candidate_pred_scores.squeeze(2)\n if self.use_gold_predicates is True:\n predicates = gold_predicates[0]\n num_preds = gold_predicates[1]\n pred_scores = torch.zeros_like(predicates)\n top_pred_indices = predicates\n else:\n predicates, _, pred_scores, num_preds, top_pred_indices = self.get_batch_topk(\n candidate_pred_ids, candidate_pred_ids, candidate_pred_scores, self.config.predicate_ratio,\n sent_lengths, max_sent_length,\n sort_spans=False, enforce_non_crossing=False)\n \"\"\"Get top arg embeddings\"\"\"\n arg_span_indices = torch.gather(candidate_span_ids, 1, top_arg_indices) # [num_sentences, max_num_args]\n arg_emb = candidate_span_emb.index_select(0, arg_span_indices.view(-1)).view(\n arg_span_indices.size()[0], arg_span_indices.size()[1], -1\n ) # [num_sentences, max_num_args, emb]\n \"\"\"Get top predicate embeddings\"\"\"\n pred_emb = self.batch_index_select(candidate_pred_emb,\n top_pred_indices) # [num_sentences, max_num_preds, emb]\n \"\"\"Get the srl scores according to the arg emb and pre emb.\"\"\"\n srl_scores = self.get_srl_scores(arg_emb, pred_emb, arg_scores, pred_scores, self.label_space_size, self.config,\n self.dropout) # [num_sentences, max_num_args, max_num_preds, num_labels]\n if gold_arg_labels is not None:\n \"\"\"Get the answers according to the labels\"\"\"\n srl_labels = self.get_srl_labels(arg_starts, arg_ends, predicates, gold_predicates, gold_arg_starts,\n gold_arg_ends, gold_arg_labels, 
max_sent_length)\n\n \"\"\"Compute the srl loss\"\"\"\n srl_loss, srl_mask = self.get_srl_softmax_loss(srl_scores, srl_labels, num_args, num_preds)\n predict_dict.update({\n 'srl_mask': srl_mask,\n 'loss': srl_loss\n })\n else:\n predict_dict['srl_mask'] = self.get_srl_loss_mask(srl_scores, num_args, num_preds)\n predict_dict.update({\n \"candidate_arg_scores\": candidate_arg_scores,\n \"candidate_pred_scores\": candidate_pred_scores,\n \"predicates\": predicates,\n \"arg_starts\": arg_starts,\n \"arg_ends\": arg_ends,\n \"arg_scores\": arg_scores,\n \"pred_scores\": pred_scores,\n \"num_args\": num_args,\n \"num_preds\": num_preds,\n \"arg_labels\": torch.max(srl_scores, 1)[1], # [num_sentences, num_args, num_preds]\n \"srl_scores\": srl_scores,\n })\n return predict_dict\n\n\nclass SpanRankingSRLModel(nn.Module):\n\n def __init__(self, config, embed: torch.nn.Module, context_layer: torch.nn.Module, label_space_size):\n super(SpanRankingSRLModel, self).__init__()\n self.config = config\n self.dropout = float(config.dropout)\n self.lexical_dropout = float(self.config.lexical_dropout)\n self.label_space_size = label_space_size\n\n # Initialize layers and parameters\n self.word_embedding_dim = embed.get_output_dim() # get the embedding dim\n self.embed = embed\n # Initialize context layer\n self.context_layer = context_layer\n context_layer_output_dim = context_layer.get_output_dim()\n self.decoder = SpanRankingSRLDecoder(context_layer_output_dim, label_space_size, config)\n\n def forward(self,\n batch: Dict[str, torch.Tensor]\n ):\n gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = \\\n self.unpack(batch, training=self.training)\n\n context_embeddings = self.embed(batch)\n context_embeddings = F.dropout(context_embeddings, self.lexical_dropout, self.training)\n contextualized_embeddings = self.context_layer(context_embeddings, masks)\n\n return self.decoder.decode(contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends,\n gold_arg_labels, gold_predicates)\n\n @staticmethod\n def unpack(batch, mask=None, training=False):\n keys = 'token_length', 'predicate_offset', 'argument_begin_offset', 'argument_end_offset', 'srl_label_id'\n sent_lengths, gold_predicates, gold_arg_starts, gold_arg_ends, gold_arg_labels = [batch.get(k, None) for k in\n keys]\n if mask is None:\n mask = util.lengths_to_mask(sent_lengths)\n # elif not training:\n # sent_lengths = mask.sum(dim=1)\n return gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, mask, sent_lengths\n"
] | [
[
"torch.ones",
"torch.stack",
"torch.Size",
"torch.clamp",
"torch.zeros_like",
"torch.gather",
"torch.cumsum",
"torch.arange",
"torch.nn.functional.cross_entropy",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.mean"
]
] |
artyompal/kaggle_salt | [
"3c323755730745ac7bbfd106f1f20919cceef0ee"
] | [
"code_gazay/lenin/lenin/transforms.py"
] | [
"import numpy as np\n\ndef hwc_to_chw(image):\n return np.einsum('hwc->chw', image) # change to pytorch format\n"
] | [
[
"numpy.einsum"
]
] |
miniTsl/IC3Net | [
"897ed3bae6ad5f65fb3cc4577d4392af6e456703"
] | [
"ic3net_envs/ic3net_envs/predator_prey_env.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSimulate a predator prey environment.\nEach agent can just observe itself (it's own identity) i.e. s_j = j and vision sqaure around it.\n\nDesign Decisions:\n - Memory cheaper than time (compute)\n - Using Vocab for class of box:\n -1 out of bound,\n indexing for predator agent (from 2?)\n ??? for prey agent (1 for fixed case, for now)\n - Action Space & Observation Space are according to an agent\n - Rewards -0.05 at each time step till the time\n - Episode never ends\n - Obs. State: Vocab of 1-hot < predator, preys & units >\n\"\"\"\n\n# core modules\nimport random\nimport math\nimport curses\n\n# 3rd party modules\nimport gym\nimport numpy as np\nfrom gym import spaces\n\n\nclass PredatorPreyEnv(gym.Env):\n # metadata = {'render.modes': ['human']}\n\n def __init__(self,):\n self.__version__ = \"0.0.1\"\n\n # TODO: better config handling\n self.OUTSIDE_CLASS = 1\n self.PREY_CLASS = 2\n self.PREDATOR_CLASS = 3\n self.TIMESTEP_PENALTY = -0.05\n self.PREY_REWARD = 0\n self.POS_PREY_REWARD = 0.05\n self.episode_over = False\n\n def init_curses(self):\n self.stdscr = curses.initscr()\n curses.start_color()\n curses.use_default_colors()\n curses.init_pair(1, curses.COLOR_RED, -1)\n curses.init_pair(2, curses.COLOR_YELLOW, -1)\n curses.init_pair(3, curses.COLOR_CYAN, -1)\n curses.init_pair(4, curses.COLOR_GREEN, -1)\n\n\n def init_args(self, parser):\n env = parser.add_argument_group('Prey Predator task')\n env.add_argument('--nenemies', type=int, default=1,\n help=\"Total number of preys in play\")\n env.add_argument('--dim', type=int, default=5,\n help=\"Dimension of box\")\n env.add_argument('--vision', type=int, default=2,\n help=\"Vision of predator\")\n env.add_argument('--moving_prey', action=\"store_true\", default=False,\n help=\"Whether prey is fixed or moving\")\n env.add_argument('--no_stay', action=\"store_true\", default=False,\n help=\"Whether predators have an action to stay in place\")\n parser.add_argument('--mode', default='mixed', type=str,\n help='cooperative|competitive|mixed (default: mixed)')\n env.add_argument('--enemy_comm', action=\"store_true\", default=False,\n help=\"Whether prey can communicate.\")\n\n def multi_agent_init(self, args):\n\n # General variables defining the environment : CONFIG\n params = ['dim', 'vision', 'moving_prey', 'mode', 'enemy_comm']\n for key in params:\n setattr(self, key, getattr(args, key))\n\n self.nprey = args.nenemies\n self.npredator = args.nfriendly\n self.dims = dims = (self.dim, self.dim)\n self.stay = not args.no_stay\n\n if args.moving_prey:\n raise NotImplementedError\n # TODO\n\n # (0: UP, 1: RIGHT, 2: DOWN, 3: LEFT, 4: STAY)\n # Define what an agent can do -\n if self.stay:\n self.naction = 5\n else:\n self.naction = 4\n\n self.action_space = spaces.MultiDiscrete([self.naction])\n\n self.BASE = (dims[0] * dims[1])\n self.OUTSIDE_CLASS += self.BASE\n self.PREY_CLASS += self.BASE\n self.PREDATOR_CLASS += self.BASE\n\n # Setting max vocab size for 1-hot encoding\n self.vocab_size = 1 + 1 + self.BASE + 1 + 1\n # predator + prey + grid + outside\n\n # Observation for each agent will be vision * vision ndarray\n self.observation_space = spaces.Box(low=0, high=1, shape=(self.vocab_size, (2 * self.vision) + 1, (2 * self.vision) + 1), dtype=int)\n # Actual observation will be of the shape 1 * npredator * (2v+1) * (2v+1) * vocab_size\n\n return\n\n def step(self, action):\n \"\"\"\n The agents take a step in the environment.\n\n Parameters\n ----------\n action : list/ndarray of 
length m, containing the indexes of what lever each 'm' chosen agents pulled.\n\n Returns\n -------\n obs, reward, episode_over, info : tuple\n obs (object) :\n\n reward (float) : Ratio of Number of discrete levers pulled to total number of levers.\n episode_over (bool) : Will be true as episode length is 1\n info (dict) : diagnostic information useful for debugging.\n \"\"\"\n if self.episode_over:\n raise RuntimeError(\"Episode is done\")\n action = np.array(action).squeeze()\n action = np.atleast_1d(action)\n\n for i, a in enumerate(action):\n self._take_action(i, a)\n\n assert np.all(action <= self.naction), \"Actions should be in the range [0,naction).\"\n\n\n self.episode_over = False\n self.obs = self._get_obs()\n\n debug = {'predator_locs':self.predator_loc,'prey_locs':self.prey_loc}\n return self.obs, self._get_reward(), self.episode_over, debug\n\n def reset(self):\n \"\"\"\n Reset the state of the environment and returns an initial observation.\n\n Returns\n -------\n observation (object): the initial observation of the space.\n \"\"\"\n self.episode_over = False\n self.reached_prey = np.zeros(self.npredator)\n\n # Locations\n locs = self._get_cordinates()\n self.predator_loc, self.prey_loc = locs[:self.npredator], locs[self.npredator:]\n\n self._set_grid()\n\n # stat - like success ratio\n self.stat = dict()\n\n # Observation will be npredator * vision * vision ndarray\n self.obs = self._get_obs()\n return self.obs\n\n def seed(self):\n return\n\n def _get_cordinates(self):\n idx = np.random.choice(np.prod(self.dims),(self.npredator + self.nprey), replace=False)\n return np.vstack(np.unravel_index(idx, self.dims)).T\n\n def _set_grid(self):\n self.grid = np.arange(self.BASE).reshape(self.dims)\n # Mark agents in grid\n # self.grid[self.predator_loc[:,0], self.predator_loc[:,1]] = self.predator_ids\n # self.grid[self.prey_loc[:,0], self.prey_loc[:,1]] = self.prey_ids\n\n # Padding for vision\n self.grid = np.pad(self.grid, self.vision, 'constant', constant_values = self.OUTSIDE_CLASS)\n\n self.empty_bool_base_grid = self._onehot_initialization(self.grid)\n\n def _get_obs(self):\n self.bool_base_grid = self.empty_bool_base_grid.copy()\n\n for i, p in enumerate(self.predator_loc):\n self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREDATOR_CLASS] += 1\n\n for i, p in enumerate(self.prey_loc):\n self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREY_CLASS] += 1\n\n obs = []\n for p in self.predator_loc:\n slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)\n slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)\n obs.append(self.bool_base_grid[slice_y, slice_x])\n\n if self.enemy_comm:\n for p in self.prey_loc:\n slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)\n slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)\n obs.append(self.bool_base_grid[slice_y, slice_x])\n\n obs = np.stack(obs)\n return obs\n\n def _take_action(self, idx, act):\n # prey action\n if idx >= self.npredator:\n # fixed prey\n if not self.moving_prey:\n return\n else:\n raise NotImplementedError\n\n if self.reached_prey[idx] == 1:\n return\n\n # STAY action\n if act==5:\n return\n\n # UP\n if act==0 and self.grid[max(0,\n self.predator_loc[idx][0] + self.vision - 1),\n self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:\n self.predator_loc[idx][0] = max(0, self.predator_loc[idx][0]-1)\n\n # RIGHT\n elif act==1 and self.grid[self.predator_loc[idx][0] + self.vision,\n min(self.dims[1] -1,\n self.predator_loc[idx][1] + self.vision + 1)] != self.OUTSIDE_CLASS:\n 
self.predator_loc[idx][1] = min(self.dims[1]-1,\n self.predator_loc[idx][1]+1)\n\n # DOWN\n elif act==2 and self.grid[min(self.dims[0]-1,\n self.predator_loc[idx][0] + self.vision + 1),\n self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:\n self.predator_loc[idx][0] = min(self.dims[0]-1,\n self.predator_loc[idx][0]+1)\n\n # LEFT\n elif act==3 and self.grid[self.predator_loc[idx][0] + self.vision,\n max(0,\n self.predator_loc[idx][1] + self.vision - 1)] != self.OUTSIDE_CLASS:\n self.predator_loc[idx][1] = max(0, self.predator_loc[idx][1]-1)\n\n def _get_reward(self):\n n = self.npredator if not self.enemy_comm else self.npredator + self.nprey\n reward = np.full(n, self.TIMESTEP_PENALTY)\n\n on_prey = np.where(np.all(self.predator_loc == self.prey_loc,axis=1))[0]\n nb_predator_on_prey = on_prey.size\n\n if self.mode == 'cooperative':\n reward[on_prey] = self.POS_PREY_REWARD * nb_predator_on_prey\n elif self.mode == 'competitive':\n if nb_predator_on_prey:\n reward[on_prey] = self.POS_PREY_REWARD / nb_predator_on_prey\n elif self.mode == 'mixed':\n reward[on_prey] = self.PREY_REWARD\n else:\n raise RuntimeError(\"Incorrect mode, Available modes: [cooperative|competitive|mixed]\")\n\n self.reached_prey[on_prey] = 1\n\n if np.all(self.reached_prey == 1) and self.mode == 'mixed':\n self.episode_over = True\n\n # Prey reward\n if nb_predator_on_prey == 0:\n reward[self.npredator:] = -1 * self.TIMESTEP_PENALTY\n else:\n # TODO: discuss & finalise\n reward[self.npredator:] = 0\n\n # Success ratio\n if self.mode != 'competitive':\n if nb_predator_on_prey == self.npredator:\n self.stat['success'] = 1\n else:\n self.stat['success'] = 0\n\n return reward\n\n def reward_terminal(self):\n return np.zeros_like(self._get_reward())\n\n\n def _onehot_initialization(self, a):\n ncols = self.vocab_size\n out = np.zeros(a.shape + (ncols,), dtype=int)\n out[self._all_idx(a, axis=2)] = 1\n return out\n\n def _all_idx(self, idx, axis):\n grid = np.ogrid[tuple(map(slice, idx.shape))]\n grid.insert(axis, idx)\n return tuple(grid)\n\n def render(self, mode='human', close=False):\n grid = np.zeros(self.BASE, dtype=object).reshape(self.dims)\n self.stdscr.clear()\n\n for p in self.predator_loc:\n if grid[p[0]][p[1]] != 0:\n grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'X'\n else:\n grid[p[0]][p[1]] = 'X'\n\n for p in self.prey_loc:\n if grid[p[0]][p[1]] != 0:\n grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'P'\n else:\n grid[p[0]][p[1]] = 'P'\n\n for row_num, row in enumerate(grid):\n for idx, item in enumerate(row):\n if item != 0:\n if 'X' in item and 'P' in item:\n self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(3))\n elif 'X' in item:\n self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(1))\n else:\n self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(2))\n else:\n self.stdscr.addstr(row_num, idx * 4, '0'.center(3), curses.color_pair(4))\n\n self.stdscr.addstr(len(grid), 0, '\\n')\n self.stdscr.refresh()\n\n def exit_render(self):\n curses.endwin()\n"
] | [
[
"numpy.zeros",
"numpy.atleast_1d",
"numpy.arange",
"numpy.all",
"numpy.prod",
"numpy.stack",
"numpy.pad",
"numpy.full",
"numpy.array",
"numpy.unravel_index"
]
] |
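The `_onehot_initialization`/`_all_idx` pair in the row above vectorizes one-hot encoding of the padded grid via `np.ogrid` fancy indexing. A minimal, self-contained sketch of that trick, assuming a toy 3x3 board and 9 classes (illustrative values, not taken from the row):

```python
import numpy as np

def onehot_grid(a, n_classes):
    # out[i, j, a[i, j]] = 1 for every cell, without a Python loop.
    out = np.zeros(a.shape + (n_classes,), dtype=int)
    grid = np.ogrid[tuple(map(slice, a.shape))]  # open index arrays, one per axis
    out[tuple(grid) + (a,)] = 1                  # fancy-index the class axis with `a`
    return out

board = np.arange(9).reshape(3, 3)               # toy grid: each cell's value is its class id
encoded = onehot_grid(board, 9)
assert encoded.shape == (3, 3, 9) and encoded.sum() == 9
```

The environment does the same thing with the class axis inserted by `_all_idx(a, axis=2)`; broadcasting the open index grids against `a` sets exactly one channel per cell.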
antoyang/TubeDETR | [
"3c32cc92a0fdaa0c770d95a59d8764e0e212424c"
] | [
"util/box_ops.py"
] | [
"# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nUtilities for bounding box manipulation and GIoU.\n\"\"\"\nimport torch\nimport numpy as np\nfrom torchvision.ops.boxes import box_area\nfrom typing import Tuple\n\n#### Bounding box utilities imported from torchvision and converted to numpy\ndef np_box_area(boxes: np.array) -> np.array:\n \"\"\"\n Computes the area of a set of bounding boxes, which are specified by its\n (x1, y1, x2, y2) coordinates.\n\n Args:\n boxes (Tensor[N, 4]): boxes for which the area will be computed. They\n are expected to be in (x1, y1, x2, y2) format with\n ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n\n Returns:\n area (Tensor[N]): area for each box\n \"\"\"\n assert boxes.ndim == 2 and boxes.shape[-1] == 4\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n\n# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py\n# with slight modifications\ndef _box_inter_union(boxes1: np.array, boxes2: np.array) -> Tuple[np.array, np.array]:\n area1 = np_box_area(boxes1)\n area2 = np_box_area(boxes2)\n\n lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = (rb - lt).clip(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n union = area1[:, None] + area2 - inter\n\n return inter, union\n\n\ndef np_box_iou(boxes1: np.array, boxes2: np.array) -> np.array:\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with\n ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n\n Args:\n boxes1 (Tensor[N, 4])\n boxes2 (Tensor[M, 4])\n\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2\n \"\"\"\n inter, union = _box_inter_union(boxes1, boxes2)\n iou = inter / union\n return iou\n\n\ndef box_cxcywh_to_xyxy(x):\n x_c, y_c, w, h = x.unbind(-1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)\n\n\n# modified from torchvision to also return the union\ndef box_iou(boxes1, boxes2):\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n\n lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n union = area1[:, None] + area2 - inter\n\n iou = inter / union\n return iou, union\n\n\ndef generalized_box_iou(boxes1, boxes2):\n \"\"\"\n Generalized IoU from https://giou.stanford.edu/\n\n The boxes should be in [x0, y0, x1, y1] format\n\n Returns a [N, M] pairwise matrix, where N = len(boxes1)\n and M = len(boxes2)\n \"\"\"\n # degenerate boxes gives inf / nan results\n # so do an early check\n assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n iou, union = box_iou(boxes1, boxes2)\n\n lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n area = wh[:, :, 0] * wh[:, :, 1]\n\n return iou - (area - union) / area\n\n\ndef masks_to_boxes(masks):\n \"\"\"Compute the bounding boxes 
around the provided masks\n\n The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n Returns a [N, 4] tensors, with the boxes in xyxy format\n \"\"\"\n if masks.numel() == 0:\n return torch.zeros((0, 4), device=masks.device)\n\n h, w = masks.shape[-2:]\n\n y = torch.arange(0, h, dtype=torch.float)\n x = torch.arange(0, w, dtype=torch.float)\n y, x = torch.meshgrid(y, x)\n\n x_mask = masks * x.unsqueeze(0)\n x_max = x_mask.flatten(1).max(-1)[0]\n x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n y_mask = masks * y.unsqueeze(0)\n y_max = y_mask.flatten(1).max(-1)[0]\n y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n return torch.stack([x_min, y_min, x_max, y_max], 1)\n"
] | [
[
"torch.min",
"torch.stack",
"torch.zeros",
"torch.arange",
"torch.meshgrid",
"torch.max",
"numpy.maximum",
"numpy.minimum"
]
] |
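The `util/box_ops.py` row computes pairwise IoU from per-box areas and a broadcasted intersection. A hedged usage sketch of the same NumPy inter/union computation as `_box_inter_union`, with made-up boxes in `(x1, y1, x2, y2)` format:

```python
import numpy as np

boxes1 = np.array([[0., 0., 2., 2.]])        # one 2x2 box
boxes2 = np.array([[1., 1., 3., 3.],         # overlaps boxes1 in a 1x1 square
                   [4., 4., 5., 5.]])        # disjoint from boxes1

lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2])   # [N,M,2] top-left of intersection
rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:])   # [N,M,2] bottom-right of intersection
wh = (rb - lt).clip(min=0)                            # zero out non-overlapping pairs
inter = wh[..., 0] * wh[..., 1]
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
iou = inter / (area1[:, None] + area2 - inter)
# iou == [[1/7, 0.0]]: intersection 1, union 4 + 4 - 1 = 7 for the first pair.
```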
kejsitake/sktime | [
"5c608f09ce0f5216677ce9f6ad61d71584211db9"
] | [
"sktime/contrib/vector_classifiers/_rotation_forest.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"RotationForest vector classifier.\n\nRotation Forest, sktime implementation for continuous values only.\n\"\"\"\n\n__author__ = [\"MatthewMiddlehurst\"]\n__all__ = [\"RotationForest\"]\n\nimport time\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn.base import BaseEstimator\nfrom sklearn.decomposition import PCA\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.utils import check_random_state, check_X_y\n\nfrom sktime.base._base import _clone_estimator\nfrom sktime.exceptions import NotFittedError\nfrom sktime.utils.validation import check_n_jobs\n\n\nclass RotationForest(BaseEstimator):\n \"\"\"Rotation Forest Classifier.\n\n Implementation of the Rotation Forest classifier described in Rodriguez et al\n (2013). [1]_\n Intended as a benchmark for time series data and a base classifier for\n transformation based appraoches such as ShapeletTransformClassifier, this sktime\n implementation only works with continuous attributes.\n\n Parameters\n ----------\n n_estimators : int, default=200\n Number of estimators to build for the ensemble.\n min_group : int, default=3\n The minimum size of a group.\n max_group : int, default=3\n The maximum size of a group.\n remove_proportion : float, default=0.5\n The proportion of cases to be removed.\n base_estimator : BaseEstimator or None, default=\"None\"\n Base estimator for the ensemble. By default uses the sklearn\n DecisionTreeClassifier using entropy as a splitting measure.\n time_limit_in_minutes : int, default=0\n Time contract to limit build time in minutes, overriding n_estimators.\n Default of 0 means n_estimators is used.\n contract_max_n_estimators : int, default=500\n Max number of estimators when time_limit_in_minutes is set.\n save_transformed_data : bool, default=False\n Save the data transformed in fit for use in _get_train_probs.\n n_jobs : int, default=1\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``-1`` means using all processors.\n random_state : int or None, default=None\n Seed for random number generation.\n\n Attributes\n ----------\n n_classes : int\n The number of classes.\n n_instances : int\n The number of train cases.\n n_atts : int\n The number of attributes in each train case.\n classes_ : list\n The classes labels.\n estimators_ : list of shape (n_estimators) of BaseEstimator\n The collections of estimators trained in fit.\n transformed_data : list of shape (n_estimators) of ndarray\n The transformed dataset for all classifiers. Only saved when\n save_transformed_data is true.\n\n See Also\n --------\n ShapeletTransformClassifier\n\n Notes\n -----\n For the Java version, see\n `TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java\n /weka/classifiers/meta/RotationForest.java>`_.\n\n References\n ----------\n .. [1] Rodriguez, Juan José, Ludmila I. Kuncheva, and Carlos J. Alonso. \"Rotation\n forest: A new classifier ensemble method.\" IEEE transactions on pattern analysis\n and machine intelligence 28.10 (2006).\n\n .. [2] Bagnall, A., et al. 
\"Is rotation forest the best classifier for problems\n with continuous features?.\" arXiv preprint arXiv:1809.06705 (2018).\n\n Examples\n --------\n >>> from sktime.contrib.vector_classifiers._rotation_forest import RotationForest\n >>> from sktime.datasets import load_unit_test\n >>> from sktime.datatypes._panel._convert import from_nested_to_3d_numpy\n >>> X_train, y_train = load_unit_test(split=\"train\", return_X_y=True)\n >>> X_test, y_test = load_unit_test(split=\"test\", return_X_y=True)\n >>> X_train = from_nested_to_3d_numpy(X_train)\n >>> X_test = from_nested_to_3d_numpy(X_test)\n >>> clf = RotationForest(n_estimators=10)\n >>> clf.fit(X_train, y_train)\n RotationForest(...)\n >>> y_pred = clf.predict(X_test)\n \"\"\"\n\n def __init__(\n self,\n n_estimators=200,\n min_group=3,\n max_group=3,\n remove_proportion=0.5,\n base_estimator=None,\n time_limit_in_minutes=0.0,\n contract_max_n_estimators=500,\n save_transformed_data=False,\n n_jobs=1,\n random_state=None,\n ):\n self.n_estimators = n_estimators\n self.min_group = min_group\n self.max_group = max_group\n self.remove_proportion = remove_proportion\n self.base_estimator = base_estimator\n\n self.time_limit_in_minutes = time_limit_in_minutes\n self.contract_max_n_estimators = contract_max_n_estimators\n self.save_transformed_data = save_transformed_data\n\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n self.n_classes = 0\n self.n_instances = 0\n self.n_atts = 0\n self.classes_ = []\n self.estimators_ = []\n self.transformed_data = []\n\n self._n_estimators = n_estimators\n self._base_estimator = base_estimator\n self._min = 0\n self._ptp = 0\n self._useful_atts = []\n self._pcas = []\n self._groups = []\n self._class_dictionary = {}\n self._n_jobs = n_jobs\n self._n_atts = 0\n # We need to add is-fitted state when inheriting from scikit-learn\n self._is_fitted = False\n\n super(RotationForest, self).__init__()\n\n def fit(self, X, y):\n \"\"\"Fit a forest of trees on cases (X,y), where y is the target variable.\n\n Parameters\n ----------\n X : ndarray of shape = [n_instances,n_attributes]\n The training input samples.\n y : array-like, shape = [n_instances]\n The class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:\n X = np.reshape(X, (X.shape[0], -1))\n elif not isinstance(X, np.ndarray) or len(X.shape) > 2:\n raise ValueError(\n \"RotationForest is not a time series classifier. 
\"\n \"A 2d numpy array is required.\"\n )\n X, y = check_X_y(X, y)\n\n self._n_jobs = check_n_jobs(self.n_jobs)\n\n self.n_instances, self.n_atts = X.shape\n self.classes_ = np.unique(y)\n self.n_classes = self.classes_.shape[0]\n for index, classVal in enumerate(self.classes_):\n self._class_dictionary[classVal] = index\n\n time_limit = self.time_limit_in_minutes * 60\n start_time = time.time()\n train_time = 0\n\n if self.base_estimator is None:\n self._base_estimator = DecisionTreeClassifier(criterion=\"entropy\")\n\n # replace missing values with 0 and remove useless attributes\n X = np.nan_to_num(X, False, 0, 0, 0)\n self._useful_atts = ~np.all(X[1:] == X[:-1], axis=0)\n X = X[:, self._useful_atts]\n\n self._n_atts = X.shape[1]\n\n # normalise attributes\n self._min = X.min(axis=0)\n self._ptp = X.max(axis=0) - self._min\n X = (X - self._min) / self._ptp\n\n X_cls_split = [X[np.where(y == i)] for i in self.classes_]\n\n if time_limit > 0:\n self._n_estimators = 0\n self.estimators_ = []\n self._pcas = []\n self._groups = []\n\n while (\n train_time < time_limit\n and self._n_estimators < self.contract_max_n_estimators\n ):\n fit = Parallel(n_jobs=self._n_jobs)(\n delayed(self._fit_estimator)(\n X,\n X_cls_split,\n y,\n i,\n )\n for i in range(self._n_jobs)\n )\n\n estimators, pcas, groups, transformed_data = zip(*fit)\n\n self.estimators_ += estimators\n self._pcas += pcas\n self._groups += groups\n self.transformed_data += transformed_data\n\n self._n_estimators += self._n_jobs\n train_time = time.time() - start_time\n else:\n fit = Parallel(n_jobs=self._n_jobs)(\n delayed(self._fit_estimator)(\n X,\n X_cls_split,\n y,\n i,\n )\n for i in range(self._n_estimators)\n )\n\n self.estimators_, self._pcas, self._groups, self.transformed_data = zip(\n *fit\n )\n\n self._is_fitted = True\n return self\n\n def predict(self, X):\n \"\"\"Predict for all cases in X. Built on top of predict_proba.\n\n Parameters\n ----------\n X : ndarray of shape = [n_instances,n_attributes]\n\n Returns\n -------\n output : array of shape = [n_test_instances]\n \"\"\"\n rng = check_random_state(self.random_state)\n return np.array(\n [\n self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]\n for prob in self.predict_proba(X)\n ]\n )\n\n def predict_proba(self, X):\n \"\"\"Probability estimates for each class for all cases in X.\n\n Parameters\n ----------\n X : ndarray of shape = [n_instances,n_attributes]\n\n Returns\n -------\n output : array of shape = [n_test_instances, num_classes] of\n probabilities\n \"\"\"\n if not self._is_fitted:\n raise NotFittedError(\n f\"This instance of {self.__class__.__name__} has not \"\n f\"been fitted yet; please call `fit` first.\"\n )\n if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:\n X = np.reshape(X, (X.shape[0], -1))\n elif not isinstance(X, np.ndarray) or len(X.shape) > 2:\n raise ValueError(\n \"RotationForest is not a time series classifier. 
\"\n \"A 2d numpy array is required.\"\n )\n\n # replace missing values with 0 and remove useless attributes\n X = np.nan_to_num(X, False, 0, 0, 0)\n X = X[:, self._useful_atts]\n\n # normalise the data.\n X = (X - self._min) / self._ptp\n\n y_probas = Parallel(n_jobs=self._n_jobs)(\n delayed(self._predict_proba_for_estimator)(\n X,\n self.estimators_[i],\n self._pcas[i],\n self._groups[i],\n )\n for i in range(self._n_estimators)\n )\n\n output = np.sum(y_probas, axis=0) / (\n np.ones(self.n_classes) * self._n_estimators\n )\n return output\n\n def _get_train_probs(self, X, y):\n if not self._is_fitted:\n raise NotFittedError(\n f\"This instance of {self.__class__.__name__} has not \"\n f\"been fitted yet; please call `fit` first.\"\n )\n if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:\n X = np.reshape(X, (X.shape[0], -1))\n elif not isinstance(X, np.ndarray) or len(X.shape) > 2:\n raise ValueError(\n \"RotationForest is not a time series classifier. \"\n \"A 2d numpy array is required.\"\n )\n\n n_instances, n_atts = X.shape\n\n if n_instances != self.n_instances or n_atts != self.n_atts:\n raise ValueError(\n \"n_instances, n_dims, series_length mismatch. X should be \"\n \"the same as the training data used in fit for generating train \"\n \"probabilities.\"\n )\n\n if not self.save_transformed_data:\n raise ValueError(\"Currently only works with saved transform data from fit.\")\n\n p = Parallel(n_jobs=self._n_jobs)(\n delayed(self._train_probas_for_estimator)(\n y,\n i,\n )\n for i in range(self._n_estimators)\n )\n y_probas, oobs = zip(*p)\n\n results = np.sum(y_probas, axis=0)\n divisors = np.zeros(n_instances)\n for oob in oobs:\n for inst in oob:\n divisors[inst] += 1\n\n for i in range(n_instances):\n results[i] = (\n np.ones(self.n_classes) * (1 / self.n_classes)\n if divisors[i] == 0\n else results[i] / (np.ones(self.n_classes) * divisors[i])\n )\n\n return results\n\n def _fit_estimator(self, X, X_cls_split, y, idx):\n rs = 255 if self.random_state == 0 else self.random_state\n rs = (\n None\n if self.random_state is None\n else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max\n )\n rng = check_random_state(rs)\n\n groups = self._generate_groups(rng)\n pcas = []\n\n # construct the slices to fit the PCAs too.\n for group in groups:\n classes = rng.choice(\n range(self.n_classes),\n size=rng.randint(1, self.n_classes + 1),\n replace=False,\n )\n\n # randomly add the classes with the randomly selected attributes.\n X_t = np.zeros((0, len(group)))\n for cls_idx in classes:\n c = X_cls_split[cls_idx]\n X_t = np.concatenate((X_t, c[:, group]), axis=0)\n\n sample_ind = rng.choice(\n X_t.shape[0],\n int(X_t.shape[0] * self.remove_proportion),\n replace=False,\n )\n X_t = X_t[sample_ind]\n\n # try to fit the PCA if it fails, remake it, and add 10 random data instances.\n while True:\n # ignore err state on PCA because we account if it fails.\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n # differences between os occasionally. 
seems to happen when there\n # are low amounts of cases in the fit\n pca = PCA(random_state=rs).fit(X_t)\n\n if not np.isnan(pca.explained_variance_ratio_).all():\n break\n X_t = np.concatenate(\n (X_t, rng.random_sample((10, X_t.shape[1]))), axis=0\n )\n\n pcas.append(pca)\n\n # merge all the pca_transformed data into one instance and build a classifier on it.\n X_t = np.concatenate(\n [pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1\n )\n tree = _clone_estimator(self._base_estimator, random_state=rs)\n tree.fit(X_t, y)\n\n return tree, pcas, groups, X_t if self.save_transformed_data else None\n\n def _predict_proba_for_estimator(self, X, clf, pcas, groups):\n X_t = np.concatenate(\n [pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1\n )\n probas = clf.predict_proba(X_t)\n\n if probas.shape[1] != self.n_classes:\n new_probas = np.zeros((probas.shape[0], self.n_classes))\n for i, cls in enumerate(clf.classes_):\n cls_idx = self._class_dictionary[cls]\n new_probas[:, cls_idx] = probas[:, i]\n probas = new_probas\n\n return probas\n\n def _train_probas_for_estimator(self, y, idx):\n rs = 255 if self.random_state == 0 else self.random_state\n rs = (\n None\n if self.random_state is None\n else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max\n )\n rng = check_random_state(rs)\n\n indices = range(self.n_instances)\n subsample = rng.choice(self.n_instances, size=self.n_instances)\n oob = [n for n in indices if n not in subsample]\n\n clf = _clone_estimator(self._base_estimator, rs)\n clf.fit(self.transformed_data[idx][subsample], y[subsample])\n probas = clf.predict_proba(self.transformed_data[idx][oob])\n\n if probas.shape[1] != self.n_classes:\n new_probas = np.zeros((probas.shape[0], self.n_classes))\n for i, cls in enumerate(clf.classes_):\n cls_idx = self._class_dictionary[cls]\n new_probas[:, cls_idx] = probas[:, i]\n probas = new_probas\n\n results = np.zeros((self.n_instances, self.n_classes))\n for n, proba in enumerate(probas):\n results[oob[n]] += proba\n\n return [results, oob]\n\n def _generate_groups(self, rng):\n permutation = rng.permutation((np.arange(0, self._n_atts)))\n\n # select the size of each group.\n group_size_count = np.zeros(self.max_group - self.min_group + 1)\n n_attributes = 0\n n_groups = 0\n while n_attributes < self._n_atts:\n n = rng.randint(group_size_count.shape[0])\n group_size_count[n] += 1\n n_attributes += self.min_group + n\n n_groups += 1\n\n groups = []\n current_attribute = 0\n current_size = 0\n for i in range(0, n_groups):\n while group_size_count[current_size] == 0:\n current_size += 1\n group_size_count[current_size] -= 1\n\n n = self.min_group + current_size\n groups.append(np.zeros(n, dtype=int))\n for k in range(0, n):\n if current_attribute < permutation.shape[0]:\n groups[i][k] = permutation[current_attribute]\n else:\n groups[i][k] = permutation[rng.randint(permutation.shape[0])]\n current_attribute += 1\n\n return groups\n"
] | [
[
"numpy.sum",
"numpy.ones",
"sklearn.utils.check_random_state",
"numpy.zeros",
"sklearn.tree.DecisionTreeClassifier",
"numpy.reshape",
"numpy.errstate",
"numpy.arange",
"numpy.where",
"numpy.iinfo",
"numpy.all",
"numpy.isnan",
"sklearn.utils.check_X_y",
"numpy.concatenate",
"numpy.nan_to_num",
"numpy.unique",
"sklearn.decomposition.PCA"
]
] |
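The core of the RotationForest row is the per-tree rotation step: partition the attributes into groups, fit one PCA per group, concatenate the per-group transforms, and train a tree on the rotated data. A condensed sketch under simplified assumptions (fixed group size, no per-group class or instance subsampling, synthetic data):

```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.rand(60, 6)                                   # toy continuous data
y = rng.randint(0, 2, 60)

groups = [np.array([0, 1, 2]), np.array([3, 4, 5])]   # disjoint attribute groups
pcas = [PCA().fit(X[:, g]) for g in groups]           # one rotation per group
X_rot = np.concatenate(
    [p.transform(X[:, g]) for p, g in zip(pcas, groups)], axis=1
)

tree = DecisionTreeClassifier(criterion="entropy", random_state=0).fit(X_rot, y)
```

The full implementation additionally subsamples classes and instances before each PCA fit and retries a failed fit with extra random rows, as shown in `_fit_estimator`.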
Benjamin15/shap | [
"4b6472c90c89aad403e00dff0cc8a6416f354fea"
] | [
"shap/plots/dependence.py"
] | [
"from __future__ import division\n\nfrom io import BytesIO\nimport base64\nimport numpy as np\nimport warnings\ntry:\n import matplotlib.pyplot as pl\n import matplotlib\nexcept ImportError:\n warnings.warn(\"matplotlib could not be loaded!\")\n pass\nfrom . import labels\nfrom . import colors\nfrom ..common import convert_name, approximate_interactions\n\ndef dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,\n interaction_index=\"auto\",\n color=\"#1E88E5\", axis_color=\"#333333\", cmap=None,\n dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, ax=None, show=True,\n get_png=False):\n \"\"\" Create a SHAP dependence plot, colored by an interaction feature.\n\n Plots the value of the feature on the x-axis and the SHAP value of the same feature\n on the y-axis. This shows how the model depends on the given feature, and is like a\n richer extenstion of the classical parital dependence plots. Vertical dispersion of the\n data points represents interaction effects. Grey ticks along the y-axis are data\n points where the feature's value was NaN.\n\n\n Parameters\n ----------\n ind : int or string\n If this is an int it is the index of the feature to plot. If this is a string it is\n either the name of the feature to plot, or it can have the form \"rank(int)\" to specify\n the feature with that rank (ordered by mean absolute SHAP value over all the samples).\n\n shap_values : numpy.array\n Matrix of SHAP values (# samples x # features).\n\n features : numpy.array or pandas.DataFrame\n Matrix of feature values (# samples x # features).\n\n feature_names : list\n Names of the features (length # features).\n\n display_features : numpy.array or pandas.DataFrame\n Matrix of feature values for visual display (such as strings instead of coded values).\n\n interaction_index : \"auto\", None, int, or string\n The index of the feature used to color the plot. The name of a feature can also be passed\n as a string. If \"auto\" then shap.common.approximate_interactions is used to pick what\n seems to be the strongest interaction (note that to find to true stongest interaction you\n need to compute the SHAP interaction values).\n\n x_jitter : float (0 - 1)\n Adds random jitter to feature values. May increase plot readability when feature\n is discrete.\n\n alpha : float\n The transparency of the data points (between 0 and 1). This can be useful to the\n show density of the data points when using a large dataset.\n\n xmin : float or string\n Represents the lower bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n xmax : float or string\n Represents the upper bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n ax : matplotlib Axes object\n Optionally specify an existing matplotlib Axes object, into which the plot will be placed.\n In this case we do not create a Figure, otherwise we do.\n\n \"\"\"\n\n if cmap is None:\n cmap = colors.red_blue\n\n if type(shap_values) is list:\n raise TypeError(\"The passed shap_values are a list not an array! 
If you have a list of explanations try \" \\\n \"passing shap_values[0] instead to explain the first output class of a multi-output model.\")\n\n # convert from DataFrames if we got any\n if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = features.columns\n features = features.values\n if str(type(display_features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = display_features.columns\n display_features = display_features.values\n elif display_features is None:\n display_features = features\n\n if feature_names is None:\n feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]\n\n # allow vectors to be passed\n if len(shap_values.shape) == 1:\n shap_values = np.reshape(shap_values, len(shap_values), 1)\n if len(features.shape) == 1:\n features = np.reshape(features, len(features), 1)\n\n ind = convert_name(ind, shap_values, feature_names)\n\n # guess what other feature as the stongest interaction with the plotted feature\n if not hasattr(ind, \"__len__\"):\n if interaction_index == \"auto\":\n interaction_index = approximate_interactions(ind, shap_values, features)[0]\n interaction_index = convert_name(interaction_index, shap_values, feature_names)\n categorical_interaction = False\n\n # create a matplotlib figure, if `ax` hasn't been specified.\n if not ax:\n figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)\n fig = pl.figure(figsize=figsize)\n ax = fig.gca()\n else:\n fig = ax.get_figure()\n\n # plotting SHAP interaction values\n if len(shap_values.shape) == 3 and hasattr(ind, \"__len__\") and len(ind) == 2:\n ind1 = convert_name(ind[0], shap_values, feature_names)\n ind2 = convert_name(ind[1], shap_values, feature_names)\n if ind1 == ind2:\n proj_shap_values = shap_values[:, ind2, :]\n else:\n proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half\n\n # there is no interaction coloring for the main effect\n if ind1 == ind2:\n fig.set_size_inches(6, 5, forward=True)\n\n # TODO: remove recursion; generally the functions should be shorter for more maintainable code\n dependence_plot(\n ind1, proj_shap_values, features, feature_names=feature_names,\n interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,\n xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha, get_png=get_png\n )\n if ind1 == ind2:\n ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])\n else:\n ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))\n\n if show:\n pl.show()\n return\n\n assert shap_values.shape[0] == features.shape[0], \\\n \"'shap_values' and 'features' values must have the same number of rows!\"\n assert shap_values.shape[1] == features.shape[1], \\\n \"'shap_values' must have the same number of columns as 'features'!\"\n\n # get both the raw and display feature values\n oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering\n np.random.shuffle(oinds)\n xv = features[oinds, ind].astype(np.float64)\n xd = display_features[oinds, ind]\n s = shap_values[oinds, ind]\n if type(xd[0]) == str:\n name_map = {}\n for i in range(len(xv)):\n name_map[xd[i]] = xv[i]\n xnames = list(name_map.keys())\n\n # allow a single feature name to be passed alone\n if type(feature_names) == str:\n feature_names = [feature_names]\n name = feature_names[ind]\n\n # get both 
the raw and display color values\n color_norm = None\n if interaction_index is not None:\n cv = features[:, interaction_index]\n cd = display_features[:, interaction_index]\n clow = np.nanpercentile(cv.astype(np.float), 5)\n chigh = np.nanpercentile(cv.astype(np.float), 95)\n if clow == chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n if type(cd[0]) == str:\n cname_map = {}\n for i in range(len(cv)):\n cname_map[cd[i]] = cv[i]\n cnames = list(cname_map.keys())\n categorical_interaction = True\n elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:\n categorical_interaction = True\n\n # discritize colors for categorical features\n if categorical_interaction and clow != chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n bounds = np.linspace(clow, chigh, int(chigh - clow + 2))\n color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)\n\n # optionally add jitter to feature values\n if x_jitter > 0:\n if x_jitter > 1: x_jitter = 1\n xvals = xv.copy()\n if isinstance(xvals[0], float):\n xvals = xvals.astype(np.float)\n xvals = xvals[~np.isnan(xvals)]\n xvals = np.unique(xvals) # returns a sorted array\n if len(xvals) >= 2:\n smallest_diff = np.min(np.diff(xvals))\n jitter_amount = x_jitter * smallest_diff\n xv += (np.random.ranf(size = len(xv))*jitter_amount) - (jitter_amount/2)\n\n # the actual scatter plot, TODO: adapt the dot_size to the number of data points?\n xv_nan = np.isnan(xv)\n xv_notnan = np.invert(xv_nan)\n if interaction_index is not None:\n\n # plot the nan values in the interaction feature as grey\n cvals = features[oinds, interaction_index].astype(np.float64)\n cvals_imp = cvals.copy()\n cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0\n cvals[cvals_imp > chigh] = chigh\n cvals[cvals_imp < clow] = clow\n p = ax.scatter(\n xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],\n cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,\n norm=color_norm, rasterized=len(xv) > 500\n )\n p.set_array(cvals[xv_notnan])\n else:\n p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,\n alpha=alpha, rasterized=len(xv) > 500)\n\n if interaction_index != ind and interaction_index is not None:\n # draw the color bar\n if type(cd[0]) == str:\n tick_positions = [cname_map[n] for n in cnames]\n if len(tick_positions) == 2:\n tick_positions[0] -= 0.25\n tick_positions[1] += 0.25\n cb = pl.colorbar(p, ticks=tick_positions, ax=ax)\n cb.set_ticklabels(cnames)\n else:\n cb = pl.colorbar(p, ax=ax)\n\n cb.set_label(feature_names[interaction_index], size=13)\n cb.ax.tick_params(labelsize=11)\n if categorical_interaction:\n cb.ax.tick_params(length=0)\n cb.set_alpha(1)\n cb.outline.set_visible(False)\n bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n cb.ax.set_aspect((bbox.height - 0.7) * 20)\n\n # handles any setting of xmax and xmin\n # note that we handle None,float, or \"percentile(float)\" formats\n if xmin is not None or xmax is not None:\n if type(xmin) == str and xmin.startswith(\"percentile\"):\n xmin = np.nanpercentile(xv, float(xmin[11:-1]))\n if type(xmax) == str and xmax.startswith(\"percentile\"):\n xmax = np.nanpercentile(xv, float(xmax[11:-1]))\n\n if xmin is None or xmin == np.nanmin(xv):\n xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20\n if xmax is None or xmax == np.nanmax(xv):\n xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20\n\n ax.set_xlim(xmin, xmax)\n\n # plot any nan feature values as tick marks along the y-axis\n xlim = ax.get_xlim()\n if 
interaction_index is not None:\n p = ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,\n vmin=clow, vmax=chigh\n )\n p.set_array(cvals[xv_nan])\n else:\n ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, color=color, alpha=alpha\n )\n ax.set_xlim(xlim)\n\n # make the plot more readable\n ax.set_xlabel(name, color=axis_color, fontsize=13)\n ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)\n if title is not None:\n ax.set_title(title, color=axis_color, fontsize=13)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)\n for spine in ax.spines.values():\n spine.set_edgecolor(axis_color)\n if type(xd[0]) == str:\n ax.set_xticks([name_map[n] for n in xnames])\n ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))\n if show:\n with warnings.catch_warnings(): # ignore expected matplotlib warnings\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n pl.show()\n if get_png:\n file = BytesIO()\n pl.savefig(file, format='png', bbox_inches=\"tight\")\n return file\n"
] | [
[
"numpy.random.shuffle",
"matplotlib.colors.BoundaryNorm",
"numpy.nanmax",
"numpy.diff",
"numpy.invert",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.nanmin",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.isnan",
"matplotlib.pyplot.colorbar",
"numpy.unique"
]
] |
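The dependence-plot row accepts axis bounds either as floats or as strings of the form `"percentile(float)"`. A minimal sketch of that parsing convention (the helper name `parse_bound` and the data values are illustrative, not part of the library API):

```python
import numpy as np

def parse_bound(bound, values):
    # "percentile(5)" -> 5th percentile of `values`; plain numbers pass through.
    if isinstance(bound, str) and bound.startswith("percentile"):
        return np.nanpercentile(values, float(bound[11:-1]))  # strip "percentile(" and ")"
    return bound

xv = np.array([1.0, 2.0, 3.0, np.nan, 10.0])
print(parse_bound("percentile(50)", xv))   # 2.5 -- NaN values are ignored
print(parse_bound(0.0, xv))                # 0.0 -- unchanged
```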
Awesomex005/CarND-Vehicle-Detection | [
"e12068887946605d148284aeea0262695d54743f"
] | [
"train_classifier.py"
] | [
"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport glob\nimport time\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom extract_feature import *\n# NOTE: the next import is only valid for scikit-learn version <= 0.17\n# for scikit-learn >= 0.18 use:\n# from sklearn.model_selection import train_test_split\nfrom sklearn.cross_validation import train_test_split\nimport pickle\n\n\n# Read in cars and notcars\ncars = glob.glob('./train_data/vehicles/*/*.png'); train_data_tpye = 'png'; notcars = glob.glob('./train_data/non-vehicles/*/*.png')\n#cars = glob.glob('./hog_test_imgs/vehicles_smallset/*/*.jpeg'); train_data_tpye = 'jpeg'; #notcars = glob.glob('./hog_test_imgs/non-vehicles_smallset/*/*.jpeg')\nsample_size = None\ncars = cars[0:sample_size]\nnotcars = notcars[0:sample_size]\n\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (32, 32) # Spatial binning dimensions\nhist_bins = 32 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [None, None] # Min and max in y to search in slide_window()\n\nt = time.time()\nprint(\"start extract car_features\")\ncar_features = extract_features(cars, train_data_tpye, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\nprint(\"start extract notcar_features\")\nnotcar_features = extract_features(notcars, train_data_tpye, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n \nX = np.vstack((car_features, notcar_features)).astype(np.float64)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=rand_state)\n\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to extract features SVC...')\n \nprint(\"X_train shape: {} \\X_test shape: {}\".format(X_train.shape, X_test.shape))\n \n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X_train)\n# Apply the scaler to X\nX_train = X_scaler.transform(X_train)\nX_test = X_scaler.transform(X_test)\n\nprint('Using:',orient,'orientations',pix_per_cell,\n 'pixels per cell and', cell_per_block,'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n# Use a linear SVC \nsvc = LinearSVC()\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n\n# pickle SVC\npickle_file = 'svc_acc_%f.p'%round(svc.score(X_test, y_test), 4)\ntry:\n with open(pickle_file, 'wb') as pfile:\n pickle.dump(\n {\n 'svc': svc,\n 'scaler': X_scaler,\n 'color_space': color_space,\n 'orient': orient,\n 'pix_per_cell': pix_per_cell,\n 
'cell_per_block': cell_per_block,\n 'spatial_size': spatial_size,\n 'hist_bins': hist_bins,\n 'spatial_feat': spatial_feat,\n 'hist_feat': hist_feat,\n },\n pfile, pickle.HIGHEST_PROTOCOL)\nexcept Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise"
] | [
[
"numpy.vstack",
"sklearn.svm.LinearSVC",
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.StandardScaler",
"numpy.random.randint"
]
] |
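The trainer row above imports `train_test_split` from `sklearn.cross_validation`, a module that was deprecated in scikit-learn 0.18 and removed in 0.20 (the row's own comment notes this). A hedged sketch of the same split/scale/fit pipeline against the modern `sklearn.model_selection` API, with random features standing in for the HOG/color-histogram vectors:

```python
import numpy as np
from sklearn.model_selection import train_test_split  # replaces sklearn.cross_validation
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

X = np.random.rand(100, 10).astype(np.float64)        # stand-in feature matrix
y = np.hstack((np.ones(50), np.zeros(50)))            # car / not-car labels

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

scaler = StandardScaler().fit(X_train)                # fit the scaler on training data only
svc = LinearSVC().fit(scaler.transform(X_train), y_train)
print('Test Accuracy of SVC =', round(svc.score(scaler.transform(X_test), y_test), 4))
```

Fitting the scaler on `X_train` alone (as the original does) avoids leaking test-set statistics into training.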
DannySalem/chemprop | [
"f99cea2c08f54640ccd8ad3851a93f47badc72dd"
] | [
"chemprop/models/FFNetwork.py"
] | [
"import torch.nn as nn\nfrom chemprop.nn_utils import get_activation_function\nfrom chemprop.args import TrainArgs\n\n\ndef create_ffn(output_size: int, input_size: int, args: TrainArgs):\n \"\"\"\n Creates the feed-forward layers for the model.\n\n :param args: A :class:`~chemprop.args.TrainArgs` object containing model arguments.\n \"\"\"\n\n first_linear_dim = args.hidden_size * args.number_of_molecules\n # need to also add other 2 network outputs\n if args.use_input_features:\n first_linear_dim += args.features_size\n\n if args.atom_descriptors == \"descriptor\":\n first_linear_dim += args.atom_descriptors_size\n\n first_linear_dim = input_size\n dropout = nn.Dropout(args.dropout)\n activation = get_activation_function(args.activation)\n\n # Create FFN layers\n if args.ffn_num_layers == 1:\n ffn = [dropout, nn.Linear(first_linear_dim, output_size)]\n else:\n ffn = [dropout, nn.Linear(first_linear_dim, args.ffn_hidden_size)]\n for _ in range(args.ffn_num_layers - 2):\n ffn.extend(\n [activation, dropout, nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),]\n )\n ffn.extend(\n [activation, dropout, nn.Linear(args.ffn_hidden_size, output_size),]\n )\n\n # return FFN model\n return nn.Sequential(*ffn)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Sequential"
]
] |
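The chemprop row builds a feed-forward head as `dropout -> [Linear -> activation -> dropout]* -> Linear`. A standalone sketch of that layer pattern without the `TrainArgs` dependency; the `make_ffn` helper, the sizes, and the ReLU activation (in place of `get_activation_function`) are assumptions for illustration:

```python
import torch
import torch.nn as nn

def make_ffn(input_size, hidden_size, output_size, n_layers, dropout=0.1):
    # One hidden layer fewer than n_layers, matching the create_ffn structure above.
    if n_layers == 1:
        layers = [nn.Dropout(dropout), nn.Linear(input_size, output_size)]
    else:
        layers = [nn.Dropout(dropout), nn.Linear(input_size, hidden_size)]
        for _ in range(n_layers - 2):
            layers += [nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, hidden_size)]
        layers += [nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, output_size)]
    return nn.Sequential(*layers)

ffn = make_ffn(input_size=300, hidden_size=300, output_size=1, n_layers=3)
print(ffn(torch.randn(4, 300)).shape)   # torch.Size([4, 1])
```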
lucasdornelles/2DBESO | [
"b92a42346ed4945a3668a3277d67ef412e200cbb"
] | [
"BESO.py"
] | [
"import numpy as np\r\nfrom FEM import get_element_dof\r\nfrom tqdm import tqdm\r\nfrom scipy.spatial.distance import pdist\r\n\r\n\r\ndef get_elements_sensibilities(local_matrix, minimum_density, elements_density,\r\n displacements, penalty, connectivity, nodes_dof):\r\n\r\n # calculate elements sensibilities\r\n\r\n sensibilities = []\r\n for i in tqdm(range(len(connectivity))):\r\n element_dof = get_element_dof(connectivity[i], nodes_dof)\r\n element_displacements = displacements[element_dof]\r\n\r\n element_sensibility = np.matmul(element_displacements,\r\n np.matmul(local_matrix[i],\r\n np.transpose(np.asmatrix(element_displacements))))\r\n if elements_density[i] == 1:\r\n sensibilities.append(element_sensibility[0, 0])\r\n\r\n else:\r\n element_sensibility[0, 0] = element_sensibility[0, 0] * (minimum_density ** (penalty - 1))\r\n sensibilities.append(element_sensibility[0, 0])\r\n\r\n return sensibilities\r\n\r\n\r\ndef get_elements_on_filtering_radius(centers, element_index, filter_radius):\r\n\r\n # identify elements index on filtering radius of element_index element\r\n\r\n element_center = centers[element_index]\r\n\r\n elements_on_filtering_radius = []\r\n for i in range(len(centers)):\r\n if (element_center[0] - filter_radius) <= centers[i][0] <= (element_center[0] + filter_radius) and \\\r\n (element_center[1] - filter_radius) <= centers[i][1] <= (element_center[1] + filter_radius) and \\\r\n pdist([centers[i], element_center]) <= filter_radius:\r\n elements_on_filtering_radius = elements_on_filtering_radius + [i]\r\n\r\n return elements_on_filtering_radius\r\n\r\n\r\ndef get_filtering_weights(centers, filter_radius, all_elements_on_filtering_radius):\r\n\r\n # calculate filtering weights for all elements\r\n\r\n filtering_weights = []\r\n for element_index in range(len(centers)):\r\n element_weights = []\r\n element_center = centers[element_index]\r\n elements_on_filtering_radius = all_elements_on_filtering_radius[element_index]\r\n for elements in elements_on_filtering_radius:\r\n center = centers[elements]\r\n weight = filter_radius - pdist([element_center, center])\r\n element_weights = element_weights + [weight]\r\n filtering_weights.append(element_weights)\r\n\r\n return filtering_weights\r\n\r\n\r\ndef filter_sensibilities(sensibilities, all_elements_on_filtering_radius, filtering_weights):\r\n\r\n # filter sensibilities using filtering weights and elements on filtering radius\r\n\r\n filtered_sensibilities = []\r\n for element_index in range(len(sensibilities)):\r\n element_sensibilitie = 0\r\n elements_on_filtering_radius = all_elements_on_filtering_radius[element_index]\r\n element_filtering_weights = filtering_weights[element_index]\r\n for index in range(len(elements_on_filtering_radius)):\r\n sensibilitie_index = elements_on_filtering_radius[index]\r\n element_sensibilitie = element_sensibilitie + element_filtering_weights[index] * sensibilities[sensibilitie_index]\r\n element_sensibilitie = element_sensibilitie / sum(element_filtering_weights)\r\n filtered_sensibilities.append(element_sensibilitie[0])\r\n\r\n return filtered_sensibilities\r\n\r\n\r\ndef average_sensibilities(last_sensibilities, filtered_sensibilities):\r\n\r\n # average sensibilities with last iteration sensibilities\r\n\r\n averaged_sensibilities = []\r\n\r\n for element_index in range(len(filtered_sensibilities)):\r\n element_sensibilitie = (last_sensibilities[element_index] + filtered_sensibilities[element_index]) / 2\r\n averaged_sensibilities.append(element_sensibilitie)\r\n\r\n return 
averaged_sensibilities\r\n\r\n\r\ndef update_elements_density(averaged_sensibilities, last_elements_density, minimum_area, evolutionary_rate, areas,\r\n surface_type, surface_elements):\r\n\r\n # update elements density using BESO softkill optimum criteria\r\n\r\n last_area = sum(list(np.array(last_elements_density) * np.array(areas)))\r\n new_area = max(minimum_area, last_area * (1 - evolutionary_rate))\r\n\r\n design_elements = []\r\n for i in range(len(surface_type)):\r\n if surface_type[i]:\r\n design_elements = design_elements + surface_elements[i]\r\n\r\n design_sensibilities = [averaged_sensibilities[i] for i in design_elements]\r\n\r\n low = min(design_sensibilities)\r\n high = max(design_sensibilities)\r\n residue = 10 ** (-5)\r\n new_elements_density = []\r\n while ((high - low) / high) > residue:\r\n new_elements_density = list(last_elements_density)\r\n threshold = (high + low) / 2\r\n\r\n for i in range(len(design_sensibilities)):\r\n if design_sensibilities[i] < threshold:\r\n new_elements_density[i] = 0\r\n else:\r\n new_elements_density[i] = 1\r\n\r\n area = sum(list(np.array(new_elements_density) * np.array(areas)))\r\n\r\n if area > new_area:\r\n low = threshold\r\n else:\r\n high = threshold\r\n\r\n new_area = area\r\n\r\n return new_elements_density, new_area\r\n\r\n\r\ndef get_minimum_area(areas, minimum_area_ratio):\r\n\r\n # get minimum area for optimization\r\n\r\n minimum_area = sum(areas) * minimum_area_ratio\r\n return minimum_area\r\n\r\n\r\ndef check_convergence(compliances_list, iteration):\r\n\r\n # check BESO algorithm convergence\r\n\r\n compliance_diference = (sum(compliances_list[(iteration - 5): iteration]) -\r\n sum(compliances_list[(iteration - 10): (iteration - 5)]))\r\n\r\n residue = 0.001\r\n convergence = bool(abs(compliance_diference) <= residue)\r\n\r\n return convergence, compliance_diference\r\n"
] | [
[
"numpy.array",
"scipy.spatial.distance.pdist",
"numpy.asmatrix"
]
] |
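Inside `update_elements_density`, the BESO row picks a sensitivity threshold by bisection so that the kept elements hit a target area. A hedged, minimal sketch of that search with illustrative numbers (unit element areas, five elements, target of three):

```python
import numpy as np

sens = np.array([0.9, 0.1, 0.5, 0.7, 0.2])   # element sensitivities
areas = np.ones_like(sens)                   # unit areas for simplicity
target_area = 3.0                            # keep roughly the 3 most sensitive elements

low, high = sens.min(), sens.max()
while (high - low) / high > 1e-5:            # same relative-residue stop as the row
    threshold = (high + low) / 2
    density = (sens >= threshold).astype(int)  # keep elements at or above the threshold
    area = float((density * areas).sum())
    if area > target_area:
        low = threshold                      # too much material kept: raise the threshold
    else:
        high = threshold                     # too little: lower it

print(density)                               # -> [1 0 1 1 0], area 3.0 at convergence
```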
ZJCV/PyCls | [
"1ef59301646b6134f2ffcc009b4fd76550fa4089"
] | [
"tests/test_model/test_recognizer/test_sknet.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2020/11/21 下午4:16\n@file: test_resnest.py\n@author: zj\n@description: \n\"\"\"\n\nimport torch\n\nfrom zcls.config import cfg\nfrom zcls.config.key_word import KEY_OUTPUT\nfrom zcls.model.recognizers.resnet.resnet import ResNet\n\n\ndef test_data(model, input_shape, output_shape):\n data = torch.randn(input_shape)\n outputs = model(data)[KEY_OUTPUT]\n print(outputs.shape)\n\n assert outputs.shape == output_shape\n\n\ndef test_sknet():\n config_file = 'configs/benchmarks/resnet-resnext/sknet50_zcls_imagenet_224.yaml'\n cfg.merge_from_file(config_file)\n\n model = ResNet(cfg)\n print(model)\n test_data(model, (3, 3, 224, 224), (3, 1000))\n\n\nif __name__ == '__main__':\n print('*' * 10 + ' sknet')\n test_sknet()\n"
] | [
[
"torch.randn"
]
] |