repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
johnlyzhou/behavenet | [
"aa7187322e491299cd7fefbecb8f8f215b33edba",
"aa7187322e491299cd7fefbecb8f8f215b33edba"
] | [
"search/nogamma/search_utils_nogamma.py",
"tests/test_plotting/test_arhmm_utils.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nimport pickle as pkl\nfrom tqdm import tqdm\nfrom sklearn.metrics import r2_score\nfrom behavenet import get_user_dir\nfrom behavenet.fitting.eval import export_latents\nfrom behavenet.fitting.utils import (\n get_expt_dir,\n get_session_dir,\n get_lab_example\n)\n\n\ndef apply_masks(data, masks):\n return data[masks == 1]\n\n\ndef get_expt_dir_wrapper(lab, expt, animal, session, expt_name, n_ae_latents):\n hparams = get_psvae_hparams()\n get_lab_example(hparams, lab, expt)\n hparams['experiment_name'] = expt_name\n hparams['n_ae_latents'] = n_ae_latents\n hparams['animal'] = animal\n hparams['session'] = session\n hparams['session_dir'], sess_ids = get_session_dir(hparams)\n return get_expt_dir(hparams)\n\n\ndef get_version_dir(lab, expt, animal, session, expt_name, n_ae_latents, alpha, beta):\n hparams = get_psvae_hparams()\n get_lab_example(hparams, lab, expt)\n hparams['experiment_name'] = expt_name\n hparams['n_ae_latents'] = n_ae_latents\n hparams['animal'] = animal\n hparams['session'] = session\n hparams['session_dir'], sess_ids = get_session_dir(hparams)\n hparams['expt_dir'] = get_expt_dir(hparams)\n for version_dir in os.listdir(hparams['expt_dir']):\n filename = os.path.join(hparams['expt_dir'], version_dir, 'meta_tags.pkl')\n if os.path.exists(filename):\n meta_tags = pkl.load(open(filename, 'rb'))\n if alpha == meta_tags['ps_vae.alpha'] and beta == meta_tags['ps_vae.beta']:\n return os.path.join(hparams['expt_dir'], version_dir)\n print(\"Version does not exist for alpha: {} and beta: {}\".format(alpha, beta))\n\n\ndef get_psvae_hparams(**kwargs):\n hparams = {\n 'data_dir': get_user_dir('data'),\n 'save_dir': get_user_dir('save'),\n 'model_class': 'ps-vae',\n 'model_type': 'conv',\n 'rng_seed_data': 0,\n 'trial_splits': '8;1;1;0',\n 'train_frac': 1,\n 'rng_seed_model': 0,\n 'fit_sess_io_layers': False,\n 'learning_rate': 1e-4,\n 'l2_reg': 0,\n 'conditional_encoder': False,\n 'vae.beta': 1}\n # update based on kwargs\n for key, val in kwargs.items():\n if key == 'alpha' or key == 'beta':\n hparams['ps_vae.%s' % key] = val\n else:\n hparams[key] = val\n return hparams\n\n\ndef get_meta_tags(expt_dir, version):\n filename = os.path.join(expt_dir, 'version_{}'.format(version), 'meta_tags.pkl')\n try:\n meta_tags = pkl.load(open(filename, 'rb'))\n return meta_tags\n except OSError as e:\n print(e)\n\n\ndef list_hparams(lab, expt, animal, session, expt_name, n_ae_latents):\n hparams = get_psvae_hparams()\n get_lab_example(hparams, lab, expt)\n hparams['experiment_name'] = expt_name\n hparams['n_ae_latents'] = n_ae_latents\n hparams['animal'] = animal\n hparams['session'] = session\n hparams['session_dir'], sess_ids = get_session_dir(hparams)\n hparams['expt_dir'] = get_expt_dir(hparams)\n alphas = set()\n betas = set()\n for version_dir in os.listdir(hparams['expt_dir']):\n if 'version' in version_dir:\n filename = os.path.join(hparams['expt_dir'], version_dir, 'meta_tags.pkl')\n if os.path.exists(filename):\n meta_tags = pkl.load(open(filename, 'rb'))\n alphas.add(meta_tags['ps_vae.alpha'])\n betas.add(meta_tags['ps_vae.beta'])\n return sorted(list(alphas)), sorted(list(betas))\n\n\ndef get_label_r2(hparams, model, data_generator, version, label_names, dtype='val', overwrite=False):\n save_file = os.path.join(\n hparams['expt_dir'], 'version_%i' % version, 'r2_supervised.csv'\n )\n if not os.path.exists(save_file) or overwrite:\n if not os.path.exists(save_file):\n print('R^2 metrics do not exist; computing from scratch')\n 
else:\n print('Overwriting metrics at %s' % save_file)\n metrics_df = []\n data_generator.reset_iterators(dtype)\n for _ in tqdm(range(data_generator.n_tot_batches[dtype])):\n # get next minibatch and put it on the device\n data, sess = data_generator.next_batch(dtype)\n x = data['images'][0]\n y = data['labels'][0].cpu().detach().numpy()\n if 'labels_masks' in data:\n n = data['labels_masks'][0].cpu().detach().numpy()\n else:\n n = np.ones_like(y)\n z = model.get_transformed_latents(x, dataset=sess)\n for i in range(len(label_names)):\n y_true = apply_masks(y[:, i], n[:, i])\n y_pred = apply_masks(z[:, i], n[:, i])\n if len(y_true) > 10:\n r2 = r2_score(y_true, y_pred,\n multioutput='variance_weighted')\n mse = np.mean(np.square(y_true - y_pred))\n else:\n r2 = np.nan\n mse = np.nan\n metrics_df.append(pd.DataFrame({\n 'Trial': data['batch_idx'].item(),\n 'Label': label_names[i],\n 'R2': r2,\n 'MSE': mse,\n 'Model': 'PS-VAE'}, index=[0]))\n\n metrics_df = pd.concat(metrics_df)\n print('Saving results to %s' % save_file)\n metrics_df.to_csv(save_file, index=False, header=True)\n else:\n print('Loading results from %s' % save_file)\n metrics_df = pd.read_csv(save_file)\n return metrics_df\n\n\ndef load_latents_trials_frames(hparams, data_generator, model_ae=None, dtype='test'):\n sess_id = '{}_{}_{}_{}_latents.pkl'.format(\n hparams['lab'], hparams['expt'], hparams['animal'], hparams['session'])\n filename = os.path.join(\n hparams['expt_dir'], 'version_{}'.format(0), sess_id)\n if not os.path.exists(filename):\n print('Exporting latents...', end='')\n export_latents(data_generator, model_ae)\n print('Done')\n latent_dict = pkl.load(open(filename, 'rb'))\n # get all test latents\n latents = []\n trials = []\n frames = []\n for trial in latent_dict['trials'][dtype]:\n ls = latent_dict['latents'][trial]\n n_frames_batch = ls.shape[0]\n latents.append(ls)\n trials.append([trial] * n_frames_batch)\n frames.append(np.arange(n_frames_batch))\n latents = np.concatenate(latents)\n trials = np.concatenate(trials)\n frames = np.concatenate(frames)\n return latents, trials, frames\n",
"import numpy as np\nfrom behavenet.plotting import arhmm_utils\n\n\ndef test_get_discrete_chunks():\n\n states = [\n np.array([0, 1, 1, 1, 2, 2, 0]),\n np.array([3, 3, 3, 4, 4, 2, 2, 2])\n ]\n\n chunks = arhmm_utils.get_discrete_chunks(states, include_edges=True)\n assert np.all(chunks[0] == np.array([[0, 0, 1], [0, 6, 7]]))\n assert np.all(chunks[1] == np.array([[0, 1, 4]]))\n assert np.all(chunks[2] == np.array([[0, 4, 6], [1, 5, 8]]))\n assert np.all(chunks[3] == np.array([[1, 0, 3]]))\n assert np.all(chunks[4] == np.array([[1, 3, 5]]))\n\n chunks = arhmm_utils.get_discrete_chunks(states, include_edges=False)\n assert np.all(chunks[0] == np.array([]))\n assert np.all(chunks[1] == np.array([[0, 1, 4]]))\n assert np.all(chunks[2] == np.array([[0, 4, 6]]))\n assert np.all(chunks[3] == np.array([]))\n assert np.all(chunks[4] == np.array([[1, 3, 5]]))\n\n\ndef test_get_state_durations():\n\n # construct mock HMM class that passes argument through function `most_likely_states`\n class HMM(object):\n @classmethod\n def most_likely_states(cls, x):\n return x\n hmm = HMM()\n hmm.K = 4\n latents = [\n np.array([0, 1, 1, 1, 2, 2, 0]),\n np.array([3, 3, 3, 4, 4, 2, 2, 2]),\n np.array([0, 0, 0, 3, 3, 3, 1, 1, 2])\n ]\n\n durations = arhmm_utils.get_state_durations(latents, hmm, include_edges=True)\n assert np.all(durations[0] == np.array([1, 1, 3]))\n assert np.all(durations[1] == np.array([3, 2]))\n assert np.all(durations[2] == np.array([2, 3, 1]))\n assert np.all(durations[3] == np.array([3, 3]))\n\n durations = arhmm_utils.get_state_durations(latents, hmm, include_edges=False)\n assert np.all(durations[0] == np.array([]))\n assert np.all(durations[1] == np.array([3, 2]))\n assert np.all(durations[2] == np.array([2]))\n assert np.all(durations[3] == np.array([3]))\n\n hmm.K = 1\n durations = arhmm_utils.get_state_durations(latents, hmm)\n assert len(durations) == 0\n"
] | [
[
"numpy.square",
"pandas.concat",
"pandas.read_csv",
"numpy.ones_like",
"sklearn.metrics.r2_score",
"numpy.arange",
"numpy.concatenate"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
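Aside: the `get_psvae_hparams` helper in the row above folds arbitrary keyword arguments into its defaults, remapping `alpha` and `beta` onto PS-VAE-specific keys. A minimal sketch of that behaviour (the argument values are illustrative, not taken from the dataset):

```python
# Sketch of the kwargs remapping in get_psvae_hparams (values are illustrative).
hparams = get_psvae_hparams(alpha=1000, beta=5, experiment_name='grid_search')

assert hparams['ps_vae.alpha'] == 1000              # 'alpha' lands under a PS-VAE key
assert hparams['ps_vae.beta'] == 5                  # likewise for 'beta'
assert hparams['experiment_name'] == 'grid_search'  # other kwargs copied through
assert hparams['model_class'] == 'ps-vae'           # untouched defaults retained
```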
jeffpollock9/probability | [
"a4e6841b3d5116a56ef5383ddc6a6e03ccc4ce82",
"24352279e5e255e054bfe9c7bdc7080ecb280fba",
"24352279e5e255e054bfe9c7bdc7080ecb280fba"
] | [
"tensorflow_probability/examples/statistical_rethinking/rethinking/quap.py",
"tensorflow_probability/python/distributions/truncated_cauchy.py",
"tensorflow_probability/python/experimental/distribute/distribute_lib.py"
] | [
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Quadratic approximation for `JointDistributions`.\n\nThis is supporting code for Statistical Rethinking, and provides light wrappers\naround existing TensorFlow Probability functions.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\ntfd = tfp.distributions\n\n__all__ = [\"quap\"]\n\n\ndef merge_flat_args(args, defaults):\n idx = 0\n new_args = []\n for val in defaults:\n if val is None:\n new_args.append(args[idx])\n idx += 1\n else:\n new_args.append(val)\n return new_args\n\n\ndef quap(joint_dist, data=None, max_tries=20, initial_position=None, name=None):\n \"\"\"Compute a quadratic approximation to a ``JointDistributionNamed``.\n\n Traverses a JointDistribution*, uses bfgs to minimize the negative\n log probability and estimate the hessian, and returns a JointDistribution of\n the same type, whose distributions are all Gaussians, and covariances are\n set appropriately.\n\n Args:\n joint_dist: A `JointDistributionNamed` or `JointDistributionSequential`\n model. Also works with auto batched versions of the same.\n data: Optional `dict` of data to condition the joint_dist with. The return\n value will be conditioned on this data. If this is `None`, the return\n value will be a quadratic approximation to the distribution itself.\n max_tries: Optional `int` of number of times to run the optimizer internally\n before raising a `RuntimeError`. Default is 20.\n initial_position: Optional `dict` to initialize the optimizer. Keys should\n correspond to names in the JointDistribution. 
Defaults to random draws\n from `joint_dist`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'quap').\n\n Returns:\n `JointDistributionNamed` which is a quadratic approximation to the input\n `joint_dist`, conditioned on `data`.\n\n Raises:\n RuntimeError: In case the optimizer does not converge within `max_tries`.\n \"\"\"\n with tf.name_scope(name or \"quap\"):\n structure = joint_dist.sample()\n\n # A dictionary is the only structure that does not already\n # have None's as placeholders\n if isinstance(data, dict):\n data = {k: data.get(k) for k in structure}\n\n if data is None:\n data = tf.nest.map_structure(lambda j: None, structure)\n\n data = tf.nest.map_structure(lambda j: None if j is None else j, data)\n flat_data = tf.nest.flatten(data)\n\n def try_optimize(idx, opt): # pylint: disable=unused-argument\n locs = tf.nest.flatten(joint_dist.sample(value=initial_position))\n locs = [j for idx, j in enumerate(locs) if flat_data[idx] is None]\n def neg_logp_and_grad(vals):\n def neg_logp(vals):\n args = merge_flat_args(vals, flat_data)\n return -joint_dist.log_prob(tf.nest.pack_sequence_as(\n structure, tf.unstack(args)))\n return tfp.math.value_and_gradient(neg_logp, vals)\n return idx + 1, tfp.optimizer.bfgs_minimize(neg_logp_and_grad, locs)\n\n def should_continue(idx, opt):\n return (idx < max_tries) & ~opt.converged\n\n idx, opt = try_optimize(0, None)\n _, opt = tf.while_loop(should_continue, try_optimize, [idx, opt])\n\n with tf.control_dependencies([tf.debugging.Assert(\n condition=opt.converged, data=opt)]):\n\n stddevs = tf.sqrt(tf.linalg.diag_part(opt.inverse_hessian_estimate))\n\n gaussians = tf.nest.map_structure(\n tfd.Normal,\n tf.unstack(opt.position),\n tf.unstack(stddevs))\n dists = merge_flat_args(gaussians, flat_data)\n dists = [v if isinstance(v, tfd.Distribution) else\n tfd.Deterministic(v) for v in dists]\n\n approx = joint_dist.__class__(\n tf.nest.pack_sequence_as(structure, dists), name=name)\n return approx\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Truncated Cauchy distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.math import numeric\nfrom tensorflow_probability.python.math import special as tfp_math\n\n__all__ = [\n 'TruncatedCauchy',\n]\n\n\ndef _cauchy_cdf_diff(x, y):\n return tfp_math.atan_difference(x, y) / np.pi\n\n\nclass TruncatedCauchy(distribution.Distribution):\n \"\"\"The Truncated Cauchy distribution.\n\n The truncated Cauchy is a Cauchy distribution bounded between `low`\n and `high` (the pdf is 0 outside these bounds and renormalized).\n\n Samples from this distribution are differentiable with respect to `loc`\n and `scale`, but not with respect to the bounds `low` and `high`.\n\n ### Mathematical Details\n\n The probability density function (pdf) of this distribution is:\n ```none\n pdf(x; loc, scale, low, high) =\n { 1 / (pi * scale * (1 + z**2) * A) for low <= x <= high\n { 0 otherwise\n z = (x - loc) / scale\n A = CauchyCDF((high - loc) / scale) - CauchyCDF((low - loc) / scale)\n ```\n\n where:\n\n * `CauchyCDF` is the cumulative density function of the Cauchy distribution\n with 0 mean and unit variance.\n\n This is a scalar distribution so the event shape is always scalar and the\n dimensions of the parameters define the batch_shape.\n\n #### Examples\n ```python\n\n tfd = tfp.distributions\n # Define a batch of two scalar TruncatedCauchy distributions with modes\n # at 0. 
and 1.0 .\n dist = tfd.TruncatedCauchy(loc=[0., 1.], scale=1.,\n low=[-1., 0.],\n high=[1., 1.])\n\n # Evaluate the pdf of the distributions at 0.5 and 0.8 respectively returning\n # a 2-vector tensor.\n dist.prob([0.5, 0.8])\n\n # Get 3 samples, returning a 3 x 2 tensor.\n dist.sample([3])\n ```\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n low,\n high,\n validate_args=False,\n allow_nan_stats=True,\n name='TruncatedCauchy'):\n \"\"\"Construct a TruncatedCauchy.\n\n All parameters of the distribution will be broadcast to the same shape,\n so the resulting distribution will have a batch_shape of the broadcast\n shape of all parameters.\n\n Args:\n loc: Floating point tensor; the modes of the corresponding non-truncated\n Cauchy distribution(s).\n scale: Floating point tensor; the scales of the distribution(s).\n Must contain only positive values.\n low: `float` `Tensor` representing lower bound of the distribution's\n support. Must be such that `low < high`.\n high: `float` `Tensor` representing upper bound of the distribution's\n support. Must be such that `low < high`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked at run-time.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value '`NaN`' to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, scale, low, high], tf.float32)\n self._loc = tensor_util.convert_nonref_to_tensor(\n loc, name='loc', dtype=dtype)\n self._scale = tensor_util.convert_nonref_to_tensor(\n scale, name='scale', dtype=dtype)\n self._low = tensor_util.convert_nonref_to_tensor(\n low, name='low', dtype=dtype)\n self._high = tensor_util.convert_nonref_to_tensor(\n high, name='high', dtype=dtype)\n dtype_util.assert_same_float_dtype(\n [self._loc, self._scale, self._low, self._high])\n\n super(TruncatedCauchy, self).__init__(\n dtype=dtype,\n # Samples do not have gradients with respect to `_low` and `_high`.\n # TODO(b/161297284): Implement these gradients.\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n def _loc_scale_low_high(self, loc=None, scale=None, low=None, high=None):\n loc = tf.convert_to_tensor(self.loc if loc is None else loc)\n scale = tf.convert_to_tensor(self.scale if scale is None else scale)\n low = tf.convert_to_tensor(self.low if low is None else low)\n high = tf.convert_to_tensor(self.high if high is None else high)\n return loc, scale, low, high\n\n def _standardized_low_and_high(self,\n loc=None,\n scale=None,\n low=None,\n high=None):\n loc, scale, low, high = self._loc_scale_low_high(\n loc=loc, scale=scale, low=low, high=high)\n return (low - loc) / scale, (high - loc) / scale\n\n def _normalizer(self,\n loc=None,\n scale=None,\n low=None,\n high=None,\n std_low=None,\n std_high=None):\n if std_low is None or std_high is None:\n std_low, std_high = self._standardized_low_and_high(\n loc=loc, scale=scale, low=low, high=high)\n return _cauchy_cdf_diff(std_high, std_low)\n\n def _log_normalizer(self,\n loc=None,\n scale=None,\n low=None,\n high=None,\n std_low=None,\n std_high=None):\n return tf.math.log(self._normalizer(\n loc=loc,\n 
scale=scale,\n low=low,\n high=high,\n std_low=std_low,\n std_high=std_high))\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n loc=parameter_properties.ParameterProperties(),\n scale=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),\n low=parameter_properties.ParameterProperties(),\n # TODO(b/169874884): Support decoupled parameterization.\n high=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=parameter_properties\n .BIJECTOR_NOT_IMPLEMENTED,))\n # pylint: enable=g-long-lambda\n\n @property\n def loc(self):\n return self._loc\n\n @property\n def scale(self):\n return self._scale\n\n @property\n def low(self):\n return self._low\n\n @property\n def high(self):\n return self._high\n\n def _batch_shape(self):\n return functools.reduce(\n tf.broadcast_static_shape,\n (self.loc.shape, self.scale.shape, self.low.shape, self.high.shape))\n\n def _batch_shape_tensor(self, loc=None, scale=None, low=None, high=None):\n return functools.reduce(\n ps.broadcast_shape,\n (ps.shape(self.loc if loc is None else loc),\n ps.shape(self.scale if scale is None else scale),\n ps.shape(self.low if low is None else low),\n ps.shape(self.high if high is None else high)))\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n loc, scale, low, high = self._loc_scale_low_high()\n batch_shape = self._batch_shape_tensor(\n loc=loc, scale=scale, low=low, high=high)\n sample_and_batch_shape = ps.concat([[n], batch_shape], axis=0)\n\n u = samplers.uniform(sample_and_batch_shape, dtype=self.dtype, seed=seed)\n return self._quantile(u, loc=loc, scale=scale, low=low, high=high)\n\n def _log_prob(self, x):\n loc, scale, low, high = self._loc_scale_low_high()\n log_prob = (\n -tf.math.log1p(tf.square((x - loc) / scale))\n - (np.log(np.pi) + tf.math.log(scale))\n - self._log_normalizer(loc=loc, scale=scale, low=low, high=high))\n # p(x) is 0 outside the bounds.\n return tf.where((x > high) | (x < low),\n dtype_util.as_numpy_dtype(x.dtype)(-np.inf),\n log_prob)\n\n def _cdf(self, x):\n loc, scale, low, high = self._loc_scale_low_high()\n std_low, std_high = self._standardized_low_and_high(\n low=low, high=high, loc=loc, scale=scale)\n return tf.clip_by_value(\n ((_cauchy_cdf_diff((x - loc) / scale, std_low))\n / self._normalizer(std_low=std_low, std_high=std_high)),\n clip_value_min=0., clip_value_max=1.)\n\n def _log_cdf(self, x):\n loc, scale, low, high = self._loc_scale_low_high()\n std_low, std_high = self._standardized_low_and_high(\n low=low, high=high, loc=loc, scale=scale)\n return (\n tf.math.log(_cauchy_cdf_diff((x - loc) / scale, std_low))\n - self._log_normalizer(std_low=std_low, std_high=std_high))\n\n def _mean(self):\n loc, scale, low, high = self._loc_scale_low_high()\n std_low, std_high = self._standardized_low_and_high(\n low=low, high=high, loc=loc, scale=scale)\n\n # Formula from David Olive, \"Applied Robust Statistics\" --\n # see http://parker.ad.siu.edu/Olive/ch4.pdf .\n t = (tf.math.log1p(tf.math.square(std_high))\n - tf.math.log1p(tf.math.square(std_low)))\n t = t / (2 * tfp_math.atan_difference(std_high, std_low))\n return loc + scale * t\n\n def _mode(self):\n # mode = { loc: for low <= loc <= high\n # low: for loc < low\n # high: for loc > high\n # }\n loc = tf.convert_to_tensor(self.loc)\n low = tf.convert_to_tensor(self.low)\n high = 
tf.convert_to_tensor(self.high)\n shape = self._batch_shape_tensor(loc=loc, low=low, high=high)\n # We *must* broadcast with scale to get a correctly shaped output, but\n # TODO(b/141460015): we should not have to explicitly broadcast the first\n # parameter to clip_by_value to align with the second and third parameters.\n return tf.clip_by_value(tf.broadcast_to(loc, shape), low, high)\n\n def _variance(self):\n loc, scale, low, high = self._loc_scale_low_high()\n std_low, std_high = self._standardized_low_and_high(\n low=low, high=high, loc=loc, scale=scale)\n\n # Formula from David Olive, \"Applied Robust Statistics\" --\n # see http://parker.ad.siu.edu/Olive/ch4.pdf .\n atan_diff = tfp_math.atan_difference(std_high, std_low)\n t = (std_high - std_low - atan_diff) / atan_diff\n std_mean = ((tf.math.log1p(tf.math.square(std_high))\n - tf.math.log1p(tf.math.square(std_low))) / (2 * atan_diff))\n return tf.math.square(scale) * (t - tf.math.square(std_mean))\n\n def _quantile(self, p, loc=None, scale=None, low=None, high=None):\n loc, scale, low, high = self._loc_scale_low_high(loc, scale, low, high)\n std_low, std_high = self._standardized_low_and_high(\n low=low, high=high, loc=loc, scale=scale)\n # Use the sum of tangents formula.\n # First, the quantile of the cauchy distribution is tan(pi * (x - 0.5)).\n # and the cdf of the cauchy distribution is 0.5 + arctan(x) / np.pi\n # WLOG, we will assume loc = 0 , scale = 1 (these can be taken in to account\n # by rescaling and shifting low and high, and then scaling the output).\n # We would like to compute quantile(p * (cdf(high) - cdf(low)) + cdf(low))\n # This is the same as:\n # tan(pi * (cdf(low) + (cdf(high) - cdf(low)) * p - 0.5))\n # Let a = pi * (cdf(low) - 0.5), b = pi * (cdf(high) - cdf(low)) * u\n # By using the formula for the cdf we have:\n # a = arctan(low), b = arctan_difference(high, low) * u\n # Thus the quantile is now tan(a + b).\n # By appealing to the sum of tangents formula we have:\n # tan(a + b) = (tan(a) + tan(b)) / (1 - tan(a) * tan(b)) =\n # (low + tan(b)) / (1 - low * tan(b))\n # Thus for a 'standard' truncated cauchy we have the quantile as:\n # quantile(p) = (low + tan(b)) / (1 - low * tan(b)) where\n # b = arctan_difference(high, low) * p.\n\n tanb = tf.math.tan(tfp_math.atan_difference(std_high, std_low) * p)\n x = (std_low + tanb) / (1 - std_low * tanb)\n # Clip the answer to prevent it from falling numerically outside\n # the support.\n return numeric.clip_by_value_preserve_gradient(\n x * scale + loc, clip_value_min=low, clip_value_max=high)\n\n def _default_event_space_bijector(self):\n return sigmoid_bijector.Sigmoid(\n low=self.low, high=self.high, validate_args=self.validate_args)\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n low = None\n high = None\n if is_init != tensor_util.is_ref(self.low):\n low = tf.convert_to_tensor(self.low)\n assertions.append(\n assert_util.assert_finite(low, message='`low` is not finite'))\n if is_init != tensor_util.is_ref(self.high):\n high = tf.convert_to_tensor(self.high)\n assertions.append(\n assert_util.assert_finite(high, message='`high` is not finite'))\n if is_init != tensor_util.is_ref(self.loc):\n assertions.append(\n assert_util.assert_finite(self.loc, message='`loc` is not finite'))\n if is_init != tensor_util.is_ref(self.scale):\n scale = tf.convert_to_tensor(self.scale)\n assertions.extend([\n assert_util.assert_positive(\n scale, message='`scale` must be positive'),\n 
assert_util.assert_finite(scale, message='`scale` is not finite'),\n ])\n if (is_init != tensor_util.is_ref(self.low) or\n is_init != tensor_util.is_ref(self.high)):\n low = tf.convert_to_tensor(self.low) if low is None else low\n high = tf.convert_to_tensor(self.high) if high is None else high\n assertions.append(\n assert_util.assert_greater(\n high,\n low,\n message='TruncatedCauchy not defined when `low >= high`.'))\n return assertions\n\n def _sample_control_dependencies(self, x):\n assertions = []\n if not self.validate_args:\n return assertions\n assertions.append(assert_util.assert_greater_equal(\n x, self.low, message='Sample must be greater than or equal to `low`.'))\n assertions.append(assert_util.assert_less_equal(\n x, self.high, message='Sample must be less than or equal to `high`.'))\n return assertions\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utilities for writing distributed log prob functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.internal import custom_gradient as tfp_custom_gradient\nfrom tensorflow_probability.python.math import gradient as math_gradient\n\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\nJAX_MODE = False\n\nif JAX_MODE:\n from jax import lax # pylint: disable=g-import-not-at-top\n\n\ndef canonicalize_axis_name(axis_name):\n \"\"\"Converts an input into a list of axis strings.\"\"\"\n if not axis_name:\n return []\n if (isinstance(axis_name, str) or\n not isinstance(axis_name, collections.Iterable)):\n return [axis_name]\n return list(axis_name)\n\n\ndef psum(x, axis_name=None):\n if JAX_MODE:\n axis_name = canonicalize_axis_name(axis_name)\n for name in axis_name:\n x = lax.psum(x, name)\n return x\n ctx = tf.distribute.get_replica_context()\n return ctx.all_reduce('sum', x)\n\n\ndef pmean(x, axis_name=None):\n if JAX_MODE:\n axis_name = canonicalize_axis_name(axis_name)\n for name in axis_name:\n x = lax.pmean(x, name)\n return x\n ctx = tf.distribute.get_replica_context()\n return ctx.all_reduce('mean', x)\n\n\ndef get_axis_index(axis_name=None):\n if JAX_MODE:\n return lax.axis_index(axis_name)\n ctx = tf.distribute.get_replica_context()\n return ctx.replica_id_in_sync_group\n\n\ndef get_axis_size(axis_name=None):\n if JAX_MODE:\n return lax.psum(1, axis_name)\n ctx = tf.distribute.get_replica_context()\n return ctx.num_replicas_in_sync\n\n\nclass _DummyGrads(object):\n \"\"\"Wraps gradients to preserve structure when computing a custom gradient.\"\"\"\n\n def __init__(self, grads):\n self.grads = grads\n\n def tree_flatten(self):\n return (self.grads,), ()\n\n @classmethod\n def tree_unflatten(cls, _, xs):\n return cls(*xs)\n\n def __repr__(self):\n return f'_DummyGrads({self.grads})'\n\n\nif JAX_MODE:\n from jax import tree_util # pylint: disable=g-import-not-at-top\n tree_util.register_pytree_node_class(_DummyGrads)\n\n\ndef make_psum_function(fn, in_axes, out_axes):\n \"\"\"Constructs a function that psums over outputs and corrects input gradients.\n\n Given a function `fn`, this `make_psum_function` returns a new one that\n includes psums over terms according to axis names provided in `out_axes`. It\n also adds psums for the vector-Jacobian product of the outputs of `fn` w.r.t.\n its inputs according to `in_axes` if there are axes in the outputs that are\n not present in an input.\n\n Args:\n fn: a callable to be transformed to have psums at its outputs and on the\n gradients to its inputs.\n in_axes: A structure of axis names that should match the structure of the\n input to `fn`. 
If the set of input axes for an input value does not match\n the output axes of a particular output value, the gradient of that output\n value w.r.t. the input value will be psum-ed over the axes present in the\n output but not the input.\n out_axes: A structure of axis names that should match the structure of the\n output of `fn`. The outputs of `fn` will be psum-med according to their\n respective output axes.\n\n Returns:\n A new function that applies psums on to the output of the original\n function and corrects the gradient with respect to its inputs.\n \"\"\"\n\n if not isinstance(in_axes, tuple):\n in_axes = (in_axes,)\n\n def _psum_fn_fwd(*args):\n nest.assert_shallow_structure(args, in_axes)\n out_parts = fn(*args)\n nest.assert_shallow_structure(out_parts, out_axes)\n map_out_axes = nest.map_structure_up_to(out_parts, canonicalize_axis_name,\n out_axes)\n\n total_out_parts = nest.map_structure_up_to(\n out_parts,\n lambda out_part, axis_name: ( # pylint: disable=g-long-lambda\n psum(out_part, axis_name=axis_name) if axis_name else out_part),\n out_parts,\n map_out_axes)\n\n return total_out_parts, (args, out_parts)\n\n def _psum_fn_bwd(args_and_out_parts, gs):\n args, out_parts = args_and_out_parts\n map_in_axes = nest.map_structure_up_to(args, canonicalize_axis_name,\n in_axes)\n map_out_axes = nest.map_structure_up_to(out_parts, canonicalize_axis_name,\n out_axes)\n\n def flat_fn(flat_args):\n unflat_args = tf.nest.pack_sequence_as(args, flat_args)\n out_parts = fn(*unflat_args)\n return tf.nest.flatten(out_parts)\n\n # Operate with flattened lists, to make it easier to tease-out individual\n # outputs for the local grads.\n flat_value = tf.nest.flatten(args)\n flat_gs = tf.nest.flatten(gs)\n local_grads = [\n math_gradient.value_and_gradient( # pylint: disable=g-complex-comprehension\n lambda *val: flat_fn(val)[out_idx], # pylint: disable=cell-var-from-loop\n flat_value,\n output_gradients=out_g)[1] for out_idx, out_g in enumerate(flat_gs)\n ]\n # Transpose.\n local_grads = list(zip(*local_grads))\n # Repack.\n local_grads = tf.nest.pack_sequence_as(args, [\n _DummyGrads(tf.nest.pack_sequence_as(out_parts, v)) for v in local_grads\n ])\n\n def value_grad(v, in_axis_names, term_grads):\n \"\"\"Computes reductions of output gradients.\n\n A `log_prob_parts` function takes in a list of values and outputs\n a log density for each input to the function. The vector-Jacobian\n product (VJP) of a `log_prob_parts` function thus needs to compute the\n gradient of each output term w.r.t. each input value. This function\n overrides the default VJP of an output term `j` w.r.t to an input\n value `i` to include an all-reduce-sum when:\n 1) The gradient of `j` w.r.t. `i` is connected.\n 2) `j` is a sharded term and `i` is an unsharded value.\n\n If these conditions do not hold, the gradient remains the same and\n either corresponds to:\n 1) The gradient of a sharded term w.r.t to a sharded value\n 2) The gradient of an unsharded term w.r.t. to an unsharded value.\n 3) The gradient of an unsharded term w.r.t. to an sharded value.\n In any of these cases, no all-reduce-sum is necessary.\n Args:\n v: The output term of a `log_prob_part` function.\n in_axis_names: A list of axis names indicating whether or not the output\n term is sharded or not, `None` if no sharding.\n term_grads: The gradient of the output term w.r.t. to each of the input\n values to the `log_prob_part` function.\n\n Returns:\n The vector Jacobian product of `v` w.r.t. 
the input parts of the\n `log_prob_parts` function.\n \"\"\"\n term_grads = term_grads.grads\n\n def psum_grads(term_grad, out_axis_names):\n if term_grad is not None:\n psum_axes = [\n axis_name for axis_name in out_axis_names\n if axis_name not in in_axis_names\n ]\n if psum_axes:\n term_grad = psum(term_grad, axis_name=psum_axes)\n return term_grad\n\n total_grad = nest.map_structure_up_to(term_grads, psum_grads, term_grads,\n map_out_axes)\n if all([grad is None for grad in tf.nest.flatten(total_grad)]):\n return None\n return tf.add_n([\n v for v in tf.nest.flatten(total_grad)\n if tfp_custom_gradient.is_valid_gradient(v)\n ])\n\n out = nest.map_structure_up_to(args, value_grad, args, map_in_axes,\n local_grads)\n return out\n\n @tfp_custom_gradient.custom_gradient(\n vjp_fwd=_psum_fn_fwd, vjp_bwd=_psum_fn_bwd)\n def psum_fn(*args):\n return _psum_fn_fwd(*args)[0]\n\n return psum_fn\n\n\ndef make_sharded_log_prob_parts(log_prob_parts_fn, axis_names):\n \"\"\"Constructs a log prob parts function that all-reduces over terms.\n\n Given a log_prob_parts function, this function will return a new one that\n includes all-reduce sums over terms according to the `is_sharded` property. It\n will also add all-reduce sums for the gradient of sharded terms w.r.t.\n unsharded terms.\n\n Args:\n log_prob_parts_fn: a callable that takes in a structured value and returns a\n structure of log densities for each of the terms, that when summed returns\n a locally correct log-density.\n axis_names: a structure of values that matches the input and output of\n `log_prob_parts_fn`. Each value in `axis_names` is either `None, a string\n name of a mapped axis in the JAX backend or any non-`None` value in TF\n backend, or an iterable thereof corresponding to multiple sharding axes.\n If the `axis_name` is not `None`, the returned function will add\n all-reduce sum(s) for its term in the log prob calculation. If it is\n `None`, the returned function will have an all-reduce sum over the\n gradient of sharded terms w.r.t. to the unsharded value.\n\n Returns:\n A new log prob parts function that can be run inside of a strategy.\n \"\"\"\n return make_psum_function(log_prob_parts_fn, (axis_names,), axis_names)\n"
] | [
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.linalg.diag_part",
"tensorflow.compat.v2.unstack",
"tensorflow.compat.v2.debugging.Assert",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.while_loop"
],
[
"numpy.log",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.square",
"tensorflow.compat.v2.TensorShape"
],
[
"tensorflow.python.util.nest.assert_shallow_structure",
"tensorflow.python.util.nest.map_structure_up_to",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.distribute.get_replica_context"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
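Aside: `canonicalize_axis_name` in `distribute_lib.py` above normalizes the flexible `axis_name` argument accepted by `psum`/`pmean` into a plain list of axis names. A few cases derived directly from its definition:

```python
# Behaviour of canonicalize_axis_name from distribute_lib.py above.
assert canonicalize_axis_name(None) == []                # falsy input -> no axes
assert canonicalize_axis_name('data') == ['data']        # bare string -> singleton list
assert canonicalize_axis_name(('i', 'j')) == ['i', 'j']  # iterable -> list of names
```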
Zino-chata/question_gen_v2 | [
"440fbb4eaccb86232a54287d0890c79a4935e418"
] | [
"backUp/prepare_data_orig.py"
] | [
"import pyforest\nimport os\nimport logging\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional\nimport sys\n\nimport torch\nimport nlp\nfrom transformers import T5Tokenizer, BartTokenizer, HfArgumentParser\nfrom datasets import list_datasets, load_dataset, list_metrics, load_metric, Dataset\nimport tqdm\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n task: str = field(\n metadata={\"help\": \"Which task 'qa', 'qg', 'e2e_qg', 'ans_ext', 'multi'. 'multi' means 'qa', 'qg', 'ans_ext' tasks\"}, \n )\n model_type: str = field(metadata={\"help\": \"One of 't5', 'bart'\"})\n dataset_path: Optional[str] = field(\n default=\"data/squad_multitask\",\n metadata={\"help\": \"Path for dataset directory\"}, \n )\n train_file_name: Optional[str] = field(\n default=None,\n metadata={\"help\": \"name for cached train dataset\"},\n )\n valid_file_name: Optional[str] = field(\n default=None,\n metadata={\"help\": \"name for cached valid dataset\"},\n )\n valid_for_qg_only: bool = field(\n default=False,\n metadata={\"help\": \"For multitask dataset valid split should contain only qg task or all tasks.\"}\n )\n qg_format: Optional[str] = field(\n default='highlight_qg_format',\n metadata={\"help\": \"How to format inputs for que generation, 'highlight_qg_format' or 'prepend_qg_format'\"}, \n )\n max_source_length: Optional[int] = field(\n default=512,\n metadata={\"help\": \"Max input length for the source text\"},\n )\n max_target_length: Optional[int] = field(\n default=32,\n metadata={\"help\": \"Max input length for the target text\"},\n )\n\nclass DataProcessor:\n def __init__(self, tokenizer, model_type=\"t5\", max_source_length=512, max_target_length=32):\n self.tokenizer = tokenizer\n self.max_source_length = max_source_length\n self.max_target_length = max_target_length\n self.model_type = model_type\n self.hl_token = \"<hl>\"\n \n if model_type == \"t5\":\n self.sep_token = \"<sep>\"\n elif model_type == \"bart\":\n self.sep_token = \"<sep>\"\n else:\n self.sep_token = \"[SEP]\"\n \n def process(self, dataset):\n if self.model_type == \"t5\":\n dataset = dataset.map(self._add_eos_examples)\n \n dataset = dataset.map(self._add_special_tokens)\n dataset = dataset.map(self._convert_to_features, batched=True)\n\n return dataset\n \n def _add_eos_examples(self, example):\n example['source_text'] = example['source_text'] + \" </s>\"\n example['target_text'] = example['target_text'] + \" </s>\"\n return example\n \n def _add_special_tokens(self, example):\n example['source_text'] = example['source_text'].replace(\"{hl_token}\", self.hl_token) \n example['target_text'] = example['target_text'].replace(\"{sep_token}\", self.sep_token)\n return example\n \n # tokenize the examples\n def _convert_to_features(self, example_batch):\n source_encoding = self.tokenizer.batch_encode_plus(\n example_batch['source_text'],\n max_length=self.max_source_length,\n padding='max_length',\n pad_to_max_length=True,\n truncation=True, \n )\n target_encoding = self.tokenizer.batch_encode_plus(\n example_batch['target_text'],\n max_length=self.max_target_length,\n padding='max_length',\n pad_to_max_length=True,\n truncation=True, \n )\n\n encodings = {\n 'source_ids': source_encoding['input_ids'], \n 'target_ids': target_encoding['input_ids'],\n 'attention_mask': source_encoding['attention_mask'],\n }\n\n return encodings\n\n\ndef filter_qa(example):\n 
return example['task'] == 'qa'\n\ndef filter_qg(example):\n return example['task'] == 'qg'\n\ndef filter_e2e_qg(example):\n return example['task'] == 'e2e_qg'\n\ndef filter_ans_ext(example):\n return example['task'] == 'ans_ext'\n\ndef filter_multi(example):\n return example['task'] != 'e2e_qg'\n\n\nTASK_TO_FILTER_FN = {\n 'qa': filter_qa,\n 'qg': filter_qg,\n 'e2e_qg': filter_e2e_qg,\n 'ans_ext': filter_ans_ext,\n 'multi': filter_multi\n}\n\n\ndef main():\n parser = HfArgumentParser((DataTrainingArguments,))\n\n data_args = parser.parse_args_into_dataclasses()[0]\n\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO\n )\n\n if data_args.model_type == 't5':\n tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n else:\n tokenizer = BartTokenizer.from_pretrained(\"facebook/bart-base\")\n \n tokenizer.add_tokens(['<sep>', '<hl>'])\n \n train_dataset = load_dataset('eli5', split='train_eli5')\n valid_dataset = load_dataset('eli5', split='validation_eli5')\n\n processor = DataProcessor(\n tokenizer,\n model_type=data_args.model_type,\n max_source_length=data_args.max_source_length,\n max_target_length=data_args.max_target_length\n )\n\n print(\"Pre-processing datasets\")\n train_dataset=preprocess_data(train_dataset)\n valid_dataset=preprocess_data(valid_dataset)\n\n print(\"Tokenizing datasets\")\n train_dataset = processor.process(train_dataset)\n valid_dataset = processor.process(valid_dataset)\n\n columns = [\"source_ids\", \"target_ids\", \"attention_mask\"]\n valid_dataset.set_format(type='torch', columns=columns)\n train_dataset.set_format(type='torch', columns=columns)\n\n torch.save(train_dataset, data_args.train_file_name)\n logger.info(f\"saved train dataset at {data_args.train_file_name}\")\n \n torch.save(valid_dataset, data_args.valid_file_name)\n logger.info(f\"saved validation dataset at {data_args.valid_file_name}\")\n\n tokenizer_path = f\"{data_args.model_type}_qg_tokenizer\"\n if not os.path.exists(tokenizer_path):\n os.mkdir(tokenizer_path)\n tokenizer.save_pretrained(tokenizer_path)\n logger.info(f\"saved tokenizer at {tokenizer_path}\")\n\ndef preprocess_data(data):\n answers = [sub[\"answers\"][\"text\"] for sub in data]\n ans_num = [len(ans) for ans in answers]\n questions = [sub[\"title\"] for sub in data]\n questions = [[questions[i]] * ans_num[i] for i in range(len(ans_num))]\n answers = [item for sublist in answers for item in sublist]\n questions = [item for sublist in questions for item in sublist]\n\n data_dict= []\n for i in tqdm.tqdm(range(len(answers))):\n current={}\n current[\"question\"] = questions[i]\n current[\"context\"]=answers[i]\n current= process_e2e_qg(current)\n data_dict.append(current)\n\n source_text=[sub[\"source_text\"] for sub in data_dict]\n target_text = [sub[\"target_text\"] for sub in data_dict]\n task_text = [sub[\"task\"] for sub in data_dict]\n\n data_dict={\"source_text\":source_text, \"target_text\": target_text, \"task\": task_text}\n data_dict = nlp.Dataset.from_dict(data_dict)\n\n return data_dict\n\ndef process_e2e_qg(paragraph):\n source_text = f\"generate questions: {paragraph['context'].strip()}\"\n questions = [paragraph['question'].strip()]\n target_text = \" {sep_token} \".join(questions)\n target_text = f\"{target_text} {{sep_token}}\"\n return {\"source_text\":source_text, \"target_text\": target_text,\"task\":\"e2e_qg\"}\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
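Aside: `process_e2e_qg` in the row above wraps a single question/context pair into the end-to-end question-generation format; the literal `{sep_token}` placeholder is substituted later by `DataProcessor._add_special_tokens`. A small sketch with made-up text:

```python
# Illustrative input/output for process_e2e_qg (the text is made up).
example = {
    "question": "Why is the sky blue? ",
    "context": " Rayleigh scattering favours short wavelengths.",
}
process_e2e_qg(example)
# -> {"source_text": "generate questions: Rayleigh scattering favours short wavelengths.",
#     "target_text": "Why is the sky blue? {sep_token}",
#     "task": "e2e_qg"}
```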
vrdmr/Intro-to-AI-and-ML | [
"9b109a02949ae9cc71580b95c7e9a389412c056d"
] | [
"kmeans.py"
] | [
"import matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import KMeans\n\n# create dataset\nX, y = make_blobs(\n n_samples=150, n_features=2,\n centers=3, cluster_std=0.5,\n shuffle=True, random_state=0\n)\n\nkm = KMeans(\n n_clusters=3, init='random',\n n_init=10, max_iter=300, \n tol=1e-04, random_state=0\n)\ny_km = km.fit_predict(X)\n\n# plot the 3 clusters\nplt.scatter(\n X[y_km == 0, 0], X[y_km == 0, 1],\n s=50, c='lightgreen',\n marker='s', edgecolor='black',\n label='cluster 1'\n)\n\nplt.scatter(\n X[y_km == 1, 0], X[y_km == 1, 1],\n s=50, c='orange',\n marker='o', edgecolor='black',\n label='cluster 2'\n)\n\nplt.scatter(\n X[y_km == 2, 0], X[y_km == 2, 1],\n s=50, c='lightblue',\n marker='v', edgecolor='black',\n label='cluster 3'\n)\n\n# plot the centroids\nplt.scatter(\n km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],\n s=250, marker='*',\n c='red', edgecolor='black',\n label='centroids'\n)\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"sklearn.datasets.make_blobs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
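Aside: once fitted, the `KMeans` estimator above also exposes the clustering results directly as standard scikit-learn attributes, which can be inspected without re-plotting:

```python
# Standard scikit-learn attributes on the fitted estimator above.
print(km.inertia_)                  # within-cluster sum of squared distances
print(km.n_iter_)                   # iterations run until convergence
print((km.labels_ == y_km).all())   # labels_ matches what fit_predict returned
```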
rajivpatel36/pyesg | [
"f16939f6de003c55fc89d8e1bd11af03011ee0aa"
] | [
"pyesg/yield_curve/yield_curve.py"
] | [
"import bisect\nimport numpy as np\n\nfrom typing import Dict, List\n\nCC_SPOT_RATE = \"cc_spot_rate\"\nZCB = \"zcb\"\n\n\nclass YieldCurve:\n \"\"\"\n Class for specifying and extracting rates from a yield curve including interpolation of unspecified points.\n \"\"\"\n def __init__(self):\n # Set so that term 0 gives rate = 0\n self._mapping = {0: 0} # type: Dict[float, float]\n self._terms = [] # type: List[float]\n self._rates = [] # type: List[float]\n self._is_sorted = False\n self._max_term = None\n self._min_term = None\n\n self._rate_functions = {\n CC_SPOT_RATE: self._get_spot_rate,\n ZCB: self._get_zcb,\n }\n\n def _resort_terms_and_rates(self) -> None:\n \"\"\"\n Resorts the self._terms and self._rates list.\n \"\"\"\n self._terms = sorted(self._mapping)\n self._rates = [self._mapping[term] for term in self._terms]\n self._min_term = self._terms[0]\n self._max_term = self._terms[-1]\n self._is_sorted = True\n\n def add_point(self, term: float, rate: float) -> None:\n \"\"\"\n Adds a point to the yield curve.\n Args:\n term: The term of the point.\n rate: The continuously compounded spot rate of the point.\n \"\"\"\n self._mapping[term] = rate\n self._is_sorted = False\n\n def get_rate(self, term: float, rate_type: str = CC_SPOT_RATE):\n rate_function = self._rate_functions.get(rate_type)\n if not rate_function:\n raise ValueError(f\"Rate type {rate_type} is not supported.\")\n\n return rate_function(term)\n\n def _get_spot_rate(self, term: float) -> float:\n \"\"\"\n Returns a rate with the specified term. If the term does not exist, interpolation between points is attempted.\n Args:\n term: The term of the point on the yield curve.\n\n Returns:\n The continuously compounded spot rate for the specified term.\n \"\"\"\n if term < 0:\n raise ValueError(\"A negative term cannot be used.\")\n\n # Get rate if it's already been specified.\n rate = self._mapping.get(term)\n\n if not self._is_sorted:\n self._resort_terms_and_rates()\n\n # Can't find rate so need to interpolate with what we've got\n if not self._terms:\n raise ValueError(\"No terms and rates specified that can be used for interpolation in the yield curve.\")\n\n if term > self._max_term:\n raise ValueError(\"The specified term exceeds the maximum term in the yield curve. Interpolation cannot\"\n \"be carried out.\")\n\n if term < self._min_term:\n raise ValueError(\"The specified term is below the minimum term in the yield curve. Interpolation cannot\"\n \"be carried out.\")\n\n # Look up the index in self._terms and self._rates for the first term immediately AFTER the specified term.\n index_after = bisect.bisect_left(self._terms, term)\n term_after = self._terms[index_after]\n term_before = self._terms[index_after - 1]\n rate_after = self._rates[index_after]\n rate_before = self._rates[index_after - 1]\n\n # interpolate\n return rate_before + (term - term_before) / (term_after - term_before) * (rate_after - rate_before)\n\n def _get_zcb(self, term: float) -> float:\n \"\"\"\n Returns a rate with the specified term. If the term does not exist, interpolation between points is attempted.\n Args:\n term: The term of the point on the yield curve.\n\n Returns:\n The rate for the specified term expressed as a zero coopon bond price.\n \"\"\"\n cc_rate = self._get_spot_rate(term)\n return np.exp(- term * cc_rate)\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
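Aside: a minimal usage sketch of the `YieldCurve` class above; the terms and rates are illustrative numbers, not market data:

```python
# Illustrative use of YieldCurve (numbers are made up).
curve = YieldCurve()
curve.add_point(1.0, 0.02)  # 2% continuously compounded spot rate at 1y
curve.add_point(3.0, 0.03)  # 3% continuously compounded spot rate at 3y

curve.get_rate(2.0)                 # linear interpolation -> 0.025
curve.get_rate(2.0, rate_type=ZCB)  # exp(-2.0 * 0.025) ~ 0.9512
```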
Turgibot/JupyterNinja | [
"93320e918bb4ef51392bf9b2a4ef76b48bb08815"
] | [
"Teaching_Tensorflow/utils.py"
] | [
"# The MIT License (MIT)\n# Copyright (c) 2018 Guy Tordjman. All Rights Reserved.\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Some of this code was adopted from the stanford-tensorflow-tutorials github\n\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\n\ndef plot_images_grid(images, img_shape, given_class, predicted_class=None):\n assert len(images) == 32\n assert len(given_class) == 32\n\n fig, axes = plt.subplots(4, 8)\n fig.subplots_adjust(hspace=0.5, wspace=0.05, left=0, right=2.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot each image\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n # Show given and predicted classes if exists.\n if predicted_class is None:\n xlabel = \"Class: {0}\".format(given_class[i])\n ax.set_xlabel(xlabel)\n\n else:\n xlabel = \"Class: {0}, Predicted: {1}\".format(given_class[i], predicted_class[i])\n if given_class[i] == predicted_class[i]:\n ax.set_xlabel(xlabel, color='green')\n else:\n ax.set_xlabel(xlabel)\n ax.set_xlabel(xlabel, color='red')\n\n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n \ndef create_directory(dir_path):\n \"\"\" Create a directory but only if it doesnt exist. 
\"\"\"\n try:\n os.mkdir(dir_path)\n except OSError:\n pass\n\n\ndef plot_example_errors(cls_pred, correct, data_test_images, data_test_classes):\n\n # Negate the boolean array.\n incorrect = (correct == False)\n\n # Get the images from the test-set that have been\n # incorrectly classified.\n images = data_test_images[incorrect]\n\n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = data_test_classes[incorrect]\n\n # Plot the first 9 images.\n plot_images_grid(images=images[0:32],\n img_shape=(28, 28),\n given_class=cls_true[0:32],\n predicted_class=cls_pred[0:32])\n\n\ndef plot_confusion_matrix(cls_pred , data_test_cls, num_classes):\n # This is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # Get the true classifications for the test-set.\n cls_true = data_test_cls\n\n # Get the confusion matrix using sklearn.\n cm = confusion_matrix(y_true=cls_true,\n y_pred=cls_pred)\n\n # Print the confusion matrix as text.\n print(cm)\n\n # Plot the confusion matrix as an image.\n plt.matshow(cm)\n\n # Make various adjustments to the plot.\n plt.colorbar()\n tick_marks = np.arange(num_classes)\n plt.xticks(tick_marks, range(num_classes))\n plt.yticks(tick_marks, range(num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n"
] | [
[
"numpy.arange",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
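Aside: a hypothetical smoke test for `plot_images_grid` above, using random MNIST-shaped arrays purely for illustration (the function asserts exactly 32 images):

```python
# Hypothetical call to plot_images_grid with random (non-MNIST) data.
import numpy as np
images = np.random.rand(32, 28 * 28)        # 32 flattened 28x28 "images"
labels = np.random.randint(0, 10, size=32)  # random class labels
plot_images_grid(images, img_shape=(28, 28), given_class=labels)
```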
rdarie/statsmodels | [
"55f9e73b7665bc6bda0f4c13a1ac27d60c021777"
] | [
"statsmodels/tsa/tests/test_ar.py"
] | [
"\"\"\"\nTest AR Model\n\"\"\"\nfrom statsmodels.compat.pytest import pytest_warns\nfrom typing import NamedTuple, Union\n\nimport datetime as dt\nfrom itertools import product\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_almost_equal\nimport pandas as pd\nfrom pandas import Index, Series, date_range, period_range\nfrom pandas.testing import assert_series_equal\nimport pytest\n\nfrom statsmodels.datasets import macrodata, sunspots\nfrom statsmodels.iolib.summary import Summary\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.tools.sm_exceptions import SpecificationWarning, ValueWarning\nfrom statsmodels.tools.tools import Bunch\nfrom statsmodels.tsa.ar_model import (\n AutoReg,\n AutoRegResultsWrapper,\n ar_select_order,\n)\nfrom statsmodels.tsa.arima_process import arma_generate_sample\nfrom statsmodels.tsa.deterministic import (\n DeterministicProcess,\n Seasonality,\n TimeTrend,\n)\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.tests.results import results_ar\n\nDECIMAL_6 = 6\nDECIMAL_5 = 5\nDECIMAL_4 = 4\n\n\ndef gen_ar_data(nobs):\n rs = np.random.RandomState(982739)\n idx = pd.date_range(dt.datetime(1900, 1, 1), freq=\"M\", periods=nobs)\n return pd.Series(rs.standard_normal(nobs), index=idx), rs\n\n\ndef gen_ols_regressors(ar, seasonal, trend, exog):\n nobs = 500\n y, rs = gen_ar_data(nobs)\n maxlag = ar if isinstance(ar, int) else max(ar)\n reg = []\n if \"c\" in trend:\n const = pd.Series(np.ones(nobs), index=y.index, name=\"const\")\n reg.append(const)\n if \"t\" in trend:\n time = np.arange(1, nobs + 1)\n time = pd.Series(time, index=y.index, name=\"time\")\n reg.append(time)\n if isinstance(ar, int) and ar:\n lags = np.arange(1, ar + 1)\n elif ar == 0:\n lags = None\n else:\n lags = ar\n if seasonal:\n seasons = np.zeros((500, 12))\n for i in range(12):\n seasons[i::12, i] = 1\n cols = [\"s.{0}\".format(i) for i in range(12)]\n seasons = pd.DataFrame(seasons, columns=cols, index=y.index)\n if \"c\" in trend:\n seasons = seasons.iloc[:, 1:]\n reg.append(seasons)\n if maxlag:\n for lag in lags:\n reg.append(y.shift(lag))\n if exog:\n x = rs.standard_normal((nobs, exog))\n cols = [\"x.{0}\".format(i) for i in range(exog)]\n x = pd.DataFrame(x, columns=cols, index=y.index)\n reg.append(x)\n else:\n x = None\n reg.insert(0, y)\n df = pd.concat(reg, axis=1).dropna()\n endog = df.iloc[:, 0]\n exog = df.iloc[:, 1:]\n return y, x, endog, exog\n\n\nar = [0, 3, [1, 3], [3]]\nseasonal = [True, False]\ntrend = [\"n\", \"c\", \"t\", \"ct\"]\nexog = [None, 2]\ncovs = [\"nonrobust\", \"HC0\"]\nparams = list(product(ar, seasonal, trend, exog, covs))\nfinal = []\nfor param in params:\n if param[0] != 0 or param[1] or param[2] != \"n\" or param[3]:\n final.append(param)\nparams = final\nnames = (\"AR\", \"Seasonal\", \"Trend\", \"Exog\", \"Cov Type\")\nids = [\n \", \".join([n + \": \" + str(p) for n, p in zip(names, param)])\n for param in params\n]\n\n\[email protected](scope=\"module\", params=params, ids=ids)\ndef ols_autoreg_result(request):\n ar, seasonal, trend, exog, cov_type = request.param\n y, x, endog, exog = gen_ols_regressors(ar, seasonal, trend, exog)\n ar_mod = AutoReg(y, ar, seasonal=seasonal, trend=trend, exog=x)\n ar_res = ar_mod.fit(cov_type=cov_type)\n ols = OLS(endog, exog)\n ols_res = ols.fit(cov_type=cov_type, use_t=False)\n return ar_res, ols_res\n\n\nattributes = [\n \"bse\",\n \"cov_params\",\n \"df_model\",\n \"df_resid\",\n \"fittedvalues\",\n \"llf\",\n \"nobs\",\n \"params\",\n 
\"resid\",\n \"scale\",\n \"tvalues\",\n \"use_t\",\n]\n\n\ndef fix_ols_attribute(val, attrib, res):\n \"\"\"\n fixes to correct for df adjustment b/t OLS and AutoReg with nonrobust cov\n \"\"\"\n nparam = res.k_constant + res.df_model\n nobs = nparam + res.df_resid\n df_correction = (nobs - nparam) / nobs\n if attrib in (\"scale\",):\n return val * df_correction\n elif attrib == \"df_model\":\n return val + res.k_constant\n elif res.cov_type != \"nonrobust\":\n return val\n elif attrib in (\"bse\", \"conf_int\"):\n return val * np.sqrt(df_correction)\n elif attrib in (\"cov_params\", \"scale\"):\n return val * df_correction\n elif attrib in (\"f_test\",):\n return val / df_correction\n elif attrib in (\"tvalues\",):\n return val / np.sqrt(df_correction)\n\n return val\n\n\[email protected](\"attribute\", attributes)\ndef test_equiv_ols_autoreg(ols_autoreg_result, attribute):\n a, o = ols_autoreg_result\n ols_a = getattr(o, attribute)\n ar_a = getattr(a, attribute)\n if callable(ols_a):\n ols_a = ols_a()\n ar_a = ar_a()\n ols_a = fix_ols_attribute(ols_a, attribute, o)\n assert_allclose(ols_a, ar_a)\n\n\ndef test_conf_int_ols_autoreg(ols_autoreg_result):\n a, o = ols_autoreg_result\n a_ci = a.conf_int()\n o_ci = o.conf_int()\n if o.cov_type == \"nonrobust\":\n spread = o_ci.T - o.params\n spread = fix_ols_attribute(spread, \"conf_int\", o)\n o_ci = (spread + o.params).T\n\n assert_allclose(a_ci, o_ci)\n\n\ndef test_f_test_ols_autoreg(ols_autoreg_result):\n a, o = ols_autoreg_result\n r = np.eye(a.params.shape[0])\n a_f = a.f_test(r).fvalue\n o_f = o.f_test(r).fvalue\n o_f = fix_ols_attribute(o_f, \"f_test\", o)\n\n assert_allclose(a_f, o_f)\n\n\[email protected]\ndef test_other_tests_autoreg(ols_autoreg_result):\n a, _ = ols_autoreg_result\n r = np.ones_like(a.params)\n a.t_test(r)\n r = np.eye(a.params.shape[0])\n a.wald_test(r, scalar=True)\n\n\n# TODO: test likelihood for ARX model?\n\n\nclass CheckARMixin(object):\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)\n\n def test_bse(self):\n bse = np.sqrt(np.diag(self.res1.cov_params()))\n # no dof correction for compatability with Stata\n assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)\n assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)\n\n def test_llf(self):\n assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)\n\n def test_fpe(self):\n assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)\n\n def test_pickle(self):\n from io import BytesIO\n\n fh = BytesIO()\n # test wrapped results load save pickle\n self.res1.save(fh)\n fh.seek(0, 0)\n res_unpickled = self.res1.__class__.load(fh)\n assert type(res_unpickled) is type(self.res1) # noqa: E721\n\n @pytest.mark.smoke\n def test_summary(self):\n assert isinstance(self.res1.summary().as_text(), str)\n\n @pytest.mark.smoke\n def test_pvalues(self):\n assert isinstance(self.res1.pvalues, (np.ndarray, pd.Series))\n\n\nparams = product(\n [0, 1, 3, [1, 3]],\n [\"n\", \"c\", \"t\", \"ct\"],\n [True, False],\n [0, 2],\n [None, 11],\n [\"none\", \"drop\"],\n [True, False],\n [None, 12],\n)\nparams = list(params)\nparams = [\n param\n for param in params\n if (param[0] or param[1] != \"n\" or param[2] or param[3])\n]\nparams = [\n param\n for param in params\n if not param[2] or (param[2] and (param[4] or param[6]))\n]\nparam_fmt = \"\"\"\\\nlags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \\\nmissing: {5}, pandas: {6}, hold_back{7}\"\"\"\n\nids = [param_fmt.format(*param) for param in 
params]\n\n\ndef gen_data(nobs, nexog, pandas, seed=92874765):\n rs = np.random.RandomState(seed)\n endog = rs.standard_normal((nobs))\n exog = rs.standard_normal((nobs, nexog)) if nexog else None\n if pandas:\n index = pd.date_range(\n dt.datetime(1999, 12, 31), periods=nobs, freq=\"M\"\n )\n endog = pd.Series(endog, name=\"endog\", index=index)\n if nexog:\n cols = [\"exog.{0}\".format(i) for i in range(exog.shape[1])]\n exog = pd.DataFrame(exog, columns=cols, index=index)\n\n class DataSet(NamedTuple):\n endog: Union[np.ndarray, pd.Series]\n exog: Union[np.ndarray, pd.DataFrame]\n\n return DataSet(endog=endog, exog=exog)\n\n\n@pytest.fixture(scope=\"module\", params=params, ids=ids)\ndef ar_data(request):\n lags, trend, seasonal = request.param[:3]\n nexog, period, missing, use_pandas, hold_back = request.param[3:]\n data = gen_data(250, nexog, use_pandas)\n return Bunch(\n trend=trend,\n lags=lags,\n seasonal=seasonal,\n period=period,\n endog=data.endog,\n exog=data.exog,\n missing=missing,\n hold_back=hold_back,\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef ar2(request):\n gen = np.random.RandomState(20210623)\n e = gen.standard_normal(52)\n y = 10 * np.ones_like(e)\n for i in range(2, y.shape[0]):\n y[i] = 1 + 0.5 * y[i - 1] + 0.4 * y[i - 2] + e[i]\n index = pd.period_range(\"2000-01-01\", periods=e.shape[0] - 2, freq=\"M\")\n return pd.Series(y[2:], index=index)\n\n\nparams = product(\n [0, 3, [1, 3]],\n [\"c\"],\n [True, False],\n [0],\n [None, 11],\n [\"drop\"],\n [True, False],\n [None, 12],\n)\nparams = list(params)\nparams = [\n param\n for param in params\n if (param[0] or param[1] != \"n\" or param[2] or param[3])\n]\nparams = [\n param\n for param in params\n if not param[2] or (param[2] and (param[4] or param[6]))\n]\nparam_fmt = \"\"\"\\\nlags: {0}, trend: {1}, seasonal: {2}, nexog: {3}, periods: {4}, \\\nmissing: {5}, pandas: {6}, hold_back: {7}\"\"\"\n\nids = [param_fmt.format(*param) for param in params]\n\n\n# Only test 1/3 to save time\n@pytest.fixture(scope=\"module\", params=params[::3], ids=ids[::3])\ndef plot_data(request):\n lags, trend, seasonal = request.param[:3]\n nexog, period, missing, use_pandas, hold_back = request.param[3:]\n data = gen_data(250, nexog, use_pandas)\n return Bunch(\n trend=trend,\n lags=lags,\n seasonal=seasonal,\n period=period,\n endog=data.endog,\n exog=data.exog,\n missing=missing,\n hold_back=hold_back,\n )\n\n\n@pytest.mark.matplotlib\n@pytest.mark.smoke\ndef test_autoreg_smoke_plots(plot_data, close_figures):\n from matplotlib.figure import Figure\n\n mod = AutoReg(\n plot_data.endog,\n plot_data.lags,\n trend=plot_data.trend,\n seasonal=plot_data.seasonal,\n exog=plot_data.exog,\n hold_back=plot_data.hold_back,\n period=plot_data.period,\n missing=plot_data.missing,\n )\n res = mod.fit()\n fig = res.plot_diagnostics()\n assert isinstance(fig, Figure)\n if plot_data.exog is None:\n fig = res.plot_predict(end=300)\n assert isinstance(fig, Figure)\n fig = res.plot_predict(end=300, alpha=None, in_sample=False)\n assert isinstance(fig, Figure)\n assert isinstance(res.summary(), Summary)\n\n\n@pytest.mark.smoke\ndef test_autoreg_predict_smoke(ar_data):\n mod = AutoReg(\n ar_data.endog,\n ar_data.lags,\n trend=ar_data.trend,\n seasonal=ar_data.seasonal,\n exog=ar_data.exog,\n hold_back=ar_data.hold_back,\n period=ar_data.period,\n missing=ar_data.missing,\n )\n res = mod.fit()\n exog_oos = None\n if ar_data.exog is not None:\n exog_oos = np.empty((1, ar_data.exog.shape[1]))\n mod.predict(res.params, 0, 250, exog_oos=exog_oos)\n if 
ar_data.lags == 0 and ar_data.exog is None:\n mod.predict(res.params, 0, 350, exog_oos=exog_oos)\n if isinstance(ar_data.endog, pd.Series) and (\n not ar_data.seasonal or ar_data.period is not None\n ):\n ar_data.endog.index = list(range(ar_data.endog.shape[0]))\n if ar_data.exog is not None:\n ar_data.exog.index = list(range(ar_data.endog.shape[0]))\n mod = AutoReg(\n ar_data.endog,\n ar_data.lags,\n trend=ar_data.trend,\n seasonal=ar_data.seasonal,\n exog=ar_data.exog,\n period=ar_data.period,\n missing=ar_data.missing,\n )\n mod.predict(res.params, 0, 250, exog_oos=exog_oos)\n\n\n@pytest.mark.smoke\ndef test_parameterless_autoreg():\n data = gen_data(250, 0, False)\n mod = AutoReg(data.endog, 0, trend=\"n\", seasonal=False, exog=None)\n res = mod.fit()\n for attr in dir(res):\n if attr.startswith(\"_\"):\n continue\n\n # TODO\n if attr in (\n \"predict\",\n \"f_test\",\n \"t_test\",\n \"initialize\",\n \"load\",\n \"remove_data\",\n \"save\",\n \"t_test\",\n \"t_test_pairwise\",\n \"wald_test\",\n \"wald_test_terms\",\n \"apply\",\n \"append\",\n ):\n continue\n attr = getattr(res, attr)\n if callable(attr):\n attr()\n else:\n assert isinstance(attr, object)\n\n\ndef test_predict_errors():\n data = gen_data(250, 2, True)\n mod = AutoReg(data.endog, 3)\n res = mod.fit()\n with pytest.raises(ValueError, match=\"exog and exog_oos cannot be used\"):\n mod.predict(res.params, exog=data.exog)\n with pytest.raises(ValueError, match=\"exog and exog_oos cannot be used\"):\n mod.predict(res.params, exog_oos=data.exog)\n with pytest.raises(ValueError, match=\"hold_back must be >= lags\"):\n AutoReg(data.endog, 3, hold_back=1)\n with pytest.raises(ValueError, match=\"freq cannot be inferred\"):\n AutoReg(data.endog.values, 3, seasonal=True)\n\n mod = AutoReg(data.endog, 3, exog=data.exog)\n res = mod.fit()\n with pytest.raises(ValueError, match=r\"The shape of exog \\(200, 2\\)\"):\n mod.predict(res.params, exog=data.exog.iloc[:200])\n with pytest.raises(ValueError, match=\"The number of columns in exog_oos\"):\n mod.predict(res.params, exog_oos=data.exog.iloc[:, :1])\n with pytest.raises(ValueError, match=\"Prediction must have `end` after\"):\n mod.predict(res.params, start=200, end=199)\n with pytest.raises(ValueError, match=\"exog_oos must be provided\"):\n mod.predict(res.params, end=250, exog_oos=None)\n\n mod = AutoReg(data.endog, 0, exog=data.exog)\n res = mod.fit()\n with pytest.raises(ValueError, match=\"start and end indicate that 10\"):\n mod.predict(res.params, end=259, exog_oos=data.exog.iloc[:5])\n\n\ndef test_spec_errors():\n data = gen_data(250, 2, True)\n with pytest.raises(ValueError, match=\"lags must be a non-negative scalar\"):\n AutoReg(data.endog, -1)\n with pytest.raises(ValueError, match=\"All values in lags must be pos\"):\n AutoReg(data.endog, [1, 1, 1])\n with pytest.raises(ValueError, match=\"All values in lags must be pos\"):\n AutoReg(data.endog, [1, -2, 3])\n\n\n@pytest.mark.smoke\ndef test_dynamic_forecast_smoke(ar_data):\n mod = AutoReg(\n ar_data.endog,\n ar_data.lags,\n trend=ar_data.trend,\n seasonal=ar_data.seasonal,\n exog=ar_data.exog,\n hold_back=ar_data.hold_back,\n period=ar_data.period,\n missing=ar_data.missing,\n )\n res = mod.fit()\n res.predict(dynamic=True)\n if ar_data.exog is None:\n res.predict(end=260, dynamic=True)\n\n\n@pytest.mark.smoke\ndef test_ar_select_order_smoke():\n data = sunspots.load().data[\"SUNACTIVITY\"]\n ar_select_order(data, 4, glob=True, trend=\"n\")\n ar_select_order(data, 4, glob=False, trend=\"n\")\n ar_select_order(data, 4, 
seasonal=True, period=12)\n ar_select_order(data, 4, seasonal=False)\n ar_select_order(data, 4, glob=True)\n ar_select_order(data, 4, glob=True, seasonal=True, period=12)\n\n\nclass CheckAutoRegMixin(CheckARMixin):\n def test_bse(self):\n assert_almost_equal(self.res1.bse, self.res2.bse_stata, DECIMAL_6)\n\n\nclass TestAutoRegOLSConstant(CheckAutoRegMixin):\n \"\"\"\n Test AutoReg fit by OLS with a constant.\n \"\"\"\n\n @classmethod\n def setup_class(cls):\n data = sunspots.load()\n data.endog.index = list(range(len(data.endog)))\n cls.res1 = AutoReg(data.endog, lags=9).fit()\n cls.res2 = results_ar.ARResultsOLS(constant=True)\n\n def test_predict(self):\n model = self.res1.model\n params = self.res1.params\n assert_almost_equal(\n model.predict(params)[model.hold_back :],\n self.res2.FVOLSnneg1start0,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params)[model.hold_back :],\n self.res2.FVOLSnneg1start9,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=100),\n self.res2.FVOLSnneg1start100,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=9, end=200),\n self.res2.FVOLSn200start0,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params)[model.hold_back :],\n self.res2.FVOLSdefault,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=200, end=400),\n self.res2.FVOLSn200start200,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=308, end=424),\n self.res2.FVOLSn100start325,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=9, end=310),\n self.res2.FVOLSn301start9,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=308, end=316),\n self.res2.FVOLSn4start312,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=308, end=327),\n self.res2.FVOLSn15start312,\n DECIMAL_4,\n )\n\n\nclass TestAutoRegOLSNoConstant(CheckAutoRegMixin):\n \"\"\"\n Test AR fit by OLS without a constant.\n \"\"\"\n\n @classmethod\n def setup_class(cls):\n data = sunspots.load()\n cls.res1 = AutoReg(np.asarray(data.endog), lags=9, trend=\"n\").fit()\n cls.res2 = results_ar.ARResultsOLS(constant=False)\n\n def test_predict(self):\n model = self.res1.model\n params = self.res1.params\n assert_almost_equal(\n model.predict(params)[model.hold_back :],\n self.res2.FVOLSnneg1start0,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params)[model.hold_back :],\n self.res2.FVOLSnneg1start9,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=100),\n self.res2.FVOLSnneg1start100,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=9, end=200),\n self.res2.FVOLSn200start0,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params)[model.hold_back :],\n self.res2.FVOLSdefault,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=200, end=400),\n self.res2.FVOLSn200start200,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=308, end=424),\n self.res2.FVOLSn100start325,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=9, end=310),\n self.res2.FVOLSn301start9,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=308, end=316),\n self.res2.FVOLSn4start312,\n DECIMAL_4,\n )\n assert_almost_equal(\n model.predict(params, start=308, end=327),\n self.res2.FVOLSn15start312,\n DECIMAL_4,\n )\n\n\n@pytest.mark.parametrize(\"lag\", list(np.arange(1, 16 + 1)))\ndef test_autoreg_info_criterion(lag):\n data = sunspots.load()\n endog = np.asarray(data.endog)\n endog_tmp = endog[16 - lag 
:]\n r = AutoReg(endog_tmp, lags=lag).fit()\n # See issue #324 for the corrections vs. R\n aic = r.aic\n hqic = r.hqic\n bic = r.bic\n\n res1 = np.array([aic, hqic, bic, r.fpe])\n # aic correction to match R\n res2 = results_ar.ARLagResults(\"const\").ic.T\n comp = res2[lag - 1, :].copy()\n k = 2 + lag\n pen = np.array([2, 2 * np.log(np.log(r.nobs)), np.log(r.nobs)])\n comp[:3] = -2 * r.llf + pen * k\n assert_almost_equal(res1, comp, DECIMAL_6)\n\n r2 = AutoReg(endog, lags=lag, hold_back=16).fit()\n assert_allclose(r.aic, r2.aic)\n assert_allclose(r.bic, r2.bic)\n assert_allclose(r.hqic, r2.hqic)\n assert_allclose(r.fpe, r2.fpe)\n\n\n@pytest.mark.parametrize(\"old_names\", [True, False])\ndef test_autoreg_named_series(reset_randomstate, old_names):\n warning = FutureWarning if old_names else None\n dates = period_range(start=\"2011-1\", periods=72, freq=\"M\")\n y = Series(np.random.randn(72), name=\"foobar\", index=dates)\n with pytest_warns(warning):\n results = AutoReg(y, lags=2, old_names=old_names).fit()\n\n if old_names:\n idx = Index([\"intercept\", \"foobar.L1\", \"foobar.L2\"])\n else:\n idx = Index([\"const\", \"foobar.L1\", \"foobar.L2\"])\n assert results.params.index.equals(idx)\n\n\n@pytest.mark.smoke\ndef test_autoreg_series():\n # GH#773\n dta = macrodata.load_pandas().data[\"cpi\"].diff().dropna()\n dates = period_range(start=\"1959Q1\", periods=len(dta), freq=\"Q\")\n dta.index = dates\n ar = AutoReg(dta, lags=15).fit()\n ar.bse\n\n\ndef test_ar_order_select():\n # GH#2118\n np.random.seed(12345)\n y = arma_generate_sample([1, -0.75, 0.3], [1], 100)\n ts = Series(\n y,\n index=date_range(start=dt.datetime(1990, 1, 1), periods=100, freq=\"M\"),\n )\n res = ar_select_order(ts, maxlag=12, ic=\"aic\")\n assert tuple(res.ar_lags) == (1, 2)\n assert isinstance(res.aic, dict)\n assert isinstance(res.bic, dict)\n assert isinstance(res.hqic, dict)\n assert isinstance(res.model, AutoReg)\n assert not res.seasonal\n assert res.trend == \"c\"\n assert res.period is None\n\n\ndef test_autoreg_constant_column_trend():\n sample = np.array(\n [\n 0.46341460943222046,\n 0.46341460943222046,\n 0.39024388790130615,\n 0.4146341383457184,\n 0.4146341383457184,\n 0.4146341383457184,\n 0.3414634168148041,\n 0.4390243887901306,\n 0.46341460943222046,\n 0.4390243887901306,\n ]\n )\n\n with pytest.raises(ValueError, match=\"The model specification cannot\"):\n AutoReg(sample, lags=7)\n with pytest.raises(ValueError, match=\"The model specification cannot\"):\n AutoReg(sample, lags=7, trend=\"n\")\n\n\n@pytest.mark.parametrize(\"old_names\", [True, False])\ndef test_autoreg_summary_corner(old_names):\n data = macrodata.load_pandas().data[\"cpi\"].diff().dropna()\n dates = period_range(start=\"1959Q1\", periods=len(data), freq=\"Q\")\n data.index = dates\n warning = FutureWarning if old_names else None\n with pytest_warns(warning):\n res = AutoReg(data, lags=4, old_names=old_names).fit()\n summ = res.summary().as_text()\n assert \"AutoReg(4)\" in summ\n assert \"cpi.L4\" in summ\n assert \"03-31-1960\" in summ\n with pytest_warns(warning):\n res = AutoReg(data, lags=0, old_names=old_names).fit()\n summ = res.summary().as_text()\n if old_names:\n assert \"intercept\" in summ\n else:\n assert \"const\" in summ\n assert \"AutoReg(0)\" in summ\n\n\n@pytest.mark.smoke\ndef test_autoreg_score():\n data = sunspots.load_pandas()\n ar = AutoReg(np.asarray(data.endog), 3)\n res = ar.fit()\n score = ar.score(res.params)\n assert isinstance(score, np.ndarray)\n assert score.shape == (4,)\n assert ar.information(res.params).shape 
== (4, 4)\n assert_allclose(-ar.hessian(res.params), ar.information(res.params))\n\n\ndef test_autoreg_roots():\n data = sunspots.load_pandas()\n ar = AutoReg(np.asarray(data.endog), lags=1)\n res = ar.fit()\n assert_almost_equal(res.roots, np.array([1.0 / res.params[-1]]))\n\n\ndef test_equiv_dynamic(reset_randomstate):\n e = np.random.standard_normal(1001)\n y = np.empty(1001)\n y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))\n for i in range(1, 1001):\n y[i] = 0.9 * y[i - 1] + e[i]\n mod = AutoReg(y, 1)\n res = mod.fit()\n pred0 = res.predict(500, 800, dynamic=0)\n pred1 = res.predict(500, 800, dynamic=True)\n idx = pd.date_range(dt.datetime(2000, 1, 30), periods=1001, freq=\"M\")\n y = pd.Series(y, index=idx)\n mod = AutoReg(y, 1)\n res = mod.fit()\n pred2 = res.predict(idx[500], idx[800], dynamic=idx[500])\n pred3 = res.predict(idx[500], idx[800], dynamic=0)\n pred4 = res.predict(idx[500], idx[800], dynamic=True)\n assert_allclose(pred0, pred1)\n assert_allclose(pred0, pred2)\n assert_allclose(pred0, pred3)\n assert_allclose(pred0, pred4)\n\n\ndef test_dynamic_against_sarimax():\n rs = np.random.RandomState(12345678)\n e = rs.standard_normal(1001)\n y = np.empty(1001)\n y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))\n for i in range(1, 1001):\n y[i] = 0.9 * y[i - 1] + e[i]\n smod = SARIMAX(y, order=(1, 0, 0), trend=\"c\")\n sres = smod.fit(disp=False)\n mod = AutoReg(y, 1)\n spred = sres.predict(900, 1100)\n pred = mod.predict(sres.params[:2], 900, 1100)\n assert_allclose(spred, pred)\n\n spred = sres.predict(900, 1100, dynamic=True)\n pred = mod.predict(sres.params[:2], 900, 1100, dynamic=True)\n assert_allclose(spred, pred)\n\n spred = sres.predict(900, 1100, dynamic=50)\n pred = mod.predict(sres.params[:2], 900, 1100, dynamic=50)\n assert_allclose(spred, pred)\n\n\ndef test_predict_seasonal():\n rs = np.random.RandomState(12345678)\n e = rs.standard_normal(1001)\n y = np.empty(1001)\n y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))\n effects = 10 * np.cos(np.arange(12) / 11 * 2 * np.pi)\n for i in range(1, 1001):\n y[i] = 10 + 0.9 * y[i - 1] + e[i] + effects[i % 12]\n ys = pd.Series(\n y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq=\"M\")\n )\n mod = AutoReg(ys, 1, seasonal=True)\n res = mod.fit()\n c = res.params.iloc[0]\n seasons = np.zeros(12)\n seasons[1:] = res.params.iloc[1:-1]\n ar = res.params.iloc[-1]\n pred = res.predict(900, 1100, True)\n direct = np.zeros(201)\n direct[0] = y[899] * ar + c + seasons[900 % 12]\n for i in range(1, 201):\n direct[i] = direct[i - 1] * ar + c + seasons[(900 + i) % 12]\n direct = pd.Series(\n direct, index=pd.date_range(ys.index[900], periods=201, freq=\"M\")\n )\n assert_series_equal(pred, direct)\n\n pred = res.predict(900, dynamic=False)\n direct = y[899:-1] * ar + c + seasons[np.arange(900, 1001) % 12]\n direct = pd.Series(\n direct, index=pd.date_range(ys.index[900], periods=101, freq=\"M\")\n )\n assert_series_equal(pred, direct)\n\n\ndef test_predict_exog():\n rs = np.random.RandomState(12345678)\n e = rs.standard_normal(1001)\n y = np.empty(1001)\n x = rs.standard_normal((1001, 2))\n y[:3] = e[:3] * np.sqrt(1.0 / (1 - 0.9 ** 2)) + x[:3].sum(1)\n for i in range(3, 1001):\n y[i] = 10 + 0.9 * y[i - 1] - 0.5 * y[i - 3] + e[i] + x[i].sum()\n ys = pd.Series(\n y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq=\"M\")\n )\n xdf = pd.DataFrame(x, columns=[\"x0\", \"x1\"], index=ys.index)\n mod = AutoReg(ys, [1, 3], trend=\"c\", exog=xdf)\n res = mod.fit()\n assert \"-X\" in str(res.summary())\n\n pred = 
res.predict(900)\n c = res.params.iloc[0]\n ar = res.params.iloc[1:3]\n ex = np.asarray(res.params.iloc[3:])\n direct = c + ar[0] * y[899:-1] + ar[1] * y[897:-3]\n direct += ex[0] * x[900:, 0] + ex[1] * x[900:, 1]\n idx = pd.date_range(ys.index[900], periods=101, freq=\"M\")\n direct = pd.Series(direct, index=idx)\n assert_series_equal(pred, direct)\n exog_oos = rs.standard_normal((100, 2))\n\n pred = res.predict(900, 1100, dynamic=True, exog_oos=exog_oos)\n direct = np.zeros(201)\n direct[0] = c + ar[0] * y[899] + ar[1] * y[897] + x[900] @ ex\n direct[1] = c + ar[0] * direct[0] + ar[1] * y[898] + x[901] @ ex\n direct[2] = c + ar[0] * direct[1] + ar[1] * y[899] + x[902] @ ex\n for i in range(3, 201):\n direct[i] = c + ar[0] * direct[i - 1] + ar[1] * direct[i - 3]\n if 900 + i < x.shape[0]:\n direct[i] += x[900 + i] @ ex\n else:\n direct[i] += exog_oos[i - 101] @ ex\n\n direct = pd.Series(\n direct, index=pd.date_range(ys.index[900], periods=201, freq=\"M\")\n )\n assert_series_equal(pred, direct)\n\n\ndef test_predict_irregular_ar():\n rs = np.random.RandomState(12345678)\n e = rs.standard_normal(1001)\n y = np.empty(1001)\n y[:3] = e[:3] * np.sqrt(1.0 / (1 - 0.9 ** 2))\n for i in range(3, 1001):\n y[i] = 10 + 0.9 * y[i - 1] - 0.5 * y[i - 3] + e[i]\n ys = pd.Series(\n y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq=\"M\")\n )\n mod = AutoReg(ys, [1, 3], trend=\"ct\")\n res = mod.fit()\n c = res.params.iloc[0]\n t = res.params.iloc[1]\n ar = np.asarray(res.params.iloc[2:])\n\n pred = res.predict(900, 1100, True)\n direct = np.zeros(201)\n direct[0] = c + t * 901 + ar[0] * y[899] + ar[1] * y[897]\n direct[1] = c + t * 902 + ar[0] * direct[0] + ar[1] * y[898]\n direct[2] = c + t * 903 + ar[0] * direct[1] + ar[1] * y[899]\n for i in range(3, 201):\n direct[i] = (\n c + t * (901 + i) + ar[0] * direct[i - 1] + ar[1] * direct[i - 3]\n )\n direct = pd.Series(\n direct, index=pd.date_range(ys.index[900], periods=201, freq=\"M\")\n )\n assert_series_equal(pred, direct)\n\n pred = res.predict(900)\n direct = (\n c\n + t * np.arange(901, 901 + 101)\n + ar[0] * y[899:-1]\n + ar[1] * y[897:-3]\n )\n idx = pd.date_range(ys.index[900], periods=101, freq=\"M\")\n direct = pd.Series(direct, index=idx)\n assert_series_equal(pred, direct)\n\n\n@pytest.mark.parametrize(\"dynamic\", [True, False])\ndef test_forecast_start_end_equiv(dynamic):\n rs = np.random.RandomState(12345678)\n e = rs.standard_normal(1001)\n y = np.empty(1001)\n y[0] = e[0] * np.sqrt(1.0 / (1 - 0.9 ** 2))\n effects = 10 * np.cos(np.arange(12) / 11 * 2 * np.pi)\n for i in range(1, 1001):\n y[i] = 10 + 0.9 * y[i - 1] + e[i] + effects[i % 12]\n ys = pd.Series(\n y, index=pd.date_range(dt.datetime(1950, 1, 1), periods=1001, freq=\"M\")\n )\n mod = AutoReg(ys, 1, seasonal=True)\n res = mod.fit()\n pred_int = res.predict(1000, 1020, dynamic=dynamic)\n dates = pd.date_range(dt.datetime(1950, 1, 1), periods=1021, freq=\"M\")\n pred_dates = res.predict(dates[1000], dates[1020], dynamic=dynamic)\n assert_series_equal(pred_int, pred_dates)\n\n\n@pytest.mark.parametrize(\"start\", [21, 25])\ndef test_autoreg_start(start):\n y_train = pd.Series(np.random.normal(size=20))\n m = AutoReg(y_train, lags=2)\n mf = m.fit()\n end = start + 5\n pred = mf.predict(start=start, end=end)\n assert pred.shape[0] == end - start + 1\n\n\ndef test_deterministic(reset_randomstate):\n y = pd.Series(np.random.normal(size=200))\n terms = [TimeTrend(constant=True, order=1), Seasonality(12)]\n dp = DeterministicProcess(y.index, additional_terms=terms)\n m = AutoReg(y, 
trend=\"n\", seasonal=False, lags=2, deterministic=dp)\n res = m.fit()\n m2 = AutoReg(y, trend=\"ct\", seasonal=True, lags=2, period=12)\n res2 = m2.fit()\n assert_almost_equal(np.asarray(res.params), np.asarray(res2.params))\n with pytest.warns(\n SpecificationWarning, match=\"When using deterministic, trend\"\n ):\n AutoReg(y, trend=\"ct\", seasonal=False, lags=2, deterministic=dp)\n with pytest.raises(TypeError, match=\"deterministic must be\"):\n AutoReg(y, 2, deterministic=\"ct\")\n\n\ndef test_autoreg_predict_forecast_equiv(reset_randomstate):\n e = np.random.normal(size=1000)\n nobs = e.shape[0]\n idx = pd.date_range(dt.datetime(2020, 1, 1), freq=\"D\", periods=nobs)\n for i in range(1, nobs):\n e[i] = 0.95 * e[i - 1] + e[i]\n y = pd.Series(e, index=idx)\n m = AutoReg(y, trend=\"c\", lags=1)\n res = m.fit()\n a = res.forecast(12)\n b = res.predict(nobs, nobs + 11)\n c = res.forecast(\"2022-10-08\")\n assert_series_equal(a, b)\n assert_series_equal(a, c)\n sarimax_res = SARIMAX(y, order=(1, 0, 0), trend=\"c\").fit(disp=False)\n d = sarimax_res.forecast(12)\n pd.testing.assert_index_equal(a.index, d.index)\n\n\ndef test_autoreg_forecast_period_index():\n pi = pd.period_range(\"1990-1-1\", periods=524, freq=\"M\")\n y = np.random.RandomState(0).standard_normal(500)\n ys = pd.Series(y, index=pi[:500], name=\"y\")\n mod = AutoReg(ys, 3, seasonal=True)\n res = mod.fit()\n fcast = res.forecast(24)\n assert isinstance(fcast.index, pd.PeriodIndex)\n pd.testing.assert_index_equal(fcast.index, pi[-24:])\n\n\[email protected]\ndef test_autoreg_plot_err():\n y = np.random.standard_normal(100)\n mod = AutoReg(y, lags=[1, 3])\n res = mod.fit()\n with pytest.raises(ValueError):\n res.plot_predict(0, end=50, in_sample=False)\n\n\ndef test_autoreg_resids():\n idx = pd.date_range(dt.datetime(1900, 1, 1), periods=250, freq=\"M\")\n rs = np.random.RandomState(0)\n idx_dates = sorted(rs.choice(idx, size=100, replace=False))\n e = rs.standard_normal(250)\n y = np.zeros(250)\n y[:2] = e[:2]\n for i in range(2, 250):\n y[i] = 2 + 1.8 * y[i - 1] - 0.95 * y[i - 2] + e[i]\n ys = pd.Series(y[-100:], index=idx_dates, name=\"y\")\n with pytest.warns(ValueWarning):\n res = AutoReg(ys, lags=2).fit()\n assert np.all(np.isfinite(res.resid))\n\n\ndef test_dynamic_predictions(ar2):\n mod = AutoReg(ar2, 2, trend=\"c\")\n res = mod.fit()\n\n d25 = res.predict(dynamic=25)\n s10_d15 = res.predict(start=10, dynamic=15)\n sd_index = res.predict(start=ar2.index[10], dynamic=ar2.index[25])\n reference = [np.nan, np.nan]\n p = np.asarray(res.params)\n for i in range(2, ar2.shape[0]):\n lag1 = ar2[i - 1]\n lag2 = ar2[i - 2]\n if i > 25:\n lag1 = reference[i - 1]\n if i > 26:\n lag2 = reference[i - 2]\n reference.append(p[0] + p[1] * lag1 + p[2] * lag2)\n expected = pd.Series(reference, index=ar2.index)\n assert_allclose(expected, d25)\n\n assert_allclose(s10_d15, sd_index)\n assert_allclose(d25[25:], sd_index[15:])\n\n full = res.predict()\n assert_allclose(d25[:25], full[:25])\n\n\ndef test_dynamic_predictions_oos(ar2):\n mod = AutoReg(ar2, 2, trend=\"c\")\n res = mod.fit()\n\n d25_end = res.predict(dynamic=25, end=61)\n s10_d15_end = res.predict(start=10, dynamic=15, end=61)\n end = ar2.index[-1] + 12 * (ar2.index[-1] - ar2.index[-2])\n sd_index_end = res.predict(\n start=ar2.index[10], dynamic=ar2.index[25], end=end\n )\n assert_allclose(s10_d15_end, sd_index_end)\n assert_allclose(d25_end[25:], sd_index_end[15:])\n\n reference = [np.nan, np.nan]\n p = np.asarray(res.params)\n for i in range(2, d25_end.shape[0]):\n if i < 
ar2.shape[0]:\n lag1 = ar2[i - 1]\n lag2 = ar2[i - 2]\n if i > 25:\n lag1 = reference[i - 1]\n if i > 26:\n lag2 = reference[i - 2]\n reference.append(p[0] + p[1] * lag1 + p[2] * lag2)\n expected = pd.Series(reference, index=d25_end.index)\n assert_allclose(expected, d25_end)\n\n\ndef test_invalid_dynamic(ar2):\n mod = AutoReg(ar2, 2, trend=\"c\")\n res = mod.fit()\n with pytest.raises(ValueError, match=\"Dynamic prediction cannot\"):\n res.predict(dynamic=-1)\n with pytest.raises(ValueError, match=\"Dynamic prediction cannot\"):\n res.predict(start=ar2.index[10], dynamic=ar2.index[5])\n\n\ndef test_exog_prediction(ar2):\n gen = np.random.RandomState(20210623)\n exog = pd.DataFrame(\n gen.standard_normal((ar2.shape[0], 2)),\n columns=[\"x1\", \"x2\"],\n index=ar2.index,\n )\n mod = AutoReg(ar2, 2, trend=\"c\", exog=exog)\n res = mod.fit()\n pred_base = res.predict()\n pred_repl = res.predict(exog=exog)\n assert_allclose(pred_base, pred_repl)\n\n dyn_base = res.predict(dynamic=25)\n dyn_repl = res.predict(dynamic=25, exog=exog)\n assert_allclose(dyn_base, dyn_repl)\n\n\ndef test_old_names(ar2):\n with pytest.warns(FutureWarning):\n mod = AutoReg(ar2, 2, trend=\"ct\", seasonal=True, old_names=True)\n new = AutoReg(ar2, 2, trend=\"ct\", seasonal=True, old_names=False)\n\n assert new.trend == \"ct\"\n assert new.period == 12\n\n assert \"intercept\" in mod.exog_names\n assert \"seasonal.1\" in mod.exog_names\n\n assert \"const\" in new.exog_names\n assert \"s(2,12)\" in new.exog_names\n\n\ndef test_diagnostic_summary_short(ar2):\n res = AutoReg(ar2[:10], 2).fit()\n assert isinstance(res.diagnostic_summary(), Summary)\n\n\ndef test_ar_model_predict(ar2):\n mod = AutoReg(ar2[:10], 2)\n res = mod.fit()\n res_pred = res.predict()\n mod_pred = mod.predict(res.params)\n assert_allclose(res_pred, mod_pred)\n\n\ndef test_autoreg_no_variables(ar2):\n mod = AutoReg(ar2[:10], None, trend=\"n\")\n res = mod.fit()\n summary = res.summary()\n summ_txt = summary.as_text()\n assert \"AutoReg(0)\" in summ_txt\n assert \"No Model Parameters\" in summ_txt\n\n\ndef test_removal(ar2):\n from statsmodels.tsa.ar_model import AR, ARResults\n\n with pytest.raises(NotImplementedError):\n AR(ar2)\n with pytest.raises(NotImplementedError):\n ARResults(ar2)\n\n\ndef test_autoreg_apply(ols_autoreg_result):\n res, _ = ols_autoreg_result\n y = res.model.endog\n n = y.shape[0] // 2\n y = y[:n]\n x = res.model.exog\n if x is not None:\n x = x[:n]\n res_apply = res.apply(endog=y, exog=x)\n assert \"using a different\" in str(res_apply.summary())\n assert isinstance(res_apply, AutoRegResultsWrapper)\n assert_allclose(res.params, res_apply.params)\n exog_oos = None\n if res.model.exog is not None:\n exog_oos = res.model.exog[-10:]\n fcasts_apply = res_apply.forecast(10, exog=exog_oos)\n assert isinstance(fcasts_apply, np.ndarray)\n assert fcasts_apply.shape == (10,)\n\n res_refit = res.apply(endog=y, exog=x, refit=True)\n assert not np.allclose(res.params, res_refit.params)\n assert not np.allclose(res.llf, res_refit.llf)\n assert res_apply.fittedvalues.shape == res_refit.fittedvalues.shape\n assert not np.allclose(res_apply.llf, res_refit.llf)\n if res.model.exog is None:\n fcasts_refit = res_refit.forecast(10, exog=exog_oos)\n assert isinstance(fcasts_refit, np.ndarray)\n assert fcasts_refit.shape == (10,)\n assert not np.allclose(fcasts_refit, fcasts_apply)\n\n\ndef test_autoreg_apply_exception(reset_randomstate):\n y = np.random.standard_normal(250)\n mod = AutoReg(y, lags=10)\n res = mod.fit()\n with 
pytest.raises(ValueError, match=\"An exception occured\"):\n res.apply(y[:5])\n\n x = np.random.standard_normal((y.shape[0], 3))\n res = AutoReg(y, lags=1, exog=x).fit()\n with pytest.raises(ValueError, match=\"exog must be provided\"):\n res.apply(y[50:150])\n x = np.random.standard_normal((y.shape[0], 3))\n res = AutoReg(y, lags=1, exog=x).fit()\n with pytest.raises(ValueError, match=\"The number of exog\"):\n res.apply(y[50:150], exog=x[50:150, :2])\n\n res = AutoReg(y, lags=1).fit()\n with pytest.raises(ValueError, match=\"exog must be None\"):\n res.apply(y[50:150], exog=x[50:150])\n\n\[email protected]\ndef append_data():\n rs = np.random.RandomState(0)\n y = rs.standard_normal(250)\n x = rs.standard_normal((250, 3))\n x_oos = rs.standard_normal((10, 3))\n y_oos = rs.standard_normal(10)\n index = pd.date_range(\n \"2020-1-1\", periods=y.shape[0] + y_oos.shape[0], freq=\"M\"\n )\n y = pd.Series(y, index=index[: y.shape[0]], name=\"y\")\n x = pd.DataFrame(\n x,\n index=index[: y.shape[0]],\n columns=[f\"x{i}\" for i in range(x.shape[1])],\n )\n y_oos = pd.Series(y_oos, index=index[y.shape[0] :], name=\"y\")\n x_oos = pd.DataFrame(x_oos, index=index[y.shape[0] :], columns=x.columns)\n y_both = pd.concat([y, y_oos], axis=0)\n x_both = pd.concat([x, x_oos], axis=0)\n\n class AppendData(NamedTuple):\n y: pd.Series\n y_oos: pd.Series\n y_both: pd.Series\n x: pd.Series\n x_oos: pd.DataFrame\n x_both: pd.DataFrame\n\n return AppendData(y, y_oos, y_both, x, x_oos, x_both)\n\n\[email protected](\"trend\", [\"n\", \"ct\"])\[email protected](\"use_pandas\", [True, False])\[email protected](\"lags\", [0, 1, 3])\[email protected](\"seasonal\", [True, False])\ndef test_autoreg_append(append_data, use_pandas, lags, trend, seasonal):\n period = 12 if not use_pandas else None\n y = append_data.y\n y_oos = append_data.y_oos\n y_both = append_data.y_both\n x = append_data.x\n x_oos = append_data.x_oos\n x_both = append_data.x_both\n if not use_pandas:\n y = np.asarray(y)\n x = np.asarray(x)\n y_oos = np.asarray(y_oos)\n x_oos = np.asarray(x_oos)\n y_both = np.asarray(y_both)\n x_both = np.asarray(x_both)\n\n res = AutoReg(\n y, lags=lags, trend=trend, seasonal=seasonal, period=period\n ).fit()\n res_append = res.append(y_oos, refit=True)\n res_direct = AutoReg(\n y_both, lags=lags, trend=trend, seasonal=seasonal, period=period\n ).fit()\n res_exog = AutoReg(\n y, exog=x, lags=lags, trend=trend, seasonal=seasonal, period=period\n ).fit()\n res_exog_append = res_exog.append(y_oos, exog=x_oos, refit=True)\n res_exog_direct = AutoReg(\n y_both,\n exog=x_both,\n lags=lags,\n trend=trend,\n seasonal=seasonal,\n period=period,\n ).fit()\n\n assert_allclose(res_direct.params, res_append.params)\n assert_allclose(res_exog_direct.params, res_exog_append.params)\n if use_pandas:\n with pytest.raises(TypeError, match=\"endog must have the same type\"):\n res.append(np.asarray(y_oos))\n with pytest.raises(TypeError, match=\"exog must have the same type\"):\n res_exog.append(y_oos, np.asarray(x_oos))\n with pytest.raises(ValueError, match=\"Original model does\"):\n res.append(y_oos, exog=x_oos)\n with pytest.raises(ValueError, match=\"Original model has exog\"):\n res_exog.append(y_oos)\n\n\ndef test_autoreg_append_deterministic(append_data):\n y = append_data.y\n y_oos = append_data.y_oos\n y_both = append_data.y_both\n x = append_data.x\n x_oos = append_data.x_oos\n x_both = append_data.x_both\n\n terms = [TimeTrend(constant=True, order=1), Seasonality(12)]\n dp = DeterministicProcess(y.index, 
additional_terms=terms)\n\n res = AutoReg(y, lags=3, trend=\"n\", deterministic=dp).fit()\n res_append = res.append(y_oos, refit=True)\n res_direct = AutoReg(\n y_both, lags=3, trend=\"n\", deterministic=dp.apply(y_both.index)\n ).fit()\n assert_allclose(res_append.params, res_direct.params)\n\n res_np = AutoReg(np.asarray(y), lags=3, trend=\"n\", deterministic=dp).fit()\n res_append_np = res_np.append(np.asarray(y_oos))\n assert_allclose(res_np.params, res_append_np.params)\n\n res = AutoReg(y, exog=x, lags=3, trend=\"n\", deterministic=dp).fit()\n res_append = res.append(y_oos, exog=x_oos, refit=True)\n res_direct = AutoReg(\n y_both,\n exog=x_both,\n lags=3,\n trend=\"n\",\n deterministic=dp.apply(y_both.index),\n ).fit()\n assert_allclose(res_append.params, res_direct.params)\n"
] | [
[
"pandas.testing.assert_series_equal",
"pandas.Series",
"numpy.sqrt",
"numpy.asarray",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.ones_like",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"pandas.Index",
"numpy.testing.assert_almost_equal",
"pandas.testing.assert_index_equal",
"numpy.zeros",
"pandas.concat",
"numpy.log",
"pandas.date_range",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.RandomState",
"numpy.random.seed",
"pandas.period_range",
"numpy.isfinite",
"numpy.random.standard_normal",
"numpy.ones",
"numpy.random.normal",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
peterdodds-fb/SOFASonix | [
"6c7156ccc91a6e925599dc590c12b3bc99bd1243",
"6c7156ccc91a6e925599dc590c12b3bc99bd1243",
"6c7156ccc91a6e925599dc590c12b3bc99bd1243"
] | [
"Templates/SimpleFreeFieldHRIR_1.0_1.0.py",
"Templates/SimpleFreeFieldTF_1.0_1.0.py",
"Templates/GeneralFIRE_1.0_1.0.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018, I.Laghidze\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of SOFASonix nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# =============================================================================\n#\n# File: SimpleFreeFieldHRIR_1.0_1.0.py\n# Project: SOFASonix\n# Author: I.Laghidze\n# License: BSD 3\n#\n# =============================================================================\n\nfrom SOFASonix import SOFAFile\nimport numpy as np\n\n\"\"\"\n=============================== Initial Config ================================\n\"\"\"\n\n# Create SOFAFile object with the latest SimpleFreeFieldHRIR convention\nsofa = SOFAFile(\"SimpleFreeFieldHRIR\", sofaConventionsVersion=1.0, version=1.0)\n\n# Set dimensions\nsofa._M = 100\nsofa._N = 1024\n\n# View parameters of convention\nsofa.view()\n\n\n\"\"\"\n=============================== Attributes ====================================\n\"\"\"\n\n# ----- Mandatory attributes -----\nsofa.GLOBAL_AuthorContact = \"\"\nsofa.GLOBAL_License = \"No license provided, ask the author for permission\"\nsofa.GLOBAL_Organization = \"\"\nsofa.GLOBAL_RoomType = \"free field\"\nsofa.GLOBAL_DateCreated = \"2019-06-07 20:32:05\"\nsofa.GLOBAL_DateModified = \"2019-06-07 20:32:05\"\nsofa.GLOBAL_Title = \"\"\nsofa.GLOBAL_DatabaseName = \"\"\nsofa.GLOBAL_ListenerShortName = \"\"\nsofa.ListenerPosition_Type = \"cartesian\"\nsofa.ListenerPosition_Units = \"metre\"\nsofa.ListenerView_Type = \"cartesian\"\nsofa.ListenerView_Units = \"metre\"\nsofa.ReceiverPosition_Type = \"cartesian\"\nsofa.ReceiverPosition_Units = \"metre\"\nsofa.SourcePosition_Type = \"spherical\"\nsofa.SourcePosition_Units = \"degree, degree, metre\"\nsofa.EmitterPosition_Type = \"cartesian\"\nsofa.EmitterPosition_Units = \"metre\"\nsofa.Data_SamplingRate_Units = \"hertz\"\n\n# ----- Non-Mandatory attributes -----\nsofa.GLOBAL_ApplicationName = None\nsofa.GLOBAL_ApplicationVersion = None\nsofa.GLOBAL_Comment = None\nsofa.GLOBAL_History = None\nsofa.GLOBAL_References = None\nsofa.GLOBAL_Origin = None\n\n\n\"\"\"\n=============================== Double 
Variables ==============================\n\"\"\"\n\n# ----- Mandatory double variables -----\n\n# Needs dimensions IC or MC\nsofa.ListenerPosition = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.ListenerUp = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.ListenerView = np.zeros(1)\n\n# Needs dimensions rCI or rCM\nsofa.ReceiverPosition = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.SourcePosition = np.zeros(1)\n\n# Needs dimensions eCI or eCM\nsofa.EmitterPosition = np.zeros(1)\n\n# Needs dimensions mRn\nsofa.Data_IR = np.zeros(1)\n\n# Needs dimensions I\nsofa.Data_SamplingRate = np.zeros(1)\n\n# Needs dimensions IR or MR\nsofa.Data_Delay = np.zeros(1)\n\n# ----- Non-mandatory double variables -----\n\n\n\"\"\"\n=============================== Export ========================================\n\"\"\"\n\n# Save file upon completion\nsofa.export(\"filename\")\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018, I.Laghidze\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of SOFASonix nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# =============================================================================\n#\n# File: SimpleFreeFieldTF_1.0_1.0.py\n# Project: SOFASonix\n# Author: I.Laghidze\n# License: BSD 3\n#\n# =============================================================================\n\nfrom SOFASonix import SOFAFile\nimport numpy as np\n\n\"\"\"\n=============================== Initial Config ================================\n\"\"\"\n\n# Create SOFAFile object with the latest SimpleFreeFieldTF convention\nsofa = SOFAFile(\"SimpleFreeFieldTF\", sofaConventionsVersion=1.0, version=1.0)\n\n# Set dimensions\nsofa._M = 100\nsofa._N = 1024\n\n# View parameters of convention\nsofa.view()\n\n\n\"\"\"\n=============================== Attributes ====================================\n\"\"\"\n\n# ----- Mandatory attributes -----\nsofa.GLOBAL_AuthorContact = \"\"\nsofa.GLOBAL_License = \"No license provided, ask the author for permission\"\nsofa.GLOBAL_ListenerShortName = \"\"\nsofa.GLOBAL_Organization = \"\"\nsofa.GLOBAL_RoomType = \"free field\"\nsofa.GLOBAL_DateCreated = \"2019-06-07 20:32:05\"\nsofa.GLOBAL_DateModified = \"2019-06-07 20:32:05\"\nsofa.GLOBAL_Title = \"\"\nsofa.GLOBAL_DatabaseName = \"\"\nsofa.ListenerPosition_Type = \"cartesian\"\nsofa.ListenerPosition_Units = \"metre\"\nsofa.ListenerView_Type = \"cartesian\"\nsofa.ListenerView_Units = \"metre\"\nsofa.ReceiverPosition_Type = \"cartesian\"\nsofa.ReceiverPosition_Units = \"metre\"\nsofa.SourcePosition_Type = \"spherical\"\nsofa.SourcePosition_Units = \"degree, degree, metre\"\nsofa.EmitterPosition_Type = \"cartesian\"\nsofa.EmitterPosition_Units = \"metre\"\n\n# ----- Non-Mandatory attributes -----\nsofa.GLOBAL_ApplicationName = None\nsofa.GLOBAL_ApplicationVersion = None\nsofa.GLOBAL_Comment = None\nsofa.GLOBAL_History = None\nsofa.GLOBAL_References = None\nsofa.GLOBAL_Origin = None\nsofa.N_LongName = None\nsofa.N_Units = None\n\n\n\"\"\"\n=============================== Double 
Variables ==============================\n\"\"\"\n\n# ----- Mandatory double variables -----\n\n# Needs dimensions N\nsofa.N = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.ListenerPosition = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.ListenerUp = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.ListenerView = np.zeros(1)\n\n# Needs dimensions rCI or rCM\nsofa.ReceiverPosition = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.SourcePosition = np.zeros(1)\n\n# Needs dimensions eCI or eCM\nsofa.EmitterPosition = np.zeros(1)\n\n# Needs dimensions mRn\nsofa.Data_Real = np.zeros(1)\n\n# Needs dimensions MRN\nsofa.Data_Imag = np.zeros(1)\n\n# ----- Non-mandatory double variables -----\n\n\n\"\"\"\n=============================== Export ========================================\n\"\"\"\n\n# Save file upon completion\nsofa.export(\"filename\")\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018, I.Laghidze\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of SOFASonix nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# =============================================================================\n#\n# File: GeneralFIRE_1.0_1.0.py\n# Project: SOFASonix\n# Author: I.Laghidze\n# License: BSD 3\n#\n# =============================================================================\n\nfrom SOFASonix import SOFAFile\nimport numpy as np\n\n\"\"\"\n=============================== Initial Config ================================\n\"\"\"\n\n# Create SOFAFile object with the latest GeneralFIRE convention\nsofa = SOFAFile(\"GeneralFIRE\", sofaConventionsVersion=1.0, version=1.0)\n\n# Set dimensions\nsofa._M = 100\nsofa._N = 1024\nsofa._R = 2\nsofa._E = 4\n\n# View parameters of convention\nsofa.view()\n\n\n\"\"\"\n=============================== Attributes ====================================\n\"\"\"\n\n# ----- Mandatory attributes -----\nsofa.GLOBAL_AuthorContact = \"\"\nsofa.GLOBAL_Comment = \"\"\nsofa.GLOBAL_License = \"No license provided, ask the author for permission\"\nsofa.GLOBAL_Organization = \"\"\nsofa.GLOBAL_RoomType = \"free field\"\nsofa.GLOBAL_DateCreated = \"2019-06-07 20:32:05\"\nsofa.GLOBAL_DateModified = \"2019-06-07 20:32:05\"\nsofa.GLOBAL_Title = \"\"\nsofa.ListenerPosition_Type = \"cartesian\"\nsofa.ListenerPosition_Units = \"metre\"\nsofa.ReceiverPosition_Type = \"cartesian\"\nsofa.ReceiverPosition_Units = \"metre\"\nsofa.SourcePosition_Type = \"spherical\"\nsofa.SourcePosition_Units = \"degree, degree, metre\"\nsofa.EmitterPosition_Type = \"cartesian\"\nsofa.EmitterPosition_Units = \"metre\"\nsofa.Data_SamplingRate_Units = \"hertz\"\n\n# ----- Non-Mandatory attributes -----\nsofa.GLOBAL_ApplicationName = None\nsofa.GLOBAL_ApplicationVersion = None\nsofa.GLOBAL_History = None\nsofa.GLOBAL_References = None\nsofa.GLOBAL_Origin = None\n\n\n\"\"\"\n=============================== Double Variables ==============================\n\"\"\"\n\n# ----- Mandatory double variables -----\n\n# Needs dimensions IC or MC\nsofa.ListenerPosition 
= np.zeros(1)\n\n# Needs dimensions rCI or rCM\nsofa.ReceiverPosition = np.zeros(1)\n\n# Needs dimensions IC or MC\nsofa.SourcePosition = np.zeros(1)\n\n# Needs dimensions eCI or eCM\nsofa.EmitterPosition = np.zeros(1)\n\n# Needs dimensions mREn\nsofa.Data_IR = np.zeros(1)\n\n# Needs dimensions I\nsofa.Data_SamplingRate = np.zeros(1)\n\n# Needs dimensions IRE or MRE\nsofa.Data_Delay = np.zeros(1)\n\n# ----- Non-mandatory double variables -----\n\n\n\"\"\"\n=============================== Export ========================================\n\"\"\"\n\n# Save file upon completion\nsofa.export(\"filename\")\n"
] | [
[
"numpy.zeros"
],
[
"numpy.zeros"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yycho0108/ai604-video-object-pose | [
"7067f36281038272b0e39166d8f9718076bb6e75",
"7067f36281038272b0e39166d8f9718076bb6e75"
] | [
"scripts/keypoint_regression.py",
"src/top/model/loss.py"
] | [
"#!/usr/bin/env python3\n#PYTHON_ARGCOMPLETE_OK\n\nimport enum\nimport logging\nfrom dataclasses import dataclass, replace\nfrom simple_parsing import Serializable\nfrom typing import Dict, Any\nfrom tqdm.auto import tqdm\n\nimport torch as th\nfrom torchvision.transforms import Compose\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch.autograd.profiler as profiler\n\nfrom top.train.saver import Saver\nfrom top.train.trainer import Trainer\nfrom top.train.event.hub import Hub\nfrom top.train.event.topics import Topic\nfrom top.train.event.helpers import (Collect, Periodic, Evaluator)\n\nfrom top.model.keypoint import KeypointNetwork2D\nfrom top.model.loss import (\n ObjectHeatmapLoss, KeypointDisplacementLoss,\n KeypointScaleLoss)\n\nfrom top.data.transforms.augment import PhotometricAugment\nfrom top.data.transforms import (\n DenseMapsMobilePose,\n Normalize,\n InstancePadding,\n DrawKeypointMap,\n PhotometricAugment\n)\nfrom top.data.schema import Schema\nfrom top.data.load import (DatasetSettings, get_loaders)\n\nfrom top.run.app_util import update_settings\nfrom top.run.path_util import RunPath\nfrom top.run.torch_util import resolve_device\n\n\n@dataclass\nclass AppSettings(Serializable):\n model: KeypointNetwork2D.Settings = KeypointNetwork2D.Settings()\n\n # Dataset selection options.\n dataset: DatasetSettings = DatasetSettings()\n\n # NOTE(ycho): root run path is set to tmp dir y default.\n path: RunPath.Settings = RunPath.Settings(root='/tmp/ai604-kpt')\n train: Trainer.Settings = Trainer.Settings()\n batch_size: int = 8\n device: str = ''\n\n # Logging interval / every N train steps\n log_period: int = int(100)\n\n # Checkpointing interval / every N train steps\n save_period: int = int(1e3)\n\n # Evaluation interval / every N train steps\n eval_period: int = int(1e3)\n\n # Auxiliary transform settings ...\n padding: InstancePadding.Settings = InstancePadding.Settings()\n maps: DenseMapsMobilePose.Settings = DenseMapsMobilePose.Settings()\n photo_aug: PhotometricAugment.Settings = PhotometricAugment.Settings()\n\n profile: bool = False\n load_ckpt: str = ''\n\n\nclass TrainLogger:\n \"\"\"\n Logging during training - specifically, tqdm-based logging to the shell and tensorboard.\n \"\"\"\n\n def __init__(self, hub: Hub, writer: th.utils.tensorboard.SummaryWriter,\n period: int):\n self.step = None\n self.hub = hub\n self.writer = writer\n self.tqdm = tqdm()\n self.period = period\n self._subscribe()\n\n self.draw_kpt_map = DrawKeypointMap(\n DrawKeypointMap.Settings(\n as_displacement=False))\n\n def _on_losses(self, losses: Dict[str, th.Tensor]):\n \"\"\"Log individual training losses.\"\"\"\n for k, v in losses.items():\n name = k\n loss = v.detach().cpu()\n self.writer.add_scalar(name, loss,\n global_step=self.step)\n\n def _on_loss(self, loss):\n \"\"\"log training loss.\"\"\"\n loss = loss.detach().cpu()\n\n # Update tensorboard ...\n self.writer.add_scalar('train_loss', loss,\n global_step=self.step)\n\n # Update tqdm logger bar.\n self.tqdm.set_postfix(loss=loss)\n self.tqdm.update(self.period)\n\n def _on_train_out(self, inputs, outputs):\n \"\"\"log training outputs.\"\"\"\n\n # Fetch inputs ...\n with th.no_grad():\n input_image = inputs[Schema.IMAGE].detach()\n out_heatmap = outputs[Schema.HEATMAP].detach()\n target_heatmap = inputs[Schema.HEATMAP].detach()\n # NOTE(ycho): Only show for first image\n # feels a bit wasteful? 
consider better alternatives...\n out_kpt_map = self.draw_kpt_map(\n outputs[Schema.KEYPOINT_HEATMAP][0]).detach()\n target_kpt_map = self.draw_kpt_map(\n inputs[Schema.KEYPOINT_HEATMAP][0]).detach()\n\n # NOTE(ycho): denormalize input image.\n image = th.clip(0.5 + (input_image[0] * 0.25), 0.0, 1.0)\n\n self.writer.add_image(\n 'train_images',\n image.cpu(),\n global_step=self.step)\n\n for i_cls in range(out_heatmap.shape[1]):\n self.writer.add_image(F'out_heatmap/{i_cls}',\n out_heatmap[0, i_cls, None].cpu(),\n global_step=self.step)\n self.writer.add_image(F'target_heatmap/{i_cls}',\n target_heatmap[0, i_cls, None].cpu(),\n global_step=self.step)\n self.writer.add_image('out_kpt_map',\n out_kpt_map.cpu(),\n global_step=self.step)\n self.writer.add_image('target_kpt_map',\n target_kpt_map.cpu(),\n global_step=self.step)\n\n def _on_step(self, step):\n \"\"\"save current step.\"\"\"\n self.step = step\n\n def _subscribe(self):\n self.hub.subscribe(Topic.STEP, self._on_step)\n # NOTE(ycho): Log loss only periodically.\n self.hub.subscribe(Topic.TRAIN_LOSS,\n Periodic(self.period, self._on_loss))\n self.hub.subscribe(Topic.TRAIN_LOSSES,\n Periodic(self.period, self._on_losses))\n self.hub.subscribe(Topic.TRAIN_OUT,\n Periodic(self.period, self._on_train_out))\n\n def __del__(self):\n self.tqdm.close()\n\n\nclass ModelAsTuple(th.nn.Module):\n \"\"\"Workaround to avoid tracing bugs in add_graph from rejecting outputs of\n form Dict[Schema,Any].\"\"\"\n\n def __init__(self, model: th.nn.Module):\n super().__init__()\n self.model = model\n\n def forward(self, inputs):\n return tuple(v for (k, v) in self.model(inputs).items())\n\n\ndef main():\n logging.basicConfig(level=logging.WARN)\n opts = AppSettings()\n opts = update_settings(opts)\n path = RunPath(opts.path)\n\n device = resolve_device(opts.device)\n model = KeypointNetwork2D(opts.model).to(device)\n # FIXME(ycho): Hardcoded lr == 1e-3\n optimizer = th.optim.Adam(model.parameters(), lr=1e-3)\n writer = th.utils.tensorboard.SummaryWriter(path.log)\n\n # NOTE(ycho): Force data loading on the CPU.\n data_device = th.device('cpu')\n\n # TODO(ycho): Consider scripted compositions?\n # If a series of transforms can be fused and compiled,\n # it would probably make it a lot faster to train...\n transform = Compose([\n DenseMapsMobilePose(opts.maps, data_device),\n PhotometricAugment(opts.photo_aug, False),\n Normalize(Normalize.Settings()),\n InstancePadding(opts.padding)\n ])\n\n train_loader, test_loader = get_loaders(opts.dataset,\n device=data_device,\n batch_size=opts.batch_size,\n transform=transform)\n\n # NOTE(ycho): Synchronous event hub.\n hub = Hub()\n\n def _on_train_begin():\n\n # Save meta-parameters.\n opts.save(path.dir / 'opts.yaml')\n # NOTE(ycho): Currently `load` only works with a modified version of the\n # main SimpleParsing repository.\n # opts.load(path.dir / 'opts.yaml')\n\n # Generate tensorboard graph.\n data = next(iter(test_loader))\n dummy = data[Schema.IMAGE].to(device).detach()\n # NOTE(ycho): No need to set model to `eval`,\n # eval mode is set internally within add_graph().\n writer.add_graph(ModelAsTuple(model), dummy)\n\n hub.subscribe(\n Topic.TRAIN_BEGIN, _on_train_begin)\n\n # Periodically log training statistics.\n # FIXME(ycho): hardcoded logging period.\n # NOTE(ycho): Currently only plots `loss`.\n collect = Collect(hub, Topic.METRICS, [])\n train_logger = TrainLogger(hub, writer, opts.log_period)\n\n # Periodically save model, per epoch.\n # TODO(ycho): Consider folding this callback inside 
Trainer().\n hub.subscribe(\n Topic.EPOCH,\n lambda epoch: Saver(\n model,\n optimizer).save(\n path.ckpt /\n F'epoch-{epoch}.zip'))\n\n # Periodically save model, per N training steps.\n # TODO(ycho): Consider folding this callback inside Trainer()\n # and adding {save_period} args to Trainer instead.\n hub.subscribe(\n Topic.STEP,\n Periodic(opts.save_period, lambda step: Saver(\n model,\n optimizer).save(\n path.ckpt /\n F'step-{step}.zip')))\n\n # Periodically evaluate model, per N training steps.\n # NOTE(ycho): Load and process test data ...\n # TODO(ycho): Consider folding this callback inside Trainer()\n # and adding {test_loader, eval_fn} args to Trainer instead.\n def _eval_fn(model, data):\n # TODO(ycho): Actually implement evaluation function.\n # return model(data[Schema.IMAGE].to(device))\n return None\n evaluator = Evaluator(\n Evaluator.Settings(period=opts.eval_period),\n hub, model, test_loader, _eval_fn)\n\n # TODO(ycho):\n # All metrics evaluation should reset stats at eval_begin(),\n # aggregate stats at eval_step(),\n # and output stats at eval_end(). These signals are all implemented.\n # What are the appropriate metrics to implement for keypoint regression?\n # - keypoint matching F1 score(?)\n # - loss_fn() but for the evaluation datasets\n def _on_eval_step(inputs, outputs):\n pass\n hub.subscribe(Topic.EVAL_STEP, _on_eval_step)\n\n collect = Collect(hub, Topic.METRICS, [])\n\n def _log_all(metrics: Dict[Topic, Any]):\n pass\n hub.subscribe(Topic.METRICS, _log_all)\n\n # TODO(ycho): weight the losses with some constant ??\n losses = {\n Schema.HEATMAP: ObjectHeatmapLoss(key=Schema.HEATMAP),\n # Schema.DISPLACEMENT_MAP: KeypointDisplacementLoss(),\n Schema.KEYPOINT_HEATMAP: ObjectHeatmapLoss(\n key=Schema.KEYPOINT_HEATMAP),\n Schema.SCALE: KeypointScaleLoss()\n }\n\n def _loss_fn(model: th.nn.Module, data):\n # Now that we're here, convert all inputs to the device.\n data = {k: (v.to(device) if isinstance(v, th.Tensor) else v)\n for (k, v) in data.items()}\n image = data[Schema.IMAGE]\n outputs = model(image)\n # Also make input/output pair from training\n # iterations available to the event bus.\n hub.publish(Topic.TRAIN_OUT,\n inputs=data,\n outputs=outputs)\n kpt_heatmap_loss = losses[Schema.KEYPOINT_HEATMAP](outputs, data)\n heatmap_loss = losses[Schema.HEATMAP](outputs, data)\n scale_loss = losses[Schema.SCALE](outputs, data)\n # Independently log stuff\n hub.publish(Topic.TRAIN_LOSSES, {\n 'keypoint': kpt_heatmap_loss,\n 'center': heatmap_loss,\n 'scale': scale_loss})\n return (kpt_heatmap_loss + heatmap_loss + scale_loss)\n\n ## Load from checkpoint\n if opts.load_ckpt:\n logging.info(F'Loading checkpoint {opts.load_ckpt} ...')\n Saver(model, optimizer).load(opts.load_ckpt)\n\n ## Trainer\n trainer = Trainer(\n opts.train,\n model,\n optimizer,\n _loss_fn,\n hub,\n train_loader)\n\n # Train, optionally profile\n if opts.profile:\n try:\n with profiler.profile(record_shapes=True, use_cuda=True) as prof:\n trainer.train()\n finally:\n print(\n prof.key_averages().table(\n sort_by='cpu_time_total',\n row_limit=16))\n prof.export_chrome_trace(\"/tmp/trace.json\")\n else:\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python3\n\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom typing import Dict\n\nfrom top.data.schema import Schema\n\nfrom top.model.loss_util import FocalLoss\n\n\nclass ObjectHeatmapLoss(nn.Module):\n def __init__(self, key: Schema = Schema.HEATMAP):\n super().__init__()\n self.focal_loss = FocalLoss()\n self.key = key\n\n def forward(\n self, output: Dict[str, th.Tensor],\n targets: Dict[str, th.Tensor]) -> float:\n\n # Extract relevant tensors from arguments.\n pred = output[self.key]\n target = targets[self.key]\n\n # FIXME(ycho): Hardcoded batch_size inference\n batch_size = target.shape[0]\n\n # NOTE(ycho): deprecated for now ...\n if False:\n diff = pred - target\n mask = th.ones_like(diff, dtype=th.bool)\n # Ignore padded labels after `num_instance`.\n #inums = target[Schema.INSTANCE_NUM]\n #for batch_i in range(batch_size):\n # num_instance = inums[batch_i]\n # mask[batch_i, num_instance:] = False\n\n diff[~mask] = 0.0\n numer = th.sum(th.square(diff))\n denom = th.sum(mask)\n return numer / denom\n\n out = self.focal_loss(pred, target)\n return out\n\n\nclass KeypointDisplacementLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, output: Dict[str, th.Tensor],\n target: Dict[str, th.Tensor]) -> float:\n pred = output[Schema.DISPLACEMENT_MAP]\n\n mask = th.isfinite(target[Schema.DISPLACEMENT_MAP])\n\n diff = pred - target[Schema.DISPLACEMENT_MAP]\n # NOTE(ycho): during inference, this mask is approximated\n # by the heatmaps.\n # NOTE(ycho): We MUST this use form since inf * 0 = NaN.\n diff[~mask] = 0.0\n # NOTE(ycho): Using abs here, which results in L1 loss.\n numer = th.sum(th.abs(diff))\n denom = th.sum(mask)\n return numer / denom\n\n\nclass KeypointHeatmapLoss(nn.Module):\n def __init__(self):\n return NotImplemented\n\n def forward(\n self, output: Dict[str, th.Tensor],\n target: Dict[str, th.Tensor]) -> float:\n return NotImplemented\n\n\nclass KeypointCrossEntropyLoss(nn.Module):\n \"\"\"Given a keypoint heatmap of logits, compute the loss against integer-\n valued target map of keypoints.\n\n TODO(ycho): Perhaps not the best idea, especially if the number of keypoints are very sparse.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n # self.loss = nn.MSELoss()\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, output: th.Tensor, target: th.Tensor) -> float:\n return self.loss(output, target)\n\n\nclass KeypointScaleLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.loss = nn.L1Loss()\n\n def forward(self, output: Dict[str, th.Tensor],\n target: Dict[str, th.Tensor]) -> float:\n\n # We extract the center index from the input.\n # TODO(ycho): Consider adding a data-processing `transform` instead.\n # H, W = inputs[Schema.IMAGE].shape[-2:]\n h, w = output[Schema.SCALE_MAP].shape[-2:]\n\n # FIXME(ycho): `visibility` mask should ultimately account for\n # out-of-range behavior ... 
(fingers crossed)\n        visibility = target[Schema.VISIBILITY].to(dtype=th.bool)[..., 0]\n        keypoints_2d_uv = target[Schema.KEYPOINT_2D]\n        center_uv = keypoints_2d_uv[..., 0, :2]\n        scale_xy = th.as_tensor(\n            [w, h], dtype=th.int32, device=center_uv.device)\n        center_xy = th.round(center_uv * scale_xy).to(dtype=th.int64)\n        # NOTE(ycho): Explicitly writing out (i,j) since the `Objectron`\n        # keypoint order is unconventional.\n        j = center_xy[..., 0]  # (B, O)\n        i = center_xy[..., 1]  # (B, O)\n        flat_index = (i * w + j)\n\n        in_bound = th.all(th.logical_and(center_xy >= 0,\n                                         center_xy < scale_xy), dim=-1)\n        visibility = th.logical_and(visibility, in_bound)\n\n        # NOTE(ycho): Overwrite invalid (invisible) index with 0\n        # in order to prevent errors during gather().\n        # Here, we explicitly check for not only the dataset visibility,\n        # but also the validity of the resulting indexes within image bounds as\n        # well.\n        flat_index[~visibility] = 0\n\n        shape = output[Schema.SCALE_MAP].shape\n\n        X = output[Schema.SCALE_MAP].reshape(shape[:-2] + (-1,))\n        I = flat_index[:, None]\n        I = I.expand(*((-1, shape[1]) + tuple(flat_index.shape[1:])))\n        V = visibility\n\n        # NOTE(ycho): permute required for (B,3,O) -> (B,O,3)\n        scale_output = X.gather(-1, I).permute(0, 2, 1)\n        scale_target = target[Schema.SCALE]\n\n        return self.loss(scale_output[V], scale_target[V])\n"
] | [
[
"torch.clip",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"torch.autograd.profiler.profile"
],
[
"torch.abs",
"torch.nn.CrossEntropyLoss",
"torch.ones_like",
"torch.round",
"torch.sum",
"torch.isfinite",
"torch.square",
"torch.logical_and",
"torch.nn.L1Loss",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
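The `KeypointScaleLoss` entry above relies on a flatten-and-gather trick to pull one scale vector per object out of a dense `(B, C, H, W)` scale map. Below is a minimal, self-contained sketch of that pattern; the shapes and random data are made up for illustration and only `torch` is assumed.

```python
import torch as th

# Toy sizes: batch B, scale channels C (x/y/z), map H x W, O objects per image.
B, C, H, W, O = 2, 3, 4, 4, 5
scale_map = th.randn(B, C, H, W)

# (i, j) pixel coordinates of each object's center, flattened to one index.
i = th.randint(0, H, (B, O))
j = th.randint(0, W, (B, O))
flat_index = i * W + j                          # (B, O)

# Flatten spatial dims, broadcast the index over channels, gather, permute.
X = scale_map.reshape(B, C, H * W)              # (B, C, H*W)
I = flat_index[:, None].expand(B, C, O)         # (B, C, O)
per_object = X.gather(-1, I).permute(0, 2, 1)   # (B, O, C)
print(per_object.shape)  # torch.Size([2, 5, 3])
```

The `permute` at the end mirrors the `(B,3,O) -> (B,O,3)` note in the source, so the gathered predictions line up with per-object targets.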
hyzhak/zalando-research-fashionmnist-analyze | [
"5dfff74f80982769c7ffae746abc58fc7113113b"
] | [
"src/models/baseline_logistic_regression.py"
] | [
"import luigi\nimport mlflow\nimport mlflow.sklearn\nimport pickle\nfrom sklearn.linear_model import LogisticRegression\nimport time\n\nfrom src.data.external_train_set import ExternalTrainSet\nfrom src.utils.params_to_filename import encode_task_to_filename\nfrom src.utils.extract_x_y import extract_x_and_y\n\n\nclass TrainBaselineLogisticRegression(luigi.Task):\n model_name = 'logistic_regression'\n\n solver = luigi.Parameter(\n default='lbfgs',\n description='Algorithm to use in the optimization problem'\n )\n # for small datasets 'liblinear'\n # 'sag' and 'saga' for large\n # 'newton-cg'\n\n multi_class = luigi.Parameter(default='multinomial')\n C = luigi.FloatParameter(\n default=1.0,\n description='Inverse of regularization strength; '\n 'must be a positive float. '\n 'Like in support vector machines, smaller values specify stronger regularization.'\n )\n random_seed = luigi.IntParameter(default=12345)\n n_jobs = luigi.IntParameter(\n default=-1,\n significant=False\n )\n max_iter = luigi.IntParameter(default=100)\n\n # model_file = luigi.Parameter(default='model.pkl')\n\n def output(self):\n filename = encode_task_to_filename(self)\n return luigi.LocalTarget(\n f'models/baseline/{self.model_name}/{filename}.pkl',\n format=luigi.format.Nop\n )\n\n def requires(self):\n return ExternalTrainSet()\n\n def run(self):\n X_train, y_train = extract_x_and_y(self.input())\n\n start = time.time()\n clf = LogisticRegression(solver=self.solver,\n multi_class=self.multi_class,\n random_state=self.random_seed,\n n_jobs=self.n_jobs,\n max_iter=self.max_iter)\n clf.fit(X_train, y_train)\n training_time = time.time() - start\n\n mlflow.sklearn.log_model(clf, 'model')\n with self.output().open('w') as f:\n pickle.dump(clf, f)\n mlflow.log_param('model_name', self.model_name)\n mlflow.log_param('solver', self.solver)\n mlflow.log_param('multi_class', self.multi_class)\n mlflow.log_param('random_seed', self.random_seed)\n mlflow.log_param('max_iter', self.max_iter)\n mlflow.log_metric('training_time', training_time)\n\n\nif __name__ == '__main__':\n with mlflow.start_run():\n luigi.run()\n"
] | [
[
"sklearn.linear_model.LogisticRegression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
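The row above wires `sklearn.linear_model.LogisticRegression` into luigi/mlflow plumbing. Stripped of that plumbing, the classifier it trains reduces to the sketch below; the synthetic data and sizes are invented for illustration, and note that `multi_class` is deprecated in recent scikit-learn releases.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

# Synthetic stand-in for the real training set (shapes invented).
rng = np.random.default_rng(12345)
X_train = rng.normal(size=(200, 20))
y_train = rng.integers(0, 10, size=200)

# Same hyperparameters as the luigi task defaults above.
clf = LogisticRegression(solver='lbfgs',
                         multi_class='multinomial',
                         random_state=12345,
                         n_jobs=-1,
                         max_iter=100)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
```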
h0ke/jesse | [
"02dbf2b5df3a970eed18b276d5e3bcf8fb3f9220",
"02dbf2b5df3a970eed18b276d5e3bcf8fb3f9220"
] | [
"jesse/indicators/beta.py",
"jesse/indicators/willr.py"
] | [
"from typing import Union\n\nimport numpy as np\nimport talib\n\n\ndef beta(candles: np.ndarray, period=5, sequential=False) -> Union[float, np.ndarray]:\n \"\"\"\n BETA - Beta\n\n :param candles: np.ndarray\n :param period: int - default: 5\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n if not sequential and len(candles) > 240:\n candles = candles[-240:]\n\n res = talib.BETA(candles[:, 3], candles[:, 4], timeperiod=period)\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n",
"from typing import Union\n\nimport numpy as np\nimport talib\n\n\ndef willr(candles: np.ndarray, period=14, sequential=False) -> Union[float, np.ndarray]:\n \"\"\"\n WILLR - Williams' %R\n\n :param candles: np.ndarray\n :param period: int - default=14\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n if not sequential and len(candles) > 240:\n candles = candles[-240:]\n\n res = talib.WILLR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=period)\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n"
] | [
[
"numpy.isnan"
],
[
"numpy.isnan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
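Both indicators above share the same return convention: trim the candle window to 240 rows, then return either the full series or only the latest value (`None` while talib is still warming up over its lookback window). A small sketch of that convention in plain numpy, with made-up sample values; the column indices 2-4 in the source assume jesse-style candles laid out as `[timestamp, open, close, high, low, volume]`.

```python
import numpy as np

def last_or_series(res: np.ndarray, sequential: bool):
    """Return convention shared by beta() and willr() above: the whole
    series when sequential=True, else the latest value or None if NaN."""
    if sequential:
        return res
    return None if np.isnan(res[-1]) else res[-1]

# talib leaves NaNs at the front while the lookback window fills up.
res = np.array([np.nan, np.nan, -20.5, -35.0])
print(last_or_series(res, sequential=False))  # -35.0
print(last_or_series(res, sequential=True))   # the full array, NaNs included
```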
jojochuang/determined | [
"22a7cd4b497767d7420b26ead769ba7e61d7f90a"
] | [
"examples/official/native/native_mnist_estimator/native_impl.py"
] | [
"\"\"\"\nThis example demonstrates training a simple DNN with tf.estimator using the Determined\nNative API.\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport os\nimport pathlib\nimport tarfile\nfrom typing import Callable, Dict, List, Tuple\n\nimport requests\nimport tensorflow as tf\n\nimport determined as det\nfrom determined import experimental\nfrom determined.experimental import estimator\nfrom determined.estimator import EstimatorNativeContext\n\nWORK_DIRECTORY = \"/tmp/determined-mnist-estimator-work-dir\"\nMNIST_TF_RECORDS_FILE = \"mnist-tfrecord.tar.gz\"\nMNIST_TF_RECORDS_URL = (\n \"https://s3-us-west-2.amazonaws.com/determined-ai-test-data/\" + MNIST_TF_RECORDS_FILE\n)\n\n\nIMAGE_SIZE = 28\nNUM_CLASSES = 10\n\n\ndef download_mnist_tfrecords(download_directory) -> str:\n \"\"\"\n Return the path of a directory with the MNIST dataset in TFRecord format.\n The dataset will be downloaded into WORK_DIRECTORY, if it is not already\n present.\n \"\"\"\n if not tf.io.gfile.exists(download_directory):\n tf.io.gfile.makedirs(download_directory)\n\n filepath = os.path.join(download_directory, MNIST_TF_RECORDS_FILE)\n if not tf.io.gfile.exists(filepath):\n logging.info(\"Downloading {}\".format(MNIST_TF_RECORDS_URL))\n\n r = requests.get(MNIST_TF_RECORDS_URL)\n with tf.io.gfile.GFile(filepath, \"wb\") as f:\n f.write(r.content)\n logging.info(\"Downloaded {} ({} bytes)\".format(MNIST_TF_RECORDS_FILE, f.size()))\n\n logging.info(\"Extracting {} to {}\".format(MNIST_TF_RECORDS_FILE, download_directory))\n with tarfile.open(filepath, mode=\"r:gz\") as f:\n f.extractall(path=download_directory)\n\n data_dir = os.path.join(download_directory, \"mnist-tfrecord\")\n assert tf.io.gfile.exists(data_dir)\n return data_dir\n\n\ndef parse_mnist_tfrecord(serialized_example: tf.Tensor) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:\n \"\"\"\n Parse a TFRecord representing a single MNIST data point into an input\n feature tensor and a label tensor.\n\n Returns: (features: Dict[str, Tensor], label: Tensor)\n \"\"\"\n raw = tf.io.parse_example(\n serialized=serialized_example, features={\"image_raw\": tf.io.FixedLenFeature([], tf.string)}\n )\n image = tf.io.decode_raw(raw[\"image_raw\"], tf.float32)\n\n label_dict = tf.io.parse_example(\n serialized=serialized_example, features={\"label\": tf.io.FixedLenFeature(1, tf.int64)}\n )\n return {\"image\": image}, label_dict[\"label\"]\n\n\ndef build_estimator(context: EstimatorNativeContext) -> tf.estimator.Estimator:\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=context.get_hparam(\"learning_rate\"))\n # Call `wrap_optimizer` immediately after creating your optimizer.\n optimizer = context.wrap_optimizer(optimizer)\n return tf.compat.v1.estimator.DNNClassifier(\n feature_columns=[\n tf.feature_column.numeric_column(\n \"image\", shape=(IMAGE_SIZE, IMAGE_SIZE, 1), dtype=tf.float32\n )\n ],\n n_classes=NUM_CLASSES,\n hidden_units=[\n context.get_hparam(\"hidden_layer_1\"),\n context.get_hparam(\"hidden_layer_2\"),\n context.get_hparam(\"hidden_layer_3\"),\n ],\n optimizer=optimizer,\n dropout=context.get_hparam(\"dropout\"),\n )\n\n\ndef input_fn(\n context: EstimatorNativeContext, files: List[str], shuffle_and_repeat: bool = False\n) -> Callable:\n def _fn() -> tf.data.TFRecordDataset:\n dataset = tf.data.TFRecordDataset(files)\n # Call `wrap_dataset` immediately after creating your dataset.\n dataset = context.wrap_dataset(dataset)\n if shuffle_and_repeat:\n dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(1000))\n dataset 
= dataset.batch(context.get_per_slot_batch_size())\n dataset = dataset.map(parse_mnist_tfrecord)\n return dataset\n\n return _fn\n\n\ndef _get_filenames(directory: str) -> List[str]:\n return [os.path.join(directory, path) for path in tf.io.gfile.listdir(directory)]\n\n\ndef build_train_spec(\n context: EstimatorNativeContext, download_data_dir: str\n) -> tf.estimator.TrainSpec:\n train_files = _get_filenames(os.path.join(download_data_dir, \"train\"))\n return tf.estimator.TrainSpec(input_fn(context, train_files, shuffle_and_repeat=True))\n\n\ndef build_validation_spec(\n context: EstimatorNativeContext, download_data_dir: str\n) -> tf.estimator.EvalSpec:\n val_files = _get_filenames(os.path.join(download_data_dir, \"validation\"))\n return tf.estimator.EvalSpec(input_fn(context, val_files, shuffle_and_repeat=False))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--config\",\n dest=\"config\",\n help=\"Specifies Determined Experiment configuration.\",\n default=\"{}\",\n )\n parser.add_argument(\"--local\", action=\"store_true\", help=\"Specifies local mode\")\n parser.add_argument(\"--test\", action=\"store_true\", help=\"Specifies test mode\")\n args = parser.parse_args()\n\n config = {\n \"hyperparameters\": {\n \"learning_rate\": det.Log(-4.0, -2.0, 10),\n \"global_batch_size\": det.Constant(64),\n \"hidden_layer_1\": det.Constant(250),\n \"hidden_layer_2\": det.Constant(250),\n \"hidden_layer_3\": det.Constant(250),\n \"dropout\": det.Double(0.0, 0.5),\n },\n \"searcher\": {\n \"name\": \"single\",\n \"metric\": \"accuracy\",\n \"max_length\": {\n \"batches\": 1000,\n },\n \"smaller_is_better\": False,\n },\n }\n config.update(json.loads(args.config))\n\n context = estimator.init(\n config, local=args.local, test=args.test, context_dir=str(pathlib.Path.cwd())\n )\n\n # Create a unique download directory for each rank so they don't overwrite each other.\n download_directory = f\"/tmp/data-rank{context.distributed.get_rank()}\"\n data_dir = download_mnist_tfrecords(download_directory)\n\n context.train_and_evaluate(\n build_estimator(context),\n build_train_spec(context, data_dir),\n build_validation_spec(context, data_dir),\n )\n"
] | [
[
"tensorflow.io.gfile.exists",
"tensorflow.data.TFRecordDataset",
"tensorflow.io.gfile.GFile",
"tensorflow.io.decode_raw",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.FixedLenFeature",
"tensorflow.feature_column.numeric_column",
"tensorflow.io.gfile.listdir",
"tensorflow.data.experimental.shuffle_and_repeat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
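The `parse_mnist_tfrecord` function in the row above parses two features, `image_raw` (raw float32 bytes) and `label` (a single int64). Below is a hedged round-trip sketch that builds one synthetic record and parses it back with the same feature spec; no dataset download is required, and the image size and label are invented.

```python
import numpy as np
import tensorflow as tf

# Build one synthetic serialized example (28x28 float32 image, label 7).
image = np.random.rand(28 * 28).astype(np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
    "image_raw": tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[image.tobytes()])),
    "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
}))
serialized = tf.constant([example.SerializeToString()])  # batch of 1

# Parse it back with the same feature spec as parse_mnist_tfrecord.
raw = tf.io.parse_example(
    serialized=serialized,
    features={"image_raw": tf.io.FixedLenFeature([], tf.string)})
decoded = tf.io.decode_raw(raw["image_raw"], tf.float32)
label = tf.io.parse_example(
    serialized=serialized,
    features={"label": tf.io.FixedLenFeature(1, tf.int64)})["label"]
print(decoded.shape, int(label[0, 0]))  # (1, 784) 7
```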
hellomirro/dgl | [
"ff64bd0de83f865a8076cf50f69525549c32ca87",
"ff64bd0de83f865a8076cf50f69525549c32ca87"
] | [
"python/dgl/transform.py",
"python/dgl/nn/pytorch/conv/agnnconv.py"
] | [
"\"\"\"Module for graph transformation utilities.\"\"\"\n\nfrom collections.abc import Iterable, Mapping\nfrom collections import defaultdict\nimport numpy as np\nfrom scipy import sparse\n\nfrom ._ffi.function import _init_api\nfrom .base import dgl_warning, DGLError\nfrom . import convert\nfrom .heterograph import DGLHeteroGraph, DGLBlock\nfrom . import ndarray as nd\nfrom . import backend as F\nfrom . import utils, batch\nfrom .partition import metis_partition_assignment\nfrom .partition import partition_graph_with_halo\nfrom .partition import metis_partition\n\n# TO BE DEPRECATED\nfrom ._deprecate.graph import DGLGraph as DGLGraphStale\n\n__all__ = [\n 'line_graph',\n 'khop_adj',\n 'khop_graph',\n 'reverse',\n 'to_bidirected',\n 'to_bidirected_stale',\n 'add_reverse_edges',\n 'laplacian_lambda_max',\n 'knn_graph',\n 'segmented_knn_graph',\n 'add_edges',\n 'add_nodes',\n 'remove_edges',\n 'remove_nodes',\n 'add_self_loop',\n 'remove_self_loop',\n 'metapath_reachable_graph',\n 'compact_graphs',\n 'to_block',\n 'to_simple',\n 'to_simple_graph',\n 'as_immutable_graph',\n 'metis_partition_assignment',\n 'partition_graph_with_halo',\n 'metis_partition',\n 'as_heterograph']\n\n\ndef pairwise_squared_distance(x):\n \"\"\"\n x : (n_samples, n_points, dims)\n return : (n_samples, n_points, n_points)\n \"\"\"\n x2s = F.sum(x * x, -1, True)\n # assuming that __matmul__ is always implemented (true for PyTorch, MXNet and Chainer)\n return x2s + F.swapaxes(x2s, -1, -2) - 2 * x @ F.swapaxes(x, -1, -2)\n\n#pylint: disable=invalid-name\ndef knn_graph(x, k):\n \"\"\"Construct a graph from a set of points according to k-nearest-neighbor (KNN)\n and return.\n\n The function transforms the coordinates/features of a point set\n into a directed homogeneous graph. The coordinates of the point\n set is specified as a matrix whose rows correspond to points and\n columns correspond to coordinate/feature dimensions.\n\n The nodes of the returned graph correspond to the points, where the predecessors\n of each point are its k-nearest neighbors measured by the Euclidean distance.\n\n If :attr:`x` is a 3D tensor, then each submatrix will be transformed\n into a separate graph. DGL then composes the graphs into a large\n graph of multiple connected components.\n\n Parameters\n ----------\n x : Tensor\n The point coordinates. It can be either on CPU or GPU.\n\n * If is 2D, ``x[i]`` corresponds to the i-th node in the KNN graph.\n\n * If is 3D, ``x[i]`` corresponds to the i-th KNN graph and\n ``x[i][j]`` corresponds to the j-th node in the i-th KNN graph.\n k : int\n The number of nearest neighbors per node.\n\n Returns\n -------\n DGLGraph\n The constructred graph. The node IDs are in the same order as :attr:`x`.\n\n The returned graph is on CPU, regardless of the context of input :attr:`x`.\n\n Examples\n --------\n\n The following examples use PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n When :attr:`x` is a 2D tensor, a single KNN graph is constructed.\n\n >>> x = torch.tensor([[0.0, 0.0, 1.0],\n ... [1.0, 0.5, 0.5],\n ... [0.5, 0.2, 0.2],\n ... [0.3, 0.2, 0.4]])\n >>> knn_g = dgl.knn_graph(x, 2) # Each node has two predecessors\n >>> knn_g.edges()\n >>> (tensor([0, 1, 2, 2, 2, 3, 3, 3]), tensor([0, 1, 1, 2, 3, 0, 2, 3]))\n\n When :attr:`x` is a 3D tensor, DGL constructs multiple KNN graphs and\n and then composes them into a graph of multiple connected components.\n\n >>> x1 = torch.tensor([[0.0, 0.0, 1.0],\n ... [1.0, 0.5, 0.5],\n ... [0.5, 0.2, 0.2],\n ... 
[0.3, 0.2, 0.4]])\n >>> x2 = torch.tensor([[0.0, 1.0, 1.0],\n ... [0.3, 0.3, 0.3],\n ... [0.4, 0.4, 1.0],\n ... [0.3, 0.8, 0.2]])\n >>> x = torch.stack([x1, x2], dim=0)\n >>> knn_g = dgl.knn_graph(x, 2) # Each node has two predecessors\n >>> knn_g.edges()\n (tensor([0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 6, 7, 7]),\n tensor([0, 1, 1, 2, 3, 0, 2, 3, 4, 5, 6, 7, 4, 6, 5, 7]))\n \"\"\"\n if F.ndim(x) == 2:\n x = F.unsqueeze(x, 0)\n n_samples, n_points, _ = F.shape(x)\n\n dist = pairwise_squared_distance(x)\n k_indices = F.argtopk(dist, k, 2, descending=False)\n dst = F.copy_to(k_indices, F.cpu())\n\n src = F.zeros_like(dst) + F.reshape(F.arange(0, n_points), (1, -1, 1))\n\n per_sample_offset = F.reshape(F.arange(0, n_samples) * n_points, (-1, 1, 1))\n dst += per_sample_offset\n src += per_sample_offset\n dst = F.reshape(dst, (-1,))\n src = F.reshape(src, (-1,))\n adj = sparse.csr_matrix(\n (F.asnumpy(F.zeros_like(dst) + 1), (F.asnumpy(dst), F.asnumpy(src))),\n shape=(n_samples * n_points, n_samples * n_points))\n\n return convert.from_scipy(adj)\n\n#pylint: disable=invalid-name\ndef segmented_knn_graph(x, k, segs):\n \"\"\"Construct multiple graphs from multiple sets of points according to\n k-nearest-neighbor (KNN) and return.\n\n Compared with :func:`dgl.knn_graph`, this allows multiple point sets with\n different capacity. The points from different sets are stored contiguously\n in the :attr:`x` tensor.\n :attr:`segs` specifies the number of points in each point set. The\n function constructs a KNN graph for each point set, where the predecessors\n of each point are its k-nearest neighbors measured by the Euclidean distance.\n DGL then composes all KNN graphs\n into a graph with multiple connected components.\n\n Parameters\n ----------\n x : Tensor\n Coordinates/features of points. Must be 2D. It can be either on CPU or GPU.\n k : int\n The number of nearest neighbors per node.\n segs : list[int]\n Number of points in each point set. The numbers in :attr:`segs`\n must sum up to the number of rows in :attr:`x`.\n\n Returns\n -------\n DGLGraph\n The graph. The node IDs are in the same order as :attr:`x`.\n\n The returned graph is on CPU, regardless of the context of input :attr:`x`.\n\n Examples\n --------\n\n The following examples use PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n In the example below, the first point set has three points\n and the second point set has four points.\n\n >>> # Features/coordinates of the first point set\n >>> x1 = torch.tensor([[0.0, 0.5, 0.2],\n ... [0.1, 0.3, 0.2],\n ... [0.4, 0.2, 0.2]])\n >>> # Features/coordinates of the second point set\n >>> x2 = torch.tensor([[0.3, 0.2, 0.1],\n ... [0.5, 0.2, 0.3],\n ... [0.1, 0.1, 0.2],\n ... 
[0.6, 0.3, 0.3]])\n >>> x = torch.cat([x1, x2], dim=0)\n >>> segs = [x1.shape[0], x2.shape[0]]\n >>> knn_g = dgl.segmented_knn_graph(x, 2, segs)\n >>> knn_g.edges()\n (tensor([0, 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6]),\n tensor([0, 1, 0, 1, 2, 2, 3, 5, 4, 6, 3, 5, 4, 6]))\n \"\"\"\n n_total_points, _ = F.shape(x)\n offset = np.insert(np.cumsum(segs), 0, 0)\n\n h_list = F.split(x, segs, 0)\n dst = [\n F.argtopk(pairwise_squared_distance(h_g), k, 1, descending=False) +\n int(offset[i])\n for i, h_g in enumerate(h_list)]\n dst = F.cat(dst, 0)\n src = F.arange(0, n_total_points).unsqueeze(1).expand(n_total_points, k)\n\n dst = F.reshape(dst, (-1,))\n src = F.reshape(src, (-1,))\n adj = sparse.csr_matrix((F.asnumpy(F.zeros_like(dst) + 1), (F.asnumpy(dst), F.asnumpy(src))))\n\n return convert.from_scipy(adj)\n\ndef to_bidirected(g, copy_ndata=False, readonly=None):\n r\"\"\"Convert the graph to a bi-directional simple graph and return.\n\n For an input graph :math:`G`, return a new graph :math:`G'` such that an edge\n :math:`(u, v)\\in G'` if and only if there exists an edge :math:`(u, v)\\in G` or\n an edge :math:`(v, u)\\in G`. The resulting graph :math:`G'` is a simple graph,\n meaning there is no parallel edge.\n\n The operation only works for edges whose two endpoints belong to the same node type.\n DGL will raise error if the input graph is heterogeneous and contains edges\n with different types of endpoints.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph.\n copy_ndata: bool, optional\n If True, the node features of the bidirected graph are copied from the\n original graph. If False, the bidirected graph will not have any node features.\n (Default: False)\n readonly : bool\n **DEPRECATED**.\n\n Returns\n -------\n DGLGraph\n The bidirected graph\n\n Notes\n -----\n If :attr:`copy_ndata` is True, the resulting graph will share the node feature\n tensors with the input graph. Hence, users should try to avoid in-place operations\n which will be visible to both graphs.\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n The following examples use PyTorch backend.\n\n >>> import dgl\n >>> import torch as th\n >>> g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 0])))\n >>> bg1 = dgl.to_bidirected(g)\n >>> bg1.edges()\n (tensor([0, 1, 2, 1, 2, 0]), tensor([1, 2, 0, 0, 1, 2]))\n\n The graph already have i->j and j->i\n\n >>> g = dgl.graph((th.tensor([0, 1, 2, 0]), th.tensor([1, 2, 0, 2])))\n >>> bg1 = dgl.to_bidirected(g)\n >>> bg1.edges()\n (tensor([0, 1, 2, 1, 2, 0]), tensor([1, 2, 0, 0, 1, 2]))\n\n **Heterogeneous graphs with Multiple Edge Types**\n\n >>> g = dgl.heterograph({\n ... ('user', 'wins', 'user'): (th.tensor([0, 2, 0, 2]), th.tensor([1, 1, 2, 0])),\n ... ('user', 'follows', 'user'): (th.tensor([1, 2, 1]), th.tensor([2, 1, 1]))\n ... 
})\n >>> bg1 = dgl.to_bidirected(g)\n >>> bg1.edges(etype='wins')\n (tensor([0, 0, 1, 1, 2, 2]), tensor([1, 2, 0, 2, 0, 1]))\n >>> bg1.edges(etype='follows')\n (tensor([1, 1, 2]), tensor([1, 2, 1]))\n \"\"\"\n if readonly is not None:\n dgl_warning(\"Parameter readonly is deprecated\" \\\n \"There will be no difference between readonly and non-readonly DGLGraph\")\n\n for c_etype in g.canonical_etypes:\n if c_etype[0] != c_etype[2]:\n assert False, \"to_bidirected is not well defined for \" \\\n \"unidirectional bipartite graphs\" \\\n \", but {} is unidirectional bipartite\".format(c_etype)\n\n assert g.is_multigraph is False, \"to_bidirected only support simple graph\"\n\n g = add_reverse_edges(g, copy_ndata=copy_ndata, copy_edata=False)\n g = to_simple(g, return_counts=None, copy_ndata=copy_ndata, copy_edata=False)\n return g\n\ndef add_reverse_edges(g, readonly=None, copy_ndata=True,\n copy_edata=False, ignore_bipartite=False):\n r\"\"\"Add an reversed edge for each edge in the input graph and return a new graph.\n\n For a graph with edges :math:`(i_1, j_1), \\cdots, (i_n, j_n)`, this\n function creates a new graph with edges\n :math:`(i_1, j_1), \\cdots, (i_n, j_n), (j_1, i_1), \\cdots, (j_n, i_n)`.\n\n The operation only works for edges whose two endpoints belong to the same node type.\n DGL will raise error if the input graph is heterogeneous and contains edges\n with different types of endpoints. If :attr:`ignore_bipartite` is true, DGL will\n ignore those edges instead.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph.\n readonly : bool, default to be True\n Deprecated. There will be no difference between readonly and non-readonly\n copy_ndata: bool, optional\n If True, the node features of the new graph are copied from\n the original graph. If False, the new graph will not have any\n node features.\n\n (Default: True)\n copy_edata: bool, optional\n If True, the features of the reversed edges will be identical to\n the original ones.\"\n\n If False, the new graph will not have any edge features.\n\n (Default: False)\n ignore_bipartite: bool, optional\n If True, unidirectional bipartite graphs are ignored and\n no error is raised. If False, an error will be raised if\n an edge type of the input heterogeneous graph is for a unidirectional\n bipartite graph.\n\n Returns\n -------\n DGLGraph\n The graph with reversed edges added.\n\n Notes\n -----\n If :attr:`copy_ndata` is True, the resulting graph will share the node feature\n tensors with the input graph. Hence, users should try to avoid in-place operations\n which will be visible to both graphs. On the contrary, the two graphs do not share\n the same edge feature storage.\n\n This function discards the batch information. 
Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n **Homogeneous graphs**\n\n >>> g = dgl.graph((th.tensor([0, 0]), th.tensor([0, 1])))\n >>> bg1 = dgl.add_reverse_edges(g)\n >>> bg1.edges()\n (tensor([0, 0, 0, 1]), tensor([0, 1, 0, 0]))\n\n **Heterogeneous graphs**\n\n >>> g = dgl.heterograph({\n >>> ('user', 'wins', 'user'): (th.tensor([0, 2, 0, 2, 2]), th.tensor([1, 1, 2, 1, 0])),\n >>> ('user', 'plays', 'game'): (th.tensor([1, 2, 1]), th.tensor([2, 1, 1])),\n >>> ('user', 'follows', 'user'): (th.tensor([1, 2, 1), th.tensor([0, 0, 0]))\n >>> })\n >>> g.nodes['game'].data['hv'] = th.ones(3, 1)\n >>> g.edges['wins'].data['h'] = th.tensor([0, 1, 2, 3, 4])\n\n The :func:`add_reverse_edges` operation is applied to the edge type\n ``('user', 'wins', 'user')`` and the edge type ``('user', 'follows', 'user')``.\n The edge type ``('user', 'plays', 'game')`` is ignored. Both the node features and\n edge features are shared.\n\n >>> bg = dgl.add_reverse_edges(g, copy_ndata=True,\n copy_edata=True, ignore_bipartite=True)\n >>> bg.edges(('user', 'wins', 'user'))\n (tensor([0, 2, 0, 2, 2, 1, 1, 2, 1, 0]), tensor([1, 1, 2, 1, 0, 0, 2, 0, 2, 2]))\n >>> bg.edges(('user', 'follows', 'user'))\n (tensor([1, 2, 1, 0, 0, 0]), tensor([0, 0, 0, 1, 2, 1]))\n >>> bg.edges(('user', 'plays', 'game'))\n (th.tensor([1, 2, 1]), th.tensor([2, 1, 1]))\n >>> bg.nodes['game'].data['hv']\n tensor([0, 0, 0])\n >>> bg.edges[('user', 'wins', 'user')].data['h']\n th.tensor([0, 1, 2, 3, 4, 0, 1, 2, 3, 4])\n \"\"\"\n if readonly is not None:\n dgl_warning(\"Parameter readonly is deprecated\" \\\n \"There will be no difference between readonly and non-readonly DGLGraph\")\n\n # get node cnt for each ntype\n num_nodes_dict = {}\n for ntype in g.ntypes:\n num_nodes_dict[ntype] = g.number_of_nodes(ntype)\n\n canonical_etypes = g.canonical_etypes\n num_nodes_dict = {ntype: g.number_of_nodes(ntype) for ntype in g.ntypes}\n # fast path\n if ignore_bipartite is False:\n subgs = {}\n for c_etype in canonical_etypes:\n if c_etype[0] != c_etype[2]:\n assert False, \"add_reverse_edges is not well defined for \" \\\n \"unidirectional bipartite graphs\" \\\n \", but {} is unidirectional bipartite\".format(c_etype)\n\n u, v = g.edges(form='uv', order='eid', etype=c_etype)\n subgs[c_etype] = (F.cat([u, v], dim=0), F.cat([v, u], dim=0))\n\n new_g = convert.heterograph(subgs, num_nodes_dict=num_nodes_dict)\n else:\n subgs = {}\n for c_etype in canonical_etypes:\n if c_etype[0] != c_etype[2]:\n u, v = g.edges(form='uv', order='eid', etype=c_etype)\n subgs[c_etype] = (u, v)\n else:\n u, v = g.edges(form='uv', order='eid', etype=c_etype)\n subgs[c_etype] = (F.cat([u, v], dim=0), F.cat([v, u], dim=0))\n\n new_g = convert.heterograph(subgs, num_nodes_dict=num_nodes_dict)\n\n # handle features\n if copy_ndata:\n node_frames = utils.extract_node_subframes(g, None)\n utils.set_new_frames(new_g, node_frames=node_frames)\n\n if copy_edata:\n # find indices\n eids = []\n for c_etype in canonical_etypes:\n eid = F.copy_to(F.arange(0, g.number_of_edges(c_etype)), new_g.device)\n if c_etype[0] != c_etype[2]:\n eids.append(eid)\n else:\n eids.append(F.cat([eid, eid], 0))\n\n edge_frames = utils.extract_edge_subframes(g, eids)\n utils.set_new_frames(new_g, edge_frames=edge_frames)\n\n return new_g\n\ndef line_graph(g, backtracking=True, shared=False):\n \"\"\"Return the line graph of this graph.\n\n The line graph ``L(G)`` of a 
given graph ``G`` is defined as another graph where\n the nodes in ``L(G)`` maps to the edges in ``G``. For any pair of edges ``(u, v)``\n and ``(v, w)`` in ``G``, the corresponding node of edge ``(u, v)`` in ``L(G)`` will\n have an edge connecting to the corresponding node of edge ``(v, w)``.\n\n Parameters\n ----------\n g : DGLGraph\n Input graph. Must be homogeneous.\n backtracking : bool, optional\n If False, the line graph node corresponding to edge ``(u, v)`` will not have\n an edge connecting to the line graph node corresponding to edge ``(v, u)``.\n\n Default: True.\n shared : bool, optional\n Whether to copy the edge features of the original graph as the node features\n of the result line graph.\n\n Returns\n -------\n G : DGLGraph\n The line graph of this graph.\n\n Notes\n -----\n * If :attr:`shared` is True, the node features of the resulting graph share the same\n storage with the edge features of the input graph. Hence, users should try to\n avoid in-place operations which will be visible to both graphs.\n * The function supports input graph on GPU but copies it to CPU during computation.\n * This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n Assume that the graph has the following adjacency matrix: ::\n\n A = [[0, 0, 1],\n [1, 0, 1],\n [1, 1, 0]]\n\n >>> g = dgl.graph(([0, 1, 1, 2, 2],[2, 0, 2, 0, 1]), 'user', 'follows')\n >>> lg = g.line_graph()\n >>> lg\n Graph(num_nodes=5, num_edges=8,\n ndata_schemes={}\n edata_schemes={})\n >>> lg.edges()\n (tensor([0, 0, 1, 2, 2, 3, 4, 4]), tensor([3, 4, 0, 3, 4, 0, 1, 2]))\n >>> lg = g.line_graph(backtracking=False)\n >>> lg\n Graph(num_nodes=5, num_edges=4,\n ndata_schemes={}\n edata_schemes={})\n >>> lg.edges()\n (tensor([0, 1, 2, 4]), tensor([4, 0, 3, 1]))\n \"\"\"\n assert g.is_homogeneous, \\\n 'only homogeneous graph is supported'\n\n dev = g.device\n lg = DGLHeteroGraph(_CAPI_DGLHeteroLineGraph(g._graph.copy_to(nd.cpu()), backtracking))\n lg = lg.to(dev)\n if shared:\n new_frames = utils.extract_edge_subframes(g, None)\n utils.set_new_frames(lg, node_frames=new_frames)\n\n return lg\n\nDGLHeteroGraph.line_graph = utils.alias_func(line_graph)\n\ndef khop_adj(g, k):\n \"\"\"Return the matrix of :math:`A^k` where :math:`A` is the adjacency matrix of the graph\n :math:`g`.\n\n The returned matrix is a 32-bit float dense matrix on CPU. 
The graph must be homogeneous.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph.\n k : int\n The :math:`k` in :math:`A^k`.\n\n Returns\n -------\n Tensor\n The returned tensor.\n\n Examples\n --------\n >>> import dgl\n >>> g = dgl.graph(([0,1,2,3,4,0,1,2,3,4], [0,1,2,3,4,1,2,3,4,0]))\n >>> dgl.khop_adj(g, 1)\n tensor([[1., 0., 0., 0., 1.],\n [1., 1., 0., 0., 0.],\n [0., 1., 1., 0., 0.],\n [0., 0., 1., 1., 0.],\n [0., 0., 0., 1., 1.]])\n >>> dgl.khop_adj(g, 3)\n tensor([[1., 0., 1., 3., 3.],\n [3., 1., 0., 1., 3.],\n [3., 3., 1., 0., 1.],\n [1., 3., 3., 1., 0.],\n [0., 1., 3., 3., 1.]])\n \"\"\"\n assert g.is_homogeneous, \\\n 'only homogeneous graph is supported'\n adj_k = g.adj(scipy_fmt=g.formats()['created'][0]) ** k\n return F.tensor(adj_k.todense().astype(np.float32))\n\ndef khop_graph(g, k, copy_ndata=True):\n \"\"\"Return the graph whose edges connect the :attr:`k`-hop neighbors of the original graph.\n\n More specifically, an edge from node ``u`` and node ``v`` exists in the new graph if\n and only if a path with length :attr:`k` exists from node ``u`` to node ``v`` in the\n original graph.\n\n The adjacency matrix of the returned graph is :math:`A^k`\n (where :math:`A` is the adjacency matrix of :math:`g`).\n\n Parameters\n ----------\n g : DGLGraph\n The input graph.\n k : int\n The :math:`k` in `k`-hop graph.\n copy_ndata: bool, optional\n If True, the node features of the new graph are copied from the\n original graph.\n\n If False, the new graph will not have any node features.\n\n (Default: True)\n\n Returns\n -------\n DGLGraph\n The returned graph.\n\n Notes\n -----\n If :attr:`copy_ndata` is True, the resulting graph will share the node feature\n tensors with the input graph. Hence, users should try to avoid in-place operations\n which will be visible to both graphs.\n\n This function discards the batch information. 
Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n\n Below gives an easy example:\n\n >>> import dgl\n >>> g = dgl.graph(([0, 1], [1, 2]))\n >>> g_2 = dgl.transform.khop_graph(g, 2)\n >>> print(g_2.edges())\n (tensor([0]), tensor([2]))\n\n A more complicated example:\n\n >>> import dgl\n >>> g = dgl.graph(([0,1,2,3,4,0,1,2,3,4], [0,1,2,3,4,1,2,3,4,0]))\n >>> dgl.khop_graph(g, 1)\n DGLGraph(num_nodes=5, num_edges=10,\n ndata_schemes={}\n edata_schemes={})\n >>> dgl.khop_graph(g, 3)\n DGLGraph(num_nodes=5, num_edges=40,\n ndata_schemes={}\n edata_schemes={})\n \"\"\"\n assert g.is_homogeneous, \\\n 'only homogeneous graph is supported'\n n = g.number_of_nodes()\n adj_k = g.adj(transpose=True, scipy_fmt=g.formats()['created'][0]) ** k\n adj_k = adj_k.tocoo()\n multiplicity = adj_k.data\n row = np.repeat(adj_k.row, multiplicity)\n col = np.repeat(adj_k.col, multiplicity)\n # TODO(zihao): we should support creating multi-graph from scipy sparse matrix\n # in the future.\n new_g = convert.graph((row, col), num_nodes=n)\n\n # handle ndata\n if copy_ndata:\n node_frames = utils.extract_node_subframes(g, None)\n utils.set_new_frames(new_g, node_frames=node_frames)\n\n return new_g\n\ndef reverse(g, copy_ndata=True, copy_edata=False, *, share_ndata=None, share_edata=None):\n r\"\"\"Return a new graph with every edges being the reverse ones in the input graph.\n\n The reverse (also called converse, transpose) of a graph with edges\n :math:`(i_1, j_1), (i_2, j_2), \\cdots` of type ``(U, E, V)`` is a new graph with edges\n :math:`(j_1, i_1), (j_2, i_2), \\cdots` of type ``(V, E, U)``.\n\n The returned graph shares the data structure with the original graph, i.e. dgl.reverse\n will not create extra storage for the reversed graph.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph.\n copy_ndata: bool, optional\n If True, the node features of the reversed graph are copied from the\n original graph. If False, the reversed graph will not have any node features.\n (Default: True)\n copy_edata: bool, optional\n If True, the edge features of the reversed graph are copied from the\n original graph. If False, the reversed graph will not have any edge features.\n (Default: False)\n\n Return\n ------\n DGLGraph\n The reversed graph.\n\n Notes\n -----\n If :attr:`copy_ndata` or :attr:`copy_edata` is True,\n the resulting graph will share the node or edge feature\n tensors with the input graph. Hence, users should try to avoid in-place operations\n which will be visible to both graphs.\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n **Homogeneous graphs**\n\n Create a graph to reverse.\n\n >>> import dgl\n >>> import torch as th\n >>> g = dgl.graph((th.tensor([0, 1, 2]), th.tensor([1, 2, 0])))\n >>> g.ndata['h'] = th.tensor([[0.], [1.], [2.]])\n >>> g.edata['h'] = th.tensor([[3.], [4.], [5.]])\n\n Reverse the graph.\n\n >>> rg = dgl.reverse(g, copy_edata=True)\n >>> rg.ndata['h']\n tensor([[0.],\n [1.],\n [2.]])\n\n The i-th edge in the reversed graph corresponds to the i-th edge in the\n original graph. 
When :attr:`copy_edata` is True, they have the same features.\n\n >>> rg.edges()\n (tensor([1, 2, 0]), tensor([0, 1, 2]))\n >>> rg.edata['h']\n tensor([[3.],\n [4.],\n [5.]])\n\n **Heterogenenous graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'follows', 'user'): (th.tensor([0, 2]), th.tensor([1, 2])),\n ... ('user', 'plays', 'game'): (th.tensor([1, 2, 1]), th.tensor([2, 1, 1]))\n ... })\n >>> g.nodes['game'].data['hv'] = th.ones(3, 1)\n >>> g.edges['plays'].data['he'] = th.zeros(3, 1)\n\n The resulting graph will have edge types\n ``('user', 'follows', 'user)`` and ``('game', 'plays', 'user')``.\n\n >>> rg = dgl.reverse(g, copy_ndata=True)\n >>> rg\n Graph(num_nodes={'game': 3, 'user': 3},\n num_edges={('user', 'follows', 'user'): 2, ('game', 'plays', 'user'): 3},\n metagraph=[('user', 'user'), ('game', 'user')])\n >>> rg.edges(etype='follows')\n (tensor([1, 2]), tensor([0, 2]))\n >>> rg.edges(etype='plays')\n (tensor([2, 1, 1]), tensor([1, 2, 1]))\n >>> rg.nodes['game'].data['hv']\n tensor([[1.],\n [1.],\n [1.]])\n >>> rg.edges['plays'].data\n {}\n \"\"\"\n if share_ndata is not None:\n dgl_warning('share_ndata argument has been renamed to copy_ndata.')\n copy_ndata = share_ndata\n if share_edata is not None:\n dgl_warning('share_edata argument has been renamed to copy_edata.')\n copy_edata = share_edata\n if g.is_block:\n # TODO(0.5 release, xiangsx) need to handle BLOCK\n # currently reversing a block results in undefined behavior\n raise DGLError('Reversing a block graph is not supported.')\n gidx = g._graph.reverse()\n new_g = DGLHeteroGraph(gidx, g.ntypes, g.etypes)\n\n # handle ndata\n if copy_ndata:\n # for each ntype\n for ntype in g.ntypes:\n new_g.nodes[ntype].data.update(g.nodes[ntype].data)\n\n # handle edata\n if copy_edata:\n # for each etype\n for utype, etype, vtype in g.canonical_etypes:\n new_g.edges[vtype, etype, utype].data.update(\n g.edges[utype, etype, vtype].data)\n\n return new_g\n\nDGLHeteroGraph.reverse = utils.alias_func(reverse)\n\ndef to_simple_graph(g):\n \"\"\"Convert the graph to a simple graph with no multi-edge.\n\n DEPRECATED: renamed to dgl.to_simple\n\n Parameters\n ----------\n g : DGLGraph\n The input graph.\n\n Returns\n -------\n DGLGraph\n A simple graph.\n\n Notes\n -----\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n \"\"\"\n dgl_warning('dgl.to_simple_graph is renamed to dgl.to_simple in v0.5.')\n return to_simple(g)\n\ndef to_bidirected_stale(g, readonly=True):\n \"\"\"NOTE: this function only works on the deprecated\n :class:`dgl.DGLGraphStale` object.\n\n Convert the graph to a bidirected graph.\n\n The function generates a new graph with no node/edge feature.\n If g has an edge for ``(u, v)`` but no edge for ``(v, u)``, then the\n returned graph will have both ``(u, v)`` and ``(v, u)``.\n\n If the input graph is a multigraph (there are multiple edges from node u to node v),\n the returned graph isn't well defined.\n\n Parameters\n ----------\n g : DGLGraphStale\n The input graph.\n readonly : bool\n Whether the returned bidirected graph is readonly or not.\n\n (Default: True)\n\n Notes\n -----\n Please make sure g is a simple graph, otherwise the return value is undefined.\n\n This function discards the batch information. 
Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Returns\n -------\n DGLGraph\n\n Examples\n --------\n The following two examples use PyTorch backend, one for non-multi graph\n and one for multi-graph.\n\n >>> g = dgl._deprecate.graph.DGLGraph()\n >>> g.add_nodes(2)\n >>> g.add_edges([0, 0], [0, 1])\n >>> bg1 = dgl.to_bidirected_stale(g)\n >>> bg1.edges()\n (tensor([0, 1, 0]), tensor([0, 0, 1]))\n \"\"\"\n if readonly:\n newgidx = _CAPI_DGLToBidirectedImmutableGraph(g._graph)\n else:\n newgidx = _CAPI_DGLToBidirectedMutableGraph(g._graph)\n return DGLGraphStale(newgidx)\n\ndef laplacian_lambda_max(g):\n \"\"\"Return the largest eigenvalue of the normalized symmetric Laplacian of a graph.\n\n If the graph is batched from multiple graphs, return the list of the largest eigenvalue\n for each graph instead.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph, it must be a bi-directed homogeneous graph, i.e., every edge\n should have an accompanied reverse edge in the graph.\n The graph can be batched from multiple graphs.\n\n Returns\n -------\n list[float]\n A list where the i-th item indicates the largest eigenvalue\n of i-th graph in :attr:`g`.\n\n In the case where the function takes a single graph, it will return a list\n consisting of a single element.\n\n Examples\n --------\n >>> import dgl\n >>> g = dgl.graph(([0, 1, 2, 3, 4, 0, 1, 2, 3, 4], [1, 2, 3, 4, 0, 4, 0, 1, 2, 3]))\n >>> dgl.laplacian_lambda_max(g)\n [1.809016994374948]\n \"\"\"\n g_arr = batch.unbatch(g)\n rst = []\n for g_i in g_arr:\n n = g_i.number_of_nodes()\n adj = g_i.adj(scipy_fmt=g_i.formats()['created'][0]).astype(float)\n norm = sparse.diags(F.asnumpy(g_i.in_degrees()).clip(1) ** -0.5, dtype=float)\n laplacian = sparse.eye(n) - norm * adj * norm\n rst.append(sparse.linalg.eigs(laplacian, 1, which='LM',\n return_eigenvectors=False)[0].real)\n return rst\n\ndef metapath_reachable_graph(g, metapath):\n \"\"\"Return a graph where the successors of any node ``u`` are nodes reachable from ``u`` by\n the given metapath.\n\n If the beginning node type ``s`` and ending node type ``t`` are the same, it will return\n a homogeneous graph with node type ``s = t``. Otherwise, a unidirectional bipartite graph\n with source node type ``s`` and destination node type ``t`` is returned.\n\n In both cases, two nodes ``u`` and ``v`` will be connected with an edge ``(u, v)`` if\n there exists one path matching the metapath from ``u`` to ``v``.\n\n The result graph keeps the node set of type ``s`` and ``t`` in the original graph even if\n they might have no neighbor.\n\n The features of the source/destination node type in the original graph would be copied to\n the new graph.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph\n metapath : list[str or tuple of str]\n Metapath in the form of a list of edge types\n\n Returns\n -------\n DGLGraph\n A homogeneous or unidirectional bipartite graph. It will be on CPU regardless of\n whether the input graph is on CPU or GPU.\n\n Notes\n -----\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n >>> g = dgl.heterograph({\n ... ('A', 'AB', 'B'): ([0, 1, 2], [1, 2, 3]),\n ... 
('B', 'BA', 'A'): ([1, 2, 3], [0, 1, 2])})\n >>> new_g = dgl.metapath_reachable_graph(g, ['AB', 'BA'])\n >>> new_g.edges(order='eid')\n (tensor([0, 1, 2]), tensor([0, 1, 2]))\n \"\"\"\n adj = 1\n for etype in metapath:\n adj = adj * g.adj(etype=etype, scipy_fmt='csr', transpose=True)\n\n adj = (adj != 0).tocsr()\n srctype = g.to_canonical_etype(metapath[0])[0]\n dsttype = g.to_canonical_etype(metapath[-1])[2]\n new_g = convert.heterograph({(srctype, '_E', dsttype): adj.nonzero()},\n {srctype: adj.shape[0], dsttype: adj.shape[1]},\n idtype=g.idtype, device=g.device)\n\n # copy srcnode features\n new_g.nodes[srctype].data.update(g.nodes[srctype].data)\n # copy dstnode features\n if srctype != dsttype:\n new_g.nodes[dsttype].data.update(g.nodes[dsttype].data)\n\n return new_g\n\ndef add_nodes(g, num, data=None, ntype=None):\n r\"\"\"Add the given number of nodes to the graph and return a new graph.\n\n The new nodes will have IDs starting from ``g.num_nodes(ntype)``.\n\n Parameters\n ----------\n num : int\n The number of nodes to add.\n data : dict[str, Tensor], optional\n Feature data of the added nodes. The keys are feature names\n while the values are feature data.\n ntype : str, optional\n The node type name. Can be omitted if there is\n only one type of nodes in the graph.\n\n Return\n ------\n DGLGraph\n The graph with newly added nodes.\n\n Notes\n -----\n * For features in :attr:`g` but not in :attr:`data`,\n DGL assigns zero features for the newly added nodes.\n * For feature in :attr:`data` but not in :attr:`g`, DGL assigns zero features\n for the existing nodes in the graph.\n * This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n **Homogeneous Graphs**\n\n >>> g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))\n >>> g.num_nodes()\n 3\n >>> g = dgl.add_nodes(g, 2)\n >>> g.num_nodes()\n 5\n\n If the graph has some node features and new nodes are added without\n features, their features will be filled with zeros.\n\n >>> g.ndata['h'] = torch.ones(5, 1)\n >>> g = dgl.add_nodes(g, 1)\n >>> g.ndata['h']\n tensor([[1.], [1.], [1.], [1.], [1.], [0.]])\n\n Assign features for the new nodes.\n\n >>> g = dgl.add_nodes(g, 1, {'h': torch.ones(1, 1), 'w': torch.ones(1, 1)})\n >>> g.ndata['h']\n tensor([[1.], [1.], [1.], [1.], [1.], [0.], [1.]])\n\n Since :attr:`data` contains new feature fields, the features for existing nodes\n will be filled with zeros.\n\n >>> g.ndata['w']\n tensor([[0.], [0.], [0.], [0.], [0.], [0.], [1.]])\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'plays', 'game'): (torch.tensor([0, 1, 1, 2]),\n ... torch.tensor([0, 0, 1, 1])),\n ... ('developer', 'develops', 'game'): (torch.tensor([0, 1]),\n ... torch.tensor([0, 1]))\n ... })\n >>> g.num_nodes('user')\n 3\n >>> g = dgl.add_nodes(g, 2, ntype='user')\n >>> g.num_nodes('user')\n 5\n\n See Also\n --------\n remove_nodes\n add_edges\n remove_edges\n \"\"\"\n g = g.clone()\n g.add_nodes(num, data=data, ntype=ntype)\n return g\n\ndef add_edges(g, u, v, data=None, etype=None):\n r\"\"\"Add the edges to the graph and return a new graph.\n\n The i-th new edge will be from ``u[i]`` to ``v[i]``. 
The IDs of the new\n edges will start from ``g.num_edges(etype)``.\n\n Parameters\n ----------\n u : int, Tensor or iterable[int]\n Source node IDs, ``u[i]`` gives the source node for the i-th new edge.\n v : int, Tensor or iterable[int]\n Destination node IDs, ``v[i]`` gives the destination node for the i-th new edge.\n data : dict[str, Tensor], optional\n Feature data of the added edges. The keys are feature names\n while the values are feature data.\n etype : str or (str, str, str), optional\n The type names of the edges. The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n Can be omitted if the graph has only one type of edges.\n\n Return\n ------\n DGLGraph\n The graph with newly added edges.\n\n Notes\n -----\n * If the end nodes of the given edges do not exist in :attr:`g`,\n :func:`dgl.add_nodes` is invoked to add those nodes.\n The node features of the new nodes will be filled with zeros.\n * For features in :attr:`g` but not in :attr:`data`,\n DGL assigns zero features for the newly added nodes.\n * For feature in :attr:`data` but not in :attr:`g`, DGL assigns zero features\n for the existing nodes in the graph.\n * This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n **Homogeneous Graphs**\n\n >>> g = dgl.graph((torch.tensor([0, 1]), torch.tensor([1, 2])))\n >>> g.num_edges()\n 2\n >>> g = dgl.add_edges(g, torch.tensor([1, 3]), torch.tensor([0, 1]))\n >>> g.num_edges()\n 4\n\n Since ``u`` or ``v`` contains a non-existing node ID, the nodes are\n added implicitly.\n\n >>> g.num_nodes()\n 4\n\n If the graph has some edge features and new edges are added without\n features, their features will be filled with zeros.\n\n >>> g.edata['h'] = torch.ones(4, 1)\n >>> g = dgl.add_edges(g, torch.tensor([1]), torch.tensor([1]))\n >>> g.edata['h']\n tensor([[1.], [1.], [1.], [1.], [0.]])\n\n You can also assign features for the new edges in adding new edges.\n\n >>> g = dgl.add_edges(g, torch.tensor([0, 0]), torch.tensor([2, 2]),\n ... {'h': torch.tensor([[1.], [2.]]), 'w': torch.ones(2, 1)})\n >>> g.edata['h']\n tensor([[1.], [1.], [1.], [1.], [0.], [1.], [2.]])\n\n Since :attr:`data` contains new feature fields, the features for old edges\n will be filled with zeros.\n\n >>> g.edata['w']\n tensor([[0.], [0.], [0.], [0.], [0.], [1.], [1.]])\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'plays', 'game'): (torch.tensor([0, 1, 1, 2]),\n ... torch.tensor([0, 0, 1, 1])),\n ... ('developer', 'develops', 'game'): (torch.tensor([0, 1]),\n ... torch.tensor([0, 1]))\n ... })\n >>> g.number_of_edges('plays')\n 4\n >>> g = dgl.add_edges(g, torch.tensor([3]), torch.tensor([3]), etype='plays')\n >>> g.number_of_edges('plays')\n 5\n\n See Also\n --------\n add_nodes\n remove_nodes\n remove_edges\n \"\"\"\n g = g.clone()\n g.add_edges(u, v, data=data, etype=etype)\n return g\n\ndef remove_edges(g, eids, etype=None, store_ids=False):\n r\"\"\"Remove the specified edges and return a new graph.\n\n Also delete the features of the edges. 
The edges must exist in the graph.\n The resulting graph has the same number of the nodes as the input one,\n even if some nodes become isolated after the the edge removal.\n\n Parameters\n ----------\n eids : int, Tensor, iterable[int]\n The IDs of the edges to remove.\n etype : str or (str, str, str), optional\n The type names of the edges. The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n Can be omitted if the graph has only one type of edges.\n store_ids : bool, optional\n If True, it will store the raw IDs of the extracted nodes and edges in the ``ndata``\n and ``edata`` of the resulting graph under name ``dgl.NID`` and ``dgl.EID``,\n respectively.\n\n Return\n ------\n DGLGraph\n The graph with edges deleted.\n\n Notes\n -----\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n >>> import dgl\n >>> import torch\n\n **Homogeneous Graphs**\n\n >>> g = dgl.graph((torch.tensor([0, 0, 2]), torch.tensor([0, 1, 2])))\n >>> g.edata['he'] = torch.arange(3).float().reshape(-1, 1)\n >>> g = dgl.remove_edges(g, torch.tensor([0, 1]))\n >>> g\n Graph(num_nodes=3, num_edges=1,\n ndata_schemes={}\n edata_schemes={'he': Scheme(shape=(1,), dtype=torch.float32)})\n >>> g.edges('all')\n (tensor([2]), tensor([2]), tensor([0]))\n >>> g.edata['he']\n tensor([[2.]])\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'plays', 'game'): (torch.tensor([0, 1, 1, 2]),\n ... torch.tensor([0, 0, 1, 1])),\n ... ('developer', 'develops', 'game'): (torch.tensor([0, 1]),\n ... torch.tensor([0, 1]))\n ... })\n >>> g = dgl.remove_edges(g, torch.tensor([0, 1]), 'plays')\n >>> g.edges('all', etype='plays')\n (tensor([0, 1]), tensor([0, 0]), tensor([0, 1]))\n\n See Also\n --------\n add_nodes\n add_edges\n remove_nodes\n \"\"\"\n g = g.clone()\n g.remove_edges(eids, etype=etype, store_ids=store_ids)\n return g\n\n\ndef remove_nodes(g, nids, ntype=None, store_ids=False):\n r\"\"\"Remove the specified nodes and return a new graph.\n\n Also delete the features. Edges that connect from/to the nodes will be\n removed as well. After the removal, DGL re-labels the remaining nodes and edges\n with IDs from 0.\n\n Parameters\n ----------\n nids : int, Tensor, iterable[int]\n The nodes to be removed.\n ntype : str, optional\n The type of the nodes to remove. Can be omitted if there is\n only one node type in the graph.\n store_ids : bool, optional\n If True, it will store the raw IDs of the extracted nodes and edges in the ``ndata``\n and ``edata`` of the resulting graph under name ``dgl.NID`` and ``dgl.EID``,\n respectively.\n\n Return\n ------\n DGLGraph\n The graph with nodes deleted.\n\n Notes\n -----\n\n This function discards the batch information. 
Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n\n >>> import dgl\n >>> import torch\n\n **Homogeneous Graphs**\n\n >>> g = dgl.graph((torch.tensor([0, 0, 2]), torch.tensor([0, 1, 2])))\n >>> g.ndata['hv'] = torch.arange(3).float().reshape(-1, 1)\n >>> g.edata['he'] = torch.arange(3).float().reshape(-1, 1)\n >>> g = dgl.remove_nodes(g, torch.tensor([0, 1]))\n >>> g\n Graph(num_nodes=1, num_edges=1,\n ndata_schemes={'hv': Scheme(shape=(1,), dtype=torch.float32)}\n edata_schemes={'he': Scheme(shape=(1,), dtype=torch.float32)})\n >>> g.ndata['hv']\n tensor([[2.]])\n >>> g.edata['he']\n tensor([[2.]])\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'plays', 'game'): (torch.tensor([0, 1, 1, 2]),\n ... torch.tensor([0, 0, 1, 1])),\n ... ('developer', 'develops', 'game'): (torch.tensor([0, 1]),\n ... torch.tensor([0, 1]))\n ... })\n >>> g = dgl.remove_nodes(g, torch.tensor([0, 1]), ntype='game')\n >>> g.num_nodes('user')\n 3\n >>> g.num_nodes('game')\n 0\n >>> g.num_edges('plays')\n 0\n\n See Also\n --------\n add_nodes\n add_edges\n remove_edges\n \"\"\"\n g = g.clone()\n g.remove_nodes(nids, ntype=ntype, store_ids=store_ids)\n return g\n\ndef add_self_loop(g, etype=None):\n r\"\"\"Add self-loops for each node in the graph and return a new graph.\n\n Parameters\n ----------\n g : DGLGraph\n The graph.\n etype : str or (str, str, str), optional\n The type names of the edges. The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n Can be omitted if the graph has only one type of edges.\n\n Return\n ------\n DGLGraph\n The graph with self-loops.\n\n Notes\n -----\n * The function only supports homogeneous graphs or heterogeneous graphs but\n the relation graph specified by the :attr:`etype` argument is homogeneous.\n * The function adds self-loops regardless of whether they already exist or not.\n If one wishes to have exactly one self-loop for every node,\n call :func:`remove_self_loop` before invoking :func:`add_self_loop`.\n * Features of the new edges (self-loop edges) will be filled with zeros.\n * This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n >>> import dgl\n >>> import torch\n\n **Homogeneous Graphs**\n\n >>> g = dgl.graph((torch.tensor([0, 0, 2]), torch.tensor([2, 1, 0])))\n >>> g.ndata['hv'] = torch.arange(3).float().reshape(-1, 1)\n >>> g.edata['he'] = torch.arange(3).float().reshape(-1, 1)\n >>> g = dgl.add_self_loop(g)\n >>> g\n Graph(num_nodes=3, num_edges=6,\n ndata_schemes={'hv': Scheme(shape=(1,), dtype=torch.float32)}\n edata_schemes={'he': Scheme(shape=(1,), dtype=torch.float32)})\n >>> g.edata['he']\n tensor([[0.],\n [1.],\n [2.],\n [0.],\n [0.],\n [0.]])\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'follows', 'user'): (torch.tensor([1, 2]),\n ... torch.tensor([0, 1])),\n ... ('user', 'plays', 'game'): (torch.tensor([0, 1]),\n ... 
torch.tensor([0, 1]))})\n >>> g = dgl.add_self_loop(g, etype='follows')\n >>> g\n Graph(num_nodes={'user': 3, 'game': 2},\n num_edges={('user', 'plays', 'game'): 2, ('user', 'follows', 'user'): 5},\n metagraph=[('user', 'user'), ('user', 'game')])\n \"\"\"\n etype = g.to_canonical_etype(etype)\n if etype[0] != etype[2]:\n raise DGLError(\n 'add_self_loop does not support unidirectional bipartite graphs: {}.' \\\n 'Please make sure the types of head node and tail node are identical.' \\\n ''.format(etype))\n nodes = g.nodes(etype[0])\n new_g = add_edges(g, nodes, nodes, etype=etype)\n return new_g\n\nDGLHeteroGraph.add_self_loop = utils.alias_func(add_self_loop)\n\ndef remove_self_loop(g, etype=None):\n r\"\"\" Remove self-loops for each node in the graph and return a new graph.\n\n Parameters\n ----------\n g : DGLGraph\n The graph.\n etype : str or (str, str, str), optional\n The type names of the edges. The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n Can be omitted if the graph has only one type of edges.\n\n Notes\n -----\n If a node has multiple self-loops, remove them all. Do nothing for nodes without\n self-loops.\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n ---------\n\n >>> import dgl\n >>> import torch\n\n **Homogeneous Graphs**\n\n >>> g = dgl.graph((torch.tensor([0, 0, 0, 1]), torch.tensor([1, 0, 0, 2])))\n >>> g.edata['he'] = torch.arange(4).float().reshape(-1, 1)\n >>> g = dgl.remove_self_loop(g)\n >>> g\n Graph(num_nodes=3, num_edges=2,\n edata_schemes={'he': Scheme(shape=(2,), dtype=torch.float32)})\n >>> g.edata['he']\n tensor([[0.],[3.]])\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'follows', 'user'): (torch.tensor([0, 1, 1, 1, 2]),\n ... torch.tensor([0, 0, 1, 1, 1])),\n ... ('user', 'plays', 'game'): (torch.tensor([0, 1]),\n ... torch.tensor([0, 1]))\n ... })\n >>> g = dgl.remove_self_loop(g, etype='follows')\n >>> g.num_nodes('user')\n 3\n >>> g.num_nodes('game')\n 2\n >>> g.num_edges('follows')\n 2\n >>> g.num_edges('plays')\n 2\n\n See Also\n --------\n add_self_loop\n \"\"\"\n etype = g.to_canonical_etype(etype)\n if etype[0] != etype[2]:\n raise DGLError(\n 'remove_self_loop does not support unidirectional bipartite graphs: {}.' \\\n 'Please make sure the types of head node and tail node are identical.' \\\n ''.format(etype))\n u, v = g.edges(form='uv', order='eid', etype=etype)\n self_loop_eids = F.tensor(F.nonzero_1d(u == v), dtype=F.dtype(u))\n new_g = remove_edges(g, self_loop_eids, etype=etype)\n return new_g\n\nDGLHeteroGraph.remove_self_loop = utils.alias_func(remove_self_loop)\n\ndef compact_graphs(graphs, always_preserve=None, copy_ndata=True, copy_edata=True):\n \"\"\"Given a list of graphs with the same set of nodes, find and eliminate the common\n isolated nodes across all graphs.\n\n This function requires the graphs to have the same set of nodes (i.e. the node types\n must be the same, and the number of nodes of each node type must be the same). 
The\n metagraph does not have to be the same.\n\n It finds all the nodes that have zero in-degree and zero out-degree in all the given\n graphs, and eliminates them from all the graphs.\n\n Useful for graph sampling where you have a giant graph but you only wish to perform\n message passing on a smaller graph with a (tiny) subset of nodes.\n\n Parameters\n ----------\n graphs : DGLGraph or list[DGLGraph]\n The graph, or list of graphs.\n\n All graphs must be on CPU.\n\n All graphs must have the same set of nodes.\n always_preserve : Tensor or dict[str, Tensor], optional\n If a dict of node types and node ID tensors is given, the nodes of given\n node types would not be removed, regardless of whether they are isolated.\n\n If a Tensor is given, DGL assumes that all the graphs have one (same) node type.\n copy_ndata: bool, optional\n If True, the node features of the returned graphs are copied from the\n original graphs.\n\n If False, the returned graphs will not have any node features.\n\n (Default: True)\n copy_edata: bool, optional\n If True, the edge features of the reversed graph are copied from the\n original graph.\n\n If False, the reversed graph will not have any edge features.\n\n (Default: True)\n\n Returns\n -------\n DGLGraph or list[DGLGraph]\n The compacted graph or list of compacted graphs.\n\n Each returned graph would have a feature ``dgl.NID`` containing the mapping\n of node IDs for each type from the compacted graph(s) to the original graph(s).\n Note that the mapping is the same for all the compacted graphs.\n\n All the returned graphs are on CPU.\n\n Notes\n -----\n This function currently requires that the same node type of all graphs should have\n the same node type ID, i.e. the node types are *ordered* the same.\n\n If :attr:`copy_edata` is True, the resulting graph will share the edge feature\n tensors with the input graph. Hence, users should try to avoid in-place operations\n which will be visible to both graphs.\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n The following code constructs a bipartite graph with 20 users and 10 games, but\n only user #1 and #3, as well as game #3 and #5, have connections:\n\n >>> g = dgl.heterograph({('user', 'plays', 'game'): ([1, 3], [3, 5])},\n >>> {'user': 20, 'game': 10})\n\n The following would compact the graph above to another bipartite graph with only\n two users and two games.\n\n >>> new_g, induced_nodes = dgl.compact_graphs(g)\n >>> induced_nodes\n {'user': tensor([1, 3]), 'game': tensor([3, 5])}\n\n The mapping tells us that only user #1 and #3 as well as game #3 and #5 are kept.\n Furthermore, the first user and second user in the compacted graph maps to\n user #1 and #3 in the original graph. Games are similar.\n\n One can verify that the edge connections are kept the same in the compacted graph.\n\n >>> new_g.edges(form='all', order='eid', etype='plays')\n (tensor([0, 1]), tensor([0, 1]), tensor([0, 1]))\n\n When compacting multiple graphs, nodes that do not have any connections in any\n of the given graphs are removed. 
So if you compact ``g`` and the following ``g2``\n graphs together:\n\n >>> g2 = dgl.heterograph({('user', 'plays', 'game'): ([1, 6], [6, 8])},\n >>> {'user': 20, 'game': 10})\n >>> (new_g, new_g2), induced_nodes = dgl.compact_graphs([g, g2])\n >>> induced_nodes\n {'user': tensor([1, 3, 6]), 'game': tensor([3, 5, 6, 8])}\n\n Then one can see that user #1 from both graphs, users #3 from the first graph, as\n well as user #6 from the second graph, are kept. Games are similar.\n\n Similarly, one can also verify the connections:\n\n >>> new_g.edges(form='all', order='eid', etype='plays')\n (tensor([0, 1]), tensor([0, 1]), tensor([0, 1]))\n >>> new_g2.edges(form='all', order='eid', etype='plays')\n (tensor([0, 2]), tensor([2, 3]), tensor([0, 1]))\n \"\"\"\n return_single = False\n if not isinstance(graphs, Iterable):\n graphs = [graphs]\n return_single = True\n if len(graphs) == 0:\n return []\n if graphs[0].is_block:\n raise DGLError('Compacting a block graph is not allowed.')\n assert all(g.device == F.cpu() for g in graphs), 'all the graphs must be on CPU'\n\n # Ensure the node types are ordered the same.\n # TODO(BarclayII): we ideally need to remove this constraint.\n ntypes = graphs[0].ntypes\n idtype = graphs[0].idtype\n device = graphs[0].device\n for g in graphs:\n assert ntypes == g.ntypes, \\\n (\"All graphs should have the same node types in the same order, got %s and %s\" %\n ntypes, g.ntypes)\n assert idtype == g.idtype, \"Expect graph data type to be {}, but got {}\".format(\n idtype, g.idtype)\n assert device == g.device, \"Expect graph device to be {}, but got {}\".format(\n device, g.device)\n\n # Process the dictionary or tensor of \"always preserve\" nodes\n if always_preserve is None:\n always_preserve = {}\n elif not isinstance(always_preserve, Mapping):\n if len(ntypes) > 1:\n raise ValueError(\"Node type must be given if multiple node types exist.\")\n always_preserve = {ntypes[0]: always_preserve}\n\n always_preserve = utils.prepare_tensor_dict(graphs[0], always_preserve, 'always_preserve')\n always_preserve_nd = []\n for ntype in ntypes:\n nodes = always_preserve.get(ntype, None)\n if nodes is None:\n nodes = F.copy_to(F.tensor([], idtype), device)\n always_preserve_nd.append(F.to_dgl_nd(nodes))\n\n # Compact and construct heterographs\n new_graph_indexes, induced_nodes = _CAPI_DGLCompactGraphs(\n [g._graph for g in graphs], always_preserve_nd)\n induced_nodes = [F.from_dgl_nd(nodes) for nodes in induced_nodes]\n\n new_graphs = [\n DGLHeteroGraph(new_graph_index, graph.ntypes, graph.etypes)\n for new_graph_index, graph in zip(new_graph_indexes, graphs)]\n\n if copy_ndata:\n for g, new_g in zip(graphs, new_graphs):\n node_frames = utils.extract_node_subframes(g, induced_nodes)\n utils.set_new_frames(new_g, node_frames=node_frames)\n if copy_edata:\n for g, new_g in zip(graphs, new_graphs):\n edge_frames = utils.extract_edge_subframes(g, None)\n utils.set_new_frames(new_g, edge_frames=edge_frames)\n\n if return_single:\n new_graphs = new_graphs[0]\n\n return new_graphs\n\ndef to_block(g, dst_nodes=None, include_dst_in_src=True):\n \"\"\"Convert a graph into a bipartite-structured *block* for message passing.\n\n A block is a graph consisting of two sets of nodes: the\n *input* nodes and *output* nodes. The input and output nodes can have multiple\n node types. All the edges connect from input nodes to output nodes.\n\n Specifically, the input nodes and output nodes will have the same node types as the\n ones in the original graph. 
DGL maps each edge ``(u, v)`` with edge type\n ``(utype, etype, vtype)`` in the original graph to the edge with type\n ``etype`` connecting from node ID ``u`` of type ``utype`` in the input side to node\n ID ``v`` of type ``vtype`` in the output side.\n\n For blocks returned by :func:`to_block`, the output nodes of the block will only\n contain the nodes that have at least one inbound edge of any type. The input nodes\n of the block will only contain the nodes that appear in the output nodes, as well\n as the nodes that have at least one outbound edge connecting to one of the output nodes.\n\n If the :attr:`dst_nodes` argument is not None, it specifies the output nodes instead.\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n dst_nodes : Tensor or dict[str, Tensor], optional\n The list of output nodes.\n\n If a tensor is given, the graph must have only one node type.\n\n If given, it must be a superset of all the nodes that have at least one inbound\n edge. An error will be raised otherwise.\n include_dst_in_src : bool\n If False, do not include output nodes in input nodes.\n\n (Default: True)\n\n Returns\n -------\n DGLBlock\n The new graph describing the block.\n\n The node IDs induced for each type in both sides would be stored in feature\n ``dgl.NID``.\n\n The edge IDs induced for each type would be stored in feature ``dgl.EID``.\n\n Raises\n ------\n DGLError\n If :attr:`dst_nodes` is specified but it is not a superset of all the nodes that\n have at least one inbound edge.\n\n If :attr:`dst_nodes` is not None, and :attr:`g` and :attr:`dst_nodes`\n are not in the same context.\n\n Notes\n -----\n :func:`to_block` is most commonly used in customizing neighborhood sampling\n for stochastic training on a large graph. Please refer to the user guide\n :ref:`guide-minibatch` for a more thorough discussion about the methodology\n of stochastic training.\n\n See also :func:`create_block` for more flexible construction of blocks.\n\n Examples\n --------\n Converting a homogeneous graph to a block as described above:\n >>> g = dgl.graph(([1, 2], [2, 3]))\n >>> block = dgl.to_block(g, torch.LongTensor([3, 2]))\n\n The output nodes would be exactly the same as the ones given: [3, 2].\n\n >>> induced_dst = block.dstdata[dgl.NID]\n >>> induced_dst\n tensor([3, 2])\n\n The first few input nodes would also be exactly the same as\n the ones given. The rest of the nodes are the ones necessary for message passing\n into nodes 3, 2. This means that the node 1 would be included.\n\n >>> induced_src = block.srcdata[dgl.NID]\n >>> induced_src\n tensor([3, 2, 1])\n\n You can notice that the first two nodes are identical to the given nodes as well as\n the output nodes.\n\n The induced edges can also be obtained by the following:\n\n >>> block.edata[dgl.EID]\n tensor([2, 1])\n\n This indicates that edge (2, 3) and (1, 2) are included in the result graph. You can\n verify that the first edge in the block indeed maps to the edge (2, 3), and the\n second edge in the block indeed maps to the edge (1, 2):\n\n >>> src, dst = block.edges(order='eid')\n >>> induced_src[src], induced_dst[dst]\n (tensor([2, 1]), tensor([3, 2]))\n\n The output nodes specified must be a superset of the nodes that have edges connecting\n to them. 
For example, the following will raise an error since the output nodes\n do not contain node 3, which has an edge connecting to it.\n\n >>> g = dgl.graph(([1, 2], [2, 3]))\n >>> dgl.to_block(g, torch.LongTensor([2])) # error\n\n Converting a heterogeneous graph to a block is similar, except that when specifying\n the output nodes, you have to give a dict:\n\n >>> g = dgl.heterograph({('A', '_E', 'B'): ([1, 2], [2, 3])})\n\n If you don't specify any node of type A on the output side, the node type ``A``\n in the block would have zero nodes on the output side.\n\n >>> block = dgl.to_block(g, {'B': torch.LongTensor([3, 2])})\n >>> block.number_of_dst_nodes('A')\n 0\n >>> block.number_of_dst_nodes('B')\n 2\n >>> block.dstnodes['B'].data[dgl.NID]\n tensor([3, 2])\n\n The input side would contain all the nodes on the output side:\n\n >>> block.srcnodes['B'].data[dgl.NID]\n tensor([3, 2])\n\n As well as all the nodes that have connections to the nodes on the output side:\n\n >>> block.srcnodes['A'].data[dgl.NID]\n tensor([2, 1])\n\n See Also\n --------\n create_block\n \"\"\"\n if dst_nodes is None:\n # Find all nodes that appeared as destinations\n dst_nodes = defaultdict(list)\n for etype in g.canonical_etypes:\n _, dst = g.edges(etype=etype)\n dst_nodes[etype[2]].append(dst)\n dst_nodes = {ntype: F.unique(F.cat(values, 0)) for ntype, values in dst_nodes.items()}\n elif not isinstance(dst_nodes, Mapping):\n # dst_nodes is a Tensor, check if the g has only one type.\n if len(g.ntypes) > 1:\n raise DGLError(\n 'Graph has more than one node type; please specify a dict for dst_nodes.')\n dst_nodes = {g.ntypes[0]: dst_nodes}\n\n dst_node_ids = [\n utils.toindex(dst_nodes.get(ntype, []), g._idtype_str).tousertensor(\n ctx=F.to_backend_ctx(g._graph.ctx))\n for ntype in g.ntypes]\n dst_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in dst_node_ids]\n\n for d in dst_node_ids_nd:\n if g._graph.ctx != d.ctx:\n raise ValueError('g and dst_nodes need to have the same context.')\n\n new_graph_index, src_nodes_nd, induced_edges_nd = _CAPI_DGLToBlock(\n g._graph, dst_node_ids_nd, include_dst_in_src)\n\n # The new graph duplicates the original node types to SRC and DST sets.\n new_ntypes = (g.ntypes, g.ntypes)\n new_graph = DGLBlock(new_graph_index, new_ntypes, g.etypes)\n assert new_graph.is_unibipartite # sanity check\n\n src_node_ids = [F.from_dgl_nd(src) for src in src_nodes_nd]\n edge_ids = [F.from_dgl_nd(eid) for eid in induced_edges_nd]\n\n node_frames = utils.extract_node_subframes_for_block(g, src_node_ids, dst_node_ids)\n edge_frames = utils.extract_edge_subframes(g, edge_ids)\n utils.set_new_frames(new_graph, node_frames=node_frames, edge_frames=edge_frames)\n\n return new_graph\n\ndef to_simple(g,\n return_counts='count',\n writeback_mapping=False,\n copy_ndata=True,\n copy_edata=False):\n r\"\"\"Convert a graph to a simple graph without parallel edges and return.\n\n For a heterogeneous graph with multiple edge types, DGL treats edges with the same\n edge type and endpoints as parallel edges and removes them.\n Optionally, one can get the number of parallel edges by specifying the\n :attr:`return_counts` argument. To get a mapping from the edge IDs in the\n input graph to the edge IDs in the resulting graph, set :attr:`writeback_mapping`\n to True.\n\n Parameters\n ----------\n g : DGLGraph\n The input graph. Must be on CPU.\n return_counts : str, optional\n If given, the count of each edge in the original graph\n will be stored as edge features under the name\n ``return_counts``. 
The old features with the same name will be replaced.\n\n (Default: \"count\")\n writeback_mapping: bool, optional\n If True, return an extra write-back mapping for each edge\n type. The write-back mapping is a tensor recording\n the mapping from the edge IDs in the input graph to\n the edge IDs in the result graph. If the graph is\n heterogeneous, DGL returns a dictionary of edge types and such\n tensors.\n\n If False, only the simple graph is returned.\n\n (Default: False)\n copy_ndata: bool, optional\n If True, the node features of the simple graph are copied\n from the original graph.\n\n If False, the simple graph will not have any node features.\n\n (Default: True)\n copy_edata: bool, optional\n If True, the edge features of the simple graph are copied\n from the original graph. If there exists duplicate edges between\n two nodes (u, v), the feature of the edge is randomly selected\n from one of the duplicate edges.\n\n If False, the simple graph will not have any edge features.\n\n (Default: False)\n\n Returns\n -------\n DGLGraph\n The graph.\n tensor or dict of tensor\n The writeback mapping. Only when ``writeback_mapping`` is True.\n\n Notes\n -----\n If :attr:`copy_ndata` is True, the resulting graph will share the node feature\n tensors with the input graph. Hence, users should try to avoid in-place operations\n which will be visible to both graphs.\n\n This function discards the batch information. Please use\n :func:`dgl.DGLGraph.set_batch_num_nodes`\n and :func:`dgl.DGLGraph.set_batch_num_edges` on the transformed graph\n to maintain the information.\n\n Examples\n --------\n **Homogeneous Graphs**\n\n Create a graph for demonstrating to_simple API.\n In the original graph, there are multiple edges between 1 and 2.\n\n >>> import dgl\n >>> import torch as th\n >>> g = dgl.graph((th.tensor([0, 1, 2, 1]), th.tensor([1, 2, 0, 2])))\n >>> g.ndata['h'] = th.tensor([[0.], [1.], [2.]])\n >>> g.edata['h'] = th.tensor([[3.], [4.], [5.], [6.]])\n\n Convert the graph to a simple graph. The return counts is\n stored in the edge feature 'cnt' and the writeback mapping\n is returned in a tensor.\n\n >>> sg, wm = dgl.to_simple(g, return_counts='cnt', writeback_mapping=True)\n >>> sg.ndata['h']\n tensor([[0.],\n [1.],\n [2.]])\n >>> u, v, eid = sg.edges(form='all')\n >>> u\n tensor([0, 1, 2])\n >>> v\n tensor([1, 2, 0])\n >>> eid\n tensor([0, 1, 2])\n >>> sg.edata['cnt']\n tensor([1, 2, 1])\n >>> wm\n tensor([0, 1, 2, 1])\n >>> 'h' in g.edata\n False\n\n **Heterogeneous Graphs**\n\n >>> g = dgl.heterograph({\n ... ('user', 'wins', 'user'): (th.tensor([0, 2, 0, 2, 2]), th.tensor([1, 1, 2, 1, 0])),\n ... ('user', 'plays', 'game'): (th.tensor([1, 2, 1]), th.tensor([2, 1, 1]))\n ... 
})\n >>> g.nodes['game'].data['hv'] = th.ones(3, 1)\n >>> g.edges['plays'].data['he'] = th.zeros(3, 1)\n\n The return counts is stored in the default edge feature 'count' for each edge type.\n\n >>> sg, wm = dgl.to_simple(g, copy_ndata=False, writeback_mapping=True)\n >>> sg\n Graph(num_nodes={'game': 3, 'user': 3},\n num_edges={('user', 'wins', 'user'): 4, ('game', 'plays', 'user'): 3},\n metagraph=[('user', 'user'), ('game', 'user')])\n >>> sg.edges(etype='wins')\n (tensor([0, 2, 0, 2]), tensor([1, 1, 2, 0]))\n >>> wm[('user', 'wins', 'user')]\n tensor([0, 1, 2, 1, 3])\n >>> sg.edges(etype='plays')\n (tensor([2, 1, 1]), tensor([1, 2, 1]))\n >>> wm[('user', 'plays', 'game')]\n tensor([0, 1, 2])\n >>> 'hv' in sg.nodes['game'].data\n False\n >>> 'he' in sg.edges['plays'].data\n False\n >>> sg.edata['count']\n {('user', 'wins', 'user'): tensor([1, 2, 1, 1])\n ('user', 'plays', 'game'): tensor([1, 1, 1])}\n \"\"\"\n assert g.device == F.cpu(), 'the graph must be on CPU'\n if g.is_block:\n raise DGLError('Cannot convert a block graph to a simple graph.')\n simple_graph_index, counts, edge_maps = _CAPI_DGLToSimpleHetero(g._graph)\n simple_graph = DGLHeteroGraph(simple_graph_index, g.ntypes, g.etypes)\n counts = [F.from_dgl_nd(count) for count in counts]\n edge_maps = [F.from_dgl_nd(edge_map) for edge_map in edge_maps]\n\n if copy_ndata:\n node_frames = utils.extract_node_subframes(g, None)\n utils.set_new_frames(simple_graph, node_frames=node_frames)\n if copy_edata:\n eids = []\n for i in range(len(g.canonical_etypes)):\n feat_idx = F.asnumpy(edge_maps[i])\n _, indices = np.unique(feat_idx, return_index=True)\n eids.append(F.zerocopy_from_numpy(indices))\n\n edge_frames = utils.extract_edge_subframes(g, eids)\n utils.set_new_frames(simple_graph, edge_frames=edge_frames)\n\n if return_counts is not None:\n for count, canonical_etype in zip(counts, g.canonical_etypes):\n simple_graph.edges[canonical_etype].data[return_counts] = count\n\n if writeback_mapping:\n # single edge type\n if len(edge_maps) == 1:\n return simple_graph, edge_maps[0]\n # multiple edge type\n else:\n wb_map = {}\n for edge_map, canonical_etype in zip(edge_maps, g.canonical_etypes):\n wb_map[canonical_etype] = edge_map\n return simple_graph, wb_map\n\n return simple_graph\n\nDGLHeteroGraph.to_simple = utils.alias_func(to_simple)\n\ndef as_heterograph(g, ntype='_U', etype='_E'): # pylint: disable=unused-argument\n \"\"\"Convert a DGLGraph to a DGLHeteroGraph with one node and edge type.\n\n DEPRECATED: DGLGraph and DGLHeteroGraph have been merged. This function will\n do nothing and can be removed safely in all cases.\n \"\"\"\n dgl_warning('DEPRECATED: DGLGraph and DGLHeteroGraph have been merged in v0.5.\\n'\n '\\tdgl.as_heterograph will do nothing and can be removed safely in all cases.')\n return g\n\ndef as_immutable_graph(hg):\n \"\"\"Convert a DGLHeteroGraph with one node and edge type into a DGLGraph.\n\n DEPRECATED: DGLGraph and DGLHeteroGraph have been merged. This function will\n do nothing and can be removed safely in all cases.\n \"\"\"\n dgl_warning('DEPRECATED: DGLGraph and DGLHeteroGraph have been merged in v0.5.\\n'\n '\\tdgl.as_immutable_graph will do nothing and can be removed safely in all cases.')\n return hg\n\n_init_api(\"dgl.transform\")\n",
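The notes for add_self_loop above recommend calling remove_self_loop first when exactly one self-loop per node is wanted, since add_self_loop inserts loops regardless of whether they already exist. A minimal sketch of that idiom (assuming DGL >= 0.5 with the PyTorch backend; the toy graph is hypothetical):

import dgl
import torch

# node 0 already carries a self-loop in the raw graph
g = dgl.graph((torch.tensor([0, 0, 1]), torch.tensor([0, 1, 2])))
g = dgl.remove_self_loop(g)   # drop any existing self-loops first
g = dgl.add_self_loop(g)      # now add exactly one loop per node
# 2 original non-loop edges plus one loop for each of the 3 nodes
assert g.num_edges() == 2 + g.num_nodes()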
"\"\"\"Torch Module for Attention-based Graph Neural Network layer\"\"\"\n# pylint: disable= no-member, arguments-differ, invalid-name\nimport torch as th\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .... import function as fn\nfrom ...functional import edge_softmax\nfrom ....base import DGLError\nfrom ....utils import expand_as_pair\n\n\nclass AGNNConv(nn.Module):\n r\"\"\"\n\n Description\n -----------\n Attention-based Graph Neural Network layer from paper `Attention-based\n Graph Neural Network for Semi-Supervised Learning\n <https://arxiv.org/abs/1803.03735>`__.\n\n .. math::\n H^{l+1} = P H^{l}\n\n where :math:`P` is computed as:\n\n .. math::\n P_{ij} = \\mathrm{softmax}_i ( \\beta \\cdot \\cos(h_i^l, h_j^l))\n\n where :math:`\\beta` is a single scalar parameter.\n\n Parameters\n ----------\n init_beta : float, optional\n The :math:`\\beta` in the formula, a single scalar parameter.\n learn_beta : bool, optional\n If True, :math:`\\beta` will be learnable parameter.\n allow_zero_in_degree : bool, optional\n If there are 0-in-degree nodes in the graph, output for those nodes will be invalid\n since no message will be passed to those nodes. This is harmful for some applications\n causing silent performance regression. This module will raise a DGLError if it detects\n 0-in-degree nodes in input graph. By setting ``True``, it will suppress the check\n and let the users handle it by themselves. Default: ``False``.\n\n Note\n ----\n Zero in-degree nodes will lead to invalid output value. This is because no message\n will be passed to those nodes, the aggregation function will be appied on empty input.\n A common practice to avoid this is to add a self-loop for each node in the graph if\n it is homogeneous, which can be achieved by:\n\n >>> g = ... # a DGLGraph\n >>> g = dgl.add_self_loop(g)\n\n Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph\n since the edge type can not be decided for self_loop edges. 
Set ``allow_zero_in_degree``\n to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.\n A common practice to handle this is to filter out the nodes with zero in-degree when used\n after conv.\n\n Example\n -------\n >>> import dgl\n >>> import numpy as np\n >>> import torch as th\n >>> from dgl.nn import AGNNConv\n >>>\n >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))\n >>> g = dgl.add_self_loop(g)\n >>> feat = th.ones(6, 10)\n >>> conv = AGNNConv()\n >>> res = conv(g, feat)\n >>> res\n tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]],\n grad_fn=<BinaryReduceBackward>)\n \"\"\"\n def __init__(self,\n init_beta=1.,\n learn_beta=True,\n allow_zero_in_degree=False):\n super(AGNNConv, self).__init__()\n self._allow_zero_in_degree = allow_zero_in_degree\n if learn_beta:\n self.beta = nn.Parameter(th.Tensor([init_beta]))\n else:\n self.register_buffer('beta', th.Tensor([init_beta]))\n\n def set_allow_zero_in_degree(self, set_value):\n r\"\"\"\n\n Description\n -----------\n Set allow_zero_in_degree flag.\n\n Parameters\n ----------\n set_value : bool\n The value to be set to the flag.\n \"\"\"\n self._allow_zero_in_degree = set_value\n\n def forward(self, graph, feat):\n r\"\"\"\n\n Description\n -----------\n Compute AGNN layer.\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n feat : torch.Tensor\n The input feature of shape :math:`(N, *)` where :math:`N` is the\n number of nodes, and :math:`*` could be of any shape.\n If a pair of torch.Tensor is given, the pair must contain two tensors of shape\n :math:`(N_{in}, *)` and :math:`(N_{out}, *)`, the :math:`*` in the latter\n tensor must equal the previous one.\n\n Returns\n -------\n torch.Tensor\n The output feature of shape :math:`(N, *)` where :math:`*`\n should be the same as the input shape.\n\n Raises\n ------\n DGLError\n If there are 0-in-degree nodes in the input graph, it will raise DGLError\n since no message will be passed to those nodes. This will cause invalid output.\n The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.\n \"\"\"\n with graph.local_scope():\n if not self._allow_zero_in_degree:\n if (graph.in_degrees() == 0).any():\n raise DGLError('There are 0-in-degree nodes in the graph, '\n 'output for those nodes will be invalid. '\n 'This is harmful for some applications, '\n 'causing silent performance regression. '\n 'Adding self-loop on the input graph by '\n 'calling `g = dgl.add_self_loop(g)` will resolve '\n 'the issue. Setting ``allow_zero_in_degree`` '\n 'to be `True` when constructing this module will '\n 'suppress the check and let the code run.')\n\n feat_src, feat_dst = expand_as_pair(feat, graph)\n\n graph.srcdata['h'] = feat_src\n graph.srcdata['norm_h'] = F.normalize(feat_src, p=2, dim=-1)\n if isinstance(feat, tuple) or graph.is_block:\n graph.dstdata['norm_h'] = F.normalize(feat_dst, p=2, dim=-1)\n # compute cosine distance\n graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))\n cos = graph.edata.pop('cos')\n e = self.beta * cos\n graph.edata['p'] = edge_softmax(graph, e)\n graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))\n return graph.dstdata.pop('h')\n"
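Because AGNNConv is parameter-light (one scalar beta per layer), the original AGNN paper stacks several propagation layers between two linear maps. A minimal sketch of that arrangement, assuming DGL with the PyTorch backend; the AGNN wrapper class and layer sizes here are illustrative, not part of the library:

import dgl
import torch
import torch.nn as nn
from dgl.nn import AGNNConv

class AGNN(nn.Module):
    def __init__(self, in_feats, hidden, n_classes, n_prop=2):
        super().__init__()
        self.embed = nn.Linear(in_feats, hidden)
        # several propagation layers, each carrying only a scalar beta
        self.props = nn.ModuleList(AGNNConv() for _ in range(n_prop))
        self.out = nn.Linear(hidden, n_classes)

    def forward(self, g, x):
        h = torch.relu(self.embed(x))
        for prop in self.props:
            h = prop(g, h)   # cosine-attention propagation, feature size unchanged
        return self.out(h)

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))  # self-loops avoid the 0-in-degree check
model = AGNN(in_feats=5, hidden=16, n_classes=3)
logits = model(g, torch.randn(g.num_nodes(), 5))
print(logits.shape)  # torch.Size([3, 3])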
] | [
[
"numpy.unique",
"scipy.sparse.eye",
"numpy.cumsum",
"scipy.sparse.linalg.eigs",
"numpy.repeat"
],
[
"torch.nn.functional.normalize",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scikit-learn-contrib/categorical_encoding | [
"6a13c14919d56fed8177a173d4b3b82c5ea2fef5",
"6a13c14919d56fed8177a173d4b3b82c5ea2fef5"
] | [
"category_encoders/hashing.py",
"category_encoders/ordinal.py"
] | [
"\"\"\"The hashing module contains all methods and classes related to the hashing trick.\"\"\"\n\nimport sys\nimport hashlib\nimport category_encoders.utils as util\nimport multiprocessing\nimport pandas as pd\nimport math\nimport platform\n\n__author__ = 'willmcginnis', 'LiuShulun'\n\n\nclass HashingEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin):\n\n \"\"\" A multivariate hashing implementation with configurable dimensionality/precision.\n\n The advantage of this encoder is that it does not maintain a dictionary of observed categories.\n Consequently, the encoder does not grow in size and accepts new values during data scoring\n by design.\n\n It's important to read about how max_process & max_sample work\n before setting them manually, inappropriate setting slows down encoding.\n\n Default value of 'max_process' is 1 on Windows because multiprocessing might cause issues, see in :\n https://github.com/scikit-learn-contrib/categorical-encoding/issues/215\n https://docs.python.org/2/library/multiprocessing.html?highlight=process#windows\n\n Parameters\n ----------\n\n verbose: int\n integer indicating verbosity of the output. 0 for none.\n cols: list\n a list of columns to encode, if None, all string columns will be encoded.\n drop_invariant: bool\n boolean for whether or not to drop columns with 0 variance.\n return_df: bool\n boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).\n hash_method: str\n which hashing method to use. Any method from hashlib works.\n max_process: int\n how many processes to use in transform(). Limited in range(1, 64).\n By default, it uses half of the logical CPUs.\n For example, 4C4T makes max_process=2, 4C8T makes max_process=4.\n Set it larger if you have a strong CPU.\n It is not recommended to set it larger than is the count of the\n logical CPUs as it will actually slow down the encoding.\n max_sample: int\n how many samples to encode by each process at a time.\n This setting is useful on low memory machines.\n By default, max_sample=(all samples num)/(max_process).\n For example, 4C8T CPU with 100,000 samples makes max_sample=25,000,\n 6C12T CPU with 100,000 samples makes max_sample=16,666.\n It is not recommended to set it larger than the default value.\n n_components: int\n how many bits to use to represent the feature. By default we use 8 bits.\n For high-cardinality features, consider using up-to 32 bits.\n\n Example\n -------\n >>> from category_encoders.hashing import HashingEncoder\n >>> import pandas as pd\n >>> from sklearn.datasets import load_boston\n >>> bunch = load_boston()\n >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)\n >>> y = bunch.target\n >>> he = HashingEncoder(cols=['CHAS', 'RAD']).fit(X, y)\n >>> data = he.transform(X)\n >>> print(data.info())\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 506 entries, 0 to 505\n Data columns (total 19 columns):\n col_0 506 non-null int64\n col_1 506 non-null int64\n col_2 506 non-null int64\n col_3 506 non-null int64\n col_4 506 non-null int64\n col_5 506 non-null int64\n col_6 506 non-null int64\n col_7 506 non-null int64\n CRIM 506 non-null float64\n ZN 506 non-null float64\n INDUS 506 non-null float64\n NOX 506 non-null float64\n RM 506 non-null float64\n AGE 506 non-null float64\n DIS 506 non-null float64\n TAX 506 non-null float64\n PTRATIO 506 non-null float64\n B 506 non-null float64\n LSTAT 506 non-null float64\n dtypes: float64(11), int64(8)\n memory usage: 75.2 KB\n None\n\n References\n ----------\n .. 
[1] Feature Hashing for Large Scale Multitask Learning, from\n https://alex.smola.org/papers/2009/Weinbergeretal09.pdf\n .. [2] Don't be tricked by the Hashing Trick, from\n https://booking.ai/dont-be-tricked-by-the-hashing-trick-192a6aae3087\n\n \"\"\"\n prefit_ordinal = False\n encoding_relation = util.EncodingRelation.ONE_TO_M\n\n def __init__(self, max_process=0, max_sample=0, verbose=0, n_components=8, cols=None, drop_invariant=False,\n return_df=True, hash_method='md5'):\n super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df,\n handle_unknown=\"does not apply\", handle_missing=\"does not apply\")\n\n if max_process not in range(1, 128):\n # platform.system is a function; it must be called to get the OS name\n if platform.system() == 'Windows':\n self.max_process = 1\n else:\n self.max_process = int(math.ceil(multiprocessing.cpu_count() / 2))\n if self.max_process < 1:\n self.max_process = 1\n elif self.max_process > 128:\n self.max_process = 128\n else:\n self.max_process = max_process\n self.max_sample = int(max_sample)\n self.auto_sample = max_sample <= 0\n self.data_lines = 0\n self.X = None\n\n self.n_components = n_components\n self.hash_method = hash_method\n\n def _fit(self, X, y=None, **kwargs):\n pass\n\n @staticmethod\n def require_data(self, data_lock, new_start, done_index, hashing_parts, cols, process_index):\n if data_lock.acquire():\n if new_start.value:\n end_index = 0\n new_start.value = False\n else:\n end_index = done_index.value\n\n if all([self.data_lines > 0, end_index < self.data_lines]):\n start_index = end_index\n if (self.data_lines - end_index) <= self.max_sample:\n end_index = self.data_lines\n else:\n end_index += self.max_sample\n done_index.value = end_index\n data_lock.release()\n\n data_part = self.X.iloc[start_index: end_index]\n # Always get df and check it after merging all data parts\n data_part = self.hashing_trick(X_in=data_part, hashing_method=self.hash_method, N=self.n_components, cols=self.cols)\n part_index = int(math.ceil(end_index / self.max_sample))\n hashing_parts.put({part_index: data_part})\n if self.verbose == 5:\n print(\"Process - \" + str(process_index),\n \"done hashing data : \" + str(start_index) + \"~\" + str(end_index))\n if end_index < self.data_lines:\n self.require_data(self, data_lock, new_start, done_index, hashing_parts, cols=cols, process_index=process_index)\n else:\n data_lock.release()\n else:\n data_lock.release()\n\n def _transform(self, X):\n \"\"\"\n Call _transform_single_cpu() if you want to use single CPU with all samples\n \"\"\"\n self.X = X\n\n self.data_lines = len(self.X)\n\n data_lock = multiprocessing.Manager().Lock()\n new_start = multiprocessing.Manager().Value('d', True)\n done_index = multiprocessing.Manager().Value('d', int(0))\n hashing_parts = multiprocessing.Manager().Queue()\n\n if self.auto_sample:\n self.max_sample = int(self.data_lines / self.max_process)\n\n if self.max_sample == 0:\n self.max_sample = 1\n if self.max_process == 1:\n self.require_data(self, data_lock, new_start, done_index, hashing_parts, cols=self.cols, process_index=1)\n else:\n n_process = []\n for thread_index in range(self.max_process):\n process = multiprocessing.Process(target=self.require_data,\n args=(self, data_lock, new_start, done_index, hashing_parts, self.cols, thread_index + 1))\n process.daemon = True\n n_process.append(process)\n for process in n_process:\n process.start()\n for process in n_process:\n process.join()\n data = self.X\n if self.max_sample == 0 or self.max_sample == self.data_lines:\n if hashing_parts:\n data = 
list(hashing_parts.get().values())[0]\n else:\n list_data = {}\n while not hashing_parts.empty():\n list_data.update(hashing_parts.get())\n sort_data = []\n for part_index in sorted(list_data):\n sort_data.append(list_data[part_index])\n if sort_data:\n data = pd.concat(sort_data)\n return data\n\n def _transform_single_cpu(self, X, override_return_df=False):\n \"\"\"Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.\n\n \"\"\"\n\n if self._dim is None:\n raise ValueError('Must train encoder before it can be used to transform data.')\n\n # first check the type\n X = util.convert_input(X)\n\n # then make sure that it is the right size\n if X.shape[1] != self._dim:\n raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}')\n\n if not list(self.cols):\n return X\n\n X = self.hashing_trick(X, hashing_method=self.hash_method, N=self.n_components, cols=self.cols)\n\n if self.drop_invariant:\n X = X.drop(columns=self.invariant_cols)\n\n if self.return_df or override_return_df:\n return X\n else:\n return X.values\n\n @staticmethod\n def hashing_trick(X_in, hashing_method='md5', N=2, cols=None, make_copy=False):\n \"\"\"A basic hashing implementation with configurable dimensionality/precision\n\n Performs the hashing trick on a pandas dataframe, `X`, using the hashing method from hashlib\n identified by `hashing_method`. The number of output dimensions (`N`), and columns to hash (`cols`) are\n also configurable.\n\n Parameters\n ----------\n\n X_in: pandas dataframe\n description text\n hashing_method: string, optional\n description text\n N: int, optional\n description text\n cols: list, optional\n description text\n make_copy: bool, optional\n description text\n\n Returns\n -------\n\n out : dataframe\n A hashing encoded dataframe.\n\n References\n ----------\n Cite the relevant literature, e.g. [1]_. You may also cite these\n references in the notes section above.\n .. [1] Kilian Weinberger; Anirban Dasgupta; John Langford; Alex Smola; Josh Attenberg (2009). Feature Hashing\n for Large Scale Multitask Learning. Proc. ICML.\n\n \"\"\"\n if hashing_method not in hashlib.algorithms_available:\n raise ValueError(f\"Hashing Method: {hashing_method} not Available. \"\n f\"Please use one from: [{', '.join([str(x) for x in hashlib.algorithms_available])}]\")\n\n if make_copy:\n X = X_in.copy(deep=True)\n else:\n X = X_in\n\n if cols is None:\n cols = X.columns.values\n\n def hash_fn(x):\n tmp = [0 for _ in range(N)]\n for val in x.values:\n if val is not None:\n hasher = hashlib.new(hashing_method)\n if sys.version_info[0] == 2:\n hasher.update(str(val))\n else:\n hasher.update(bytes(str(val), 'utf-8'))\n tmp[int(hasher.hexdigest(), 16) % N] += 1\n return pd.Series(tmp, index=new_cols)\n\n new_cols = [f'col_{d}' for d in range(N)]\n\n X_cat = X.loc[:, cols]\n X_num = X.loc[:, [x for x in X.columns.values if x not in cols]]\n\n X_cat = X_cat.apply(hash_fn, axis=1)\n X_cat.columns = new_cols\n\n X = pd.concat([X_cat, X_num], axis=1)\n\n return X\n",
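For a quick feel of the hashing trick implemented above, the static HashingEncoder.hashing_trick helper can be called directly on a DataFrame: categorical columns are hashed into N indicator-count columns while numeric columns pass through untouched. A minimal sketch (the toy columns are hypothetical; which bucket each category lands in depends on its md5 digest):

import pandas as pd
from category_encoders.hashing import HashingEncoder

X = pd.DataFrame({
    'color': ['red', 'blue', 'red'],
    'shape': ['circle', 'square', 'square'],
    'size': [1.0, 2.0, 3.0],
})
out = HashingEncoder.hashing_trick(X, hashing_method='md5', N=4, cols=['color', 'shape'])
# hashed columns come first, untouched numeric columns follow
print(out.columns.tolist())  # ['col_0', 'col_1', 'col_2', 'col_3', 'size']
# each row's col_* counts sum to 2, one increment per hashed categorical value
print(out[['col_0', 'col_1', 'col_2', 'col_3']].sum(axis=1).tolist())  # [2, 2, 2]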
"\"\"\"Ordinal or label encoding\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport category_encoders.utils as util\nimport warnings\n\n__author__ = 'willmcginnis'\n\n\nclass OrdinalEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin):\n \"\"\"Encodes categorical features as ordinal, in one ordered feature.\n\n Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed\n in; in this case, we use the knowledge that there is some true order to the classes themselves. Otherwise, the classes\n are assumed to have no true order and integers are selected at random.\n\n Parameters\n ----------\n\n verbose: int\n integer indicating verbosity of the output. 0 for none.\n cols: list\n a list of columns to encode, if None, all string columns will be encoded.\n drop_invariant: bool\n boolean for whether or not to drop columns with 0 variance.\n return_df: bool\n boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).\n mapping: list of dicts\n a mapping of class to label to use for the encoding, optional.\n the dict contains the keys 'col' and 'mapping'.\n the value of 'col' should be the feature name.\n the value of 'mapping' should be a dictionary of 'original_label' to 'encoded_label'.\n example mapping: [\n {'col': 'col1', 'mapping': {None: 0, 'a': 1, 'b': 2}},\n {'col': 'col2', 'mapping': {None: 0, 'x': 1, 'y': 2}}\n ]\n handle_unknown: str\n options are 'error', 'return_nan' and 'value', defaults to 'value', which will impute the category -1.\n handle_missing: str\n options are 'error', 'return_nan', and 'value, default to 'value', which treat nan as a category at fit time,\n or -2 at transform time if nan is not a category during fit.\n\n Example\n -------\n >>> from category_encoders import *\n >>> import pandas as pd\n >>> from sklearn.datasets import load_boston\n >>> bunch = load_boston()\n >>> y = bunch.target\n >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)\n >>> enc = OrdinalEncoder(cols=['CHAS', 'RAD']).fit(X, y)\n >>> numeric_dataset = enc.transform(X)\n >>> print(numeric_dataset.info())\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 506 entries, 0 to 505\n Data columns (total 13 columns):\n CRIM 506 non-null float64\n ZN 506 non-null float64\n INDUS 506 non-null float64\n CHAS 506 non-null int64\n NOX 506 non-null float64\n RM 506 non-null float64\n AGE 506 non-null float64\n DIS 506 non-null float64\n RAD 506 non-null int64\n TAX 506 non-null float64\n PTRATIO 506 non-null float64\n B 506 non-null float64\n LSTAT 506 non-null float64\n dtypes: float64(11), int64(2)\n memory usage: 51.5 KB\n None\n\n References\n ----------\n\n .. [1] Contrast Coding Systems for Categorical Variables, from\n https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/\n\n .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from\n http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf\n\n \"\"\"\n prefit_ordinal = False\n encoding_relation = util.EncodingRelation.ONE_TO_ONE\n\n def __init__(self, verbose=0, mapping=None, cols=None, drop_invariant=False, return_df=True,\n handle_unknown='value', handle_missing='value'):\n super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df,\n handle_unknown=handle_unknown, handle_missing=handle_missing)\n self.mapping_supplied = mapping is not None\n self.mapping = mapping\n\n @property\n def category_mapping(self):\n return self.mapping\n\n def _fit(self, X, y=None, **kwargs):\n # reset mapping in case of refit\n if not self.mapping_supplied:\n self.mapping = None\n _, categories = self.ordinal_encoding(\n X,\n mapping=self.mapping,\n cols=self.cols,\n handle_unknown=self.handle_unknown,\n handle_missing=self.handle_missing\n )\n self.mapping = categories\n\n def _transform(self, X):\n\n X, _ = self.ordinal_encoding(\n X,\n mapping=self.mapping,\n cols=self.cols,\n handle_unknown=self.handle_unknown,\n handle_missing=self.handle_missing\n )\n return X\n\n def inverse_transform(self, X_in):\n \"\"\"\n Perform the inverse transformation to encoded data. Will attempt best case reconstruction, which means\n it will return nan for handle_missing and handle_unknown settings that break the bijection. We issue\n warnings when some of those cases occur.\n\n Parameters\n ----------\n X_in : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n p: array, the same size of X_in\n\n \"\"\"\n\n # fail fast\n if self._dim is None:\n raise ValueError('Must train encoder before it can be used to inverse_transform data')\n\n # first check the type and make deep copy\n X = util.convert_input(X_in, deep=True)\n\n # then make sure that it is the right size\n if X.shape[1] != self._dim:\n if self.drop_invariant:\n raise ValueError(f\"Unexpected input dimension {X.shape[1]}, the attribute drop_invariant should \"\n \"be False when transforming the data\")\n else:\n raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}')\n\n if not list(self.cols):\n return X if self.return_df else X.values\n\n if self.handle_unknown == 'value':\n for col in self.cols:\n if any(X[col] == -1):\n warnings.warn(\"inverse_transform is not supported because transform impute \"\n f\"the unknown category -1 when encode {col}\")\n\n if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan':\n for col in self.cols:\n if X[col].isnull().any():\n warnings.warn(\"inverse_transform is not supported because transform impute \"\n f\"the unknown category nan when encode {col}\")\n\n for switch in self.mapping:\n column_mapping = switch.get('mapping')\n inverse = pd.Series(data=column_mapping.index, index=column_mapping.values)\n X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type'))\n\n return X if self.return_df else X.values\n\n @staticmethod\n def ordinal_encoding(X_in, mapping=None, cols=None, handle_unknown='value', handle_missing='value'):\n \"\"\"\n Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed\n in, in this case we use the knowledge that there is some true order to the classes themselves. 
Otherwise, the classes\n are assumed to have no true order and integers are selected at random.\n \"\"\"\n\n return_nan_series = pd.Series(data=[np.nan], index=[-2])\n\n X = X_in.copy(deep=True)\n\n if cols is None:\n cols = X.columns.values\n\n if mapping is not None:\n mapping_out = mapping\n for switch in mapping:\n column = switch.get('col')\n col_mapping = switch['mapping']\n\n # Treat None as np.nan\n X[column] = pd.Series([el if el is not None else np.NaN for el in X[column]], index=X[column].index)\n X[column] = X[column].map(col_mapping)\n if util.is_category(X[column].dtype):\n nan_identity = col_mapping.loc[col_mapping.index.isna()].values[0]\n X[column] = X[column].cat.add_categories(nan_identity)\n X[column] = X[column].fillna(nan_identity)\n try:\n X[column] = X[column].astype(int)\n except ValueError as e:\n X[column] = X[column].astype(float)\n\n if handle_unknown == 'value':\n X[column].fillna(-1, inplace=True)\n elif handle_unknown == 'error':\n missing = X[column].isnull()\n if any(missing):\n raise ValueError(f'Unexpected categories found in column {column}')\n\n if handle_missing == 'return_nan':\n X[column] = X[column].map(return_nan_series).where(X[column] == -2, X[column])\n\n else:\n mapping_out = []\n for col in cols:\n\n nan_identity = np.nan\n \n categories = list(X[col].unique())\n if util.is_category(X[col].dtype):\n # Avoid using pandas category dtype meta-data if possible, see #235, #238.\n if X[col].dtype.ordered:\n categories = [c for c in X[col].dtype.categories if c in categories]\n if X[col].isna().any():\n categories += [np.nan]\n\n index = pd.Series(categories).fillna(nan_identity).unique()\n\n data = pd.Series(index=index, data=range(1, len(index) + 1))\n\n if handle_missing == 'value' and ~data.index.isnull().any():\n data.loc[nan_identity] = -2\n elif handle_missing == 'return_nan':\n data.loc[nan_identity] = -2\n\n mapping_out.append({'col': col, 'mapping': data, 'data_type': X[col].dtype}, )\n\n return X, mapping_out\n"
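The mapping parameter documented above lets the caller pin down a known order instead of letting the encoder assign integers arbitrarily. A minimal sketch against the source shown here (the 'grade' column and its labels are made up; exact edge-case behavior may vary across category_encoders versions):

import pandas as pd
from category_encoders import OrdinalEncoder

X = pd.DataFrame({'grade': ['low', 'medium', 'high', 'medium']})
# explicit mapping preserves the natural low < medium < high order;
# unseen categories at transform time are imputed as -1 (handle_unknown='value')
enc = OrdinalEncoder(cols=['grade'],
                     mapping=[{'col': 'grade',
                               'mapping': {'low': 1, 'medium': 2, 'high': 3}}])
print(enc.fit_transform(X)['grade'].tolist())  # [1, 2, 3, 2]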
] | [
[
"pandas.concat",
"pandas.Series"
],
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
AsifHasanChowdhury/Airtificial-Intelligence-CSE422-BRACU- | [
"03acedf4694111eddde3c1ccce9d009571a7f546"
] | [
"L-A-1/Task1.py"
] | [
"import numpy as dp\r\nimport queue\r\nq=queue.Queue()\r\nvertex=0\r\nedge=0\r\nline=0\r\ncount=0\r\n\r\n#with open('F:\\\\CSE422\\\\bfs.txt') as file:\r\n# for line in file:\r\n# print(line.rstrip())\r\n# vertex=line\r\n# Edge=line\r\n# break\r\n \r\narr_zeros=dp.zeros((vertex,vertex),dtype=int)\r\n\r\nf=open (\"F:\\\\CSE422\\\\bfs.txt\",\"r\")\r\nfor i in f:\r\n line_space_remove=i.strip()\r\n split_line=line_space_remove.split()\r\n #print(\"hello\",i)\r\n if(count==0):\r\n vertex=int(split_line[0])\r\n count=count+1\r\n #print(split_line[0])\r\n elif(count==1):\r\n edge=int(split_line[0])\r\n arr_zeros = dp.zeros((vertex,vertex),dtype=int)\r\n count=count+1\r\n #print(split_line[0])\r\n elif(count>1):\r\n #print(\"Asche\")\r\n bi_directional_edge_1=int(split_line[0])\r\n bi_directional_edge_2=int(split_line[1])\r\n \r\n arr_zeros[bi_directional_edge_1,bi_directional_edge_2]=int(1)\r\n arr_zeros[bi_directional_edge_2,bi_directional_edge_1]=int(1)\r\n #print(split_line[1])\r\n #lst.append(split_line)\r\n \r\nprint(arr_zeros)\r\ndiscover=dp.zeros((1,vertex),dtype=int)\r\nvisited=dp.zeros((1,vertex),dtype=int)\r\nq.put(0)\r\nwhile not q.empty():\r\n u=q.get()\r\n v=0\r\n while(v<vertex):\r\n if(arr_zeros[u,v]==1 and visited[0,v]==0):\r\n q.put(int(v))\r\n visited[0,v]=1\r\n discover[0,v]=discover[0,u]+1\r\n v=v+1\r\n\r\n\r\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Wollala/Gradient-Free-Optimizers | [
"8fb1608c264431b87f66fd2d233b76a0fa75316c",
"8fb1608c264431b87f66fd2d233b76a0fa75316c"
] | [
"gradient_free_optimizers/optimizers/exp_opt/ensemble_optimizer.py",
"gradient_free_optimizers/optimizers/smb_opt/exp_imp_based_opt.py"
] | [
"# Author: Simon Blanke\n# Email: [email protected]\n# License: MIT License\n\n\nfrom ..smb_opt.exp_imp_based_opt import ExpectedImprovementBasedOptimization\nfrom ..smb_opt.surrogate_models import EnsembleRegressor\n\n\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.neural_network import MLPRegressor\n\n\nclass EnsembleOptimizer(ExpectedImprovementBasedOptimization):\n name = \"Ensemble Optimizer\"\n\n def __init__(\n self,\n *args,\n estimators=[\n GradientBoostingRegressor(n_estimators=5),\n # DecisionTreeRegressor(),\n # MLPRegressor(),\n GaussianProcessRegressor(),\n ],\n xi=0.01,\n warm_start_smbo=None,\n max_sample_size=10000000,\n sampling={\"random\": 1000000},\n warnings=100000000,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.estimators = estimators\n self.regr = EnsembleRegressor(estimators)\n self.xi = xi\n self.warm_start_smbo = warm_start_smbo\n self.max_sample_size = max_sample_size\n self.sampling = sampling\n self.warnings = warnings\n\n self.init_warm_start_smbo()\n",
"# Author: Simon Blanke\n# Email: [email protected]\n# License: MIT License\n\n\nimport numpy as np\nfrom scipy.stats import norm\n\nfrom .smbo import SMBO\n\n\ndef normalize(array):\n num = array - array.min()\n den = array.max() - array.min()\n\n if den == 0:\n return np.random.random_sample(array.shape)\n else:\n return ((num / den) + 0) / 1\n\n\nclass ExpectedImprovementBasedOptimization(SMBO):\n def __init__(\n self,\n *args,\n xi=0.01,\n warm_start_smbo=None,\n sampling={\"random\": 1000000},\n warnings=100000000,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.new_positions = []\n self.xi = xi\n self.warm_start_smbo = warm_start_smbo\n self.sampling = sampling\n self.warnings = warnings\n\n def _expected_improvement(self):\n all_pos_comb = self._all_possible_pos()\n self.pos_comb = self._sampling(all_pos_comb)\n\n mu, sigma = self.regr.predict(self.pos_comb, return_std=True)\n # mu_sample = self.regr.predict(self.X_sample)\n mu = mu.reshape(-1, 1)\n sigma = sigma.reshape(-1, 1)\n\n Y_sample = normalize(np.array(self.Y_sample)).reshape(-1, 1)\n imp = mu - np.max(Y_sample) - self.xi\n Z = np.divide(imp, sigma, out=np.zeros_like(sigma), where=sigma != 0)\n\n exploit = imp * norm.cdf(Z)\n explore = sigma * norm.pdf(Z)\n\n exp_imp = exploit + explore\n exp_imp[sigma == 0.0] = 0.0\n\n return exp_imp[:, 0]\n\n def _propose_location(self):\n X_sample = np.array(self.X_sample)\n Y_sample = np.array(self.Y_sample)\n\n if len(Y_sample) == 0:\n return self.move_random()\n\n Y_sample = normalize(Y_sample).reshape(-1, 1)\n self.regr.fit(X_sample, Y_sample)\n\n exp_imp = self._expected_improvement()\n\n index_best = list(exp_imp.argsort()[::-1])\n all_pos_comb_sorted = self.pos_comb[index_best]\n pos_best = all_pos_comb_sorted[0]\n\n return pos_best\n\n @SMBO.track_nth_iter\n @SMBO.track_X_sample\n def iterate(self):\n return self._propose_location()\n\n @SMBO.track_y_sample\n def evaluate(self, score_new):\n self.score_new = score_new\n\n self._evaluate_new2current(score_new)\n self._evaluate_current2best()\n"
] | [
[
"sklearn.gaussian_process.GaussianProcessRegressor",
"sklearn.ensemble.GradientBoostingRegressor"
],
[
"scipy.stats.norm.cdf",
"scipy.stats.norm.pdf",
"numpy.random.random_sample",
"numpy.max",
"numpy.zeros_like",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hqucms/dgl | [
"bf8bb58f60863466e5254bfa6ee2ad15f2384acb"
] | [
"examples/pytorch/sgc/sgc.py"
] | [
"\"\"\"\nThis code was modified from the GCN implementation in DGL examples.\nSimplifying Graph Convolutional Networks\nPaper: https://arxiv.org/abs/1902.07153\nCode: https://github.com/Tiiiger/SGC\nSGC implementation in DGL.\n\"\"\"\nimport argparse, time, math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nfrom dgl import DGLGraph\nfrom dgl.data import register_data_args, load_data\nfrom dgl.nn.pytorch.conv import SGConv\n\n\ndef evaluate(model, g, features, labels, mask):\n model.eval()\n with torch.no_grad():\n logits = model(g, features)[mask] # only compute the evaluation set\n labels = labels[mask]\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels)\n\ndef main(args):\n # load and preprocess dataset\n data = load_data(args)\n features = torch.FloatTensor(data.features)\n labels = torch.LongTensor(data.labels)\n train_mask = torch.ByteTensor(data.train_mask)\n val_mask = torch.ByteTensor(data.val_mask)\n test_mask = torch.ByteTensor(data.test_mask)\n in_feats = features.shape[1]\n n_classes = data.num_labels\n n_edges = data.graph.number_of_edges()\n print(\"\"\"----Data statistics------'\n #Edges %d\n #Classes %d\n #Train samples %d\n #Val samples %d\n #Test samples %d\"\"\" %\n (n_edges, n_classes,\n train_mask.sum().item(),\n val_mask.sum().item(),\n test_mask.sum().item()))\n\n if args.gpu < 0:\n cuda = False\n else:\n cuda = True\n torch.cuda.set_device(args.gpu)\n features = features.cuda()\n labels = labels.cuda()\n train_mask = train_mask.cuda()\n val_mask = val_mask.cuda()\n test_mask = test_mask.cuda()\n\n # graph preprocess and calculate normalization factor\n g = DGLGraph(data.graph)\n n_edges = g.number_of_edges()\n # add self loop\n g.add_edges(g.nodes(), g.nodes())\n\n # create SGC model\n model = SGConv(in_feats,\n n_classes,\n k=2,\n cached=True,\n bias=args.bias)\n\n if cuda: model.cuda()\n loss_fcn = torch.nn.CrossEntropyLoss()\n\n # use optimizer\n optimizer = torch.optim.Adam(model.parameters(),\n lr=args.lr,\n weight_decay=args.weight_decay)\n\n # initialize graph\n dur = []\n for epoch in range(args.n_epochs):\n model.train()\n if epoch >= 3:\n t0 = time.time()\n # forward\n logits = model(g, features) # only compute the train set\n loss = loss_fcn(logits[train_mask], labels[train_mask])\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if epoch >= 3:\n dur.append(time.time() - t0)\n\n acc = evaluate(model, g, features, labels, val_mask)\n print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | \"\n \"ETputs(KTEPS) {:.2f}\". format(epoch, np.mean(dur), loss.item(),\n acc, n_edges / np.mean(dur) / 1000))\n\n print()\n acc = evaluate(model, g, features, labels, test_mask)\n print(\"Test Accuracy {:.4f}\".format(acc))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='SGC')\n register_data_args(parser)\n parser.add_argument(\"--gpu\", type=int, default=-1,\n help=\"gpu\")\n parser.add_argument(\"--lr\", type=float, default=0.2,\n help=\"learning rate\")\n parser.add_argument(\"--bias\", action='store_true', default=False,\n help=\"flag to use bias\")\n parser.add_argument(\"--n-epochs\", type=int, default=100,\n help=\"number of training epochs\")\n parser.add_argument(\"--weight-decay\", type=float, default=5e-6,\n help=\"Weight for L2 loss\")\n args = parser.parse_args()\n print(args)\n\n main(args)\n"
] | [
[
"torch.ByteTensor",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.max",
"torch.cuda.set_device",
"torch.sum",
"torch.no_grad",
"torch.FloatTensor",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TJ-Machine-Learning-Group/LAB1-Regression | [
"86baa7123f711cdf4a39a1632223cdc5ae0e6d2b"
] | [
"EDA.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.decomposition import PCA\ndata = pd.read_excel(r\"Concrete_Data.xls\")\nreq_col_names = [\"Cement\", \"BlastFurnaceSlag\", \"FlyAsh\", \"Water\", \"Superplasticizer\",\n \"CoarseAggregate\", \"FineAggregate\", \"Age\", \"CC_Strength\"]\ncurr_col_names = list(data.columns)\n\nmapper = {}\nfor i, name in enumerate(curr_col_names):\n mapper[name] = req_col_names[i]\n\ndata = data.rename(columns=mapper)\ndata.head()\ndata.isna().sum()\ndata.describe()\n# pairwise relations\n#sns.pairplot(newX)\n#plt.show()\n\n# Pearson Correlation coefficients heatmap\nX=data.values[:, :-1]\npca=PCA(n_components=X.shape[1]-2)\nnewX = pca.fit_transform(X)\ncorr = np.corrcoef(newX.T)\nplt.figure(figsize=(9,7))\nsns.heatmap(corr, annot=True, cmap='Oranges')\nb, t = plt.ylim()\nplt.ylim(b+0.5, t-0.5)\nplt.title(\"Feature Correlation Heatmap\")\nplt.show()\n\n\n# Observations\n\n# Observations from Strength vs (Cement, Age, Water)\nax = sns.distplot(data.CC_Strength)\nax.set_title(\"Compressive Strength Distribution\")\nfig, ax = plt.subplots(figsize=(10,7))\nsns.scatterplot(y=\"CC_Strength\", x=\"Cement\", hue=\"Water\", size=\"Age\", data=data, ax=ax, sizes=(50, 300))\nax.set_title(\"CC Strength vs (Cement, Age, Water)\")\nax.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\nplt.show()\n\n\n# Observations from CC Strength vs (Fine aggregate, Super Plasticizer, FlyAsh)\nfig, ax = plt.subplots(figsize=(10,7))\nsns.scatterplot(y=\"CC_Strength\", x=\"FineAggregate\", hue=\"FlyAsh\", size=\"Superplasticizer\",\n data=data, ax=ax, sizes=(50, 300))\nax.set_title(\"CC Strength vs (Fine aggregate, Super Plasticizer, FlyAsh)\")\nax.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\nplt.show()\n\n\n# Observations from CC Strength vs (Fine aggregate, Super Plasticizer, Water)\nfig, ax = plt.subplots(figsize=(10,7))\nsns.scatterplot(y=\"CC_Strength\", x=\"FineAggregate\", hue=\"Water\", size=\"Superplasticizer\",\n data=data, ax=ax, sizes=(50, 300))\nax.set_title(\"CC Strength vs (Fine aggregate, Super Plasticizer, Water)\")\nax.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\nplt.show()\n\n\n#reference: https://github.com/pranaymodukuru/Concrete-compressive-strength/blob/master/ConcreteCompressiveStrengthPrediction.ipynb\n"
] | [
[
"pandas.read_excel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"numpy.corrcoef",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rakeshchada/gpt2-singular-plural | [
"80744e3049af8014ad0e32675665d6886ab6bfa8"
] | [
"generate_singular.py"
] | [
"#!/usr/bin/env python3\n# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport logging\nfrom tqdm import trange\n\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig\n\nfrom pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer\nfrom pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer\nfrom pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer\nfrom pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer\n\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\nMAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop\n\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())\n\nMODEL_CLASSES = {\n 'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),\n 'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),\n 'xlnet': (XLNetLMHeadModel, XLNetTokenizer),\n 'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),\n}\n\n# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia\n# in https://github.com/rusiaaman/XLNet-gen#methodology\n# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e\nPADDING_TEXT = \"\"\" In 1991, the remains of Russian Tsar Nicholas II and his family\n(except for Alexei and Maria) are discovered.\nThe voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the\nremainder of the story. 1883 Western Siberia,\na young Grigori Rasputin is asked by his father and a group of men to perform magic.\nRasputin has a vision and denounces one of the men as a horse thief. Although his\nfather initially slaps him for making such an accusation, Rasputin watches as the\nman is chased outside and beaten. Twenty years later, Rasputin sees a vision of\nthe Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,\nwith people, even a bishop, begging for his blessing. 
<eod> </s> <eos>\"\"\"\n\n\ndef set_seed(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n \"\"\"\n assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear\n top_k = min(top_k, logits.size(-1)) # Safety check\n if top_k > 0:\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n return logits\n\n\ndef sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):\n context = torch.tensor(context, dtype=torch.long, device=device)\n context = context.unsqueeze(0).repeat(num_samples, 1)\n generated = context\n with torch.no_grad():\n for _ in trange(length):\n\n inputs = {'input_ids': generated}\n if is_xlnet: \n # XLNet is a direct (predict same token, not next token) and bi-directional model by default\n # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)\n input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)\n perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)\n perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token\n target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)\n target_mapping[0, 0, -1] = 1.0 # predict last token\n inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}\n\n outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)\n next_token_logits = outputs[0][0, -1, :] / temperature\n filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)\n return generated\n\ndef generate(args, model, tokenizer, raw_text):\n context_tokens = tokenizer.encode(raw_text)\n out = sample_sequence(\n model=model,\n context=context_tokens,\n length=args.length,\n temperature=args.temperature,\n top_k=args.top_k,\n top_p=args.top_p,\n 
device=args.device,\n is_xlnet=bool(args.model_type == \"xlnet\"),\n )\n out = out[0, len(context_tokens):].tolist()\n text = tokenizer.decode(out, clean_up_tokenization_spaces=True)\n return text\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()))\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS))\n parser.add_argument(\"--prompt\", type=str, default=\"\")\n parser.add_argument(\"--padding_text\", type=str, default=\"\")\n parser.add_argument(\"--length\", type=int, default=20)\n parser.add_argument(\"--temperature\", type=float, default=1.0)\n parser.add_argument(\"--top_k\", type=int, default=0)\n parser.add_argument(\"--top_p\", type=float, default=0.9)\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n args = parser.parse_args()\n\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n\n set_seed(args)\n\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n model = model_class.from_pretrained(args.model_name_or_path)\n model.to(args.device)\n model.eval()\n\n if args.length < 0 and model.config.max_position_embeddings > 0:\n args.length = model.config.max_position_embeddings\n elif 0 < model.config.max_position_embeddings < args.length:\n args.length = model.config.max_position_embeddings # No generation bigger than model size \n elif args.length < 0:\n args.length = MAX_LENGTH # avoid infinite loop\n\n logger.info(args)\n \n plurals = []\n \n with open('top_1000_plurals_gold.txt', 'r') as f:\n for line in f:\n if 'Noun' not in line:\n plurals.append(line.lower().replace('\\n','').split('\\t')[1].split(',')[0])\n \n singulars = []\n for plural in plurals:\n prompt = f\"mats : mat . analyses : analysis . advances : advance . criteria : criterion . actresses : actress . views : view . kinds : kind . arts : art . efforts : effort . lacks : lack . countries : country . women : woman . wives : wife . courses : course . {plural} : \"\n singular = generate(args, model, tokenizer, prompt)\n singular = singular.split(\".\")[0]\n singulars.append(singular)\n \n with open('singular_predictions.txt', 'w') as f:\n for plural, singular in zip(plurals, singulars):\n f.write(f\"Plural: {plural}, Singular: {singular}\\n\") \n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.functional.softmax",
"numpy.random.seed",
"torch.zeros",
"torch.manual_seed",
"torch.tensor",
"torch.no_grad",
"torch.sort",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.topk",
"torch.cuda.device_count"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andreandradecosta/Adversarial_Autoencoder | [
"255a5cc021a46d9f8320a8608f15370d3e89e29e"
] | [
"semi_supervised_adversarial_autoencoder.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport datetime\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# Get the MNIST data\nmnist = input_data.read_data_sets('./Data', one_hot=True)\n\n# Parameters\ninput_dim = 784\nn_l1 = 1000\nn_l2 = 1000\nz_dim = 10\nbatch_size = 100\nn_epochs = 1000\nlearning_rate = 0.001\nbeta1 = 0.9\nresults_path = './Results/Semi_Supervised'\nn_labels = 10\nn_labeled = 1000\n\n# Placeholders for input data and the targets\nx_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Input')\nx_input_l = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Labeled_Input')\ny_input = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels], name='Labels')\nx_target = tf.placeholder(dtype=tf.float32, shape=[batch_size, input_dim], name='Target')\nreal_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, z_dim], name='Real_distribution')\ncategorial_distribution = tf.placeholder(dtype=tf.float32, shape=[batch_size, n_labels],\n name='Categorical_distribution')\nmanual_decoder_input = tf.placeholder(dtype=tf.float32, shape=[1, z_dim + n_labels], name='Decoder_input')\n\n\ndef form_results():\n \"\"\"\n Forms folders for each run to store the tensorboard files, saved models and the log files.\n :return: three string pointing to tensorboard, saved models and log paths respectively.\n \"\"\"\n folder_name = \"/{0}_{1}_{2}_{3}_{4}_{5}_Semi_Supervised\". \\\n format(datetime.datetime.now(), z_dim, learning_rate, batch_size, n_epochs, beta1)\n tensorboard_path = results_path + folder_name + '/Tensorboard'\n saved_model_path = results_path + folder_name + '/Saved_models/'\n log_path = results_path + folder_name + '/log'\n if not os.path.exists(results_path + folder_name):\n os.mkdir(results_path + folder_name)\n os.mkdir(tensorboard_path)\n os.mkdir(saved_model_path)\n os.mkdir(log_path)\n return tensorboard_path, saved_model_path, log_path\n\n\ndef generate_image_grid(sess, op):\n \"\"\"\n Generates a grid of images by passing a set of numbers to the decoder and getting its output.\n :param sess: Tensorflow Session required to get the decoder output\n :param op: Operation that needs to be called inorder to get the decoder output\n :return: None, displays a matplotlib window with all the merged images.\n \"\"\"\n nx, ny = 10, 10\n random_inputs = np.random.randn(10, z_dim) * 5.\n sample_y = np.identity(10)\n plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)\n i = 0\n for r in random_inputs:\n for t in sample_y:\n r, t = np.reshape(r, (1, z_dim)), np.reshape(t, (1, n_labels))\n dec_input = np.concatenate((t, r), 1)\n x = sess.run(op, feed_dict={manual_decoder_input: dec_input})\n ax = plt.subplot(gs[i])\n i += 1\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_aspect('auto')\n plt.show()\n\n\ndef dense(x, n1, n2, name):\n \"\"\"\n Used to create a dense layer.\n :param x: input tensor to the dense layer\n :param n1: no. of input neurons\n :param n2: no. 
of output neurons\n :param name: name of the entire dense layer.i.e, variable scope name.\n :return: tensor with shape [batch_size, n2]\n \"\"\"\n with tf.variable_scope(name, reuse=None):\n weights = tf.get_variable(\"weights\", shape=[n1, n2],\n initializer=tf.random_normal_initializer(mean=0., stddev=0.01))\n bias = tf.get_variable(\"bias\", shape=[n2], initializer=tf.constant_initializer(0.0))\n out = tf.add(tf.matmul(x, weights), bias, name='matmul')\n return out\n\n\n# The autoencoder network\ndef encoder(x, reuse=False, supervised=False):\n \"\"\"\n Encode part of the autoencoder.\n :param x: input to the autoencoder\n :param reuse: True -> Reuse the encoder variables, False -> Create or search of variables before creating\n :param supervised: True -> returns output without passing it through softmax,\n False -> returns output after passing it through softmax.\n :return: tensor which is the classification output and a hidden latent variable of the autoencoder.\n \"\"\"\n if reuse:\n tf.get_variable_scope().reuse_variables()\n with tf.name_scope('Encoder'):\n e_dense_1 = tf.nn.relu(dense(x, input_dim, n_l1, 'e_dense_1'))\n e_dense_2 = tf.nn.relu(dense(e_dense_1, n_l1, n_l2, 'e_dense_2'))\n latent_variable = dense(e_dense_2, n_l2, z_dim, 'e_latent_variable')\n cat_op = dense(e_dense_2, n_l2, n_labels, 'e_label')\n if not supervised:\n softmax_label = tf.nn.softmax(logits=cat_op, name='e_softmax_label')\n else:\n softmax_label = cat_op\n return softmax_label, latent_variable\n\n\ndef decoder(x, reuse=False):\n \"\"\"\n Decoder part of the autoencoder.\n :param x: input to the decoder\n :param reuse: True -> Reuse the decoder variables, False -> Create or search of variables before creating\n :return: tensor which should ideally be the input given to the encoder.\n \"\"\"\n if reuse:\n tf.get_variable_scope().reuse_variables()\n with tf.name_scope('Decoder'):\n d_dense_1 = tf.nn.relu(dense(x, z_dim + n_labels, n_l2, 'd_dense_1'))\n d_dense_2 = tf.nn.relu(dense(d_dense_1, n_l2, n_l1, 'd_dense_2'))\n output = tf.nn.sigmoid(dense(d_dense_2, n_l1, input_dim, 'd_output'))\n return output\n\n\ndef discriminator_gauss(x, reuse=False):\n \"\"\"\n Discriminator that is used to match the posterior distribution with a given gaussian distribution.\n :param x: tensor of shape [batch_size, z_dim]\n :param reuse: True -> Reuse the discriminator variables,\n False -> Create or search of variables before creating\n :return: tensor of shape [batch_size, 1]\n \"\"\"\n if reuse:\n tf.get_variable_scope().reuse_variables()\n with tf.name_scope('Discriminator_Gauss'):\n dc_den1 = tf.nn.relu(dense(x, z_dim, n_l1, name='dc_g_den1'))\n dc_den2 = tf.nn.relu(dense(dc_den1, n_l1, n_l2, name='dc_g_den2'))\n output = dense(dc_den2, n_l2, 1, name='dc_g_output')\n return output\n\n\ndef discriminator_categorical(x, reuse=False):\n \"\"\"\n Discriminator that is used to match the posterior distribution with a given categorical distribution.\n :param x: tensor of shape [batch_size, n_labels]\n :param reuse: True -> Reuse the discriminator variables,\n False -> Create or search of variables before creating\n :return: tensor of shape [batch_size, 1]\n \"\"\"\n if reuse:\n tf.get_variable_scope().reuse_variables()\n with tf.name_scope('Discriminator_Categorial'):\n dc_den1 = tf.nn.relu(dense(x, n_labels, n_l1, name='dc_c_den1'))\n dc_den2 = tf.nn.relu(dense(dc_den1, n_l1, n_l2, name='dc_c_den2'))\n output = dense(dc_den2, n_l2, 1, name='dc_c_output')\n return output\n\n\ndef next_batch(x, y, batch_size):\n \"\"\"\n Used to 
return a random batch from the given inputs.\n :param x: Input images of shape [None, 784]\n :param y: Input labels of shape [None, 10]\n :param batch_size: integer, batch size of images and labels to return\n :return: x -> [batch_size, 784], y-> [batch_size, 10]\n \"\"\"\n index = np.arange(n_labeled)\n random_index = np.random.permutation(index)[:batch_size]\n return x[random_index], y[random_index]\n\n\ndef train(train_model=True):\n \"\"\"\n Used to train the autoencoder by passing in the necessary inputs.\n :param train_model: True -> Train the model, False -> Load the latest trained model and show the image grid.\n :return: does not return anything\n \"\"\"\n\n # Reconstruction Phase\n with tf.variable_scope(tf.get_variable_scope()):\n encoder_output_label, encoder_output_latent = encoder(x_input)\n # Concat class label and the encoder output\n decoder_input = tf.concat([encoder_output_label, encoder_output_latent], 1)\n decoder_output = decoder(decoder_input)\n\n # Regularization Phase\n with tf.variable_scope(tf.get_variable_scope()):\n d_g_real = discriminator_gauss(real_distribution)\n d_g_fake = discriminator_gauss(encoder_output_latent, reuse=True)\n\n with tf.variable_scope(tf.get_variable_scope()):\n d_c_real = discriminator_categorical(categorial_distribution)\n d_c_fake = discriminator_categorical(encoder_output_label, reuse=True)\n\n # Semi-Supervised Classification Phase\n with tf.variable_scope(tf.get_variable_scope()):\n encoder_output_label_, _ = encoder(x_input_l, reuse=True, supervised=True)\n\n # Generate output images\n with tf.variable_scope(tf.get_variable_scope()):\n decoder_image = decoder(manual_decoder_input, reuse=True)\n\n # Classification accuracy of encoder\n correct_pred = tf.equal(tf.argmax(encoder_output_label_, 1), tf.argmax(y_input, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Autoencoder loss\n autoencoder_loss = tf.reduce_mean(tf.square(x_target - decoder_output))\n\n # Gaussian Discriminator Loss\n dc_g_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_real), logits=d_g_real))\n dc_g_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_g_fake), logits=d_g_fake))\n dc_g_loss = dc_g_loss_fake + dc_g_loss_real\n\n # Categorical Discrimminator Loss\n dc_c_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real), logits=d_c_real))\n dc_c_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake), logits=d_c_fake))\n dc_c_loss = dc_c_loss_fake + dc_c_loss_real\n\n # Generator loss\n generator_g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_fake), logits=d_g_fake))\n generator_c_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_fake), logits=d_c_fake))\n generator_loss = generator_c_loss + generator_g_loss\n\n # Supervised Encoder Loss\n supervised_encoder_loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_input, logits=encoder_output_label_))\n\n all_variables = tf.trainable_variables()\n dc_g_var = [var for var in all_variables if 'dc_g_' in var.name]\n dc_c_var = [var for var in all_variables if 'dc_c_' in var.name]\n en_var = [var for var in all_variables if 'e_' in var.name]\n\n # Optimizers\n autoencoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=beta1).minimize(autoencoder_loss)\n discriminator_g_optimizer = 
tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=beta1).minimize(dc_g_loss, var_list=dc_g_var)\n discriminator_c_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=beta1).minimize(dc_c_loss, var_list=dc_c_var)\n generator_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=beta1).minimize(generator_loss, var_list=en_var)\n supervised_encoder_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=beta1).minimize(supervised_encoder_loss,\n var_list=en_var)\n\n init = tf.global_variables_initializer()\n\n # Reshape immages to display them\n input_images = tf.reshape(x_input, [-1, 28, 28, 1])\n generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1])\n\n # Tensorboard visualization\n tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss)\n tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss)\n tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss)\n tf.summary.scalar(name='Generator Loss', tensor=generator_loss)\n tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss)\n tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent)\n tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution)\n tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label)\n tf.summary.histogram(name='Real Categorical Distribution', values=categorial_distribution)\n tf.summary.image(name='Input Images', tensor=input_images, max_outputs=10)\n tf.summary.image(name='Generated Images', tensor=generated_images, max_outputs=10)\n summary_op = tf.summary.merge_all()\n\n # Saving the model\n saver = tf.train.Saver()\n step = 0\n with tf.Session() as sess:\n if train_model:\n tensorboard_path, saved_model_path, log_path = form_results()\n sess.run(init)\n writer = tf.summary.FileWriter(logdir=tensorboard_path, graph=sess.graph)\n x_l, y_l = mnist.test.next_batch(n_labeled)\n for i in range(n_epochs):\n n_batches = int(n_labeled / batch_size)\n print(\"------------------Epoch {}/{}------------------\".format(i, n_epochs))\n for b in range(1, n_batches + 1):\n z_real_dist = np.random.randn(batch_size, z_dim) * 5.\n real_cat_dist = np.random.randint(low=0, high=10, size=batch_size)\n real_cat_dist = np.eye(n_labels)[real_cat_dist]\n batch_x_ul, _ = mnist.train.next_batch(batch_size)\n batch_x_l, batch_y_l = next_batch(x_l, y_l, batch_size=batch_size)\n sess.run(autoencoder_optimizer, feed_dict={x_input: batch_x_ul, x_target: batch_x_ul})\n sess.run(discriminator_g_optimizer,\n feed_dict={x_input: batch_x_ul, x_target: batch_x_ul, real_distribution: z_real_dist})\n sess.run(discriminator_c_optimizer,\n feed_dict={x_input: batch_x_ul, x_target: batch_x_ul,\n categorial_distribution: real_cat_dist})\n sess.run(generator_optimizer, feed_dict={x_input: batch_x_ul, x_target: batch_x_ul})\n sess.run(supervised_encoder_optimizer, feed_dict={x_input_l: batch_x_l, y_input: batch_y_l})\n if b % 5 == 0:\n a_loss, d_g_loss, d_c_loss, g_loss, s_loss, summary = sess.run(\n [autoencoder_loss, dc_g_loss, dc_c_loss, generator_loss, supervised_encoder_loss,\n summary_op],\n feed_dict={x_input: batch_x_ul, x_target: batch_x_ul,\n real_distribution: z_real_dist, y_input: batch_y_l, x_input_l: batch_x_l,\n categorial_distribution: real_cat_dist})\n writer.add_summary(summary, global_step=step)\n print(\"Epoch: {}, iteration: {}\".format(i, b))\n print(\"Autoencoder Loss: {}\".format(a_loss))\n print(\"Discriminator Gauss 
Loss: {}\".format(d_g_loss))\n print(\"Discriminator Categorical Loss: {}\".format(d_c_loss))\n print(\"Generator Loss: {}\".format(g_loss))\n print(\"Supervised Loss: {}\\n\".format(s_loss))\n with open(log_path + '/log.txt', 'a') as log:\n log.write(\"Epoch: {}, iteration: {}\\n\".format(i, b))\n log.write(\"Autoencoder Loss: {}\\n\".format(a_loss))\n log.write(\"Discriminator Gauss Loss: {}\".format(d_g_loss))\n log.write(\"Discriminator Categorical Loss: {}\".format(d_c_loss))\n log.write(\"Generator Loss: {}\\n\".format(g_loss))\n log.write(\"Supervised Loss: {}\".format(s_loss))\n step += 1\n acc = 0\n num_batches = int(mnist.validation.num_examples/batch_size)\n for j in range(num_batches):\n # Classify unseen validation data instead of test data or train data\n batch_x_l, batch_y_l = mnist.validation.next_batch(batch_size=batch_size)\n encoder_acc = sess.run(accuracy, feed_dict={x_input_l: batch_x_l, y_input: batch_y_l})\n acc += encoder_acc\n acc /= num_batches\n print(\"Encoder Classification Accuracy: {}\".format(acc))\n with open(log_path + '/log.txt', 'a') as log:\n log.write(\"Encoder Classification Accuracy: {}\".format(acc))\n saver.save(sess, save_path=saved_model_path, global_step=step)\n else:\n # Get the latest results folder\n all_results = os.listdir(results_path)\n all_results.sort()\n saver.restore(sess, save_path=tf.train.latest_checkpoint(results_path + '/' +\n all_results[-1] + '/Saved_models/'))\n generate_image_grid(sess, op=decoder_image)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Autoencoder Train Parameter\")\n parser.add_argument('--train', '-t', type=bool, default=True,\n help='Set to True to train a new model, False to load weights and display image grid')\n args = parser.parse_args()\n train(train_model=args.train)\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.concat",
"tensorflow.cast",
"numpy.concatenate",
"numpy.random.randn",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"numpy.random.randint",
"numpy.reshape",
"numpy.arange",
"tensorflow.summary.image",
"numpy.eye",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.Session",
"tensorflow.random_normal_initializer",
"tensorflow.matmul",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"numpy.identity",
"tensorflow.summary.merge_all",
"matplotlib.pyplot.show",
"tensorflow.summary.histogram",
"tensorflow.nn.softmax",
"tensorflow.summary.FileWriter",
"tensorflow.train.latest_checkpoint",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.constant_initializer",
"numpy.random.permutation",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
policy-in-practice/stata-linter | [
"eaff30abdf3597d6f53839e9082e83ae09dedde3"
] | [
"src/stata_linter_detect.py"
] | [
"# version 1.0 (based on DIME 0.0.2) 29/12/2021\n# Import packages ====================\nimport os\nimport re\nimport sys\nimport pandas as pd\nimport argparse\n\n# simple run entry point\ndef run():\n parser = argparse.ArgumentParser(description='Lint a Stata do-file.')\n parser.add_argument('filename', metavar='file', type=str, nargs='?',\n help='The name of the file to lint.')\n parser.add_argument('--indent', type=int, nargs='?', default=4,\n help=\"Number of spaces to use for each indentation\"\n )\n parser.add_argument('--nocheck', action='store_true',\n help=\"Disable checking\"\n )\n parser.add_argument('--suppress', action='store_true',\n help=\"Suppress line item printout\"\n )\n parser.add_argument('--summary', action='store_true',\n help=\"Print a summary of bad practices detected\"\n )\n parser.add_argument('--linemax', type=int, nargs='?', default=80,\n help=\"Maximum number of characters per line\"\n )\n parser.add_argument('--excel_output', type=str, nargs='?', default=\"\",\n help=\"If specified, save results to Excel workbook\"\n )\n \n \n args=parser.parse_args()\n return stata_linter_detect_py(\n input_file=args.filename,\n indent=args.indent,\n nocheck=\"1\" if args.nocheck else \"0\", \n suppress=\"1\" if args.suppress else \"0\",\n summary=\"1\" if args.summary else \"0\",\n excel=args.excel_output,\n linemax=args.linemax,\n tab_space=args.indent\n )\n\n# Style ===================\n\n# Avoid to use abstract index names ----------------\ndef abstract_index_name(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n if re.search(r\"^(qui[a-z]*\\s+)?(foreach|forv)\", line.lstrip()):\n list_of_words = line.split()\n # get the index used in for loops\n for word in list_of_words:\n if re.search(r\"^(foreach)\", word):\n index_in_loop = list_of_words[list_of_words.index(word) + 1]\n break\n elif re.search(r\"^(forv)\", word):\n index_in_loop = list_of_words[list_of_words.index(word) + 1].split(\"=\")[0]\n break\n # warn if the number of characters in the index is just 1\n if len(set(index_in_loop)) == 1:\n print_output = (\n '''In for loops, index names should describe what the code is looping over. 
''' +\n '''Do not use an abstract index such as \"{:s}\".'''.format(index_in_loop)\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"abstract_index_name\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n\n return([style_dictionary, excel_output_list])\n\n# Use proper indentations in for-loops, while-loops, and if/else statements ----------------\ndef proper_indent(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n line_rstrip = re.sub(r\"(\\/\\/)|(\\/\\*).*\", r\"\", line).rstrip()\n if len(line_rstrip) > 0:\n # check if the line includes for-loop, while-loop, or if/else statements\n if (\n (re.search(r\"^(qui[a-z]*\\s+)?(foreach |forv|if |else )\", line.lstrip()) != None) &\n (line_rstrip[-1] == \"{\")\n ):\n line_ws = line.expandtabs(tab_space)\n j = 1\n # find the next non-blank line \n while (j + line_index <= len(input_lines)):\n if (j + line_index == len(input_lines)):\n next_line = input_lines[line_index + 1]\n break\n if (\n (len(input_lines[line_index + j].strip()) > 0) &\n (re.search(r\"^(\\*|\\/\\/)\", input_lines[line_index + j].lstrip()) == None)\n ):\n next_line = input_lines[line_index + j]\n break\n j += 1\n # warn if the next non-blank line is not properly indented\n next_line_ws = next_line.expandtabs(tab_space)\n line_left_spaces = len(line_ws) - len(line_ws.lstrip())\n next_line_left_spaces = len(next_line_ws) - len(next_line_ws.lstrip())\n if (next_line_left_spaces - line_left_spaces < indent) & (len(next_line_ws.strip()) > 0):\n print_output = (\n '''After declaring for loop statement or if-else statement, ''' +\n '''add indentation ({:d} whitespaces).'''.format(indent)\n )\n\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"proper_indent\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# Use indentations after line breaks (///) ----------------\ndef indent_after_newline(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # check if the line includes \"///\" but the previous line does not include \"///\"\n if (\n (re.search(r\"\\/\\/\\/\", line) != None) & \n (re.search(r\"\\/\\/\\/\", input_lines[max(line_index - 1, 0)]) == None)\n ):\n line_ws = line.expandtabs(tab_space)\n # warn if the following line (after line break) is not properly indented\n next_line = input_lines[line_index + 1]\n next_line_ws = next_line.expandtabs(tab_space)\n line_left_spaces = len(line_ws) - len(line_ws.lstrip())\n next_line_left_spaces = len(next_line_ws) - len(next_line_ws.lstrip())\n if (next_line_left_spaces - line_left_spaces < indent) & (len(next_line_ws.strip()) > 0):\n print_output = (\n '''After new line statement (\"///\"), add indentation ({:d} whitespaces).'''.format(indent)\n )\n\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"indent_after_newline\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# No whitespaces around math symbols ----------------\ndef whitespace_symbol(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if no whitespaces around math symbols\n if 
re.search(r\"(( )*(<|>|=|==|\\+)\\w|\\w(<|>|=|==|\\+)( )*)\", line):\n print_output = (\n '''Before and after math symbols (>, <, =, +, etc), it is recommended to use whitespaces. ''' +\n '''(For example, do \"gen a = b + c\" instead of \"gen a=b+c\".)'''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"whitespace_symbol\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# For missing values \"var < .\" or \"var != .\" are used (!missing(var) is recommended) ----------------\ndef condition_missing(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if \"var < .\" or \"var != .\" are used \n if re.search(r\"(<|!=)( )*\\.\", line):\n print_output = (\n '''Use \"!missing(var)\" instead of \"var < .\" or \"var != .\".'''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"condition_missing\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# Using \"#delimit\" should be avoided\ndef dont_use_delimit(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if \"#delimit\" is used\n if re.search(r\"#delimit(?! cr)\", line):\n print_output = (\n '''Avoid to use \"delimit\". For line breaks, use \"///\" instead.'''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"dont_use_delimit\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# Using \"cd\" should be avoided\ndef dont_use_cd(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if \"#cd\" is used\n if re.search(r\"(^| )cd \", line.lstrip()):\n print_output = (\n '''Do not use \"cd\" but use absolute and dynamic file paths.'''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"dont_use_cd\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# If a line is too lone, it should be broken into multiple lines\ndef too_long_line(\n line_index, line, input_lines, indent, linemax,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if the line is too long (and line breaks are not used yet)\n if (len(line) >= linemax) & (\"///\" not in line):\n print_output = (\n '''This line is too long ({:d} characters). 
'''.format(len(line)) +\n '''Use \"///\" for line breaks so that one line has at most {:d} characters.'''.format(linemax)\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"too_long_line\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# \"if\" condition should be explicit \ndef explicit_if(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if \"if\" statement is used but the condition is not explicit\n search_if = re.search(r\"^(if|else if) \", line.lstrip())\n if (search_if != None):\n if (\n (re.search(r\"missing\\(\", line[search_if.span()[0]:]) == None) &\n (re.search(r\"((=|<|>))\", line[search_if.span()[0]:]) == None)\n ):\n print_output = (\n '''Always explicitly specify the condition in the if statement. ''' +\n '''(For example, declare \"if var == 1\" instead of \"if var\".) '''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n style_dictionary[\"explicit_if\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n\n return([style_dictionary, excel_output_list])\n\n# Use parentheses for global macros\ndef parentheses_for_global_macro(\n line_index, line, input_lines, indent,\n suppress, style_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if global macros are used without parentheses\n if re.search(r\"\\\\$\\w\", line):\n print_output = (\n '''Always use \"\\${}\" for global macros. '''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n style_dictionary[\"parentheses_for_global_macro\"] += 1\n excel_output_list.append([line_index + 1, \"style\", print_output])\n return([style_dictionary, excel_output_list])\n\n# Check ===================\n\n# Ask if missing variables are properly taken into account\ndef check_missing(\n line_index, line, input_lines, indent,\n suppress, check_dictionary, excel_output_list,\n tab_space\n ):\n # ask if missing variables are properly taken into account\n if re.search(r\"(~=)|(!=)(?!(( )*\\.))\", line):\n print_output = (\n '''Are you taking missing values into account properly? ''' +\n '''(Remember that \"a != 0\" includes cases where a is missing.)'''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) check: '''.format(line_index + 1) +\n print_output\n )\n\n check_dictionary[\"check_missing\"] += 1\n excel_output_list.append([line_index + 1, \"check\", print_output])\n return([check_dictionary, excel_output_list])\n\n# Ask if the user may be using backslashes in file paths\ndef backslash_in_path(\n line_index, line, input_lines, indent,\n suppress, check_dictionary, excel_output_list,\n tab_space\n ):\n # warn if anything is sandwiched by backslashes, \n # which suggests that the user may be using backslashes for file paths\n if re.search(r\"\\\\(\\w| |-)+\\\\\", line):\n print_output = (\n '''Are you using backslashes (\"\\\\\") for a file path? 
''' +\n '''If so, use forward slashes (\"/\") instead.'''\n )\n if suppress != \"1\":\n print(\n '''(line {:d}) check: '''.format(line_index + 1) +\n print_output\n )\n\n check_dictionary[\"backslash_in_path\"] += 1\n excel_output_list.append([line_index + 1, \"check\", print_output])\n return([check_dictionary, excel_output_list])\n\ndef bang_not_tilde(\n line_index, line, input_lines, indent,\n suppress, check_dictionary, excel_output_list,\n tab_space\n ):\n\n # warn if tilde is used, which suggests \n # that the user may be using tilde for negation\n if re.search(r\"~\", line):\n print_output = (\n '''Are you using tilde (~) for negation? ''' +\n '''If so, for negation, use bang (!) instead of tilde (~).'''\n )\n\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n\n check_dictionary[\"bang_not_tilde\"] += 1\n excel_output_list.append([line_index + 1, \"check\", print_output])\n return([check_dictionary, excel_output_list])\n\n# Function to update comment delimiter ======================\n# (detection works only when comment delimiter == 0)\ndef update_comment_delimiter(comment_delimiter, line):\n # if \"/*\" and \"*/\" are in the same line, never mind\n if re.search(r\"\\/\\*.*\\*\\/\", line):\n pass\n # if \"/*\" (opening) detected, add 1\n elif re.search(r\"\\/\\*\", line):\n comment_delimiter += 1\n # if \"*/\" (closing) detected, subtract 1\n elif (re.search(r\"\\*\\/\", line) != None) & (comment_delimiter > 0):\n comment_delimiter -= 1\n return(comment_delimiter)\n\n# Run linter program to detect bad coding practices ===================\ndef stata_linter_detect_py(\n input_file, indent, nocheck, \n suppress, summary, excel, linemax,\n tab_space\n ):\n\n excel_output_list = []\n\n # style ============\n if suppress != \"1\":\n print(\"Style =====================\")\n # Any hard tabs in the do file\n with open(input_file, \"r\") as f:\n input_lines = f.readlines()\n comment_delimiter = 0\n for line_index, line in enumerate(input_lines):\n\n comment_delimiter = update_comment_delimiter(comment_delimiter, line)\n\n if comment_delimiter == 0:\n hard_tab = \"No\"\n if re.search(r\"\\t\", line):\n hard_tab = \"Yes\"\n print_output = (\n '''Use {:d} white spaces instead of tabs. 
'''.format(int(indent)) +\n '''(This may apply to other lines as well.)'''\n )\n excel_output_list.append([line_index + 1, \"style\", print_output])\n if suppress != \"1\":\n print(\n '''(line {:d}) style: '''.format(line_index + 1) +\n print_output\n )\n break\n\n # Other line-by-line bad practices\n style_dictionary = {\n \"abstract_index_name\": 0,\n \"proper_indent\": 0,\n \"indent_after_newline\": 0,\n \"whitespace_symbol\": 0,\n \"condition_missing\": 0,\n \"explicit_if\": 0,\n \"dont_use_delimit\": 0,\n \"dont_use_cd\": 0,\n \"too_long_line\": 0,\n \"parentheses_for_global_macro\": 0\n }\n\n with open(input_file, \"r\") as f:\n input_lines = f.readlines()\n comment_delimiter = 0\n for line_index, line in enumerate(input_lines):\n # update comment delimiter\n comment_delimiter = update_comment_delimiter(comment_delimiter, line)\n\n if re.search(r\"^(\\*|\\/\\/)\", line.lstrip()) != None:\n pass\n elif comment_delimiter > 0:\n pass\n else:\n style_dictionary, excel_output_list = abstract_index_name(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = proper_indent(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = indent_after_newline(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = whitespace_symbol(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = condition_missing(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = explicit_if(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = dont_use_delimit(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = dont_use_cd(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = too_long_line(\n line_index, line, input_lines, int(indent), int(linemax),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n style_dictionary, excel_output_list = parentheses_for_global_macro(\n line_index, line, input_lines, int(indent),\n suppress, style_dictionary, excel_output_list,\n int(tab_space)\n )\n # check ============\n check_dictionary = {\n \"check_missing\": 0,\n \"backslash_in_path\": 0,\n \"bang_not_tilde\": 0,\n }\n\n if int(nocheck) == 0:\n if suppress != \"1\":\n print(\"Check =====================\")\n with open(input_file, \"r\") as f:\n input_lines = f.readlines()\n comment_delimiter = 0\n for line_index, line in enumerate(input_lines):\n\n # update comment delimiter\n comment_delimiter = update_comment_delimiter(comment_delimiter, line)\n\n if re.search(r\"^(\\*|\\/\\/)\", line.lstrip()) != None:\n pass\n elif comment_delimiter > 0:\n pass\n else:\n check_dictionary, excel_output_list = check_missing(\n line_index, line, input_lines, int(indent),\n suppress, check_dictionary, excel_output_list,\n int(tab_space)\n )\n check_dictionary, excel_output_list = backslash_in_path(\n line_index, line, 
input_lines, int(indent),\n suppress, check_dictionary, excel_output_list,\n int(tab_space)\n )\n check_dictionary, excel_output_list = bang_not_tilde(\n line_index, line, input_lines, int(indent),\n suppress, check_dictionary, excel_output_list,\n int(tab_space)\n )\n\n if summary == \"1\":\n print(\"\\nSummary (number of lines where bad practices are detected) =======================\")\n\n print(\"\\n[Style]\")\n print(\"Hard tabs instead of soft tabs (whitespaces) used: {:s}\".format(hard_tab))\n print(\"Abstract index used in for-loop: {:d}\".format(style_dictionary[\"abstract_index_name\"]))\n print(\"Not proper indentation in for-loop for if-else statement: {:d}\".format(style_dictionary[\"proper_indent\"]))\n print(\"Not proper indentation in newline: {:d}\".format(style_dictionary[\"indent_after_newline\"]))\n print(\"Missing whitespaces around math symbols: {:d}\".format(style_dictionary[\"whitespace_symbol\"]))\n print(\"Incomplete conditions: {:d}\".format(style_dictionary[\"condition_missing\"]))\n print(\"Not explicit if statement: {:d}\".format(style_dictionary[\"explicit_if\"]))\n print(\"Delimit used: {:d}\".format(style_dictionary[\"dont_use_delimit\"]))\n print(\"cd used: {:d}\".format(style_dictionary[\"dont_use_cd\"]))\n print(\"Lines too long: {:d}\".format(style_dictionary[\"too_long_line\"]))\n print(\"Brackets not used for global macro: {:d}\".format(style_dictionary[\"parentheses_for_global_macro\"]))\n\n if int(nocheck) == 0:\n print(\"\\n[Check]\")\n print(\"Missing values properly treated?: {:d}\".format(check_dictionary[\"check_missing\"]))\n print(\"Backslash used in file path?: {:d}\".format(check_dictionary[\"backslash_in_path\"]))\n print(\"Bang (!) used instead of tilde (~) for negation?: {:d}\".format(check_dictionary[\"bang_not_tilde\"]))\n\n output_df = pd.DataFrame(excel_output_list)\n if excel != \"\":\n if (output_df.empty == True):\n output_df = pd.DataFrame(columns = [\"Line\", \"Type\", \"Problem\"])\n output_df.columns = [\"Line\", \"Type\", \"Problem\"]\n if os.path.exists(excel):\n with pd.ExcelWriter(excel, engine = \"openpyxl\", mode = \"a\") as writer:\n output_df.to_excel(writer, index = False, sheet_name = os.path.basename(input_file)[:20])\n else:\n with pd.ExcelWriter(excel) as writer:\n output_df.to_excel(writer, index = False, sheet_name = os.path.basename(input_file)[:20])\n print(\"\\n File {:s} created\".format(excel))\n\n return( not output_df.empty )\n"
] | [
[
"pandas.DataFrame",
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
josiahjohnston/PowerGenome | [
"6e8c353cb185479c0cf2235d8c6d02dcadd4990c"
] | [
"powergenome/load_data.py"
] | [
"\"Load and download data for use in other modules\"\n\nimport sqlite3\n\nimport geopandas as gpd\nimport pandas as pd\nimport requests\nimport sqlalchemy as sa\nfrom bs4 import BeautifulSoup\nfrom xlrd import XLRDError\n\nfrom powergenome.params import IPM_GEOJSON_PATH\n\n\ndef load_ipm_plant_region_map(pudl_engine):\n \"\"\"Load the table associating each power plant to an IPM region\n\n Parameters\n ----------\n pudl_engine : sqlalchemy.Engine\n A sqlalchemy connection for use by pandas\n\n Returns\n -------\n dataframe\n All plants in the NEEDS database and their associated IPM region. Columns are\n plant_id_eia and region.\n \"\"\"\n region_map_df = pd.read_sql_table(\n \"plant_region_map_epaipm\", con=pudl_engine, columns=[\"plant_id_eia\", \"region\"]\n )\n\n return region_map_df\n\n\ndef load_ownership_eia860(pudl_engine, data_years=[2017]):\n\n cols = [\n \"report_date\",\n \"utility_id_eia\",\n \"plant_id_eia\",\n \"generator_id\",\n \"operational_status_code\",\n \"owner_utility_id_eia\",\n \"owner_name\",\n \"owner_state\",\n \"fraction_owned\",\n ]\n ownership = pd.read_sql_table(\n \"ownership_eia860\", pudl_engine, columns=cols, parse_dates=[\"report_date\"]\n )\n ownership = ownership.loc[ownership[\"report_date\"].dt.year.isin(data_years)]\n\n return ownership\n\n\ndef load_plants_860(pudl_engine, data_years=[2017]):\n\n plants = pd.read_sql_table(\"plants_eia860\", pudl_engine, parse_dates=[\"report_date\"])\n\n plants = plants.loc[plants[\"report_date\"].dt.year.isin(data_years)]\n\n return plants\n"
] | [
[
"pandas.read_sql_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dominik-steenken/qiskit-terra | [
"1e04bad5067610abda5e7cbba36939745075f3b9",
"1e04bad5067610abda5e7cbba36939745075f3b9",
"1e04bad5067610abda5e7cbba36939745075f3b9",
"1e04bad5067610abda5e7cbba36939745075f3b9"
] | [
"qiskit/quantum_info/states/states.py",
"qiskit/visualization/latex.py",
"qiskit/pulse/commands/sample_pulse.py",
"qiskit/quantum_info/operators/channel/superop.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name,anomalous-backslash-in-string\n\n\"\"\"\nA collection of useful quantum information functions for states.\n\n\n\"\"\"\nimport logging\nimport numpy as np\nfrom qiskit.exceptions import QiskitError\n\nlogger = logging.getLogger(__name__)\n\n\ndef basis_state(str_state, num):\n \"\"\"\n Return a basis state ndarray.\n\n Args:\n str_state (string): a string representing the state.\n num (int): the number of qubits\n Returns:\n ndarray: state(2**num) a quantum state with basis basis state.\n Raises:\n QiskitError: if the dimensions is wrong\n \"\"\"\n n = int(str_state, 2)\n if num >= len(str_state):\n state = np.zeros(1 << num, dtype=complex)\n state[n] = 1\n return state\n else:\n raise QiskitError('size of bitstring is greater than num.')\n\n\ndef random_state(dim, seed=None):\n \"\"\"\n Return a random quantum state from the uniform (Haar) measure on\n state space.\n\n Args:\n dim (int): the dim of the state spaxe\n seed (int): Optional. To set a random seed.\n\n Returns:\n ndarray: state(2**num) a random quantum state.\n \"\"\"\n if seed is None:\n seed = np.random.randint(0, np.iinfo(np.int32).max)\n rng = np.random.RandomState(seed)\n logger.debug(\"random_state RandomState seeded with seed=%s\", seed)\n # Random array over interval (0, 1]\n x = rng.rand(dim)\n x += x == 0\n x = -np.log(x)\n sumx = sum(x)\n phases = rng.rand(dim)*2.0*np.pi\n return np.sqrt(x/sumx)*np.exp(1j*phases)\n\n\ndef projector(state, flatten=False):\n \"\"\"\n maps a pure state to a state matrix\n\n Args:\n state (ndarray): the number of qubits\n flatten (bool): determine if state matrix of column work\n Returns:\n ndarray: state_mat(2**num, 2**num) if flatten is false\n ndarray: state_mat(4**num) if flatten is true stacked on by the column\n \"\"\"\n density_matrix = np.outer(state.conjugate(), state)\n if flatten:\n return density_matrix.flatten(order='F')\n return density_matrix\n\n\ndef purity(state):\n \"\"\"Calculate the purity of a quantum state.\n\n Args:\n state (ndarray): a quantum state\n Returns:\n float: purity.\n \"\"\"\n rho = np.array(state)\n if rho.ndim == 1:\n return 1.0\n return np.real(np.trace(rho.dot(rho)))\n",
"# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name,anomalous-backslash-in-string,missing-docstring\n\n\"\"\"latex circuit visualization backends.\"\"\"\n\nimport collections\nimport io\nimport itertools\nimport json\nimport math\nimport operator\nimport re\n\nimport numpy as np\n\nfrom qiskit.visualization import exceptions\nfrom qiskit.visualization import qcstyle as _qcstyle\n\n\nclass QCircuitImage:\n \"\"\"This class contains methods to create \\\\LaTeX circuit images.\n\n The class targets the \\\\LaTeX package Q-circuit\n (https://arxiv.org/pdf/quant-ph/0406003).\n\n Thanks to Eric Sabo for the initial implementation for Qiskit.\n \"\"\"\n\n def __init__(self, qregs, cregs, ops, scale, style=None,\n plot_barriers=True, reverse_bits=False):\n \"\"\"\n Args:\n qregs (list): A list of tuples for the quantum registers\n cregs (list): A list of tuples for the classical registers\n ops (list): A list of dicts where each entry is a operation from\n the circuit.\n scale (float): image scaling\n style (dict or str): dictionary of style or file name of style file\n reverse_bits (bool): When set to True reverse the bit order inside\n registers for the output visualization.\n plot_barriers (bool): Enable/disable drawing barriers in the output\n circuit. Defaults to True.\n \"\"\"\n # style sheet\n self._style = _qcstyle.QCStyle()\n if style:\n if isinstance(style, dict):\n self._style.set_style(style)\n elif isinstance(style, str):\n with open(style, 'r') as infile:\n dic = json.load(infile)\n self._style.set_style(dic)\n\n # compiled qobj circuit\n self.ops = ops\n\n # image scaling\n self.scale = scale\n\n # Map of qregs to sizes\n self.qregs = {}\n\n # Map of cregs to sizes\n self.cregs = {}\n\n # List of qregs and cregs in order of appearance in code and image\n self.ordered_regs = []\n\n # Map from registers to the list they appear in the image\n self.img_regs = {}\n\n # Array to hold the \\\\LaTeX commands to generate a circuit image.\n self._latex = []\n\n # Variable to hold image depth (width)\n self.img_depth = 0\n\n # Variable to hold image width (height)\n self.img_width = 0\n\n # Variable to hold total circuit depth\n self.sum_column_widths = 0\n\n # Variable to hold total circuit width\n self.sum_row_heights = 0\n\n # em points of separation between circuit columns\n self.column_separation = 0.5\n\n # em points of separation between circuit row\n self.row_separation = 0.0\n\n # presence of \"box\" or \"target\" determines row spacing\n self.has_box = False\n self.has_target = False\n self.reverse_bits = reverse_bits\n self.plot_barriers = plot_barriers\n\n #################################\n self.qregs = collections.OrderedDict(_get_register_specs(qregs))\n self.qubit_list = qregs\n self.ordered_regs = qregs + cregs\n self.cregs = collections.OrderedDict(_get_register_specs(cregs))\n self.clbit_list = cregs\n self.img_regs = {bit: ind for ind, bit in\n enumerate(self.ordered_regs)}\n self.img_width = len(self.img_regs)\n self.wire_type = {}\n for key, value in self.ordered_regs:\n self.wire_type[(key, value)] = key in self.cregs.keys()\n\n def latex(self, aliases=None):\n \"\"\"Return LaTeX string representation of circuit.\n\n This method uses the LaTeX Qconfig package to create a graphical\n representation of the circuit.\n\n Returns:\n string: for writing to a LaTeX file.\n \"\"\"\n 
self._initialize_latex_array(aliases)\n self._build_latex_array(aliases)\n header_1 = r\"\"\"% \\documentclass[preview]{standalone}\n% If the image is too large to fit on this documentclass use\n\\documentclass[draft]{beamer}\n\"\"\"\n beamer_line = \"\\\\usepackage[size=custom,height=%d,width=%d,scale=%.1f]{beamerposter}\\n\"\n header_2 = r\"\"\"% instead and customize the height and width (in cm) to fit.\n% Large images may run out of memory quickly.\n% To fix this use the LuaLaTeX compiler, which dynamically\n% allocates memory.\n\\usepackage[braket, qm]{qcircuit}\n\\usepackage{amsmath}\n\\pdfmapfile{+sansmathaccent.map}\n% \\usepackage[landscape]{geometry}\n% Comment out the above line if using the beamer documentclass.\n\\begin{document}\n\\begin{equation*}\"\"\"\n qcircuit_line = r\"\"\"\n \\Qcircuit @C=%.1fem @R=%.1fem @!R {\n\"\"\"\n output = io.StringIO()\n output.write(header_1)\n output.write('%% img_width = %d, img_depth = %d\\n' % (self.img_width, self.img_depth))\n output.write(beamer_line % self._get_beamer_page())\n output.write(header_2)\n output.write(qcircuit_line %\n (self.column_separation, self.row_separation))\n for i in range(self.img_width):\n output.write(\"\\t \\t\")\n for j in range(self.img_depth + 1):\n cell_str = self._latex[i][j]\n # Don't truncate offset float if drawing a barrier\n if 'barrier' in cell_str:\n output.write(cell_str)\n else:\n # floats can cause \"Dimension too large\" latex error in\n # xymatrix this truncates floats to avoid issue.\n cell_str = re.sub(r'[-+]?\\d*\\.\\d{2,}|\\d{2,}',\n _truncate_float,\n cell_str)\n output.write(cell_str)\n if j != self.img_depth:\n output.write(\" & \")\n else:\n output.write(r'\\\\' + '\\n')\n output.write('\\t }\\n')\n output.write('\\\\end{equation*}\\n\\n')\n output.write('\\\\end{document}')\n contents = output.getvalue()\n output.close()\n return contents\n\n def _initialize_latex_array(self, aliases=None):\n # pylint: disable=unused-argument\n self.img_depth, self.sum_column_widths = self._get_image_depth(aliases)\n self.sum_row_heights = self.img_width\n # choose the most compact row spacing, while not squashing them\n if self.has_box:\n self.row_separation = 0.0\n elif self.has_target:\n self.row_separation = 0.2\n else:\n self.row_separation = 1.0\n self._latex = [\n [\"\\\\cw\" if self.wire_type[self.ordered_regs[j]]\n else \"\\\\qw\" for i in range(self.img_depth + 1)]\n for j in range(self.img_width)]\n self._latex.append([\" \"] * (self.img_depth + 1))\n for i in range(self.img_width):\n if self.wire_type[self.ordered_regs[i]]:\n self._latex[i][0] = \"\\\\lstick{\" + self.ordered_regs[i][0].name + \\\n \"_{\" + str(self.ordered_regs[i][1]) + \"}\" + \\\n \": 0}\"\n else:\n self._latex[i][0] = \"\\\\lstick{\" + \\\n self.ordered_regs[i][0].name + \"_{\" + \\\n str(self.ordered_regs[i][1]) + \"}\" + \\\n \": \\\\ket{0}}\"\n\n def _get_image_depth(self, aliases=None):\n \"\"\"Get depth information for the circuit.\n\n Args:\n aliases (dict): dict mapping the current qubits in the circuit to\n new qubit names.\n\n Returns:\n int: number of columns in the circuit\n int: total size of columns in the circuit\n\n Raises:\n VisualizationError: if trying to draw unsupported gates\n \"\"\"\n columns = 2 # wires in the beginning and end\n is_occupied = [False] * self.img_width\n max_column_width = {}\n for op in self.ops:\n # useful information for determining row spacing\n boxed_gates = ['u0', 'u1', 'u2', 'u3', 'x', 'y', 'z', 'h', 's',\n 'sdg', 't', 'tdg', 'rx', 'ry', 'rz', 'ch', 'cy',\n 'crz', 'cu3', 
'id']\n target_gates = ['cx', 'ccx']\n if op.name in boxed_gates:\n self.has_box = True\n if op.name in target_gates:\n self.has_target = True\n\n # useful information for determining column widths and final image\n # scaling\n if op.name not in ['measure', 'reset', 'barrier']:\n qarglist = op.qargs\n if aliases is not None:\n qarglist = map(lambda x: aliases[x], qarglist)\n if len(qarglist) == 1:\n pos_1 = self.img_regs[(qarglist[0][0],\n qarglist[0][1])]\n if op.condition:\n mask = self._get_mask(op.condition[0])\n cl_reg = self.clbit_list[self._ffs(mask)]\n if_reg = cl_reg[0]\n pos_2 = self.img_regs[cl_reg]\n for i in range(pos_1, pos_2 + self.cregs[if_reg]):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(pos_1, pos_2 + 1):\n is_occupied[j] = True\n break\n else:\n if is_occupied[pos_1] is False:\n is_occupied[pos_1] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[pos_1] = True\n elif len(qarglist) == 2:\n pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]\n pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]\n\n if op.condition:\n mask = self._get_mask(op.condition[0])\n cl_reg = self.clbit_list[self._ffs(mask)]\n if_reg = cl_reg[0]\n pos_3 = self.img_regs[(if_reg, 0)]\n if pos_1 > pos_2:\n for i in range(pos_2, pos_3 + self.cregs[if_reg]):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(pos_2, pos_3 + 1):\n is_occupied[j] = True\n break\n else:\n for i in range(pos_1, pos_3 + self.cregs[if_reg]):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(pos_1, pos_3 + 1):\n is_occupied[j] = True\n break\n # symetric gates have angle labels\n if op.name in ['cu1']:\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[max(pos_1, pos_2)] = True\n else:\n temp = [pos_1, pos_2]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[1]\n\n for i in range(top, bottom + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, bottom + 1):\n is_occupied[j] = True\n break\n # symetric gates have angle labels\n if op.name in ['cu1']:\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[top] = True\n\n elif len(qarglist) == 3:\n pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]\n pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]\n pos_3 = self.img_regs[(qarglist[2][0], qarglist[2][1])]\n\n if op.condition:\n mask = self._get_mask(op.condition[0])\n cl_reg = self.clbit_list[self._ffs(mask)]\n if_reg = cl_reg[0]\n pos_4 = self.img_regs[(if_reg, 0)]\n\n temp = [pos_1, pos_2, pos_3, pos_4]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[2]\n\n for i in range(top, pos_4 + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, pos_4 + 1):\n is_occupied[j] = True\n break\n else:\n temp = [pos_1, pos_2, pos_3]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[2]\n\n for i in range(top, bottom + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, bottom + 1):\n is_occupied[j] = True\n break\n\n # update current column width\n arg_str_len = 0\n for arg in op.op.params:\n arg_str = 
re.sub(r'[-+]?\\d*\\.\\d{2,}|\\d{2,}',\n _truncate_float, str(arg))\n arg_str_len += len(arg_str)\n if columns not in max_column_width:\n max_column_width[columns] = 0\n max_column_width[columns] = max(arg_str_len,\n max_column_width[columns])\n elif op.name == \"measure\":\n if len(op.cargs) != 1 or len(op.qargs) != 1:\n raise exceptions.VisualizationError(\"bad operation record\")\n if op.condition:\n raise exceptions.VisualizationError(\n 'conditional measures currently not supported.')\n qname, qindex = op.qargs[0]\n cname, cindex = op.cargs[0]\n if aliases:\n newq = aliases[(qname, qindex)]\n qname = newq[0]\n qindex = newq[1]\n pos_1 = self.img_regs[(qname, qindex)]\n pos_2 = self.img_regs[(cname, cindex)]\n temp = [pos_1, pos_2]\n temp.sort(key=int)\n [pos_1, pos_2] = temp\n for i in range(pos_1, pos_2 + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(pos_1, pos_2 + 1):\n is_occupied[j] = True\n break\n # update current column width\n if columns not in max_column_width:\n max_column_width[columns] = 0\n elif op.name == \"reset\":\n if 'conditional' in op and op.condition:\n raise exceptions.VisualizationError(\n 'conditional reset currently not supported.')\n qname, qindex = op.qargs[0]\n if aliases:\n newq = aliases[(qname, qindex)]\n qname = newq[0]\n qindex = newq[1]\n pos_1 = self.img_regs[(qname, qindex)]\n if is_occupied[pos_1] is False:\n is_occupied[pos_1] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[pos_1] = True\n elif op.name in [\"barrier\", 'snapshot', 'load', 'save',\n 'noise']:\n if self.plot_barriers:\n qarglist = op.qargs\n indexes = [self._get_qubit_index(x) for x in qarglist]\n start_bit = self.qubit_list[min(indexes)]\n if aliases is not None:\n qarglist = map(lambda x: aliases[x], qarglist)\n start = self.img_regs[start_bit]\n span = len(op.qargs) - 1\n for i in range(start, start + span + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(start, start + span + 1):\n is_occupied[j] = True\n break\n # update current column width\n if columns not in max_column_width:\n max_column_width[columns] = 0\n else:\n raise exceptions.VisualizationError(\"bad node data\")\n # every 3 characters is roughly one extra 'unit' of width in the cell\n # the gate name is 1 extra 'unit'\n # the qubit/cbit labels plus initial states is 2 more\n # the wires poking out at the ends is 2 more\n sum_column_widths = sum(1 + v / 3 for v in max_column_width.values())\n return columns + 1, math.ceil(sum_column_widths) + 4\n\n def _get_beamer_page(self):\n \"\"\"Get height, width & scale attributes for the beamer page.\n\n Returns:\n tuple: (height, width, scale) desirable page attributes\n \"\"\"\n # PIL python package limits image size to around a quarter gigabyte\n # this means the beamer image should be limited to < 50000\n # if you want to avoid a \"warning\" too, set it to < 25000\n PIL_limit = 40000\n\n # the beamer latex template limits each dimension to < 19 feet\n # (i.e. 
575cm)\n beamer_limit = 550\n\n # columns are roughly twice as big as rows\n aspect_ratio = self.sum_row_heights / self.sum_column_widths\n\n # choose a page margin so circuit is not cropped\n margin_factor = 1.5\n height = min(self.sum_row_heights * margin_factor, beamer_limit)\n width = min(self.sum_column_widths * margin_factor, beamer_limit)\n\n # if too large, make it fit\n if height * width > PIL_limit:\n height = min(np.sqrt(PIL_limit * aspect_ratio), beamer_limit)\n width = min(np.sqrt(PIL_limit / aspect_ratio), beamer_limit)\n\n # if too small, give it a minimum size\n height = max(height, 10)\n width = max(width, 10)\n\n return (height, width, self.scale)\n\n def _get_mask(self, creg_name):\n mask = 0\n for index, cbit in enumerate(self.clbit_list):\n if creg_name == cbit[0]:\n mask |= (1 << index)\n return mask\n\n def _build_latex_array(self, aliases=None):\n \"\"\"Returns an array of strings containing \\\\LaTeX for this circuit.\n\n If aliases is not None, aliases contains a dict mapping\n the current qubits in the circuit to new qubit names.\n We will deduce the register names and sizes from aliases.\n \"\"\"\n columns = 1\n is_occupied = [False] * self.img_width\n\n # Rename qregs if necessary\n if aliases:\n qregdata = {}\n for q in aliases.values():\n if q[0] not in qregdata:\n qregdata[q[0]] = q[1] + 1\n elif qregdata[q[0]] < q[1] + 1:\n qregdata[q[0]] = q[1] + 1\n else:\n qregdata = self.qregs\n\n for current_op in self.ops:\n if current_op.condition:\n mask = self._get_mask(current_op.condition[0])\n cl_reg = self.clbit_list[self._ffs(mask)]\n if_reg = cl_reg[0]\n pos_2 = self.img_regs[cl_reg]\n if_value = format(current_op.condition[1],\n 'b').zfill(self.cregs[if_reg])[::-1]\n if current_op.name not in ['measure', 'barrier', 'snapshot', 'load',\n 'save', 'noise']:\n nm = current_op.name\n qarglist = current_op.qargs\n if aliases is not None:\n qarglist = map(lambda x: aliases[x], qarglist)\n if len(qarglist) == 1:\n pos_1 = self.img_regs[(qarglist[0][0],\n qarglist[0][1])]\n if current_op.condition:\n mask = self._get_mask(current_op.condition[0])\n cl_reg = self.clbit_list[self._ffs(mask)]\n if_reg = cl_reg[0]\n pos_2 = self.img_regs[cl_reg]\n for i in range(pos_1, pos_2 + self.cregs[if_reg]):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(pos_1, pos_2 + 1):\n is_occupied[j] = True\n break\n\n if nm == \"x\":\n self._latex[pos_1][columns] = \"\\\\gate{X}\"\n elif nm == \"y\":\n self._latex[pos_1][columns] = \"\\\\gate{Y}\"\n elif nm == \"z\":\n self._latex[pos_1][columns] = \"\\\\gate{Z}\"\n elif nm == \"h\":\n self._latex[pos_1][columns] = \"\\\\gate{H}\"\n elif nm == \"id\":\n self._latex[pos_1][columns] = \"\\\\gate{Id}\"\n elif nm == \"s\":\n self._latex[pos_1][columns] = \"\\\\gate{S}\"\n elif nm == \"sdg\":\n self._latex[pos_1][columns] = \"\\\\gate{S^\\\\dag}\"\n elif nm == \"t\":\n self._latex[pos_1][columns] = \"\\\\gate{T}\"\n elif nm == \"tdg\":\n self._latex[pos_1][columns] = \"\\\\gate{T^\\\\dag}\"\n elif nm == \"u0\":\n self._latex[pos_1][columns] = \"\\\\gate{U_0(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"u1\":\n self._latex[pos_1][columns] = \"\\\\gate{U_1(%s)}\" % (\n current_op[\"op\"].params[0])\n elif nm == \"u2\":\n self._latex[pos_1][columns] = \\\n \"\\\\gate{U_2\\\\left(%s,%s\\\\right)}\" % (\n current_op[\"op\"].params[0], current_op[\"op\"].params[1])\n elif nm == \"u3\":\n self._latex[pos_1][columns] = (\"\\\\gate{U_3(%s,%s,%s)}\" % (\n 
current_op.op.params[0],\n current_op.op.params[1],\n current_op.op.params[2]))\n elif nm == \"rx\":\n self._latex[pos_1][columns] = \"\\\\gate{R_x(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"ry\":\n self._latex[pos_1][columns] = \"\\\\gate{R_y(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"rz\":\n self._latex[pos_1][columns] = \"\\\\gate{R_z(%s)}\" % (\n current_op.op.params[0])\n\n gap = pos_2 - pos_1\n for i in range(self.cregs[if_reg]):\n if if_value[i] == '1':\n self._latex[pos_2 + i][columns] = \\\n \"\\\\control \\\\cw \\\\cwx[-\" + str(gap) + \"]\"\n gap = 1\n else:\n self._latex[pos_2 + i][columns] = \\\n \"\\\\controlo \\\\cw \\\\cwx[-\" + str(gap) + \"]\"\n gap = 1\n\n else:\n if not is_occupied[pos_1]:\n is_occupied[pos_1] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[pos_1] = True\n\n if nm == \"x\":\n self._latex[pos_1][columns] = \"\\\\gate{X}\"\n elif nm == \"y\":\n self._latex[pos_1][columns] = \"\\\\gate{Y}\"\n elif nm == \"z\":\n self._latex[pos_1][columns] = \"\\\\gate{Z}\"\n elif nm == \"h\":\n self._latex[pos_1][columns] = \"\\\\gate{H}\"\n elif nm == \"id\":\n self._latex[pos_1][columns] = \"\\\\gate{Id}\"\n elif nm == \"s\":\n self._latex[pos_1][columns] = \"\\\\gate{S}\"\n elif nm == \"sdg\":\n self._latex[pos_1][columns] = \"\\\\gate{S^\\\\dag}\"\n elif nm == \"t\":\n self._latex[pos_1][columns] = \"\\\\gate{T}\"\n elif nm == \"tdg\":\n self._latex[pos_1][columns] = \"\\\\gate{T^\\\\dag}\"\n elif nm == \"u0\":\n self._latex[pos_1][columns] = \"\\\\gate{U_0(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"u1\":\n self._latex[pos_1][columns] = \"\\\\gate{U_1(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"u2\":\n self._latex[pos_1][columns] = \\\n \"\\\\gate{U_2\\\\left(%s,%s\\\\right)}\" % (\n current_op.op.params[0], current_op.op.params[1])\n elif nm == \"u3\":\n self._latex[pos_1][columns] = (\"\\\\gate{U_3(%s,%s,%s)}\" % (\n current_op.op.params[0],\n current_op.op.params[1],\n current_op.op.params[2]))\n elif nm == \"rx\":\n self._latex[pos_1][columns] = \"\\\\gate{R_x(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"ry\":\n self._latex[pos_1][columns] = \"\\\\gate{R_y(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"rz\":\n self._latex[pos_1][columns] = \"\\\\gate{R_z(%s)}\" % (\n current_op.op.params[0])\n elif nm == \"reset\":\n self._latex[pos_1][columns] = (\n \"\\\\push{\\\\rule{.6em}{0em}\\\\ket{0}\\\\\"\n \"rule{.2em}{0em}} \\\\qw\")\n\n elif len(qarglist) == 2:\n pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]\n pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]\n\n if current_op.condition:\n pos_3 = self.img_regs[(if_reg, 0)]\n temp = [pos_1, pos_2, pos_3]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[1]\n\n for i in range(top, pos_3 + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, pos_3 + 1):\n is_occupied[j] = True\n break\n # symetric gates have angle labels\n if current_op.name == 'cu1':\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[top] = True\n\n gap = pos_3 - bottom\n for i in range(self.cregs[if_reg]):\n if if_value[i] == '1':\n self._latex[pos_3 + i][columns] = \\\n \"\\\\control \\\\cw \\\\cwx[-\" + str(gap) + \"]\"\n gap = 1\n else:\n self._latex[pos_3 + i][columns] = \\\n \"\\\\controlo \\\\cw \\\\cwx[-\" + str(gap) + \"]\"\n gap = 1\n\n if nm == \"cx\":\n self._latex[pos_1][columns] = \\\n \"\\\\ctrl{\" + str(pos_2 - pos_1) + 
\"}\"\n self._latex[pos_2][columns] = \"\\\\targ\"\n elif nm == \"cz\":\n self._latex[pos_1][columns] = \\\n \"\\\\ctrl{\" + str(pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\control\\\\qw\"\n elif nm == \"cy\":\n self._latex[pos_1][columns] = \\\n \"\\\\ctrl{\" + str(pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\gate{Y}\"\n elif nm == \"ch\":\n self._latex[pos_1][columns] = \\\n \"\\\\ctrl{\" + str(pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\gate{H}\"\n elif nm == \"swap\":\n self._latex[pos_1][columns] = \"\\\\qswap\"\n self._latex[pos_2][columns] = \\\n \"\\\\qswap \\\\qwx[\" + str(pos_1 - pos_2) + \"]\"\n elif nm == \"crz\":\n self._latex[pos_1][columns] = \\\n \"\\\\ctrl{\" + str(pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \\\n \"\\\\gate{R_z(%s)}\" % (current_op.op.params[0])\n elif nm == \"cu1\":\n self._latex[pos_1][columns - 1] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns - 1] = \"\\\\control\\\\qw\"\n self._latex[min(pos_1, pos_2)][columns] = \\\n \"\\\\dstick{%s}\\\\qw\" % (current_op.op.params[0])\n self._latex[max(pos_1, pos_2)][columns] = \"\\\\qw\"\n elif nm == \"cu3\":\n self._latex[pos_1][columns] = \\\n \"\\\\ctrl{\" + str(pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \\\n \"\\\\gate{U_3(%s,%s,%s)}\" % (current_op.op.params[0],\n current_op.op.params[1],\n current_op.op.params[2])\n else:\n temp = [pos_1, pos_2]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[1]\n\n for i in range(top, bottom + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, bottom + 1):\n is_occupied[j] = True\n break\n # symetric gates have angle labels\n if current_op.name == 'cu1':\n columns += 1\n is_occupied = [False] * self.img_width\n is_occupied[top] = True\n\n if nm == \"cx\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\targ\"\n elif nm == \"cz\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\control\\\\qw\"\n elif nm == \"cy\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\gate{Y}\"\n elif nm == \"ch\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\gate{H}\"\n elif nm == \"swap\":\n self._latex[pos_1][columns] = \"\\\\qswap\"\n self._latex[pos_2][columns] = \\\n \"\\\\qswap \\\\qwx[\" + str(pos_1 - pos_2) + \"]\"\n elif nm == \"crz\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \\\n \"\\\\gate{R_z(%s)}\" % (current_op.op.params[0])\n elif nm == \"cu1\":\n self._latex[pos_1][columns - 1] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns - 1] = \"\\\\control\\\\qw\"\n self._latex[min(pos_1, pos_2)][columns] = \\\n \"\\\\dstick{%s}\\\\qw\" % (current_op.op.params[0])\n self._latex[max(pos_1, pos_2)][columns] = \"\\\\qw\"\n elif nm == \"cu3\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = (\"\\\\gate{U_3(%s,%s,%s)}\" % (\n current_op.op.params[0],\n current_op.op.params[1],\n current_op.op.params[2]))\n\n elif len(qarglist) == 3:\n pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]\n pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]\n pos_3 = 
self.img_regs[(qarglist[2][0], qarglist[2][1])]\n\n if current_op.condition:\n pos_4 = self.img_regs[(if_reg, 0)]\n\n temp = [pos_1, pos_2, pos_3, pos_4]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[2]\n\n for i in range(top, pos_4 + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, pos_4 + 1):\n is_occupied[j] = True\n break\n\n prev_column = [x[columns - 1] for x in self._latex]\n for item, prev_entry in enumerate(prev_column):\n if 'barrier' in prev_entry:\n span = re.search('barrier{(.*)}', prev_entry)\n if span and any(i in temp for i in range(\n item, int(span.group(1)))):\n self._latex[item][columns - 1] = \\\n prev_entry.replace(\n '\\\\barrier{',\n '\\\\barrier[-0.65em]{')\n\n gap = pos_4 - bottom\n for i in range(self.cregs[if_reg]):\n if if_value[i] == '1':\n self._latex[pos_4 + i][columns] = \\\n \"\\\\control \\\\cw \\\\cwx[-\" + str(gap) + \"]\"\n gap = 1\n else:\n self._latex[pos_4 + i][columns] = \\\n \"\\\\controlo \\\\cw \\\\cwx[-\" + str(gap) + \"]\"\n gap = 1\n\n if nm == \"ccx\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\ctrl{\" + str(\n pos_3 - pos_2) + \"}\"\n self._latex[pos_3][columns] = \"\\\\targ\"\n\n if nm == \"cswap\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\qswap\"\n self._latex[pos_3][columns] = \\\n \"\\\\qswap \\\\qwx[\" + str(pos_2 - pos_3) + \"]\"\n else:\n temp = [pos_1, pos_2, pos_3]\n temp.sort(key=int)\n top = temp[0]\n bottom = temp[2]\n\n for i in range(top, bottom + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(top, bottom + 1):\n is_occupied[j] = True\n break\n\n prev_column = [x[columns - 1] for x in self._latex]\n for item, prev_entry in enumerate(prev_column):\n if 'barrier' in prev_entry:\n span = re.search('barrier{(.*)}', prev_entry)\n if span and any(i in temp for i in range(\n item, int(span.group(1)))):\n self._latex[item][columns - 1] = \\\n prev_entry.replace(\n '\\\\barrier{',\n '\\\\barrier[-0.65em]{')\n\n if nm == \"ccx\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\ctrl{\" + str(\n pos_3 - pos_2) + \"}\"\n self._latex[pos_3][columns] = \"\\\\targ\"\n\n if nm == \"cswap\":\n self._latex[pos_1][columns] = \"\\\\ctrl{\" + str(\n pos_2 - pos_1) + \"}\"\n self._latex[pos_2][columns] = \"\\\\qswap\"\n self._latex[pos_3][columns] = \\\n \"\\\\qswap \\\\qwx[\" + str(pos_2 - pos_3) + \"]\"\n\n elif current_op.name == \"measure\":\n if (len(current_op.cargs) != 1\n or len(current_op.qargs) != 1\n or current_op.op.params):\n raise exceptions.VisualizationError(\"bad operation record\")\n if current_op.condition:\n raise exceptions.VisualizationError(\n \"If controlled measures currently not supported.\")\n\n qname, qindex = current_op.qargs[0]\n cname, cindex = current_op.cargs[0]\n if aliases:\n newq = aliases[(qname, qindex)]\n qname = newq[0]\n qindex = newq[1]\n\n pos_1 = self.img_regs[(qname, qindex)]\n pos_2 = self.img_regs[(cname, cindex)]\n\n for i in range(pos_1, pos_2 + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(pos_1, pos_2 + 1):\n is_occupied[j] = True\n break\n\n try:\n self._latex[pos_1][columns] = \"\\\\meter\"\n prev_column 
= [x[columns - 1] for x in self._latex]\n for item, prev_entry in enumerate(prev_column):\n if 'barrier' in prev_entry:\n span = re.search('barrier{(.*)}', prev_entry)\n if span and (\n item + int(span.group(1))) - pos_1 >= 0:\n self._latex[item][columns - 1] = \\\n prev_entry.replace(\n '\\\\barrier{',\n '\\\\barrier[-1.15em]{')\n\n self._latex[pos_2][columns] = \\\n \"\\\\cw \\\\cwx[-\" + str(pos_2 - pos_1) + \"]\"\n except Exception as e:\n raise exceptions.VisualizationError(\n 'Error during Latex building: %s' % str(e))\n elif current_op.name in ['barrier', 'snapshot', 'load', 'save',\n 'noise']:\n if self.plot_barriers:\n qarglist = current_op.qargs\n indexes = [self._get_qubit_index(x) for x in qarglist]\n start_bit = self.qubit_list[min(indexes)]\n if aliases is not None:\n qarglist = map(lambda x: aliases[x], qarglist)\n start = self.img_regs[start_bit]\n span = len(current_op.qargs) - 1\n for i in range(start, start + span + 1):\n if is_occupied[i] is False:\n is_occupied[i] = True\n else:\n columns += 1\n is_occupied = [False] * self.img_width\n for j in range(start, start + span + 1):\n is_occupied[j] = True\n break\n self._latex[start][columns] = \"\\\\qw \\\\barrier{\" + str(\n span) + \"}\"\n else:\n raise exceptions.VisualizationError(\"bad node data\")\n\n def _get_qubit_index(self, qubit):\n \"\"\"Get the index number for a quantum bit\n Args:\n qubit (tuple): The tuple of the bit of the form\n (register_name, bit_number)\n Returns:\n int: The index in the bit list\n Raises:\n VisualizationError: If the bit isn't found\n \"\"\"\n for i, bit in enumerate(self.qubit_list):\n if qubit == bit:\n qindex = i\n break\n else:\n raise exceptions.VisualizationError(\n \"unable to find bit for operation\")\n return qindex\n\n def _ffs(self, mask):\n \"\"\"Find index of first set bit.\n\n Args:\n mask (int): integer to search\n Returns:\n int: index of the first set bit.\n \"\"\"\n origin = (mask & (-mask)).bit_length()\n return origin - 1\n\n\ndef _get_register_specs(bit_labels):\n \"\"\"Get the number and size of unique registers from bit_labels list.\n\n Args:\n bit_labels (list): this list is of the form::\n\n [['reg1', 0], ['reg1', 1], ['reg2', 0]]\n\n which indicates a register named \"reg1\" of size 2\n and a register named \"reg2\" of size 1. This is the\n format of classic and quantum bit labels in qobj\n header.\n\n Yields:\n tuple: iterator of register_name:size pairs.\n \"\"\"\n it = itertools.groupby(bit_labels, operator.itemgetter(0))\n for register_name, sub_it in it:\n yield register_name, max(ind[1] for ind in sub_it) + 1\n\n\ndef _truncate_float(matchobj, format_str='0.2g'):\n \"\"\"Truncate long floats\n\n Args:\n matchobj (re.Match): contains original float\n format_str (str): format specifier\n Returns:\n str: returns truncated float\n \"\"\"\n if matchobj.group(0):\n return format(float(matchobj.group(0)), format_str)\n return ''\n",
"# -*- coding: utf-8 -*-\n\n# Copyright 2019, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nSample pulse.\n\"\"\"\n\nimport numpy as np\n\nfrom qiskit.pulse.exceptions import CommandsError\nfrom .pulse_command import PulseCommand\n\n\nclass SamplePulse(PulseCommand):\n \"\"\"Container for functional pulse.\"\"\"\n\n def __init__(self, samples, name=None):\n \"\"\"Create new sample pulse command.\n\n Args:\n samples (ndarray): Complex array of pulse envelope.\n name (str): Unique name to identify the pulse.\n Raises:\n CommandsError: when pulse envelope amplitude exceeds 1.\n \"\"\"\n if not name:\n _name = str('pulse_object_%s' % id(self))\n else:\n _name = name\n\n super(SamplePulse, self).__init__(duration=len(samples), name=_name)\n\n if np.any(np.abs(samples) > 1):\n raise CommandsError('Absolute value of pulse envelope amplitude exceeds 1.')\n\n self.samples = samples\n\n def draw(self, **kwargs):\n \"\"\"Plot the interpolated envelope of pulse.\n\n Keyword Args:\n dt (float): Time interval of samples.\n interp_method (str): Method of interpolation\n (set `None` for turn off the interpolation).\n filename (str): Name required to save pulse image.\n interactive (bool): When set true show the circuit in a new window\n (this depends on the matplotlib backend being used supporting this).\n dpi (int): Resolution of saved image.\n nop (int): Data points for interpolation.\n size (tuple): Size of figure.\n \"\"\"\n from qiskit.tools.visualization import pulse_drawer\n\n return pulse_drawer(self.samples, self.duration, **kwargs)\n\n def __eq__(self, other):\n \"\"\"Two SamplePulses are the same if they are of the same type\n and have the same name and samples.\n\n Args:\n other (SamplePulse): other SamplePulse\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n if type(self) is type(other) and \\\n self.name == other.name and \\\n (self.samples == other.samples).all():\n return True\n return False\n",
"# -*- coding: utf-8 -*-\n\n# Copyright 2019, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\"\"\"\nSuperoperator representation of a Quantum Channel.\n\n\nFor a quantum channel E, the superoperator is defined as the matrix S such that\n\n |E(ρ)⟩⟩ = S|ρ⟩⟩\n\nwhere |A⟩⟩ denotes the column stacking vectorization of a matrix A.\n\nSee [1] for further details.\n\nReferences:\n [1] C.J. Wood, J.D. Biamonte, D.G. Cory, Quant. Inf. Comp. 15, 0579-0811 (2015)\n Open access: arXiv:1111.6950 [quant-ph]\n\"\"\"\n\nfrom numbers import Number\nimport numpy as np\n\nfrom qiskit.qiskiterror import QiskitError\nfrom .basechannel import QuantumChannel\nfrom .transformations import _to_superop, _bipartite_tensor\nfrom .choi import Choi\n\n\nclass SuperOp(QuantumChannel):\n \"\"\"Superoperator representation of a quantum channel\"\"\"\n\n def __init__(self, data, input_dim=None, output_dim=None):\n # Check if input is a quantum channel object\n # If so we disregard the dimension kwargs\n if issubclass(data.__class__, QuantumChannel):\n input_dim, output_dim = data.dims\n super_mat = _to_superop(data.rep, data._data, input_dim,\n output_dim)\n else:\n # We initialize directly from superoperator matrix\n super_mat = np.array(data, dtype=complex)\n # Determine input and output dimensions\n dout, din = super_mat.shape\n if output_dim is None:\n output_dim = int(np.sqrt(dout))\n if input_dim is None:\n input_dim = int(np.sqrt(din))\n # Check dimensions\n if output_dim**2 != dout or input_dim**2 != din:\n raise QiskitError(\n \"Invalid input and output dimension for superoperator input.\"\n )\n super().__init__('SuperOp', super_mat, input_dim, output_dim)\n\n @property\n def _bipartite_shape(self):\n \"\"\"Return the shape for bipartite matrix\"\"\"\n return (self._output_dim, self._output_dim, self._input_dim,\n self._input_dim)\n\n def is_cptp(self):\n \"\"\"Return True if completely-positive trace-preserving.\"\"\"\n # We convert to the Choi representation to check if CPTP\n tmp = Choi(self)\n return tmp.is_cptp()\n\n def _evolve(self, state):\n \"\"\"Evolve a quantum state by the QuantumChannel.\n\n Args:\n state (QuantumState): The input statevector or density matrix.\n\n Returns:\n DensityMatrix: the output quantum state as a density matrix.\n \"\"\"\n state = self._format_density_matrix(self._check_state(state))\n shape_in = self._input_dim * self._input_dim\n shape_out = (self._output_dim, self._output_dim)\n return np.reshape(\n np.dot(self._data, np.reshape(state, shape_in, order='F')),\n shape_out,\n order='F')\n\n def conjugate(self, inplace=False):\n \"\"\"Return the conjugate of the QuantumChannel.\n\n Args:\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the conjugate of the quantum channel as a SuperOp object.\n \"\"\"\n if inplace:\n np.conjugate(self._data, out=self._data)\n return self\n return SuperOp(np.conj(self._data), self._input_dim, self._output_dim)\n\n def transpose(self, inplace=False):\n \"\"\"Return the transpose of the QuantumChannel.\n\n Args:\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the transpose of the quantum channel as a SuperOp object.\n \"\"\"\n # Swaps input and output dimensions\n output_dim = self._input_dim\n input_dim = self._output_dim\n if inplace:\n self._data = np.transpose(self._data)\n self._input_dim = input_dim\n self._output_dim = 
output_dim\n return self\n return SuperOp(np.transpose(self._data), input_dim, output_dim)\n\n def adjoint(self, inplace=False):\n \"\"\"Return the adjoint of the QuantumChannel.\n\n Args:\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the adjoint of the quantum channel as a SuperOp object.\n \"\"\"\n return super().adjoint(inplace=inplace)\n\n def compose(self, other, inplace=False, front=False):\n \"\"\"Return the composition channel self∘other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass\n inplace (bool): If True modify the current object inplace\n [Default: False]\n front (bool): If False compose in standard order other(self(input))\n otherwise compose in reverse order self(other(input))\n [default: False]\n\n Returns:\n SuperOp: The composition channel as a SuperOp object.\n\n Raises:\n QiskitError: if other is not a QuantumChannel subclass, or\n has incompatible dimensions.\n \"\"\"\n if not issubclass(other.__class__, QuantumChannel):\n raise QiskitError('Other is not a channel rep')\n # Check dimensions match up\n if front and self._input_dim != other._output_dim:\n raise QiskitError(\n 'input_dim of self must match output_dim of other')\n if not front and self._output_dim != other._input_dim:\n raise QiskitError(\n 'input_dim of other must match output_dim of self')\n # Convert other to SuperOp\n if not isinstance(other, SuperOp):\n other = SuperOp(other)\n\n if front:\n # Composition A(B(input))\n input_dim = other._input_dim\n output_dim = self._output_dim\n if inplace:\n if self.dims == other.dims:\n np.dot(self._data, other.data, out=self._data)\n else:\n self._data = np.dot(self._data, other.data)\n self._input_dim = input_dim\n self._output_dim = output_dim\n return self\n return SuperOp(\n np.dot(self._data, other.data), input_dim, output_dim)\n # Composition B(A(input))\n input_dim = self._input_dim\n output_dim = other._output_dim\n if inplace:\n if self.dims == other.dims:\n np.dot(other.data, self._data, out=self._data)\n else:\n self._data = np.dot(other.data, self._data)\n self._input_dim = input_dim\n self._output_dim = output_dim\n return self\n return SuperOp(np.dot(other.data, self._data), input_dim, output_dim)\n\n def power(self, n, inplace=False):\n \"\"\"Return the compose of a QuantumChannel with itself n times.\n\n Args:\n n (int): the number of times to compose with self (n>0).\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the n-times composition channel as a SuperOp object.\n\n Raises:\n QiskitError: if the input and output dimensions of the\n QuantumChannel are not equal, or the power is not a positive\n integer.\n \"\"\"\n if not isinstance(n, int) or n < 1:\n raise QiskitError(\"Can only power with positive integer powers.\")\n if self._input_dim != self._output_dim:\n raise QiskitError(\"Can only power with input_dim = output_dim.\")\n # Override base class power so we can implement more efficiently\n # using Numpy.matrix_power\n if inplace:\n if n == 1:\n return self\n self._data = np.linalg.matrix_power(self._data, n)\n return self\n # Return new object\n return SuperOp(np.linalg.matrix_power(self._data, n), *self.dims)\n\n def tensor(self, other, inplace=False):\n \"\"\"Return the tensor product channel self ⊗ other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the tensor product channel self ⊗ other 
as a SuperOp\n object.\n\n Raises:\n QiskitError: if other is not a QuantumChannel subclass.\n \"\"\"\n return self._tensor_product(other, inplace=inplace, reverse=False)\n\n def expand(self, other, inplace=False):\n \"\"\"Return the tensor product channel other ⊗ self.\n\n Args:\n other (QuantumChannel): a quantum channel subclass\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the tensor product channel other ⊗ self as a SuperOp\n object.\n\n Raises:\n QiskitError: if other is not a QuantumChannel subclass.\n \"\"\"\n return self._tensor_product(other, inplace=inplace, reverse=True)\n\n def add(self, other, inplace=False):\n \"\"\"Return the QuantumChannel self + other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the linear addition self + other as a SuperOp object.\n\n Raises:\n QiskitError: if other is not a QuantumChannel subclass, or\n has incompatible dimensions.\n \"\"\"\n if not issubclass(other.__class__, QuantumChannel):\n raise QiskitError('other is not a QuantumChannel subclass')\n if self.dims != other.dims:\n raise QiskitError(\"other QuantumChannel dimensions are not equal\")\n if not isinstance(other, SuperOp):\n other = SuperOp(other)\n\n if inplace:\n self._data += other._data\n return self\n input_dim, output_dim = self.dims\n return SuperOp(self._data + other.data, input_dim, output_dim)\n\n def subtract(self, other, inplace=False):\n \"\"\"Return the QuantumChannel self - other.\n\n Args:\n other (QuantumChannel): a quantum channel subclass\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the linear subtraction self - other as a SuperOp object.\n\n Raises:\n QiskitError: if other is not a QuantumChannel subclass, or\n has incompatible dimensions.\n \"\"\"\n if not issubclass(other.__class__, QuantumChannel):\n raise QiskitError('other is not a QuantumChannel subclass')\n if self.dims != other.dims:\n raise QiskitError(\"other QuantumChannel dimensions are not equal\")\n if not isinstance(other, SuperOp):\n other = SuperOp(other)\n if inplace:\n self._data -= other.data\n return self\n input_dim, output_dim = self.dims\n return SuperOp(self._data - other.data, input_dim, output_dim)\n\n def multiply(self, other, inplace=False):\n \"\"\"Return the QuantumChannel other * self.\n\n Args:\n other (complex): a complex number\n inplace (bool): If True modify the current object inplace\n [Default: False]\n\n Returns:\n SuperOp: the scalar multiplication other * self as a SuperOp object.\n\n Raises:\n QiskitError: if other is not a valid scalar.\n \"\"\"\n if not isinstance(other, Number):\n raise QiskitError(\"other is not a number\")\n if inplace:\n self._data *= other\n return self\n input_dim, output_dim = self.dims\n return SuperOp(other * self._data, input_dim, output_dim)\n\n def _tensor_product(self, other, inplace=False, reverse=False):\n \"\"\"Return the tensor product channel.\n\n Args:\n other (QuantumChannel): a quantum channel subclass\n inplace (bool): If True modify the current object inplace\n [default: False]\n reverse (bool): If False return self ⊗ other, if True return\n other ⊗ self [Default: False]\n Returns:\n SuperOp: the tensor product channel as a SuperOp object.\n\n Raises:\n QiskitError: if other is not a QuantumChannel subclass.\n \"\"\"\n # Convert other to SuperOp\n if not issubclass(other.__class__, 
QuantumChannel):\n raise QiskitError('other is not a QuantumChannel subclass')\n if not isinstance(other, SuperOp):\n other = SuperOp(other)\n\n # Reshuffle indices\n a_in, a_out = self.dims\n b_in, b_out = other.dims\n\n # Combined channel dimensions\n input_dim = a_in * b_in\n output_dim = a_out * b_out\n\n if reverse:\n data = _bipartite_tensor(other.data, self._data,\n shape1=other._bipartite_shape,\n shape2=self._bipartite_shape)\n else:\n data = _bipartite_tensor(self._data, other.data,\n shape1=self._bipartite_shape,\n shape2=other._bipartite_shape)\n if inplace:\n self._data = data\n self._input_dim = input_dim\n self._output_dim = output_dim\n return self\n # Return new object\n return SuperOp(data, input_dim, output_dim)\n"
] | [
[
"numpy.log",
"numpy.sqrt",
"numpy.iinfo",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
],
[
"numpy.sqrt"
],
[
"numpy.abs"
],
[
"numpy.dot",
"numpy.conj",
"numpy.sqrt",
"numpy.reshape",
"numpy.linalg.matrix_power",
"numpy.transpose",
"numpy.array",
"numpy.conjugate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
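The states.py entry above defines random_state (Haar sampling) and purity; a small self-contained check of the invariants its docstrings promise, using plain numpy and mirroring the sampling code rather than importing qiskit:

import numpy as np

rng = np.random.RandomState(42)
dim = 8

# Mirror random_state: exponential radial weights plus uniform phases.
x = rng.rand(dim)            # uniform over [0, 1)
x += x == 0                  # shift exact zeros so the log stays finite
x = -np.log(x)
phases = rng.rand(dim) * 2.0 * np.pi
state = np.sqrt(x / sum(x)) * np.exp(1j * phases)

assert np.isclose(np.linalg.norm(state), 1.0)            # states are normalized
rho = np.outer(state.conjugate(), state)                 # same as projector(state)
assert np.isclose(np.real(np.trace(rho.dot(rho))), 1.0)  # purity of a pure state is 1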
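Likewise, the superop.py entry relies on the column-stacking convention |E(ρ)⟩⟩ = S|ρ⟩⟩ quoted in its module docstring. For a unitary channel E(ρ) = UρU† the superoperator is conj(U) ⊗ U; a sketch verifying this with the same order='F' reshapes used in SuperOp._evolve (plain numpy, no qiskit import assumed):

import numpy as np

U = np.array([[0, 1], [1, 0]], dtype=complex)   # Pauli-X as a test unitary
S = np.kron(U.conj(), U)                        # superoperator of rho -> U rho U†

rho = np.array([[0.75, 0.25j], [-0.25j, 0.25]], dtype=complex)
lhs = U.dot(rho).dot(U.conj().T)                # direct evolution
rhs = np.reshape(S.dot(np.reshape(rho, 4, order='F')), (2, 2), order='F')
assert np.allclose(lhs, rhs)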
HumaticsLAB/GTM-Transformer | [
"94124d3246c7c22d8b952beeda53639a9ad170e3"
] | [
"models/GTM.py"
] | [
"import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport pytorch_lightning as pl\r\nfrom transformers import pipeline\r\nfrom torchvision import models\r\nfrom fairseq.optim.adafactor import Adafactor\r\n\r\n\r\nclass PositionalEncoding(nn.Module):\r\n def __init__(self, d_model, dropout=0.1, max_len=52):\r\n super(PositionalEncoding, self).__init__()\r\n self.dropout = nn.Dropout(p=dropout)\r\n\r\n pe = torch.zeros(max_len, d_model)\r\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\r\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0).transpose(0, 1)\r\n self.register_buffer('pe', pe)\r\n\r\n def forward(self, x):\r\n x = x + self.pe[:x.size(0), :]\r\n return self.dropout(x)\r\n\r\nclass TimeDistributed(nn.Module):\r\n # Takes any module and stacks the time dimension with the batch dimenison of inputs before applying the module\r\n # Insipired from https://keras.io/api/layers/recurrent_layers/time_distributed/\r\n # https://discuss.pytorch.org/t/any-pytorch-function-can-work-as-keras-timedistributed/1346/4\r\n def __init__(self, module, batch_first=True):\r\n super(TimeDistributed, self).__init__()\r\n self.module = module # Can be any layer we wish to apply like Linear, Conv etc\r\n self.batch_first = batch_first\r\n\r\n def forward(self, x):\r\n if len(x.size()) <= 2:\r\n return self.module(x)\r\n\r\n # Squash samples and timesteps into a single axis\r\n x_reshape = x.contiguous().view(-1, x.size(-1)) \r\n\r\n y = self.module(x_reshape)\r\n\r\n # We have to reshape Y\r\n if self.batch_first:\r\n y = y.contiguous().view(x.size(0), -1, y.size(-1)) # (samples, timesteps, output_size)\r\n else:\r\n y = y.view(-1, x.size(1), y.size(-1)) # (timesteps, samples, output_size)\r\n\r\n return y\r\n\r\nclass FusionNetwork(nn.Module):\r\n def __init__(self, embedding_dim, hidden_dim, use_img, use_text, dropout=0.2):\r\n super(FusionNetwork, self).__init__()\r\n \r\n self.img_pool = nn.AdaptiveAvgPool2d((1,1))\r\n self.img_linear = nn.Linear(2048, embedding_dim)\r\n self.use_img = use_img\r\n self.use_text = use_text\r\n input_dim = embedding_dim + (embedding_dim*use_img) + (embedding_dim*use_text)\r\n self.feature_fusion = nn.Sequential(\r\n nn.BatchNorm1d(input_dim),\r\n nn.Linear(input_dim, input_dim, bias=False),\r\n nn.ReLU(),\r\n nn.Dropout(dropout),\r\n nn.Linear(input_dim, hidden_dim)\r\n )\r\n\r\n def forward(self, img_encoding, text_encoding, dummy_encoding):\r\n # Fuse static features together\r\n pooled_img = self.img_pool(img_encoding)\r\n condensed_img = self.img_linear(pooled_img.flatten(1))\r\n\r\n # Build input\r\n decoder_inputs = []\r\n if self.use_img == 1:\r\n decoder_inputs.append(condensed_img) \r\n if self.use_text == 1:\r\n decoder_inputs.append(text_encoding) \r\n decoder_inputs.append(dummy_encoding)\r\n concat_features = torch.cat(decoder_inputs, dim=1)\r\n\r\n final = self.feature_fusion(concat_features)\r\n # final = self.feature_fusion(dummy_encoding)\r\n\r\n return final\r\n\r\nclass GTrendEmbedder(nn.Module):\r\n def __init__(self, forecast_horizon, embedding_dim, use_mask, trend_len, num_trends, gpu_num):\r\n super().__init__()\r\n self.forecast_horizon = forecast_horizon\r\n self.input_linear = TimeDistributed(nn.Linear(num_trends, embedding_dim))\r\n self.pos_embedding = PositionalEncoding(embedding_dim, max_len=trend_len)\r\n encoder_layer 
= nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=4, dropout=0.2)\r\n self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=2)\r\n self.use_mask = use_mask\r\n self.gpu_num = gpu_num\r\n\r\n def _generate_encoder_mask(self, size, forecast_horizon):\r\n mask = torch.zeros((size, size))\r\n split = math.gcd(size, forecast_horizon)\r\n for i in range(0, size, split):\r\n mask[i:i+split, i:i+split] = 1\r\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to('cuda:'+str(self.gpu_num))\r\n return mask\r\n \r\n def _generate_square_subsequent_mask(self, size):\r\n mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)\r\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to('cuda:'+str(self.gpu_num))\r\n return mask\r\n\r\n def forward(self, gtrends):\r\n gtrend_emb = self.input_linear(gtrends.permute(0,2,1))\r\n gtrend_emb = self.pos_embedding(gtrend_emb.permute(1,0,2))\r\n input_mask = self._generate_encoder_mask(gtrend_emb.shape[0], self.forecast_horizon)\r\n if self.use_mask == 1:\r\n gtrend_emb = self.encoder(gtrend_emb, input_mask)\r\n else:\r\n gtrend_emb = self.encoder(gtrend_emb)\r\n return gtrend_emb\r\n \r\nclass TextEmbedder(nn.Module):\r\n def __init__(self, embedding_dim, cat_dict, col_dict, fab_dict, gpu_num):\r\n super().__init__()\r\n self.embedding_dim = embedding_dim\r\n self.cat_dict = {v: k for k, v in cat_dict.items()}\r\n self.col_dict = {v: k for k, v in col_dict.items()}\r\n self.fab_dict = {v: k for k, v in fab_dict.items()}\r\n self.word_embedder = pipeline('feature-extraction', model='bert-base-uncased')\r\n self.fc = nn.Linear(768, embedding_dim)\r\n self.dropout = nn.Dropout(0.1)\r\n self.gpu_num = gpu_num\r\n\r\n def forward(self, category, color, fabric):\r\n textual_description = [self.col_dict[color.detach().cpu().numpy().tolist()[i]] + ' ' \\\r\n + self.fab_dict[fabric.detach().cpu().numpy().tolist()[i]] + ' ' \\\r\n + self.cat_dict[category.detach().cpu().numpy().tolist()[i]] for i in range(len(category))]\r\n\r\n\r\n # Use BERT to extract features\r\n word_embeddings = self.word_embedder(textual_description)\r\n\r\n # BERT gives us embeddings for [CLS] .. 
[EOS], which is why we only average the embeddings in the range [1:-1] \r\n # We're not fine tuning BERT and we don't want the noise coming from [CLS] or [EOS]\r\n word_embeddings = [torch.FloatTensor(x[0][1:-1]).mean(axis=0) for x in word_embeddings] \r\n word_embeddings = torch.stack(word_embeddings).to('cuda:'+str(self.gpu_num))\r\n \r\n # Embed to our embedding space\r\n word_embeddings = self.dropout(self.fc(word_embeddings))\r\n\r\n return word_embeddings\r\n\r\nclass ImageEmbedder(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n # Img feature extraction\r\n resnet = models.resnet50(pretrained=True)\r\n modules = list(resnet.children())[:-2]\r\n self.resnet = nn.Sequential(*modules)\r\n for p in self.resnet.parameters():\r\n p.requires_grad = False\r\n\r\n # Fine tune resnet\r\n # for c in list(self.resnet.children())[6:]:\r\n # for p in c.parameters():\r\n # p.requires_grad = True\r\n \r\n def forward(self, images): \r\n img_embeddings = self.resnet(images) \r\n size = img_embeddings.size()\r\n out = img_embeddings.view(*size[:2],-1)\r\n\r\n return out.view(*size).contiguous() # batch_size, 2048, image_size/32, image_size/32\r\n\r\nclass DummyEmbedder(nn.Module):\r\n def __init__(self, embedding_dim):\r\n super().__init__()\r\n self.embedding_dim = embedding_dim\r\n self.day_embedding = nn.Linear(1, embedding_dim)\r\n self.week_embedding = nn.Linear(1, embedding_dim)\r\n self.month_embedding = nn.Linear(1, embedding_dim)\r\n self.year_embedding = nn.Linear(1, embedding_dim)\r\n self.dummy_fusion = nn.Linear(embedding_dim*4, embedding_dim)\r\n self.dropout = nn.Dropout(0.2)\r\n\r\n\r\n def forward(self, temporal_features):\r\n # Temporal dummy variables (day, week, month, year)\r\n d, w, m, y = temporal_features[:, 0].unsqueeze(1), temporal_features[:, 1].unsqueeze(1), \\\r\n temporal_features[:, 2].unsqueeze(1), temporal_features[:, 3].unsqueeze(1)\r\n d_emb, w_emb, m_emb, y_emb = self.day_embedding(d), self.week_embedding(w), self.month_embedding(m), self.year_embedding(y)\r\n temporal_embeddings = self.dummy_fusion(torch.cat([d_emb, w_emb, m_emb, y_emb], dim=1))\r\n temporal_embeddings = self.dropout(temporal_embeddings)\r\n\r\n return temporal_embeddings\r\n\r\nclass TransformerDecoderLayer(nn.Module):\r\n\r\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=\"relu\"):\r\n super(TransformerDecoderLayer, self).__init__()\r\n \r\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\r\n\r\n # Implementation of Feedforward model\r\n self.linear1 = nn.Linear(d_model, dim_feedforward)\r\n self.dropout = nn.Dropout(dropout)\r\n self.linear2 = nn.Linear(dim_feedforward, d_model)\r\n\r\n self.norm2 = nn.LayerNorm(d_model)\r\n self.norm3 = nn.LayerNorm(d_model)\r\n self.dropout2 = nn.Dropout(dropout)\r\n self.dropout3 = nn.Dropout(dropout)\r\n\r\n self.activation = F.relu\r\n\r\n def __setstate__(self, state):\r\n if 'activation' not in state:\r\n state['activation'] = F.relu\r\n super(TransformerDecoderLayer, self).__setstate__(state)\r\n\r\n def forward(self, tgt, memory, tgt_mask = None, memory_mask = None, tgt_key_padding_mask = None, \r\n memory_key_padding_mask = None):\r\n\r\n tgt2, attn_weights = self.multihead_attn(tgt, memory, memory)\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\r\n tgt = tgt + self.dropout3(tgt2)\r\n tgt = self.norm3(tgt)\r\n return tgt, attn_weights\r\n\r\nclass GTM(pl.LightningModule):\r\n def 
__init__(self, embedding_dim, hidden_dim, output_dim, num_heads, num_layers, use_text, use_img, \\\r\n cat_dict, col_dict, fab_dict, trend_len, num_trends, gpu_num, use_encoder_mask=1, autoregressive=False):\r\n super().__init__()\r\n self.hidden_dim = hidden_dim\r\n self.embedding_dim = embedding_dim\r\n self.output_len = output_dim\r\n self.use_encoder_mask = use_encoder_mask\r\n self.autoregressive = autoregressive\r\n self.gpu_num = gpu_num\r\n self.save_hyperparameters()\r\n\r\n # Encoder\r\n self.dummy_encoder = DummyEmbedder(embedding_dim)\r\n self.image_encoder = ImageEmbedder()\r\n self.text_encoder = TextEmbedder(embedding_dim, cat_dict, col_dict, fab_dict, gpu_num)\r\n self.gtrend_encoder = GTrendEmbedder(output_dim, hidden_dim, use_encoder_mask, trend_len, num_trends, gpu_num)\r\n self.static_feature_encoder = FusionNetwork(embedding_dim, hidden_dim, use_img, use_text)\r\n\r\n # Decoder\r\n self.decoder_linear = TimeDistributed(nn.Linear(1, hidden_dim))\r\n decoder_layer = TransformerDecoderLayer(d_model=self.hidden_dim, nhead=num_heads, \\\r\n dim_feedforward=self.hidden_dim * 4, dropout=0.1)\r\n \r\n if self.autoregressive: self.pos_encoder = PositionalEncoding(hidden_dim, max_len=12)\r\n self.decoder = nn.TransformerDecoder(decoder_layer, num_layers)\r\n \r\n self.decoder_fc = nn.Sequential(\r\n nn.Linear(hidden_dim, self.output_len if not self.autoregressive else 1),\r\n nn.Dropout(0.2)\r\n )\r\n def _generate_square_subsequent_mask(self, size):\r\n mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)\r\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to('cuda:'+str(self.gpu_num))\r\n return mask\r\n\r\n def forward(self, category, color, fabric, temporal_features, gtrends, images):\r\n # Encode features and get inputs\r\n img_encoding = self.image_encoder(images)\r\n dummy_encoding = self.dummy_encoder(temporal_features)\r\n text_encoding = self.text_encoder(category, color, fabric)\r\n gtrend_encoding = self.gtrend_encoder(gtrends)\r\n\r\n # Fuse static features together\r\n static_feature_fusion = self.static_feature_encoder(img_encoding, text_encoding, dummy_encoding)\r\n\r\n if self.autoregressive == 1:\r\n # Decode\r\n tgt = torch.zeros(self.output_len, gtrend_encoding.shape[1], gtrend_encoding.shape[-1]).to('cuda:'+str(self.gpu_num))\r\n tgt[0] = static_feature_fusion\r\n tgt = self.pos_encoder(tgt)\r\n tgt_mask = self._generate_square_subsequent_mask(self.output_len)\r\n memory = gtrend_encoding\r\n decoder_out, attn_weights = self.decoder(tgt, memory, tgt_mask)\r\n forecast = self.decoder_fc(decoder_out)\r\n else:\r\n # Decode (generatively/non-autoregressively)\r\n tgt = static_feature_fusion.unsqueeze(0)\r\n memory = gtrend_encoding\r\n decoder_out, attn_weights = self.decoder(tgt, memory)\r\n forecast = self.decoder_fc(decoder_out)\r\n\r\n return forecast.view(-1, self.output_len), attn_weights\r\n\r\n def configure_optimizers(self):\r\n optimizer = Adafactor(self.parameters(),scale_parameter=True, relative_step=True, warmup_init=True, lr=None)\r\n \r\n return [optimizer]\r\n\r\n\r\n def training_step(self, train_batch, batch_idx):\r\n item_sales, category, color, fabric, temporal_features, gtrends, images = train_batch \r\n forecasted_sales, _ = self.forward(category, color, fabric, temporal_features, gtrends, images)\r\n loss = F.mse_loss(item_sales, forecasted_sales.squeeze())\r\n self.log('train_loss', loss)\r\n\r\n return loss\r\n\r\n def validation_step(self, test_batch, batch_idx):\r\n item_sales, 
category, color, fabric, temporal_features, gtrends, images = test_batch \r\n forecasted_sales, _ = self.forward(category, color, fabric, temporal_features, gtrends, images)\r\n \r\n return item_sales.squeeze(), forecasted_sales.squeeze()\r\n\r\n def validation_epoch_end(self, val_step_outputs):\r\n item_sales, forecasted_sales = [x[0] for x in val_step_outputs], [x[1] for x in val_step_outputs]\r\n item_sales, forecasted_sales = torch.stack(item_sales), torch.stack(forecasted_sales)\r\n rescaled_item_sales, rescaled_forecasted_sales = item_sales*1065, forecasted_sales*1065 # 1065 is the normalization factor (max of the sales of the training set)\r\n loss = F.mse_loss(item_sales, forecasted_sales.squeeze())\r\n mae = F.l1_loss(rescaled_item_sales, rescaled_forecasted_sales)\r\n self.log('val_mae', mae)\r\n self.log('val_loss', loss)\r\n\r\n print('Validation MAE:', mae.detach().cpu().numpy(), 'LR:', self.optimizers().param_groups[0]['lr'])\r\n"
] | [
[
"torch.nn.functional.l1_loss",
"torch.sin",
"torch.zeros",
"torch.cat",
"torch.FloatTensor",
"torch.nn.Dropout",
"torch.ones",
"torch.nn.MultiheadAttention",
"torch.nn.TransformerDecoder",
"torch.nn.TransformerEncoder",
"torch.arange",
"torch.cos",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.TransformerEncoderLayer",
"torch.stack",
"torch.nn.LayerNorm",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
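The `_generate_square_subsequent_mask` helper in the forecasting model above builds the standard causal mask for autoregressive decoding. A minimal standalone sketch of the same construction, assuming plain CPU tensors instead of the model's `cuda:<gpu_num>` device:

import torch

def square_subsequent_mask(size: int) -> torch.Tensor:
    # Upper-triangular ones, transposed so position i may attend only to 0..i.
    mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
    # Blocked positions become -inf so softmax assigns them zero weight.
    return mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0)

print(square_subsequent_mask(4))  # row t holds 0.0 at columns <= t, -inf elsewhere

Passed as `tgt_mask` to a transformer decoder, this keeps forecast step t from attending to later steps.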
joergfranke/recnet | [
"bfb8a359207258d4c2f71fe4a1304764f6f355cb"
] | [
"recnet/layer_pool/ln_reccurent_layer.py"
] | [
"from __future__ import absolute_import, print_function, division\n\"\"\"\nThis file contains the implementation of different layer normalized recurrent layers.\n\"\"\"\n\n###### Imports\n########################################\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom collections import OrderedDict\nfrom .layer_master import LayerMaster\n\n\n###### Conventional recurrent layer with layer normalization\n########################################\nclass conv_ln(LayerMaster):\n \"\"\"\n Hyperbolic tangent or rectified linear unit layer\n \"\"\"\n\n def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,go_backwards=False): # , prm_structure, layer_no ):\n\n # Parameters\n self.go_backwards = go_backwards\n self.activation = activation\n\n # Random\n self.rng = rng\n self.trng = trng\n\n if old_weights == None:\n\n np_weights = OrderedDict()\n\n np_weights['w_in_hidden'] = self.rec_uniform_sqrt(rng, n_in, n_out)\n np_weights['w_hidden_hidden'] = self.sqr_ortho(rng, n_out)\n np_weights['b_act'] = np.zeros(n_out)\n\n np_weights['ln_s1'] = np.ones(n_out)\n np_weights['ln_b1'] = np.zeros(n_out)\n np_weights['ln_s2'] = np.ones(n_out)\n np_weights['ln_b2'] = np.zeros(n_out)\n\n self.weights = []\n for kk, pp in np_weights.items():\n self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))\n\n # load old weights\n else:\n self.weights = []\n for pp in old_weights:\n self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))\n\n # Init last output and cell state\n init_hidden = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)\n self.t_init_hidden = theano.shared(name='init_hidden', value=init_hidden.astype(T.config.floatX))\n\n\n def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, w_hidden_hidden, b_act, ln_s1, ln_b1, ln_s2, ln_b2):\n\n pre_w_sig = T.dot(pre_out_sig, w_hidden_hidden)\n\n inner_act = self.activation\n\n pre_w_sig_ln = self.ln(pre_w_sig, ln_b1, ln_s1)\n cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b2, ln_s2)\n\n out_sig = inner_act(T.add(cur_w_in_sig_ln, pre_w_sig_ln, b_act))\n\n mask = T.addbroadcast(mask, 1)\n out_sig_m = mask * out_sig + (1. 
- mask) * pre_out_sig\n return [out_sig_m]\n\n def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):\n\n in_seq_d = T.switch(use_dropout,\n (in_seq *\n self.trng.binomial(in_seq.shape,\n p=dropout_value, n=1,\n dtype=in_seq.dtype)),\n in_seq)\n\n w_in_seq = T.dot(in_seq_d, self.weights[0])\n\n out_seq, updates = theano.scan(\n fn=self.t_forward_step,\n sequences=[mask, w_in_seq],\n outputs_info=[self.t_init_hidden],\n non_sequences=self.weights[1:],\n go_backwards=self.go_backwards,\n truncate_gradient=-1,\n # n_steps=50,\n strict=True,\n allow_gc=False,\n )\n return out_seq\n\n\n###### LSTM Layer with layer normalization\n########################################\nclass LSTMp_ln(LayerMaster):\n \"\"\"\n Long short term memory layer with layer normalization\n\n key ideas of implementation:\n - peepholes at input gate and forget gate but not at output gate\n - calculate dot product of input and input weights before scan function\n - calculate dot product of previous output and weights only ones per sequence\n - weights and biases separate\n - one function for each step, one for each sequence\n \"\"\"\n\n def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,\n go_backwards=False): # , prm_structure, layer_no ):\n\n # Parameters\n self.go_backwards = go_backwards\n self.activation = activation\n\n # Random\n self.rng = rng\n self.trng = trng\n\n self.t_n_out = theano.shared(name='t_n_out', value=n_out)\n\n if old_weights == None:\n\n np_weights = OrderedDict()\n # Input weights\n np_weights['w_ifco_x'] = self.rec_uniform_sqrt(rng, n_in, 4 * n_out)\n\n # Peephole weights (input- forget- output- gate)\n np_weights['w_ig_c'] = self.vec_uniform_sqrt(self.rng, n_out)\n np_weights['w_fg_c'] = self.vec_uniform_sqrt(self.rng,\n n_out) + 2 # Forgot gate with +2 initialized for keeping sequences right from begin\n np_weights['w_og_c'] = self.vec_uniform_sqrt(self.rng, n_out)\n # Previous output weights\n np_weights['w_ifco'] = self.rec_ortho(rng, n_out, 4)\n np_weights['b_ifco'] = np.zeros(4 * n_out)\n #np_weights['b_ifco'] = np.zeros(4 * n_out)\n\n # Layer normalization weights\n np_weights['ln_b1'] = np.zeros(4*n_out)\n np_weights['ln_s1'] = np.ones(4*n_out)\n\n np_weights['ln_b2'] = np.zeros(4*n_out)\n np_weights['ln_s2'] = np.ones(4*n_out)\n\n np_weights['ln_b3'] = np.zeros(n_out)\n np_weights['ln_s3'] = np.ones(n_out)\n\n\n self.weights = []\n for kk, pp in np_weights.items():\n self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))\n\n # load old weights\n else:\n self.weights = []\n for pp in old_weights:\n self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))\n\n # Init last output and cell state\n ol_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)\n cs_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)\n self.t_ol_t00 = theano.shared(name='ol_b_t00', value=ol_t00_np1.astype(T.config.floatX))\n self.t_cs_t00 = theano.shared(name='cs_b_t00', value=cs_t00_np1.astype(T.config.floatX))\n\n\n def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ig_c, w_fg_c, w_og_c, w_ifco, b_ifco, ln_b1,ln_s1, ln_b2,ln_s2,ln_b3,ln_s3,\n t_n_out):\n\n cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b1, ln_s1)\n\n pre_w_out_sig = T.dot(pre_out_sig, w_ifco)\n pre_w_out_sig_ln = self.ln(pre_w_out_sig, ln_b2, ln_s2)\n\n preact = T.add(cur_w_in_sig_ln, pre_w_out_sig_ln, b_ifco)\n\n inner_act = self.activation # T.nnet.hard_sigmoid T.tanh\n gate_act = self.sigmoid() # 
T.nnet.hard_sigmoid\n\n # Input Gate\n ig_t1 = gate_act(T.add(preact[:, 0:t_n_out], T.mul(pre_cell_sig, w_ig_c)))\n # Forget Gate\n fg_t1 = gate_act(T.add(preact[:, 1 * t_n_out:2 * t_n_out], T.mul(pre_cell_sig, w_fg_c),))\n # Cell State\n cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act( T.add(preact[:, 2 * t_n_out:3 * t_n_out]))))\n\n mask = T.addbroadcast(mask, 1)\n cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig\n # functionality: cs_t1 = T.switch(mask , cs_t1, pre_cell_sig)\n\n cs_t1_ln = self.ln(cs_t1, ln_b3, ln_s3)\n\n # Output Gate\n og_t1 = gate_act(\n T.add(preact[:, 3 * t_n_out:4 * t_n_out], T.mul(cs_t1_ln, w_og_c)))\n # Output LSTM\n out_sig = T.mul(og_t1, inner_act(cs_t1_ln))\n\n out_sig = mask * out_sig + (1. - mask) * pre_out_sig\n\n return [out_sig, cs_t1]\n\n def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):\n\n in_seq_d = T.switch(use_dropout,\n (in_seq *\n self.trng.binomial(in_seq.shape,\n p=dropout_value, n=1,\n dtype=in_seq.dtype)),\n in_seq)\n\n w_in_seq = T.dot(in_seq_d, self.weights[0])\n\n\n [out_seq, cell_seq], updates = theano.scan(\n fn=self.t_forward_step,\n sequences=[mask, w_in_seq],\n outputs_info=[self.t_ol_t00, self.t_cs_t00],\n non_sequences=self.weights[1:] + [self.t_n_out],\n go_backwards=self.go_backwards,\n truncate_gradient=-1,\n # n_steps=50,\n strict=True,\n allow_gc=False,\n )\n\n return out_seq\n\n\n###### LSTM without peepholes Layer with layer normalization\n########################################\nclass LSTM_ln(LayerMaster):\n \"\"\"\n Long short term memory layer without peepholes\n\n key ideas of implementation:\n - calculate dot product of input and input weights before scan function\n - calculate dot product of previous output and weights only ones per sequence\n - weights and biases separate\n - one function for each step, one for each sequence\n \"\"\"\n\n def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,\n go_backwards=False): # , prm_structure, layer_no ):\n\n # Parameters\n self.go_backwards = go_backwards\n self.activation = activation\n\n # Random\n self.rng = rng\n self.trng = trng\n\n self.t_n_out = theano.shared(name='t_n_out', value=n_out)\n\n if old_weights == None:\n\n np_weights = OrderedDict()\n\n # Input weights\n np_weights['w_ifco_x'] = self.rec_uniform_sqrt(rng, n_in, 4 * n_out)\n # Previous output weights\n np_weights['w_ifco'] = self.rec_ortho(rng, n_out, 4)\n np_weights['b_ifco'] = np.zeros(4 * n_out)\n\n\n\n # Layer normalization weights\n np_weights['ln_b1'] = np.zeros(4*n_out)\n np_weights['ln_s1'] = np.ones(4*n_out)\n\n np_weights['ln_b2'] = np.zeros(4*n_out)\n np_weights['ln_s2'] = np.ones(4*n_out)\n\n np_weights['ln_b3'] = np.zeros(n_out)\n np_weights['ln_s3'] = np.ones(n_out)\n\n self.weights = []\n for kk, pp in np_weights.items():\n self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))\n\n # load old weights\n else:\n self.weights = []\n for pp in old_weights:\n self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))\n\n # Init last output and cell state\n ol_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)\n cs_t00_np1 = np.zeros([n_batches, n_out]).astype(dtype=theano.config.floatX)\n self.t_ol_t00 = theano.shared(name='ol_b_t00', value=ol_t00_np1.astype(T.config.floatX))\n self.t_cs_t00 = theano.shared(name='cs_b_t00', value=cs_t00_np1.astype(T.config.floatX))\n\n # Outputs & cell states\n self.t_o = T.matrix('ol', dtype=theano.config.floatX)\n self.t_cs = 
T.vector('cs', dtype=theano.config.floatX)\n\n def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ifco, b_ifco,ln_b1,ln_s1, ln_b2,ln_s2,ln_b3,ln_s3,\n t_n_out):\n\n cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b1, ln_s1)\n\n pre_w_out_sig = T.dot(pre_out_sig, w_ifco)\n pre_w_out_sig_ln = self.ln(pre_w_out_sig, ln_b2, ln_s2)\n\n preact = T.add(cur_w_in_sig_ln, pre_w_out_sig_ln, b_ifco)\n\n\n\n inner_act = self.activation # T.nnet.hard_sigmoid #T.tanh # T.nnet.hard_sigmoid T.tanh\n gate_act = self.sigmoid() # T.nnet.hard_sigmoid #T.nnet.sigmoid\n\n # Input Gate\n ig_t1 = gate_act(preact[:, 0:t_n_out])\n # Forget Gate\n fg_t1 = gate_act(preact[:, 1 * t_n_out:2 * t_n_out])\n # Cell State\n cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act(preact[:, 2 * t_n_out:3 * t_n_out])))\n\n mask = T.addbroadcast(mask, 1)\n cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig\n\n cs_t1_ln = self.ln(cs_t1, ln_b3, ln_s3)\n # Output Gate\n og_t1 = gate_act(preact[:, 3 * t_n_out:4 * t_n_out])\n # Output LSTM\n out_sig = T.mul(og_t1, inner_act(cs_t1_ln))\n\n out_sig = mask * out_sig + (1. - mask) * pre_out_sig\n\n return [out_sig, cs_t1]\n\n def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):\n\n in_seq_d = T.switch(use_dropout,\n (in_seq *\n self.trng.binomial(in_seq.shape,\n p=dropout_value, n=1,\n dtype=in_seq.dtype)),\n in_seq)\n\n w_in_seq = T.dot(in_seq_d, self.weights[0])\n\n [out_seq, cell_seq], updates = theano.scan(\n fn=self.t_forward_step,\n sequences=[mask, w_in_seq],\n outputs_info=[self.t_ol_t00, self.t_cs_t00],\n non_sequences=self.weights[1:] + [self.t_n_out],\n go_backwards=self.go_backwards,\n truncate_gradient=-1,\n # n_steps=50,\n strict=True,\n allow_gc=False,\n )\n\n return out_seq\n\n\n###### GRU Layer with layer normalization\n########################################\nclass GRU_ln(LayerMaster):\n \"\"\"\n Gated recurrent unit layer\n\n key ideas of implementation:\n \"\"\"\n\n def __init__(self, rng, trng, n_in, n_out, n_batches, activation, old_weights=None,go_backwards=False):\n\n # Parameters\n self.go_backwards = go_backwards\n self.activation = activation\n\n # Random\n self.rng = rng\n self.trng = trng\n\n self.t_n_out = theano.shared(name='t_n_out', value=n_out)\n\n if old_weights == None:\n\n np_weights = OrderedDict()\n\n # Input weights for reset/update gate and update weights\n np_weights['w_rzup'] = self.rec_uniform_sqrt(rng,n_in, 3 * n_out ) # rng.uniform(-0.1, 0.1,(n_in, 3 * n_out))\n np_weights['b_rzup'] = np.zeros(3 * n_out)\n\n # reset and update gate\n np_weights['u_rz'] = self.rec_ortho(rng, n_out, 2) #self.uniform(-0.1, 0.1, (n_out, n_out))\n\n # update weights\n np_weights['u_up'] = self.sqr_ortho(rng, n_out) #rng.uniform(-0.1, 0.1, (n_out, n_out))\n\n # Layer normalization weights\n np_weights['ln_b1'] = np.zeros(3*n_out)\n np_weights['ln_s1'] = np.ones(3*n_out)\n\n np_weights['ln_b2'] = np.zeros(2*n_out)\n np_weights['ln_s2'] = np.ones(2*n_out)\n\n np_weights['ln_b3'] = np.zeros(n_out)\n np_weights['ln_s3'] = np.ones(n_out)\n\n\n self.weights = []\n for kk, pp in np_weights.items():\n self.weights.append(theano.shared(name=kk, value=pp.astype(T.config.floatX)))\n\n # load old weights\n else:\n self.weights = []\n for pp in old_weights:\n self.weights.append(theano.shared(value=pp.astype(T.config.floatX)))\n\n #Init last output and cell state\n ol_t00_np1 = np.zeros([n_batches,n_out]).astype(dtype=theano.config.floatX)\n self.t_ol_t00 = theano.shared(name='ol_b_t00', 
value=ol_t00_np1.astype(T.config.floatX))\n\n def t_forward_step(self,mask, rzup_in_sig, h_pre,b_rzup, u_rz, u_up,ln_b1,ln_s1, ln_b2,ln_s2,ln_b3,ln_s3, t_n_out):\n\n\n\n signal_act = self.activation\n gate_act = self.sigmoid()\n\n rzup_in_sig_ln = self.ln(rzup_in_sig, ln_b1, ln_s1)\n\n rzup_b_in_sig_ln = T.add(rzup_in_sig_ln, b_rzup)\n\n preact = T.dot( h_pre, u_rz)\n\n preact_ln = self.ln(preact, ln_b2, ln_s2)\n\n r = gate_act( T.add( rzup_b_in_sig_ln[:, 0:t_n_out] , preact_ln[:, 0:t_n_out] ))\n z = gate_act( T.add( rzup_b_in_sig_ln[:, t_n_out:2 * t_n_out] , preact_ln[:, t_n_out:2 * t_n_out] ))\n\n preactx = T.dot(h_pre , u_up)\n preactx_ln = self.ln(preactx, ln_b3, ln_s3)\n h_pre_r_ln = T.mul( preactx_ln, r)\n\n h_update = signal_act( T.add( rzup_b_in_sig_ln[:, 2*t_n_out:3*t_n_out] , h_pre_r_ln ))\n\n h_new = T.add( (1.-z) * h_update , z * h_pre )\n\n mask = T.addbroadcast(mask, 1)\n out_sig = T.add( mask * h_new , (1. - mask) * h_pre )\n\n return out_sig\n\n def sequence_iteration(self, in_seq, mask, use_dropout,dropout_value=1):\n\n in_seq_d = T.switch(use_dropout,\n (in_seq *\n self.trng.binomial(in_seq.shape,\n p=dropout_value, n=1,\n dtype=in_seq.dtype)),\n in_seq)\n\n rz_in_seq = T.dot(in_seq_d, self.weights[0])\n\n out_seq, updates = theano.scan(\n fn=self.t_forward_step,\n sequences=[mask, rz_in_seq],\n outputs_info=[self.t_ol_t00],\n non_sequences=[i for i in self.weights][1:] + [self.t_n_out],\n go_backwards = self.go_backwards,\n truncate_gradient=-1,\n #n_steps=50,\n strict=True,\n allow_gc=False,\n )\n return out_seq"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
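Every layer above normalizes its pre-activations through an `ln(...)` helper inherited from `LayerMaster`, which is not part of this file. A minimal NumPy sketch of what such a layer-normalization step computes; the gain `s`, bias `b`, and `eps` value here are assumptions:

import numpy as np

def layer_norm(x, s, b, eps=1e-5):
    # Normalize each sample (last axis) to zero mean / unit std,
    # then rescale with a learned gain s and shift b.
    mu = x.mean(axis=-1, keepdims=True)
    sigma = x.std(axis=-1, keepdims=True)
    return s * (x - mu) / (sigma + eps) + b

x = np.random.randn(2, 8)
out = layer_norm(x, s=np.ones(8), b=np.zeros(8))
print(out.mean(axis=-1), out.std(axis=-1))  # per-row mean ~0, std ~1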
klevis-a/process-vicon-biplane | [
"f140589b4705f0d6411b80b8e2699add68d08662"
] | [
"biplane_kine/graphing/vicon_accuracy_plotters.py"
] | [
"\"\"\"A module that provides plotters for Vicon markers that have been tracked via biplane fluoroscopy to ascertain the\nspatiotemporal syncing accuracy between the Vicon and biplane fluoroscopy systems .\"\"\"\n\nimport numpy as np\nfrom typing import Sequence, List\nimport matplotlib.figure\nimport matplotlib.pyplot as plt\nfrom biplane_kine.graphing.common_graph_utils import make_interactive\nfrom biplane_kine.graphing.smoothing_graph_utils import marker_graph_init, marker_graph_add, marker_graph_title\nfrom biplane_kine.graphing.vicon_accuracy_graph_utils import marker_diff_graph\nfrom biplane_kine.vicon_biplane_diff import BiplaneViconDiff, BiplaneViconSmoothDiff\n\n\nclass ViconAccuracyPlotter:\n \"\"\"Plotter that enables comparing marker position data as measured by Vicon (raw/labeled) and biplane\n fluoroscopy.\n\n Attributes\n ----------\n trial_name: str\n Trial identifier.\n marker_name: str\n Marker identifier.\n biplane_vicon_diff: biplane_kine.vicon_biplane_diff.BiplaneViconDiff\n Contains differences between marker position data as measured via Vicon and biplane fluoroscopy, and associated\n statistics.\n vicon_data_raw: numpy.ndarray, (N, 3)\n Raw (labeled) Vicon marker data positions.\n vicon_indices: numpy.ndarray, (N, )\n Zero-based indices for the Vicon frames.\n vicon_frames: numpy.ndarray, (N, )\n One-based indices for the Vicon frames - useful for plotting.\n biplane_data: numpy.ndarray, (N, )\n Marker data positions as measured via biplane fluoroscopy.\n \"\"\"\n\n def __init__(self, trial_name: str, marker_name: str, biplane_vicon_diff: BiplaneViconDiff):\n self.trial_name = trial_name\n self.marker_name = marker_name\n self.biplane_vicon_diff = biplane_vicon_diff\n self.vicon_data_raw = biplane_vicon_diff.vmd_fluoro\n self.vicon_indices = np.arange(self.vicon_data_raw.shape[0])\n self.vicon_frames = self.vicon_indices + 1\n self.biplane_data = np.full((self.vicon_data_raw.shape[0], 3), np.nan)\n self.biplane_data[biplane_vicon_diff.biplane_marker_data.indices, :] = \\\n biplane_vicon_diff.biplane_marker_data.data\n\n # biplane_vicon_diff.raw_diff contains differences between Vicon and biplane only at the frames where the\n # marker was tracked via biplane fluoroscopy. This doesn't include the entire biplane fluoroscopy trial. This\n # is fine, but when graphing it's useful to see the entire trial timeline. Matplotlib doesn't graph NaN values\n # so we first create a matrix with all NaN values that encompasses the entire timeline of the biplane\n # fluoroscopy trial. 
Then copy the Vicon/biplane fluoroscopy differences to the frames where the marker was\n # measured via biplane fluoroscopy.\n self._diff_raw = np.full((self.vicon_data_raw.shape[0], 3), np.nan)\n self._diff_raw[biplane_vicon_diff.biplane_marker_data.indices, :] = biplane_vicon_diff.raw_diff\n self._diff_raw_scalar = np.full((self.vicon_data_raw.shape[0],), np.nan)\n self._diff_raw_scalar[biplane_vicon_diff.biplane_marker_data.indices] = biplane_vicon_diff.raw_diff_scalar\n\n def plot(self) -> List[matplotlib.figure.Figure]:\n \"\"\"Plot figures quantifying differences in marker position as measured via Vicon versus biplane fluoroscopy.\n\n Figure 0: Trend plots of marker position tracked via biplane fluoroscopy vs Vicon (raw/labeled) overlayed\n Figure 1: Trend plots of the difference between marker position data as tracked via biplane fluoroscopy vs\n Vicon (raw/labeled)\n \"\"\"\n title = self.trial_name + ' ' + self.marker_name\n figs = []\n\n # plot biplane and vicon marker data together\n acc_vicon_fig = self.plot_biplane_vicon(title, 0, 'vicon_data_raw', 'Raw')\n figs.append(acc_vicon_fig)\n\n # plot difference\n diff_fig = self.plot_diff(title, 1, ['_diff_raw', '_diff_raw_scalar'], 'raw')\n figs.append(diff_fig)\n\n return figs\n\n def plot_biplane_vicon(self, title: str, fig_num: int, vicon_field: str, vicon_type: str) \\\n -> matplotlib.figure.Figure:\n \"\"\"Plot overlayed marker position data as measured via Vicon and biplane fluoroscopy.\"\"\"\n fig = plt.figure(num=fig_num)\n ax = fig.subplots(3, 1, sharex=True)\n lines_vicon = marker_graph_init(ax, getattr(self, vicon_field), 'Distance (mm)', self.vicon_frames,\n color='limegreen', marker='.', lw=1, ms=2)\n lines_biplane = marker_graph_add(ax, self.biplane_data, self.vicon_frames, color='indigo', marker='.')\n\n fig.legend((lines_biplane[0], lines_vicon[0]), ('Biplane', vicon_type + ' Vicon'), 'upper right', ncol=3,\n columnspacing=0.3, handlelength=1.0)\n marker_graph_title(fig, title)\n make_interactive()\n return fig\n\n def plot_diff(self, title: str, fig_num: int, vicon_fields: Sequence[str], diff_field: str) \\\n -> matplotlib.figure.Figure:\n \"\"\"Plot difference between marker position data as measured via Vicon and biplane fluoroscopy.\"\"\"\n fig = plt.figure(num=fig_num)\n ax = fig.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]})\n\n lines_xyz = marker_diff_graph(ax[0], getattr(self, vicon_fields[0]), 'Distance (mm)', x_label=None,\n x_data=self.vicon_frames)\n line_scalar = marker_diff_graph(ax[1], getattr(self, vicon_fields[1]), 'Distance (mm)', None,\n self.vicon_frames, color='indigo')\n fig.legend(lines_xyz + line_scalar, ('X', 'Y', 'Z', '| |'), loc='lower center', ncol=4, columnspacing=0.3,\n handlelength=1.0)\n fig.suptitle(title, fontsize=11, fontweight='bold')\n plt.tight_layout(pad=1.0, h_pad=0, rect=(0, 0.015, 1, 1))\n\n # add RMS, MAE, Max for each individual x, y, z\n text_align = [(0.01, 0.99, 'left', 'top'), (0.99, 0.99, 'right', 'top'), (0.01, 0.01, 'left', 'bottom')]\n cc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n for n in range(3):\n xyz_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\\\n .format(getattr(self.biplane_vicon_diff, diff_field + '_rms')[n],\n getattr(self.biplane_vicon_diff, diff_field + '_mae')[n],\n getattr(self.biplane_vicon_diff, diff_field + '_max')[n])\n ax[0].text(text_align[n][0], text_align[n][1], xyz_text, ha=text_align[n][2], va=text_align[n][3],\n transform=fig.transFigure, fontweight='bold',\n bbox=dict(ec=cc[n], fc='None', boxstyle='round', 
lw=2))\n\n # add RMS, MAE, Max for scalar\n scalar_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\\\n .format(getattr(self.biplane_vicon_diff, diff_field + '_rms_scalar'),\n getattr(self.biplane_vicon_diff, diff_field + '_mae_scalar'),\n getattr(self.biplane_vicon_diff, diff_field + '_max_scalar'))\n ax[0].text(0.99, 0.01, scalar_text, ha='right', va='bottom', transform=fig.transFigure, fontweight='bold',\n bbox=dict(ec='indigo', fc='None', boxstyle='round', lw=2))\n make_interactive()\n return fig\n\n\nclass ViconAccuracySmoothingPlotter(ViconAccuracyPlotter):\n \"\"\"Plotter that enables comparing marker position data as measured by Vicon (then smoothed) and biplane\n fluoroscopy.\n\n Attributes\n ----------\n vicon_data_smoothed: numpy.ndarray, (N, 3)\n Smoothed Vicon marker data position.\n \"\"\"\n\n def __init__(self, trial_name: str, marker_name: str, biplane_vicon_smooth_diff: BiplaneViconSmoothDiff):\n super().__init__(trial_name, marker_name, biplane_vicon_smooth_diff)\n self.vicon_data_smoothed = biplane_vicon_smooth_diff.smoothed_vmd_fluoro\n\n # see comment in base class as to why the operation below is performed\n self._diff_smoothed = np.full((self.vicon_data_raw.shape[0], 3), np.nan)\n self._diff_smoothed[biplane_vicon_smooth_diff.biplane_marker_data.indices, :] = \\\n biplane_vicon_smooth_diff.smoothed_diff\n self._diff_smoothed_scalar = np.full((self.vicon_data_raw.shape[0],), np.nan)\n self._diff_smoothed_scalar[biplane_vicon_smooth_diff.biplane_marker_data.indices] = \\\n biplane_vicon_smooth_diff.smoothed_diff_scalar\n\n def plot(self) -> List[matplotlib.figure.Figure]:\n \"\"\"Plot figures quantifying differences in marker position as measured via Vicon, Vicon (then smoothed), and\n biplane fluoroscopy.\n\n Figure 0: Trend plots of marker position tracked via biplane fluoroscopy vs Vicon (raw/labeled) overlayed\n Figure 1: Trend plots of the difference between marker position data as tracked via biplane fluoroscopy vs\n Vicon (raw/labeled)\n Figure 2: Trend plots of marker position tracked via biplane fluoroscopy vs Vicon (smoothed) overlayed\n Figure 3: Trend plots of the difference between marker position data as tracked via biplane fluoroscopy vs\n Vicon (smoothed)\n Figure 4: Figure 1 and 3 overlayed on top of each other\n \"\"\"\n title = self.trial_name + ' ' + self.marker_name\n figs = super().plot()\n\n # plot biplane and vicon marker data together\n acc_vicon_fig = self.plot_biplane_vicon(title, 2, 'vicon_data_smoothed', 'Smooth ')\n figs.append(acc_vicon_fig)\n\n # plot difference\n diff_fig = self.plot_diff(title, 3, ['_diff_smoothed', '_diff_smoothed_scalar'], 'smoothed')\n figs.append(diff_fig)\n\n # plot all differences in the same plot\n diff_all_fig = self.plot_all_diff(title, 4)\n figs.append(diff_all_fig)\n\n return figs\n\n def plot_all_diff(self, title: str, fig_num: int) -> matplotlib.figure.Figure:\n \"\"\"Overlay plot differences in marker position as measured via Vicon and biplane fluoroscopy, and differences\n in marker position as measured Vicon (then smoothed) and biplane fluoroscopy.\"\"\"\n fig = plt.figure(num=fig_num)\n ax = fig.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]})\n\n lines_xyz_raw = marker_diff_graph(ax[0], self._diff_raw, 'Distance (mm)', x_label=None,\n x_data=self.vicon_frames, ls='--')\n line_scalar_raw = marker_diff_graph(ax[1], self._diff_raw_scalar, 'Distance (mm)', 'Frame Number',\n self.vicon_frames, color='indigo', ls=':')\n # reset colors\n ax[0].set_prop_cycle(None)\n lines_xyz_smooth = 
marker_diff_graph(ax[0], self._diff_smoothed, 'Distance (mm)', x_label=None,\n x_data=self.vicon_frames)\n line_scalar_smooth = marker_diff_graph(ax[1], self._diff_smoothed_scalar, 'Distance (mm)', 'Frame Number',\n self.vicon_frames, color='indigo')\n leg1 = fig.legend(lines_xyz_raw + line_scalar_raw, ('X (Raw)', 'Y', 'Z', '$\\\\mid \\\\mid$'), loc='lower left',\n handletextpad=0.1, ncol=4, columnspacing=0.5, handlelength=1.0, bbox_to_anchor=(0.0, 0.0))\n fig.legend(lines_xyz_smooth + line_scalar_smooth, ('X (Smooth)', 'Y', 'Z', '$\\\\mid \\\\mid$'), loc='lower right',\n handletextpad=0.1, ncol=4, columnspacing=0.5, handlelength=1.0, bbox_to_anchor=(1.0, 0.0))\n fig.add_artist(leg1)\n fig.suptitle(title, fontsize=11, fontweight='bold')\n plt.tight_layout(pad=1.0, h_pad=0, rect=(0, 0.015, 1, 1))\n\n # add RMS, MAE, Max\n raw_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\\\n .format(self.biplane_vicon_diff.raw_rms_scalar, self.biplane_vicon_diff.raw_mae_scalar,\n self.biplane_vicon_diff.raw_max_scalar)\n smooth_text = 'RMS: {:.2f} MAE: {:.2f} Max: {:.2f}'\\\n .format(self.biplane_vicon_diff.smoothed_rms_scalar, self.biplane_vicon_diff.smoothed_mae_scalar,\n self.biplane_vicon_diff.smoothed_max_scalar)\n ax[0].text(0.01, 0.99, raw_text, ha='left', va='top', transform=fig.transFigure, fontweight='bold',\n bbox=dict(ec='indigo', fc='None', boxstyle='round', ls=':', lw=2))\n ax[0].text(0.99, 0.99, smooth_text, ha='right', va='top', transform=fig.transFigure, fontweight='bold',\n bbox=dict(ec='indigo', fc='None', boxstyle='round', lw=2))\n make_interactive()\n return fig\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
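The plotters above exploit the fact that matplotlib leaves gaps at NaN values: Vicon/biplane differences exist only at biplane-tracked frames, so they are copied into a NaN-filled array spanning the whole trial before plotting. A minimal sketch of that padding trick, with made-up frame indices:

import numpy as np
import matplotlib.pyplot as plt

n_frames = 100
tracked_idx = np.arange(20, 60)            # frames where biplane tracking exists
tracked_vals = np.random.rand(tracked_idx.size)

full = np.full(n_frames, np.nan)           # NaN everywhere ...
full[tracked_idx] = tracked_vals           # ... except the tracked frames

plt.plot(np.arange(n_frames) + 1, full, marker='.')  # 1-based frame axis
plt.show()                                 # untracked spans render as gaps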
GitHubEmploy/DashboardWebsite | [
"1b84b2df36e6a360c6b91d8d7ba5fc4f64332698"
] | [
"app/views.py"
] | [
"# -*- encoding: utf-8 -*-\n\"\"\"\nLicense: MIT\nCopyright (c) 2019 - present AppSeed.us\n\"\"\"\n\n# Python modules\nimport fnmatch\nimport os, logging\n\n# Flask modules\nimport threading\n\nimport requests\nfrom flask import render_template, request, url_for, redirect, send_from_directory, session, Response, flash\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom werkzeug.exceptions import HTTPException, NotFound, abort\nimport alpaca_trade_api as tradeapi\nimport datetime\nimport random\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n# App modules\nimport pandas as pd\nfrom yahoo_fin.stock_info import get_data\nfrom yahoo_fin.stock_info import get_day_gainers\nimport sys\nimport trace\nfrom app import app, lm, db, bc\nfrom app.models import User\nfrom app.forms import LoginForm, RegisterForm\n\nfrom StratusDashboard.app.forms import APIForm\n\nuserlist = {}\n\ndef findtopstock():\n url = 'https://finance.yahoo.com/screener/predefined/most_actives'\n read = pd.read_html(url)[0]\n symbols = read['Symbol'][0]\n change = read['% Change'][0]\n return symbols, change\ndef findgainer():\n url = 'https://finance.yahoo.com/gainers'\n read = pd.read_html(url)[0]\n symbols = read['Symbol']\n change = read['% Change']\n price = read['Price (Intraday)']\n return symbols, change, price\ndef findReplace(directory, find, replace, filePattern):\n for path, dirs, files in os.walk(os.path.abspath(directory)):\n for filename in fnmatch.filter(files, filePattern):\n filepath = os.path.join(path, filename)\n with open(filepath) as f:\n s = f.read()\n s = s.replace(find, replace)\n with open(filepath, \"w\") as f:\n f.write(s)\n f.close()\n\ndef human_format(num):\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n # add more suffixes if you need them\n return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])\n\nlfcount = 0\n\ndef replace(apikey, apisecret, apiurl):\n api = tradeapi.REST(apikey, apisecret, apiurl)\n one_month = date.today() + relativedelta(hours=-5)\n indexreturn = ''\n resstock = ''\n daygraph = []\n jsquery = ''\n ccount = 0\n numblist = []\n topstocklist = []\n openpositions = []\n domain = 'StratusDashboard.githubemploy.repl.co'\n account = api.get_account()\n gainer, gainerchange, gainerprice = findgainer()\n\n lastMonth = (datetime.date.today().replace(day=1) - datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n lastdate = api.get_portfolio_history(date_start=lastMonth, date_end=datetime.date.today(), period=None,\n timeframe='15Min')\n dayrundict = api.get_portfolio_history(date_start=str(str(datetime.date.today()).split('-')[0]) + '-' + str(str(datetime.date.today()).split('-')[1]) + '-' + str(int(str(datetime.date.today()).split('-')[2])-1), date_end=datetime.date.today(), period=None, timeframe='15Min').df.to_dict()['equity'].values()\n balance_change = str(round(float(account.equity) - float(account.last_equity), 2))\n print(balance_change)\n topstock, stockchange = findtopstock()\n topstockdata = get_data(topstock, start_date = str(one_month), end_date = datetime.date.today(), index_as_date = True, interval = \"1d\").to_dict()['open'].values()\n\n for item in topstockdata:\n topstocklist.append(item)\n for loop in range(0, 6, 1):\n numblist.append(str(random.randint(0,18)))\n with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/templates/pages/index.html', 'r') as reader:\n for line in reader:\n indexreturn = indexreturn + line\n with 
open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/static/assets/js/argon.js', 'r') as reader:\n for line in reader:\n jsquery = jsquery + line\n with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/static/assets/js/argon.js', 'w+') as reader:\n jsquery = jsquery.replace('[0, 20, 10, 30, 15, 40, 20, 60, 60]', str(numblist))\n reader.write(jsquery)\n with open('/Users/mohit/PycharmProjects/SerpentAI/StratusDashboard/app/templates/newpages/index.html', 'w+') as writer:\n if float(account.buying_power) <= float(list(lastdate.df.to_dict()['equity'].values())[0]):\n resacc = \"fas fa-arrow-down\"\n accolor = 'text-danger mr-2'\n if float(account.buying_power) > float(list(lastdate.df.to_dict()['equity'].values())[0]):\n resacc = \"fa fa-arrow-up\"\n accolor = 'text-success mr-2'\n if str(''.join([i for i in stockchange if not i.isdigit()])).strip().replace('.', '').replace('%', '') == '-':\n resstock = \"fas fa-arrow-down\"\n stockcolor = 'text-danger mr-2'\n if str(''.join([i for i in stockchange if not i.isdigit()])).strip().replace('.', '').replace('%', '') == '+':\n resstock = \"fa fa-arrow-up\"\n stockcolor = 'text-success mr-2'\n stockchange = str(stockchange).replace('+', '').replace('-', '')\n portfolio = api.list_positions()\n # Print the quantity of shares for each position.\n for position in portfolio:\n openpositions.append(str(position.symbol))\n\n sendvar = str(indexreturn).replace('REPLACEACCOUNTVALUE', str(account.buying_power) + '$').replace('ACCARROW', resacc).replace('ACCPERCENT', str(human_format(abs(float(account.buying_power) - float(list(lastdate.df.to_dict()['equity'].values())[0]))))).replace('PROFITLOSS', str(balance_change)).replace('REPLACESTOCK', topstock).replace('REPLACECHANGE', str(stockchange)).replace('RESSTOCK', resstock).replace('TEXTSTOCK', stockcolor).replace('ACCOLOR', accolor).replace('OPENPOSITIONS', str(len(openpositions))+' Stock(s)')\n sendvar = sendvar.replace('REPLACEDAILYDATA', str(topstocklist))\n for item in api.list_orders(status='closed', limit=5):\n ccount = ccount + 1\n sendvar = sendvar.replace('ITEM'+str(ccount), str(item.symbol))\n sendvar = sendvar.replace('SHARES'+str(ccount), str(item.qty))\n sendvar = sendvar.replace('SIDE'+str(ccount), str(item.side))\n if str(item.side) == 'buy':\n sendvar = sendvar.replace('CLASS'+str(ccount), 'fas fa-arrow-up text-success mr-3')\n else:\n sendvar = sendvar.replace('CLASS'+str(ccount), 'fas fa-arrow-down text-warning mr-3')\n sendvar = sendvar.replace('TYPE'+str(ccount), str(item.time_in_force))\n #print(item.symbol, item.qty, item.side, item.time_in_force)\n for loop in range(0, 6, 1):\n #print(str(str(gainerchange[loop]).replace('%', '').replace('+', '').replace('-', '').strip()))\n sendvar = sendvar.replace('GAINPRICE'+str(loop), str(gainerprice[loop])+'$')\n sendvar = sendvar.replace('GAINSTOCK'+str(loop), gainer[loop])\n sendvar = sendvar.replace('GAINPERCENT'+str(loop), str(str(gainerchange[loop]).replace('%', '').replace('+', '').replace('-', '').strip()))\n sendvar = sendvar.replace('DOMAINPORT', domain).replace('APIKEY', apikey).replace('APISECRET', apisecret).replace('APIURL', apiurl)\n writer.write(sendvar)\n\nsession = {}\[email protected]_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\[email protected]('/startform.html', methods=['GET', 'POST'])\ndef startform():\n form = APIForm(request.form)\n if form.validate() or request.method == \"POST\":\n session['apikey'] = str(form.pubapi.data)\n session['secretkey'] = 
str(form.secapi.data)\n session['urlkey'] = str(form.urlapi.data)\n print(form.pubapi.data, form.secapi.data, form.urlapi.data)\n return redirect('/start.html')\n return render_template('layouts/api-default.html', content=render_template('pages/startform.html', form=form))\n #return render_template('pages/startform.html', form=form)\n\[email protected]('/start.html')\ndef start():\n try:\n apikey = session['apikey']\n except:\n return render_template('layouts/api-default.html', content=render_template('pages/404.html'))\n privatekey = session['secretkey']\n apiurl = session['urlkey']\n usedir = str('user' + apikey)\n isDirectory = os.path.isdir(usedir)\n runyn = True\n def runuser():\n os.system('cd ' + usedir + ' && python RunFile.py')\n if isDirectory == True:\n userlist[apikey] = threading.Thread(target=runuser)\n userlist[apikey].start()\n elif isDirectory == False:\n replace(str(apikey), str(privatekey), str(apiurl))\n os.system('git clone https://github.com/GitHubEmploy/SuperSimpleAITrading.git '+usedir)\n findReplace(str(str('user') + str(apikey)), \"publicapikey\", apikey, \"*.csv\")\n findReplace(str(str('user') + str(apikey)), \"secretapikey\", privatekey, \"*.csv\")\n findReplace(str(str('user') + str(apikey)), \"usageurl\", apiurl, \"*.csv\")\n\n userlist[apikey] = threading.Thread(target=runuser)\n userlist[apikey].start()\n return render_template('layouts/api-default.html', content=render_template('pages/startedproc.html'))\n# Logout user\[email protected]('/logout.html')\ndef logout():\n logout_user()\n form = LoginForm(request.form)\n return redirect(\"/login.html\")\n\n\n# Register a new user\[email protected]('/register.html', methods=['GET', 'POST'])\ndef register():\n # declare the Registration Form\n form = RegisterForm(request.form)\n\n msg = None\n\n if request.method == 'GET':\n return render_template('layouts/auth-default.html',\n content=render_template('pages/register.html', form=form, msg=msg))\n\n # check if both http method is POST and form is valid on submit\n if form.validate_on_submit():\n\n # assign form data to variables\n username = request.form.get('username', '', type=str)\n password = request.form.get('password', '', type=str)\n email = request.form.get('email', '', type=str)\n\n # filter User out of database through username\n user = User.query.filter_by(user=username).first()\n\n # filter User out of database through username\n user_by_email = User.query.filter_by(email=email).first()\n\n if user or user_by_email:\n msg = 'Error: User exists!'\n\n else:\n\n pw_hash = password # bc.generate_password_hash(password)\n\n user = User(username, email, pw_hash)\n\n user.save()\n\n msg = 'User created, please <a href=\"' + url_for('login') + '\">login</a>'\n\n else:\n msg = 'Input error'\n\n return render_template('layouts/auth-default.html',\n content=render_template('pages/register.html', form=form, msg=msg))\n\n# Authenticate user\[email protected]('/login.html', methods=['GET', 'POST'])\ndef login():\n # Declare the login form\n form = LoginForm(request.form)\n\n # Flask message injected into the page, in case of any errors\n msg = None\n\n # check if both http method is POST and form is valid on submit\n if form.validate_on_submit():\n\n # assign form data to variables\n username = request.form.get('username', '', type=str)\n password = request.form.get('password', '', type=str)\n\n # filter User out of database through username\n user = User.query.filter_by(user=username).first()\n\n if user:\n\n # if bc.check_password_hash(user.password, password):\n 
if user.password == password:\n login_user(user)\n return redirect('/')\n else:\n msg = \"Wrong password. Please try again.\"\n else:\n msg = \"Unkown user\"\n\n return render_template('layouts/auth-default.html',\n content=render_template('pages/login.html', form=form, msg=msg))\n\[email protected]('/status.html', methods=['GET', 'POST'])\ndef statusapi():\n apikey = session['apikey']\n try:\n userlist[apikey].isAlive()\n return render_template('layouts/api-default.html', content=render_template('pages/apialive.html'))\n except:\n try:\n return render_template('layouts/api-default.html', content=render_template('pages/apinotalive.html'))\n except:\n return render_template('layouts/api-default.html', content=render_template('pages/404.html'))\[email protected]('/stop.html', methods=['GET', 'POST'])\ndef stopapi():\n apikey = session['apikey']\n runyn = False\n return render_template('layouts/api-default.html', content=render_template('pages/stopapi.html'))\n #return 'Stopping Process Gracefully, this may take up to 10 minutes. Please be patient.'\[email protected]('/', methods=['GET', 'POST'])\ndef default():\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n form = APIForm(request.form)\n if form.validate() or request.method == \"POST\":\n try:\n session['apikey'] = str(form.pubapi.data)\n session['secretkey'] = str(form.secapi.data)\n session['urlkey'] = str(form.urlapi.data)\n replace(str(form.pubapi.data), str(form.secapi.data), str(form.urlapi.data))\n except:\n return render_template('layouts/api-default.html', content=render_template('pages/invalidapi.html', form=form, msg='Invalid API Keys/Combination. Visit https://alpaca.markets to get your API Keys!'))\n #print(form.pubapi.data, form.secapi.data, form.urlapi.data)\n return render_template('layouts/default.html', content=render_template('newpages/index.html'))\n return render_template('layouts/api-default.html', content=render_template('pages/startform.html', form=form))\n\n # print(str(indexreturn).replace('REPLACESERVERSTATUS', str(account.buying_power)))\n\n\[email protected]('/<path>')\ndef index(path):\n return render_template('layouts/auth-default.html',\n content=render_template( 'pages/404.html' ) )\n\n\n# Return sitemap\[email protected]('/sitemap.xml')\ndef sitemap():\n return send_from_directory(os.path.join(app.root_path, 'static'), 'sitemap.xml')\n"
] | [
[
"pandas.read_html"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
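`findtopstock()` and `findgainer()` above both lean on `pandas.read_html`, which parses every `<table>` on a page into a list of DataFrames. A minimal sketch of the same pattern; it assumes network access and that Yahoo's screener page still serves its results as a plain HTML table with these column names:

import pandas as pd

url = 'https://finance.yahoo.com/screener/predefined/most_actives'
table = pd.read_html(url)[0]              # first <table> on the page
print(table[['Symbol', '% Change']].head(5))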
juliaavaladares/data-science-ethereum | [
"035cf959123645394e09566092e3529716f83652",
"035cf959123645394e09566092e3529716f83652"
] | [
"src/models/semi_supervised_pred.py",
"src/data/make_final_data.py"
] | [
"# Data manipulation\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\n# Sklearn\nfrom sklearn.model_selection import train_test_split # for splitting data into train and test samples\nfrom sklearn.svm import SVC # for Support Vector Classification baseline model\nfrom sklearn.semi_supervised import SelfTrainingClassifier # for Semi-Supervised learning\nfrom sklearn.metrics import classification_report # for model evaluation metrics\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import tree\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import export_graphviz\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.semi_supervised import SelfTrainingClassifier \nfrom sklearn.ensemble import VotingClassifier\n\n\n######## READ DATASET ######################\ndataset = pd.read_csv(\"../../data/processed/dataset_mylabels_2020.csv\")\n\ndataset_lab = dataset[dataset.my_labels != -1]\ndataset_lab.reset_index(drop=True, inplace=True)\n\nX_lab = dataset_lab.iloc[:,[1,3,4,5,6,7]]\ny_lab = dataset_lab.iloc[:, 10]\n\ndataset_unlab = dataset[dataset.my_labels == -1]\ndataset_unlab.reset_index(drop=True, inplace=True)\n\nX_unlab = dataset_unlab.iloc[:,[1,3,4,5,6,7]]\ny_unlab = dataset_unlab.iloc[:, 10]\n\n######## NORMALIZE DATASET ######################\nscaler = preprocessing.MinMaxScaler(feature_range=(0,1))\nX_lab = scaler.fit_transform(X_lab)\nX_unlab = scaler.fit_transform(X_unlab)\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(X_lab, y_lab, test_size=0.25, random_state=0)\nX_train = np.concatenate((X_train, X_unlab))\ny_train = np.concatenate((y_train, y_unlab))\n\n\nknn = KNeighborsClassifier()\ndecision_tree = DecisionTreeClassifier(max_depth=5)\nrandom_forest = RandomForestClassifier(n_estimators=100, random_state=2)\nlogistic_regression = LogisticRegression()\nlinear_svm = SVC(kernel='linear', probability=True)\ngaussian_svm = SVC(kernel='rbf', probability=True)\nsigmoid_svm = SVC(kernel='sigmoid', probability=True)\nestimators = [('KNN', knn), ('Decision Tree', decision_tree), \\\n ('Random Forest', random_forest), ('Logistic Regression', logistic_regression), \\\n ('Linear SVM' ,linear_svm), ('Gaussian SVM', gaussian_svm), ('Sigmoid SVM', sigmoid_svm)]\n\nhard_voting_classifier = VotingClassifier(estimators = estimators, voting = \"hard\")\nsoft_voting_classifier = VotingClassifier(estimators = estimators, voting = \"soft\")\n\nresults = pd.DataFrame()\n\nfor classifier, model in estimators:\n self_training_model = SelfTrainingClassifier(base_estimator=model, # An estimator object implementing fit and predict_proba.\n threshold=0.7, # default=0.75, The decision threshold for use with criterion='threshold'. Should be in [0, 1).\n criterion='threshold', # {‘threshold’, ‘k_best’}, default=’threshold’, The selection criterion used to select which labels to add to the training set. If 'threshold', pseudo-labels with prediction probabilities above threshold are added to the dataset. If 'k_best', the k_best pseudo-labels with highest prediction probabilities are added to the dataset.\n #k_best=50, # default=10, The amount of samples to add in each iteration. Only used when criterion='k_best'.\n max_iter=100, # default=10, Maximum number of iterations allowed. Should be greater than or equal to 0. 
If it is None, the classifier will continue to predict labels until no new pseudo-labels are added, or all unlabeled samples have been labeled.\n verbose=True # default=False, Verbosity prints some information after each iteration\n )\n\n # Fit the model\n clf_ST = self_training_model.fit(X_train, y_train)\n series = pd.DataFrame(clf_ST.transduction_, columns=[classifier])\n results = pd.concat([results, series], axis=1)\n\n\n\n ########## Step 3 - Model Evaluation ########## \n print('')\n print('---------- Self Training Model - Summary ----------')\n print('Base Estimator: ', clf_ST.base_estimator_)\n print('Classes: ', clf_ST.classes_)\n print('Transduction Labels: ', clf_ST.transduction_, \"Len: \", len(clf_ST.transduction_))\n #print('Iteration When Sample Was Labeled: ', clf_ST.labeled_iter_)\n print('Number of Features: ', clf_ST.n_features_in_)\n #print('Feature Names: ', clf_ST.feature_names_in_)\n print('Number of Iterations: ', clf_ST.n_iter_)\n print('Termination Condition: ', clf_ST.termination_condition_)\n print('')\n\n print('---------- Self Training Model - Evaluation on Test Data ----------')\n accuracy_score_ST = clf_ST.score(X_test, y_test)\n print('Accuracy Score: ', accuracy_score_ST)\n # Look at classification report to evaluate the model\n print(classification_report(y_test, clf_ST.predict(X_test)))\n\nresults.to_csv(\"../../reports/semi_supervised_results.csv\", index=False)",
"from os import path\nimport pandas as pd\n\npath = \"../../data/processed/\"\n\naccounts_features = pd.read_csv(path+\"accounts_features_2021.txt\")\naccounts_created_features = pd.read_csv(path+\"Accounts2021_Created_Features.csv\", nrows=37083)\naccounts_labels = pd.read_csv(path+\"accounts_labels_2021.txt\", nrows=37083)\n\naccounts_features = pd.concat([accounts_features, \\\n accounts_created_features[[\"sent\", \"received\", \"n_contracts_sent\", \"n_contracts_received\"]], \\\n accounts_labels[[\"labels\", \"is_professional\"]]], axis=1)\n\naccounts_features.to_csv(path+\"final_dataset_2021.csv\", index=False)"
] | [
[
"pandas.concat",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.VotingClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.concatenate",
"pandas.DataFrame",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.semi_supervised.SelfTrainingClassifier",
"sklearn.svm.SVC",
"sklearn.preprocessing.MinMaxScaler"
],
[
"pandas.concat",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
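The script above marks unlabeled rows with -1, which is the convention `SelfTrainingClassifier` expects for samples to be pseudo-labeled. A minimal sketch of the same fit on synthetic data; the estimator and threshold are chosen purely for illustration:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.semi_supervised import SelfTrainingClassifier

X, y = make_classification(n_samples=200, random_state=0)
y_partial = y.copy()
y_partial[50:] = -1                        # hide most labels from the fit

clf = SelfTrainingClassifier(LogisticRegression(), threshold=0.7,
                             criterion='threshold', max_iter=100)
clf.fit(X, y_partial)                      # pseudo-labels confident predictions
print(clf.termination_condition_, clf.score(X, y))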
trotsky1997/mars | [
"315b94ade1489d4fdfd351f17263fbc1d4c47008",
"315b94ade1489d4fdfd351f17263fbc1d4c47008",
"315b94ade1489d4fdfd351f17263fbc1d4c47008"
] | [
"mars/learn/metrics/pairwise/tests/test_manhattan_distances.py",
"mars/learn/contrib/xgboost/tests/test_train.py",
"mars/services/storage/tests/test_transfer.py"
] | [
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport scipy.sparse as sps\nimport pytest\ntry:\n import sklearn\n\n from sklearn.metrics.pairwise import manhattan_distances as sk_manhattan_distances\nexcept ImportError: # pragma: no cover\n sklearn = None\n\nfrom mars import tensor as mt\nfrom mars.learn.metrics.pairwise import manhattan_distances\nfrom mars.tests import setup\n\n\nsetup = setup\n\n\[email protected](sklearn is None, reason='scikit-learn not installed')\ndef test_manhattan_distances():\n x = mt.random.randint(10, size=(10, 3), density=0.4)\n y = mt.random.randint(10, size=(11, 3), density=0.5)\n\n with pytest.raises(TypeError):\n manhattan_distances(x, y, sum_over_features=False)\n\n x = x.todense()\n y = y.todense()\n\n d = manhattan_distances(x, y, sum_over_features=True)\n assert d.shape == (10, 11)\n d = manhattan_distances(x, y, sum_over_features=False)\n assert d.shape == (110, 3)\n\n\nraw_x = np.random.rand(20, 5)\nraw_y = np.random.rand(21, 5)\n\nx1 = mt.tensor(raw_x, chunk_size=30)\ny1 = mt.tensor(raw_y, chunk_size=30)\n\nx2 = mt.tensor(raw_x, chunk_size=11)\ny2 = mt.tensor(raw_y, chunk_size=12)\n\nraw_sparse_x = sps.random(20, 5, density=0.4, format='csr', random_state=0)\nraw_sparse_y = sps.random(21, 5, density=0.3, format='csr', random_state=0)\n\nx3 = mt.tensor(raw_sparse_x, chunk_size=30)\ny3 = mt.tensor(raw_sparse_y, chunk_size=30)\n\nx4 = mt.tensor(raw_sparse_x, chunk_size=11)\ny4 = mt.tensor(raw_sparse_y, chunk_size=12)\n\n\[email protected](sklearn is None, reason='scikit-learn not installed')\[email protected]('x, y, is_sparse',\n [(x1, y1, False),\n (x2, y2, False),\n (x3, y3, True),\n (x4, y4, True)])\ndef test_manhattan_distances_execution(setup, x, y, is_sparse):\n if is_sparse:\n rx, ry = raw_sparse_x, raw_sparse_y\n else:\n rx, ry = raw_x, raw_y\n\n sv = [True, False] if not is_sparse else [True]\n\n for sum_over_features in sv:\n d = manhattan_distances(x, y, sum_over_features)\n\n result = d.execute().fetch()\n expected = sk_manhattan_distances(rx, ry, sum_over_features)\n\n np.testing.assert_almost_equal(result, expected)\n\n d = manhattan_distances(x, sum_over_features=sum_over_features)\n\n result = d.execute().fetch()\n expected = sk_manhattan_distances(rx, sum_over_features=sum_over_features)\n\n np.testing.assert_almost_equal(result, expected)\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\n\nimport mars.tensor as mt\nimport mars.dataframe as md\nfrom mars.learn.contrib.xgboost import train, MarsDMatrix\nfrom mars.tests import setup\n\ntry:\n import xgboost\n from xgboost import Booster\nexcept ImportError:\n xgboost = None\n\nsetup = setup\n\nn_rows = 1000\nn_columns = 10\nchunk_size = 200\nrs = mt.random.RandomState(0)\nX = rs.rand(n_rows, n_columns, chunk_size=chunk_size)\ny = rs.rand(n_rows, chunk_size=chunk_size)\nX_df = md.DataFrame(X)\ny_series = md.Series(y)\nx_sparse = np.random.rand(n_rows, n_columns)\nx_sparse[np.arange(n_rows), np.random.randint(n_columns, size=n_rows)] = np.nan\nX_sparse = mt.tensor(x_sparse, chunk_size=chunk_size).tosparse(missing=np.nan)\n\n\[email protected](xgboost is None, reason='XGBoost not installed')\ndef test_local_train_tensor(setup):\n dtrain = MarsDMatrix(X, y)\n booster = train({}, dtrain, num_boost_round=2)\n assert isinstance(booster, Booster)\n\n\[email protected](xgboost is None, reason='XGBoost not installed')\ndef test_local_train_sparse_tensor(setup):\n dtrain = MarsDMatrix(X_sparse, y)\n booster = train({}, dtrain, num_boost_round=2)\n assert isinstance(booster, Booster)\n\n\[email protected](xgboost is None, reason='XGBoost not installed')\ndef test_local_train_dataframe(setup):\n dtrain = MarsDMatrix(X_df, y_series)\n booster = train({}, dtrain, num_boost_round=2)\n assert isinstance(booster, Booster)\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport mars.oscar as mo\nfrom mars.oscar.backends.allocate_strategy import IdleLabel\nfrom mars.services.storage.errors import DataNotExist\nfrom mars.services.storage.core import StorageManagerActor, StorageQuotaActor\nfrom mars.services.storage.handler import StorageHandlerActor\nfrom mars.services.storage.transfer import ReceiverManagerActor, SenderManagerActor\nfrom mars.storage import StorageLevel\n\n\[email protected]\nasync def actor_pools():\n async def start_pool():\n start_method = os.environ.get('POOL_START_METHOD', 'forkserver') \\\n if sys.platform != 'win32' else None\n\n pool = await mo.create_actor_pool('127.0.0.1', n_process=2,\n labels=['main', 'sub', 'io'],\n subprocess_start_method=start_method)\n await pool.start()\n return pool\n\n worker_pool_1 = await start_pool()\n worker_pool_2 = await start_pool()\n yield worker_pool_1, worker_pool_2\n await worker_pool_1.stop()\n await worker_pool_2.stop()\n\n\[email protected]\nasync def create_actors(actor_pools):\n worker_pool_1, worker_pool_2 = actor_pools\n\n if sys.platform == 'darwin':\n plasma_dir = '/tmp'\n else:\n plasma_dir = '/dev/shm'\n plasma_setup_params = dict(\n store_memory=5 * 1024 * 1024,\n plasma_directory=plasma_dir,\n check_dir_size=False)\n storage_configs = {\n \"plasma\": plasma_setup_params,\n }\n\n manager_ref1 = await mo.create_actor(\n StorageManagerActor, storage_configs,\n uid=StorageManagerActor.default_uid(),\n address=worker_pool_1.external_address)\n\n manager_ref2 = await mo.create_actor(\n StorageManagerActor, storage_configs,\n uid=StorageManagerActor.default_uid(),\n address=worker_pool_2.external_address)\n yield worker_pool_1.external_address, worker_pool_2.external_address\n await mo.destroy_actor(manager_ref1)\n await mo.destroy_actor(manager_ref2)\n\n\[email protected]\nasync def test_simple_transfer(create_actors):\n worker_address_1, worker_address_2 = create_actors\n\n session_id = 'mock_session'\n data1 = np.random.rand(100, 100)\n data2 = pd.DataFrame(np.random.randint(0, 100, (500, 10)))\n\n storage_handler1 = await mo.actor_ref(uid=StorageHandlerActor.default_uid(),\n address=worker_address_1)\n storage_handler2 = await mo.actor_ref(uid=StorageHandlerActor.default_uid(),\n address=worker_address_2)\n\n await storage_handler1.put(session_id, 'data_key1', data1, StorageLevel.MEMORY)\n await storage_handler1.put(session_id, 'data_key2', data2, StorageLevel.MEMORY)\n await storage_handler2.put(session_id, 'data_key3', data2, StorageLevel.MEMORY)\n\n sender_actor = await mo.actor_ref(address=worker_address_1,\n uid=SenderManagerActor.default_uid())\n\n # send data to worker2 from worker1\n await sender_actor.send_data(session_id, 'data_key1',\n worker_address_2,\n StorageLevel.MEMORY, block_size=1000)\n\n await sender_actor.send_data(session_id, 'data_key2',\n worker_address_2,\n 
StorageLevel.MEMORY, block_size=1000)\n\n get_data1 = await storage_handler2.get(session_id, 'data_key1')\n np.testing.assert_array_equal(data1, get_data1)\n\n get_data2 = await storage_handler2.get(session_id, 'data_key2')\n pd.testing.assert_frame_equal(data2, get_data2)\n\n # send data to worker1 from worker2\n sender_actor = await mo.actor_ref(address=worker_address_2,\n uid=SenderManagerActor.default_uid())\n await sender_actor.send_data(session_id, 'data_key3', worker_address_1,\n StorageLevel.MEMORY)\n get_data3 = await storage_handler1.get(session_id, 'data_key3')\n pd.testing.assert_frame_equal(data2, get_data3)\n\n\n# test for cancelling happens when writing\nclass MockReceiverManagerActor(ReceiverManagerActor):\n async def do_write(self, message):\n await asyncio.sleep(3)\n await super().do_write(message)\n\n\nclass MockSenderManagerActor(SenderManagerActor):\n @staticmethod\n async def get_receiver_ref(address: str):\n return await mo.actor_ref(\n address=address, uid=MockReceiverManagerActor.default_uid())\n\n\n# test for cancelling happens when creating writer\nclass MockReceiverManagerActor2(ReceiverManagerActor):\n async def create_writer(self,\n session_id: str,\n data_key: str,\n data_size: int,\n level: StorageLevel):\n await asyncio.sleep(3)\n await super().create_writer(session_id, data_key, data_size, level)\n\n\nclass MockSenderManagerActor2(SenderManagerActor):\n @staticmethod\n async def get_receiver_ref(address: str):\n return await mo.actor_ref(\n address=address, uid=MockReceiverManagerActor2.default_uid())\n\n\[email protected]('mock_sender, mock_receiver',\n [(MockSenderManagerActor, MockReceiverManagerActor)])\[email protected]\nasync def test_cancel_transfer(create_actors, mock_sender, mock_receiver):\n worker_address_1, worker_address_2 = create_actors\n\n strategy = IdleLabel('io', 'mock_sender')\n quota_refs = {StorageLevel.MEMORY: await mo.actor_ref(\n StorageQuotaActor, StorageLevel.MEMORY, 5 * 1024 * 1024,\n address=worker_address_2, uid=StorageQuotaActor.gen_uid(StorageLevel.MEMORY))}\n\n await mo.create_actor(\n mock_sender, uid=mock_sender.default_uid(),\n address=worker_address_1, allocate_strategy=strategy)\n await mo.create_actor(\n mock_receiver, quota_refs, uid=mock_receiver.default_uid(),\n address=worker_address_2, allocate_strategy=strategy)\n\n data1 = np.random.rand(10, 10)\n storage_handler1 = await mo.actor_ref(\n uid=StorageHandlerActor.default_uid(),\n address=worker_address_1)\n storage_handler2 = await mo.actor_ref(\n uid=StorageHandlerActor.default_uid(),\n address=worker_address_2)\n await storage_handler1.put('mock', 'data_key1',\n data1, StorageLevel.MEMORY)\n\n sender_actor = await mo.actor_ref(address=worker_address_1,\n uid=mock_sender.default_uid())\n used_before = (await quota_refs[StorageLevel.MEMORY].get_quota())[1]\n\n send_task = asyncio.create_task(sender_actor.send_data(\n 'mock', 'data_key1', worker_address_2, StorageLevel.MEMORY))\n\n await asyncio.sleep(0.5)\n send_task.cancel()\n\n try:\n await send_task\n except asyncio.CancelledError:\n pass\n\n used = (await quota_refs[StorageLevel.MEMORY].get_quota())[1]\n assert used == used_before\n\n with pytest.raises(DataNotExist):\n await storage_handler2.get('mock', 'data_key1')\n\n send_task = asyncio.create_task(sender_actor.send_data(\n 'mock', 'data_key1', worker_address_2, StorageLevel.MEMORY))\n await send_task\n get_data = await storage_handler2.get('mock', 'data_key1')\n np.testing.assert_array_equal(data1, get_data)\n"
] | [
[
"scipy.sparse.random",
"numpy.testing.assert_almost_equal",
"numpy.random.rand",
"sklearn.metrics.pairwise.manhattan_distances"
],
[
"numpy.arange",
"numpy.random.rand",
"numpy.random.randint"
],
[
"numpy.testing.assert_array_equal",
"pandas.testing.assert_frame_equal",
"numpy.random.rand",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
monkeyhippies/speech2signs-2017-nmt | [
"b2cc696f1673a59e32f3f1a3e2896b9f80e75d7a"
] | [
"transformer/Modules.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport numpy as np\n\n__author__ = \"Yu-Hsiang Huang\"\n\nclass Linear(nn.Module):\n ''' Simple Linear layer with xavier init '''\n def __init__(self, d_in, d_out, bias=True):\n super(Linear, self).__init__()\n self.linear = nn.Linear(d_in, d_out, bias=bias)\n init.xavier_normal(self.linear.weight)\n\n def forward(self, x):\n return self.linear(x)\n\nclass Bottle(nn.Module):\n ''' Perform the reshape routine before and after an operation '''\n\n def forward(self, input):\n if len(input.size()) <= 2:\n return super(Bottle, self).forward(input)\n size = input.size()[:2]\n out = super(Bottle, self).forward(input.view(size[0]*size[1], -1))\n return out.view(size[0], size[1], -1)\n\nclass BottleLinear(Bottle, Linear):\n ''' Perform the reshape routine before and after a linear projection '''\n pass\n\nclass BottleSoftmax(Bottle, nn.Softmax):\n ''' Perform the reshape routine before and after a softmax operation'''\n pass\n\nclass LayerNormalization(nn.Module):\n ''' Layer normalization module '''\n\n def __init__(self, d_hid, eps=1e-3):\n super(LayerNormalization, self).__init__()\n\n self.eps = eps\n self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)\n self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)\n\n def forward(self, z):\n if z.size(1) == 1:\n return z\n\n mu = torch.mean(z, keepdim=True, dim=-1)\n sigma = torch.std(z, keepdim=True, dim=-1)\n ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)\n ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)\n\n return ln_out\n\nclass BatchBottle(nn.Module):\n ''' Perform the reshape routine before and after an operation '''\n\n def forward(self, input):\n if len(input.size()) <= 2:\n return super(BatchBottle, self).forward(input)\n size = input.size()[1:]\n out = super(BatchBottle, self).forward(input.view(-1, size[0]*size[1]))\n return out.view(-1, size[0], size[1])\n\nclass BottleLayerNormalization(BatchBottle, LayerNormalization):\n ''' Perform the reshape routine before and after a layer normalization'''\n pass\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, d_model, attn_dropout=0.1):\n super(ScaledDotProductAttention, self).__init__()\n self.temper = np.power(d_model, 0.5)\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = BottleSoftmax()\n\n def forward(self, q, k, v, attn_mask=None):\n\n attn = torch.bmm(q, k.transpose(1, 2)) / self.temper\n\n if attn_mask is not None:\n\n assert attn_mask.size() == attn.size(), \\\n 'Attention mask shape {} mismatch ' \\\n 'with Attention logit tensor shape ' \\\n '{}.'.format(attn_mask.size(), attn.size())\n\n attn.data.masked_fill_(attn_mask, -float('inf'))\n\n attn = self.softmax(attn)\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n"
] | [
[
"torch.mean",
"torch.nn.Dropout",
"torch.ones",
"numpy.power",
"torch.zeros",
"torch.nn.Linear",
"torch.std",
"torch.bmm",
"torch.nn.init.xavier_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yanci0/openGauss-server | [
"f43410e1643c887819e718d9baceb9e853ad9574",
"f43410e1643c887819e718d9baceb9e853ad9574"
] | [
"src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/spectral_residual.py",
"src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/fb_prophet.py"
] | [
"# SR.py\nimport numpy as np\nimport scipy as sc\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom gs_aiops.detector.algorithm.anomal_detect_algorithm.utils import *\nfrom scipy.fftpack import fft, ifft\nfrom gs_aiops.tools import generate_anomal_data\nfrom gs_aiops.detector.algorithm.anomal_detect_algorithm.utils import load_data\n\n\nclass SR:\n '''\n This module realises a spectral residual method for anomaly detection.\n The input data suppose list,np.ndarray and pd.Series\n '''\n\n def __init__(self, X=np.array([]), slice_window=3, map_window=3, tresh=1):\n self.slice_window = slice_window\n self.X = getData(X)\n self.map_window = map_window\n self.thresh = tresh\n # raise NotImplementedError\n\n def run(self):\n Smap = self.getSalienceMap(self.X)\n result = np.array([1 if i > self.thresh else 0 for i in Smap])\n return result, Smap\n\n def setdata(self, data):\n self.X = getData(data)\n\n def setslicewindow(self, thresh):\n self.slice_window = thresh\n\n def plot(self):\n raise NotImplementedError\n\n def getSR(self, X):\n '''\n 傅里叶变化、残差谱、反傅里叶变化\n '''\n X = getData(X)\n\n # spectral_residual_transform\n yy = fft(X)\n A = yy.real\n P = yy.imag\n V = np.sqrt(A ** 2 + P ** 2)\n eps_index = np.where(V <= EPS)[0]\n V[eps_index] = EPS\n L = np.log(V)\n L[eps_index] = 0\n residual = np.exp(L - average_filter(L, self.map_window))\n yy.imag = residual * P / V\n yy.real = residual * A / V\n yy.imag[eps_index] = 0\n yy.real[eps_index] = 0\n result = ifft(yy)\n S = np.sqrt(result.real ** 2 + result.imag ** 2)\n # guass filter\n return S\n\n def getSalienceMap(self, X):\n Map = self.getSR(self.extendseries(X))[:len(X)]\n ave_mag = average_filter(Map, n=self.slice_window)\n ave_mag[np.where(ave_mag <= EPS)] = EPS\n\n return abs(Map - ave_mag) / ave_mag\n\n def estimate(self, X):\n '''\n get k estimated points which is equal to x(n+1)\n x(n+1)=x(n-m+1)+m*g\n g=sum(g(x(n),x(n-i)))/m\n '''\n n = len(X)\n gradients = [(X[-1] - v) / (n - 1 - i) for i, v in enumerate(X[:-1])]\n # g=np.sum(gradients)/m\n return X[1] + np.sum(gradients)\n\n def extendseries(self, X, k=5):\n '''\n use k to extend oringe serie;\n '''\n print(X[-k - 2:-1])\n X = np.append(X, self.estimate(X[-k - 2:-1]).repeat(k))\n return X\n\n\n\nif __name__ == '__main__':\n # data = generate_anomal_data.generate_period_trend_ts()\n data = load_data('../../../../anomal_data/art_daily_flatmiddle.csv')\n sr = SR(data, tresh=1.5)\n res, ma = sr.run()\n print(res)\n print(len(res))\n plt.subplot(211)\n plt.plot(data)\n for index, value in enumerate(data):\n if res[index] == 1:\n plt.scatter(index, value, c='r')\n plt.subplot(212)\n plt.plot(ma)\n plt.show()\n # data = np.array([1, 2, 3])\n # print(SR().extendseries(data))",
"\"\"\"\nCopyright (c) 2020 Huawei Technologies Co.,Ltd.\n\nopenGauss is licensed under Mulan PSL v2.\nYou can use this software according to the terms and conditions of the Mulan PSL v2.\nYou may obtain a copy of Mulan PSL v2 at:\n\n http://license.coscl.org.cn/MulanPSL2\n\nTHIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND,\nEITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,\nMERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.\nSee the Mulan PSL v2 for more details.\n\"\"\"\nimport pickle\nimport time\n\nimport pandas as pd\nfrom fbprophet import Prophet\n\nfrom .model import AlgModel\n\n\nclass FbProphet(AlgModel):\n \"\"\"\n This class inherits from the AlgModel class.\n It is based on the Facebook prophet algorithm and uses to forecast time-series.\n \"\"\"\n\n def __init__(self):\n AlgModel.__init__(self)\n self.model = None\n self.train_length = 0\n\n def fit(self, timeseries):\n \"\"\"\n :param timeseries: list, it should include timestamp and value like\n [[111111111, 2222222222, ...], [4.0, 5.0, ...]].\n :return: NA\n \"\"\"\n timeseries = pd.DataFrame(timeseries, columns=['ds', 'y'])\n timeseries['ds'] = timeseries['ds'].map(\n lambda x: time.strftime(AlgModel.DATE_FORMAT, time.localtime(x)))\n self.train_length = len(timeseries)\n self.model = Prophet(yearly_seasonality=True,\n weekly_seasonality=True,\n daily_seasonality=True)\n self.model.fit(timeseries)\n\n def forecast(self, period, freq):\n \"\"\"\n :param freq: int, time interval.\n :param period: string, like '100S','1D', reprensent forecast period.\n :return: list, forecast result which include date and value.\n \"\"\"\n if freq.endswith('M'):\n freq = freq.replace('M', 'T')\n\n future = self.model.make_future_dataframe(freq=freq,\n periods=period,\n include_history=False)\n forecast_result = self.model.predict(future)[['ds', 'yhat']]\n forecast_result['ds'] = forecast_result['ds'].map(lambda x: x.strftime(AlgModel.DATE_FORMAT))\n return forecast_result.values[:, 0], forecast_result.values[:, 1]\n\n def save(self, model_path):\n with open(model_path, mode='wb') as f:\n pickle.dump(self.model, f)\n\n def load(self, model_path):\n with open(model_path, mode='rb') as f:\n self.model = pickle.load(f)\n"
] | [
[
"numpy.log",
"scipy.fftpack.ifft",
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.plot",
"scipy.fftpack.fft",
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.where",
"numpy.sum",
"matplotlib.pyplot.show"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
arthurpessoa/tensorflow-handwriten-digits | [
"d140299b96c4da146ca4c63015c9101aacb851f1"
] | [
"mnist_softmax.py"
] | [
"# 1) Download dataset from MNIST (\"Modified National Institute of Standards and Technology\"),\n# 2) Splits it into Training + Test data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nimport tensorflow as tf\n\n#Set parameters\nlearning_rate = 0.01 # How fast we wanna update our weights, if the rate is too big\n # the model may skip the optimal solution, if it's too small, it may take too many iterations\n # to converge to best result\ntraining_iteration = 30\nbatch_size = 100\ndisplay_step = 2\n\n\n# Graph input (Input neurons)\nx = tf.placeholder(\"float\", [None, 784]) # Our images are 28*28 pixels, 28*28=784\ny = tf.placeholder(\"float\", [None, 10]) # 0-9 digits recognition, so we need 10 classes\n\n# Create our model\n# ----------------------\n#Set weights\nW = tf.Variable(tf.zeros([784, 10])) #weights\nb = tf.Variable(tf.zeros([10])) #bias\n\nwith tf.name_scope(\"Wx_b\") as scope:\n # Construct a linear model\n model = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n\n# Add summary ops to collect data\nw_h = tf.summary.histogram(\"weights\", W)\nb_h = tf.summary.histogram(\"biases\", b)\n\n# More name scopes will clean up graph representation\nwith tf.name_scope(\"cost_function\") as scope:\n # Minimize error using cross entropy\n # Cross entropy\n cost_function = -tf.reduce_sum(y*tf.log(model))\n # Create a summary to monitor the cost function\n tf.summary.scalar(\"cost_function\", cost_function)\n\nwith tf.name_scope(\"train\") as scope:\n # Gradient descent\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\n\n# Merge all summaries into a single operator\nmerged_summary_op = tf.summary.merge_all()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n # Change this to a location on your computer\n summary_writer = tf.summary.FileWriter('./train_summary/', graph_def=sess.graph_def)\n\n # Training cycle\n for iteration in range(training_iteration):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Fit training using batch data\n sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})\n # Compute the average loss\n avg_cost += sess.run(cost_function, feed_dict={x: batch_xs, y: batch_ys})/total_batch\n # Write logs for each iteration\n summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})\n summary_writer.add_summary(summary_str, iteration*total_batch + i)\n # Display logs per iteration step\n if iteration % display_step == 0:\n print(\"Iteration:\", '%04d' % (iteration + 1), \"cost=\", \"{:.9f}\".format(avg_cost))\n\n print(\"Tuning completed!\")\n\n # Test the model\n predictions = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))\n # Calculate accuracy\n accuracy = tf.reduce_mean(tf.cast(predictions, \"float\"))\n print(\"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))\n"
] | [
[
"tensorflow.matmul",
"tensorflow.summary.FileWriter",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.log",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.summary.scalar",
"tensorflow.summary.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
wingkitlee0/pt-dec | [
"087b6231ea52422d827bf446b2ecf755ae9a6679"
] | [
"ptdec/model.py"
] | [
"from typing import Callable, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.cluster import KMeans\nfrom torch.utils.data.dataloader import DataLoader, default_collate\nfrom tqdm import tqdm\n\nfrom ptdec.utils import cluster_accuracy, target_distribution\n\n\ndef train(\n dataset: torch.utils.data.Dataset,\n model: torch.nn.Module,\n epochs: int,\n batch_size: int,\n optimizer: torch.optim.Optimizer,\n stopping_delta: Optional[float] = None,\n collate_fn=default_collate,\n cuda: bool = True,\n sampler: Optional[torch.utils.data.sampler.Sampler] = None,\n silent: bool = False,\n update_freq: int = 10,\n evaluate_batch_size: int = 1024,\n update_callback: Optional[Callable[..., None]] = None,\n epoch_callback: Optional[Callable[[int, torch.nn.Module], None]] = None,\n) -> None:\n \"\"\"\n Train the DEC model given a dataset, a model instance and various configuration parameters.\n\n :param dataset: instance of Dataset to use for training\n :param model: instance of DEC model to train\n :param epochs: number of training epochs\n :param batch_size: size of the batch to train with\n :param optimizer: instance of optimizer to use\n :param stopping_delta: label delta as a proportion to use for stopping, None to disable, default None\n :param collate_fn: function to merge a list of samples into mini-batch\n :param cuda: whether to use CUDA, defaults to True\n :param sampler: optional sampler to use in the DataLoader, defaults to None\n :param silent: set to True to prevent printing out summary statistics, defaults to False\n :param update_freq: frequency of batches with which to update counter, None disables, default 10\n :param evaluate_batch_size: batch size for evaluation stage, default 1024\n :param update_callback: optional function of accuracy and loss to update, default None\n :param epoch_callback: optional function of epoch and model, default None\n :return: None\n \"\"\"\n static_dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n collate_fn=collate_fn,\n pin_memory=False,\n sampler=sampler,\n shuffle=False,\n )\n train_dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n collate_fn=collate_fn,\n sampler=sampler,\n shuffle=True,\n )\n data_iterator = tqdm(\n static_dataloader,\n leave=True,\n unit=\"batch\",\n postfix={\n \"epo\": -1,\n \"acc\": \"%.4f\" % 0.0,\n \"lss\": \"%.8f\" % 0.0,\n \"dlb\": \"%.4f\" % -1,\n },\n disable=silent,\n )\n kmeans = KMeans(n_clusters=model.cluster_number, n_init=20)\n model.train()\n features = []\n actual = []\n # form initial cluster centres\n for index, batch in enumerate(data_iterator):\n if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:\n batch, value = batch # if we have a prediction label, separate it to actual\n actual.append(value)\n if cuda:\n batch = batch.cuda(non_blocking=True)\n features.append(model.encoder(batch).detach().cpu())\n actual = torch.cat(actual).long()\n predicted = kmeans.fit_predict(torch.cat(features).numpy())\n predicted_previous = torch.tensor(np.copy(predicted), dtype=torch.long)\n _, accuracy = cluster_accuracy(predicted, actual.cpu().numpy())\n cluster_centers = torch.tensor(\n kmeans.cluster_centers_, dtype=torch.float, requires_grad=True\n )\n if cuda:\n cluster_centers = cluster_centers.cuda(non_blocking=True)\n with torch.no_grad():\n # initialise the cluster centers\n model.state_dict()[\"assignment.cluster_centers\"].copy_(cluster_centers)\n loss_function = nn.KLDivLoss(size_average=False)\n delta_label = None\n 
for epoch in range(epochs):\n features = []\n data_iterator = tqdm(\n train_dataloader,\n leave=True,\n unit=\"batch\",\n postfix={\n \"epo\": epoch,\n \"acc\": \"%.4f\" % (accuracy or 0.0),\n \"lss\": \"%.8f\" % 0.0,\n \"dlb\": \"%.4f\" % (delta_label or 0.0),\n },\n disable=silent,\n )\n model.train()\n for index, batch in enumerate(data_iterator):\n if (isinstance(batch, tuple) or isinstance(batch, list)) and len(\n batch\n ) == 2:\n batch, _ = batch # if we have a prediction label, strip it away\n if cuda:\n batch = batch.cuda(non_blocking=True)\n output = model(batch)\n target = target_distribution(output).detach()\n loss = loss_function(output.log(), target) / output.shape[0]\n data_iterator.set_postfix(\n epo=epoch,\n acc=\"%.4f\" % (accuracy or 0.0),\n lss=\"%.8f\" % float(loss.item()),\n dlb=\"%.4f\" % (delta_label or 0.0),\n )\n optimizer.zero_grad()\n loss.backward()\n optimizer.step(closure=None)\n features.append(model.encoder(batch).detach().cpu())\n if update_freq is not None and index % update_freq == 0:\n loss_value = float(loss.item())\n data_iterator.set_postfix(\n epo=epoch,\n acc=\"%.4f\" % (accuracy or 0.0),\n lss=\"%.8f\" % loss_value,\n dlb=\"%.4f\" % (delta_label or 0.0),\n )\n if update_callback is not None:\n update_callback(accuracy, loss_value, delta_label)\n predicted, actual = predict(\n dataset,\n model,\n batch_size=evaluate_batch_size,\n collate_fn=collate_fn,\n silent=True,\n return_actual=True,\n cuda=cuda,\n )\n delta_label = (\n float((predicted != predicted_previous).float().sum().item())\n / predicted_previous.shape[0]\n )\n if stopping_delta is not None and delta_label < stopping_delta:\n print(\n 'Early stopping as label delta \"%1.5f\" less than \"%1.5f\".'\n % (delta_label, stopping_delta)\n )\n break\n predicted_previous = predicted\n _, accuracy = cluster_accuracy(predicted.cpu().numpy(), actual.cpu().numpy())\n data_iterator.set_postfix(\n epo=epoch,\n acc=\"%.4f\" % (accuracy or 0.0),\n lss=\"%.8f\" % 0.0,\n dlb=\"%.4f\" % (delta_label or 0.0),\n )\n if epoch_callback is not None:\n epoch_callback(epoch, model)\n\n\ndef predict(\n dataset: torch.utils.data.Dataset,\n model: torch.nn.Module,\n batch_size: int = 1024,\n collate_fn=default_collate,\n cuda: bool = True,\n silent: bool = False,\n return_actual: bool = False,\n) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:\n \"\"\"\n Predict clusters for a dataset given a DEC model instance and various configuration parameters.\n\n :param dataset: instance of Dataset to use for training\n :param model: instance of DEC model to predict\n :param batch_size: size of the batch to predict with, default 1024\n :param collate_fn: function to merge a list of samples into mini-batch\n :param cuda: whether CUDA is used, defaults to True\n :param silent: set to True to prevent printing out summary statistics, defaults to False\n :param return_actual: return actual values, if present in the Dataset\n :return: tuple of prediction and actual if return_actual is True otherwise prediction\n \"\"\"\n dataloader = DataLoader(\n dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=False\n )\n data_iterator = tqdm(\n dataloader,\n leave=True,\n unit=\"batch\",\n disable=silent,\n )\n features = []\n actual = []\n model.eval()\n for batch in data_iterator:\n if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:\n batch, value = batch # unpack if we have a prediction label\n if return_actual:\n actual.append(value)\n elif return_actual:\n raise ValueError(\n \"Dataset has 
no actual value to unpack, but return_actual is set.\"\n )\n if cuda:\n batch = batch.cuda(non_blocking=True)\n features.append(\n model(batch).detach().cpu()\n ) # move to the CPU to prevent out of memory on the GPU\n if return_actual:\n return torch.cat(features).max(1)[1], torch.cat(actual).long()\n else:\n return torch.cat(features).max(1)[1]\n"
] | [
[
"torch.nn.KLDivLoss",
"sklearn.cluster.KMeans",
"torch.cat",
"torch.tensor",
"numpy.copy",
"torch.no_grad",
"torch.utils.data.dataloader.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
muminkoykiran/computervision-recipes | [
"b573f2600ebda68b1ab571d4122a32525b674587",
"b573f2600ebda68b1ab571d4122a32525b674587"
] | [
"utils_cv/action_recognition/dataset.py",
"utils_cv/detection/references/coco_utils.py"
] | [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nimport copy\nimport math\nfrom pathlib import Path\nimport warnings\nfrom typing import Callable, Tuple, Union, List\n\nimport decord\nfrom einops.layers.torch import Rearrange\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import randint\nimport torch\nfrom torch.utils.data import Dataset, Subset, DataLoader\nfrom torchvision.transforms import Compose\n\nfrom .references import transforms_video as transforms\nfrom .references.functional_video import denormalize\n\nfrom ..common.misc import Config\nfrom ..common.gpu import num_devices, db_num_workers\n\nTrans = Callable[[object, dict], Tuple[object, dict]]\n\nDEFAULT_MEAN = (0.43216, 0.394666, 0.37645)\nDEFAULT_STD = (0.22803, 0.22145, 0.216989)\n\n\nclass VideoRecord(object):\n \"\"\"\n This class is used for parsing split-files where each row contains a path\n and a label:\n\n Ex:\n ```\n path/to/my/clip_1 3\n path/to/another/clip_2 32\n ```\n \"\"\"\n\n def __init__(self, data: List[str]):\n \"\"\" Initialized a VideoRecord\n\n Ex.\n data = [\"path/to/video.mp4\", 2, \"cooking\"]\n\n Args:\n row: a list where first element is the path and second element is\n the label, and the third element (optional) is the label name\n \"\"\"\n assert len(data) >= 2 and len(data) <= 3\n assert isinstance(data[0], str)\n assert isinstance(int(data[1]), int)\n if len(data) == 3:\n assert isinstance(data[2], str)\n\n self._data = data\n self._num_frames = None\n\n @property\n def path(self) -> str:\n return self._data[0]\n\n @property\n def num_frames(self) -> int:\n if self._num_frames is None:\n self._num_frames = int(\n len([x for x in Path(self._data[0]).glob(\"img_*\")]) - 1\n )\n return self._num_frames\n\n @property\n def label(self) -> int:\n return int(self._data[1])\n\n @property\n def label_name(self) -> str:\n return None if len(self._data) <= 2 else self._data[2]\n\n\ndef get_transforms(train: bool = True, tfms_config: Config = None) -> Trans:\n \"\"\" Get default transformations to apply depending on whether we're applying it to the training or the validation set. If no tfms configurations are passed in, use the defaults.\n\n Args:\n train: whether or not this is for training\n tfms_config: Config object with tranforms-related configs\n\n Returns:\n A list of transforms to apply\n \"\"\"\n if tfms_config is None:\n tfms_config = get_default_tfms_config(train=train)\n\n # 1. resize\n tfms = [\n transforms.ToTensorVideo(),\n transforms.ResizeVideo(\n tfms_config.im_scale, tfms_config.resize_keep_ratio\n ),\n ]\n\n # 2. crop\n if tfms_config.random_crop:\n if tfms_config.random_crop_scales:\n crop = transforms.RandomResizedCropVideo(\n tfms_config.input_size, tfms_config.random_crop_scales\n )\n else:\n crop = transforms.RandomCropVideo(tfms_config.input_size)\n else:\n crop = transforms.CenterCropVideo(tfms_config.input_size)\n tfms.append(crop)\n\n # 3. flip\n tfms.append(transforms.RandomHorizontalFlipVideo(tfms_config.flip_ratio))\n\n # 4. 
normalize\n tfms.append(transforms.NormalizeVideo(tfms_config.mean, tfms_config.std))\n\n return Compose(tfms)\n\n\ndef get_default_tfms_config(train: bool) -> Config:\n \"\"\"\n Args:\n train: whether or not this is for training\n\n Settings:\n input_size (int or tuple): Model input image size.\n im_scale (int or tuple): Resize target size.\n resize_keep_ratio (bool): If True, keep the original ratio when resizing.\n mean (tuple): Normalization mean.\n if train:\n std (tuple): Normalization std.\n flip_ratio (float): Horizontal flip ratio.\n random_crop (bool): If False, do center-crop.\n random_crop_scales (tuple): Range of size of the origin size random cropped.\n \"\"\"\n flip_ratio = 0.5 if train else 0.0\n random_crop = True if train else False\n random_crop_scales = (0.6, 1.0) if train else None\n\n return Config(\n dict(\n input_size=112,\n im_scale=128,\n resize_keep_ratio=True,\n mean=DEFAULT_MEAN,\n std=DEFAULT_STD,\n flip_ratio=flip_ratio,\n random_crop=random_crop,\n random_crop_scales=random_crop_scales,\n )\n )\n\n\nclass VideoDataset:\n \"\"\" A video recognition dataset. \"\"\"\n\n def __init__(\n self,\n root: str,\n seed: int = None,\n train_pct: float = 0.75,\n num_samples: int = 1,\n sample_length: int = 8,\n sample_step: int = 1,\n temporal_jitter: bool = True,\n temporal_jitter_step: int = 2,\n random_shift: bool = True,\n batch_size: int = 8,\n video_ext: str = \"mp4\",\n warning: bool = False,\n train_split_file: str = None,\n test_split_file: str = None,\n train_transforms: Trans = get_transforms(train=True),\n test_transforms: Trans = get_transforms(train=False),\n ) -> None:\n \"\"\" initialize dataset\n\n Arg:\n root: Videos directory.\n seed: random seed\n train_pct: percentage of dataset to use for training\n num_samples: Number of clips to sample from each video.\n sample_length: Number of consecutive frames to sample from a video (i.e. 
clip length).\n sample_step: Sampling step.\n temporal_jitter: Randomly skip frames when sampling each frames.\n temporal_jitter_step: temporal jitter in frames\n random_shift: Random temporal shift when sample a clip.\n video_ext: Video file extension.\n warning: On or off warning.\n train_split_file: Annotation file containing video filenames and labels.\n test_split_file: Annotation file containing video filenames and labels.\n train_transforms: transforms for training\n test_transforms: transforms for testing\n \"\"\"\n\n assert sample_step > 0\n assert num_samples > 0\n\n if temporal_jitter:\n assert temporal_jitter_step > 0\n\n if train_split_file:\n assert Path(train_split_file).exists()\n assert (\n test_split_file is not None and Path(test_split_file).exists()\n )\n\n if test_split_file:\n assert Path(test_split_file).exists()\n assert (\n train_split_file is not None\n and Path(train_split_file).exists()\n )\n\n self.root = root\n self.seed = seed\n self.num_samples = num_samples\n self.sample_length = sample_length\n self.sample_step = sample_step\n self.presample_length = sample_length * sample_step\n self.temporal_jitter_step = temporal_jitter_step\n self.train_transforms = train_transforms\n self.test_transforms = test_transforms\n self.random_shift = random_shift\n self.temporal_jitter = temporal_jitter\n self.batch_size = batch_size\n self.video_ext = video_ext\n self.warning = warning\n\n # create training and validation datasets\n self.train_ds, self.test_ds = (\n self.split_with_file(\n train_split_file=train_split_file,\n test_split_file=test_split_file,\n )\n if train_split_file\n else self.split_by_folder(train_pct=train_pct)\n )\n\n # initialize dataloaders\n self.init_data_loaders()\n\n def split_by_folder(\n self, train_pct: float = 0.8\n ) -> Tuple[Dataset, Dataset]:\n \"\"\" Split this dataset into a training and testing set based on the\n folders that the videos are in.\n\n ```\n /data\n +-- action_class_1\n | +-- video_01.mp4\n | +-- video_02.mp4\n | +-- ...\n +-- action_class_2\n | +-- video_11.mp4\n | +-- video_12.mp4\n | +-- ...\n +-- ...\n ```\n\n Args:\n train_pct: the ratio of images to use for training vs\n testing\n\n Return\n A training and testing dataset in that order\n \"\"\"\n self.video_records = []\n\n # get all dirs in root (and make sure they are dirs)\n dirs = []\n for entry in os.listdir(self.root):\n if os.path.isdir(os.path.join(self.root, entry)):\n dirs.append(os.path.join(self.root, entry))\n\n # add each video in each dir as a video record\n label = 0\n self.classes = []\n for action in dirs:\n action = os.path.basename(os.path.normpath(action))\n self.video_records.extend(\n [\n VideoRecord(\n [\n os.path.join(self.root, action, vid.split(\".\")[0]),\n label,\n action,\n ]\n )\n for vid in os.listdir(os.path.join(self.root, action))\n ]\n )\n label += 1\n self.classes.append(action)\n\n # random split\n test_num = math.floor(len(self) * (1 - train_pct))\n if self.seed:\n torch.manual_seed(self.seed)\n\n # set indices\n indices = torch.randperm(len(self)).tolist()\n train_range = indices[test_num:]\n test_range = indices[:test_num]\n\n return self.split_train_test(train_range, test_range)\n\n def split_with_file(\n self,\n train_split_file: Union[Path, str],\n test_split_file: Union[Path, str],\n ) -> Tuple[Dataset, Dataset]:\n \"\"\" Split this dataset into a training and testing set using a split file.\n\n Each line in the split file must use the form:\n ```\n path/to/jumping/video_name_1 3\n path/to/swimming/video_name_2 5\n 
path/to/another/jumping/video_name_3 3\n ```\n\n Args:\n split_files: a tuple of 2 files\n\n Return:\n A training and testing dataset in that order\n \"\"\"\n self.video_records = []\n\n # add train records\n self.video_records.extend(\n [\n VideoRecord(row.strip().split(\" \"))\n for row in open(train_split_file)\n ]\n )\n train_len = len(self.video_records)\n\n # add validation records\n self.video_records.extend(\n [\n VideoRecord(row.strip().split(\" \"))\n for row in open(test_split_file)\n ]\n )\n\n # create indices\n indices = torch.arange(0, len(self.video_records))\n train_range = indices[:train_len]\n test_range = indices[train_len:]\n\n return self.split_train_test(train_range, test_range)\n\n def split_train_test(\n self, train_range: torch.Tensor, test_range: torch.Tensor,\n ) -> Tuple[Dataset, Dataset]:\n \"\"\" Split this dataset into a training and testing set\n\n Args:\n train_range: range of indices for training set\n test_range: range of indices for testing set\n\n Return\n A training and testing dataset in that order\n \"\"\"\n # create train subset\n train = copy.deepcopy(Subset(self, train_range))\n train.dataset.transforms = self.train_transforms\n train.dataset.sample_step = (\n self.temporal_jitter_step\n if self.temporal_jitter\n else self.sample_step\n )\n train.dataset.presample_length = self.sample_length * self.sample_step\n\n # create test subset\n test = copy.deepcopy(Subset(self, test_range))\n test.dataset.transforms = self.test_transforms\n test.dataset.random_shift = False\n test.dataset.temporal_jitter = False\n\n return train, test\n\n def init_data_loaders(self) -> None:\n \"\"\" Create training and validation data loaders. \"\"\"\n devices = num_devices()\n\n self.train_dl = DataLoader(\n self.train_ds,\n batch_size=self.batch_size * devices,\n shuffle=True,\n num_workers=db_num_workers(),\n pin_memory=True,\n )\n\n self.test_dl = DataLoader(\n self.test_ds,\n batch_size=self.batch_size * devices,\n shuffle=False,\n num_workers=db_num_workers(),\n pin_memory=True,\n )\n\n def __len__(self) -> int:\n return len(self.video_records)\n\n def _sample_indices(self, record: VideoRecord) -> List[int]:\n \"\"\"\n Create a list of frame-wise offsets into a video record. Depending on\n whether or not 'random shift' is used, perform a uniform sample or a\n random sample.\n\n Args:\n record (VideoRecord): A video record.\n\n Return:\n list: Segment offsets (start indices)\n \"\"\"\n if record.num_frames > self.presample_length:\n if self.random_shift:\n # Random sample\n offsets = np.sort(\n randint(\n record.num_frames - self.presample_length + 1,\n size=self.num_samples,\n )\n )\n else:\n # Uniform sample\n distance = (\n record.num_frames - self.presample_length + 1\n ) / self.num_samples\n offsets = np.array(\n [\n int(distance / 2.0 + distance * x)\n for x in range(self.num_samples)\n ]\n )\n else:\n if self.warning:\n warnings.warn(\n f\"num_samples and/or sample_length > num_frames in {record.path}\"\n )\n offsets = np.zeros((self.num_samples,), dtype=int)\n\n return offsets\n\n def _get_frames(\n self, video_reader: decord.VideoReader, offset: int,\n ) -> List[np.ndarray]:\n \"\"\" Get frames at sample length.\n\n Args:\n video_reader: the decord tool for parsing videos\n offset: where to start the reader from\n\n Returns\n Frames at sample length in a List\n \"\"\"\n clip = list()\n\n # decord.seek() seems to have a bug. 
use seek_accurate().\n video_reader.seek_accurate(offset)\n\n # first frame\n clip.append(video_reader.next().asnumpy())\n\n # remaining frames\n try:\n for i in range(self.sample_length - 1):\n step = (\n randint(self.sample_step + 1)\n if self.temporal_jitter\n else self.sample_step\n )\n\n if step == 0 and self.temporal_jitter:\n clip.append(clip[-1].copy())\n else:\n if step > 1:\n video_reader.skip_frames(step - 1)\n cur_frame = video_reader.next().asnumpy()\n clip.append(cur_frame)\n\n except StopIteration:\n # pass when video has ended\n pass\n\n # if clip needs more frames, simply duplicate the last frame in the clip.\n while len(clip) < self.sample_length:\n clip.append(clip[-1].copy())\n\n return clip\n\n def __getitem__(self, idx: int) -> Tuple[torch.tensor, int]:\n \"\"\"\n Return:\n (clips (torch.tensor), label (int))\n \"\"\"\n record = self.video_records[idx]\n video_reader = decord.VideoReader(\n \"{}.{}\".format(\n os.path.join(self.root, record.path), self.video_ext\n ),\n # TODO try to add `ctx=decord.ndarray.gpu(0) or .cuda(0)`\n )\n record._num_frames = len(video_reader)\n\n offsets = self._sample_indices(record)\n clips = np.array([self._get_frames(video_reader, o) for o in offsets])\n\n if self.num_samples == 1:\n return (\n # [T, H, W, C] -> [C, T, H, W]\n self.transforms(torch.from_numpy(clips[0])),\n record.label,\n )\n\n else:\n return (\n # [S, T, H, W, C] -> [S, C, T, H, W]\n torch.stack(\n [self.transforms(torch.from_numpy(c)) for c in clips]\n ),\n record.label,\n )\n\n def _show_batch(\n self,\n images: List[torch.tensor],\n labels: List[int],\n sample_length: int,\n mean: Tuple[int, int, int] = DEFAULT_MEAN,\n std: Tuple[int, int, int] = DEFAULT_STD,\n ) -> None:\n \"\"\"\n Display a batch of images.\n\n Args:\n images: List of sample (clip) tensors\n labels: List of labels\n sample_length: Number of frames to show for each sample\n mean: Normalization mean\n std: Normalization std-dev\n \"\"\"\n batch_size = len(images)\n plt.tight_layout()\n fig, axs = plt.subplots(\n batch_size,\n sample_length,\n figsize=(4 * sample_length, 3 * batch_size),\n )\n\n for i, ax in enumerate(axs):\n if batch_size == 1:\n clip = images[0]\n else:\n clip = images[i]\n clip = Rearrange(\"c t h w -> t c h w\")(clip)\n if not isinstance(ax, np.ndarray):\n ax = [ax]\n for j, a in enumerate(ax):\n a.axis(\"off\")\n a.imshow(\n np.moveaxis(denormalize(clip[j], mean, std).numpy(), 0, -1)\n )\n\n # display label/label_name on the first image\n if j == 0:\n a.text(\n x=3,\n y=15,\n s=f\"{labels[i]}\",\n fontsize=20,\n bbox=dict(facecolor=\"white\", alpha=0.80),\n )\n\n def show_batch(self, train_or_test: str = \"train\", rows: int = 2) -> None:\n \"\"\"Plot first few samples in the datasets\"\"\"\n if train_or_test == \"train\":\n batch = [self.train_ds[i] for i in range(rows)]\n elif train_or_test == \"test\":\n batch = [self.test_ds[i] for i in range(rows)]\n else:\n raise ValueError(\"Unknown data type {}\".format(train_or_test))\n\n images = [im[0] for im in batch]\n labels = [im[1] for im in batch]\n\n self._show_batch(images, labels, self.sample_length)\n",
"import copy\nimport os\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data\nimport torchvision\n\nfrom pycocotools import mask as coco_mask\nfrom pycocotools.coco import COCO\n\nfrom . import transforms as T # EDITED\n\n\nclass FilterAndRemapCocoCategories(object):\n def __init__(self, categories, remap=True):\n self.categories = categories\n self.remap = remap\n\n def __call__(self, image, target):\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self.categories]\n if not self.remap:\n target[\"annotations\"] = anno\n return image, target\n anno = copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self.categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\nclass ConvertCocoPolysToMask(object):\n def __call__(self, image, target):\n w, h = image.size\n\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n\n anno = target[\"annotations\"]\n\n anno = [obj for obj in anno if obj['iscrowd'] == 0]\n\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n boxes = boxes[keep]\n classes = classes[keep]\n masks = masks[keep]\n if keypoints is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] for obj in anno])\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target\n\n\ndef _coco_remove_images_without_annotations(dataset, cat_list=None):\n def _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n def _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n min_keypoints_per_image = 10\n\n def _has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an 
annotation is valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n assert isinstance(dataset, torchvision.datasets.CocoDetection)\n ids = []\n for ds_idx, img_id in enumerate(dataset.ids):\n ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = dataset.coco.loadAnns(ann_ids)\n if cat_list:\n anno = [obj for obj in anno if obj[\"category_id\"] in cat_list]\n if _has_valid_annotation(anno):\n ids.append(ds_idx)\n\n dataset = torch.utils.data.Subset(dataset, ids)\n return dataset\n\n\ndef convert_to_coco_api(ds):\n coco_ds = COCO()\n # annotation IDs need to start at 1, not 0, see torchvision issue #1530\n ann_id = 1\n dataset = {'images': [], 'categories': [], 'annotations': []}\n categories = set()\n for img_idx in range(len(ds)):\n # find better way to get target\n # targets = ds.get_annotations(img_idx)\n img, targets = ds[img_idx]\n image_id = targets[\"image_id\"].item()\n img_dict = {}\n img_dict['id'] = image_id\n img_dict['height'] = img.shape[-2]\n img_dict['width'] = img.shape[-1]\n dataset['images'].append(img_dict)\n bboxes = targets[\"boxes\"]\n bboxes[:, 2:] -= bboxes[:, :2]\n bboxes = bboxes.tolist()\n labels = targets['labels'].tolist()\n areas = targets['area'].tolist()\n iscrowd = targets['iscrowd'].tolist()\n if 'masks' in targets:\n masks = targets['masks']\n # make masks Fortran contiguous for coco_mask\n masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n if 'keypoints' in targets:\n keypoints = targets['keypoints']\n keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()\n num_objs = len(bboxes)\n for i in range(num_objs):\n ann = {}\n ann['image_id'] = image_id\n ann['bbox'] = bboxes[i]\n ann['category_id'] = labels[i]\n categories.add(labels[i])\n ann['area'] = areas[i]\n ann['iscrowd'] = iscrowd[i]\n ann['id'] = ann_id\n if 'masks' in targets:\n ann[\"segmentation\"] = coco_mask.encode(masks[i].numpy())\n if 'keypoints' in targets:\n ann['keypoints'] = keypoints[i]\n ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])\n dataset['annotations'].append(ann)\n ann_id += 1\n dataset['categories'] = [{'id': i} for i in sorted(categories)]\n coco_ds.dataset = dataset\n coco_ds.createIndex()\n return coco_ds\n\n\ndef get_coco_api_from_dataset(dataset):\n for _ in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n return dataset.coco\n return convert_to_coco_api(dataset)\n\n\nclass CocoDetection(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms):\n super(CocoDetection, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n\n def __getitem__(self, idx):\n img, target = super(CocoDetection, self).__getitem__(idx)\n image_id = self.ids[idx]\n target = dict(image_id=image_id, annotations=target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n\ndef get_coco(root, image_set, transforms, mode='instances'):\n anno_file_template = \"{}_{}2017.json\"\n PATHS = {\n \"train\": (\"train2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"train\"))),\n \"val\": (\"val2017\", os.path.join(\"annotations\", 
anno_file_template.format(mode, \"val\"))),\n # \"train\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\")))\n }\n\n t = [ConvertCocoPolysToMask()]\n\n if transforms is not None:\n t.append(transforms)\n transforms = T.Compose(t)\n\n img_folder, ann_file = PATHS[image_set]\n img_folder = os.path.join(root, img_folder)\n ann_file = os.path.join(root, ann_file)\n\n dataset = CocoDetection(img_folder, ann_file, transforms=transforms)\n\n if image_set == \"train\":\n dataset = _coco_remove_images_without_annotations(dataset)\n\n # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])\n\n return dataset\n\n\ndef get_coco_kp(root, image_set, transforms):\n return get_coco(root, image_set, transforms, mode=\"person_keypoints\")\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"torch.manual_seed",
"matplotlib.pyplot.subplots",
"torch.from_numpy",
"torch.utils.data.Subset",
"numpy.zeros",
"numpy.random.randint"
],
[
"torch.zeros",
"torch.tensor",
"torch.as_tensor",
"torch.utils.data.Subset",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LawrenceMMStewart/Optimal_Transport_MIT | [
"a71a0110fa15110692fd383c1e77a6c347ef9ca3",
"a71a0110fa15110692fd383c1e77a6c347ef9ca3"
] | [
"src/augment.py",
"datasets/wine/process_example.py"
] | [
"\"\"\"\nFile: augment\nDescription: Evaluate the performance of MLP's (trained on various levels\nof noisy data) on the validation set. \nAuthor Lawrence Stewart <[email protected]>\nLicense: Mit License \n\"\"\"\n\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nimport os\nimport argparse \n#seed the RNG \nnp.random.seed(123)\ntf.random.set_seed(123)\n\n#args = number of labels to train on\nparser = argparse.ArgumentParser(description = \"Sinkhorn Batch Imputation for 3D Dataset\")\nparser.add_argument(\"dataset\",help=\"Options = wine,\",type = str)\nargs = parser.parse_args()\n\ndname = args.dataset\n\n\n#initialisations for dataset\nscaler = MinMaxScaler() \nif dname ==\"wine\":\n\tpath = os.path.join(\"datasets\",\"wine\",\"winequality-white.csv\")\ndata = pd.read_csv(path,sep=';')\nX = data.drop(columns='quality')\nY = data['quality']\n#fit the scaler to X \nscaler.fit(X)\n\n#split into train and test sets\ntrain_x,test_x,train_y,test_y = train_test_split(X,Y,\n\trandom_state = 0, stratify = Y,shuffle=True,\n\ttrain_size=4000)\n\ntrain = scaler.transform(train_x)\ntest = scaler.transform(test_x)\ntrain_y = pd.DataFrame.to_numpy(train_y).reshape(-1,1).astype(np.float32)\ntest_y = pd.DataFrame.to_numpy(test_y).reshape(-1,1).astype(np.float32)\n\n\nl2reg=1e-3\nd=train.shape[1]\nloss_fun = tf.keras.losses.MSE\n\n\ncheckpoint_dir = os.path.join(\"src\",\"models\",dname,\"aug_noise\")\n\naccuracy = []\nnoise_amp = []\nfor i in range(1,6):\n\tmname = \"1.00E-0\"+\"%i\"%i\n\tmdir = os.path.join(checkpoint_dir,mname)\n\tmodel = tf.keras.models.load_model(mdir)\n\tpred = model(test)\n\tmse = tf.reduce_mean(loss_fun(pred,test_y))\n\taccuracy.append(mse.numpy())\n\tnoise_amp.append(10**(-i))\n\n\nprint(\"models acc\",list(zip(noise_amp,accuracy)))\nfig, ax = plt.subplots()\nbarWidth = 0.25\nr = np.arange(len(accuracy))\nplt.bar(r,accuracy,edgecolor='black')\nopacity = 0.6\n\nplt.grid(\"on\",axis='y')\nplt.ylabel(\"MSE Validation Set\")\nplt.xlabel(r\"$\\sigma$\")\nplt.xticks([a for a in range(len(r))],\n\t[str(a) for a in noise_amp])\nplt.yscale(\"log\")\nax.set_facecolor('#D9E6E8')\nplt.show()\n",
"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\n\n\nscaler = MinMaxScaler() \npath = \"winequality-white.csv\"\ndata = pd.read_csv(path,sep=';')\n\ndesc = data.describe()\nprint(desc.T)\n\nfig = sns.heatmap(data.corr()\n,annot=True,linewidths=.5,center=0,cmap=\"YlGnBu\")\n# plt.show()\n\n#find any nan values:\nprint(\"Missing values? \\n\",data.isna().any())\n\n\"\"\"\nAccording to the correlation matrix, we can drop the following variables:\npH, free sulfur dioxide, residual sugar (looking at their correlation\nto the wine quality.)\n\"\"\"\nkeys_to_drop = ['pH','free sulfur dioxide','residual sugar']\ndata_cut = data.drop(columns=keys_to_drop)\n\n\nX = data_cut.drop(columns='quality')\nY = data_cut['quality']\nscaler.fit(X)\n\ntrain_x,test_x,train_y,test_y =train_test_split(X,Y,\n\trandom_state = 0, stratify = Y,shuffle=True,\n\ttrain_size=4000)\n\ntrain = scaler.transform(train_x)\ntest = scaler.transform(test_x)\n\n\nprint(train.shape)\nprint(test.shape)\n\n\n\n\n"
] | [
[
"tensorflow.keras.models.load_model",
"pandas.read_csv",
"numpy.random.seed",
"tensorflow.random.set_seed",
"matplotlib.pyplot.yscale",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.subplots",
"pandas.DataFrame.to_numpy",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.ylabel"
],
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kclamar/vedo | [
"2fd8b02ba8debcabbf43f0a4decbc141854273e1"
] | [
"examples/pyplot/plot_errband.py"
] | [
"\"\"\"Plotting functions with error bands\"\"\"\nimport numpy as np\nfrom vedo import *\nfrom vedo.pyplot import plot\n\n# Make up same data\nx = np.arange(0, 6, 0.1)\ny = 2+2*np.sin(2*x)/(x+1)\nye= y**2 / 10\nminy = np.min(y-ye)\nidx = np.argmax(y)\n\n# Plot the two variables, return a Plot(Assembly) object:\nplt = plot(x,y,\n yerrors=ye,\n xtitle='time in \\museconds',\n ytitle='y oscillation [a.u.]',\n ylim=(0.5, 5),\n aspect=4/3, # aspect ratio (any float = x_size/y_size)\n errorBand=True, # join errors on y into an error band\n lc=\"k\", # line color\n ec=\"r\", # error band color\n la=0.6, # error and line alphas\n pad=0.0, # tight margins, no padding\n)\n\n# Add a grey transparent rectangle to represent an exclusion region:\nplt += Rectangle([1,0.5], [2.7,5], alpha=0.2, c='k')\n\n# Add some text and latex formula\nplt += Text3D(\"Excluded\\ntime range!\", s=.2, c='k', font=\"Quikhand\").rotateZ(20).pos(1.3, 3.6)\nplt += Latex(r\"y(t)=2+2\\cdot\\frac{\\sin(2t)}{(t+1)}\", pos=(4.7, 4.7), s=.8, c='db')\n\n# Add a star marker at maximum of function (at z=0.1, so it stays on top):\nplt += Marker('*', pos=(x[idx], y[idx], 0.1), c='blue')\n\n# Add a dashed line for the minimum\nplt += DashedLine([x[0], miny], [x[-1], miny])\n\nplt.show(zoom=1.2, mode=\"image\").close()\n"
] | [
[
"numpy.arange",
"numpy.argmax",
"numpy.sin",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mfwarren/FreeCoding | [
"58ac87f35ad2004a3514782556762ee0ed72c39a"
] | [
"2015/06/fc_2015_06_07.py"
] | [
"#!/usr/bin/env python3\n# imports go here\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.utils import check_random_state\n\n#\n# Free Coding session for 2015-06-07\n# Written by Matt Warren\n#\n\nn = 100\nx = np.arange(n)\nrs = check_random_state(0)\ny = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))\n\nir = IsotonicRegression()\n\ny_ = ir.fit_transform(x, y)\n\nsegments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]\nlc = LineCollection(segments, zorder=0)\nlc.set_array(np.ones(len(y)))\nlc.set_linewidths(0.5 * np.ones(n))\n\nfig = plt.figure()\nplt.plot(x, y, 'r. ', markersize=12)\nplt.plot(x, y_, 'g.-', markersize=12)\nplt.gca().add_collection(lc)\nplt.legend(('Data', 'Isotonic Fit'), loc='lower right')\nplt.title('Isotonic regression')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"sklearn.isotonic.IsotonicRegression",
"matplotlib.pyplot.title",
"matplotlib.collections.LineCollection",
"numpy.arange",
"numpy.ones",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"sklearn.utils.check_random_state",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
carlosferpereira/OpenAeroStruct | [
"35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d",
"35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d",
"35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d",
"35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d",
"35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d"
] | [
"openaerostruct/common/tests/test_reynolds_comp.py",
"openaerostruct/tests/test_simple_rect_AS.py",
"openaerostruct/structures/local_stiff.py",
"openaerostruct/structures/fuel_loads.py",
"openaerostruct/tests/test_aero_opt_no_symmetry.py"
] | [
"import unittest\r\nimport numpy as np\r\nimport openmdao.api as om\r\nfrom openmdao.utils.assert_utils import assert_check_partials\r\nfrom openaerostruct.common.reynolds_comp import ReynoldsComp\r\n\r\n\r\nclass Test(unittest.TestCase):\r\n\r\n def test_reynolds_derivs(self):\r\n comp = ReynoldsComp()\r\n\r\n prob = om.Problem()\r\n prob.model.add_subsystem('comp', comp, promotes=['*'])\r\n prob.setup(force_alloc_complex=True)\r\n\r\n prob['rho'] = np.random.random()\r\n prob['mu'] = np.random.random()\r\n prob['v'] = np.random.random()\r\n prob.run_model()\r\n\r\n check = prob.check_partials(compact_print=True, method='cs', step=1e-40)\r\n\r\n assert_check_partials(check)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n",
"from openmdao.utils.assert_utils import assert_rel_error\nimport unittest\nimport numpy as np\n\nfrom openaerostruct.geometry.utils import generate_mesh\n\nfrom openaerostruct.integration.aerostruct_groups import AerostructGeometry, AerostructPoint\n\nimport openmdao.api as om\nfrom openaerostruct.utils.constants import grav_constant\n\n\nclass Test(unittest.TestCase):\n\n def test(self):\n # Create a dictionary to store options about the surface\n # OM: vary 'num_y' and 'num_x' to change the size of the mesh\n mesh_dict = {'num_y' : 5,\n 'num_x' : 2,\n 'wing_type' : 'rect',\n 'symmetry' : True}\n\n mesh = generate_mesh(mesh_dict)\n\n surf_dict = {\n # Wing definition\n 'name' : 'wing', # name of the surface\n 'symmetry' : True, # if true, model one half of wing\n # reflected across the plane y = 0\n 'S_ref_type' : 'wetted', # how we compute the wing area,\n # can be 'wetted' or 'projected'\n 'fem_model_type' : 'tube',\n\n 'thickness_cp' : np.ones((2)) * .1,\n 'twist_cp' : np.ones((2)),\n\n 'mesh' : mesh,\n\n # Aerodynamic performance of the lifting surface at\n # an angle of attack of 0 (alpha=0).\n # These CL0 and CD0 values are added to the CL and CD\n # obtained from aerodynamic analysis of the surface to get\n # the total CL and CD.\n # These CL0 and CD0 values do not vary wrt alpha.\n 'CL0' : 0.0, # CL of the surface at alpha=0\n 'CD0' : 0.015, # CD of the surface at alpha=0\n\n # Airfoil properties for viscous drag calculation\n 'k_lam' : 0.05, # percentage of chord with laminar\n # flow, used for viscous drag\n 't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)\n 'c_max_t' : .303, # chordwise location of maximum (NACA0015)\n # thickness\n 'with_viscous' : True,\n 'with_wave' : False, # if true, compute wave drag\n\n # Structural values are based on aluminum 7075\n 'E' : 70.e9, # [Pa] Young's modulus of the spar\n 'G' : 30.e9, # [Pa] shear modulus of the spar\n 'yield' : 500.e6 / 2.5, # [Pa] yield stress divided by 2.5 for limiting case\n 'mrho' : 3.e3, # [kg/m^3] material density\n 'fem_origin' : 0.35, # normalized chordwise location of the spar\n 'wing_weight_ratio' : 2.,\n 'struct_weight_relief' : False, # True to add the weight of the structure to the loads on the structure\n 'distributed_fuel_weight' : False,\n # Constraints\n 'exact_failure_constraint' : False, # if false, use KS function\n }\n\n surfaces = [surf_dict]\n\n # Create the problem and assign the model group\n prob = om.Problem()\n\n # Add problem information as an independent variables component\n indep_var_comp = om.IndepVarComp()\n indep_var_comp.add_output('v', val=248.136, units='m/s')\n indep_var_comp.add_output('alpha', val=9., units='deg')\n indep_var_comp.add_output('Mach_number', val=0.84)\n indep_var_comp.add_output('re', val=1.e6, units='1/m')\n indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')\n indep_var_comp.add_output('CT', val=grav_constant * 17.e-6, units='1/s')\n indep_var_comp.add_output('R', val=11.165e6, units='m')\n indep_var_comp.add_output('W0', val=0.4 * 3e5, units='kg')\n indep_var_comp.add_output('speed_of_sound', val=295.4, units='m/s')\n indep_var_comp.add_output('load_factor', val=1.)\n indep_var_comp.add_output('empty_cg', val=np.zeros((3)), units='m')\n\n prob.model.add_subsystem('prob_vars',\n indep_var_comp,\n promotes=['*'])\n\n # Loop over each surface in the surfaces list\n for surface in surfaces:\n\n # Get the surface name and create a group to contain components\n # only for this surface\n name = surface['name']\n\n aerostruct_group = 
AerostructGeometry(surface=surface)\n\n            # Add tmp_group to the problem with the name of the surface.\n            prob.model.add_subsystem(name, aerostruct_group)\n\n        # Loop through and add a certain number of aero points\n        for i in range(1):\n\n            point_name = 'AS_point_{}'.format(i)\n            # Connect the parameters within the model for each aero point\n\n            # Create the aero point group and add it to the model\n            AS_point = AerostructPoint(surfaces=surfaces)\n\n            prob.model.add_subsystem(point_name, AS_point)\n\n            # Connect flow properties to the analysis point\n            prob.model.connect('v', point_name + '.v')\n            prob.model.connect('alpha', point_name + '.alpha')\n            prob.model.connect('Mach_number', point_name + '.Mach_number')\n            prob.model.connect('re', point_name + '.re')\n            prob.model.connect('rho', point_name + '.rho')\n            prob.model.connect('CT', point_name + '.CT')\n            prob.model.connect('R', point_name + '.R')\n            prob.model.connect('W0', point_name + '.W0')\n            prob.model.connect('speed_of_sound', point_name + '.speed_of_sound')\n            prob.model.connect('empty_cg', point_name + '.empty_cg')\n            prob.model.connect('load_factor', point_name + '.load_factor')\n\n            for surface in surfaces:\n\n                com_name = point_name + '.' + name + '_perf'\n                prob.model.connect(name + '.local_stiff_transformed', point_name + '.coupled.' + name + '.local_stiff_transformed')\n                prob.model.connect(name + '.nodes', point_name + '.coupled.' + name + '.nodes')\n\n                # Connect aerodynamic mesh to coupled group mesh\n                prob.model.connect(name + '.mesh', point_name + '.coupled.' + name + '.mesh')\n\n                # Connect performance calculation variables\n                prob.model.connect(name + '.radius', com_name + '.radius')\n                prob.model.connect(name + '.thickness', com_name + '.thickness')\n                prob.model.connect(name + '.nodes', com_name + '.nodes')\n                prob.model.connect(name + '.cg_location', point_name + '.' + 'total_perf.' + name + '_cg_location')\n                prob.model.connect(name + '.structural_mass', point_name + '.' + 'total_perf.' + name + '_structural_mass')\n                prob.model.connect(name + '.t_over_c', com_name + '.t_over_c')\n\n        prob.driver = om.ScipyOptimizeDriver()\n        prob.driver.options['tol'] = 1e-9\n\n        # Setup problem and add design variables, constraint, and objective\n        prob.model.add_design_var('wing.twist_cp', lower=-10., upper=15.)\n        prob.model.add_design_var('wing.thickness_cp', lower=0.01, upper=0.5, scaler=1e2)\n        prob.model.add_constraint('AS_point_0.wing_perf.failure', upper=0.)\n        prob.model.add_constraint('AS_point_0.wing_perf.thickness_intersects', upper=0.)\n\n        # Add design variables, constraint, and objective on the problem\n        prob.model.add_design_var('alpha', lower=-10., upper=10.)\n        prob.model.add_constraint('AS_point_0.L_equals_W', equals=0.)\n        prob.model.add_objective('AS_point_0.fuelburn', scaler=1e-5)\n\n        # Set up the problem\n        prob.setup()\n\n        prob.run_driver()\n\n        assert_rel_error(self, prob['AS_point_0.fuelburn'][0], 68345.6633812, 1e-5)\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
"import numpy as np\n\nimport openmdao.api as om\n\n\ncoeffs_2 = np.array([\n [ 1., -1.],\n [-1., 1.],\n])\n\ncoeffs_y = np.array([\n [ 12., -6., -12., -6.],\n [ -6., 4., 6., 2.],\n [-12., 6., 12., 6.],\n [ -6., 2., 6., 4.],\n])\n\ncoeffs_z = np.array([\n [ 12., 6., -12., 6.],\n [ 6., 4., -6., 2.],\n [-12., -6., 12., -6.],\n [ 6., 2., -6., 4.],\n])\n\n\nclass LocalStiff(om.ExplicitComponent):\n\n def initialize(self):\n self.options.declare('surface', types=dict)\n\n def setup(self):\n surface = self.options['surface']\n\n self.ny = ny = surface['mesh'].shape[1]\n\n self.add_input('A', shape=ny - 1, units='m**2')\n self.add_input('J', shape=ny - 1, units='m**4')\n self.add_input('Iy', shape=ny - 1, units='m**4')\n self.add_input('Iz', shape=ny - 1, units='m**4')\n self.add_input('element_lengths', shape=ny - 1, units='m')\n\n self.add_output('local_stiff', shape=(ny - 1, 12, 12))\n\n rows = np.arange(144 * (ny - 1))\n cols = np.outer(np.arange(ny - 1), np.ones(144, int)).flatten()\n\n self.declare_partials('local_stiff', 'A', rows=rows, cols=cols)\n self.declare_partials('local_stiff', 'J', rows=rows, cols=cols)\n self.declare_partials('local_stiff', 'Iy', rows=rows, cols=cols)\n self.declare_partials('local_stiff', 'Iz', rows=rows, cols=cols)\n self.declare_partials('local_stiff', 'element_lengths', rows=rows, cols=cols)\n\n def compute(self, inputs, outputs):\n surface = self.options['surface']\n\n ny = self.ny\n E = surface['E']\n G = surface['G']\n\n A = inputs['A']\n Iy = inputs['Iy']\n Iz = inputs['Iz']\n J = inputs['J']\n L = inputs['element_lengths']\n\n outputs['local_stiff'] = 0.\n\n for i in range(2):\n for j in range(2):\n outputs['local_stiff'][:, 0 + i, 0 + j] = E * A / L * coeffs_2[i, j]\n outputs['local_stiff'][:, 2 + i, 2 + j] = G * J / L * coeffs_2[i, j]\n\n for i in range(4):\n for j in range(4):\n outputs['local_stiff'][:, 4 + i, 4 + j] = E * Iy / L ** 3 * coeffs_y[i, j]\n outputs['local_stiff'][:, 8 + i, 8 + j] = E * Iz / L ** 3 * coeffs_z[i, j]\n\n for i in [1, 3]:\n for j in range(4):\n outputs['local_stiff'][:, 4 + i, 4 + j] *= L\n outputs['local_stiff'][:, 8 + i, 8 + j] *= L\n for i in range(4):\n for j in [1, 3]:\n outputs['local_stiff'][:, 4 + i, 4 + j] *= L\n outputs['local_stiff'][:, 8 + i, 8 + j] *= L\n\n def compute_partials(self, inputs, partials):\n surface = self.options['surface']\n ny = surface['mesh'].shape[1]\n E = surface['E']\n G = surface['G']\n\n A = inputs['A']\n Iy = inputs['Iy']\n Iz = inputs['Iz']\n J = inputs['J']\n L = inputs['element_lengths']\n\n derivs_A = partials['local_stiff', 'A'].reshape((ny - 1, 12, 12))\n derivs_Iy = partials['local_stiff', 'Iy'].reshape((ny - 1, 12, 12))\n derivs_Iz = partials['local_stiff', 'Iz'].reshape((ny - 1, 12, 12))\n derivs_J = partials['local_stiff', 'J'].reshape((ny - 1, 12, 12))\n derivs_L = partials['local_stiff', 'element_lengths'].reshape((ny - 1, 12, 12))\n\n derivs_A[:] = 0.\n derivs_Iy[:] = 0.\n derivs_Iz[:] = 0.\n derivs_J[:] = 0.\n derivs_L[:] = 0.\n\n for i in range(2):\n for j in range(2):\n derivs_A[:, 0 + i, 0 + j] = E / L * coeffs_2[i, j]\n derivs_L[:, 0 + i, 0 + j] = -E * A / L ** 2 * coeffs_2[i, j]\n\n derivs_J[:, 2 + i, 2 + j] = G / L * coeffs_2[i, j]\n derivs_L[:, 2 + i, 2 + j] = -G * J / L ** 2 * coeffs_2[i, j]\n\n for i in range(4):\n for j in range(4):\n derivs_Iy[:, 4 + i, 4 + j] = E / L ** 3 * coeffs_y[i, j]\n derivs_L [:, 4 + i, 4 + j] = -3 * E * Iy / L ** 4 * coeffs_y[i, j]\n\n derivs_Iz[:, 8 + i, 8 + j] = E / L ** 3 * coeffs_z[i, j]\n derivs_L [:, 8 + i, 8 + j] = -3 * E * Iz / L 
** 4 * coeffs_z[i, j]\n\n for i in [1, 3]:\n for j in range(4):\n derivs_Iy[:, 4 + i, 4 + j] = E / L ** 2 * coeffs_y[i, j]\n derivs_L[:, 4 + i, 4 + j] = -2 * E * Iy / L ** 3 * coeffs_y[i, j]\n\n derivs_Iz[:, 8 + i, 8 + j] = E / L ** 2 * coeffs_z[i, j]\n derivs_L[:, 8 + i, 8 + j] = -2 * E * Iz / L ** 3 * coeffs_z[i, j]\n for i in range(4):\n for j in [1, 3]:\n derivs_Iy[:, 4 + i, 4 + j] = E / L ** 2 * coeffs_y[i, j]\n derivs_L[:, 4 + i, 4 + j] = -2 * E * Iy / L ** 3 * coeffs_y[i, j]\n\n derivs_Iz[:, 8 + i, 8 + j] = E / L ** 2 * coeffs_z[i, j]\n derivs_L[:, 8 + i, 8 + j] = -2 * E * Iz / L ** 3 * coeffs_z[i, j]\n\n for i in [1, 3]:\n for j in [1, 3]:\n derivs_Iy[:, 4 + i, 4 + j] = E / L * coeffs_y[i, j]\n derivs_L[:, 4 + i, 4 + j] = -E * Iy / L ** 2 * coeffs_y[i, j]\n\n derivs_Iz[:, 8 + i, 8 + j] = E / L * coeffs_z[i, j]\n derivs_L[:, 8 + i, 8 + j] = -E * Iz / L ** 2 * coeffs_z[i, j]\n",
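As a quick sanity check on LocalStiff above, the axial 2x2 block of every 12x12 element matrix should reduce to the classic bar stiffness E*A/L * [[1, -1], [-1, 1]]. A sketch under the assumption that LocalStiff is importable as defined above (the import path below is assumed); the surface dict carries only the fields the component reads, and all numbers are invented:

import numpy as np
import openmdao.api as om
# assumed import path; LocalStiff is the component defined above
from openaerostruct.structures.local_stiff import LocalStiff

# ny = mesh.shape[1] = 3, so the component sizes its inputs for 2 elements
surface = {'mesh': np.zeros((2, 3, 3)), 'E': 70.e9, 'G': 30.e9}

prob = om.Problem()
prob.model.add_subsystem('local_stiff', LocalStiff(surface=surface), promotes=['*'])
prob.setup()
prob['A'] = np.array([1e-3, 2e-3])
prob['J'] = np.array([1e-6, 2e-6])
prob['Iy'] = np.array([1e-6, 2e-6])
prob['Iz'] = np.array([1e-6, 2e-6])
prob['element_lengths'] = np.array([0.5, 1.0])
prob.run_model()

K = prob['local_stiff']
expected_axial = surface['E'] * prob['A'][0] / prob['element_lengths'][0]
assert np.isclose(K[0, 0, 0], expected_axial)   # E*A/L on the diagonal
assert np.isclose(K[0, 0, 1], -expected_axial)  # -E*A/L off the diagonal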
"import numpy as np\n\nimport openmdao.api as om\nfrom openaerostruct.utils.constants import grav_constant\n\n\ndef norm(vec):\n return np.sqrt(np.sum(vec**2))\n\nclass FuelLoads(om.ExplicitComponent):\n \"\"\"\n Compute the nodal loads from the distributed fuel within the wing\n to be applied to the wing structure.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n def initialize(self):\n self.options.declare('surface', types=dict)\n\n def setup(self):\n self.surface = surface = self.options['surface']\n self.ny = surface['mesh'].shape[1]\n\n self.add_input('fuel_vols', val=np.ones((self.ny-1)), units='m**3')\n self.add_input('nodes', val=np.zeros((self.ny, 3)), units='m')\n self.add_input('fuel_mass', val=1., units='kg')\n self.add_input('load_factor', val=1.)\n self.add_output('fuel_weight_loads', val=np.zeros((self.ny, 6)), units='N')\n\n self.declare_partials('*', '*', method='cs')\n\n def compute(self, inputs, outputs):\n nodes = inputs['nodes']\n\n element_lengths = np.ones(self.ny - 1, dtype=complex)\n for i in range(self.ny - 1):\n element_lengths[i] = norm(nodes[i+1] - nodes[i])\n\n # And we also need the deltas between consecutive nodes\n deltas = nodes[1:, :] - nodes[:-1, :]\n\n # Fuel weight\n fuel_weight = (inputs['fuel_mass'] + self.surface['Wf_reserve']) * grav_constant * inputs['load_factor']\n\n if self.surface['symmetry']:\n fuel_weight /= 2.\n\n vols = inputs['fuel_vols']\n sum_vols = np.sum(vols)\n\n # Now we need the fuel weight per segment\n # Assume it's divided evenly based on vols\n z_weights = vols * fuel_weight / sum_vols\n\n # Assume weight coincides with the elastic axis\n z_forces_for_each = z_weights / 2.\n z_moments_for_each = z_weights * element_lengths / 12. * (deltas[:, 0]**2 + deltas[:,1]**2)**0.5 / element_lengths\n\n loads = np.zeros((self.ny, 6), dtype=complex)\n\n # Loads in z-direction\n loads[:-1, 2] = loads[:-1, 2] - z_forces_for_each\n loads[1:, 2] = loads[1:, 2] - z_forces_for_each\n\n # Bending moments for consistency\n loads[:-1, 3] = loads[:-1, 3] - z_moments_for_each * deltas[: , 1] / element_lengths\n loads[1:, 3] = loads[1:, 3] + z_moments_for_each * deltas[: , 1] / element_lengths\n\n loads[:-1, 4] = loads[:-1, 4] - z_moments_for_each * deltas[: , 0] / element_lengths\n loads[1:, 4] = loads[1:, 4] + z_moments_for_each * deltas[: , 0] / element_lengths\n\n outputs['fuel_weight_loads'] = loads\n",
"from openmdao.utils.assert_utils import assert_rel_error\nimport unittest\n\nclass Test(unittest.TestCase):\n\n def test(self):\n import numpy as np\n\n import openmdao.api as om\n\n from openaerostruct.geometry.utils import generate_mesh\n from openaerostruct.geometry.geometry_group import Geometry\n from openaerostruct.aerodynamics.aero_groups import AeroPoint\n\n # Create a dictionary to store options about the mesh\n mesh_dict = {'num_y' : 7,\n 'num_x' : 2,\n 'wing_type' : 'CRM',\n 'symmetry' : False,\n 'num_twist_cp' : 5}\n\n # Generate the aerodynamic mesh based on the previous dictionary\n mesh, twist_cp = generate_mesh(mesh_dict)\n\n # Create a dictionary with info and options about the aerodynamic\n # lifting surface\n surface = {\n # Wing definition\n 'name' : 'wing', # name of the surface\n 'symmetry' : False, # if true, model one half of wing\n # reflected across the plane y = 0\n 'S_ref_type' : 'wetted', # how we compute the wing area,\n # can be 'wetted' or 'projected'\n 'fem_model_type' : 'tube',\n\n 'twist_cp' : twist_cp,\n 'mesh' : mesh,\n\n # Aerodynamic performance of the lifting surface at\n # an angle of attack of 0 (alpha=0).\n # These CL0 and CD0 values are added to the CL and CD\n # obtained from aerodynamic analysis of the surface to get\n # the total CL and CD.\n # These CL0 and CD0 values do not vary wrt alpha.\n 'CL0' : 0.0, # CL of the surface at alpha=0\n 'CD0' : 0.015, # CD of the surface at alpha=0\n\n # Airfoil properties for viscous drag calculation\n 'k_lam' : 0.05, # percentage of chord with laminar\n # flow, used for viscous drag\n 't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)\n 'c_max_t' : .303, # chordwise location of maximum (NACA0015)\n # thickness\n 'with_viscous' : True, # if true, compute viscous drag\n 'with_wave' : False, # if true, compute wave drag\n }\n\n # Create the OpenMDAO problem\n prob = om.Problem()\n\n # Create an independent variable component that will supply the flow\n # conditions to the problem.\n indep_var_comp = om.IndepVarComp()\n indep_var_comp.add_output('v', val=248.136, units='m/s')\n indep_var_comp.add_output('alpha', val=5., units='deg')\n indep_var_comp.add_output('Mach_number', val=0.84)\n indep_var_comp.add_output('re', val=1.e6, units='1/m')\n indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')\n indep_var_comp.add_output('cg', val=np.zeros((3)), units='m')\n\n # Add this IndepVarComp to the problem model\n prob.model.add_subsystem('prob_vars',\n indep_var_comp,\n promotes=['*'])\n\n # Create and add a group that handles the geometry for the\n # aerodynamic lifting surface\n geom_group = Geometry(surface=surface)\n prob.model.add_subsystem(surface['name'], geom_group)\n\n # Create the aero point group, which contains the actual aerodynamic\n # analyses\n aero_group = AeroPoint(surfaces=[surface])\n point_name = 'aero_point_0'\n prob.model.add_subsystem(point_name, aero_group,\n promotes_inputs=['v', 'alpha', 'Mach_number', 're', 'rho', 'cg'])\n\n name = surface['name']\n\n # Connect the mesh from the geometry component to the analysis point\n prob.model.connect(name + '.mesh', point_name + '.' + name + '.def_mesh')\n\n # Perform the connections with the modified names within the\n # 'aero_states' group.\n prob.model.connect(name + '.mesh', point_name + '.aero_states.' + name + '_def_mesh')\n\n prob.model.connect(name + '.t_over_c', point_name + '.' + name + '_perf.' 
+ 't_over_c')\n\n # Import the Scipy Optimizer and set the driver of the problem to use\n # it, which defaults to an SLSQP optimization method\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['tol'] = 1e-9\n\n # Setup problem and add design variables, constraint, and objective\n prob.model.add_design_var('wing.twist_cp', lower=-10., upper=15.)\n prob.model.add_constraint(point_name + '.wing_perf.CL', equals=0.5)\n prob.model.add_objective(point_name + '.wing_perf.CD', scaler=1e4)\n\n # Set up and run the optimization problem\n prob.setup()\n prob.run_model()\n # prob.check_partials()\n # exit()\n prob.run_driver()\n\n assert_rel_error(self, prob['aero_point_0.wing_perf.CD'][0], 0.03339013029042684, 1e-5)\n assert_rel_error(self, prob['aero_point_0.wing_perf.CL'][0], 0.5, 1e-6)\n assert_rel_error(self, prob['aero_point_0.CM'][1], -1.7886135541410009, 1e-4)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
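Both optimization tests above drive the model with om.ScipyOptimizeDriver, which wraps scipy.optimize and defaults to SLSQP. A short sketch of the driver options they rely on (values shown are illustrative):

import openmdao.api as om

driver = om.ScipyOptimizeDriver()
driver.options['optimizer'] = 'SLSQP'  # gradient-based SQP; the default
driver.options['tol'] = 1e-9           # convergence tolerance, as in the tests
driver.options['maxiter'] = 200        # iteration cap
driver.options['disp'] = True          # print scipy's convergence report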
] | [
[
"numpy.random.random"
],
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.arange",
"numpy.array",
"numpy.ones"
],
[
"numpy.zeros",
"numpy.sum",
"numpy.ones"
],
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LukeTheHecker/mne-python | [
"7d508e4fded73b5beb73564e4a01169530e058a8",
"7d508e4fded73b5beb73564e4a01169530e058a8"
] | [
"mne/export/_egimff.py",
"mne/channels/montage.py"
] | [
"# -*- coding: utf-8 -*-\n# Authors: MNE Developers\n#\n# License: BSD (3-clause)\n\nimport datetime\n\nimport numpy as np\n\nfrom ..io.egi.egimff import _import_mffpy\nfrom ..io.pick import pick_types, pick_channels\nfrom ..utils import verbose\n\n\n@verbose\ndef export_evokeds_mff(fname, evoked, history=None, *, verbose=None):\n \"\"\"Export evoked dataset to MFF.\n\n Parameters\n ----------\n %(export_params_fname)s\n evoked : list of Evoked instances\n List of evoked datasets to export to one file. Note that the\n measurement info from the first evoked instance is used, so be sure\n that information matches.\n history : None (default) | list of dict\n Optional list of history entries (dictionaries) to be written to\n history.xml. This must adhere to the format described in\n mffpy.xml_files.History.content. If None, no history.xml will be\n written.\n %(verbose)s\n\n Notes\n -----\n .. versionadded:: 0.24\n\n Only EEG channels are written to the output file.\n ``info['device_info']['type']`` must be a valid MFF recording device\n (e.g. 'HydroCel GSN 256 1.0'). This field is automatically populated when\n using MFF read functions.\n \"\"\"\n mffpy = _import_mffpy('Export evokeds to MFF.')\n import pytz\n info = evoked[0].info\n if np.round(info['sfreq']) != info['sfreq']:\n raise ValueError('Sampling frequency must be a whole number. '\n f'sfreq: {info[\"sfreq\"]}')\n sampling_rate = int(info['sfreq'])\n\n # Initialize writer\n writer = mffpy.Writer(fname)\n current_time = pytz.utc.localize(datetime.datetime.utcnow())\n writer.addxml('fileInfo', recordTime=current_time)\n try:\n device = info['device_info']['type']\n except (TypeError, KeyError):\n raise ValueError('No device type. Cannot determine sensor layout.')\n writer.add_coordinates_and_sensor_layout(device)\n\n # Add EEG data\n eeg_channels = pick_types(info, eeg=True, exclude=[])\n eeg_bin = mffpy.bin_writer.BinWriter(sampling_rate)\n for ave in evoked:\n # Signals are converted to µV\n block = (ave.data[eeg_channels] * 1e6).astype(np.float32)\n eeg_bin.add_block(block, offset_us=0)\n writer.addbin(eeg_bin)\n\n # Add categories\n categories_content = _categories_content_from_evokeds(evoked)\n writer.addxml('categories', categories=categories_content)\n\n # Add history\n if history:\n writer.addxml('historyEntries', entries=history)\n\n writer.write()\n\n\ndef _categories_content_from_evokeds(evoked):\n \"\"\"Return categories.xml content for evoked dataset.\"\"\"\n content = dict()\n begin_time = 0\n for ave in evoked:\n # Times are converted to microseconds\n sfreq = ave.info['sfreq']\n duration = np.round(len(ave.times) / sfreq * 1e6).astype(int)\n end_time = begin_time + duration\n event_time = begin_time - np.round(ave.tmin * 1e6).astype(int)\n eeg_bads = _get_bad_eeg_channels(ave.info)\n content[ave.comment] = [\n _build_segment_content(begin_time, end_time, event_time, eeg_bads,\n name='Average', nsegs=ave.nave)\n ]\n begin_time += duration\n return content\n\n\ndef _get_bad_eeg_channels(info):\n \"\"\"Return a list of bad EEG channels formatted for categories.xml.\n\n Given a list of only the EEG channels in file, return the indices of this\n list (starting at 1) that correspond to bad channels.\n \"\"\"\n if len(info['bads']) == 0:\n return []\n eeg_channels = pick_types(info, eeg=True, exclude=[])\n bad_channels = pick_channels(info['ch_names'], info['bads'])\n bads_elementwise = np.isin(eeg_channels, bad_channels)\n return list(np.flatnonzero(bads_elementwise) + 1)\n\n\ndef _build_segment_content(begin_time, end_time, 
event_time, eeg_bads,\n status='unedited', name=None, pns_bads=None,\n nsegs=None):\n \"\"\"Build content for a single segment in categories.xml.\n\n Segments are sorted into categories in categories.xml. In a segmented MFF\n each category can contain multiple segments, but in an averaged MFF each\n category only contains one segment (the average).\n \"\"\"\n channel_status = [{\n 'signalBin': 1,\n 'exclusion': 'badChannels',\n 'channels': eeg_bads\n }]\n if pns_bads:\n channel_status.append({\n 'signalBin': 2,\n 'exclusion': 'badChannels',\n 'channels': pns_bads\n })\n content = {\n 'status': status,\n 'beginTime': begin_time,\n 'endTime': end_time,\n 'evtBegin': event_time,\n 'evtEnd': event_time,\n 'channelStatus': channel_status,\n }\n if name:\n content['name'] = name\n if nsegs:\n content['keys'] = {\n '#seg': {\n 'type': 'long',\n 'data': nsegs\n }\n }\n return content\n",
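A hedged usage sketch for export_evokeds_mff above. The file names and the history entry are illustrative only; the optional mffpy dependency must be installed, and info['device_info']['type'] must name a valid MFF recording device:

import mne

# Read previously averaged data; 'sample-ave.fif' is a hypothetical file.
evokeds = mne.read_evokeds('sample-ave.fif')

# Export all conditions into one MFF file. Only EEG channels are written,
# and each Evoked's comment becomes a category in categories.xml.
mne.export.export_evokeds_mff('sample-ave.mff', evokeds)

# Optionally record provenance; keys must follow the format described in
# mffpy.xml_files.History.content (this entry is only illustrative).
history = [{'name': 'Averaging',
            'method': 'Technical',
            'beginTime': '2021-01-01T00:00:00.000000+00:00',
            'endTime': '2021-01-01T00:00:01.000000+00:00'}]
mne.export.export_evokeds_mff('sample-ave.mff', evokeds, history=history)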
"# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n# Eric Larson <[email protected]>\n# Marijn van Vliet <[email protected]>\n# Jona Sassenhagen <[email protected]>\n# Teon Brooks <[email protected]>\n# Christian Brodbeck <[email protected]>\n# Stefan Appelhoff <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: Simplified BSD\n\nfrom collections import OrderedDict\nfrom copy import deepcopy\nimport os.path as op\nimport re\n\nimport numpy as np\n\nfrom ..defaults import HEAD_SIZE_DEFAULT\nfrom ..source_space import get_mni_fiducials\nfrom ..viz import plot_montage\nfrom ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart,\n _topo_to_sph, _frame_to_str, Transform,\n _verbose_frames, _fit_matched_points,\n _quat_to_affine)\nfrom ..io._digitization import (_count_points_by_type,\n _get_dig_eeg, _make_dig_points, write_dig,\n _read_dig_fif, _format_dig_points,\n _get_fid_coords, _coord_frame_const,\n _get_data_as_dict_from_dig)\nfrom ..io.meas_info import create_info\nfrom ..io.open import fiff_open\nfrom ..io.pick import pick_types, _picks_to_idx, channel_type\nfrom ..io.constants import FIFF, CHANNEL_LOC_ALIASES\nfrom ..utils import (warn, copy_function_doc_to_method_doc, _pl, verbose,\n _check_option, _validate_type, _check_fname, _on_missing,\n fill_doc)\n\nfrom ._dig_montage_utils import _read_dig_montage_egi\nfrom ._dig_montage_utils import _parse_brainvision_dig_montage\n\n_BUILT_IN_MONTAGES = [\n 'EGI_256',\n 'GSN-HydroCel-128', 'GSN-HydroCel-129', 'GSN-HydroCel-256',\n 'GSN-HydroCel-257', 'GSN-HydroCel-32', 'GSN-HydroCel-64_1.0',\n 'GSN-HydroCel-65_1.0',\n 'biosemi128', 'biosemi16', 'biosemi160', 'biosemi256',\n 'biosemi32', 'biosemi64',\n 'easycap-M1', 'easycap-M10',\n 'mgh60', 'mgh70',\n 'standard_1005', 'standard_1020', 'standard_alphabetic',\n 'standard_postfixed', 'standard_prefixed', 'standard_primed',\n 'artinis-octamon', 'artinis-brite23'\n]\n\n\ndef _check_get_coord_frame(dig):\n dig_coord_frames = sorted(set(d['coord_frame'] for d in dig))\n if len(dig_coord_frames) != 1:\n raise RuntimeError(\n 'Only a single coordinate frame in dig is supported, got '\n f'{dig_coord_frames}')\n return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None\n\n\ndef get_builtin_montages():\n \"\"\"Get a list of all builtin montages.\n\n Returns\n -------\n montages : list\n Names of all builtin montages that can be used by\n :func:`make_standard_montage`.\n \"\"\"\n return _BUILT_IN_MONTAGES\n\n\ndef make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None,\n hsp=None, hpi=None, coord_frame='unknown'):\n r\"\"\"Make montage from arrays.\n\n Parameters\n ----------\n ch_pos : dict | None\n Dictionary of channel positions. Keys are channel names and values\n are 3D coordinates - array of shape (3,) - in native digitizer space\n in m.\n nasion : None | array, shape (3,)\n The position of the nasion fiducial point.\n This point is assumed to be in the native digitizer space in m.\n lpa : None | array, shape (3,)\n The position of the left periauricular fiducial point.\n This point is assumed to be in the native digitizer space in m.\n rpa : None | array, shape (3,)\n The position of the right periauricular fiducial point.\n This point is assumed to be in the native digitizer space in m.\n hsp : None | array, shape (n_points, 3)\n This corresponds to an array of positions of the headshape points in\n 3d. 
These points are assumed to be in the native digitizer space in m.\n hpi : None | array, shape (n_hpi, 3)\n This corresponds to an array of HPI points in the native digitizer\n space. They only necessary if computation of a ``compute_dev_head_t``\n is True.\n coord_frame : str\n The coordinate frame of the points. Usually this is \"unknown\"\n for native digitizer space.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n read_dig_captrak\n read_dig_egi\n read_dig_fif\n read_dig_polhemus_isotrak\n\n Notes\n -----\n Valid ``coord_frame`` arguments are 'meg', 'mri', 'mri_voxel', 'head',\n 'mri_tal', 'ras', 'fs_tal', 'ctf_head', 'ctf_meg', 'unknown'. For custom\n montages without fiducials this parameter has to be set to 'head'.\n \"\"\"\n _validate_type(ch_pos, (dict, None), 'ch_pos')\n if ch_pos is None:\n ch_names = None\n else:\n ch_names = list(ch_pos)\n dig = _make_dig_points(\n nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp,\n dig_ch_pos=ch_pos, coord_frame=coord_frame\n )\n\n return DigMontage(dig=dig, ch_names=ch_names)\n\n\nclass DigMontage(object):\n \"\"\"Montage for digitized electrode and headshape position data.\n\n .. warning:: Montages are typically created using one of the helper\n functions in the ``See Also`` section below instead of\n instantiating this class directly.\n\n Parameters\n ----------\n dig : list of dict\n The object containing all the dig points.\n ch_names : list of str\n The names of the EEG channels.\n\n See Also\n --------\n read_dig_captrak\n read_dig_dat\n read_dig_egi\n read_dig_fif\n read_dig_hpts\n read_dig_polhemus_isotrak\n make_dig_montage\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n\n def __init__(self, *, dig=None, ch_names=None):\n dig = list() if dig is None else dig\n _validate_type(item=dig, types=list, item_name='dig')\n ch_names = list() if ch_names is None else ch_names\n n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG])\n if n_eeg != len(ch_names):\n raise ValueError(\n 'The number of EEG channels (%d) does not match the number'\n ' of channel names provided (%d)' % (n_eeg, len(ch_names))\n )\n\n self.dig = dig\n self.ch_names = ch_names\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n n_points = _count_points_by_type(self.dig)\n return ('<DigMontage | {extra:d} extras (headshape), {hpi:d} HPIs,'\n ' {fid:d} fiducials, {eeg:d} channels>').format(**n_points)\n\n @copy_function_doc_to_method_doc(plot_montage)\n def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True,\n sphere=None, verbose=None):\n return plot_montage(self, scale_factor=scale_factor,\n show_names=show_names, kind=kind, show=show,\n sphere=sphere)\n\n @fill_doc\n def rename_channels(self, mapping, allow_duplicates=False):\n \"\"\"Rename the channels.\n\n Parameters\n ----------\n %(rename_channels_mapping_duplicates)s\n\n Returns\n -------\n inst : instance of DigMontage\n The instance. Operates in-place.\n \"\"\"\n from .channels import rename_channels\n temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg')\n rename_channels(temp_info, mapping, allow_duplicates)\n self.ch_names = temp_info['ch_names']\n\n def save(self, fname):\n \"\"\"Save digitization points to FIF.\n\n Parameters\n ----------\n fname : str\n The filename to use. 
Should end in .fif or .fif.gz.\n \"\"\"\n coord_frame = _check_get_coord_frame(self.dig)\n write_dig(fname, self.dig, coord_frame)\n\n def __iadd__(self, other):\n \"\"\"Add two DigMontages in place.\n\n Notes\n -----\n Two DigMontages can only be added if there are no duplicated ch_names\n and if fiducials are present they should share the same coordinate\n system and location values.\n \"\"\"\n def is_fid_defined(fid):\n return not(\n fid.nasion is None and fid.lpa is None and fid.rpa is None\n )\n\n # Check for none duplicated ch_names\n ch_names_intersection = set(self.ch_names).intersection(other.ch_names)\n if ch_names_intersection:\n raise RuntimeError((\n \"Cannot add two DigMontage objects if they contain duplicated\"\n \" channel names. Duplicated channel(s) found: {}.\"\n ).format(\n ', '.join(['%r' % v for v in sorted(ch_names_intersection)])\n ))\n\n # Check for unique matching fiducials\n self_fid, self_coord = _get_fid_coords(self.dig)\n other_fid, other_coord = _get_fid_coords(other.dig)\n\n if is_fid_defined(self_fid) and is_fid_defined(other_fid):\n if self_coord != other_coord:\n raise RuntimeError('Cannot add two DigMontage objects if '\n 'fiducial locations are not in the same '\n 'coordinate system.')\n\n for kk in self_fid:\n if not np.array_equal(self_fid[kk], other_fid[kk]):\n raise RuntimeError('Cannot add two DigMontage objects if '\n 'fiducial locations do not match '\n '(%s)' % kk)\n\n # keep self\n self.dig = _format_dig_points(\n self.dig + [d for d in other.dig\n if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]\n )\n else:\n self.dig = _format_dig_points(self.dig + other.dig)\n\n self.ch_names += other.ch_names\n return self\n\n def copy(self):\n \"\"\"Copy the DigMontage object.\n\n Returns\n -------\n dig : instance of DigMontage\n The copied DigMontage instance.\n \"\"\"\n return deepcopy(self)\n\n def __add__(self, other):\n \"\"\"Add two DigMontages.\"\"\"\n out = self.copy()\n out += other\n return out\n\n def _get_ch_pos(self):\n pos = [d['r'] for d in _get_dig_eeg(self.dig)]\n assert len(self.ch_names) == len(pos)\n return OrderedDict(zip(self.ch_names, pos))\n\n def _get_dig_names(self):\n NAMED_KIND = (FIFF.FIFFV_POINT_EEG,)\n is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig])\n assert len(self.ch_names) == is_eeg.sum()\n dig_names = [None] * len(self.dig)\n for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]):\n dig_names[dig_idx] = self.ch_names[ch_name_idx]\n\n return dig_names\n\n def get_positions(self):\n \"\"\"Get all channel and fiducial positions.\n\n Returns\n -------\n positions : dict\n A dictionary of the positions for channels (``ch_pos``),\n coordinate frame (``coord_frame``), nasion (``nasion``),\n left preauricular point (``lpa``),\n right preauricular point (``rpa``),\n Head Shape Polhemus (``hsp``), and\n Head Position Indicator(``hpi``).\n E.g.::\n\n {\n 'ch_pos': {'EEG061': [0, 0, 0]},\n 'nasion': [0, 0, 1],\n 'coord_frame': 'mni_tal',\n 'lpa': [0, 1, 0],\n 'rpa': [1, 0, 0],\n 'hsp': None,\n 'hpi': None\n }\n \"\"\"\n # get channel positions as dict\n ch_pos = self._get_ch_pos()\n\n # get coordframe and fiducial coordinates\n montage_bunch = _get_data_as_dict_from_dig(self.dig)\n coord_frame = _frame_to_str.get(montage_bunch.coord_frame)\n\n # return dictionary\n positions = dict(\n ch_pos=ch_pos,\n coord_frame=coord_frame,\n nasion=montage_bunch.nasion,\n lpa=montage_bunch.lpa,\n rpa=montage_bunch.rpa,\n hsp=montage_bunch.hsp,\n hpi=montage_bunch.hpi,\n )\n return positions\n\n @verbose\n def 
add_estimated_fiducials(self, subject, subjects_dir=None,\n verbose=None):\n \"\"\"Estimate fiducials based on FreeSurfer ``fsaverage`` subject.\n\n This takes a montage with the ``mri`` coordinate frame,\n corresponding to the FreeSurfer RAS (xyz in the volume) T1w\n image of the specific subject. It will call\n :func:`mne.coreg.get_mni_fiducials` to estimate LPA, RPA and\n Nasion fiducial points.\n\n Parameters\n ----------\n %(subject)s\n %(subjects_dir)s\n %(verbose)s\n\n Returns\n -------\n inst : instance of DigMontage\n The instance, modified in-place.\n\n See Also\n --------\n :ref:`plot_source_alignment`\n\n Notes\n -----\n Since MNE uses the FIF data structure, it relies on the ``head``\n coordinate frame. Any coordinate frame can be transformed\n to ``head`` if the fiducials (i.e. LPA, RPA and Nasion) are\n defined. One can use this function to estimate those fiducials\n and then use ``montage.get_native_head_t()`` to get the\n head <-> MRI transform.\n \"\"\"\n # get coordframe and fiducial coordinates\n montage_bunch = _get_data_as_dict_from_dig(self.dig)\n\n # get the coordinate frame as a string and check that it's MRI\n if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI:\n raise RuntimeError(\n f'Montage should be in mri coordinate frame to call '\n f'`add_estimated_fiducials`. The current coordinate '\n f'frame is {montage_bunch.coord_frame}')\n\n # estimate LPA, nasion, RPA from FreeSurfer fsaverage\n fids_mri = list(get_mni_fiducials(subject, subjects_dir))\n\n # add those digpoints to front of montage\n self.dig = fids_mri + self.dig\n return self\n\n\nVALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1)\n\n\ndef _check_unit_and_get_scaling(unit):\n _check_option('unit', unit, sorted(VALID_SCALES.keys()))\n return VALID_SCALES[unit]\n\n\ndef transform_to_head(montage):\n \"\"\"Transform a DigMontage object into head coordinate.\n\n It requires that the LPA, RPA and Nasion fiducial\n point are available. It requires that all fiducial\n points are in the same coordinate e.g. 'unknown'\n and it will convert all the point in this coordinate\n system to Neuromag head coordinate system.\n\n Parameters\n ----------\n montage : instance of DigMontage\n The montage.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage after transforming the points to head\n coordinate system.\n \"\"\"\n # Get fiducial points and their coord_frame\n native_head_t = compute_native_head_t(montage)\n montage = montage.copy() # to avoid inplace modification\n if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD:\n for d in montage.dig:\n if d['coord_frame'] == native_head_t['from']:\n d['r'] = apply_trans(native_head_t, d['r'])\n d['coord_frame'] = FIFF.FIFFV_COORD_HEAD\n return montage\n\n\ndef read_dig_dat(fname):\n r\"\"\"Read electrode positions from a ``*.dat`` file.\n\n .. Warning::\n This function was implemented based on ``*.dat`` files available from\n `Compumedics <https://compumedicsneuroscan.com/scan-acquire-\n configuration-files/>`__ and might not work as expected with novel\n files. 
If it does not read your files correctly please contact the\n mne-python developers.\n\n Parameters\n ----------\n fname : path-like\n File from which to read electrode locations.\n\n Returns\n -------\n montage : DigMontage\n The montage.\n\n See Also\n --------\n read_dig_captrak\n read_dig_dat\n read_dig_egi\n read_dig_fif\n read_dig_hpts\n read_dig_polhemus_isotrak\n make_dig_montage\n\n Notes\n -----\n ``*.dat`` files are plain text files and can be inspected and amended with\n a plain text editor.\n \"\"\"\n from ._standard_montage_utils import _check_dupes_odict\n fname = _check_fname(fname, overwrite='read', must_exist=True)\n\n with open(fname, 'r') as fid:\n lines = fid.readlines()\n\n ch_names, poss = list(), list()\n nasion = lpa = rpa = None\n for i, line in enumerate(lines):\n items = line.split()\n if not items:\n continue\n elif len(items) != 5:\n raise ValueError(\n \"Error reading %s, line %s has unexpected number of entries:\\n\"\n \"%s\" % (fname, i, line.rstrip()))\n num = items[1]\n if num == '67':\n continue # centroid\n pos = np.array([float(item) for item in items[2:]])\n if num == '78':\n nasion = pos\n elif num == '76':\n lpa = pos\n elif num == '82':\n rpa = pos\n else:\n ch_names.append(items[0])\n poss.append(pos)\n electrodes = _check_dupes_odict(ch_names, poss)\n return make_dig_montage(electrodes, nasion, lpa, rpa)\n\n\ndef read_dig_fif(fname):\n r\"\"\"Read digitized points from a .fif file.\n\n Note that electrode names are not present in the .fif file so\n they are here defined with the convention from VectorView\n systems (EEG001, EEG002, etc.)\n\n Parameters\n ----------\n fname : path-like\n FIF file from which to read digitization locations.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n read_dig_dat\n read_dig_egi\n read_dig_captrak\n read_dig_polhemus_isotrak\n read_dig_hpts\n make_dig_montage\n \"\"\"\n _check_fname(fname, overwrite='read', must_exist=True)\n # Load the dig data\n f, tree = fiff_open(fname)[:2]\n with f as fid:\n dig = _read_dig_fif(fid, tree)\n\n ch_names = []\n for d in dig:\n if d['kind'] == FIFF.FIFFV_POINT_EEG:\n ch_names.append('EEG%03d' % d['ident'])\n\n montage = DigMontage(dig=dig, ch_names=ch_names)\n return montage\n\n\ndef read_dig_hpts(fname, unit='mm'):\n \"\"\"Read historical .hpts mne-c files.\n\n Parameters\n ----------\n fname : str\n The filepath of .hpts file.\n unit : 'm' | 'cm' | 'mm'\n Unit of the positions. Defaults to 'mm'.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n read_dig_captrak\n read_dig_dat\n read_dig_egi\n read_dig_fif\n read_dig_polhemus_isotrak\n make_dig_montage\n\n Notes\n -----\n The hpts format digitzer data file may contain comment lines starting\n with the pound sign (#) and data lines of the form::\n\n <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>\n\n where:\n\n ``<*category*>``\n defines the type of points. Allowed categories are: ``hpi``,\n ``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to\n head-position indicator coil locations, cardinal landmarks, EEG\n electrode locations, and additional head surface points,\n respectively.\n\n ``<*identifier*>``\n identifies the point. The identifiers are usually sequential\n numbers. For cardinal landmarks, 1 = left auricular point,\n 2 = nasion, and 3 = right auricular point. 
For EEG electrodes,\n identifier = 0 signifies the reference electrode.\n\n ``<*x/mm*> , <*y/mm*> , <*z/mm*>``\n Location of the point, usually in the head coordinate system\n in millimeters. If your points are in [m] then unit parameter can\n be changed.\n\n For example::\n\n cardinal 2 -5.6729 -12.3873 -30.3671\n cardinal 1 -37.6782 -10.4957 91.5228\n cardinal 3 -131.3127 9.3976 -22.2363\n hpi 1 -30.4493 -11.8450 83.3601\n hpi 2 -122.5353 9.2232 -28.6828\n hpi 3 -6.8518 -47.0697 -37.0829\n hpi 4 7.3744 -50.6297 -12.1376\n hpi 5 -33.4264 -43.7352 -57.7756\n eeg FP1 3.8676 -77.0439 -13.0212\n eeg FP2 -31.9297 -70.6852 -57.4881\n eeg F7 -6.1042 -68.2969 45.4939\n ...\n \"\"\"\n from ._standard_montage_utils import _str_names, _str\n _scale = _check_unit_and_get_scaling(unit)\n\n out = np.genfromtxt(fname, comments='#',\n dtype=(_str, _str, 'f8', 'f8', 'f8'))\n kind, label = _str_names(out['f0']), _str_names(out['f1'])\n kind = [k.lower() for k in kind]\n xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T\n xyz *= _scale\n del _scale\n fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}\n fid = {fid_idx_to_label[label[ii]]: this_xyz\n for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}\n ch_pos = {label[ii]: this_xyz\n for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}\n hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)\n if kind[ii] == 'hpi'])\n hpi.shape = (-1, 3) # in case it's empty\n hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)\n if kind[ii] == 'extra'])\n hsp.shape = (-1, 3) # in case it's empty\n return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp)\n\n\ndef read_dig_egi(fname):\n \"\"\"Read electrode locations from EGI system.\n\n Parameters\n ----------\n fname : path-like\n EGI MFF XML coordinates file from which to read digitization locations.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n read_dig_captrak\n read_dig_dat\n read_dig_fif\n read_dig_hpts\n read_dig_polhemus_isotrak\n make_dig_montage\n \"\"\"\n _check_fname(fname, overwrite='read', must_exist=True)\n\n data = _read_dig_montage_egi(\n fname=fname,\n _scaling=1.,\n _all_data_kwargs_are_none=True\n )\n return make_dig_montage(**data)\n\n\ndef read_dig_captrak(fname):\n \"\"\"Read electrode locations from CapTrak Brain Products system.\n\n Parameters\n ----------\n fname : path-like\n BrainVision CapTrak coordinates file from which to read EEG electrode\n locations. This is typically in XML format with the .bvct extension.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n read_dig_dat\n read_dig_egi\n read_dig_fif\n read_dig_hpts\n read_dig_polhemus_isotrak\n make_dig_montage\n \"\"\"\n _check_fname(fname, overwrite='read', must_exist=True)\n data = _parse_brainvision_dig_montage(fname, scale=1e-3)\n\n return make_dig_montage(**data)\n\n\ndef _get_montage_in_head(montage):\n coords = set([d['coord_frame'] for d in montage.dig])\n if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD:\n return montage\n else:\n return transform_to_head(montage.copy())\n\n\ndef _set_montage_fnirs(info, montage):\n \"\"\"Set the montage for fNIRS data.\n\n This needs to be different to electrodes as each channel has three\n coordinates that need to be set. For each channel there is a source optode\n location, a detector optode location, and a channel midpoint that must be\n stored. 
This function modifies info['chs'][#]['loc'] and info['dig'] in\n place.\n \"\"\"\n from ..preprocessing.nirs import _validate_nirs_info\n # Validate that the fNIRS info is correctly formatted\n picks = _validate_nirs_info(info)\n\n # Modify info['chs'][#]['loc'] in place\n num_ficiduals = len(montage.dig) - len(montage.ch_names)\n for ch_idx in picks:\n ch = info['chs'][ch_idx]['ch_name']\n source, detector = ch.split(' ')[0].split('_')\n source_pos = montage.dig[montage.ch_names.index(source)\n + num_ficiduals]['r']\n detector_pos = montage.dig[montage.ch_names.index(detector)\n + num_ficiduals]['r']\n\n info['chs'][ch_idx]['loc'][3:6] = source_pos\n info['chs'][ch_idx]['loc'][6:9] = detector_pos\n midpoint = (source_pos + detector_pos) / 2\n info['chs'][ch_idx]['loc'][:3] = midpoint\n\n # Modify info['dig'] in place\n info['dig'] = montage.dig\n\n\n@fill_doc\ndef _set_montage(info, montage, match_case=True, match_alias=False,\n on_missing='raise'):\n \"\"\"Apply montage to data.\n\n With a DigMontage, this function will replace the digitizer info with\n the values specified for the particular montage.\n\n Usually, a montage is expected to contain the positions of all EEG\n electrodes and a warning is raised when this is not the case.\n\n Parameters\n ----------\n info : instance of Info\n The measurement info to update.\n %(montage)s\n %(match_case)s\n %(match_alias)s\n %(on_missing_montage)s\n\n Notes\n -----\n This function will change the info variable in place.\n \"\"\"\n _validate_type(montage, (DigMontage, None, str), 'montage')\n if montage is None:\n # Next line modifies info['dig'] in place\n info['dig'] = None\n for ch in info['chs']:\n # Next line modifies info['chs'][#]['loc'] in place\n ch['loc'] = np.full(12, np.nan)\n return\n if isinstance(montage, str): # load builtin montage\n _check_option('montage', montage, _BUILT_IN_MONTAGES)\n montage = make_standard_montage(montage)\n\n mnt_head = _get_montage_in_head(montage)\n del montage\n\n def _backcompat_value(pos, ref_pos):\n if any(np.isnan(pos)):\n return np.full(6, np.nan)\n else:\n return np.concatenate((pos, ref_pos))\n\n # get the channels in the montage in head\n ch_pos = mnt_head._get_ch_pos()\n\n # only get the eeg, seeg, dbs, ecog channels\n picks = pick_types(\n info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True,\n exclude=())\n non_picks = np.setdiff1d(np.arange(info['nchan']), picks)\n\n # get the reference position from the loc[3:6]\n chs = [info['chs'][ii] for ii in picks]\n non_names = [info['chs'][ii]['ch_name'] for ii in non_picks]\n del picks\n ref_pos = [ch['loc'][3:6] for ch in chs]\n\n # keep reference location from EEG/ECoG/SEEG/DBS channels if they\n # already exist and are all the same.\n custom_eeg_ref_dig = False\n # Note: ref position is an empty list for fieldtrip data\n if ref_pos:\n if all([np.equal(ref_pos[0], pos).all() for pos in ref_pos]) \\\n and not np.equal(ref_pos[0], [0, 0, 0]).all():\n eeg_ref_pos = ref_pos[0]\n # since we have an EEG reference position, we have\n # to add it into the info['dig'] as EEG000\n custom_eeg_ref_dig = True\n if not custom_eeg_ref_dig:\n refs = set(ch_pos) & {'EEG000', 'REF'}\n assert len(refs) <= 1\n eeg_ref_pos = np.zeros(3) if not(refs) else ch_pos.pop(refs.pop())\n\n # This raises based on info being subset/superset of montage\n info_names = [ch['ch_name'] for ch in chs]\n dig_names = mnt_head._get_dig_names()\n ref_names = [None, 'EEG000', 'REF']\n\n if match_case:\n info_names_use = info_names\n dig_names_use = dig_names\n non_names_use = 
non_names\n else:\n ch_pos_use = OrderedDict(\n (name.lower(), pos) for name, pos in ch_pos.items())\n info_names_use = [name.lower() for name in info_names]\n dig_names_use = [name.lower() if name is not None else name\n for name in dig_names]\n non_names_use = [name.lower() for name in non_names]\n ref_names = [name.lower() if name is not None else name\n for name in ref_names]\n n_dup = len(ch_pos) - len(ch_pos_use)\n if n_dup:\n raise ValueError('Cannot use match_case=False as %s montage '\n 'name(s) require case sensitivity' % n_dup)\n n_dup = len(info_names_use) - len(set(info_names_use))\n if n_dup:\n raise ValueError('Cannot use match_case=False as %s channel '\n 'name(s) require case sensitivity' % n_dup)\n ch_pos = ch_pos_use\n del ch_pos_use\n del dig_names\n\n # use lookup table to match unrecognized channel names to known aliases\n if match_alias:\n alias_dict = (match_alias if isinstance(match_alias, dict) else\n CHANNEL_LOC_ALIASES)\n if not match_case:\n alias_dict = {\n ch_name.lower(): ch_alias.lower()\n for ch_name, ch_alias in alias_dict.items()\n }\n\n # excluded ch_alias not in info, to prevent unnecessary mapping and\n # warning messages based on aliases.\n alias_dict = {\n ch_name: ch_alias\n for ch_name, ch_alias in alias_dict.items()\n }\n info_names_use = [\n alias_dict.get(ch_name, ch_name) for ch_name in info_names_use\n ]\n non_names_use = [\n alias_dict.get(ch_name, ch_name) for ch_name in non_names_use\n ]\n\n # warn user if there is not a full overlap of montage with info_chs\n missing = np.where([use not in ch_pos for use in info_names_use])[0]\n if len(missing): # DigMontage is subset of info\n missing_names = [info_names[ii] for ii in missing]\n missing_coord_msg = (\n 'DigMontage is only a subset of info. There are '\n f'{len(missing)} channel position{_pl(missing)} '\n 'not present in the DigMontage. 
The required channels are:\\n\\n'\n f'{missing_names}.\\n\\nConsider using inst.set_channel_types '\n 'if these are not EEG channels, or use the on_missing '\n 'parameter if the channel positions are allowed to be unknown '\n 'in your analyses.'\n )\n _on_missing(on_missing, missing_coord_msg)\n\n # set ch coordinates and names from digmontage or nan coords\n for ii in missing:\n ch_pos[info_names_use[ii]] = [np.nan] * 3\n del info_names\n\n extra = np.where([non in ch_pos for non in non_names_use])[0]\n if len(extra):\n types = '/'.join(sorted(set(channel_type(info, ii) for ii in extra)))\n names = [non_names[ii] for ii in extra]\n warn(f'Not setting position{_pl(extra)} of {len(extra)} {types} '\n f'channel{_pl(extra)} found in montage:\\n{names}\\n'\n 'Consider setting the channel types to be of EEG/sEEG/ECoG/DBS '\n 'using inst.set_channel_types before calling inst.set_montage.')\n\n for ch, use in zip(chs, info_names_use):\n # Next line modifies info['chs'][#]['loc'] in place\n if use in ch_pos:\n ch['loc'][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos)\n ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD\n del ch_pos\n\n # XXX this is probably wrong as it uses the order from the montage\n # rather than the order of our info['ch_names'] ...\n digpoints = [\n mnt_head.dig[ii] for ii, name in enumerate(dig_names_use)\n if name in (info_names_use + ref_names)]\n\n # get a copy of the old dig\n if info['dig'] is not None:\n old_dig = info['dig'].copy()\n else:\n old_dig = []\n\n # determine if needed to add an extra EEG REF DigPoint\n if custom_eeg_ref_dig:\n # ref_name = 'EEG000' if match_case else 'eeg000'\n ref_dig_dict = {'kind': FIFF.FIFFV_POINT_EEG,\n 'r': eeg_ref_pos,\n 'ident': 0,\n 'coord_frame': info['dig'].pop()['coord_frame']}\n ref_dig_point = _format_dig_points([ref_dig_dict])[0]\n # only append the reference dig point if it was already\n # in the old dig\n if ref_dig_point in old_dig:\n digpoints.append(ref_dig_point)\n # Next line modifies info['dig'] in place\n info['dig'] = _format_dig_points(digpoints, enforce_order=True)\n\n # Handle fNIRS with source, detector and channel\n fnirs_picks = _picks_to_idx(info, 'fnirs', allow_empty=True)\n if len(fnirs_picks) > 0:\n _set_montage_fnirs(info, mnt_head)\n\n\ndef _read_isotrak_elp_points(fname):\n \"\"\"Read Polhemus Isotrak digitizer data from a ``.elp`` file.\n\n Parameters\n ----------\n fname : str\n The filepath of .elp Polhemus Isotrak file.\n\n Returns\n -------\n out : dict of arrays\n The dictionary containing locations for 'nasion', 'lpa', 'rpa'\n and 'points'.\n \"\"\"\n value_pattern = r\"\\-?\\d+\\.?\\d*e?\\-?\\d*\"\n coord_pattern = r\"({0})\\s+({0})\\s+({0})\\s*$\".format(value_pattern)\n\n with open(fname) as fid:\n file_str = fid.read()\n\n points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,\n re.MULTILINE)]\n points = np.array(points_str, dtype=float)\n\n return {\n 'nasion': points[0], 'lpa': points[1], 'rpa': points[2],\n 'points': points[3:]\n }\n\n\ndef _read_isotrak_hsp_points(fname):\n \"\"\"Read Polhemus Isotrak digitizer data from a ``.hsp`` file.\n\n Parameters\n ----------\n fname : str\n The filepath of .hsp Polhemus Isotrak file.\n\n Returns\n -------\n out : dict of arrays\n The dictionary containing locations for 'nasion', 'lpa', 'rpa'\n and 'points'.\n \"\"\"\n def get_hsp_fiducial(line):\n return np.fromstring(line.replace('%F', ''), dtype=float, sep='\\t')\n\n with open(fname) as ff:\n for line in ff:\n if 'position of fiducials' in line.lower():\n break\n\n nasion = 
get_hsp_fiducial(ff.readline())\n lpa = get_hsp_fiducial(ff.readline())\n rpa = get_hsp_fiducial(ff.readline())\n\n _ = ff.readline()\n line = ff.readline()\n if line:\n n_points, n_cols = np.fromstring(line, dtype=int, sep='\\t')\n points = np.fromstring(\n string=ff.read(), dtype=float, sep='\\t',\n ).reshape(-1, n_cols)\n assert points.shape[0] == n_points\n else:\n points = np.empty((0, 3))\n\n return {\n 'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points\n }\n\n\ndef read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'):\n \"\"\"Read Polhemus digitizer data from a file.\n\n Parameters\n ----------\n fname : str\n The filepath of Polhemus ISOTrak formatted file.\n File extension is expected to be '.hsp', '.elp' or '.eeg'.\n ch_names : None | list of str\n The names of the points. This will make the points\n considered as EEG channels. If None, channels will be assumed\n to be HPI if the extension is ``'.elp'``, and extra headshape\n points otherwise.\n unit : 'm' | 'cm' | 'mm'\n Unit of the digitizer file. Polhemus ISOTrak systems data is usually\n exported in meters. Defaults to 'm'.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n make_dig_montage\n read_polhemus_fastscan\n read_dig_captrak\n read_dig_dat\n read_dig_egi\n read_dig_fif\n \"\"\"\n VALID_FILE_EXT = ('.hsp', '.elp', '.eeg')\n _scale = _check_unit_and_get_scaling(unit)\n\n _, ext = op.splitext(fname)\n _check_option('fname', ext, VALID_FILE_EXT)\n\n if ext == '.elp':\n data = _read_isotrak_elp_points(fname)\n else:\n # Default case we read points as hsp since is the most likely scenario\n data = _read_isotrak_hsp_points(fname)\n\n if _scale != 1:\n data = {key: val * _scale for key, val in data.items()}\n else:\n pass # noqa\n\n if ch_names is None:\n keyword = 'hpi' if ext == '.elp' else 'hsp'\n data[keyword] = data.pop('points')\n\n else:\n points = data.pop('points')\n if points.shape[0] == len(ch_names):\n data['ch_pos'] = OrderedDict(zip(ch_names, points))\n else:\n raise ValueError((\n \"Length of ``ch_names`` does not match the number of points\"\n \" in {fname}. Expected ``ch_names`` length {n_points:d},\"\n \" given {n_chnames:d}\"\n ).format(\n fname=fname, n_points=points.shape[0], n_chnames=len(ch_names)\n ))\n\n return make_dig_montage(**data)\n\n\ndef _is_polhemus_fastscan(fname):\n header = ''\n with open(fname, 'r') as fid:\n for line in fid:\n if not line.startswith('%'):\n break\n header += line\n\n return 'FastSCAN' in header\n\n\n@verbose\ndef read_polhemus_fastscan(fname, unit='mm', on_header_missing='raise', *,\n verbose=None):\n \"\"\"Read Polhemus FastSCAN digitizer data from a ``.txt`` file.\n\n Parameters\n ----------\n fname : str\n The filepath of .txt Polhemus FastSCAN file.\n unit : 'm' | 'cm' | 'mm'\n Unit of the digitizer file. Polhemus FastSCAN systems data is usually\n exported in millimeters. 
Defaults to 'mm'.\n %(on_header_missing)s\n %(verbose)s\n\n Returns\n -------\n points : array, shape (n_points, 3)\n The digitization points in digitizer coordinates.\n\n See Also\n --------\n read_dig_polhemus_isotrak\n make_dig_montage\n \"\"\"\n VALID_FILE_EXT = ['.txt']\n _scale = _check_unit_and_get_scaling(unit)\n\n _, ext = op.splitext(fname)\n _check_option('fname', ext, VALID_FILE_EXT)\n\n if not _is_polhemus_fastscan(fname):\n msg = \"%s does not contain a valid Polhemus FastSCAN header\" % fname\n _on_missing(on_header_missing, msg)\n\n points = _scale * np.loadtxt(fname, comments='%', ndmin=2)\n _check_dig_shape(points)\n return points\n\n\ndef _read_eeglab_locations(fname):\n ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist()\n topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])\n sph = _topo_to_sph(topo)\n pos = _sph_to_cart(sph)\n pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]\n\n return ch_names, pos\n\n\ndef read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None):\n \"\"\"Read a montage from a file.\n\n Parameters\n ----------\n fname : str\n File extension is expected to be:\n '.loc' or '.locs' or '.eloc' (for EEGLAB files),\n '.sfp' (BESA/EGI files), '.csd',\n '.elc', '.txt', '.csd', '.elp' (BESA spherical),\n '.bvef' (BrainVision files),\n '.csv', '.tsv', '.xyz' (XYZ coordinates).\n head_size : float | None\n The size of the head (radius, in [m]). If ``None``, returns the values\n read from the montage file with no modification. Defaults to 0.095m.\n coord_frame : str | None\n The coordinate frame of the points. Usually this is \"unknown\"\n for native digitizer space. Defaults to None, which is \"unknown\" for\n most readers but \"head\" for EEGLAB.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n make_dig_montage\n make_standard_montage\n\n Notes\n -----\n The function is a helper to read electrode positions you may have\n in various formats. Most of these format are weakly specified\n in terms of units, coordinate systems. It implies that setting\n a montage using a DigMontage produced by this function may\n be problematic. If you use a standard/template (eg. 
10/20,\n 10/10 or 10/05) we recommend you use :func:`make_standard_montage`.\n If you can have positions in memory you can also use\n :func:`make_dig_montage` that takes arrays as input.\n \"\"\"\n from ._standard_montage_utils import (\n _read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc,\n _read_elp_besa, _read_brainvision, _read_xyz\n )\n SUPPORTED_FILE_EXT = {\n 'eeglab': ('.loc', '.locs', '.eloc', ),\n 'hydrocel': ('.sfp', ),\n 'matlab': ('.csd', ),\n 'asa electrode': ('.elc', ),\n 'generic (Theta-phi in degrees)': ('.txt', ),\n 'standard BESA spherical': ('.elp', ), # NB: not same as polhemus elp\n 'brainvision': ('.bvef', ),\n 'xyz': ('.csv', '.tsv', '.xyz'),\n }\n\n _, ext = op.splitext(fname)\n _check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ())))\n\n if ext in SUPPORTED_FILE_EXT['eeglab']:\n if head_size is None:\n raise ValueError(\n \"``head_size`` cannot be None for '{}'\".format(ext))\n ch_names, pos = _read_eeglab_locations(fname)\n scale = head_size / np.median(np.linalg.norm(pos, axis=-1))\n pos *= scale\n\n montage = make_dig_montage(\n ch_pos=OrderedDict(zip(ch_names, pos)),\n coord_frame='head',\n )\n\n elif ext in SUPPORTED_FILE_EXT['hydrocel']:\n montage = _read_sfp(fname, head_size=head_size)\n\n elif ext in SUPPORTED_FILE_EXT['matlab']:\n montage = _read_csd(fname, head_size=head_size)\n\n elif ext in SUPPORTED_FILE_EXT['asa electrode']:\n montage = _read_elc(fname, head_size=head_size)\n\n elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']:\n if head_size is None:\n raise ValueError(\n \"``head_size`` cannot be None for '{}'\".format(ext))\n montage = _read_theta_phi_in_degrees(fname, head_size=head_size,\n fid_names=('Nz', 'LPA', 'RPA'))\n\n elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']:\n montage = _read_elp_besa(fname, head_size)\n\n elif ext in SUPPORTED_FILE_EXT['brainvision']:\n montage = _read_brainvision(fname, head_size)\n\n elif ext in SUPPORTED_FILE_EXT['xyz']:\n montage = _read_xyz(fname)\n\n if coord_frame is not None:\n coord_frame = _coord_frame_const(coord_frame)\n for d in montage.dig:\n d['coord_frame'] = coord_frame\n\n return montage\n\n\ndef compute_dev_head_t(montage):\n \"\"\"Compute device to head transform from a DigMontage.\n\n Parameters\n ----------\n montage : instance of DigMontage\n The DigMontage must contain the fiducials in head\n coordinate system and hpi points in both head and\n meg device coordinate system.\n\n Returns\n -------\n dev_head_t : instance of Transform\n A Device-to-Head transformation matrix.\n \"\"\"\n _, coord_frame = _get_fid_coords(montage.dig)\n if coord_frame != FIFF.FIFFV_COORD_HEAD:\n raise ValueError('montage should have been set to head coordinate '\n 'system with transform_to_head function.')\n\n hpi_head = np.array(\n [d['r'] for d in montage.dig\n if (d['kind'] == FIFF.FIFFV_POINT_HPI and\n d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float)\n hpi_dev = np.array(\n [d['r'] for d in montage.dig\n if (d['kind'] == FIFF.FIFFV_POINT_HPI and\n d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float)\n\n if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0):\n raise ValueError((\n \"To compute Device-to-Head transformation, the same number of HPI\"\n \" points in device and head coordinates is required. 
(Got {dev}\"\n \" points in device and {head} points in head coordinate systems)\"\n ).format(dev=len(hpi_dev), head=len(hpi_head)))\n\n trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0])\n return Transform(fro='meg', to='head', trans=trans)\n\n\ndef compute_native_head_t(montage):\n \"\"\"Compute the native-to-head transformation for a montage.\n\n This uses the fiducials in the native space to compute the transform to\n the head coordinate frame.\n\n Parameters\n ----------\n montage : instance of DigMontage\n The montage.\n\n Returns\n -------\n native_head_t : instance of Transform\n A native-to-head transformation matrix.\n \"\"\"\n # Get fiducial points and their coord_frame\n fid_coords, coord_frame = _get_fid_coords(montage.dig, raise_error=False)\n if coord_frame is None:\n coord_frame = FIFF.FIFFV_COORD_UNKNOWN\n if coord_frame == FIFF.FIFFV_COORD_HEAD:\n native_head_t = np.eye(4)\n else:\n fid_keys = ('nasion', 'lpa', 'rpa')\n for key in fid_keys:\n if fid_coords[key] is None:\n warn('Fiducial point %s not found, assuming identity %s to '\n 'head transformation'\n % (key, _verbose_frames[coord_frame],))\n native_head_t = np.eye(4)\n break\n else:\n native_head_t = get_ras_to_neuromag_trans(\n *[fid_coords[key] for key in fid_keys])\n return Transform(coord_frame, 'head', native_head_t)\n\n\ndef make_standard_montage(kind, head_size=HEAD_SIZE_DEFAULT):\n \"\"\"Read a generic (built-in) montage.\n\n Parameters\n ----------\n kind : str\n The name of the montage to use. See notes for valid kinds.\n head_size : float\n The head size (radius, in meters) to use for spherical montages.\n Defaults to 95mm.\n\n Returns\n -------\n montage : instance of DigMontage\n The montage.\n\n See Also\n --------\n DigMontage\n make_dig_montage\n read_custom_montage\n\n Notes\n -----\n Individualized (digitized) electrode positions should be read in using\n :func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`,\n :func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`,\n :func:`read_dig_hpts` or made with :func:`make_dig_montage`.\n\n Valid ``kind`` arguments are:\n\n =================== =====================================================\n Kind Description\n =================== =====================================================\n standard_1005 Electrodes are named and positioned according to the\n international 10-05 system (343+3 locations)\n standard_1020 Electrodes are named and positioned according to the\n international 10-20 system (94+3 locations)\n standard_alphabetic Electrodes are named with LETTER-NUMBER combinations\n (A1, B2, F4, ...) 
(65+3 locations)\n standard_postfixed Electrodes are named according to the international\n 10-20 system using postfixes for intermediate\n positions (100+3 locations)\n standard_prefixed Electrodes are named according to the international\n 10-20 system using prefixes for intermediate\n positions (74+3 locations)\n standard_primed Electrodes are named according to the international\n 10-20 system using prime marks (' and '') for\n intermediate positions (100+3 locations)\n\n biosemi16 BioSemi cap with 16 electrodes (16+3 locations)\n biosemi32 BioSemi cap with 32 electrodes (32+3 locations)\n biosemi64 BioSemi cap with 64 electrodes (64+3 locations)\n biosemi128 BioSemi cap with 128 electrodes (128+3 locations)\n biosemi160 BioSemi cap with 160 electrodes (160+3 locations)\n biosemi256 BioSemi cap with 256 electrodes (256+3 locations)\n\n easycap-M1 EasyCap with 10-05 electrode names (74 locations)\n easycap-M10 EasyCap with numbered electrodes (61 locations)\n\n EGI_256 Geodesic Sensor Net (256 locations)\n\n GSN-HydroCel-32 HydroCel Geodesic Sensor Net and Cz (33+3 locations)\n GSN-HydroCel-64_1.0 HydroCel Geodesic Sensor Net (64+3 locations)\n GSN-HydroCel-65_1.0 HydroCel Geodesic Sensor Net and Cz (65+3 locations)\n GSN-HydroCel-128 HydroCel Geodesic Sensor Net (128+3 locations)\n GSN-HydroCel-129 HydroCel Geodesic Sensor Net and Cz (129+3 locations)\n GSN-HydroCel-256 HydroCel Geodesic Sensor Net (256+3 locations)\n GSN-HydroCel-257 HydroCel Geodesic Sensor Net and Cz (257+3 locations)\n\n mgh60 The (older) 60-channel cap used at\n MGH (60+3 locations)\n mgh70 The (newer) 70-channel BrainVision cap used at\n MGH (70+3 locations)\n\n artinis-octamon Artinis OctaMon fNIRS (8 sources, 2 detectors)\n\n artinis-brite23 Artinis Brite23 fNIRS (11 sources, 7 detectors)\n =================== =====================================================\n\n .. versionadded:: 0.19.0\n \"\"\"\n from ._standard_montage_utils import standard_montage_look_up_table\n _check_option('kind', kind, _BUILT_IN_MONTAGES)\n return standard_montage_look_up_table[kind](head_size=head_size)\n\n\ndef _check_dig_shape(pts):\n _validate_type(pts, np.ndarray, 'points')\n if pts.ndim != 2 or pts.shape[-1] != 3:\n raise ValueError(\n f'Points must be of shape (n, 3) instead of {pts.shape}')\n"
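A minimal usage sketch for the montage readers above, using only the public mne.channels API; the EEGLAB file name is a hypothetical placeholder and is left commented out.

import mne

# Built-in template montage: electrode positions on a spherical head model
# (head_size defaults to 0.095 m, matching HEAD_SIZE_DEFAULT above).
montage = mne.channels.make_standard_montage('standard_1020')
print(montage)

# For electrode files, the extension selects the parser; e.g. an EEGLAB
# .locs file is rescaled so the median electrode radius equals head_size.
# custom = mne.channels.read_custom_montage('electrodes.locs')  # hypothetical path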
] | [
[
"numpy.round",
"numpy.isin",
"numpy.flatnonzero"
],
[
"numpy.array_equal",
"numpy.isnan",
"numpy.arange",
"numpy.eye",
"numpy.linalg.norm",
"numpy.empty",
"numpy.genfromtxt",
"numpy.full",
"numpy.concatenate",
"numpy.fromstring",
"numpy.zeros",
"numpy.equal",
"numpy.array",
"numpy.where",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gAldeia/iirsBenchmark | [
"2211b4755405eb32178a09f1a01143d53dc6516d",
"2211b4755405eb32178a09f1a01143d53dc6516d",
"2211b4755405eb32178a09f1a01143d53dc6516d"
] | [
"iirsBenchmark/regressors/DecisionTree_regressor.py",
"iirsBenchmark/explainers/ELA_explainer.py",
"iirsBenchmark/regressors/Linear_regressor.py"
] | [
"# Author: Guilherme Aldeia\r\n# Contact: [email protected]\r\n# Version: 1.0.0\r\n# Last modified: 08-20-2021 by Guilherme Aldeia\r\n\r\n\"\"\"\r\nDecision tree regressor. This method is considered a white-box for most \r\nauthors.\r\n\r\nThis regressor extends the scikit-learn\r\n[DecisionTreeRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html).\r\n\r\nbeyond what the scikit model can do, this class also implements:\r\n* `to_str()` method, that returns a string representation of the final model;\r\n* `stochastic_executions` attribute, indicating if the model presents\r\n different results between different executions if the random_state is not\r\n setted;\r\n* `interpretability_spectrum` attribute, with a string representing where on\r\n the interpretability spectrun (white, gray or black-box) this model lays;\r\n* `grid_params` attribute, with different possible values to be used in \r\n a gridsearch optimization of the method;\r\n* `feature_importances_` attribute, representing the importances calculated by\r\n an intrinsic explanation method (the Partial Effect, used in the context\r\n of regression analysis).\r\n\"\"\"\r\n\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn import tree\r\n\r\n# \r\nclass DecisionTree_regressor(DecisionTreeRegressor):\r\n\r\n # Nossos inits devem ser iguais (ou ter o máximo de argumentos)\r\n # que temos na classe derivada.\r\n\r\n # Mudar criterion para mudar o critério de treino\r\n def __init__(self, *,\r\n splitter='best', max_depth=None, min_samples_split=2,\r\n min_samples_leaf=1, min_weight_fraction_leaf=0.0,\r\n max_features=None, random_state=None, criterion = 'mse',\r\n max_leaf_nodes=None, min_impurity_decrease=0.0,\r\n min_impurity_split=None, ccp_alpha=0.0, **kwargs):\r\n \r\n super(DecisionTree_regressor, self).__init__(\r\n criterion=criterion, splitter = splitter,\r\n max_depth = max_depth, min_samples_split = min_samples_split,\r\n min_samples_leaf = min_samples_leaf,\r\n min_weight_fraction_leaf = min_weight_fraction_leaf,\r\n max_features = max_features, random_state = random_state,\r\n max_leaf_nodes = max_leaf_nodes,\r\n min_impurity_decrease = min_impurity_decrease,\r\n min_impurity_split = min_impurity_split,\r\n ccp_alpha = ccp_alpha)\r\n \r\n\r\n def to_str(self):\r\n \r\n text_rep = tree.export_text(self)\r\n text_rep = text_rep.replace('\\n', r'<br>').replace('feature_', 'x_')\r\n\r\n return text_rep\r\n\r\n\r\n# IMO, decisions trees are interpretable only if they don't get too deep.\r\n# I will use small complexity configurations for the gridsearch.\r\n\r\nDecisionTree_regressor.stochastic_executions = True\r\nDecisionTree_regressor.interpretability_spectrum = 'white-box'\r\nDecisionTree_regressor.grid_params = {\r\n 'max_depth' : [5, 10, 15],\r\n 'max_leaf_nodes' : [5, 10, 15],\r\n}\r\n",
"# Author: Guilherme Aldeia\r\n# Contact: [email protected]\r\n# Version: 1.0.1\r\n# Last modified: 21-11-2021 by Guilherme Aldeia\r\n\r\n\"\"\"\r\nExplain by Local Approximation explainer.\r\n\"\"\"\r\n\r\nfrom iirsBenchmark.explainers._base_explainer import Base_explainer \r\n\r\nfrom sklearn.utils.validation import check_array\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\nimport numpy as np\r\n\r\n\r\nclass ELA_explainer(Base_explainer):\r\n def __init__(self, *, predictor, k=5, **kwargs):\r\n \r\n super(ELA_explainer, self).__init__(\r\n predictor = predictor,\r\n agnostic = ['ITEA_regressor', #'Operon_regressor',\r\n 'Linear_regressor', 'Lasso_regressor', 'Feynman_regressor'],\r\n local_scope = True,\r\n global_scope = False\r\n )\r\n \r\n # (global explanation is not a feature importance, but a visual\r\n # explanation like PartialDependencePlots, so it was not implemented\r\n # here)\r\n \r\n # n of closest neighbors evaluated in the linear regression\r\n self.k = k \r\n\r\n \r\n def _check_fit(self, X, y):\r\n \r\n assert X.shape[0] >= self.k, \\\r\n f\"Data set too small to be used with given value for k={self.k}.\" \r\n\r\n return super()._check_fit(X, y)\r\n\r\n\r\n def _k_closest_neighbors(self, x):\r\n \r\n # p1 and p2 must be a 1-dimensional numpy array of same length\r\n euclidean_dist = lambda p1, p2: np.sqrt(np.sum((p1 - p2)**2))\r\n \r\n # Distance will consider only the subset of features existing in the\r\n # regresison model (the model must have a selected_features_ thus\r\n # ELA is not entirelly model agnostic).\r\n subset_features = self.predictor.selected_features_\r\n\r\n # setting discarded variables to same value so it doesn't affect\r\n # the distance calculation\r\n x_masked = x.copy()\r\n X_masked = self.X_.copy()\r\n\r\n # x is 1d, X is 2d\r\n x_masked[subset_features] = 0.0\r\n X_masked[:, subset_features] = 0.0\r\n\r\n selected = np.argsort(\r\n [euclidean_dist(x_masked, xprime) for xprime in X_masked])\r\n \r\n return self.X_[selected[ :self.k], :]\r\n \r\n \r\n def fit(self, X, y):\r\n self._check_fit(X, y)\r\n\r\n self.X_ = X\r\n self.y_ = y\r\n \r\n return self\r\n \r\n\r\n def explain_local(self, X):\r\n \r\n self._check_is_fitted()\r\n\r\n X = check_array(X)\r\n \r\n nobs, nvars = X.shape\r\n \r\n coeffs = np.zeros_like(X)\r\n for i in range(nobs):\r\n X_closest_neighbors = self._k_closest_neighbors(X[i, :])\r\n \r\n linear_reg = LinearRegression()\r\n linear_reg.fit(\r\n X_closest_neighbors, self.predictor.predict(X_closest_neighbors))\r\n \r\n coeffs[i, :] = linear_reg.coef_\r\n \r\n # Final explanation is the product of x by the coefficients\r\n # normalized for all variables\r\n explanations = np.abs(coeffs * X)\r\n \r\n # Normalizing (broadcast and transposing to divide matrix by vector)\r\n explanations = ((explanations * 100.).T / np.sum(explanations, axis=1)).T\r\n \r\n # check if everything is as expected: column-wise sum\r\n # should be 100\r\n assert np.all(np.isclose(np.sum(explanations, axis=1), 100.0))\r\n \r\n return self._protect_explanation(explanations)",
"# Author: Guilherme Aldeia\r\n# Contact: [email protected]\r\n# Version: 1.0.1\r\n# Last modified: 21-11-2021 by Guilherme Aldeia\r\n\r\n\"\"\"\r\nLinear regressor. This method is considered a white-box for most authors.\r\n\r\nThis regressor extends the scikit-learn\r\n[LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).\r\n\r\nbeyond what the scikit model can do, this class also implements:\r\n* `to_str()` method, that returns a string representation of the final model;\r\n* `gradients()` method, which computes the gradient vector for a given\r\n observation (or observations);\r\n* `stochastic_executions` attribute, indicating if the model presents\r\n different results between different executions if the random_state is not\r\n setted;\r\n* `interpretability_spectrum` attribute, with a string representing where on\r\n the interpretability spectrun (white, gray or black-box) this model lays;\r\n* `grid_params` attribute, with different possible values to be used in \r\n a gridsearch optimization of the method;\r\n* `feature_importances_` attribute, representing the importances calculated by\r\n an intrinsic explanation method (the Partial Effect, used in the context\r\n of regression analysis).\r\n* `selected_features_`: some model-specific explainers can rely on using a\r\n subset of the original features to explain the model (i.g. ELA). This\r\n attribute is created after fitting the regressor, and indicates the indexes\r\n of the features that are actually being considered by the model when making\r\n predictions, and it is implemented to all regressors that creates a \r\n mathematical expression that can be analysed to extract this information.\r\n\"\"\"\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\nimport numpy as np\r\n\r\nclass Linear_regressor(LinearRegression):\r\n def __init__(self, *,\r\n fit_intercept=True, normalize=False, copy_X=True,\r\n positive=False, **kwargs):\r\n\r\n # This method does not have a stochastic behavior and the scikit\r\n # implementation does not take a random_state argument\r\n \r\n # the scikit method supports parallelization, but we want to avoid \r\n # nested paralellizations (the original experiments were designed\r\n # to run in multiple subprocesses). 
n_jobs of superclass should be None\r\n\r\n super(Linear_regressor, self).__init__(\r\n fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X,\r\n n_jobs=None, positive=positive)\r\n \r\n\r\n def fit(self, X, y):\r\n super_fit = super().fit(X, y)\r\n\r\n self.feature_importances_ = self.coef_\r\n\r\n n_features = X.shape[1]\r\n\r\n # Useful for model-specific explainers that use information about\r\n # selected features\r\n self.selected_features_ = np.array(\r\n [i for i in range(n_features) if self.coef_[i] != 0.0])\r\n\r\n return super_fit\r\n \r\n\r\n def to_str(self):\r\n coefs = self.coef_\r\n intercept = self.intercept_\r\n \r\n str_terms = []\r\n for i, c in enumerate(coefs):\r\n if np.isclose(c, 0.0):\r\n continue\r\n \r\n str_terms.append(f\"{c.round(3)}*x_{i}\")\r\n\r\n expr_str = ' + '.join(str_terms)\r\n\r\n return f\"{expr_str} + {intercept.round(3)}\"\r\n\r\n\r\n def gradients(self, X):\r\n\r\n gradients = np.zeros_like(X)\r\n\r\n # broadcasting coefficients, since the gradients will always\r\n # be the same.\r\n gradients[:] = self.coef_\r\n\r\n return gradients\r\n\r\n\r\nLinear_regressor.interpretability_spectrum = 'white-box'\r\nLinear_regressor.stochastic_executions = False\r\nLinear_regressor.grid_params = {\r\n\r\n}"
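A quick sanity check of the to_str()/gradients() contract above, assuming a scikit-learn version that still accepts LinearRegression's normalize argument; the data is synthetic.

import numpy as np

rng = np.random.RandomState(1)
X = rng.rand(10, 2)
y = 4.0 * X[:, 0] - 1.0 * X[:, 1] + 0.5

lin = Linear_regressor().fit(X, y)
print(lin.to_str())            # roughly "4.0*x_0 + -1.0*x_1 + 0.5"
print(lin.selected_features_)  # indexes of non-zero coefficients: [0 1]
print(lin.gradients(X[:2]))    # every row equals lin.coef_ for a linear model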
] | [
[
"sklearn.tree.export_text"
],
[
"numpy.abs",
"sklearn.utils.validation.check_array",
"numpy.zeros_like",
"sklearn.linear_model.LinearRegression",
"numpy.sum"
],
[
"numpy.zeros_like",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kyleaton/great_expectations | [
"a856513859445cdb1b254e0d90022bab6257b6a2"
] | [
"tests/conftest.py"
] | [
"import datetime\nimport json\nimport locale\nimport os\nimport shutil\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport great_expectations as ge\nfrom great_expectations.core import (\n ExpectationConfiguration,\n ExpectationSuite,\n ExpectationValidationResult,\n expectationSuiteSchema,\n)\nfrom great_expectations.data_context.types.resource_identifiers import (\n ExpectationSuiteIdentifier\n)\nfrom great_expectations.data_context.util import file_relative_path\nfrom great_expectations.dataset.pandas_dataset import PandasDataset\nfrom .test_utils import expectationSuiteValidationResultSchema, get_dataset\n\n###\n#\n# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING\n#\n###\nlocale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\", \"smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects \"\n \"that require manual inspection.\"\n )\n config.addinivalue_line(\n \"markers\", \"rendered_output: produces rendered output that should be manually reviewed.\"\n )\n config.addinivalue_line(\n \"markers\", \"aws_integration: runs aws integration test that may be very slow and requires credentials\"\n )\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--no-spark\", action='store_true', help=\"If set, suppress tests against the spark test suite\"\n )\n parser.addoption(\n \"--no-sqlalchemy\", action=\"store_true\", help=\"If set, suppress all tests using sqlalchemy\"\n )\n parser.addoption(\n \"--no-postgresql\", action=\"store_true\", help=\"If set, suppress tests against postgresql\"\n )\n parser.addoption(\n \"--aws-integration\", action=\"store_true\", help=\"If set, run aws integration tests\"\n )\n\n\ndef build_test_backends_list(metafunc):\n test_backends = ['PandasDataset']\n no_spark = metafunc.config.getoption(\"--no-spark\")\n if not no_spark:\n try:\n from pyspark.sql import SparkSession\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n test_backends += ['SparkDFDataset']\n no_sqlalchemy = metafunc.config.getoption(\"--no-sqlalchemy\")\n if not no_sqlalchemy:\n test_backends += ['sqlite']\n import sqlalchemy as sa\n no_postgresql = metafunc.config.getoption(\"--no-postgresql\")\n if not no_postgresql:\n ###\n # NOTE: 20190918 - JPC: Since I've had to relearn this a few times, a note here.\n # SQLALCHEMY coerces postgres DOUBLE_PRECISION to float, which loses precision\n # round trip compared to NUMERIC, which stays as a python DECIMAL\n\n # Be sure to ensure that tests (and users!) 
understand that subtlety,\n # which can be important for distributional expectations, for example.\n ###\n try:\n engine = sa.create_engine('postgresql://postgres@localhost/test_ci')\n conn = engine.connect()\n conn.close()\n except (ImportError, sa.exc.SQLAlchemyError):\n raise ImportError(\"postgresql tests are requested, but unable to connect to the postgresql database at \"\n \"'postgresql://postgres@localhost/test_ci'\")\n test_backends += ['postgresql']\n # TODO: enable mysql or other backend tests to be optionally specified\n # if mysql:\n # try:\n # engine = sa.create_engine('mysql://root@localhost/test_ci')\n # conn = engine.connect()\n # test_backends += ['mysql']\n # conn.close()\n # except (ImportError, sa.exc.SQLAlchemyError):\n # warnings.warn(\"No mysql context available for testing.\")\n return test_backends\n\n\ndef pytest_generate_tests(metafunc):\n test_backends = build_test_backends_list(metafunc)\n if \"test_backend\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backend\", test_backends)\n if \"test_backends\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backends\", [test_backends])\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--aws-integration\"):\n # --aws-integration given in cli: do not skip aws-integration tests\n return\n skip_aws_integration = pytest.mark.skip(reason=\"need --aws-integration option to run\")\n for item in items:\n if \"aws_integration\" in item.keywords:\n item.add_marker(skip_aws_integration)\n\n\[email protected]\ndef sa(test_backends):\n if (\n \"postgresql\" not in test_backends and\n \"sqlite\" not in test_backends\n ):\n pytest.skip(\"No recognized sqlalchemy backend selected.\")\n else:\n import sqlalchemy as sa\n return sa\n\n\[email protected]\ndef spark_session(test_backends):\n if \"SparkDFDataset\" not in test_backends:\n pytest.skip(\"No spark backend selected.\")\n from pyspark.sql import SparkSession\n return SparkSession.builder.getOrCreate()\n\n\[email protected]\ndef empty_expectation_suite():\n expectation_suite = {\n 'expectation_suite_name': \"default\",\n 'meta': {},\n 'expectations': []\n }\n return expectation_suite\n\n\[email protected]\ndef basic_expectation_suite():\n expectation_suite = ExpectationSuite(\n expectation_suite_name=\"default\",\n meta={},\n expectations=[\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\n \"column\": \"infinities\"\n }\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\n \"column\": \"nulls\"\n }\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\n \"column\": \"naturals\"\n }\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_unique\",\n kwargs={\n \"column\": \"naturals\"\n }\n )\n ]\n )\n return expectation_suite\n\n\[email protected]\ndef file_data_asset(tmp_path):\n tmp_path = str(tmp_path)\n path = os.path.join(tmp_path, 'file_data_asset.txt')\n with open(path, 'w+') as file:\n file.write(json.dumps([0, 1, 2, 3, 4]))\n\n return ge.data_asset.FileDataAsset(file_path=path)\n\n\[email protected]\ndef numeric_high_card_dict():\n data = {\n \"norm_0_1\": [0.7225866251125405, -0.5951819764073379, -0.2679313226299394, -0.22503289285616823, 0.1432092195399402, 1.1874676802669433, 1.2766412196640815, 0.15197071140718296, -0.08787273509474242, -0.14524643717509128, -1.236408169492396, -0.1595432263317598, 1.0856768114741797, 0.5082788229519655, 0.26419244684748955, -0.2532308428977167, 
-0.6362679196021943, -3.134120304969242, -1.8990888524318292, 0.15701781863102648, -0.775788419966582, -0.7400872167978756, -0.10578357492485335, 0.30287010067847436, -1.2127058770179304, -0.6750567678010801, 0.3341434318919877, 1.8336516507046157, 1.105410842250908, -0.7711783703442725, -0.20834347267477862, -0.06315849766945486, 0.003016997583954831, -1.0500016329150343, -0.9168020284223636, 0.306128397266698, 1.0980602112281863, -0.10465519493772572, 0.4557797534454941, -0.2524452955086468, -1.6176089110359837, 0.46251282530754667, 0.45751208998354903, 0.4222844954971609, 0.9651098606162691, -0.1364401431697167, -0.4988616288584964, -0.29549238375582904, 0.6950204582392359, 0.2975369992016046, -1.0159498719807218, 1.3704532401348395, 1.1210419577766673, 1.2051869452003332, 0.10749349867353084, -3.1876892257116562, 1.316240976262548, -1.3777452919511493, -1.0666211985935259, 1.605446695828751, -0.39682821266996865, -0.2828059717857655, 1.30488698803017, -2.116606225467923, -0.2026680301462151, -0.05504008273574069, -0.028520163428411835, 0.4424105678123449, -0.3427628263418371, 0.23805293411919937, -0.7515414823259695, -0.1272505897548366, 1.803348436304099, -2.0178252709022124, 0.4860300090112474, 1.2304054166426217, 0.7228668982068365, 1.7400607500575112, 0.3480274098246697, -0.3887978895385282, -1.6511926233909175, 0.14517929503564567, -1.1599010576123796, -0.016133552438119002, 0.47157644883706273, 0.27657785075518254, 1.4464286976282463, -1.2605489185634533, -1.2548765025615338, 0.0755319579826929, 1.0476733637516833, -0.7038690219524807, -0.9580696842862921, -0.18135657098008018, -0.18163993379314564, 0.4092798531146971, -2.049808182546896, -1.2447062617916826, -1.6681140306283337, 1.0709944517933483, -0.7059385234342846, -0.8033587669003331, -1.8152275905903312, 0.11729996097670137, 2.2994900038012376, -0.1291192451734159, -0.6731565869164164, -0.06690994571366346, -0.40330072968473235, -0.23927186025094221, 2.7756216937096676, 0.06441299443146056, -0.5095247173507204, -0.5228853558871007, 0.806629654091097, -2.110096084114651, -0.1233374136509439, -1.021178519845751, 0.058906278340351045, -0.26316852406211017, -1.2990807244026237, -0.1937986598084067, 0.3909222793445317, 0.578027315076297, -0.11837271520846208, -1.134297652720464, 0.496915417153268, -0.5315184110418045, 0.5284176849952198, -1.6810338988102331, 0.41220454054009154, 1.0554031136792, -1.4222775023918832, -1.1664353586956209, 0.018952180522661358, -0.04620616876577671, -0.8446292647938418, -0.6889432180332509, -0.16012081070647954, 0.5680940644754282, -1.9792941921407943, 0.35441842206114726, 0.12433268557499534, 0.25366905921805377, 0.6262297786892028, 1.327981424671081, 1.774834324890265, -0.9725604763128438, 0.42824027889428, 0.19725541390327114, 1.4640606982992412, 1.6484993842838995, 0.009848260786412894, -2.318740403198263, -0.4125245127403577, -0.15500831770388285, 1.010740123094443, 0.7509498708766653, -0.021415407776108144, 0.6466776546788641, -1.421096837521404, 0.5632248951325018, -1.230539161899903, -0.26766333435961503, -1.7208241092827994, -1.068122926814994, -1.6339248620455546, 0.07225436117508208, -1.2018233250224348, -0.07213000691963527, -1.0080992229563746, -1.151378048476321, -0.2660104149809121, 1.6307779136408695, 0.8394822016824073, -0.23362802143120032, -0.36799502320054384, 0.35359852278856263, 0.5830948999779656, -0.730683771776052, 1.4715728371820667, -1.0668090648998136, -1.025762014881618, 0.21056106958224155, -0.5141254207774576, -0.1592942838690149, 0.7688711617969363, 
-2.464535892598544, -0.33306989349452987, 0.9457207224940593, 0.36108072442574435, -0.6490066877470516, -0.8714147266896871, 0.6567118414749348, -0.18543305444915045, 0.11156511615955596, 0.7299392157186994, -0.9902398239693843, -1.3231344439063761, -1.1402773433114928, 0.3696183719476138, -1.0512718152423168, -0.6093518314203102, 0.0010622538704462257, -0.17676306948277776, -0.6291120128576891, 1.6390197341434742, -0.8105788162716191, -2.0105672384392204, -0.7909143328024505, -0.10510684692203587, -0.013384480496840259, 0.37683659744804815, -0.15123337965442354, 1.8427651248902048, 1.0371006855495906, 0.29198928612503655, -1.7455852392709181, 1.0854545339796853, 1.8156620972829793, 1.2399563224061596, 1.1196530775769857, 0.4349954478175989, 0.11093680938321168, 0.9945934589378227, -0.5779739742428905, 1.0398502505219054, -0.09401160691650227, 0.22793239636661505, -1.8664992140331715, -0.16104499274010126, -0.8497511318264537, -0.005035074822415585, -1.7956896952184151, 1.8304783101189757, 0.19094408763231646, 1.3353023874309002, 0.5889134606052353, -0.48487660139277866, 0.4817014755127622, 1.5981632863770983, 2.1416849775567943, -0.5524061711669017, 0.3364804821524787, -0.8609687548167294, 0.24548635047971906, -0.1281468603588133, -0.03871410517044196, -0.2678174852638268, 0.41800607312114096, -0.2503930647517959, 0.8432391494945226, -0.5684563173706987, -0.6737077809046504, 2.0559579098493606, -0.29098826888414253, -0.08572747304559661, -0.301857666880195, -0.3446199959065524, 0.7391340848217359, -0.3087136212446006, 0.5245553707204758, -3.063281336805349, 0.47471623010413705, 0.3733427291759615, -0.26216851429591426, -0.5433523111756248, 0.3305385199964823, -1.4866150542941634, -0.4699911958560942, 0.7312367186673805, -0.22346998944216903, -0.4102860865811592, -0.3003478250288424, -0.3436168605845268, 0.9456524589400904, -0.03710285453384255, 0.10330609878001526, 0.6919858329179392, 0.8673477607085118, 0.380742577915601, 0.5785785515837437, -0.011421905830097267, 0.587187810965595, -1.172536467775141, -0.532086162097372, -0.34440413367820183, -1.404900386188497, -0.1916375229779241, 1.6910999461291834, -0.6070351182769795, -0.8371447893868493, 0.8853944070432224, 1.4062946075925473, -0.4575973141608374, 1.1458755768004445, 0.2619874618238163, 1.7105876844856704, -1.3938976454537522, -0.11403217166441704, -1.0354305240085717, -0.4285770475062154, 0.10326635421187867, 0.6911853442971228, 0.6293835213179542, -0.819693698713199, -0.7378190403744175, -1.495947672573938, -1.2406693914431872, -1.0486341638186725, -1.3715759883075953, 3.585407817418151, -0.8007079372574223, -1.527336776754733, -0.4716571043072485, -0.6967311271405545, 1.0003347462169225, -0.30569565002022697, 0.3646134876772732, 0.49083033603832493, 0.07754580794955847, -0.13467337850920083, 0.02134473458605164, 0.5025183900540823, -0.940929087894874, 1.441600637127558, -0.0857298131221344, -0.575175243519591, 0.42622029657630595, -0.3239674701415489, 0.22648849821602596, -0.6636465305318631, 0.30415000329164754, -0.6170241274574016, 0.07578674772163065, 0.2952841441615124, 0.8120317689468056, -0.46861353019671337, 0.04718559572470416, -0.3105660017232523, -0.28898463203535724, 0.9575298065734561, -0.1977556031830993, 0.009658232624257272, 1.1432743259603295, -1.8989396918936858, 0.20787070770386357, 1.4256750543782999, -0.03838329973778874, -0.9051229357470373, -1.2002277085489457, 2.405569956130733, 1.895817948326675, -0.8260858325924574, 0.5759061866255807, 2.7022875569683342, 1.0591327405967745, 0.21449833798124354, 
0.19970388388081273, 0.018242139911433558, -0.630960146999549, -2.389646042147776, 0.5424304992480339, -1.2159551561948718, -1.6851632640204128, -0.4812221268109694, 0.6217652794219579, -0.380139431677482, -0.2643524783321051, 0.5106648694993016, -0.895602157034141, -0.20559568725141816, 1.5449271875734911, 1.544075783565114, 0.17877619857826843, 1.9729717339967108, 0.8302033109816261, -0.39118561199170965, -0.4428357598297098, -0.02550407946753186, -1.0202977138210447, 2.6604654314300835, 1.9163029269361842, 0.34697436596877657, -0.8078124769022497, -1.3876596649099957, 0.44707250163663864, -0.6752837232272447, -0.851291770954755, 0.7599767868730256, 0.8134109401706875, -1.6766750539980289, -0.06051832829232975, -0.4652931327216134, -0.9249124398287735, 1.9022739762222731, 1.7632300613807597, 1.675335012283785, 0.47529854476887495, -0.7892463423254658, 0.3910120652706098, 0.5812432547936405, 0.2693084649672777, -0.08138564925779349, 0.9150619269526952, -0.8637356349272142, -0.14137853834901817, -0.20192754829896423, 0.04718228147088756, -0.9743600144318, -0.9936290943927825, 0.3544612180477054, 0.6839546770735121, 1.5089070357620178, 1.301167565172228, -1.5396145667672985, 0.42854366341485456, -1.5876582617301032, -0.0316985879141714, 0.3144220016570915, -0.05054766725644431, 0.2934139006870167, 0.11396170275994542, -0.6472140129693643, 1.6556030742445431, 1.0319410208453506, 0.3292217603989991, -0.058758121958605435, -0.19917171648476298, -0.5192866115874029, 0.1997510689920335, -1.3675686656161756, -1.7761517497832053, -0.11260276070167097, 0.9717892642758689, 0.0840815981843948, -0.40211265381258554, 0.27384496844034517, -1.0403875081272367, 1.2884781173493884, -1.8066239592554476, 1.1136979156298865, -0.06223155785690416, 1.3930381289015936, 0.4586305673655182, 1.3159249757827194, -0.5369892835955705, 0.17827408233621184, 0.22693934439969682, 0.8216240002114816, -1.0422409752281838, 0.3329686606709231, -1.5128804353968217, 1.0323052869815534, 1.1640486934424354, 1.6450118078345612, -0.6717687395070293, -0.08135119186406627, 1.2746921873544188, -0.8255794145095643, 0.7123504776564864, 0.6953336934741682, 2.191382322698439, 1.4155790749261592, 2.4681081786912866, -2.2904357033803815, -0.8375155191566624, 1.1040106662196736, 0.7084133268872015, -3.401968681942055, 0.23237090512844757, 1.1199436238058174, 0.6333916486592628, -0.6012340913121055, -0.3693951838866523, -1.7742670566875682, -0.36431378282545124, -0.4042586409194551, -0.04648644034604476, 1.5138191613743486, -0.2053670782251071, 1.8679122383251414, 0.8355881018692999, -0.5369705129279005, -0.7909355080370954, 2.1080036780007987, 0.019537331188020687, -1.4672982688640615, -1.486842866467901, -1.1036839537574874, 1.0800858540685894, -0.2313974176207594, 0.47763272078271807, -1.9196070490691473, -0.8193535127855751, -0.6853651905832031, -0.18272370464882973, -0.33413577684633056, 2.2261342671906106, 1.6853726343573683, 0.8563421109235769, 1.0468799885096596, 0.12189082561416206, -1.3596466927672854, -0.7607432068282968, 0.7061728288620306, -0.4384478018639071, 0.8620104661898899, 1.04258758121448, -1.1464159128515612, 0.9617945424413628, 0.04987102831355013, -0.8472878887606543, 0.32986774370339184, 1.278319839581162, -0.4040926804592034, -0.6691567800662129, 0.9415431940597389, 0.3974846022291844, -0.8425204662387112, -1.506166868030291, -0.04248497940038203, 0.26434168799067986, -1.5698380163561454, -0.6651727917714935, 1.2400220571204048, -0.1251830593977037, 0.6156254221302833, 0.43585628657139575, -1.6014619037611209, 
1.9152323656075512, -0.8847911114213622, 1.359854519784993, -0.5554989575409871, 0.25064804193232354, 0.7976616257678464, 0.37834567410982123, -0.6300374359617635, -1.0613465068052854, -0.866474302027355, 1.2458556977164312, 0.577814049080149, 2.069400463823993, 0.9068690176961165, -0.5031387968484738, -0.3640749863516844, -1.041502465417534, 0.6732994659644133, -0.006355018868252906, -0.3650517541386253, 1.0975063446734974, -2.203726812834859, 1.060685913143899, -0.4618706570892267, 0.06475263817517128, -0.19326357638969882, -0.01812119454736379, 0.1337618009668529, 1.1838276997792907, 0.4273677345455913, -0.4912341608307858, 0.2349993979417651, 0.9566260826411601, -0.7948243131958422, -0.6168334352331588, 0.3369425926447926, 0.8547756445246633, 0.2666330662219728, 2.431868771129661, 1.0089732701876513, -0.1162341515974066, -1.1746306816795218, -0.08227639025627424, 0.794676385688044, 0.15005011094018297, -0.8763821573601055, -1.0811684990769739, 0.6311588092267179, 0.026124278982220386, 0.8306502001533514, 1.0856487813261877, -0.018702855899823106, -0.07338137135247896, -0.8435746484744243, -0.18091216366556986, 0.2295807891528797, -1.0689295774443397, -1.5621175533013612, 1.3314045672598216, 0.6211561903553582, 1.0479302317100871, -1.1509436982013124, 0.447985084931758, 0.19917261474342404, 0.3582887259341301, 0.9953552868908098, 0.8948165434511316, 0.4949033431999123, -0.23004847985703908, 0.6411581535557106, -1.1589671573242186, -0.13691519182560624, -0.8849560872785238, 0.6629182075027006, 2.2608150731789696, 2.2823614453180294, -1.2291376923498247, -0.9267975556981378, 0.2597417839242135, -0.7667310491821938, 0.10503294084132372, 2.960320355577672, -1.0645098483081497, -1.2888339889815872, -0.6564570556444346, 0.4742489396354781, 0.8879606773334898, -0.6477585196839569, -0.7309497810668936, 1.7025953934976548, 0.1789174966941155, -0.4839093362740933, -0.8917713440107442, 1.4521776747175792, -0.1676974219641624, -0.500672037099228, -0.2947747621553442, 0.929636971325952, -0.7614935150071248, 1.6886298813725842, -0.8136217834373227, 1.2030997228178093, 1.382267485738376, 2.594387458306705, -0.7703668776292266, -0.7642584795112598, 1.3356598324609947, -0.5745269784148925, -2.212092904499444, -1.727975556661197, -0.18543087256023608, -0.10167435635752538, 1.3480966068787303, 0.0142803272337873, -0.480077631815393, -0.32270216749876185, -1.7884435311074431, -0.5695640948971382, -0.22859087912027687, -0.08783386938029487, -0.18151955278624396, 0.2031493507095467, 0.06444304447669409, -0.4339138073294572, 0.236563959074551, -0.2937958719187449, 0.1611232843821199, -0.6574871644742827, 1.3141902865107886, 0.6093649138398077, 0.056674985715912514, -1.828714441504608, -0.46768482587669535, 0.6489735384886999, 0.5035677725398181, -0.887590772676158, -0.3222316759913631, -0.35172770495027483, -0.4329205472963193, -0.8449916868048998, 0.38282765028957993, 1.3171924061732359, 0.2956667124648384, 0.5390909497681301, -0.7591989862253667, -1.1520792974885883, -0.39344757869384944, 0.6192677330177175, -0.05578834574542242, 0.593015990282657, 0.9374465229256678, 0.647772562443425, 1.1071167572595217, -1.3015016617832518, 1.267300472456379, -0.5807673178649629, 0.9343468385348384, -0.28554893036513673, 0.4487573993840033, 0.6749018890520516, -1.20482985206765, 0.17291806504654686, -0.4124576407610529, -0.9203236505429044, -0.7461342369802754, -0.19694162321688435, 0.46556512963300906, 0.5198366004764268, -1.7222561645076129, -0.7078891617994071, -1.1653209054214695, 1.5560964971092122, 
0.3335520152642012, 0.008390825910327906, 0.11336719644324977, 0.3158913817073965, 0.4704483453862008, -0.5700583482495889, -1.276634964816531, -1.7880560933777756, -0.26514994709973827, 0.6194447367446946, -0.654762456435761, 1.0621929196158544, 0.4454719444987052, -0.9323145612076791, 1.3197357985874438, -0.8792938558447049, -0.2470423905508279, 0.5128954444799875, -0.09202044992462606, -1.3082892596744382, -0.34428948138804927, 0.012422196356164879, 1.4626152292162142, 0.34678216997159833, 0.409462409138861, 0.32838364873801185, 1.8776849459782967, 1.6816627852133539, -0.24894138693568296, 0.7150105850753732, 0.22929306929129853, -0.21434910504054566, 1.3339497173912471, -1.2497042452057836, -0.04487255356399775, -0.6486304639082145, -0.8048044333264733, -1.8090170501469942, 1.481689285694336, -1.4772553200884717, -0.36792462539303805, -1.103508260812736, -0.2135236993720317, 0.40889179796540165, 1.993585196733386, 0.43879096427562897, -0.44512875171982147, -1.1780830020629518, -1.666001035275436, -0.2977294957665528, 1.7299614542270356, 0.9882265798853356, 2.2412430815464597, 0.5801434875813244, -0.739190619909163, -1.2663490594895201, 0.5735521649879137, 1.2105709455012765, 1.9112159951415644, -2.259218931706201, -0.563310876529377, -2.4119185903750493, 0.9662624485722368, -0.22788851242764951, 0.9198283887420099, 0.7855927065251492, -0.7459868094792474, 0.10543289218409971, 0.6401750224618271, -0.0077375118689326705, -0.11647036625911977, -0.4722391874001602, -0.2718425102733572, -0.8796746964457087, 0.6112903638894259, 0.5347851929096421, -0.4749419210717794, 1.0633720764557604, -0.2590556665572949, 2.590182301241823, 1.4524061372706638, -0.8503733047335056, 0.5609357391481067, -1.5661825434426477, 0.8019667474525984, 1.2716795425969496, 0.20011166646917924, -0.7105405282282679, -0.5593129072748189, -1.2401371010520867, -0.7002520937780202, -2.236596391787529, -1.8130090502823886, -0.23990633860801777, 1.7428780878151378, 1.4661206538178901, -0.8678567353744017, 0.2957423562639015, 0.13935419069962593, 1.399598845123674, 0.059729544605779575, -0.9607778026198247, 0.18474907798482051, 1.0117193651915666, -0.9173540069396245, 0.8934765521365161, -0.665655291396948, -0.32955768273493324, 0.3062873812209283, 0.177342106982554, 0.3595522704599547, -1.5964209653110262, 0.6705899137346863, -1.1034642863469553, -1.0029562484065524, 0.10622956543479244, 0.4261871936541378, 0.7777501694354336, -0.806235923997437, -0.8272801398172428, -1.2783440745845536, 0.5982979227669168, -0.28214494859284556, 1.101560367699546, -0.14008021262664466, -0.38717961692054237, 0.9962925044431369, -0.7391490127960976, -0.06294945881724459, 0.7283671247384875, -0.8458895297768138, 0.22808829204347086, 0.43685668023014523, 0.9204095286935638, -0.028241645704951284, 0.15951784765135396, 0.8068984900818966, -0.34387965576978663, 0.573828962760762, -0.13374515460012618, -0.5552788325377814, 0.5644705833909952, -0.7500532220469983, 0.33436674493862256, -0.8595435026628129, -0.38943898244735853, 0.6401502590131951, -1.2968645995363652, 0.5861622311675501, 0.2311759458689689, 0.10962292708600496, -0.26025023584932205, -0.5398478003611565, -1.0514168636922954, 1.2689172189127857, 1.7029909647408918, -0.02325431623491577, -0.3064675950620902, -1.5816446841009473, 0.6874254059433739, 0.7755967316475798, 1.4119333324396597, 0.14198739135512406, 0.2927714469848192, -0.7239793888399496, 0.3506448783535265, -0.7568480706640158, -1.2158508387501554, 0.22197589131086445, -0.5621415304506887, -1.2381112050191665, 
-1.917208333033256, -0.3321665793941188, -0.5916951886991071, -1.244826507645294, -0.29767661008214463, 0.8590635852032509, -1.8579290298421591, -1.0470546224962876, -2.540080936704841, 0.5458326769958273, 0.042222128206941614, 0.6080450228346708, 0.6542717901662132, -1.7292955132690793, -0.4793123354077725, 0.7341767020417185, -1.3322222208234826, -0.5076389542432337, 0.684399163420284, 0.3948487980667425, -1.7919279627150193, 1.582925890933478, 0.8341846456063038, 0.11776890377042544, 1.7471239793853526, 1.2269451783893597, 0.4235463733287474, 1.5908284320029056, -1.635191535538596, 0.04419903330064594, -1.264385360373252, 0.5370192519783876, 1.2368603501240771, -0.9241079150337286, -0.3428051342915208, 0.0882286441353256, -2.210824604513402, -1.9000343283757128, 0.4633735273417207, -0.32534396967175094, 0.026187836765356437, 0.18253601230609245, 0.8519745761039671, -0.028225375482784816, -0.5114197447067229, -1.2428743809444227, 0.2879711400745508, 1.2857130031108321, 0.5296743558975853, -0.8440551904275335, -1.3776032491368861, 1.8164028526343798, -1.1422045767986222, -1.8675179752970443, 0.6969635320800454, 0.9444010906414336, -1.28197913481747, -0.06259132322304235, -0.4518754825442558, 0.9183188639099813, -0.2916931407869574, -1.1464007469977915, -0.4475136941593681, 0.44385573868752803, 2.1606711638680762, -1.4813603018181851, -0.5647618024870872, -1.474746204557383, -2.9067748098220485, 0.06132111635940877, -0.09663310829361334, -1.087053744976143, -1.774855117659402, 0.8130120568830074, -0.5179279676199186, -0.32549430825787784, -1.1995838271705979, 0.8587480835176114, -0.02095126282663596, 0.6677898019388228, -1.1891003375304232, -2.1125937754631305, -0.047765192715672734, 0.09812525010300294, -1.034992359189106, 1.0213451864081846, 1.0788796513160641, -1.444469239557739, 0.28341828947950637, -2.4556013891966737, 1.7126080715698266, -0.5943068899412715, 1.0897594994215383, -0.16345461884651272, 0.7027032523865234, 2.2851158088542562, 0.5038100496225458, -0.16724173993999966, -0.6747457076421414, 0.42254684460738184, 1.277203836895222, -0.34438446183574595, 0.38956738377878264, -0.26884968654334923, -0.02148772950361766, 0.02044885235644607, -1.3873669828232345, 0.19995968746809226, -1.5826859815811556, -0.20385119370067947, 0.5724329589281247, -1.330307658319185, 0.7756101314358208, -0.4989071461473931, 0.5388161769427321, -0.9811085284266614, 2.335331094403556, -0.5588657325211347, -1.2850853695283377, 0.40092993245913744, -1.9675685522110529, 0.9378938542456674, -0.18645815013912917, -0.6828273180353106, -1.840122530632185, -1.2581798109361761, 0.2867275394896832],\n }\n return data\n\n\[email protected]\ndef numeric_high_card_dataset(test_backend, numeric_high_card_dict):\n schemas = {\n \"pandas\": {\n \"norm_0_1\": \"float64\",\n },\n \"postgresql\": {\n # \"norm_0_1\": \"DOUBLE_PRECISION\",\n \"norm_0_1\": \"NUMERIC\",\n },\n \"sqlite\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"mysql\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"spark\": {\n \"norm_0_1\": \"FloatType\",\n }\n }\n return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas)\n\n\[email protected]\ndef datetime_dataset(test_backend):\n data = {\n \"datetime\": [\n str(datetime.datetime(2020, 2, 4, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 5, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 6, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 7, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 8, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 9, 22, 12, 5, 943152)),\n 
str(datetime.datetime(2020, 2, 10, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 11, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 12, 22, 12, 5, 943152)),\n str(datetime.datetime(2020, 2, 13, 22, 12, 5, 943152)),\n ]\n }\n\n schemas = {\n \"pandas\": {\n \"datetime\": \"datetime64\",\n },\n \"postgresql\": {\n \"datetime\": \"TIMESTAMP\",\n },\n \"sqlite\": {\n \"datetime\": \"TIMESTAMP\",\n },\n \"mysql\": {\n \"datetime\": \"TIMESTAMP\",\n },\n \"spark\": {\n \"datetime\": \"TimestampType\",\n }\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef non_numeric_low_card_dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n\n data = {\n \"lowcardnonnum\": [\n \"a\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n \"b\", \"b\", \"b\", \"b\", \"b\", \"b\", \"b\",\n ]\n }\n schemas = {\n \"pandas\": {\n \"lowcardnonnum\": \"str\",\n },\n \"postgresql\": {\n \"lowcardnonnum\": \"TEXT\",\n },\n \"sqlite\": {\n \"lowcardnonnum\": \"VARCHAR\",\n },\n \"mysql\": {\n \"lowcardnonnum\": \"TEXT\",\n },\n \"spark\": {\n \"lowcardnonnum\": \"StringType\",\n }\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef non_numeric_high_card_dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n\n data = {\n \"highcardnonnum\": [\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\", \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\", \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\",\n \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\", \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\", \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\",\n \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\", \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\", \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\",\n \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\", \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\", \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\",\n \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\", \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\", \"m1979gfI6lVF9ijJA245bchYFd1EaMap\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\", \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\",\n \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\", \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\", \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\",\n \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\", \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\", \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\",\n \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\", \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\", \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\",\n \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\", \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\",\n \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\", \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\", 
\"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\",\n \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\", \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\", \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\",\n \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\", \"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\", \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\",\n \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\", \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\", \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\", \"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\",\n \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\", \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\", \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\",\n \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\", \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\", \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\",\n \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\", \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\", \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\",\n \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\", \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\", \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\",\n \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\", \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\", \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\",\n \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\", \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\", \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\",\n \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\",\n \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\", \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\", \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\", \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\",\n \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\", \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\", \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\",\n \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\", \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\", \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\",\n \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\", \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\", \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\",\n \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\", \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\", \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\", \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\", \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\",\n \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\", \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\",\n \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\", \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\", \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\",\n \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\", \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\", \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\",\n \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\", \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\", \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\",\n \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\", \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\", \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\",\n \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\", \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\", \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\",\n \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\", \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\", \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\",\n \"m1979gfI6lVF9ijJA245bchYFd1EaMap\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\",\n \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\", \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\", \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\", \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\", \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\",\n \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\", \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\", \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\",\n 
\"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\", \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\",\n \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\",\n \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\", \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\", \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\",\n \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\", \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\", \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\",\n \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\", \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\", \"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\",\n \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\", \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\", \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\",\n \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\",\n \"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\", \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\", \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\", \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\", \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\",\n \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\", \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\", \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\",\n \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\", \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\", \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\", \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\",\n \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\", \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\", \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\",\n \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\", \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\", \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\",\n \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\", \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\", \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\", \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\",\n \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\", \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\", \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\", \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\", \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\",\n \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\", \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\", \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\",\n \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\", \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\", \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\",\n \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\", \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\", \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\",\n \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\", \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\", \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\", \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\",\n \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\", \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\"\n ],\n # Built from highcardnonnum using the following:\n # vals = pd.Series(data[\"highcardnonnum\"])\n # sample_vals = vals.sample(n=10, random_state=42)\n # weights = np.random.RandomState(42).rand(10)\n # weights = weights / np.sum(weights)\n # new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)\n \"medcardnonnum\": [\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 
'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 
'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'NfX4KfEompMbbKloFq8NQpdXtk5PjaPe',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'NfX4KfEompMbbKloFq8NQpdXtk5PjaPe', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'NfX4KfEompMbbKloFq8NQpdXtk5PjaPe', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'NfX4KfEompMbbKloFq8NQpdXtk5PjaPe', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ',\n 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4',\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP',\n 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'NhTsracusfp5V6zVeWqLZnychDl7jjO4', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk', 'T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP', 'NhTsracusfp5V6zVeWqLZnychDl7jjO4',\n 'k8B9KCXhaQb6Q82zFbAzOESAtDxK174J', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer', '2K8njWnvuq1u6tkzreNhxTEyO8PTeWer',\n 'ajcLVizD2vwZlmmGKyXYki03SWn7fnt3', 'oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ'\n ]\n }\n schemas = {\n \"pandas\": {\n \"highcardnonnum\": \"str\",\n 
\"medcardnonnum\": \"str\",\n },\n \"postgresql\": {\n \"highcardnonnum\": \"TEXT\",\n \"medcardnonnum\": \"TEXT\",\n },\n \"sqlite\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"mysql\": {\n \"highcardnonnum\": \"TEXT\",\n \"medcardnonnum\": \"TEXT\",\n },\n \"spark\": {\n \"highcardnonnum\": \"StringType\",\n \"medcardnonnum\": \"StringType\",\n }\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\ndef dataset_sample_data(test_backend):\n # No infinities for mysql\n if test_backend == \"mysql\":\n data = {\n # \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7]\n }\n else:\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7]\n }\n schemas = {\n \"pandas\": {\n \"infinities\": \"float64\",\n \"nulls\": \"float64\",\n \"naturals\": \"float64\"\n },\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"NUMERIC\"\n },\n \"sqlite\": {\n \"infinities\": \"FLOAT\",\n \"nulls\": \"FLOAT\",\n \"naturals\": \"FLOAT\"\n },\n \"mysql\": {\n \"infinities\": \"FLOAT\",\n \"nulls\": \"FLOAT\",\n \"naturals\": \"FLOAT\"\n },\n \"spark\": {\n \"infinities\": \"FloatType\",\n \"nulls\": \"FloatType\",\n \"naturals\": \"FloatType\"\n }\n }\n return data, schemas\n\n\[email protected]\ndef dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef pandas_dataset():\n test_backend = \"PandasDataset\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef sqlalchemy_dataset(test_backends):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n if \"postgresql\" in test_backends:\n backend = \"postgresql\"\n elif \"sqlite\" in test_backends:\n backend = \"sqlite\"\n else:\n return\n\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7]\n }\n schemas = {\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"DOUBLE_PRECISION\"\n },\n \"sqlite\": {\n \"infinities\": \"FLOAT\",\n \"nulls\": \"FLOAT\",\n \"naturals\": \"FLOAT\"\n }\n }\n return get_dataset(backend, data, schemas=schemas, profiler=None)\n\n\[email protected]\ndef sqlitedb_engine(test_backend):\n if test_backend == 'sqlite':\n import sqlalchemy as sa\n return sa.create_engine('sqlite://')\n else:\n pytest.skip(\"Skipping test designed for sqlite on non-sqlite backend.\")\n\n\[email protected]\ndef postgresql_engine(test_backend):\n if test_backend == 'postgresql':\n import sqlalchemy as sa\n engine = sa.create_engine('postgresql://postgres@localhost/test_ci').connect()\n yield engine\n engine.close()\n else:\n pytest.skip(\"Skipping test designed for postgresql on non-postgresql backend.\")\n\n\[email protected]\ndef empty_data_context(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp('empty_data_context'))\n context = 
ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n return context\n\n\[email protected]\ndef titanic_data_context(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp('titanic_data_context'))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(__file__, \"./test_fixtures/great_expectations_titanic.yml\")\n shutil.copy(titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\")))\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(titanic_csv_path, str(os.path.join(context_path, \"../data/Titanic.csv\")))\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_sqlite_db():\n from sqlalchemy import create_engine\n titanic_db_path = file_relative_path(__file__, \"./test_sets/titanic.db\")\n engine = create_engine('sqlite:///{}'.format(titanic_db_path))\n assert engine.execute(\"select count(*) from titanic\").fetchall()[0] == (1313,)\n return engine\n\n\[email protected]\ndef empty_sqlite_db():\n \"\"\"An empty in-memory sqlite db that always gets run.\"\"\"\n try:\n from sqlalchemy import create_engine\n engine = create_engine('sqlite://')\n assert engine.execute(\"select 1\").fetchall()[0] == (1,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\ndef site_builder_data_context_with_html_store_titanic_random(tmp_path_factory, filesystem_csv_3):\n base_dir = str(tmp_path_factory.mktemp(\"project_dir\"))\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data/titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data/titanic/Titanic.csv\"))\n )\n\n os.makedirs(os.path.join(project_dir, \"data/random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n str(os.path.join(project_dir, \"data/random/f1.csv\"))\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data/random/f2.csv\"))\n )\n\n shutil.copy(file_relative_path(__file__, \"./test_fixtures/great_expectations_site_builder.yml\"),\n str(os.path.join(project_dir, \"great_expectations.yml\")))\n context = ge.data_context.DataContext.create(project_dir)\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data/titanic/\")\n }\n }\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data/random/\")\n }\n }\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n 
\"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\"\n }\n\n return context\n\n\[email protected]\ndef titanic_multibatch_data_context(tmp_path_factory):\n \"\"\"\n Based on titanic_data_context, but with 2 identical batches of\n data asset \"titanic\"\n :param tmp_path_factory:\n :return:\n \"\"\"\n project_path = str(tmp_path_factory.mktemp('titanic_data_context'))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path = os.path.join(context_path, \"../data/titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(file_relative_path(__file__, \"./test_fixtures/great_expectations_titanic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")))\n shutil.copy(file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_1911.csv\")))\n shutil.copy(file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"../data/titanic/Titanic_1912.csv\")))\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef data_context(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(\n asset_config_path, \"my_dag_node/default.json\"\n ),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]()\ndef filesystem_csv_data_context(empty_data_context, filesystem_csv_2):\n empty_data_context.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return empty_data_context\n\n\[email protected]\ndef filesystem_csv(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp('filesystem_csv')\n base_dir = str(base_dir)\n # Put a few files in the directory\n with open(os.path.join(base_dir, \"f1.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f2.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n os.makedirs(os.path.join(base_dir, \"f3\"), exist_ok=True)\n with open(os.path.join(base_dir, \"f3\", 
\"f3_20190101.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f3\", \"f3_20190102.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n return base_dir\n\n\[email protected]\ndef filesystem_csv_2(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp('test_files')\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n return base_dir\n\n\[email protected]\ndef filesystem_csv_3(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp('test_files')\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n toy_dataset_2 = PandasDataset({\"y\": [1, 2, 3]})\n toy_dataset_2.to_csv(os.path.join(base_dir, \"f2.csv\"), index=None)\n\n return base_dir\n\n\[email protected]()\ndef filesystem_csv_4(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp('test_files')\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\n \"x\": [1, 2, 3],\n \"y\": [1, 2, 3],\n })\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n return base_dir\n\n\[email protected]\ndef titanic_profiled_evrs_1():\n with open(file_relative_path(__file__, './render/fixtures/BasicDatasetProfiler_evrs.json'), 'r') as infile:\n return expectationSuiteValidationResultSchema.loads(infile.read())\n\n\[email protected]\ndef titanic_profiled_name_column_evrs():\n\n #This is a janky way to fetch expectations matching a specific name from an EVR suite.\n #TODO: It will no longer be necessary once we implement ValidationResultSuite._group_evrs_by_column\n from great_expectations.render.renderer.renderer import (\n Renderer,\n )\n\n with open(file_relative_path(__file__, \"./render/fixtures/BasicDatasetProfiler_evrs.json\"), \"r\") as infile:\n titanic_profiled_evrs_1 = expectationSuiteValidationResultSchema.load(json.load(infile))\n\n evrs_by_column = Renderer()._group_evrs_by_column(titanic_profiled_evrs_1)\n name_column_evrs = evrs_by_column[\"Name\"]\n\n return name_column_evrs\n\n\[email protected]\ndef titanic_profiled_expectations_1():\n with open(file_relative_path(__file__, \"./render/fixtures/BasicDatasetProfiler_expectations.json\"), 'r') as infile:\n return expectationSuiteSchema.load(json.load(infile))\n\n\[email protected]\ndef titanic_profiled_name_column_expectations():\n from great_expectations.render.renderer.renderer import Renderer\n\n with open(file_relative_path(__file__, \"./render/fixtures/BasicDatasetProfiler_expectations.json\"), 'r') as infile:\n titanic_profiled_expectations = expectationSuiteSchema.load(json.load(infile))\n\n columns, ordered_columns = Renderer()._group_and_order_expectations_by_column(titanic_profiled_expectations)\n name_column_expectations = columns[\"Name\"]\n\n return name_column_expectations\n\n\[email protected]\ndef titanic_validation_results():\n with open(file_relative_path(__file__, \"./test_sets/expected_cli_results_default.json\"), \"r\") as infile:\n return expectationSuiteValidationResultSchema.load(json.load(infile))\n\n\n# various types of evr\[email protected]\ndef evr_failed():\n return ExpectationValidationResult(\n success=False,\n result={\n \"element_count\": 1313,\n \"missing_count\": 0,\n \"missing_percent\": 0.0,\n \"unexpected_count\": 3,\n \"unexpected_percent\": 0.2284843869002285,\n 
\"unexpected_percent_nonmissing\": 0.2284843869002285,\n \"partial_unexpected_list\": [\n \"Daly, Mr Peter Denis \",\n \"Barber, Ms \",\n \"Geiger, Miss Emily \"\n ],\n \"partial_unexpected_index_list\": [\n 77,\n 289,\n 303\n ],\n \"partial_unexpected_counts\": [\n {\n \"value\": \"Barber, Ms \",\n \"count\": 1\n },\n {\n \"value\": \"Daly, Mr Peter Denis \",\n \"count\": 1\n },\n {\n \"value\": \"Geiger, Miss Emily \",\n \"count\": 1\n }\n ]\n },\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None\n },\n expectation_config={\n \"expectation_type\": \"expect_column_values_to_not_match_regex\",\n \"kwargs\": {\n \"column\": \"Name\",\n \"regex\": \"^\\\\s+|\\\\s+$\",\n \"result_format\": \"SUMMARY\"\n }\n }\n )\n\n\[email protected]\ndef evr_failed_with_exception():\n return ExpectationValidationResult(\n success=False,\n exception_info={\n 'raised_exception': True,\n 'exception_message': 'Invalid partition object.',\n 'exception_traceback': 'Traceback (most recent call last):\\n File \"/great_expectations/great_expectations/data_asset/data_asset.py\", line 216, in wrapper\\n return_obj = func(self, **evaluation_args)\\n File \"/great_expectations/great_expectations/dataset/dataset.py\", line 106, in inner_wrapper\\n evaluation_result = func(self, column, *args, **kwargs)\\n File \"/great_expectations/great_expectations/dataset/dataset.py\", line 3381, in expect_column_kl_divergence_to_be_less_than\\n raise ValueError(\"Invalid partition object.\")\\nValueError: Invalid partition object.\\n'\n },\n expectation_config=ExpectationConfiguration(\n expectation_type='expect_column_kl_divergence_to_be_less_than',\n kwargs={\n 'column': 'live',\n 'partition_object': None,\n 'threshold': None,\n 'result_format': 'SUMMARY'\n },\n meta={\n 'BasicDatasetProfiler': {'confidence': 'very low'}\n }\n )\n )\n\n\[email protected]\ndef evr_success():\n return ExpectationValidationResult(\n success=True,\n result={\n \"observed_value\": 1313\n },\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None\n },\n expectation_config=ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_be_between\",\n kwargs={\n \"min_value\": 0,\n \"max_value\": None,\n \"result_format\": \"SUMMARY\"\n }\n )\n )\n\n\[email protected]\ndef sqlite_view_engine(test_backends):\n # Create a small in-memory engine with two views, one of which is temporary\n if \"sqlite\" in test_backends:\n import sqlalchemy as sa\n sqlite_engine = sa.create_engine(\"sqlite://\")\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5]})\n df.to_sql(\"test_table\", con=sqlite_engine)\n sqlite_engine.execute(\"CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;\")\n sqlite_engine.execute(\"CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;\")\n return sqlite_engine\n else:\n pytest.skip(\"SqlAlchemy tests disabled; not testing views\")\n\n\[email protected]\ndef expectation_suite_identifier():\n return ExpectationSuiteIdentifier(\n \"my.expectation.suite.name\"\n )"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
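Aside: the inline comment above the "medcardnonnum" fixture spells out how that column was derived from "highcardnonnum" — sample ten values, draw normalized random weights, then resample 200 entries with replacement. Below is a minimal runnable sketch of that recipe; the short placeholder pool is a hypothetical stand-in for the 123 real "highcardnonnum" strings listed in the record.

import numpy as np
import pandas as pd

# Hypothetical stand-in for the real "highcardnonnum" values above.
highcardnonnum = ["val_%03d" % i for i in range(123)]

vals = pd.Series(highcardnonnum)
sample_vals = vals.sample(n=10, random_state=42)      # pick 10 distinct values
weights = np.random.RandomState(42).rand(10)
weights = weights / np.sum(weights)                   # normalize into a distribution
# Resampling with replacement makes the 10 values recur with fixed frequencies,
# which is exactly the repetition pattern visible in "medcardnonnum".
medcardnonnum = sample_vals.sample(
    n=200, weights=weights, replace=True, random_state=11
).tolist()

assert len(medcardnonnum) == 200
assert len(set(medcardnonnum)) <= 10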
bobatsar/moviepy | [
"75a111b9d3b2c50c6f2a9a36d21432053f02284d"
] | [
"moviepy/video/tools/segmenting.py"
] | [
"import numpy as np\nimport scipy.ndimage as ndi\nfrom moviepy.video.VideoClip import ImageClip\n\n\ndef findObjects(clip,rem_thr=500, preview=False):\n \"\"\" \n Returns a list of ImageClips representing each a separate object on\n the screen.\n \n rem_thr : all objects found with size < rem_Thr will be\n considered false positives and will be removed\n \n \"\"\"\n \n image = clip.get_frame(0)\n if clip.mask is None:\n clip = clip.add_mask()\n \n mask = clip.mask.get_frame(0)\n labelled, num_features = ndi.measurements.label(image[:,:,0])\n \n #find the objects\n slices = ndi.find_objects(labelled)\n # cool trick to remove letter holes (in o,e,a, etc.)\n slices = [e for e in slices if mask[e[0],e[1]].mean() >0.2]\n # remove very small slices\n slices = [e for e in slices if image[e[0],e[1]].size > rem_thr]\n # Sort the slices from left to right\n islices = sorted(enumerate(slices), key = lambda s : s[1][1].start)\n \n letters = []\n for i,(ind,(sy,sx)) in enumerate(islices):\n \"\"\" crop each letter separately \"\"\"\n sy = slice(sy.start-1,sy.stop+1)\n sx = slice(sx.start-1,sx.stop+1)\n letter = image[sy,sx]\n labletter = labelled[sy,sx]\n maskletter = (labletter==(ind+1))*mask[sy,sx]\n letter = ImageClip(image[sy,sx])\n letter.mask = ImageClip( maskletter,ismask=True)\n letter.screenpos = np.array((sx.start,sy.start))\n letters.append(letter)\n \n if preview:\n import matplotlib.pyplot as plt\n print( \"found %d objects\"%(num_features) )\n fig,ax = plt.subplots(2)\n ax[0].axis('off')\n ax[0].imshow(labelled)\n ax[1].imshow([range(num_features)],interpolation='nearest')\n ax[1].set_yticks([])\n plt.show()\n \n return letters\n"
] | [
[
"matplotlib.pyplot.subplots",
"scipy.ndimage.measurements.label",
"scipy.ndimage.find_objects",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
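Aside: the segmenting helper in this record leans on scipy.ndimage's connected-component tooling — ndi.label (the modern path for the ndi.measurements.label call above) assigns an integer label per blob, and ndi.find_objects returns one (row_slice, col_slice) bounding box per label, which findObjects then filters by mask coverage and size before sorting left to right. A self-contained sketch of that pattern on a synthetic frame; the array shapes and values are assumptions for illustration only.

import numpy as np
import scipy.ndimage as ndi

# Synthetic single-channel frame with two bright rectangles on black.
frame = np.zeros((40, 60))
frame[5:15, 5:20] = 1.0    # left object
frame[20:35, 30:55] = 1.0  # right object

labelled, num_features = ndi.label(frame)  # integer label per connected blob
slices = ndi.find_objects(labelled)        # (row_slice, col_slice) per label

# Drop tiny detections and sort boxes left to right, as findObjects does.
slices = [s for s in slices if frame[s[0], s[1]].size > 50]
slices = sorted(slices, key=lambda s: s[1].start)

for sy, sx in slices:
    print("object at column", sx.start, "of size", frame[sy, sx].size)
assert num_features == 2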
proteneer/timemachine | [
"feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701",
"feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701",
"feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701",
"feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701"
] | [
"tests/test_jax_nonbonded.py",
"tests/test_centroid_rescaler.py",
"tests/test_standard_state.py",
"fe/model_rabfe.py"
] | [
"import jax\n\njax.config.update(\"jax_enable_x64\", True)\n\nimport numpy as onp\nfrom numpy.random import randn, rand, randint, seed\n\nseed(2021)\n\nfrom scipy.optimize import minimize\n\nfrom jax import numpy as np, value_and_grad, jit, vmap\n\nfrom jax.ops import index_update, index\nfrom timemachine.potentials.nonbonded import nonbonded_v3, nonbonded_v3_on_specific_pairs, nonbonded_block\nfrom timemachine.potentials.jax_utils import convert_to_4d, get_all_pairs_indices, get_group_group_indices, distance\nfrom md import builders\nfrom ff.handlers import openmm_deserializer\nfrom simtk import unit\n\nfrom functools import partial\nfrom typing import Tuple, Callable\n\nConf = Params = Box = ChargeMask = LJMask = LambdaPlaneIdxs = LambdaOffsetIdxs = np.array\nLamb = Beta = Cutoff = Energy = float\n\nnonbonded_args = Conf, Params, Box, Lamb, ChargeMask, LJMask, Beta, Cutoff, LambdaPlaneIdxs, LambdaOffsetIdxs\nNonbondedArgs = Tuple[nonbonded_args]\nNonbondedFxn = Callable[[*nonbonded_args], Energy]\n\n\ndef resolve_clashes(x0, box0, min_dist=0.1):\n def urt(x, box):\n distance_matrix = distance(x, box)\n i, j = np.triu_indices(len(distance_matrix), k=1)\n return distance_matrix[i, j]\n\n dij = urt(x0, box0)\n x_shape = x0.shape\n box_shape = box0.shape\n\n if np.min(dij) < min_dist:\n # print('some distances too small')\n print(f\"before optimization: min(dij) = {np.min(dij)} < min_dist threshold ({min_dist})\")\n # print('smallest few distances', sorted(dij)[:10])\n\n def unflatten(xbox):\n n = x_shape[0] * x_shape[1]\n x = xbox[:n].reshape(x_shape)\n box = xbox[n:].reshape(box_shape)\n return x, box\n\n def U_repulse(xbox):\n x, box = unflatten(xbox)\n dij = urt(x, box)\n return np.sum(np.where(dij < min_dist, (dij - min_dist) ** 2, 0))\n\n def fun(xbox):\n v, g = value_and_grad(U_repulse)(xbox)\n return float(v), onp.array(g, onp.float64)\n\n initial_state = np.hstack([x0.flatten(), box0.flatten()])\n # print(f'penalty before: {U_repulse(initial_state)}')\n result = minimize(fun, initial_state, jac=True, method=\"L-BFGS-B\")\n # print(f'penalty after minimization: {U_repulse(result.x)}')\n\n x, box = unflatten(result.x)\n dij = urt(x, box)\n\n print(f\"after optimization: min(dij) = {np.min(dij)}\")\n\n return x, box\n\n else:\n return x0, box0\n\n\neasy_instance_flags = dict(\n trigger_pbc=False,\n randomize_box=False,\n randomize_charges=False,\n randomize_sigma=False,\n randomize_epsilon=False,\n randomize_lamb=False,\n randomize_charge_rescale_mask=False,\n randomize_lj_rescale_mask=False,\n randomize_lambda_plane_idxs=False,\n randomize_lambda_offset_idxs=False,\n randomize_beta=False,\n randomize_cutoff=False,\n)\n\ndifficult_instance_flags = {key: True for key in easy_instance_flags}\n\n\ndef generate_waterbox_nb_args() -> NonbondedArgs:\n\n system, positions, box, _ = builders.build_water_system(3.0)\n bps, masses = openmm_deserializer.deserialize_system(system, cutoff=1.2)\n nb = bps[-1]\n params = nb.params\n\n conf = positions.value_in_unit(unit.nanometer)\n\n N = conf.shape[0]\n beta = nb.get_beta()\n cutoff = nb.get_cutoff()\n\n lamb = 0.0\n charge_rescale_mask = onp.ones((N, N))\n lj_rescale_mask = onp.ones((N, N))\n lambda_plane_idxs = np.zeros(N, dtype=int)\n lambda_offset_idxs = np.zeros(N, dtype=int)\n\n args = (\n conf,\n params,\n box,\n lamb,\n charge_rescale_mask,\n lj_rescale_mask,\n beta,\n cutoff,\n lambda_plane_idxs,\n lambda_offset_idxs,\n )\n\n return args\n\n\ndef generate_random_inputs(n_atoms, dim, instance_flags=difficult_instance_flags) -> 
NonbondedArgs:\n \"\"\"Can toggle randomization of each argument using instance_flags\"\"\"\n box = np.eye(dim)\n if instance_flags[\"randomize_box\"]:\n box += np.diag(rand(dim))\n assert box.shape == (dim, dim)\n\n conf = rand(n_atoms, dim)\n if instance_flags[\"trigger_pbc\"]:\n conf *= 5\n conf -= 2.5\n\n min_dist = 0.1\n conf, box = resolve_clashes(conf, box, min_dist=min_dist)\n\n charges = np.zeros(n_atoms)\n sig = min_dist * np.ones(n_atoms)\n eps = np.ones(n_atoms)\n if instance_flags[\"randomize_charges\"]:\n charges = randn(n_atoms)\n if instance_flags[\"randomize_sigma\"]:\n sig = min_dist * rand(n_atoms)\n if instance_flags[\"randomize_epsilon\"]:\n eps = rand(n_atoms)\n\n params = np.array([charges, sig, eps]).T\n\n lamb = 0.0\n if instance_flags[\"randomize_lamb\"]:\n lamb = rand()\n charge_rescale_mask = onp.ones((n_atoms, n_atoms))\n lj_rescale_mask = onp.ones((n_atoms, n_atoms))\n\n for _ in range(n_atoms):\n i, j = randint(n_atoms, size=2)\n if instance_flags[\"randomize_charge_rescale_mask\"]:\n charge_rescale_mask[i, j] = charge_rescale_mask[j, i] = 0.0\n if instance_flags[\"randomize_lj_rescale_mask\"]:\n lj_rescale_mask[i, j] = lj_rescale_mask[j, i] = 0.0\n\n beta = 2.0\n if instance_flags[\"randomize_beta\"]:\n beta += rand()\n cutoff = 1.2\n if instance_flags[\"randomize_cutoff\"]:\n cutoff += rand()\n\n lambda_plane_idxs = np.zeros(n_atoms, dtype=int)\n lambda_offset_idxs = np.zeros(n_atoms, dtype=int)\n\n if instance_flags[\"randomize_lambda_plane_idxs\"]:\n lambda_plane_idxs = randint(low=-2, high=2, size=n_atoms)\n\n if instance_flags[\"randomize_lambda_offset_idxs\"]:\n lambda_offset_idxs = randint(low=-2, high=2, size=n_atoms)\n\n args = (\n conf,\n params,\n box,\n lamb,\n charge_rescale_mask,\n lj_rescale_mask,\n beta,\n cutoff,\n lambda_plane_idxs,\n lambda_offset_idxs,\n )\n\n return args\n\n\ndef compare_two_potentials(u_a: NonbondedFxn, u_b: NonbondedFxn, args: NonbondedArgs, differentiate_wrt=(0, 1, 3)):\n \"\"\"Assert that energies and derivatives w.r.t. 
request argnums are close\"\"\"\n value_and_grads = partial(value_and_grad, argnums=differentiate_wrt)\n energy_a, gradients_a = value_and_grads(u_a)(*args)\n energy_b, gradients_b = value_and_grads(u_b)(*args)\n\n onp.testing.assert_almost_equal(energy_a, energy_b)\n for (g_a, g_b) in zip(gradients_a, gradients_b):\n onp.testing.assert_allclose(g_a, g_b)\n\n\ndef _nonbonded_v3_clone(\n conf,\n params,\n box,\n lamb,\n charge_rescale_mask,\n lj_rescale_mask,\n beta,\n cutoff,\n lambda_plane_idxs,\n lambda_offset_idxs,\n):\n \"\"\"See docstring of nonbonded_v3 for more details\n\n This is here just for testing purposes, to mimic the signature of nonbonded_v3 but to use\n nonbonded_v3_on_specific_pairs under the hood.\n \"\"\"\n\n N = conf.shape[0]\n\n if conf.shape[-1] == 3:\n conf = convert_to_4d(conf, lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff)\n\n # make 4th dimension of box large enough so its roughly aperiodic\n if box is not None:\n if box.shape[-1] == 3:\n box_4d = np.eye(4) * 1000\n box_4d = index_update(box_4d, index[:3, :3], box)\n else:\n box_4d = box\n else:\n box_4d = None\n box = box_4d\n\n # TODO: len(inds_i) == n_interactions -- may want to break this\n # up into more manageable blocks if n_interactions is large\n inds_i, inds_j = get_all_pairs_indices(N)\n\n lj, coulomb = nonbonded_v3_on_specific_pairs(conf, params, box, inds_i, inds_j, beta, cutoff)\n\n # keep only eps > 0\n eps = params[:, 2]\n lj = np.where(eps[inds_i] > 0, lj, 0)\n lj = np.where(eps[inds_j] > 0, lj, 0)\n\n eij_total = lj * lj_rescale_mask[inds_i, inds_j] + coulomb * charge_rescale_mask[inds_i, inds_j]\n\n return np.sum(eij_total)\n\n\ndef run_randomized_tests_of_jax_nonbonded(instance_generator, n_instances=10):\n \"\"\"Assert that nonbonded_v3 and _nonbonded_v3 agree on several random instances\n\n instance_generator(n_atoms, dim) -> NonbondedArgs\n \"\"\"\n jittable_nonbonded_v3 = partial(nonbonded_v3, runtime_validate=False)\n u_a, u_b = jit(jittable_nonbonded_v3), jit(_nonbonded_v3_clone)\n\n min_size, max_size = 10, 50\n\n random_sizes = onp.random.randint(min_size, max_size, n_instances)\n dims = onp.random.randint(3, 5, n_instances)\n\n for n_atoms, dim in zip(random_sizes, dims):\n args = instance_generator(n_atoms, dim)\n compare_two_potentials(u_a, u_b, args)\n\n\ndef test_jax_nonbonded_waterbox():\n jittable_nonbonded_v3 = partial(nonbonded_v3, runtime_validate=False)\n u_a, u_b = jit(jittable_nonbonded_v3), jit(_nonbonded_v3_clone)\n compare_two_potentials(u_a, u_b, generate_waterbox_nb_args())\n\n\ndef test_jax_nonbonded_easy(n_instances=10):\n instance_generator = partial(generate_random_inputs, instance_flags=easy_instance_flags)\n run_randomized_tests_of_jax_nonbonded(instance_generator, n_instances)\n\n\ndef test_jax_nonbonded(n_instances=10):\n instance_generator = partial(generate_random_inputs, instance_flags=difficult_instance_flags)\n run_randomized_tests_of_jax_nonbonded(instance_generator, n_instances)\n\n\ndef test_vmap():\n \"\"\"Can call jit(vmap(nonbonded_v3_on_specific_pairs))\"\"\"\n\n # # atoms in \"ligand\" vs. 
\"environment\"\n n_ligand, n_environment = 50, 1000\n n_total = n_ligand + n_environment\n conf, params, box, lamb, _, _, beta, cutoff, _, _ = generate_random_inputs(n_total, 3)\n\n inds_i, inds_j = get_group_group_indices(n_ligand, n_environment)\n inds_j += n_ligand\n n_interactions = len(inds_i)\n\n fixed_kwargs = dict(params=params, box=box, inds_l=inds_i, inds_r=inds_j, beta=beta, cutoff=cutoff)\n\n # signature: conf -> ljs, coulombs, where ljs.shape == (n_interactions, )\n u_pairs = partial(nonbonded_v3_on_specific_pairs, **fixed_kwargs)\n ljs, coulombs = u_pairs(conf)\n assert ljs.shape == (n_interactions,)\n\n def u(conf):\n ljs, coulombs = u_pairs(conf)\n return np.sum(ljs + coulombs)\n\n # vmap over snapshots\n vmapped = jit(vmap(u))\n n_snapshots = 100\n confs = onp.random.randn(n_snapshots, n_total, 3)\n us = vmapped(confs)\n assert us.shape == (n_snapshots,)\n\n\ndef test_jax_nonbonded_block():\n \"\"\"Assert that nonbonded_block and nonbonded_on_specific_pairs agree\"\"\"\n system, positions, box, _ = builders.build_water_system(3.0)\n bps, masses = openmm_deserializer.deserialize_system(system, cutoff=1.2)\n nb = bps[-1]\n params = nb.params\n\n conf = positions.value_in_unit(unit.nanometer)\n\n N = conf.shape[0]\n beta = nb.get_beta()\n cutoff = nb.get_cutoff()\n\n split = 70\n\n def u_a(x, box, params):\n xi = x[:split]\n xj = x[split:]\n pi = params[:split]\n pj = params[split:]\n return nonbonded_block(xi, xj, box, pi, pj, beta, cutoff)\n\n i_s, j_s = np.indices((split, N - split))\n indices_left = i_s.flatten()\n indices_right = j_s.flatten() + split\n\n def u_b(x, box, params):\n vdw, es = nonbonded_v3_on_specific_pairs(x, params, box, indices_left, indices_right, beta, cutoff)\n\n return np.sum(vdw + es)\n\n onp.testing.assert_almost_equal(u_a(conf, box, params), u_b(conf, box, params))\n",
"import numpy as np\nfrom md.barostat.moves import CentroidRescaler\nfrom md.barostat.utils import compute_intramolecular_distances\n\nnp.random.seed(2021)\n\n\ndef _generate_random_instance():\n # randomly generate point set of size between 50 and 1000\n n_particles = np.random.randint(50, 1000)\n particle_inds = np.arange(n_particles)\n\n # randomly generate group_inds with group sizes between 1 and 10\n group_inds = []\n np.random.shuffle(particle_inds)\n i = 0\n while i < len(particle_inds):\n j = min(n_particles, i + np.random.randint(1, 10))\n group_inds.append(np.array(particle_inds[i:j]))\n i = j\n\n # randomly generate coords\n coords = np.array(np.random.randn(n_particles, 3))\n\n return coords, group_inds\n\n\ndef test_null_rescaling(n_instances=10):\n \"\"\"scaling by a factor of 1.0x shouldn't change coordinates\"\"\"\n for _ in range(n_instances):\n coords, group_inds = _generate_random_instance()\n center = np.random.randn(3)\n\n rescaler = CentroidRescaler(group_inds)\n coords_prime = rescaler.scale_centroids(coords, center, 1.0)\n\n np.testing.assert_allclose(coords_prime, coords)\n\n\ndef test_intramolecular_distance(n_instances=10):\n \"\"\"Test that applying a rescaling doesn't change intramolecular distances\"\"\"\n for _ in range(n_instances):\n coords, group_inds = _generate_random_instance()\n distances = compute_intramolecular_distances(coords, group_inds)\n\n center = np.random.randn(3)\n scale = np.random.rand() + 0.5\n\n rescaler = CentroidRescaler(group_inds)\n coords_prime = rescaler.scale_centroids(coords, center, scale)\n distances_prime = compute_intramolecular_distances(coords_prime, group_inds)\n\n np.testing.assert_allclose(np.hstack(distances_prime), np.hstack(distances))\n\n\ndef test_compute_centroids(n_instances=10):\n \"\"\"test that CentroidRescaler's compute_centroids agrees with _slow_compute_centroids\n on random instances of varying size\"\"\"\n\n for _ in range(n_instances):\n coords, group_inds = _generate_random_instance()\n\n # assert compute_centroids agrees with _slow_compute_centroids\n rescaler = CentroidRescaler(group_inds)\n fast_centroids = rescaler.compute_centroids(coords)\n slow_centroids = rescaler._slow_compute_centroids(coords)\n np.testing.assert_array_almost_equal(slow_centroids, fast_centroids)\n",
"import numpy as np\nimport functools\n\nfrom simtk import unit\nfrom fe import standard_state\nimport rmsd\n\nimport scipy.integrate\n\n\nfrom timemachine.potentials import rmsd\n\n\ndef test_translational_restraint():\n k = 25.0\n b = 0.0\n\n def harmonic_restraint(r):\n return k * (r - b) ** 2\n\n beta = 0.67\n Z_numeric = standard_state.integrate_radial_Z(harmonic_restraint, beta, r_max=10.0)\n\n k = k * beta\n Z_exact = (\n 4.0\n * np.pi\n * (\n (b * np.exp(-(b ** 2) * k)) / (2 * k)\n + ((1 + 2 * b ** 2 * k) * np.sqrt(np.pi) * (1 + scipy.special.erf(b * np.sqrt(k)))) / (4 * k ** (3 / 2))\n )\n )\n\n np.testing.assert_almost_equal(Z_numeric, Z_exact)\n\n dG = standard_state.standard_state_correction(Z_exact, beta)\n\n assert dG < 0\n\n\ndef test_rotational_restraint():\n\n k = 25.0\n u_fn = functools.partial(rmsd.angle_u, k=k)\n beta = 0.67\n Z_quat = standard_state.integrate_rotation_Z(u_fn, beta)\n\n def integrand(phi_1, phi_2, psi):\n delta = psi\n alpha = phi_1\n gamma = phi_2\n cos_theta = np.cos(delta / 2) ** 2 * np.cos(gamma + alpha) - np.sin(delta / 2) ** 2\n nrg = rmsd.cos_angle_u(cos_theta, k)\n assert nrg > 0\n # constant = 1/(8*np.pi**2) # normalization constant not needed\n constant = 1 / 8\n return constant * np.sin(psi) * np.exp(-beta * nrg)\n\n Z_euler, _ = scipy.integrate.tplquad(\n integrand,\n 0, # psi low\n np.pi, # psi high\n lambda x: 0, # phi_1 low\n lambda x: 2 * np.pi, # phi_1 high\n lambda x, y: 0, # phi_2 low\n lambda x, y: 2 * np.pi, # phi_2 high\n )\n\n np.testing.assert_almost_equal(Z_quat, Z_euler)\n\n\ndef test_release_restraints():\n # test the release of orientational restraints.\n k_t = 50.0\n k_r = 25.0\n beta = 0.67\n dG_t, dG_r = standard_state.release_orientational_restraints(k_t, k_r, beta)\n\n # these should be negative for sensible force constants\n assert dG_t < 0\n assert dG_r < 0\n",
"from abc import ABC\n\nimport functools\nimport numpy as np\nimport mdtraj\n\nfrom simtk import openmm\nfrom rdkit import Chem\n\nfrom timemachine.lib import potentials, LangevinIntegrator, MonteCarloBarostat\nfrom timemachine import constants\nfrom fe.frames import endpoint_frames_only\nfrom fe import free_energy_rabfe, topology, estimator_abfe, model_utils\nfrom ff import Forcefield\n\nfrom parallel.client import AbstractClient, _MockFuture\nfrom typing import Optional, Tuple, Any, List\n\nfrom md.barostat.utils import get_group_indices, get_bond_list\n\nimport pickle\n\n\nclass AbsoluteModel(ABC):\n def __init__(\n self,\n client: AbstractClient or None,\n ff: Forcefield,\n host_system: openmm.System,\n host_schedule: np.ndarray,\n host_topology: openmm.app.Topology,\n temperature: float,\n pressure: float,\n dt: float,\n equil_steps: int,\n prod_steps: int,\n frame_filter: Optional[callable] = None,\n ):\n\n self.host_system = host_system\n self.host_schedule = host_schedule\n self.host_topology = host_topology\n self.temperature = temperature\n self.pressure = pressure\n self.dt = dt\n\n self.client = client\n self.ff = ff\n self.equil_steps = equil_steps\n self.prod_steps = prod_steps\n if frame_filter is None:\n frame_filter = endpoint_frames_only\n self.frame_filter = frame_filter\n\n def setup_topology(self, mol):\n raise NotImplementedError()\n\n def simulate_futures(\n self, ff_params, mol, x0, box0, prefix, core_idxs=None\n ) -> Tuple[List[Any], estimator_abfe.FreeEnergyModel, List[Any]]:\n top = self.setup_topology(mol)\n\n afe = free_energy_rabfe.AbsoluteFreeEnergy(mol, top)\n\n unbound_potentials, sys_params, masses = afe.prepare_host_edge(ff_params, self.host_system)\n\n seed = 0\n\n beta = 1 / (constants.BOLTZ * self.temperature)\n\n bond_list = get_bond_list(unbound_potentials[0])\n masses = model_utils.apply_hmr(masses, bond_list)\n friction = 1.0\n integrator = LangevinIntegrator(self.temperature, self.dt, friction, masses, seed)\n\n group_indices = get_group_indices(bond_list)\n barostat_interval = 5\n barostat = MonteCarloBarostat(\n x0.shape[0], self.pressure, self.temperature, group_indices, barostat_interval, seed\n )\n\n v0 = np.zeros_like(x0)\n\n endpoint_correct = False\n model = estimator_abfe.FreeEnergyModel(\n unbound_potentials,\n endpoint_correct,\n self.client,\n box0,\n x0,\n v0,\n integrator,\n barostat,\n self.host_schedule,\n self.equil_steps,\n self.prod_steps,\n beta,\n prefix,\n )\n bound_potentials = []\n for params, unbound_pot in zip(sys_params, model.unbound_potentials):\n bp = unbound_pot.bind(np.asarray(params))\n bound_potentials.append(bp)\n\n all_args = []\n for lamb_idx, lamb in enumerate(model.lambda_schedule):\n\n subsample_interval = 1000\n\n all_args.append(\n (\n lamb,\n model.box,\n model.x0,\n model.v0,\n bound_potentials,\n model.integrator,\n model.barostat,\n model.equil_steps,\n model.prod_steps,\n subsample_interval,\n subsample_interval,\n model.lambda_schedule,\n )\n )\n\n if endpoint_correct:\n\n assert isinstance(bound_potentials[-1], potentials.HarmonicBond)\n\n all_args.append(\n (\n 1.0,\n model.box,\n model.x0,\n model.v0,\n bound_potentials[:-1], # strip out the restraints\n model.integrator,\n model.barostat,\n model.equil_steps,\n model.prod_steps,\n subsample_interval,\n subsample_interval,\n [], # no need to evaluate Us for the endpoint correction\n )\n )\n\n futures = []\n if self.client is None:\n for args in all_args:\n futures.append(_MockFuture(estimator_abfe.simulate(*args)))\n else:\n for args in 
all_args:\n futures.append(self.client.submit(estimator_abfe.simulate, *args))\n return sys_params, model, futures\n\n def predict_from_futures(self, sys_params, mol, model: estimator_abfe.FreeEnergyModel, futures: List[Any]):\n results = [fut.result() for fut in futures]\n dG, dG_err, results = estimator_abfe.deltaG_from_results(model, results, sys_params)\n\n # uncomment if we want to visualize\n combined_topology = model_utils.generate_imaged_topology(\n [self.host_topology, mol], model.x0, model.box, \"initial_\" + model.prefix + \".pdb\"\n )\n\n for lambda_idx, res in self.frame_filter(results):\n np.savez(\n f\"initial_{model.prefix}_lambda_idx_{lambda_idx}.npz\",\n xs=res.xs,\n boxes=res.boxes,\n du_dps=res.du_dps,\n lambda_us=res.lambda_us,\n )\n\n return dG, dG_err\n\n def predict(self, ff_params, mol, x0, box0, prefix, core_idxs=None):\n \"\"\"Compute the absolute free of energy of decoupling mol_a.\n\n This function is differentiable w.r.t. ff_params.\n\n Parameters\n ----------\n\n ff_params: list of np.ndarray\n This should match the ordered params returned by the forcefield\n\n mol: Chem.Mol\n Molecule we want to decouple\n\n x0: np.narray\n Initial coordinates of the combined system.\n\n box0: np.narray\n Initial box vectors of the combined system.\n\n prefix: str\n String to prepend to print out statements\n\n core_idxs: None or list of int\n List of core_idxs we may wish to turn off.\n\n Returns\n -------\n float\n delta G in kJ/mol\n\n float\n BAR error in the delta G in kJ/mol\n\n Note that the error estimate is likely to be biased for two reasons: we don't\n know the true decorrelation time, and by re-using intermediate windows\n to compute delta_Us, the BAR estimates themselves become correlated.\n\n \"\"\"\n sys_params, model, futures = self.simulate_futures(ff_params, mol, x0, box0, prefix, core_idxs=core_idxs)\n\n dG, dG_err = self.predict_from_futures(sys_params, mol, model, futures)\n\n return dG, dG_err\n\n\nclass RelativeModel(ABC):\n \"\"\"\n Absolute free energy using a reference molecule to block the binding pocket.\n \"\"\"\n\n def __init__(\n self,\n client: Optional[AbstractClient],\n ff: Forcefield,\n host_system: openmm.System,\n host_schedule: np.ndarray,\n host_topology: openmm.app.Topology,\n temperature: float,\n pressure: float,\n dt: float,\n equil_steps: int,\n prod_steps: int,\n frame_filter: Optional[callable] = None,\n ):\n\n self.host_system = host_system\n self.temperature = temperature\n self.pressure = pressure\n self.dt = dt\n self.host_schedule = host_schedule\n self.host_topology = host_topology\n self.client = client\n self.ff = ff\n self.equil_steps = equil_steps\n self.prod_steps = prod_steps\n if frame_filter is None:\n frame_filter = endpoint_frames_only\n self.frame_filter = frame_filter\n\n def setup_topology(self, mol_a, mol_b):\n raise NotImplementedError()\n\n def _futures_a_to_b(self, ff_params, mol_a, mol_b, combined_core_idxs, x0, box0, prefix):\n\n num_host_atoms = x0.shape[0] - mol_a.GetNumAtoms() - mol_b.GetNumAtoms()\n\n # (ytz): super ugly, undo combined_core_idxs to get back original idxs\n core_idxs = combined_core_idxs - num_host_atoms\n core_idxs[:, 1] -= mol_a.GetNumAtoms()\n\n dual_topology = self.setup_topology(mol_a, mol_b)\n rfe = free_energy_rabfe.RelativeFreeEnergy(dual_topology)\n\n unbound_potentials, sys_params, masses = rfe.prepare_host_edge(ff_params, self.host_system)\n\n k_core = 30.0\n\n core_params = np.zeros_like(combined_core_idxs).astype(np.float64)\n core_params[:, 0] = k_core\n\n B = 
len(combined_core_idxs)\n\n restraint_potential = potentials.HarmonicBond(\n combined_core_idxs,\n )\n\n unbound_potentials.append(restraint_potential)\n sys_params.append(core_params)\n\n # tbd sample from boltzmann distribution later\n v0 = np.zeros_like(x0)\n\n seed = 0\n beta = 1 / (constants.BOLTZ * self.temperature)\n\n bond_list = np.concatenate([unbound_potentials[0].get_idxs(), core_idxs])\n masses = model_utils.apply_hmr(masses, bond_list)\n\n friction = 1.0\n integrator = LangevinIntegrator(self.temperature, self.dt, friction, masses, seed)\n bond_list = list(map(tuple, bond_list))\n group_indices = get_group_indices(bond_list)\n barostat_interval = 5\n\n barostat = MonteCarloBarostat(\n x0.shape[0], self.pressure, self.temperature, group_indices, barostat_interval, seed\n )\n\n endpoint_correct = True\n model = estimator_abfe.FreeEnergyModel(\n unbound_potentials,\n endpoint_correct,\n self.client,\n box0, # important, use equilibrated box.\n x0,\n v0,\n integrator,\n barostat,\n self.host_schedule,\n self.equil_steps,\n self.prod_steps,\n beta,\n prefix,\n )\n\n bound_potentials = []\n for params, unbound_pot in zip(sys_params, model.unbound_potentials):\n bp = unbound_pot.bind(np.asarray(params))\n bound_potentials.append(bp)\n\n all_args = []\n for lamb_idx, lamb in enumerate(model.lambda_schedule):\n\n subsample_interval = 1000\n\n all_args.append(\n (\n lamb,\n model.box,\n model.x0,\n model.v0,\n bound_potentials,\n model.integrator,\n model.barostat,\n model.equil_steps,\n model.prod_steps,\n subsample_interval,\n subsample_interval,\n model.lambda_schedule,\n )\n )\n\n if endpoint_correct:\n\n assert isinstance(bound_potentials[-1], potentials.HarmonicBond)\n\n all_args.append(\n (\n 1.0,\n model.box,\n model.x0,\n model.v0,\n bound_potentials[:-1], # strip out the restraints\n model.integrator,\n model.barostat,\n model.equil_steps,\n model.prod_steps,\n subsample_interval,\n subsample_interval,\n [], # no need to evaluate Us for the endpoint correction\n )\n )\n\n futures = []\n if self.client is None:\n for args in all_args:\n futures.append(_MockFuture(estimator_abfe.simulate(*args)))\n else:\n for args in all_args:\n futures.append(self.client.submit(estimator_abfe.simulate, *args))\n\n return sys_params, model, futures\n\n def simulate_futures(\n self, ff_params, mol_a, mol_b, core, x0, box0, prefix\n ) -> Tuple[List[Any], List[estimator_abfe.FreeEnergyModel], List[List[Any]]]:\n\n num_host_atoms = x0.shape[0] - mol_a.GetNumAtoms() - mol_b.GetNumAtoms()\n host_coords = x0[:num_host_atoms]\n mol_a_coords = x0[num_host_atoms : num_host_atoms + mol_a.GetNumAtoms()]\n mol_b_coords = x0[num_host_atoms + mol_a.GetNumAtoms() :]\n\n # pull out mol_b from combined state\n combined_core_idxs = np.copy(core)\n combined_core_idxs[:, 0] += num_host_atoms\n combined_core_idxs[:, 1] += num_host_atoms + mol_a.GetNumAtoms()\n # this is redundant, but thought it best to be explicit about ordering here..\n combined_coords = np.concatenate([host_coords, mol_a_coords, mol_b_coords])\n\n all_sys = []\n models = []\n all_futures = []\n sys_params, model, futures = self._futures_a_to_b(\n ff_params,\n mol_a,\n mol_b,\n combined_core_idxs,\n combined_coords,\n box0,\n prefix + \"_ref_to_mol\",\n )\n\n all_sys.append(sys_params)\n models.append(model)\n all_futures.append(futures)\n\n # pull out mol_a from combined state\n combined_core_idxs = np.copy(core)\n # swap the ligand coordinates in the reverse direction\n combined_core_idxs[:, 0] = core[:, 1]\n combined_core_idxs[:, 1] = core[:, 
0]\n combined_core_idxs[:, 0] += num_host_atoms\n combined_core_idxs[:, 1] += num_host_atoms + mol_b.GetNumAtoms()\n combined_coords = np.concatenate([host_coords, mol_b_coords, mol_a_coords])\n sys_params, model, futures = self._futures_a_to_b(\n ff_params,\n mol_b,\n mol_a,\n combined_core_idxs,\n combined_coords,\n box0,\n prefix + \"_mol_to_ref\",\n )\n\n all_sys.append(sys_params)\n models.append(model)\n all_futures.append(futures)\n\n return all_sys, models, all_futures\n\n def predict_from_futures(\n self, sys_params, mol_a, mol_b, models: List[estimator_abfe.FreeEnergyModel], futures: List[List[Any]]\n ):\n assert len(futures) == 2\n assert len(models) == 2\n assert len(sys_params) == 2\n err = 0\n fwd_dG = 0\n back_dG = 0\n for i, (params, model, sub_futures) in enumerate(zip(sys_params, models, futures)):\n results = [fut.result() for fut in sub_futures]\n dG, dG_err, results = estimator_abfe.deltaG_from_results(model, results, params)\n\n # Save out the pdb\n combined_topology = model_utils.generate_imaged_topology(\n [self.host_topology, mol_a, mol_b], model.x0, model.box, f\"initial_{model.prefix}.pdb\"\n )\n\n for lambda_idx, res in self.frame_filter(results):\n np.savez(\n f\"initial_{model.prefix}_lambda_idx_{lambda_idx}.npz\",\n xs=res.xs,\n boxes=res.boxes,\n du_dps=res.du_dps,\n lambda_us=res.lambda_us,\n )\n # fwd_dG is the free energy of moving X-A-B into X-A+B\n # back_dG is the free energy of moving X-B-A into X-B+A\n # -fwd_dG + back_dG is the free energy of moving X-A+B -> X-B+A\n # i.e. the free energy of \"unbinding\" A\n if i == 0:\n fwd_dG = dG\n else:\n back_dG = dG\n err += dG_err ** 2\n err = np.sqrt(err)\n return -fwd_dG + back_dG, err\n\n def predict(\n self,\n ff_params: list,\n mol_a: Chem.Mol,\n mol_b: Chem.Mol,\n core_idxs: np.array,\n x0: np.array,\n box0: np.array,\n prefix: str,\n ):\n \"\"\"\n Compute the free of energy of converting mol_a into mol_b. The starting state\n has mol_a fully interacting with the environment, mol_b is non-interacting.\n The end state has mol_b fully interacting with the environment, and mol_a is\n non-interacting. The atom mapping defining the core need be neither\n bijective nor factorizable.\n\n This function is differentiable w.r.t. 
ff_params.\n\n Parameters\n ----------\n\n ff_params: list of np.ndarray\n This should match the ordered params returned by the forcefield\n\n mol_a: Chem.Mol\n Starting molecule\n\n mol_b: Chem.Mol\n Resulting molecule\n\n core_idxs: np.array (Nx2), dtype int32\n Atom mapping defining the core, mapping atoms from mol_a to atoms in mol_b.\n\n prefix: str\n Auxiliary string to prepend print-outs\n\n x0: np.ndarray\n Initial coordinates of the combined system.\n\n box0: np.ndarray\n Initial box vectors.\n\n Returns\n -------\n float\n delta delta G in kJ/mol of morphing mol_a into mol_b\n\n float\n BAR error in the delta delta G in kJ/mol\n\n Note that the error estimate is likely to be biased for two reasons: we don't\n know the true decorrelation time, and by re-using intermediate windows\n to compute delta_Us, the BAR estimates themselves become correlated.\n\n \"\"\"\n\n sys_params, models, futures = self.simulate_futures(\n ff_params,\n mol_a,\n mol_b,\n core_idxs,\n x0,\n box0,\n prefix,\n )\n dG, dG_err = self.predict_from_futures(\n sys_params,\n mol_b,\n mol_a,\n models,\n futures,\n )\n\n return dG, dG_err\n\n\n# subclasses specific for each model\n\n\nclass AbsoluteHydrationModel(AbsoluteModel):\n def setup_topology(self, mol):\n return topology.BaseTopologyRHFE(mol, self.ff)\n\n\nclass RelativeHydrationModel(RelativeModel):\n def setup_topology(self, mol_a, mol_b):\n return topology.DualTopologyRHFE(mol_a, mol_b, self.ff)\n\n\nclass AbsoluteConversionModel(AbsoluteModel):\n def setup_topology(self, mol):\n top = topology.BaseTopologyConversion(mol, self.ff)\n return top\n\n\nclass AbsoluteStandardHydrationModel(AbsoluteModel):\n def setup_topology(self, mol):\n top = topology.BaseTopologyStandardDecoupling(mol, self.ff)\n return top\n\n\nclass RelativeBindingModel(RelativeModel):\n def setup_topology(self, mol_a, mol_b):\n top = topology.DualTopologyStandardDecoupling(mol_a, mol_b, self.ff)\n return top\n"
] | [
[
"numpy.random.seed",
"numpy.ones",
"numpy.testing.assert_almost_equal",
"numpy.random.randn",
"numpy.random.rand",
"scipy.optimize.minimize",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.hstack",
"numpy.testing.assert_array_almost_equal",
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle",
"numpy.random.randn",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.testing.assert_almost_equal",
"numpy.exp"
],
[
"numpy.savez",
"numpy.sqrt",
"numpy.asarray",
"numpy.concatenate",
"numpy.copy",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
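Aside: test_jax_nonbonded.py in this record repeatedly uses one testing idiom — wrap two implementations with jax.value_and_grad over chosen argnums and assert that both the energies and the gradients agree (compare_two_potentials). Below is a toy, self-contained sketch of that idiom under assumed inputs; the two quadratic potentials are illustrative stand-ins, not timemachine code.

import jax
import numpy as onp
from functools import partial
from jax import numpy as np, value_and_grad

jax.config.update("jax_enable_x64", True)

# Two algebraically identical potentials written differently.
def u_a(x, k):
    return np.sum(k * x ** 2)

def u_b(x, k):
    return k * np.dot(x, x)

def compare_two(u_1, u_2, args, differentiate_wrt=(0, 1)):
    # value_and_grad returns (energy, tuple of gradients, one per argnum)
    value_and_grads = partial(value_and_grad, argnums=differentiate_wrt)
    e_1, g_1 = value_and_grads(u_1)(*args)
    e_2, g_2 = value_and_grads(u_2)(*args)
    onp.testing.assert_almost_equal(e_1, e_2)
    for a, b in zip(g_1, g_2):
        onp.testing.assert_allclose(a, b)

compare_two(u_a, u_b, (np.arange(5.0), 2.0))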
YUANMUCE/masktrackrcnn | [
"10e5d7ded62e0b7c5bf79075d9ee0cc37dc15321"
] | [
"tools/test_video.py"
] | [
"import argparse\n\nimport torch\nimport mmcv\nfrom mmcv.runner import load_checkpoint, parallel_test, obj_from_dict\nfrom mmcv.parallel import scatter, collate, MMDataParallel\n\nfrom mmdet import datasets\nfrom mmdet.core import results2json_videoseg, ytvos_eval\nfrom mmdet.datasets import build_dataloader\nfrom mmdet.models import build_detector, detectors\n\n\ndef single_test(model, data_loader, show=False, save_path=''):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=not show, **data)\n results.append(result)\n\n if show:\n model.module.show_result(data, result, dataset.img_norm_cfg,\n dataset=dataset.CLASSES,\n save_vis = True,\n save_path = save_path,\n is_video = True)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef _data_func(data, device_id):\n data = scatter(collate([data], samples_per_gpu=1), [device_id])[0]\n return dict(return_loss=False, rescale=True, **data)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument(\n '--save_path', \n type=str,\n help='path to save visual result')\n parser.add_argument(\n '--gpus', default=1, type=int, help='GPU number used for testing')\n parser.add_argument(\n '--proc_per_gpu',\n default=1,\n type=int,\n help='Number of processes per GPU')\n parser.add_argument('--out', help='output result file')\n parser.add_argument('--load_result', \n action='store_true', \n help='whether to load existing result')\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n choices=['bbox', 'segm'],\n help='eval types')\n parser.add_argument('--show', action='store_true', help='show results')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))\n assert args.gpus == 1\n model = build_detector(\n cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n load_checkpoint(model, args.checkpoint)\n model = MMDataParallel(model, device_ids=[0])\n\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n num_gpus=1,\n dist=False,\n shuffle=False)\n if args.load_result:\n outputs = mmcv.load(args.out)\n else:\n outputs = single_test(model, data_loader, args.show, save_path=args.save_path)\n\n if args.out:\n if not args.load_result:\n print('writing results to {}'.format(args.out))\n \n mmcv.dump(outputs, args.out)\n eval_types = args.eval\n if eval_types:\n print('Starting evaluate {}'.format(' and '.join(eval_types)))\n if not isinstance(outputs[0], dict):\n result_file = args.out + '.json'\n results2json_videoseg(dataset, outputs, result_file)\n ytvos_eval(result_file, eval_types, dataset.ytvos)\n else:\n NotImplemented\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ryfi/scikit-allel | [
"a597b50ff32d0280fd8187f8fadc0b2b895dda61"
] | [
"allel/test/test_stats.py"
] | [
"# -*- coding: utf-8 -*-\nimport unittest\n\n\nimport numpy as np\nimport pytest\nfrom pytest import approx\n\n\nimport allel\nfrom allel.test.tools import assert_array_equal as aeq, assert_array_almost_equal\nfrom allel.util import ignore_invalid, mask_inaccessible\nfrom allel import GenotypeArray, HaplotypeArray, SortedIndex, AlleleCountsArray\n\n\nclass TestAccessibilityMasking(unittest.TestCase):\n\n def test_mask_inaccessible(self):\n np.random.seed(2837)\n for n_vars in [5, 50, 500]:\n pos = np.arange(1, n_vars+1)\n ac = np.random.randint(1, 40, n_vars*2).reshape((n_vars, 2))\n mask = np.random.randint(2, size=n_vars).astype(bool)\n\n mpos, mac = mask_inaccessible(mask, pos, ac)\n aeq(mac, ac[mask])\n aeq(mpos, pos[mask])\n\n def test_incomplete_is_accessible(self):\n # is_accessible mask has to cover all positions\n pos = np.array([1, 2, 10])\n ac = np.array([[5, 5], [2, 4]])\n mask = np.array([True, True, False])\n self.assertRaises(ValueError, mask_inaccessible, mask, pos, ac)\n\n def test_compatible_dims(self):\n # is_accessible mask has to cover all positions\n pos = np.array([1, 2, 10])\n mask = np.array([True, True, False])\n self.assertRaises(ValueError, mask_inaccessible, mask, pos)\n\n def test_masking_warning(self):\n # assert user is being warning of masking\n pos = np.array([1, 2, 3])\n mask = np.array([True, True, False])\n self.assertWarns(UserWarning, mask_inaccessible, mask, pos)\n\n def test_fully_masked_windowed_diversty(self):\n ac = allel.AlleleCountsArray(np.array(\n [\n [5, 5],\n [5, 5],\n [1, 9],\n [1, 9]\n ]))\n pos = np.array([1, 2, 3, 4])\n mask = np.array([False, False, True, True])\n pi, _, _, _ = allel.windowed_diversity(pos, ac, size=2, start=1,\n stop=5, is_accessible=mask)\n self.assertTrue(np.isnan(pi[0]))\n\n def test_masked_windowed_diversity(self):\n # four haplotypes, 6 pairwise comparison\n h = allel.HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n ac = h.count_alleles()\n # mean pairwise diversity\n # expect = [0, 3/6, 4/6, 3/6, 0, 5/6, 5/6, 1, -1]\n pos = SortedIndex([2, 4, 7, 14, 15, 18, 19, 25, 27])\n mask = np.tile(np.repeat(np.array([True, False]), 5), 3)\n # expected is every other window with size 5\n expect, _, _, _ = allel.windowed_diversity(pos, ac, size=5, start=1,\n stop=31)\n # only getting every other element\n expect = expect[::2]\n # actual is window of size 10 with the last half masked out\n actual, _, _, _ = allel.windowed_diversity(pos, ac, size=10, start=1,\n stop=31, is_accessible=mask)\n assert_array_almost_equal(expect, actual)\n\n def test_masked_windowed_divergence(self):\n h = HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n h1 = h.take([0, 1], axis=1)\n h2 = h.take([2, 3], axis=1)\n ac1 = h1.count_alleles()\n ac2 = h2.count_alleles()\n pos = SortedIndex([2, 4, 7, 14, 15, 18, 19, 25, 27])\n mask = np.tile(np.repeat(np.array([True, False]), 5), 3)\n expect, _, _, _ = allel.windowed_divergence(pos, ac1, ac2, size=5,\n start=1, stop=31)\n expect = expect[::2]\n actual, _, _, _ = allel.windowed_divergence(pos, ac1, ac2, size=10,\n start=1, stop=31,\n is_accessible=mask)\n assert_array_almost_equal(expect, actual)\n\n\nclass TestWindowUtilities(unittest.TestCase):\n\n def test_moving_statistic(self):\n f = allel.moving_statistic\n\n values = [2, 5, 8, 16]\n expect = [7, 24]\n actual = 
f(values, statistic=np.sum, size=2)\n aeq(expect, actual)\n\n values = [2, 5, 8, 16]\n expect = [7, 13, 24]\n actual = f(values, statistic=np.sum, size=2, step=1)\n aeq(expect, actual)\n\n def test_windowed_statistic(self):\n f = allel.windowed_statistic\n pos = [1, 12, 15, 27]\n\n # boolean array, all true\n b = [True, True, True, True]\n expected_nnz = [1, 2, 1]\n expected_windows = [[1, 10], [11, 20], [21, 27]]\n expected_counts = [1, 2, 1]\n actual_nnz, actual_windows, actual_counts = \\\n f(pos, b, np.count_nonzero, 10)\n aeq(expected_nnz, actual_nnz)\n aeq(expected_windows, actual_windows)\n aeq(expected_counts, actual_counts)\n\n # boolean array, not all true\n b = [False, True, False, True]\n expected_nnz = [0, 1, 1]\n expected_windows = [[1, 10], [11, 20], [21, 27]]\n expected_counts = [1, 2, 1]\n actual_nnz, actual_windows, actual_counts = \\\n f(pos, b, np.count_nonzero, 10)\n aeq(expected_windows, actual_windows)\n aeq(expected_nnz, actual_nnz)\n aeq(expected_counts, actual_counts)\n\n # explicit start and stop\n b = [False, True, False, True]\n expected_nnz = [1, 0, 1]\n expected_windows = [[5, 14], [15, 24], [25, 29]]\n expected_counts = [1, 1, 1]\n actual_nnz, actual_windows, actual_counts = \\\n f(pos, b, np.count_nonzero, 10, start=5, stop=29)\n aeq(expected_windows, actual_windows)\n aeq(expected_nnz, actual_nnz)\n aeq(expected_counts, actual_counts)\n\n # boolean array, bad length\n b = [False, True, False]\n with pytest.raises(ValueError):\n f(pos, b, np.count_nonzero, 10)\n\n # 2D, 4 variants, 2 samples\n b = [[True, False],\n [True, True],\n [True, False],\n [True, True]]\n expected_nnz = [[1, 0],\n [2, 1],\n [1, 1]]\n expected_windows = [[1, 10], [11, 20], [21, 27]]\n expected_counts = [1, 2, 1]\n actual_nnz, actual_windows, actual_counts = \\\n f(pos, b, statistic=lambda x: np.sum(x, axis=0), size=10)\n aeq(expected_nnz, actual_nnz)\n aeq(expected_windows, actual_windows)\n aeq(expected_counts, actual_counts)\n\n def test_per_base(self):\n pos = [1, 12, 15, 27]\n\n # boolean array, all true\n b = [True, True, True, True]\n # N.B., final bin includes right edge\n expected_nnz = [1, 2, 1]\n expected_windows = [[1, 10], [11, 20], [21, 27]]\n expected_counts = [1, 2, 1]\n expected_densities = [1/10, 2/10, 1/7]\n expected_n_bases = [10, 10, 7]\n nnz, windows, counts = allel.windowed_statistic(\n pos, b, statistic=np.count_nonzero, size=10, start=1\n )\n densities, n_bases = allel.per_base(nnz, windows)\n aeq(expected_nnz, nnz)\n aeq(expected_windows, windows)\n aeq(expected_counts, counts)\n aeq(expected_densities, densities)\n aeq(expected_n_bases, n_bases)\n\n # boolean array, not all true\n b = [False, True, False, True]\n expected_densities = [0/10, 1/10, 1/7]\n expected_n_bases = [10, 10, 7]\n nnz, windows, counts = allel.windowed_statistic(\n pos, b, statistic=np.count_nonzero, size=10, start=1\n )\n densities, n_bases = allel.per_base(nnz, windows)\n aeq(expected_densities, densities)\n aeq(expected_n_bases, n_bases)\n\n # 2D, 4 variants, 2 samples\n b = [[True, False],\n [True, True],\n [True, False],\n [True, True]]\n expected_densities = [[1/10, 0/10],\n [2/10, 1/10],\n [1/7, 1/7]]\n expected_n_bases = [10, 10, 7]\n nnz, windows, counts = allel.windowed_statistic(\n pos, b, statistic=lambda x: np.sum(x, axis=0), size=10, start=1\n )\n densities, n_bases = allel.per_base(nnz, windows)\n aeq(expected_densities, densities)\n aeq(expected_n_bases, n_bases)\n\n # include is_accessible array option\n is_accessible = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 1, 1, 
1, 0, 0, 1, 1, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=bool)\n b = [False, True, False, True]\n expected_densities = [-1, 1/6, 1/7]\n expected_n_bases = [0, 6, 7]\n nnz, windows, counts = allel.windowed_statistic(\n pos, b, statistic=np.count_nonzero, size=10, start=1\n )\n densities, n_bases = allel.per_base(nnz, windows, is_accessible=is_accessible, fill=-1)\n aeq(expected_densities, densities)\n aeq(expected_n_bases, n_bases)\n\n def test_equally_accessible_windows(self):\n is_accessible = np.array([1, 0, 0, 1, 1, 0, 1, 0, 1])\n\n # default options\n actual = allel.equally_accessible_windows(is_accessible, size=2)\n expect = np.array([[1, 4], [5, 7]])\n aeq(expect, actual)\n\n # with step\n actual = allel.equally_accessible_windows(is_accessible, size=2, step=1)\n expect = np.array([[1, 4], [4, 5], [5, 7], [7, 9]])\n aeq(expect, actual)\n\n # with start and stop\n actual = allel.equally_accessible_windows(is_accessible, size=2, start=4, stop=5)\n expect = np.array([[4, 5]])\n aeq(expect, actual)\n\n\nclass TestDiversityDivergence(unittest.TestCase):\n\n def test_mean_pairwise_diversity(self):\n\n # start with simplest case, two haplotypes, one pairwise comparison\n h = HaplotypeArray([[0, 0],\n [1, 1],\n [0, 1],\n [1, 2],\n [0, -1],\n [-1, -1]])\n ac = h.count_alleles()\n expect = [0, 0, 1, 1, -1, -1]\n actual = allel.mean_pairwise_difference(ac, fill=-1)\n aeq(expect, actual)\n\n # four haplotypes, 6 pairwise comparison\n h = HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n ac = h.count_alleles()\n expect = [0, 3/6, 4/6, 3/6, 0, 5/6, 5/6, 1, -1]\n actual = allel.mean_pairwise_difference(ac, fill=-1)\n assert_array_almost_equal(expect, actual)\n\n def test_sequence_divergence(self):\n from allel import sequence_divergence\n pos = [2, 4, 8]\n ac1 = AlleleCountsArray([[2, 0],\n [2, 0],\n [2, 0]])\n ac2 = AlleleCountsArray([[0, 2],\n [0, 2],\n [0, 2]])\n\n # all variants\n e = 3 / 7\n a = sequence_divergence(pos, ac1, ac2)\n assert e == a\n\n # start/stop\n e = 2 / 6\n a = sequence_divergence(pos, ac1, ac2, start=0, stop=5)\n assert e == a\n\n # start/stop, an provided\n an1 = ac1.sum(axis=1)\n an2 = ac2.sum(axis=1)\n e = 2 / 6\n a = sequence_divergence(pos, ac1, ac2, start=0, stop=5, an1=an1,\n an2=an2)\n assert e == a\n\n def test_windowed_diversity(self):\n\n # four haplotypes, 6 pairwise comparison\n h = HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n ac = h.count_alleles()\n # mean pairwise diversity\n # expect = [0, 3/6, 4/6, 3/6, 0, 5/6, 5/6, 1, -1]\n pos = SortedIndex([2, 4, 7, 14, 15, 18, 19, 25, 27])\n expect = [(7/6)/10, (13/6)/10, 1/11]\n actual, _, _, _ = allel.windowed_diversity(pos, ac, size=10, start=1, stop=31)\n assert_array_almost_equal(expect, actual)\n\n def test_mean_pairwise_divergence(self):\n\n # simplest case, two haplotypes in each population\n h = HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n h1 = h.take([0, 1], axis=1)\n h2 = h.take([2, 3], axis=1)\n ac1 = h1.count_alleles()\n ac2 = h2.count_alleles()\n\n expect = [0/4, 2/4, 4/4, 2/4, 0/4, 4/4, 3/4, -1, -1]\n actual = allel.mean_pairwise_difference_between(ac1, ac2, fill=-1)\n aeq(expect, actual)\n\n def test_windowed_divergence(self):\n\n # simplest 
case, two haplotypes in each population\n h = HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n h1 = h.take([0, 1], axis=1)\n h2 = h.take([2, 3], axis=1)\n ac1 = h1.count_alleles()\n ac2 = h2.count_alleles()\n # mean pairwise divergence\n # expect = [0/4, 2/4, 4/4, 2/4, 0/4, 4/4, 3/4, -1, -1]\n pos = SortedIndex([2, 4, 7, 14, 15, 18, 19, 25, 27])\n expect = [(6/4)/10, (9/4)/10, 0/11]\n actual, _, _, _ = allel.windowed_divergence(\n pos, ac1, ac2, size=10, start=1, stop=31\n )\n assert_array_almost_equal(expect, actual)\n\n def test_tajima_d(self):\n from allel import tajima_d\n\n # example with calculable value\n ac = AlleleCountsArray([[1, 3],\n [2, 2],\n [3, 1]])\n expect = approx(0.168, 0.01)\n actual = tajima_d(ac)\n assert expect == actual\n\n # too few sites\n ac = AlleleCountsArray([[2, 2],\n [3, 1]])\n assert np.nan is tajima_d(ac)\n\n # too few segregating sites\n ac = AlleleCountsArray([[4, 0],\n [2, 2],\n [3, 1]])\n assert np.nan is tajima_d(ac)\n # allow people to override if they really want to\n assert approx(0.592, 0.01) == tajima_d(ac, min_sites=2)\n\n def test_moving_tajima_d(self):\n from allel import moving_tajima_d\n\n # example with calculable value\n ac = AlleleCountsArray([[1, 3],\n [2, 2],\n [3, 1],\n [1, 3],\n [2, 2]])\n expect = np.array([0.168] * 3)\n actual = moving_tajima_d(ac, size=3, step=1)\n assert_array_almost_equal(expect, actual, decimal=3)\n\n # too few sites\n actual = moving_tajima_d(ac, size=2, step=1)\n assert 4 == len(actual)\n assert np.all(np.isnan(actual))\n\n # too few segregating sites\n ac = AlleleCountsArray([[4, 0],\n [2, 2],\n [3, 1],\n [4, 0],\n [2, 2]])\n actual = moving_tajima_d(ac, size=3, step=1)\n assert 3 == len(actual)\n assert np.all(np.isnan(actual))\n # allow people to override if they really want to\n expect = np.array([0.592] * 3)\n actual = moving_tajima_d(ac, size=3, step=1, min_sites=2)\n assert_array_almost_equal(expect, actual, decimal=3)\n\n def test_windowed_tajima_d(self):\n from allel import windowed_tajima_d\n\n pos = np.array([1, 11, 21, 31, 41])\n\n # example with calculable value\n ac = AlleleCountsArray([[1, 3],\n [2, 2],\n [3, 1],\n [1, 3],\n [2, 2]])\n expect = np.array([0.168] * 3)\n actual, _, _ = windowed_tajima_d(pos, ac, size=25, step=10)\n assert_array_almost_equal(expect, actual, decimal=3)\n\n # too few sites\n actual, _, _ = windowed_tajima_d(pos, ac, size=15, step=10)\n assert 4 == len(actual)\n assert np.all(np.isnan(actual))\n\n # too few segregating sites\n ac = AlleleCountsArray([[4, 0],\n [2, 2],\n [3, 1],\n [4, 0],\n [2, 2]])\n actual, _, _ = windowed_tajima_d(pos, ac, size=25, step=10)\n assert 3 == len(actual)\n assert np.all(np.isnan(actual))\n # allow people to override if they really want to\n expect = np.array([0.592] * 3)\n actual, _, _ = windowed_tajima_d(pos, ac, size=25, step=10, min_sites=2)\n assert_array_almost_equal(expect, actual, decimal=3)\n\n\nclass TestHardyWeinberg(unittest.TestCase):\n\n def test_heterozygosity_observed(self):\n\n # diploid\n g = GenotypeArray([[[0, 0], [0, 0]],\n [[1, 1], [1, 1]],\n [[1, 1], [2, 2]],\n [[0, 0], [0, 1]],\n [[0, 0], [0, 2]],\n [[1, 1], [1, 2]],\n [[0, 1], [0, 1]],\n [[0, 1], [1, 2]],\n [[0, 0], [-1, -1]],\n [[0, 1], [-1, -1]],\n [[-1, -1], [-1, -1]]], dtype='i1')\n expect = [0, 0, 0, .5, .5, .5, 1, 1, 0, 1, -1]\n actual = allel.heterozygosity_observed(g, fill=-1)\n aeq(expect, actual)\n\n # polyploid\n g = GenotypeArray([[[0, 
0, 0], [0, 0, 0]],\n [[1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [2, 2, 2]],\n [[0, 0, 0], [0, 0, 1]],\n [[0, 0, 0], [0, 0, 2]],\n [[1, 1, 1], [0, 1, 2]],\n [[0, 0, 1], [0, 1, 1]],\n [[0, 1, 1], [0, 1, 2]],\n [[0, 0, 0], [-1, -1, -1]],\n [[0, 0, 1], [-1, -1, -1]],\n [[-1, -1, -1], [-1, -1, -1]]], dtype='i1')\n expect = [0, 0, 0, .5, .5, .5, 1, 1, 0, 1, -1]\n actual = allel.heterozygosity_observed(g, fill=-1)\n aeq(expect, actual)\n\n def test_heterozygosity_expected(self):\n\n def refimpl(f, ploidy, fill=0):\n \"\"\"Limited reference implementation for testing purposes.\"\"\"\n\n # check allele frequencies sum to 1\n af_sum = np.sum(f, axis=1)\n\n # assume three alleles\n p = f[:, 0]\n q = f[:, 1]\n r = f[:, 2]\n\n out = 1 - p**ploidy - q**ploidy - r**ploidy\n with ignore_invalid():\n out[(af_sum < 1) | np.isnan(af_sum)] = fill\n\n return out\n\n # diploid\n g = GenotypeArray([[[0, 0], [0, 0]],\n [[1, 1], [1, 1]],\n [[1, 1], [2, 2]],\n [[0, 0], [0, 1]],\n [[0, 0], [0, 2]],\n [[1, 1], [1, 2]],\n [[0, 1], [0, 1]],\n [[0, 1], [1, 2]],\n [[0, 0], [-1, -1]],\n [[0, 1], [-1, -1]],\n [[-1, -1], [-1, -1]]], dtype='i1')\n expect1 = [0, 0, 0.5, .375, .375, .375, .5, .625, 0, .5, -1]\n af = g.count_alleles().to_frequencies()\n expect2 = refimpl(af, ploidy=g.ploidy, fill=-1)\n actual = allel.heterozygosity_expected(af, ploidy=g.ploidy, fill=-1)\n assert_array_almost_equal(expect1, actual)\n assert_array_almost_equal(expect2, actual)\n expect3 = [0, 0, 0.5, .375, .375, .375, .5, .625, 0, .5, 0]\n actual = allel.heterozygosity_expected(af, ploidy=g.ploidy, fill=0)\n assert_array_almost_equal(expect3, actual)\n\n # polyploid\n g = GenotypeArray([[[0, 0, 0], [0, 0, 0]],\n [[1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [2, 2, 2]],\n [[0, 0, 0], [0, 0, 1]],\n [[0, 0, 0], [0, 0, 2]],\n [[1, 1, 1], [0, 1, 2]],\n [[0, 0, 1], [0, 1, 1]],\n [[0, 1, 1], [0, 1, 2]],\n [[0, 0, 0], [-1, -1, -1]],\n [[0, 0, 1], [-1, -1, -1]],\n [[-1, -1, -1], [-1, -1, -1]]], dtype='i1')\n af = g.count_alleles().to_frequencies()\n expect = refimpl(af, ploidy=g.ploidy, fill=-1)\n actual = allel.heterozygosity_expected(af, ploidy=g.ploidy, fill=-1)\n assert_array_almost_equal(expect, actual)\n\n def test_inbreeding_coefficient(self):\n\n # diploid\n g = GenotypeArray([[[0, 0], [0, 0]],\n [[1, 1], [1, 1]],\n [[1, 1], [2, 2]],\n [[0, 0], [0, 1]],\n [[0, 0], [0, 2]],\n [[1, 1], [1, 2]],\n [[0, 1], [0, 1]],\n [[0, 1], [1, 2]],\n [[0, 0], [-1, -1]],\n [[0, 1], [-1, -1]],\n [[-1, -1], [-1, -1]]], dtype='i1')\n # ho = np.array([0, 0, 0, .5, .5, .5, 1, 1, 0, 1, -1])\n # he = np.array([0, 0, 0.5, .375, .375, .375, .5, .625, 0, .5, -1])\n # expect = 1 - (ho/he)\n expect = [-1, -1, 1-0, 1-(.5/.375), 1-(.5/.375), 1-(.5/.375),\n 1-(1/.5), 1-(1/.625), -1, 1-(1/.5), -1]\n actual = allel.inbreeding_coefficient(g, fill=-1)\n assert_array_almost_equal(expect, actual)\n\n\nclass TestDistance(unittest.TestCase):\n\n def test_pdist(self):\n from allel.stats.distance import pdist\n h = HaplotypeArray([[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 2],\n [0, 1, 1, 2],\n [0, 1, -1, -1],\n [-1, -1, -1, -1]])\n import scipy.spatial\n d1 = scipy.spatial.distance.pdist(h.T, 'hamming')\n d2 = pdist(h, 'hamming')\n aeq(d1, d2)\n\n def test_pairwise_distance_multidim(self):\n g = GenotypeArray([[[0, 0], [0, 0]],\n [[1, 1], [1, 1]],\n [[1, 1], [2, 2]],\n [[0, 0], [0, 1]],\n [[0, 0], [0, 2]],\n [[1, 1], [1, 2]],\n [[0, 1], [0, 1]],\n [[0, 1], [1, 2]],\n [[0, 0], [-1, -1]],\n [[0, 1], [-1, -1]],\n [[-1, -1], [-1, -1]]], dtype='i1')\n gac = 
g.to_allele_counts()\n\n def metric(ac1, ac2):\n mpd = allel.mean_pairwise_difference_between(ac1, ac2, fill=0)\n return mpd.sum()\n\n expect = [allel.mean_pairwise_difference_between(gac[:, 0], gac[:, 1], fill=0).sum()]\n actual = allel.pairwise_distance(gac, metric)\n aeq(expect, actual)\n\n def test_condensed_coords(self):\n from allel import condensed_coords\n assert 0 == condensed_coords(0, 1, 2)\n assert 0 == condensed_coords(1, 0, 2)\n assert 0 == condensed_coords(0, 1, 3)\n assert 0 == condensed_coords(1, 0, 3)\n assert 1 == condensed_coords(0, 2, 3)\n assert 1 == condensed_coords(2, 0, 3)\n assert 2 == condensed_coords(1, 2, 3)\n assert 2 == condensed_coords(2, 1, 3)\n\n with pytest.raises(ValueError):\n condensed_coords(0, 0, 1)\n condensed_coords(0, 1, 1)\n condensed_coords(1, 0, 1)\n condensed_coords(0, 0, 2)\n condensed_coords(0, 2, 2)\n condensed_coords(2, 0, 2)\n condensed_coords(1, 1, 2)\n condensed_coords(0, 0, 3)\n condensed_coords(1, 1, 3)\n condensed_coords(2, 2, 3)\n\n def test_condensed_coords_within(self):\n from allel import condensed_coords_within\n\n pop = [0, 1]\n n = 3\n expect = [0]\n actual = condensed_coords_within(pop, n)\n assert expect == actual\n\n pop = [0, 2]\n n = 3\n expect = [1]\n actual = condensed_coords_within(pop, n)\n assert expect == actual\n\n pop = [1, 2]\n n = 3\n expect = [2]\n actual = condensed_coords_within(pop, n)\n assert expect == actual\n\n pop = [0, 1, 3]\n n = 4\n expect = [0, 2, 4]\n actual = condensed_coords_within(pop, n)\n assert expect == actual\n\n pop = [0, 0]\n with pytest.raises(ValueError):\n condensed_coords_within(pop, n)\n\n def test_condensed_coords_between(self):\n from allel import condensed_coords_between\n\n pop1 = [0, 1]\n pop2 = [2, 3]\n n = 4\n expect = [1, 2, 3, 4]\n actual = condensed_coords_between(pop1, pop2, n)\n assert expect == actual\n\n pop1 = [0, 2]\n pop2 = [1, 3]\n n = 4\n expect = [0, 2, 3, 5]\n actual = condensed_coords_between(pop1, pop2, n)\n assert expect == actual\n\n with pytest.raises(ValueError):\n condensed_coords_between(pop1, pop1, n)\n\n\nclass TestLinkageDisequilibrium(unittest.TestCase):\n\n def test_rogers_huff_r(self):\n\n gn = [[0, 1, 2],\n [0, 1, 2]]\n expect = 1.\n actual = allel.rogers_huff_r(gn)\n assert expect == actual\n\n gn = [[0, 1, 2],\n [2, 1, 0]]\n expect = -1.\n actual = allel.rogers_huff_r(gn)\n assert expect == actual\n\n gn = [[0, 0, 0],\n [0, 0, 0]]\n actual = allel.rogers_huff_r(gn)\n assert np.isnan(actual)\n\n gn = [[0, 0, 0],\n [1, 1, 1]]\n actual = allel.rogers_huff_r(gn)\n assert np.isnan(actual)\n\n gn = [[1, 1, 1],\n [1, 1, 1]]\n actual = allel.rogers_huff_r(gn)\n assert np.isnan(actual)\n\n gn = [[0, -1, 0],\n [-1, 1, -1]]\n actual = allel.rogers_huff_r(gn)\n assert np.isnan(actual)\n\n gn = [[0, 1, 0],\n [-1, -1, -1]]\n actual = allel.rogers_huff_r(gn)\n assert np.isnan(actual)\n\n gn = [[0, 1, 0, 1],\n [0, 1, 1, 0]]\n expect = 0\n actual = allel.rogers_huff_r(gn)\n assert expect == actual\n\n gn = [[0, 1, 2, -1],\n [0, 1, 2, 2]]\n expect = 1.\n actual = allel.rogers_huff_r(gn)\n assert expect == actual\n\n gn = [[0, 1, 2, 2],\n [0, 1, 2, -1]]\n expect = 1.\n actual = allel.rogers_huff_r(gn)\n assert expect == actual\n\n gn = [[0, 1, 2],\n [0, 1, -1]]\n expect = 1.\n actual = allel.rogers_huff_r(gn)\n assert expect == actual\n\n gn = [[0, 2],\n [2, 0],\n [0, 1]]\n expect = [-1, 1, -1]\n actual = allel.rogers_huff_r(gn)\n assert_array_almost_equal(expect, actual)\n\n gn = [[0, 2, 0],\n [0, 2, 0],\n [2, 0, 2],\n [0, 2, -1]]\n expect = [1, -1, 1, -1, 1, 
-1]\n actual = allel.rogers_huff_r(gn)\n assert_array_almost_equal(expect, actual)\n\n def test_rogers_huff_r_between(self):\n\n gna = [[0, 1, 2]]\n gnb = [[0, 1, 2]]\n expect = 1.\n actual = allel.rogers_huff_r_between(gna, gnb)\n assert expect == actual\n\n gna = [[0, 1, 2]]\n gnb = [[2, 1, 0]]\n expect = -1.\n actual = allel.rogers_huff_r_between(gna, gnb)\n assert expect == actual\n\n gna = [[0, 0, 0]]\n gnb = [[1, 1, 1]]\n actual = allel.rogers_huff_r_between(gna, gnb)\n assert np.isnan(actual)\n\n def test_locate_unlinked(self):\n\n gn = [[0, 1, 2],\n [0, 1, 2]]\n expect = [True, False]\n actual = allel.locate_unlinked(gn, size=2, step=2, threshold=.5)\n aeq(expect, actual)\n\n gn = [[0, 1, 1, 2],\n [0, 1, 1, 2],\n [1, 1, 0, 2],\n [1, 1, 0, 2]]\n actual = allel.locate_unlinked(gn, size=2, step=1, threshold=.5)\n expect = [True, False, True, False]\n aeq(expect, actual)\n\n gn = [[0, 1, 1, 2],\n [0, 1, 1, 2],\n [0, 1, 1, 2],\n [1, 1, 0, 2],\n [1, 1, 0, 2]]\n actual = allel.locate_unlinked(gn, size=2, step=1, threshold=.5)\n expect = [True, False, True, True, False]\n aeq(expect, actual)\n actual = allel.locate_unlinked(gn, size=3, step=1, threshold=.5)\n expect = [True, False, False, True, False]\n aeq(expect, actual)\n\n\nclass TestAdmixture(unittest.TestCase):\n\n def test_patterson_f2(self):\n aca = [[0, 2],\n [2, 0],\n [1, 1],\n [0, 0]]\n acb = [[0, 2],\n [0, 2],\n [0, 2],\n [0, 2]]\n expect = [0., 1., 0., np.nan]\n actual = allel.patterson_f2(aca, acb)\n assert_array_almost_equal(expect, actual)\n\n def test_patterson_f3(self):\n aca = [[0, 2],\n [2, 0],\n [0, 2],\n [0, 2],\n [0, 0]]\n acb = [[2, 0],\n [0, 2],\n [0, 2],\n [0, 2],\n [0, 2]]\n acc = [[1, 1],\n [1, 1],\n [0, 2],\n [2, 0],\n [1, 1]]\n expect_f3 = [-.5, -.5, 0., 1., np.nan]\n actual_f3, actual_hzc = allel.patterson_f3(acc, aca, acb)\n assert_array_almost_equal(expect_f3, actual_f3)\n expect_hzc = [1., 1., 0., 0., 1.]\n assert_array_almost_equal(expect_hzc, actual_hzc)\n\n def test_patterson_d(self):\n aca = [[0, 2],\n [2, 0],\n [2, 0],\n [1, 1],\n [0, 0]]\n acb = [[0, 2],\n [0, 2],\n [0, 2],\n [1, 1],\n [0, 2]]\n acc = [[2, 0],\n [2, 0],\n [0, 2],\n [1, 1],\n [0, 2]]\n acd = [[2, 0],\n [0, 2],\n [2, 0],\n [1, 1],\n [0, 2]]\n num, den = allel.patterson_d(aca, acb, acc, acd)\n expect_num = [0., 1., -1., 0., np.nan]\n expect_den = [0., 1., 1., 0.25, np.nan]\n assert_array_almost_equal(expect_num, num)\n assert_array_almost_equal(expect_den, den)\n\n\nclass TestRunsOfHomozygosity(unittest.TestCase):\n\n def test_roh_mhmm_100pct(self):\n\n # values correspond to start/stop/length/is_marginal\n roh_expected = np.array([[1, 100, 100, True]], dtype=object)\n fraction_expected = 1.0\n gv = np.zeros((4, 2), dtype=np.int16)\n pos = [1, 10, 50, 100]\n roh, fraction = allel.roh_mhmm(gv, pos, contig_size=100)\n aeq(roh.values, roh_expected)\n assert fraction == fraction_expected\n\n def test_roh_mhmm_0pct(self):\n\n fraction_expected = 0.0\n\n gv = np.zeros((4, 2), dtype=np.int16)\n gv[2, 0] = 1\n\n pos = [1, 10, 50, 100]\n roh, fraction = allel.roh_mhmm(gv, pos, contig_size=100)\n assert roh.shape[0] == 0\n assert fraction == fraction_expected\n"
] | [
[
"numpy.random.seed",
"numpy.isnan",
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hesenp/pyro | [
"0c49858ab8c5f263d1ece7f212180c8ccd8da370"
] | [
"examples/vae/vae_comparison.py"
] | [
"import argparse\nimport itertools\nimport os\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nimport torch.nn as nn\nfrom six import add_metaclass\nfrom torch.nn import functional\nfrom torchvision.utils import save_image\n\nimport pyro\nfrom pyro.contrib.examples import util\nfrom pyro.distributions import Bernoulli, Normal\nfrom pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO\nfrom pyro.optim import Adam\nfrom utils.mnist_cached import DATA_DIR, RESULTS_DIR\n\n\"\"\"\nComparison of VAE implementation in PyTorch and Pyro. This example can be\nused for profiling purposes.\n\nThe PyTorch VAE example is taken (with minor modification) from pytorch/examples.\nSource: https://github.com/pytorch/examples/tree/master/vae\n\"\"\"\n\nTRAIN = 'train'\nTEST = 'test'\nOUTPUT_DIR = RESULTS_DIR\n\n\n# VAE encoder network\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.fc1 = nn.Linear(784, 400)\n self.fc21 = nn.Linear(400, 20)\n self.fc22 = nn.Linear(400, 20)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = x.reshape(-1, 784)\n h1 = self.relu(self.fc1(x))\n return self.fc21(h1), torch.exp(self.fc22(h1))\n\n\n# VAE Decoder network\nclass Decoder(nn.Module):\n def __init__(self):\n super(Decoder, self).__init__()\n self.fc3 = nn.Linear(20, 400)\n self.fc4 = nn.Linear(400, 784)\n self.relu = nn.ReLU()\n\n def forward(self, z):\n h3 = self.relu(self.fc3(z))\n return torch.sigmoid(self.fc4(h3))\n\n\n@add_metaclass(ABCMeta)\nclass VAE(object):\n \"\"\"\n Abstract class for the variational auto-encoder. The abstract method\n for training the network is implemented by subclasses.\n \"\"\"\n\n def __init__(self, args, train_loader, test_loader):\n self.args = args\n self.vae_encoder = Encoder()\n self.vae_decoder = Decoder()\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.mode = TRAIN\n\n def set_train(self, is_train=True):\n if is_train:\n self.mode = TRAIN\n self.vae_encoder.train()\n self.vae_decoder.train()\n else:\n self.mode = TEST\n self.vae_encoder.eval()\n self.vae_decoder.eval()\n\n @abstractmethod\n def compute_loss_and_gradient(self, x):\n \"\"\"\n Given a batch of data `x`, run the optimizer (backpropagate the gradient),\n and return the computed loss.\n\n :param x: batch of data or a single datum (MNIST image).\n :return: loss computed on the data batch.\n \"\"\"\n return\n\n def model_eval(self, x):\n \"\"\"\n Given a batch of data `x`, run it through the trained VAE network to get\n the reconstructed image.\n\n :param x: batch of data or a single datum (MNIST image).\n :return: reconstructed image, and the latent z's mean and variance.\n \"\"\"\n z_mean, z_var = self.vae_encoder(x)\n if self.mode == TRAIN:\n z = Normal(z_mean, z_var.sqrt()).sample()\n else:\n z = z_mean\n return self.vae_decoder(z), z_mean, z_var\n\n def train(self, epoch):\n self.set_train(is_train=True)\n train_loss = 0\n for batch_idx, (x, _) in enumerate(self.train_loader):\n loss = self.compute_loss_and_gradient(x)\n train_loss += loss\n print('====> Epoch: {} \\nTraining loss: {:.4f}'.format(\n epoch, train_loss / len(self.train_loader.dataset)))\n\n def test(self, epoch):\n self.set_train(is_train=False)\n test_loss = 0\n for i, (x, _) in enumerate(self.test_loader):\n with torch.no_grad():\n recon_x = self.model_eval(x)[0]\n test_loss += self.compute_loss_and_gradient(x)\n if i == 0:\n n = min(x.size(0), 8)\n comparison = torch.cat([x[:n],\n recon_x.reshape(self.args.batch_size, 1, 28, 28)[:n]])\n save_image(comparison.detach().cpu(),\n 
os.path.join(OUTPUT_DIR, 'reconstruction_' + str(epoch) + '.png'),\n nrow=n)\n\n test_loss /= len(self.test_loader.dataset)\n print('Test set loss: {:.4f}'.format(test_loss))\n\n\nclass PyTorchVAEImpl(VAE):\n \"\"\"\n Adapted from pytorch/examples.\n Source: https://github.com/pytorch/examples/tree/master/vae\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PyTorchVAEImpl, self).__init__(*args, **kwargs)\n self.optimizer = self.initialize_optimizer(lr=1e-3)\n\n def compute_loss_and_gradient(self, x):\n self.optimizer.zero_grad()\n recon_x, z_mean, z_var = self.model_eval(x)\n binary_cross_entropy = functional.binary_cross_entropy(recon_x, x.reshape(-1, 784))\n # Uses analytical KL divergence expression for D_kl(q(z|x) || p(z))\n # Refer to Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # (https://arxiv.org/abs/1312.6114)\n kl_div = -0.5 * torch.sum(1 + z_var.log() - z_mean.pow(2) - z_var)\n kl_div /= self.args.batch_size * 784\n loss = binary_cross_entropy + kl_div\n if self.mode == TRAIN:\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n def initialize_optimizer(self, lr=1e-3):\n model_params = itertools.chain(self.vae_encoder.parameters(), self.vae_decoder.parameters())\n return torch.optim.Adam(model_params, lr)\n\n\nclass PyroVAEImpl(VAE):\n \"\"\"\n Implementation of VAE using Pyro. Only the model and the guide specification\n is needed to run the optimizer (the objective function does not need to be\n specified as in the PyTorch implementation).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PyroVAEImpl, self).__init__(*args, **kwargs)\n self.optimizer = self.initialize_optimizer(lr=1e-3)\n\n def model(self, data):\n decoder = pyro.module('decoder', self.vae_decoder)\n z_mean, z_std = torch.zeros([data.size(0), 20]), torch.ones([data.size(0), 20])\n with pyro.plate('data', data.size(0)):\n z = pyro.sample('latent', Normal(z_mean, z_std).to_event(1))\n img = decoder.forward(z)\n pyro.sample('obs',\n Bernoulli(img).to_event(1),\n obs=data.reshape(-1, 784))\n\n def guide(self, data):\n encoder = pyro.module('encoder', self.vae_encoder)\n with pyro.plate('data', data.size(0)):\n z_mean, z_var = encoder.forward(data)\n pyro.sample('latent', Normal(z_mean, z_var.sqrt()).to_event(1))\n\n def compute_loss_and_gradient(self, x):\n if self.mode == TRAIN:\n loss = self.optimizer.step(x)\n else:\n loss = self.optimizer.evaluate_loss(x)\n loss /= self.args.batch_size * 784\n return loss\n\n def initialize_optimizer(self, lr):\n optimizer = Adam({'lr': lr})\n elbo = JitTrace_ELBO() if self.args.jit else Trace_ELBO()\n return SVI(self.model, self.guide, optimizer, loss=elbo)\n\n\ndef setup(args):\n pyro.set_rng_seed(args.rng_seed)\n train_loader = util.get_data_loader(dataset_name='MNIST',\n data_dir=DATA_DIR,\n batch_size=args.batch_size,\n is_training_set=True,\n shuffle=True)\n test_loader = util.get_data_loader(dataset_name='MNIST',\n data_dir=DATA_DIR,\n batch_size=args.batch_size,\n is_training_set=False,\n shuffle=True)\n global OUTPUT_DIR\n OUTPUT_DIR = os.path.join(RESULTS_DIR, args.impl)\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n pyro.clear_param_store()\n return train_loader, test_loader\n\n\ndef main(args):\n train_loader, test_loader = setup(args)\n if args.impl == 'pyro':\n vae = PyroVAEImpl(args, train_loader, test_loader)\n print('Running Pyro VAE implementation')\n elif args.impl == 'pytorch':\n vae = PyTorchVAEImpl(args, train_loader, test_loader)\n print('Running PyTorch 
VAE implementation')\n else:\n raise ValueError('Incorrect implementation specified: {}'.format(args.impl))\n for i in range(args.num_epochs):\n vae.train(i)\n if not args.skip_eval:\n vae.test(i)\n\n\nif __name__ == '__main__':\n assert pyro.__version__.startswith('0.3.0')\n parser = argparse.ArgumentParser(description='VAE using MNIST dataset')\n parser.add_argument('-n', '--num-epochs', nargs='?', default=10, type=int)\n parser.add_argument('--batch_size', nargs='?', default=128, type=int)\n parser.add_argument('--rng_seed', nargs='?', default=0, type=int)\n parser.add_argument('--impl', nargs='?', default='pyro', type=str)\n parser.add_argument('--skip_eval', action='store_true')\n parser.add_argument('--jit', action='store_true')\n parser.set_defaults(skip_eval=False)\n args = parser.parse_args()\n main(args)\n"
] | [
[
"torch.optim.Adam",
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rushike/polymuse-future | [
"25af861e11fc3f4f95327405fec15d48bcc84a62"
] | [
"polymuse/drawer.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy, json\nimport matplotlib \n\n\"\"\"\nThis files has some function to draw the traing results. Like training losses, accuraces\n\"\"\"\n\nfont = {'family' : 'normal',\n 'weight' : 'bold',\n 'size' : 22}\n\nlines = {\n 'linewidth': 7,\n }\n\n\nmatplotlib.rc('lines', **lines)\nmatplotlib.rc('font', **font)\n\ndef draw_json_loss_acc(j_fn, j_ft): #draw note and time\n with open(j_fn, 'r') as j_file:\n j_strn = j_file\n dictn = json.load(j_strn)\n\n xn_loss = dictn['loss']\n xn_val_loss = dictn['val_loss']\n\n xn_acc = dictn['acc']\n xn_val_acc = dictn['val_acc']\n\n with open(j_ft, 'r') as j_file:\n j_strt = j_file\n dictn = json.load(j_strt)\n\n xt_loss = dictn['loss']\n xt_val_loss = dictn['val_loss']\n\n xt_acc = dictn['acc']\n xt_val_acc = dictn['val_acc']\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n\n ax = fig.add_subplot(2, 2, 1)\n x = numpy.arange(len(xn_loss))\n ax.plot(x, xn_loss, label = \"Loss vs Epochs\")\n ax.plot(x, xn_val_loss, label = \"Val Loss vs Epochs\") \n ax.title.set_text(\"Note Loss\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax = fig.add_subplot(2, 2, 2)\n x = numpy.arange(len(xn_loss))\n ax.plot(x, xn_acc, label = \"Acc vs Epochs\")\n ax.plot(x, xn_val_acc, label = \"Val Acc vs Epochs\")\n ax.title.set_text(\"Note Acc\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax = fig.add_subplot(2, 2, 3)\n x = numpy.arange(len(xt_loss))\n ax.plot(x, xt_loss, label = \"Loss vs Epochs\")\n ax.plot(x, xt_val_loss, label = \"Val Loss vs Epochs\") \n ax.title.set_text(\"Time Loss\") \n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax = fig.add_subplot(2, 2, 4)\n x = numpy.arange(len(xt_loss))\n ax.plot(x, xt_acc, label = \"Acc vs Epochs\")\n ax.plot(x, xt_val_acc, label = \"Val Acc vs Epochs\")\n ax.title.set_text(\"Time Acc\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n \n\n plt.show()\n\ndef draw_json_oct_loss_acc(j_fn, j_ft): #draw note and time\n with open(j_fn, 'r') as j_file:\n j_strn = j_file\n dictn = json.load(j_strn)\n\n xn_loss = dictn['loss']\n xn_val_loss = dictn['val_loss']\n\n xn_acc = dictn['acc']\n xn_val_acc = dictn['val_acc']\n\n \n xn_ocloss = dictn['octave_loss']\n xn_val_ocloss = dictn['val_octave_loss']\n\n with open(j_ft, 'r') as j_file:\n j_strt = j_file\n dictn = json.load(j_strt)\n\n xt_loss = dictn['loss']\n xt_val_loss = dictn['val_loss']\n\n xt_acc = dictn['acc']\n xt_val_acc = dictn['val_acc']\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n\n ax = fig.add_subplot(2, 2, 1)\n x = numpy.arange(len(xn_loss))\n ax.plot(x, xn_loss, label = \"Loss vs Epochs\")\n ax.plot(x, xn_val_loss, label = \"Val Loss vs Epochs\") \n ax.plot(x, xn_ocloss, label = \"Oct Loss vs Epochs\") \n ax.plot(x, xn_val_ocloss, label = \"Val Oct Loss vs Epochs\") \n ax.title.set_text(\"Note Loss\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax = fig.add_subplot(2, 2, 2)\n x = numpy.arange(len(xn_loss))\n ax.plot(x, xn_acc, label = \"Acc vs Epochs\")\n ax.plot(x, xn_val_acc, label = \"Val Acc vs Epochs\")\n ax.title.set_text(\"Note Acc\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax = fig.add_subplot(2, 2, 3)\n x = numpy.arange(len(xt_loss))\n ax.plot(x, xt_loss, label = \"Loss vs Epochs\")\n ax.plot(x, xt_val_loss, label = \"Val Loss vs Epochs\") \n ax.title.set_text(\"Time Loss\") \n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax 
= fig.add_subplot(2, 2, 4)\n x = numpy.arange(len(xt_loss))\n ax.plot(x, xt_acc, label = \"Acc vs Epochs\")\n ax.plot(x, xt_val_acc, label = \"Val Acc vs Epochs\")\n ax.title.set_text(\"Time Acc\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n \n\n plt.show()\n\n\ndef draw_json_loss_acc_1(j_fn): #draw note and time\n with open(j_fn, 'r') as j_file:\n j_strn = j_file\n dictn = json.load(j_strn)\n\n xn_loss = dictn['loss']\n xn_val_loss = dictn['val_loss']\n\n xn_acc = dictn['acc']\n xn_val_acc = dictn['val_acc']\n\n fig = plt.figure()\n fig.subplots_adjust(hspace=0.4, wspace=0.4)\n\n ax = fig.add_subplot(1, 2, 1)\n x = numpy.arange(len(xn_loss))\n ax.plot(x, xn_loss, label = \"Loss vs Epochs\")\n ax.plot(x, xn_val_loss, label = \"Val Loss vs Epochs\") \n ax.title.set_text(\"Note Loss\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n ax = fig.add_subplot(1, 2, 2)\n x = numpy.arange(len(xn_loss))\n ax.plot(x, xn_acc, label = \"Acc vs Epochs\")\n ax.plot(x, xn_val_acc, label = \"Val Acc vs Epochs\")\n ax.title.set_text(\"Note Acc\")\n # ax.xlabel(\"Epochs\")\n # ax.ylabel(\"Loss\")\n ax.legend()\n\n\n plt.show()\n\n\ndef draw_sFlat(seq:list, xtickshead:list, ytickshead:list, labels: list):\n #seq : - sFlat list\n #sFlat : - tracks, note_instances, depth\n for i, s in enumerate(seq):\n x = numpy.arange(s.shape[1])\n y = s[0, :, 0]\n\n plt.plot(x, y, label= labels[i])\n plt.xlabel(xtickshead[i])\n plt.ylabel(ytickshead[i])\n \n plt.legend()\n plt.show()"
] | [
[
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.rc",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
raikel/dnfas | [
"56b4dfbef33fd9ad6e6504d1cb88105069b57d70"
] | [
"dfapi/tests/factory.py"
] | [
"import shutil\nfrom datetime import timedelta\nfrom os import path, mkdir\nfrom uuid import uuid4\n\nimport cv2 as cv\nimport numpy as np\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.utils import timezone\nfrom faker import Faker\n\nfrom ..models import (\n Face,\n Frame,\n Subject,\n SubjectSegment,\n VideoRecord,\n Camera,\n Notification,\n Stat,\n Recognition\n)\n\nTZ = timezone.get_current_timezone()\n\nFAKER = Faker()\n\nCURR_DIR = path.abspath(path.dirname(__file__))\nFACE_IMAGE_PATH = path.join(CURR_DIR, 'data/face.jpg')\nFRAME_IMAGE_PATH = path.join(CURR_DIR, 'data/frame.jpg')\nVIDEO_LARGE_PATH = path.join(CURR_DIR, 'data/video_large.mp4')\nVIDEO_SMALL_PATH = path.join(CURR_DIR, 'data/video_small.mp4')\n\n\ndef filter_keys(data: dict, keys: list):\n data_filtered = {}\n for key in keys:\n if key in data.keys():\n data_filtered[key] = data[key]\n return data_filtered\n\n\ndef create_video_file(file_path, media_path):\n _, ext = path.splitext(file_path)\n video_name = f'video_{uuid4()}{ext}'\n video_dir = path.join(settings.MEDIA_ROOT, media_path)\n if not path.exists(video_dir):\n mkdir(video_dir)\n shutil.copy2(file_path, path.join(video_dir, video_name))\n return video_name\n\n\ndef create_image_file(file_path, media_path):\n image_name = f'face_{uuid4()}.jpg'\n rel_path = path.join(media_path, image_name)\n full_path = path.join(settings.MEDIA_ROOT, rel_path)\n image = cv.imread(file_path)\n cv.imwrite(full_path, image)\n return rel_path\n\n\nclass ModelFactory:\n\n model_cls = None\n MODEL_REQUIRED_FIELDS = []\n API_REQUIRED_FIELDS = []\n API_READ_FIELDS = []\n\n def create_instance(self, full: bool = True):\n data = self.instance_data()\n if not full:\n data = filter_keys(data, self.MODEL_REQUIRED_FIELDS)\n return self.model_cls.objects.create(**data)\n\n def create_instances(self, full: bool = True, count: int = 5):\n return [self.create_instance(full) for _ in range(count)]\n\n def instance_data(self):\n return {}\n\n def api_post_data(self, full: bool = True):\n data = {}\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n return data\n\n\nclass FrameFactory(ModelFactory):\n\n model_cls = Frame\n MODEL_REQUIRED_FIELDS = ['image']\n API_REQUIRED_FIELDS = ['image']\n API_READ_FIELDS = [\n 'id',\n 'image',\n 'timestamp',\n 'faces'\n ]\n\n def instance_data(self):\n return dict(\n image=create_image_file(\n FRAME_IMAGE_PATH,\n settings.FACES_IMAGES_PATH\n ),\n timestamp=timezone.now()\n )\n\n def api_post_data(self, full: bool = True):\n with open(FRAME_IMAGE_PATH, 'rb') as image_file:\n image = SimpleUploadedFile(\n 'frame.jpg',\n image_file.read(),\n content_type=\"image/[jpg,png,gif]\"\n )\n data = {\n 'image': image,\n 'timestamp': timezone.now()\n }\n\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n\n return data\n\n\nclass FaceFactory(ModelFactory):\n\n model_cls = Face\n MODEL_REQUIRED_FIELDS = []\n API_REQUIRED_FIELDS = ['image']\n API_READ_FIELDS = [\n 'id',\n 'image',\n 'frame',\n 'box',\n 'subject',\n 'created_at',\n 'timestamp'\n ]\n\n def instance_data(self):\n frame_factory = FrameFactory()\n subject_factory = SubjectFactory()\n return dict(\n frame=frame_factory.create_instance(),\n image=create_image_file(\n FACE_IMAGE_PATH,\n settings.FACES_IMAGES_PATH\n ),\n subject=subject_factory.create_instance(),\n box=(0, 0, 1, 1),\n embeddings=np.random.uniform(0, 1, 512),\n landmarks=np.random.uniform(0, 1, 10),\n timestamp=timezone.now()\n )\n\n def 
api_post_data(self, full: bool = True):\n subject_factory = SubjectFactory()\n with open(FACE_IMAGE_PATH, 'rb') as image_file:\n image = SimpleUploadedFile(\n 'face.jpg',\n image_file.read(),\n content_type=\"image/[jpg,png,gif]\"\n )\n data = {\n 'image': image,\n 'subject': subject_factory.create_instance().pk\n }\n\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n\n return data\n\n\nclass CameraFactory(ModelFactory):\n\n model_cls = Camera\n MODEL_REQUIRED_FIELDS = ['name', 'stream_url']\n API_REQUIRED_FIELDS = ['name', 'stream_url']\n API_READ_FIELDS = [\n 'id',\n 'created_at',\n 'updated_at',\n 'stream_url',\n 'name',\n 'location_lat',\n 'location_lon',\n 'address',\n 'running_tasks',\n 'frames_count',\n 'processing_time',\n 'frame_rate',\n 'faces_count',\n 'last_task_at'\n ]\n\n def instance_data(self):\n return dict(\n stream_url=VIDEO_LARGE_PATH,\n name=f'Camera {FAKER.pyint()}',\n location_lat=FAKER.pydecimal(min_value=-90, max_value=90),\n location_lon=FAKER.pydecimal(min_value=-180, max_value=180),\n address=FAKER.address(),\n )\n\n def api_post_data(self, full: bool = True):\n data = self.instance_data()\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n return data\n\n\nclass VideoFactory(ModelFactory):\n\n model_cls = VideoRecord\n MODEL_REQUIRED_FIELDS = ['path']\n API_REQUIRED_FIELDS = ['path']\n API_READ_FIELDS = [\n 'id',\n 'starts_at',\n 'finish_at',\n 'created_at',\n 'updated_at',\n 'frame_width',\n 'frame_height',\n 'duration_seconds',\n 'size',\n 'url',\n 'thumbs',\n 'running_tasks',\n 'frames_count',\n 'processing_time',\n 'frame_rate',\n 'faces_count',\n 'last_task_at'\n ]\n\n def instance_data(self):\n return dict(\n path=create_video_file(\n VIDEO_SMALL_PATH,\n settings.VIDEO_RECORDS_PATH\n ),\n starts_at=timezone.now(),\n finish_at=timezone.now() + timedelta(seconds=60)\n )\n\n def api_post_data(self, full: bool = True):\n data = self.instance_data()\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n return data\n\n\nclass SubjectFactory(ModelFactory):\n\n model_cls = Subject\n MODEL_REQUIRED_FIELDS = []\n API_REQUIRED_FIELDS = ['name']\n API_READ_FIELDS = [\n 'id',\n 'faces',\n 'name',\n 'last_name',\n 'full_name',\n 'age',\n 'birthdate',\n 'sex',\n 'skin',\n 'created_at',\n 'updated_at',\n 'task'\n ]\n\n def instance_data(self):\n return dict(\n name=FAKER.first_name(),\n last_name=FAKER.last_name(),\n birthdate=FAKER.date(),\n sex=Subject.SEX_MAN,\n skin=Subject.SKIN_WHITE,\n )\n\n def api_post_data(self, full: bool = True):\n data = self.instance_data()\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n return data\n\n\nclass NotificationFactory(ModelFactory):\n\n model_cls = Notification\n MODEL_REQUIRED_FIELDS = [\n 'category',\n 'dtype',\n 'title',\n 'message',\n 'resource',\n 'seen'\n ]\n API_REQUIRED_FIELDS = []\n API_READ_FIELDS = [\n 'id',\n 'category',\n 'dtype',\n 'title',\n 'message',\n 'timestamp',\n 'resource',\n 'seen'\n ]\n\n def instance_data(self):\n return dict(\n category=Notification.CATEGORY_TASK,\n dtype=Notification.DTYPE_ERROR,\n title=FAKER.text(),\n message=FAKER.text(),\n resource=FAKER.pyint(),\n seen=FAKER.pybool(),\n )\n\n\nclass StatFactory(ModelFactory):\n\n model_cls = Stat\n MODEL_REQUIRED_FIELDS = [\n 'category',\n 'dtype',\n 'title',\n 'message',\n 'resource',\n 'seen'\n ]\n API_REQUIRED_FIELDS = []\n API_READ_FIELDS = [\n 'id',\n 'name',\n 'timestamp',\n 'updated_at',\n 'value',\n 'resolution'\n ]\n\n def instance_data(self):\n return dict(\n 
name=FAKER.text(),\n timestamp=FAKER.past_datetime(tzinfo=TZ),\n value=FAKER.pyfloat(),\n resolution=FAKER.random_element(Stat.RESOLUTION_CHOICES)[0]\n )\n\n\nclass SubjectSegmentFactory(ModelFactory):\n\n model_cls = SubjectSegment\n MODEL_REQUIRED_FIELDS = ['title']\n API_REQUIRED_FIELDS = ['title']\n API_READ_FIELDS = [\n 'id',\n 'disk_cached',\n 'title',\n 'name',\n 'naming',\n 'last_name',\n 'min_age',\n 'max_age',\n 'min_timestamp',\n 'max_timestamp',\n 'sex',\n 'skin',\n 'count',\n 'cameras',\n 'videos',\n 'tasks'\n ]\n\n def create_instance(self, full: bool = True):\n\n data = self.instance_data()\n videos = data.pop('videos')\n cameras = data.pop('cameras')\n if not full:\n data = filter_keys(data, self.MODEL_REQUIRED_FIELDS)\n instance = self.model_cls.objects.create(**data)\n instance.videos.add(*videos)\n instance.cameras.add(*cameras)\n\n return instance\n\n def instance_data(self):\n camera_factory = CameraFactory()\n video_factory = VideoFactory()\n return dict(\n disk_cached=True,\n title=f'Segment {FAKER.pyint()}',\n name=FAKER.name(),\n naming=SubjectSegment.NAMING_NAMED,\n last_name=FAKER.last_name(),\n min_birthdate=FAKER.past_date().isoformat(),\n max_birthdate=FAKER.future_date().isoformat(),\n min_timestamp=FAKER.past_datetime(tzinfo=TZ).isoformat(),\n max_timestamp=FAKER.future_datetime(tzinfo=TZ).isoformat(),\n sex=Subject.SEX_MAN,\n skin=Subject.SKIN_WHITE,\n count=0,\n model_path=f'Segment_{uuid4()}.npz',\n updated_at=timezone.now().isoformat(),\n cameras=[camera_factory.create_instance()],\n videos=[video_factory.create_instance()]\n )\n\n def api_post_data(self, full: bool = True):\n camera_factory = CameraFactory()\n video_factory = VideoFactory()\n data = dict(\n disk_cached=True,\n title=f'Segment {FAKER.pyint()}',\n name=FAKER.name(),\n naming=SubjectSegment.NAMING_NAMED,\n last_name=FAKER.last_name(),\n min_age=1,\n max_age=FAKER.pyint(min_value=1, max_value=70),\n min_timestamp=FAKER.past_datetime(tzinfo=TZ).isoformat(),\n max_timestamp=FAKER.future_datetime(tzinfo=TZ).isoformat(),\n sex=Subject.SEX_MAN,\n skin=Subject.SKIN_WHITE,\n cameras=[camera_factory.create_instance().pk],\n videos=[video_factory.create_instance().pk]\n )\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n return data\n\n\nclass RecognitionFactory(ModelFactory):\n\n model_cls = Recognition\n MODEL_REQUIRED_FIELDS = ['face']\n API_REQUIRED_FIELDS = ['face']\n API_READ_FIELDS = [\n 'id',\n 'sim_thresh',\n 'max_matches',\n 'created_at',\n 'face',\n 'segments',\n 'filter',\n 'matches'\n ]\n\n def instance_data(self):\n face_factory = FaceFactory()\n return dict(\n sim_thresh=FAKER.pyfloat(min_value=0, max_value=1),\n max_matches=FAKER.pyint(min_value=1, max_value=10),\n face=face_factory.create_instance()\n )\n\n def api_post_data(self, full: bool = True):\n face_factory = FaceFactory()\n data = dict(\n sim_thresh=FAKER.pyfloat(min_value=0, max_value=1),\n max_matches=FAKER.pyint(min_value=1, max_value=10),\n face=face_factory.create_instance().pk\n )\n if not full:\n return filter_keys(data, self.API_REQUIRED_FIELDS)\n return data\n"
] | [
[
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kjanjua26/PolypDetect | [
"296815368d6504e50c2fbdc10e214bc46e98928c",
"296815368d6504e50c2fbdc10e214bc46e98928c"
] | [
"UNet_Segmentation/network.py",
"UNet_Segmentation/train.py"
] | [
"import tensorflow as tf\n\ndef make_unet(X, training):\n input_layer = X / 127.5 - 1\n conv1 = tf.layers.conv2d(inputs=input_layer, filters=8, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_1\")\n bn1 = tf.layers.batch_normalization(conv1, training=training, name=\"bn_1\")\n pool1 = tf.layers.max_pooling2d(bn1, (2, 2), strides=(2, 2), name=\"pool_1\")\n conv2 = tf.layers.conv2d(inputs=pool1, filters=16, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_2\")\n bn2 = tf.layers.batch_normalization(conv2, training=training, name=\"bn_2\")\n pool2 = tf.layers.max_pooling2d(bn2, (2, 2), strides=(2, 2), name=\"pool_2\")\n conv3 = tf.layers.conv2d(inputs=pool2, filters=32, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_3\")\n bn3 = tf.layers.batch_normalization(conv3, training=training, name=\"bn_3\")\n pool3 = tf.layers.max_pooling2d(bn3, (2, 2), strides=(2, 2), name=\"pool_3\")\n conv4 = tf.layers.conv2d(inputs=pool3, filters=64, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_4\")\n bn4 = tf.layers.batch_normalization(conv4, training=training, name=\"bn_4\")\n pool4 = tf.layers.max_pooling2d(bn4, (2, 2), strides=(2, 2), name=\"pool_4\")\n conv5 = tf.layers.conv2d(inputs=pool4, filters=128, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_5\")\n bn5 = tf.layers.batch_normalization(conv5, training=training, name=\"bn_5\")\n up_conv6 = tf.layers.conv2d_transpose(bn5, filters=64, kernel_size=2, strides=2, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"upconv_6\")\n concat6 = tf.concat([up_conv6, conv4], axis=-1, name=\"concat_6\")\n conv6 = tf.layers.conv2d(inputs=concat6, filters=32, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_6\") \n bn6 = tf.layers.batch_normalization(conv6, training=training, name=\"bn_6\")\n up_conv7 = tf.layers.conv2d_transpose(bn6, filters=32, kernel_size=2, strides=2, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"upconv_7\")\n concat7 = tf.concat([up_conv7,conv3], axis=-1, name=\"concat_7\")\n conv7 = tf.layers.conv2d(inputs=concat7, filters=32, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_7\")\n bn7 = tf.layers.batch_normalization(conv7, training=training, name=\"bn_7\")\n up_conv8 = tf.layers.conv2d_transpose(bn7, filters=16, kernel_size=2, strides=2, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"upconv_8\")\n concat8 = tf.concat([up_conv8, conv2], axis=-1, name=\"concat_8\")\n conv8 = tf.layers.conv2d(inputs=concat8, filters=32, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_8\")\n bn8 = tf.layers.batch_normalization(conv8, training=training, name=\"bn_8\")\n up_conv9 = tf.layers.conv2d_transpose(bn8, filters=8, kernel_size=2, strides=2, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"upconv_9\")\n concat9 = tf.concat([up_conv9, conv1], axis=-1, name=\"concat_9\")\n conv9 = tf.layers.conv2d(inputs=concat9, 
filters=32, kernel_size=[5, 5],padding=\"same\",activation=tf.nn.relu,kernel_regularizer=tf.contrib.layers.l2_regularizer(0.1), name=\"conv_layer_9\")\n bn9 = tf.layers.batch_normalization(conv9, training=training, name=\"bn_9\")\n out = tf.layers.conv2d(bn9, 1, (1, 1), name='final', activation=tf.nn.sigmoid, padding='same')\n return out\n\ndef IOU_(y_pred, y_true):\n H, W, _ = y_pred.get_shape().as_list()[1:]\n pred_flat = tf.reshape(y_pred, [-1, H * W])\n true_flat = tf.reshape(y_true, [-1, H * W])\n intersection = 10*tf.reduce_sum(pred_flat * true_flat, axis=1) + 1e-7\n union = tf.reduce_sum(pred_flat, axis=1) + tf.reduce_sum(true_flat, axis=1) + 1e-7\n return tf.reduce_mean(intersection / union)\n",
"import tensorflow as tf\nimport network\nimport utils\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nNUM_EPOCHS = 500\nBATCH_SIZE = 32\n\nX = tf.placeholder(tf.float32, shape=[None, 256, 256, 1], name=\"image\")\nY = tf.placeholder(tf.float32, shape=[None, 256, 256, 1], name=\"mask\")\nisTraining = tf.placeholder(tf.bool, name=\"iftraining\")\nglobal_step = tf.Variable(0, trainable=False, name='global_step')\n\ndef build_network(input_images, mask_labels):\n logits = network.make_unet(input_images, isTraining)\n loss = 10-network.IOU_(logits, mask_labels)\n return loss\n \ndef train(x_train, x_val, y_train, y_val):\n loss = build_network(X, Y)\n optimizer = tf.train.AdamOptimizer()\n train_op = optimizer.minimize(loss, global_step=global_step)\n summary_op = tf.summary.merge_all()\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n for epoch in range(1, NUM_EPOCHS+1):\n batch_images, batch_labels = utils.next_batch(BATCH_SIZE, x_train, y_train)\n _, train_iou = sess.run([train_op, loss], feed_dict={X: batch_images, Y: batch_labels, isTraining: True})\n print('Step: {} Loss: {}'.format(epoch, train_iou))\n if epoch % 100 == 0:\n val_iou = sess.run([loss], feed_dict={X: x_val, Y: y_val, isTraining: False})\n print(\"\")\n print(\"Step: {} Loss: {}\".format(epoch, val_iou[0]))\n save_path = saver.save(sess, \"model/model-epoch{}.ckpt\".format(epoch))\n print(\"Model saved for epoch # {}\".format(epoch))\n print(\"\")\n\nif __name__ == \"__main__\":\n x_train, x_val, y_train, y_val = utils.train_test_split_data('CVC-ClinicDB')\n train(x_train, x_val, y_train, y_val)\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.concat",
"tensorflow.layers.batch_normalization",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.layers.max_pooling2d",
"tensorflow.contrib.layers.l2_regularizer"
],
[
"tensorflow.Variable",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
hadivafaii/vedo | [
"15f9adbd36d25c0212cbd4eb0c15af54c19f3819",
"15f9adbd36d25c0212cbd4eb0c15af54c19f3819",
"15f9adbd36d25c0212cbd4eb0c15af54c19f3819",
"15f9adbd36d25c0212cbd4eb0c15af54c19f3819"
] | [
"examples/volumetric/multiscalars.py",
"examples/pyplot/numpy2picture.py",
"examples/basic/flatarrow.py",
"examples/simulations/grayscott.py"
] | [
"\"\"\"A Volume can have multiple\nscalars associated to each voxel\"\"\"\nfrom vedo import *\nimport numpy as np\n\nvol = Volume(dataurl+'vase.vti')\nnx, ny, nz = vol.dimensions()\nr0,r1 = vol.scalarRange()\nvol.addScalarBar3D(title='original voxel scalars')\n\n\n# create a set of scalars and add it to the Volume\nsc1 = np.linspace(r0,r1, num=nx*ny*nz)#.astype(np.uint8)\nvol.addPointArray(sc1, \"myscalars1\")\n\n# create another set of scalars and add it to the Volume\nsc2 = np.random.randint(-100,+100, nx*ny*nz)\nvol.addPointArray(sc2, \"myscalars2\")\n\n# make SLCImage scalars the active array (can set 0, to pick the first):\nprintc('Arrays in Volume are:\\n', vol.getArrayNames(), invert=True)\nvol.selectPointArray('SLCImage') # select the first data array\n\n# Build the isosurface of the active scalars,\n# but use testscals1 to colorize this isosurface, and then smooth it\niso1 = vol.isosurface().cmap('jet', 'myscalars1').smoothWSinc().lw(0.1)\niso1.addScalarBar3D(title='myscalars1')\n\niso2 = vol.isosurface().cmap('viridis', 'myscalars2')\niso2.addScalarBar3D(title='myscalars2')\n\nshow([(vol, __doc__),\n (iso1,\"Colorize isosurface using\\nmyscalars1\"),\n (iso2,\"Colorize isosurface using\\nmyscalars2\"),\n ], N=3, axes=1\n).close()",
"\"\"\"Load a picture with matplotlib imread()\nand make it a 3D object\"\"\"\nfrom matplotlib.image import imread\nfrom vedo import *\n\nfname = download('https://vedo.embl.es/examples/data/images/tropical.jpg')\n\narr = imread(fname)\n\npic = Picture(arr) # create Picture object from numpy array\n\nshow(pic, __doc__, axes=7).close()\n",
"\"\"\"Use 2 lines to define a flat arrow\"\"\"\nfrom vedo import *\nfrom numpy import arange\n\narrs = []\nfor i in range(10):\n s, c = sin(i), cos(i)\n l1 = [[sin(x)+c, -cos(x)+s, x] for x in arange(0,3, 0.1)]\n l2 = [[sin(x)+c+0.1, -cos(x)+s + x/15, x] for x in arange(0,3, 0.1)]\n arrs.append(FlatArrow(l1, l2, c=i, tipSize=1, tipWidth=1))\n\n# three points, aka ellipsis, retrieves the list of all created actors\nshow(arrs, __doc__, viewup=\"z\", axes=1).close()\n",
"# -----------------------------------------------------------------------------\n# From Numpy to Python\n# Copyright (2017) Nicolas P. Rougier - BSD license\n# More information at https://github.com/rougier/numpy-book\n# https://www.labri.fr/perso/nrougier/from-python-to-numpy/code/gray_scott.py\n# Parameters from http://www.aliensaint.com/uo/java/rd\n# Adapted for vedo by Marco Musy (2020)\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom vedo import Grid, Latex, show, interactive, settings\n\n# ---------------------------------------------------------------\nNsteps = 300\nn = 200 # grid subdivisions\n#Du, Dv, F, k, name = 0.16, 0.08, 0.035, 0.065, 'Bacteria 1'\n#Du, Dv, F, k, name = 0.14, 0.06, 0.035, 0.065, 'Bacteria 2'\n#Du, Dv, F, k, name = 0.16, 0.08, 0.060, 0.062, 'Coral'\n#Du, Dv, F, k, name = 0.19, 0.05, 0.060, 0.062, 'Fingerprint'\n#Du, Dv, F, k, name = 0.10, 0.10, 0.018, 0.050, 'Spirals'\n#Du, Dv, F, k, name = 0.12, 0.08, 0.020, 0.050, 'Spirals Dense'\n#Du, Dv, F, k, name = 0.10, 0.16, 0.020, 0.050, 'Spirals Fast'\n#Du, Dv, F, k, name = 0.16, 0.08, 0.020, 0.055, 'Unstable'\n#Du, Dv, F, k, name = 0.16, 0.08, 0.050, 0.065, 'Worms 1'\n#Du, Dv, F, k, name = 0.16, 0.08, 0.054, 0.063, 'Worms 2'\nDu, Dv, F, k, name = 0.16, 0.08, 0.035, 0.060, 'Zebrafish'\n# ---------------------------------------------------------------\n\n\nZ = np.zeros((n+2, n+2), [('U', np.double), ('V', np.double)])\nU, V = Z['U'], Z['V']\nu, v = U[1:-1, 1:-1], V[1:-1, 1:-1]\n\nr = 20\nu[...] = 1.0\nU[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50\nV[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25\nu += 0.05*np.random.uniform(-1, 1, (n, n))\nv += 0.05*np.random.uniform(-1, 1, (n, n))\n\nsy, sx = V.shape\ngrd = Grid(sx=sx, sy=sy, resx=sx, resy=sy)\ngrd.lineWidth(0).wireframe(False).lighting(ambient=0.5)\nformula = r'(u,v)=(D_u\\cdot\\Delta u -u v v+F(1-u), D_v\\cdot\\Delta v +u v v -(F+k)v)'\nltx = Latex(formula, s=15, pos=(0,-sy/1.9,0))\nprint('Du, Dv, F, k, name =', Du, Dv, F, k, name)\nsettings.useDepthPeeling = False\n\nfor step in range(Nsteps):\n for i in range(25):\n Lu = ( U[0:-2, 1:-1] +\n U[1:-1, 0:-2] - 4*U[1:-1, 1:-1] + U[1:-1, 2:] +\n U[2: , 1:-1])\n Lv = ( V[0:-2, 1:-1] +\n V[1:-1, 0:-2] - 4*V[1:-1, 1:-1] + V[1:-1, 2:] +\n V[2: , 1:-1])\n uvv = u*v*v\n u += Du*Lu - uvv + F*(1-u)\n v += Dv*Lv + uvv - (F+k)*v\n\n grd.cmap('ocean_r', V.ravel(), on='cells', arrayName=\"escals\")\n grd.mapCellsToPoints()\n newpts = grd.points()\n newpts[:,2] = grd.getPointArray('escals')*25 # assign z\n grd.points(newpts) # set the new points\n plt = show(ltx, grd, zoom=1.25, elevation=-.15, bg='linen', interactive=False)\n if plt.escaped: break # if ESC is hit during loop\n\ninteractive().close()"
] | [
[
"numpy.linspace",
"numpy.random.randint"
],
[
"matplotlib.image.imread"
],
[
"numpy.arange"
],
[
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
petroniocandido/STPE | [
"0303224fadddd40f86b816432e1a594afaebe8fe"
] | [
"distribuicoes.py"
] | [
"import numpy as np \nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n\ndef phi(px):\n ''' Função característica da PMF P(x) '''\n rads = np.linspace(-np.pi, np.pi, 100)\n ret = { w : np.sum([px[x] * np.exp(w*1j*x) for x in px.keys()]) for w in rads}\n return ret\n\n\ndef phi_plot(px, ax):\n fphi = phi(px)\n ax.plot([k for k in fphi.keys()], [k for k in fphi.values()])\n ax.set_xlabel(\"$\\omega$\")\n ax.set_ylabel(\"$\\phi(\\omega)$\")\n \n\ndef momento(px, n):\n ''' Gera o momento de n-enésima ordem da PMF P(x) '''\n ret = 0\n for x in px.keys():\n ret += (x ** n) * px[x]\n return ret\n\n\ndef momento_central(px, n):\n ''' Gera o momento central de n-enésima ordem da PMF P(x) '''\n mu = momento(px, 1)\n ret = 0\n for x in px.keys():\n ret += (x - mu) ** n * px[x]\n return ret\n\n\ndef momento_normalizado(px, n):\n ''' Gera o momento central normalizado de n-enésima ordem da PMF P(x) '''\n mu = momento(px, 1)\n sigma = momento_central(px, 2)\n ret = 0\n for x in px.keys():\n ret += ((x - mu)/sigma) ** n * px[x]\n return ret\n\ndef E(X, **kwargs):\n ''' Calcula o valor esperado da PMF P(x) '''\n m,n = X.shape\n e = 0.0\n modo = kwargs.get(\"modo\", \"realizacao\") # tempo, realizacao, ensemble\n if modo == \"tempo\":\n t = kwargs.get(\"t\", 0)\n e = X[:, t].mean()\n elif modo == \"realizacao\":\n r = kwargs.get(\"r\", 0)\n e = X[r, :].mean()\n else:\n e = X.mean()\n return e\n\ndef Var(X, k, **kwargs):\n ''' Calcula a variância da PMF P(x) '''\n m,n = X.shape\n mx = E(X, **kwargs)\n v = 0.0\n modo = kwargs.get(\"modo\", \"realizacao\") # tempo, realizacao, ensemble\n if modo == \"tempo\":\n t = kwargs.get(\"t\", 0)\n v = np.mean( (X[:, t] - mx)**2 )\n elif modo == \"realizacao\":\n r = kwargs.get(\"r\", 0)\n v = np.mean( (X[r, :] - mx)**2 )\n else:\n v = np.mean( (X - mx)**2 )\n return v\n\ndef Cov(X, k, **kwargs):\n ''' Calcula a autocovariância do processo estocástico X para a defasagem k '''\n \n m,n = X.shape\n modo = kwargs.get(\"modo\", \"realizacao\")\n mx = E(X, **kwargs)\n \n if modo == \"realizacao\":\n c = np.zeros(n-k)\n r = kwargs.get(\"r\", 0)\n for i in range(n - k):\n c[i] = (X[r,i] - mx)*(X[r,i+k] - mx)\n else:\n c = np.zeros((m, n-k))\n for r in range(m):\n for i in range(n - k):\n c[r, i] = (X[r,i] - mx)*(X[r,i+k] - mx)\n\n c = c.mean()\n \n return c\n"
] | [
[
"numpy.exp",
"numpy.mean",
"numpy.zeros",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lee-hyeonseung/lab_dl | [
"b8906247b6e0e2586f538081e2efaf47dac34972",
"b8906247b6e0e2586f538081e2efaf47dac34972",
"b8906247b6e0e2586f538081e2efaf47dac34972",
"b8906247b6e0e2586f538081e2efaf47dac34972"
] | [
"ch04/ex10_nditer.py",
"ch06/ex01_matplot3d.py",
"ch03/ex11.py",
"ch07/ex08_pulling.py"
] | [
"\"\"\"\nnumpy.nditer 객체: 반복문(for, while)을 쓰기 쉽게 도와주는 객체\n\"\"\"\nimport numpy as np\n\nnp.random.seed(1231)\na = np.random.randint(100, size=(2, 3))\nprint(a)\n\n# 40 21 5 52 84 39\nfor row in a:\n for x in row:\n print(x, end=' ')\nprint()\n\ni = 0\nwhile i < a.shape[0]:\n j = 0\n while j < a.shape[1]:\n print(a[i, j], end=' ')\n j += 1\n i += 1\nprint()\n\nwith np.nditer(a) as iterator: # nditer 클래스 객체 생성\n for val in iterator:\n print(val, end=' ')\nprint()\n\nwith np.nditer(a, flags=['multi_index']) as iterator:\n while not iterator.finished:\n i = iterator.multi_index\n print(f'{i}:{a[i]}', end=' ')\n iterator.iternext()\nprint()\n\nwith np.nditer(a, flags=['c_index']) as iterator:\n while not iterator.finished:\n i = iterator.index\n print(f'[{i}]{iterator[0]}', end=' ')\n iterator.iternext()\nprint()\n\na = np.arange(6).reshape((2, 3))\nprint(a)\nwith np.nditer(a, flags=['multi_index']) as it:\n while not it.finished:\n a[it.multi_index] *= 2\n it.iternext()\nprint(a)\n\n\na = np.arange(6).reshape((2, 3))\nwith np.nditer(a, flags=['c_index'], op_flags=['readwrite']) as it:\n while not it.finished:\n it[0] *= 2\n it.iternext()\nprint(a)\n\n\n\n\n",
"import mpl_toolkits.mplot3d\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef fn(x, y):\n \"\"\"f(x, y) = (1/20) * x**2 + y**2\"\"\"\n return x**2 / 20 + y**2\n\ndef fn_derivative(x, y):\n return x/10, 2*y\n\nif __name__ == '__main__':\n\n x = np.linspace(-10, 10, 100) # x 좌표들\n y = np.linspace(-10, 10, 100) # y 좌표들\n\n # 3차원 그래프를 그리기 위해서\n X, Y = np.meshgrid(x, y)\n Z = fn(X, Y)\n\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n # projection 파라미터를 사용하려면 mpl_toolkits.mplot3d 패키지가 필요\n ax.contour3D(X, Y, Z, 100)\n plt.xlabel('x')\n plt.ylabel('y')\n\n plt.show()\n\n # 등고선(contour) 그래프\n plt.contour(X, Y, Z, 100, cmap='binary')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.axis('equal')\n plt.show()",
"\"\"\"\nmini-batch\n\"\"\"\nimport pickle\nimport numpy as np\nfrom ch03.ex01 import sigmoid\nfrom dataset.mnist import load_mnist\n\n\ndef softmax(X):\n \"\"\"\n 1) X - 1차원: [x_1, x_2, ..., x_n]\n 1) X - 2차원: [[x_11, x_12, ..., x_1n],\n [x_21, x_22, ..., x_2n],\n ...]\n \"\"\"\n dimension = X.ndim\n if dimension == 1:\n m = np.max(X) # 1차원 배열의 최댓값을 찾음.\n X = X - m # 0 이하의 숫자로 변환 <- exp함수의 overflow를 방지하기 위해서.\n y = np.exp(X) / np.sum(np.exp(X))\n elif dimension == 2:\n # m = np.max(X, axis=1).reshape((len(X), 1))\n # # len(X): 2차원 리스트 X의 row의 개수\n # X = X - m\n # sum = np.sum(np.exp(X), axis=1).reshape((len(X), 1))\n # y = np.exp(X) / sum\n Xt = X.T # X의 전치 행렬(transpose)\n m = np.max(Xt, axis=0)\n Xt = Xt - m\n y = np.exp(Xt) / np.sum(np.exp(Xt), axis=0)\n y = y.T\n\n return y\n\n\ndef init_network():\n \"\"\"가중치 행렬들(W1, W2, W3, b1, b2, b3)을 생성\"\"\"\n # 교재의 저자가 만든 가중치 행렬(sample_weight.pkl)을 읽어 옴.\n with open('sample_weight.pkl', mode='rb') as file:\n network = pickle.load(file)\n print(network.keys())\n # W1, W2, W3, b1, b2, b3 shape 확인\n return network\n\n\n\ndef accuracy(y_true, y_pred):\n \"\"\"테스트 데이터 레이블(y_true)과 테스트 데이터 예측값(y_predict)을 파라미터로 전달받아서,\n 정확도(accuracy) = (정답 개수)/(테스트 데이터 개수) 를 리턴.\"\"\"\n result = y_true == y_pred # 정답과 예측값의 비교(bool) 결과를 저장한 배열\n print(result[:10]) # [True, True, ..., False, ...]\n return np.mean(result) # True = 1, False = 0 으로 대체된 후 평균 계산됨.\n # (1 + 1 + ... + 0 + ...) / 전체 개수\n\n\ndef forward(network, x):\n \"\"\"\n 순방향 전파(forward propagation).\n 파라미터 x: 이미지 한 개의 정보를 가지고 있는 배열. (784,)\n \"\"\"\n # 가중치 행렬(Weight Matrices)\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n # bias matrices\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n # 첫번째 은닉층\n a1 = x.dot(W1) + b1\n z1 = sigmoid(a1)\n\n # 두번째 은닉층\n a2 = z1.dot(W2) + b2\n z2 = sigmoid(a2)\n\n # 출력층\n a3 = z2.dot(W3) + b3\n y = softmax(a3)\n\n return y\n\n\ndef mini_batch(network, X, batch_size):\n y_pred = np.array([]) # 예측값들을 저장할 배열\n # batch_size 만큼씩 X의 데이터들을 나눠서 forward propagation(전파)\n\n for i in range(0, len(X), batch_size):\n X_batch = X[i:(i + batch_size)]\n # print('i:', i, 'i+batch_size:', i+batch_size)\n y_hat = forward(network, X_batch) # (batch_size, 10) shape의 배열\n predictions = np.argmax(y_hat, axis=1) # 각 row에서 최댓값의 인덱스 -> (batch_size,) 배열\n y_pred = np.append(y_pred, predictions) # 예측값들을 결과 배열에 추가\n return y_pred # (len(X),) shape의 배열\n\n\ndef accuracy(y_true, y_pred):\n return np.mean(y_true == y_pred)\n\n\n\nif __name__ == '__main__':\n np.random.seed(2020)\n # 1차원 softmax 테스트\n a = np.random.randint(10, size=5)\n print(a)\n print(softmax(a))\n\n # 2차원 softmax 테스트\n A = np.random.randint(10, size=(2, 3))\n print(A)\n print(softmax(A))\n\n # (Train/Test) 데이터 세트 로드.\n (X_train, y_train), (X_test, y_test) = load_mnist(normalize=True,\n flatten=True,\n one_hot_label=False)\n print('X_test.shape:', X_test.shape)\n print('y_test.shape:', y_test.shape)\n\n # 신경망 생성 (W1, b1, ...)\n with open('sample_weight.pkl', 'rb') as file:\n network = pickle.load(file)\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n print('network:', network.keys())\n print('W1:', network['W1'].shape)\n print('W2:', network['W2'].shape)\n print('W3:', network['W3'].shape)\n\n batch_size = 77\n y_pred = mini_batch(network, X_test, batch_size)\n print('true[:10]', y_test[:10])\n print('pred[:10]', y_pred[:10])\n print('true[-10:]', y_test[-10:])\n print('pred[-10:]', y_pred[-10:])\n\n # 정확도(accuracy) 출력\n acc = 
accuracy(y_test, y_pred)\n print('정확도:', acc)\n\n # acc = accuracy(y_test, y_pred)\n # print('정확도(accuracy) =', acc)\n\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom dataset.mnist import load_mnist\n\n\ndef pooling1d(x, pool_size, stride=1):\n n = x.shape[0] # len(x)\n result_size = (n - pool_size) // stride + 1\n result = np.zeros(result_size)\n for i in range(result_size):\n x_sub = x[(i * stride):(i * stride) + pool_size]\n result[i] = np.max(x_sub)\n return result\n\n\ndef pooling2d(x, pool_h, pool_w, stride=1):\n \"\"\"\n\n :param x: 2-dim ndarray\n :param pool_h: pooling window height\n :param pool_w: pooling window width\n :param stride: 보폭\n :return: max-pooling\n \"\"\"\n h, w = x.shape[0], x.shape[1] # 원본 데이터의 height/widtgh\n oh = (h - pool_h) // stride + 1 # 출력 배열의 height\n ow = (w - pool_w) // stride + 1 # 출력 배열의 width\n output = np.zeros((oh, ow)) # 출력 배열 초기화\n for i in range(oh):\n for j in range(ow):\n x_sub = x[(i * stride):(i * stride) + pool_h,\n (j * stride):(j * stride) + pool_w]\n output[i, j] = np.max(x_sub)\n return output\n\n\nif __name__ == '__main__':\n np.random.seed(114)\n x = np.random.randint(10, size=10)\n print(x)\n\n pooled = pooling1d(x, pool_size=2, stride=2)\n print(pooled)\n\n pooled = pooling1d(x, pool_size=4, stride=2)\n print(pooled)\n\n pooled = pooling1d(x, pool_size=4, stride=3)\n print(pooled)\n\n pooled = pooling1d(x, pool_size=3, stride=3)\n print(pooled)\n\n x = np.random.randint(100, size=(8, 8))\n print(x)\n\n pooled = pooling2d(x, pool_h=4, pool_w=4, stride=4)\n print(pooled)\n\n print()\n x = np.random.randint(100, size=(5, 5))\n print(x)\n pooled = pooling2d(x, pool_h=3, pool_w=3, stride=2)\n print(pooled)\n\n # MNIST 데이터 세트를 로드\n # 손글씨 이미지를 하나를 선택: shape=(1, 28, 28) -> (28, 28) 변환\n # 선택된 이미지를 pyplot을 사용해서 출력\n # window shape=(4, 4), stride=4 pooling -> output shape=(7,7)\n # pyplot으로 출력\n\n (x_train, y_train), (x_test, y_test) = load_mnist(normalize=False,\n flatten=False)\n print('x_train', x_train.shape) # (samples, color, height, width)\n print('x_train[0]', x_train[0].shape) # (color, height, width)\n # plt.imshow(x_train[0])\n # (c, h, w) 형식의 이미지 데이터는 matplotlib이 사용할 수 없음\n # (h, w, c) 형식으로 변환해야 함.\n num_img = np.moveaxis(x_train[0], 0, 2)\n print(num_img.shape) # (height, width, color)\n num_img = num_img.reshape((28, 28)) # 단색인 경우 2차원으로 변환\n plt.imshow(num_img, cmap='gray')\n plt.show()\n\n transformed_pic = pooling2d(num_img, 4, 4, 4)\n plt.imshow(transformed_pic, cmap='gray')\n plt.show()\n\n img = Image.open('desert.jpg')\n\n img_pixel = np.array(img)\n print(img_pixel.shape)\n img_r = img_pixel[:,:,0]\n img_g = img_pixel[:,:,1]\n img_b = img_pixel[:,:,2]\n\n tf_img_r=pooling2d(img_r, 32, 32, 32)\n tf_img_g=pooling2d(img_g, 32, 32, 32)\n tf_img_b=pooling2d(img_b, 32, 32, 32)\n plt.imshow(tf_img_r, cmap='pink_r')\n plt.show()\n plt.imshow(tf_img_g, cmap='Greens')\n plt.show()\n plt.imshow(tf_img_b, cmap='Blues')\n plt.show()\n tf_img_integrated=np.array([tf_img_r, tf_img_g, tf_img_b]).astype(np.uint8)\n print(tf_img_integrated.shape)\n tf_img_integrated = np.moveaxis(tf_img_integrated, 0, 2)\n print(tf_img_integrated.shape)\n plt.imshow(tf_img_integrated)\n plt.show()\n ###################################################################\n\n\n"
] | [
[
"numpy.arange",
"numpy.nditer",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.linspace",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.random.seed",
"numpy.max",
"numpy.append",
"numpy.argmax",
"numpy.mean",
"numpy.exp",
"numpy.array",
"numpy.random.randint"
],
[
"matplotlib.pyplot.imshow",
"numpy.random.seed",
"numpy.max",
"numpy.moveaxis",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhangarejiu/algorithmic-trading | [
"ae5def0934a97d454a987f73e2096eb5f20d5166"
] | [
"market_maker/exchange_interface.py"
] | [
"import bitmex, json, sys\nfrom datetime import datetime\nfrom time import sleep\nimport pandas as pd\nimport decimal # this module is used to round up the tickSize\nfrom market_maker.utils import log, errors, constants\n# import mombot.settings as settings\nfrom market_maker.settings import settings\nfrom market_maker.utils.math import toNearest\nfrom future.utils import iteritems\nimport atexit\nimport signal\nfrom urllib.error import HTTPError\nimport time\nimport keyboard\n\n\nlogger = log.setup_custom_logger(__name__)\n\nclass ExchangeInterface:\n\n def __init__(self, dry_run=False):\n self.dry_run = dry_run\n \n if len(sys.argv) > 1:\n self.symbol = sys.argv[1]\n else:\n self.symbol = settings.SYMBOL\n \n if self.dry_run:\n logger.info(\"Initializing dry run. Orders printed below represent what would be posted to BitMEX.\")\n else:\n if (settings.USE_TESTNET):\n logger.info(\"Connecting to TESTNET\")\n try:\n self.bitmex = bitmex.bitmex(test=settings.USE_TESTNET, api_key=settings.API_KEY_TEST, api_secret=settings.API_SECRET_TEST)\n logger.info(\"Connected to TESTNET\")\n\n except:\n logger.error(\"Can't connect to Bitmex. Please check your connection\")\n else:\n logger.info(\"Connecting to REAL\")\n try:\n self.bitmex = bitmex.bitmex(test=settings.USE_TESTNET, api_key=settings.API_KEY_REAL, api_secret=settings.API_SECRET_REAL)\n logger.info(\"Connected to REALNET\")\n except:\n logger.error(\"Can't connect to Bitmex. Please check your connection\")\n # append order prefix to identify bot id\n self.orderIDPrefix=settings.ORDERID_PREFIX\n\n def get_instrument(self, symbol=None):\n \"\"\"\n Instrument is the currency pair we are trading. In this case XBTUSD.\n We filter out only the one in use XBTUSD\n \"\"\"\n if symbol is None:\n symbol = self.symbol\n try:\n instrument = None\n while not instrument:\n instrument = self.bitmex.Instrument.Instrument_get(filter=json.dumps({'symbol':symbol})).result()\n instrument = instrument[0][0]\n instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1 # tickLog represent the number of decimal points on the right of the comma 0.1234...\n # if instrument is None:\n # raise errors.MarketEmptyError(\"Instrument is empty\")\n # logger.info(\"Instrument correctly imported\")\n except:\n logger.info(\"Connection error. Couldn't retrive 'instrument'. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n instrument = self.get_instrument()\n return instrument\n\n def get_currentQty(self, symbol=None):\n if symbol is None:\n symbol = self.symbol\n return self.get_position(symbol)['currentQty']\n\n def get_simpleCost(self, symbol=None):\n if symbol is None:\n symbol = self.symbol\n try:\n position = self.get_position(symbol)\n if position is None: \n raise errors.MarketEmptyError(\"Couldn't get simplecost\")\n simpleCost = position['simpleCost'] \n return simpleCost\n except:\n logger.info(\"Connection error. Couldn't retrive 'simple_cost'. 
Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n self.get_simpleCost()\n\n def get_unrealisedPnlPcnt(self, symbol=None):\n \"\"\"\n return the unrealised profit and loss as percentage\n \"\"\"\n if symbol is None:\n symbol = self.symbol\n position = self.get_position(symbol)\n pnl = position['unrealisedPnlPcnt'] \n return pnl\n # print(\"########\")\n # print(pnl)\n # timestamp = position['timestamp']\n # record = {'timestamp':timestamp, 'unrealisedPnlPcnt':str(pnl)}\n # tmp_df = pd.DataFrame.from_dict([record]) # convert to pandas\n # tmp_df.set_index('timestamp', inplace=True) # set the df index\n # tmp_df.index = pd.to_datetime(tmp_df.index) # convert index to datetime obj\n # tmp_df = tmp_df.dropna(axis=0, how='all') # remove nans\n # return tmp_df\n\n def get_position(self, symbol=None):\n \"\"\"\n simpleCost is expressed in USD\n currentQty is expressed in XBT\n\n \"account\": 25306,\n \"commission\": 0.00075,\n \"initMarginReq\": 0.3333333333333333,\n \"maintMarginReq\": 0.005,\n \"riskLimit\": 20000000000,\n \"leverage\": 3,\n \"crossMargin\": false,\n \"deleveragePercentile\": 1,\n \"rebalancedPnl\": 23624,\n \"prevRealisedPnl\": -272,\n \"prevUnrealisedPnl\": 0,\n \"prevClosePrice\": 10631.26,\n \"openingTimestamp\": \"2018-03-08T00:00:00.000Z\",\n \"openingQty\": 0,\n \"openingCost\": 30870,\n \"openingComm\": -11599,\n \"openOrderBuyQty\": 0,\n \"openOrderBuyCost\": 0,\n \"openOrderBuyPremium\": 0,\n \"openOrderSellQty\": 0,\n \"openOrderSellCost\": 0,\n \"openOrderSellPremium\": 0,\n \"execBuyQty\": 180,\n \"execBuyCost\": 1793670,\n \"execSellQty\": 150,\n \"execSellCost\": 1499190,\n \"execQty\": 30,\n \"execCost\": -294480,\n \"execComm\": -221,\n \"currentTimestamp\": \"2018-03-08T01:34:51.019Z\",\n \"currentQty\": 30,\n \"currentCost\": -263610,\n \"currentComm\": -11820,\n \"realisedCost\": 35370,\n \"unrealisedCost\": -298980,\n \"grossOpenCost\": 0,\n \"grossOpenPremium\": 0,\n \"grossExecCost\": 298945,\n \"isOpen\": true,\n \"markPrice\": 9976.27,\n \"markValue\": -300720,\n \"riskValue\": 300720,\n \"homeNotional\": 0.0030072,\n \"foreignNotional\": -30,\n \"posState\": \"\",\n \"posCost\": -298980,\n \"posCost2\": -298980,\n \"posCross\": 0,\n \"posInit\": 99660,\n \"posComm\": 299,\n \"posLoss\": 0,\n \"posMargin\": 99959,\n \"posMaint\": 2916,\n \"posAllowance\": 0,\n \"taxableMargin\": 0,\n \"initMargin\": 0,\n \"maintMargin\": 98219,\n \"sessionMargin\": 0,\n \"targetExcessMargin\": 0,\n \"varMargin\": 0,\n \"realisedGrossPnl\": -35370,\n \"realisedTax\": 0,\n \"realisedPnl\": -23550,\n \"unrealisedGrossPnl\": -1740,\n \"longBankrupt\": 0,\n \"shortBankrupt\": 0,\n \"taxBase\": 0,\n \"indicativeTaxRate\": 0,\n \"indicativeTax\": 0,\n \"unrealisedTax\": 0,\n \"unrealisedPnl\": -1740,\n \"unrealisedPnlPcnt\": -0.0058,\n \"unrealisedRoePcnt\": -0.0175,\n \"simpleQty\": 0.003,\n \"simpleCost\": 30,\n \"simpleValue\": 30,\n \"simplePnl\": 0,\n \"simplePnlPcnt\": 0,\n \"avgCostPrice\": 10034,\n \"avgEntryPrice\": 10034,\n \"breakEvenPrice\": 10032.5,\n \"marginCallPrice\": 7576,\n \"liquidationPrice\": 7576,\n \"bankruptPrice\": 7526,\n \"timestamp\": \"2018-03-08T01:34:51.019Z\",\n \"lastPrice\": 9976.27,\n \"lastValue\": -300720\n \n\"\"\"\n if symbol is None:\n symbol = self.symbol\n try:\n position = self.bitmex.Position.Position_get(filter=json.dumps({'symbol':symbol})).result()[0][0]\n if position is None:\n raise errors.MarketEmptyError(\"Position is empty\")\n # logger.info(\"Position correctly imported\")\n # print(position)\n return position\n except:\n 
logger.info(\"Connection error. Couldn't retrive 'position'. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n self.get_position()\n\n def get_quoteBucketed(self, symbol=None, count=settings.BUFFER):\n\n \"\"\"\n OUTDATED ---> REMOVE THIS METHOD\n \"\"\"\n logger.info(\"get_quoteBucketed is outdated, should not be used\")\n \"\"\"\n \"\"\"\n if symbol is None:\n symbol = self.symbol\n try:\n # quote = self.bitmex.Quote.Quote_getBucketed(symbol=symbol, reverse=False, binSize=settings.TIMEFRAME, count=settings.BUFFER, partial=False).result()[0]\n quote = self.bitmex.Quote.Quote_getBucketed(symbol=symbol, reverse=True, binSize=settings.TIMEFRAME, count=count, partial=False).result()[0]\n if quote is None:\n raise errors.MarketEmptyError(\"Quote is empty\")\n # logger.info(\"QuoteBucketed correctly imported\")\n return quote\n except:\n logger.info(\"Connection error. Couldn't retrive 'quoteBucketed'. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n self.get_quoteBucketed()\n\n def get_latest_quote(self, symbol=\"XBTUSD\", binSize=settings.TIMEFRAME, partial=False, count=1):\n \n \"\"\"\n @returns data-frame containing quotes for the past x minutes\n \n DF created:\n askPrice askSize bidPrice bidSize symbol available_margin available_margin\n timestamp\n 2018-03-14 17:45:00+00:00 8363.5 700 8352.0 21266 XBTUSD 3.13691847 3.13691847\n -------------------------------------------------------------------------------------------------------------- \n \"\"\"\n \n # Return a list of dictionaries containing quotes sorted in reverse order\n quote = self.bitmex.Quote.Quote_getBucketed(symbol=symbol, reverse=True, binSize=settings.TIMEFRAME, count=count, partial=partial).result()[0]\n df = pd.DataFrame.from_dict(quote) # convert to pandas\n \n \"\"\"\n # df = df.dropna(axis=0, how='all') # remove nans\n # logger.info('\\n {}'.format(df))\n The following part is redundant and here because of legacy: it is not necessary if you get bucketed quotes per minutes and display data for the 1m timefarme.\n The candles would all be the same hloc values.\n\n # resample_time = {'1m':'1Min'}\n # candle_df = df[column].resample(resample_time[settings.TIMEFRAME]).ohlc() # group again and sorts 1Min candles (just to make sure!)\n # print(candle_df)\n \"\"\"\n\n # add datetime, margin and balance to the DF\n df.set_index('timestamp', inplace=True) # set the df index\n df.index = pd.to_datetime(df.index) # convert index to datetime obj\n current_time = df.index.tolist()[0] # extrapolate latest time\n \n # margin balance df\n margin_balance = self.bitmex.User.User_getMargin(currency='XBt').result()[0]['marginBalance']\n margin_in_XBT = self.XBt_to_XBT(margin_balance)\n available_margin_record = {'timestamp':current_time, 'marginBalance':str(margin_in_XBT)}\n margin_df = pd.DataFrame.from_records([available_margin_record], index='timestamp') # convert to pandas\n \n # wallet balance df\n wallet_balance = self.bitmex.User.User_getMargin(currency='XBt').result()[0]['walletBalance']\n wallet_in_XBT = self.XBt_to_XBT(wallet_balance)\n available_wallet_record = {'timestamp':current_time, 'walletBalance':str(margin_in_XBT)}\n wallet_df = pd.DataFrame.from_records([available_wallet_record], index='timestamp') # convert to pandas\n\n # merge DFs\n combined_df = pd.concat([df, wallet_df, margin_df], axis=1) # join axes is not needed as there is only one row\n \n return combined_df\n\n def get_latest_quote_with_funding(self, symbol=\"XBTUSD\", binSize=settings.TIMEFRAME, partial=False, count=1):\n \n \"\"\"\n @returns data-frame 
containing quotes for the past x minutes\n \n DF created:\n askPrice askSize bidPrice bidSize symbol walletBalance marginBalance fundingRate indicativeFundingRate\n timestamp\n 2018-04-02 03:05:00+00:00 6949.5 500924 6939.0 1010 XBTUSD 3.13026403 3.13026403 -0.001697 0.001086\n -------------------------------------------------------------------------------------------------------------- \n \"\"\"\n \n # Return a list of dictionaries containing quotes sorted in reverse order\n quote = self.bitmex.Quote.Quote_getBucketed(symbol=symbol, reverse=True, binSize=settings.TIMEFRAME, count=count, partial=partial).result()[0]\n df = pd.DataFrame.from_dict(quote) # convert to pandas\n \n \"\"\"\n # df = df.dropna(axis=0, how='all') # remove nans\n # logger.info('\\n {}'.format(df))\n The following part is redundant and here because of legacy: it is not necessary if you get bucketed quotes per minutes and display data for the 1m timefarme.\n The candles would all be the same hloc values.\n\n # resample_time = {'1m':'1Min'}\n # candle_df = df[column].resample(resample_time[settings.TIMEFRAME]).ohlc() # group again and sorts 1Min candles (just to make sure!)\n # print(candle_df)\n \"\"\"\n\n # add datetime, margin and balance to the DF\n df.set_index('timestamp', inplace=True) # set the df index\n df.index = pd.to_datetime(df.index) # convert index to datetime obj\n current_time = df.index.tolist()[0] # extrapolate latest time\n \n # margin balance df\n margin_balance = self.bitmex.User.User_getMargin(currency='XBt').result()[0]['marginBalance']\n margin_in_XBT = self.XBt_to_XBT(margin_balance)\n available_margin_record = {'timestamp':current_time, 'marginBalance':str(margin_in_XBT)}\n margin_df = pd.DataFrame.from_records([available_margin_record], index='timestamp') # convert to pandas\n \n # wallet balance df\n wallet_balance = self.bitmex.User.User_getMargin(currency='XBt').result()[0]['walletBalance']\n wallet_in_XBT = self.XBt_to_XBT(wallet_balance)\n available_wallet_record = {'timestamp':current_time, 'walletBalance':str(margin_in_XBT)}\n wallet_df = pd.DataFrame.from_records([available_wallet_record], index='timestamp') # convert to pandas\n\n # funding rate data\n funding_time = self.get_instrument()['fundingTimestamp']\n funding_rate = self.get_instrument()['fundingRate']\n indicative_funding_rate = self.get_instrument()['indicativeFundingRate']\n funding_record = {'timestamp':current_time, 'fundingTimestamp':funding_time, 'fundingRate':str(funding_rate), 'indicativeFundingRate':indicative_funding_rate}\n funding_df = pd.DataFrame.from_records([funding_record], index='timestamp') # convert to pandas\n\n # merge DFs\n combined_df = pd.concat([df, wallet_df, margin_df, funding_df], axis=1) # join axes is not needed as there is only one row\n \n return combined_df\n\n def cancel_order_by_id(self, id):\n if self.dry_run:\n return\n try:\n logger.info(\"Cancelling order id {}\".format(id))\n tickLog = self.get_instrument()['tickLog'] # tickLog represent the number of decimal points on the right of the comma 0.1234... 
\n cancelled_order = self.bitmex.Order.Order_cancel(orderID=id, text='Time too long').result()[0][0]\n # logger\n # if cancelled_order:\n # logger.info(\"Cancelled: %s %d @ %.*f\" % (cancelled_order['side'], cancelled_order['orderQty'], tickLog, cancelled_order['price']))\n # if cancelled_order is None:\n # raise errors.MarketEmptyError(\"Orderbook is empty, cannot quote\")\n return cancelled_order\n # else:\n # logger.info(\"No order was cancel\")\n # return\n except:\n logger.info(\"Unexprected error\", sys.exc_info()[0])\n logger.info(\"Connection error. Couldn't cancel all orders. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n return self.cancel_order_by_id(id)\n\n def cancel_all_orders(self):\n if self.dry_run:\n return\n logger.info(\"Resetting current orders. Cancelling pending orders.\")\n tickLog = self.get_instrument()['tickLog'] # tickLog represent the number of decimal points on the right of the comma 0.1234...\n orders = self.bitmex.Order.Order_cancelAll().result()[0]\n\n if len(orders):\n for order in orders:\n logger.info(\"Cancelling order: {}\".format(order['orderID']))\n return orders\n else:\n logger.info(\"No orders to cancel\")\n return\n\n def close_position(self):\n if self.dry_run:\n return\n logger.info(\"Resetting current position. Cancelling all open positions.\")\n # check how much we have open\n # logger.info(self.get_position()['simpleCost'])\n open_position = self.get_position()['simpleCost']\n # then close the position\n if (open_position != 0):\n # if there was an open position print out the id, symbol and amount to close\n position_to_close = self.bitmex.Order.Order_closePosition(symbol=self.symbol).result()[0]\n logger.info(\"Closing position: {}\".format(position_to_close['orderID']))\n else:\n logger.info(\"No position to close.\")\n\n###################\n def get_order_with_id(self, id):\n if self.dry_run:\n return []\n try:\n order = self.bitmex.Order.Order_getOrders(filter=json.dumps({'orderID':str(id)})).result()[0]\n return order\n except:\n logger.info(\"Connection error. Couldn't get order with id. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n return self.get_order_with_id()\n####################\n\n def get_num_open_orders(self):\n if self.dry_run:\n return []\n try:\n orders = self.bitmex.Order.Order_getOrders(filter=json.dumps({'ordStatus':'New'})).result()[0]\n order_num = len(orders)\n logger.info(\"Total open orders: {}\".format(order_num))\n return order_num\n except:\n logger.info(\"Connection error. Couldn't get num open orders. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n return self.get_num_open_orders()\n####################\n\n def get_num_contingent_orders(self):\n if self.dry_run:\n return []\n try:\n counter = 0\n orders = self.bitmex.Order.Order_getOrders(filter=json.dumps({'ordStatus':'New'})).result()[0]\n if orders:\n for order in orders:\n if order['clOrdLinkID']:\n counter = counter + 1\n logger.info(\"Total contingent orders: {}\".format(counter))\n return counter\n except:\n logger.info(\"Connection error. Couldn't get num contingent orders. 
Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n return self.get_num_contingent_orders()\n####################\n\n def get_orders(self):\n if self.dry_run:\n return []\n try:\n # orders = self.bitmex.Order.Order_getOrders(filter=json.dumps({'ordStatus.isTerminated':False})).result()[0]\n # orders = self.bitmex.Order.Order_getOrders(filter=json.dumps({'ordStatus':'New'})).result()[0]\n orders = self.bitmex.Order.Order_getOrders().result()[0]\n # Only return orders that start with our clOrdID prefix.\n if orders:\n # logger.info(\"Orders correctly imported\")\n return [o for o in orders if str(o['clOrdID']).startswith(self.orderIDPrefix)]\n else:\n logger.error(\"Couldn't get orders\")\n except:\n logger.info(\"Connection error. Couldn't get orders. Sleeping...\")\n sleep(settings.LOOP_INTERVAL)\n return self.get_orders()\n#################### \n\n def check_market_open(self):\n instrument = self.get_instrument()\n if not instrument:\n # if instrument[\"state\"] != \"Open\":\n raise errors.MarketClosedError(\"The instrument %s is not open. State: %s\" %(self.symbol, instrument[\"state\"]))\n # send email ? \n \n def get_ticker(self, instrument, symbol=None):\n '''\n Return a ticker dictionary with last, buy, sell and mid. Generated from instrument. NOTE: Values are rounded up with tick size\n I.E. {'last': 10563.5, 'buy': 10563.5, 'sell': 10564.0, 'mid': 10564.0}\n '''\n if symbol is None:\n symbol = self.symbol\n\n # instrument = self.get_instrument(symbol)\n\n # If this is an index, we have to get the data from the last trade.\n if instrument['symbol'][0] == '.':\n ticker = {}\n ticker['mid'] = ticker['buy'] = ticker['sell'] = ticker['last'] = instrument['markPrice']\n # Normal instrument\n else:\n bid = instrument['bidPrice'] or instrument['lastPrice']\n ask = instrument['askPrice'] or instrument['lastPrice']\n ticker = {\n \"last\": instrument['lastPrice'],\n \"buy\": bid,\n \"sell\": ask,\n \"mid\": (bid + ask) / 2\n }\n\n # The instrument has a tickSize. 
Use it to round values.\n #print({k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)})\n return {k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)}\n\n def get_margin(self):\n \"\"\"\n Margin is the amount of equity expressed in XBT\n \"\"\"\n logger.info(\"DEPRECATED FUNCTION USE GET_LATEST_QUOTE TO RETURN A DF !!!!\")\n if self.dry_run:\n return {'marginBalance': float(settings.DRY_BTC), 'availableFunds': float(settings.DRY_BTC)}\n try:\n margin_balance = self.bitmex.User.User_getMargin(currency='XBt').result()[0]['marginBalance']\n if margin_balance:\n margin_in_XBT = self.XBt_to_XBT(margin_balance)\n # logger.info(\"Imported margin\")\n return margin_in_XBT\n else:\n raise errors.MarketEmptyError(\"Margin is empty\")\n except:\n sleep(settings.LOOP_INTERVAL)\n self.get_margin()\n\n def XBt_to_XBT(self, XBt):\n return float(XBt) / constants.XBt_TO_XBT \n\n def send_trailing_order(self, clOrdLinkID, original_side, pegOffsetValue=settings.TRAILSTOP_OFFSET, orderQty=settings.ORDER_QUANTITY, symbol=None):\n if symbol is None:\n symbol = self.symbol\n # try: \n # Define variables\n if original_side == 'Buy': # if we have a buy we need to sell and to set an offset to price - offsett\n opposite_side = 'Sell'\n pegOffsetValue = pegOffsetValue * -1\n if original_side == 'Sell':\n opposite_side = 'Buy'\n orderQty = settings.ORDER_QUANTITY # should be equal to the bidding quantity\n pegPriceType = 'TrailingStopPeg' # follows the price movement in one direction\n # execInst = 'LastPrice, ParticipateDoNotInitiate' # this could be assigned to the mark price, but for now uses the last-price\n execInst = 'LastPrice' # this could be assigned to the mark price, but for now uses the last-price\n contingencyType = 'OneCancelsTheOther' # to allow the stoploss and takeprofit to be linked\n # ordType ='LimitIfTouched'\n # ordType ='Stop'\n\n # SET SMART ORDERS\n order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, clOrdLinkID=clOrdLinkID, pegOffsetValue=pegOffsetValue, pegPriceType=pegPriceType, execInst=execInst, contingencyType=contingencyType).result()\n\n # LOGGING\n if order is not None:\n order_size = order[0]['orderQty']\n price = order[0]['price']\n logger.info(\"NEW Trailing Order: {} contract @ {}\".format(order_size,price))\n else:\n logger.info(\"Attention! 
ORDER IS NONE\")\n\n return order\n # except :\n # logger.info(\"Couldn't place the order\")\n # sleep(settings.LOOP_INTERVAL)\n # self.send_smart_order(opposite_side, orderQty, symbol)\n\n def send_stoploss_order(self, clOrdLinkID, original_side, takeprofitOffset=settings.TAKEPROFIT_OFFSET, orderQty=settings.ORDER_QUANTITY, symbol=None):\n if symbol is None:\n symbol = self.symbol\n try:\n # Define variables\n orderQty = settings.ORDER_QUANTITY\n # ordType ='StopLimit'\n ordType ='Stop'\n # execInst = 'LastPrice, ParticipateDoNotInitiate'\n execInst = 'MarkPrice' # mark price uses the sma of the price to avoid spikes\n # execInst='ParticipateDoNotInitiate' # might want to use this to get green fees (to test)\n contingencyType = 'OneCancelsTheOther'\n instrument = self.get_instrument(symbol)\n ticker = self.get_ticker(instrument) \n # Set up our buy & sell positions as the smallest possible unit above and below the current spread\n if original_side == 'Buy':\n stopPx = ticker[\"sell\"] - takeprofitOffset\n position_limit = ticker[\"sell\"] - takeprofitOffset - instrument['tickSize']\n opposite_side = 'Sell'\n else:\n stopPx = ticker[\"buy\"] + takeprofitOffset\n position_limit = ticker[\"buy\"] + takeprofitOffset + instrument['tickSize'] \n opposite_side = 'Buy'\n\n # SET SMART ORDERS\n # order = self.bitmex.Order.Order_new(symbol=symbol, orderQty=orderQty, original_side=opposite_side, price=position_limit, stopPx=stopPx, ordType=ordType, execInst=execInst).result()\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, price=position_limit, stopPx=stopPx, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, stopPx=stopPx, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, price=position_limit, stopPx=stopPx, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, stopPx=stopPx, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, stopPx=stopPx, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n # logger.info(order)\n\n # LOGGING\n if order is not None:\n order_size = order[0]['orderQty']\n price = order[0]['price']\n logger.info(\"NEW Takeprofit Order: {} contract @ {}\".format(order_size,price))\n else:\n logger.info(\"Attention! 
ORDER IS NONE\")\n\n return order\n except:\n # logger.info(\"Unexpected error\", sys.exc_info()[0])\n logger.info(\"Couldn't place the order\")\n return\n # sleep(settings.LOOP_INTERVAL)\n # self.send_smart_order(side, orderQty, symbol) \n\n def send_takeprofit_order(self, clOrdLinkID, original_side, takeprofitOffset=settings.TAKEPROFIT_OFFSET, orderQty=settings.ORDER_QUANTITY, symbol=None):\n if symbol is None:\n symbol = self.symbol\n\n # Define variables\n orderQty = settings.ORDER_QUANTITY\n \n # green fees\n # ordType ='LimitIfTouched'\n # execInst = 'LastPrice, ParticipateDoNotInitiate, ReduceOnly'\n \n # for limit\n ordType ='Limit'\n execInst = 'ReduceOnly'\n \n contingencyType = 'OneCancelsTheOther'\n instrument = self.get_instrument(symbol)\n ticker = self.get_ticker(instrument) \n # Set up our buy & sell positions as the smallest possible unit above and below the current spread\n if original_side == 'Buy':\n stopPx = ticker[\"sell\"] + takeprofitOffset\n position_limit = ticker[\"sell\"] + takeprofitOffset + instrument['tickSize']\n opposite_side = 'Sell'\n else:\n stopPx = ticker[\"buy\"] - takeprofitOffset\n position_limit = ticker[\"buy\"] - takeprofitOffset - instrument['tickSize'] \n opposite_side = 'Buy'\n\n # SET SMART ORDERS\n\n # regular limit order\n order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, price=position_limit, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n\n # green fees\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, price=position_limit, stopPx=stopPx, clOrdLinkID=clOrdLinkID, ordType=ordType, execInst=execInst, contingencyType=contingencyType).result()\n \n # LOGGING\n if order is not None:\n order_size = order[0]['orderQty']\n price = order[0]['price']\n logger.info(\"NEW Takeprofit Order: {} contract @ {}\".format(order_size,price))\n else:\n logger.info(\"Attention! ORDER IS NONE\")\n return order\n \n def send_smart_order(self, side, orderQty=settings.ORDER_QUANTITY, symbol=None):\n \"\"\"\n send_smart_order is automatically setting BUY/SELL price to ensure we get paid market-maker fees\n @return a list of orders\n @return null if cannot cancel the orders\n \"\"\"\n if symbol is None:\n symbol = self.symbol\n \n # try:\n instrument = self.get_instrument(symbol)\n ticker = self.get_ticker(instrument)\n # Set up our buy & sell positions as the smallest possible unit above and below the current spread\n # and we'll work out from there. 
That way we always have the best price but we don't kill wide\n # and potentially profitable spreads.\n\n \n # SET ORDERS\n # buy_order = self.exchange.Order.Order_new(symbol='XBTUSD', side='Buy', orderQty=10, ordType='Market').result() # this should be optimized avoiding a market order\n # TODO: check if there is enough balance\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=side, orderQty=orderQty, ordType='Market').result() # this should be optimized avoiding a market order\n if (side=='Buy'):\n opposite_side = 'Sell'\n position_limit = ticker[\"buy\"] - (instrument['tickSize'])\n else:\n opposite_side = 'Buy'\n position_limit = ticker[\"sell\"] + (instrument['tickSize'])\n \n order = self.bitmex.Order.Order_new(symbol=symbol, side=side, orderQty=orderQty, ordType='Limit', execInst='ParticipateDoNotInitiate', price=position_limit).result()\n\n if order:\n \n order_status = order[0]['ordStatus']\n \n if order_status == 'New':\n order_size = order[0]['orderQty']\n price = order[0]['price']\n order_id = order[0]['orderID']\n order_side = order[0]['side']\n order_time = time.perf_counter() # start a timer\n logger.info(\"NEW {} Order: {} contract @ {}\".format(order_side, order_size, price))\n else:\n # logger.info(order_status)\n return\n \n while order_status == 'New':\n\n sleep(max(3,settings.SMART_ORDER_TIMEOUT/100)) # wait at least x sec\n \n # check if the time elapsed since order is above threshold, then clear out the order\n current_time = time.perf_counter()\n elapsed_time = current_time - order_time\n \n logger.info(\"Waiting for order {} to fill.\\tElapsed time: {:3.1f}s ...\".format(order_id, elapsed_time))\n\n # get order updated details\n order = self.get_order_with_id(order_id)\n order_status = order[0]['ordStatus']\n\n if elapsed_time > settings.SMART_ORDER_TIMEOUT and order_status == 'New': # if time passed is greather than threshold and order is still new ... 
\n logger.info(\"Elapsed time waiting to fill order is too long {}s.\".format(settings.SMART_ORDER_TIMEOUT))\n # cancel order\n canceled_order = self.cancel_order_by_id(order_id)\n \"\"\"\n the assumption here is that if the order is not filled is because the price trend is moving in the same direction as our bet, \n which in other words means we are on the winning side of the trade => send the order with same direction at market price\n \"\"\"\n # order = self.bitmex.Order.Order_new(symbol=symbol, side=side, orderQty=orderQty, ordType='Market').result() \n \"\"\"\n # alternative idea: if the order doesn't fill in time send an order with opposite side\n order = self.bitmex.Order.Order_new(symbol=symbol, side=opposite_side, orderQty=orderQty, ordType='Limit', execInst='ParticipateDoNotInitiate', price=position_buy_limit).result() \n \"\"\"\n return\n # break\n \n # refresh the order status\n order = self.get_order_with_id(order_id)\n order_status = order[0]['ordStatus']\n logger.info(\"OrderID {} status: {}\".format(order_id, order_status))\n \n if order_status == 'New': # this should not happene\n logger.info(\"Order Status is still New, this should not happen\")\n pass\n elif order_status == 'Filled':\n return order\n elif order_status == 'Canceled':\n return # empty order\n elif order_status == 'PartiallyFilled':\n # cancel remaining order\n order_id = order[0]['orderID']\n canceled_order = self.cancel_order_by_id(order_id)\n return order\n else: \n logger.error(\"ATTENTION: Order status is not recognised : {}\".format(order_status))\n raise systemExit # quit\n else:\n logger.error(\"COULDN'T SEND ORDER TO BITMEX. ORDER IS NONE!\")"
] | [
[
"pandas.DataFrame.from_records",
"pandas.concat",
"pandas.to_datetime",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
LazyBusyYang/mmhuman3d | [
"c653f05a6d3264cf49d86df5e3ffd8cdf9ca9057"
] | [
"tests/test_models/test_heads/test_hybrik_forward.py"
] | [
"import os\nimport os.path as osp\nimport tempfile\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmhuman3d.models import HybrIK_trainer, HybrIKHead\nfrom mmhuman3d.models.builder import build_body_model\nfrom mmhuman3d.models.utils.inverse_kinematics import (\n batch_get_3children_orient_svd,\n batch_get_pelvis_orient,\n batch_get_pelvis_orient_svd,\n batch_inverse_kinematics_transform,\n)\n\n\ndef generate_weights(output_dir):\n \"\"\"Generate a SMPL model weight file to initialize SMPL model, and generate\n a 3D joints regressor file.\"\"\"\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n joint_regressor_file = os.path.join(output_dir, 'J_regressor_h36m.npy')\n np.save(joint_regressor_file, np.zeros([17, 6890]))\n\n smpl_mean_file = os.path.join(output_dir, 'h36m_mean_beta.npy')\n np.save(smpl_mean_file, np.zeros([\n 10,\n ]))\n return\n\n\ndef test_HybrIK_head():\n\n tmpdir = tempfile.TemporaryDirectory()\n # generate weight file for SMPL model.\n generate_weights(tmpdir.name)\n\n # initialize models\n head = HybrIKHead(\n smpl_mean_params=osp.join(tmpdir.name, 'h36m_mean_beta.npy'))\n smpl = build_body_model(\n dict(\n type='HybrIKSMPL',\n model_path='data/body_models/smpl',\n extra_joints_regressor=osp.join(tmpdir.name,\n 'J_regressor_h36m.npy')))\n\n if torch.cuda.is_available():\n head = head.cuda()\n smpl = smpl.cuda()\n\n with pytest.raises(TypeError):\n _ = HybrIKHead()\n\n with pytest.raises(TypeError):\n _ = HybrIKHead(\n feature_channel=[512, 8],\n smpl_mean_params='data/body_models/h36m_mean_beta.npy')\n\n # mock inputs\n batch_size = 4\n input_shape = (batch_size, 512, 8, 8)\n mm_inputs = _demo_head_inputs(input_shape)\n features = mm_inputs.pop('features')\n trans_inv = mm_inputs.pop('trans_inv')\n joint_root = mm_inputs.pop('joint_root')\n depth_factor = mm_inputs.pop('depth_factor')\n intrinsic_param = mm_inputs.pop('intrinsic_param')\n\n if torch.cuda.is_available():\n predictions = head(features, trans_inv, intrinsic_param, joint_root,\n depth_factor, smpl)\n pred_keys = [\n 'pred_phi', 'pred_delta_shape', 'pred_shape', 'pred_pose',\n 'pred_uvd_jts', 'pred_xyz_jts_24', 'pred_xyz_jts_24_struct',\n 'pred_xyz_jts_17', 'pred_vertices', 'maxvals'\n ]\n for k in pred_keys:\n assert k in predictions\n assert predictions[k].shape[0] == batch_size\n\n with pytest.raises(RuntimeError):\n joint_root = torch.zeros((6, 3)).cuda()\n _ = head(features, trans_inv, intrinsic_param, joint_root,\n depth_factor, smpl)\n\n with pytest.raises(RuntimeError):\n joint_root = torch.zeros((batch_size, 3))\n _ = head(features, trans_inv, intrinsic_param, joint_root,\n depth_factor, smpl)\n\n tmpdir.cleanup()\n\n\ndef test_HybrIK_trainer():\n\n tmpdir = tempfile.TemporaryDirectory()\n # generate weight file for SMPL model.\n generate_weights(tmpdir.name)\n\n model_cfg = dict(\n backbone=dict(\n type='ResNet',\n depth=34,\n out_indices=[3],\n norm_eval=False,\n init_cfg=dict(\n type='Pretrained', checkpoint='torchvision://resnet34')),\n head=dict(\n type='HybrIKHead',\n smpl_mean_params=osp.join(tmpdir.name, 'h36m_mean_beta.npy')),\n body_model=dict(\n type='HybrIKSMPL',\n model_path= # noqa: E251\n 'data/body_models/smpl',\n extra_joints_regressor=osp.join(tmpdir.name,\n 'J_regressor_h36m.npy')),\n loss_beta=dict(type='MSELoss', loss_weight=1),\n loss_theta=dict(type='MSELoss', loss_weight=0.01),\n loss_twist=dict(type='MSELoss', loss_weight=0.01),\n loss_uvd=dict(type='L1Loss', loss_weight=1),\n )\n\n model = HybrIK_trainer(**model_cfg)\n if 
torch.cuda.is_available():\n model = model.cuda()\n input_shape = (4, 3, 256, 256)\n mm_inputs = _demo_mm_inputs(input_shape)\n img = mm_inputs.pop('img')\n img_metas = mm_inputs.pop('img_metas')\n if torch.cuda.is_available():\n output = model.forward_train(img, img_metas, **mm_inputs)\n assert isinstance(output, dict)\n assert 'loss_beta' in output\n assert output['loss_beta'].dtype == torch.float32\n\n with torch.no_grad():\n output = model.forward_test(img, img_metas, **mm_inputs)\n assert isinstance(output, dict)\n for k in ['vertices', 'xyz_17', 'uvd_jts', 'xyz_24', 'image_path']:\n assert k in output\n\n tmpdir.cleanup()\n\n\ndef test_IK_functions():\n\n N = 4\n mm_inputs = _demo_IK_inputs(N)\n\n pose_skeleton = mm_inputs['pose_skeleton']\n phis = mm_inputs['phis']\n rest_pose = mm_inputs['rest_pose']\n children = mm_inputs['children']\n parents = mm_inputs['parents']\n rel_pose_skeleton = mm_inputs['rel_pose_skeleton']\n rel_rest_pose = mm_inputs['rel_rest_pose']\n rot_mat_chain_parent = mm_inputs['rot_mat_chain_parent']\n global_orient = None\n dtype = torch.float32\n\n rot_mat, rot_rest_pose = batch_inverse_kinematics_transform(\n pose_skeleton,\n global_orient,\n phis,\n rest_pose,\n children,\n parents,\n dtype,\n train=False,\n leaf_thetas=None)\n assert rot_mat.shape == (N, 24, 3, 3)\n assert rot_rest_pose.shape == (N, 29, 3)\n\n rot_mat, rot_rest_pose = batch_inverse_kinematics_transform(\n pose_skeleton,\n global_orient,\n phis,\n rest_pose,\n children,\n parents,\n dtype,\n train=True,\n leaf_thetas=None)\n assert rot_mat.shape == (N, 24, 3, 3)\n assert rot_rest_pose.shape == (N, 29, 3)\n\n global_orient_mat = batch_get_pelvis_orient(rel_pose_skeleton.clone(),\n rel_rest_pose.clone(), parents,\n children, dtype)\n assert global_orient_mat.shape == (N, 3, 3)\n\n global_orient_mat = batch_get_pelvis_orient_svd(rel_pose_skeleton.clone(),\n rel_rest_pose.clone(),\n parents, children, dtype)\n assert global_orient_mat.shape == (N, 3, 3)\n\n rot_mat = batch_get_3children_orient_svd(rel_pose_skeleton, rel_rest_pose,\n rot_mat_chain_parent, children,\n dtype)\n assert rot_mat.shape == (N, 3, 3)\n\n\ndef _demo_mm_inputs(input_shape=(1, 3, 256, 256)):\n \"\"\"Create a superset of inputs needed to run test or train batches.\n\n Args:\n input_shape (tuple):\n input batch dimensions\n \"\"\"\n (N, C, H, W) = input_shape\n\n rng = np.random.RandomState(0)\n\n imgs = rng.rand(*input_shape)\n\n trans_inv = np.zeros([N, 2, 3])\n intrinsic_param = np.zeros([N, 3, 3])\n joint_root = np.zeros([N, 3])\n depth_factor = np.ones([N, 1])\n target_uvd_29 = np.zeros([N, 87])\n target_xyz_24 = np.zeros([N, 72])\n target_weight_24 = np.ones([N, 72])\n target_weight_29 = np.ones([N, 87])\n target_xyz_17 = np.zeros([N, 51])\n target_weight_17 = np.ones([N, 51])\n target_theta = np.zeros([N, 96])\n target_beta = np.zeros([N, 10])\n target_smpl_weight = np.ones([N, 1])\n target_theta_weight = np.ones([N, 96])\n target_twist = np.zeros([N, 23, 2])\n target_twist_weight = np.ones([N, 23, 2])\n bbox = np.zeros([N, 4])\n\n img_metas = [{\n 'img_shape': (H, W, C),\n 'center': np.array([W / 2, H / 2]),\n 'scale': np.array([0.5, 0.5]),\n 'rotation': 0,\n 'image_path': '<demo>.png',\n } for _ in range(N)]\n\n mm_inputs = {\n 'img': torch.FloatTensor(imgs).requires_grad_(True),\n 'trans_inv': torch.FloatTensor(trans_inv),\n 'intrinsic_param': torch.FloatTensor(intrinsic_param),\n 'joint_root': torch.FloatTensor(joint_root),\n 'depth_factor': torch.FloatTensor(depth_factor),\n 'target_uvd_29': 
torch.FloatTensor(target_uvd_29),\n 'target_xyz_24': torch.FloatTensor(target_xyz_24),\n 'target_weight_24': torch.FloatTensor(target_weight_24),\n 'target_weight_29': torch.FloatTensor(target_weight_29),\n 'target_xyz_17': torch.FloatTensor(target_xyz_17),\n 'target_weight_17': torch.FloatTensor(target_weight_17),\n 'target_theta': torch.FloatTensor(target_theta),\n 'target_beta': torch.FloatTensor(target_beta),\n 'target_smpl_weight': torch.FloatTensor(target_smpl_weight),\n 'target_theta_weight': torch.FloatTensor(target_theta_weight),\n 'target_twist': torch.FloatTensor(target_twist),\n 'target_twist_weight': torch.FloatTensor(target_twist_weight),\n 'bbox': torch.FloatTensor(bbox),\n 'img_metas': img_metas,\n 'sample_idx': np.arange(N)\n }\n\n return mm_inputs\n\n\ndef _demo_head_inputs(input_shape=(1, 512, 8, 8)):\n \"\"\"Create a superset of inputs needed to run test or train batches.\n\n Args:\n input_shape (tuple):\n input batch dimensions\n \"\"\"\n (N, C, H, W) = input_shape\n\n rng = np.random.RandomState(0)\n\n features = rng.rand(*input_shape)\n\n trans_inv = np.zeros([N, 2, 3])\n intrinsic_param = np.zeros([N, 3, 3])\n joint_root = np.zeros([N, 3])\n depth_factor = np.ones([N, 1])\n\n mm_inputs = {\n 'features': torch.FloatTensor(features),\n 'trans_inv': torch.FloatTensor(trans_inv),\n 'intrinsic_param': torch.FloatTensor(intrinsic_param),\n 'joint_root': torch.FloatTensor(joint_root),\n 'depth_factor': torch.FloatTensor(depth_factor),\n }\n\n if torch.cuda.is_available():\n for k, _ in mm_inputs.items():\n mm_inputs[k] = mm_inputs[k].cuda()\n\n return mm_inputs\n\n\ndef _demo_IK_inputs(batch_size=1):\n \"\"\"Create a superset of inputs for testing inverse kinematics function.\n\n Args:\n batch_size (int):\n input batch size\n \"\"\"\n N = batch_size\n pose_skeleton = np.ones([N, 29, 3])\n phis = np.ones([N, 23, 2])\n rest_pose = np.ones([N, 29, 3])\n rel_pose_skeleton = np.ones([N, 29, 3, 1])\n rel_rest_pose = np.ones([N, 29, 3, 1])\n parents = np.array([\n -1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18,\n 19, 20, 21, 15, 22, 23, 10, 11\n ])\n children = np.array([\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 16, 17, 24, 18, 19, 20,\n 21, 22, 23, 25, 26, -1, -1, -1, -1, -1\n ])\n rot_mat_chain_parent = np.ones([N, 3, 3])\n\n mm_inputs = {\n 'pose_skeleton': torch.FloatTensor(pose_skeleton),\n 'phis': torch.FloatTensor(phis),\n 'rest_pose': torch.FloatTensor(rest_pose),\n 'children': torch.Tensor(children).long(),\n 'parents': torch.Tensor(parents).long(),\n 'rel_pose_skeleton': torch.FloatTensor(rel_pose_skeleton),\n 'rel_rest_pose': torch.FloatTensor(rel_rest_pose),\n 'rot_mat_chain_parent': torch.FloatTensor(rot_mat_chain_parent),\n }\n\n if torch.cuda.is_available():\n for k, _ in mm_inputs.items():\n mm_inputs[k] = mm_inputs[k].cuda()\n\n return mm_inputs\n"
] | [
[
"torch.Tensor",
"torch.zeros",
"numpy.arange",
"numpy.ones",
"torch.FloatTensor",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
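The test code in the record above repeatedly builds batched demo tensors and moves them to the GPU only when `torch.cuda.is_available()` returns True. Below is a minimal, self-contained sketch of that pattern (illustrative only; the shapes and key names are copied from the record's `_demo_IK_inputs`, and nothing here imports the model code under test):

import numpy as np
import torch

def make_demo_inputs(batch_size=2):
    # Fixed-shape stand-ins for the 29-joint skeleton and 23 twist angles
    # used by _demo_IK_inputs in the record above.
    inputs = {
        'pose_skeleton': torch.FloatTensor(np.ones([batch_size, 29, 3])),
        'phis': torch.FloatTensor(np.ones([batch_size, 23, 2])),
    }
    # Move every tensor to the GPU only when one is present, mirroring the
    # torch.cuda.is_available() guard in the test file.
    if torch.cuda.is_available():
        inputs = {k: v.cuda() for k, v in inputs.items()}
    return inputs

demo = make_demo_inputs()
assert demo['pose_skeleton'].shape == (2, 29, 3)
assert demo['phis'].shape == (2, 23, 2)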
kjappelbaum/pymatgen | [
"0766980bf693d816fb9d9beebe85d3e51685fa76"
] | [
"pymatgen/analysis/diffusion_analyzer.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nA module to perform diffusion analyses (e.g. calculating diffusivity from\nmean square displacements etc.). If you use this module, please consider\nciting the following papers::\n\n Ong, S. P., Mo, Y., Richards, W. D., Miara, L., Lee, H. S., & Ceder, G.\n (2013). Phase stability, electrochemical stability and ionic conductivity\n of the Li10+-1MP2X12 (M = Ge, Si, Sn, Al or P, and X = O, S or Se) family\n of superionic conductors. Energy & Environmental Science, 6(1), 148.\n doi:10.1039/c2ee23355j\n\n Mo, Y., Ong, S. P., & Ceder, G. (2012). First Principles Study of the\n Li10GeP2S12 Lithium Super Ionic Conductor Material. Chemistry of Materials,\n 24(1), 15-17. doi:10.1021/cm203303y\n\"\"\"\n\n\nimport multiprocessing\nimport warnings\n\nimport numpy as np\nimport scipy.constants as const\nfrom monty.json import MSONable\n\nfrom pymatgen.analysis.structure_matcher import (\n OrderDisorderElementComparator,\n StructureMatcher,\n)\nfrom pymatgen.core.periodic_table import get_el_sp\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.io.vasp.outputs import Vasprun\nfrom pymatgen.util.coord import pbc_diff\n\n__author__ = \"Will Richards, Shyue Ping Ong\"\n__version__ = \"0.2\"\n__maintainer__ = \"Will Richards\"\n__email__ = \"[email protected]\"\n__status__ = \"Beta\"\n__date__ = \"5/2/13\"\n\n\nclass DiffusionAnalyzer(MSONable):\n \"\"\"\n Class for performing diffusion analysis.\n\n .. attribute: diffusivity\n\n Diffusivity in cm^2 / s\n\n .. attribute: chg_diffusivity\n\n Charge diffusivity in cm^2 / s\n\n .. attribute: conductivity\n\n Conductivity in mS / cm\n\n .. attribute: chg_conductivity\n\n Conductivity derived from Nernst-Einstein equation using charge\n diffusivity, in mS / cm\n\n .. attribute: diffusivity_components\n\n A vector with diffusivity in the a, b and c directions in cm^2 / s\n\n .. attribute: conductivity_components\n\n A vector with conductivity in the a, b and c directions in mS / cm\n\n .. attribute: diffusivity_std_dev\n\n Std dev in diffusivity in cm^2 / s. Note that this makes sense only\n for non-smoothed analyses.\n\n .. attribute: chg_diffusivity_std_dev\n\n Std dev in charge diffusivity in cm^2 / s. Note that this makes sense only\n for non-smoothed analyses.\n\n .. attribute: conductivity_std_dev\n\n Std dev in conductivity in mS / cm. Note that this makes sense only\n for non-smoothed analyses.\n\n .. attribute: diffusivity_components_std_dev\n\n A vector with std dev. in diffusivity in the a, b and c directions in\n cm^2 / cm. Note that this makes sense only for non-smoothed analyses.\n\n .. attribute: conductivity_components_std_dev\n\n A vector with std dev. in conductivity in the a, b and c directions\n in mS / cm. Note that this makes sense only for non-smoothed analyses.\n\n .. attribute: max_framework_displacement\n\n The maximum (drift adjusted) distance of any framework atom from its\n starting location in A.\n\n .. attribute: max_ion_displacements\n\n nions x 1 array of the maximum displacement of each individual ion.\n\n .. attribute: msd\n\n nsteps x 1 array of the mean square displacement of specie.\n\n .. attribute: mscd\n\n nsteps x 1 array of the mean square charge displacement of specie.\n\n .. attribute: msd_components\n\n nsteps x 3 array of the MSD in each lattice direction of specie.\n\n .. 
attribute: sq_disp_ions\n\n The square displacement of all ion (both specie and other ions) as a\n nions x nsteps array.\n\n .. attribute: dt\n\n Time coordinate array.\n\n .. attribute: haven_ratio\n Haven ratio defined as diffusivity / chg_diffusivity.\n \"\"\"\n\n def __init__(\n self,\n structure,\n displacements,\n specie,\n temperature,\n time_step,\n step_skip,\n smoothed=\"max\",\n min_obs=30,\n avg_nsteps=1000,\n lattices=None,\n ):\n \"\"\"\n This constructor is meant to be used with pre-processed data.\n Other convenient constructors are provided as class methods (see\n from_vaspruns and from_files).\n\n Given a matrix of displacements (see arguments below for expected\n format), the diffusivity is given by::\n\n D = 1 / 2dt * <mean square displacement>\n\n where d is the dimensionality, t is the time. To obtain a reliable\n diffusion estimate, a least squares regression of the MSD against\n time to obtain the slope, which is then related to the diffusivity.\n\n For traditional analysis, use smoothed=False and weighted=False.\n\n Args:\n structure (Structure): Initial structure.\n displacements (array): Numpy array of with shape [site,\n time step, axis]\n specie (Element/Species): Species to calculate diffusivity for as a\n String. E.g., \"Li\".\n temperature (float): Temperature of the diffusion run in Kelvin.\n time_step (int): Time step between measurements.\n step_skip (int): Sampling frequency of the displacements (\n time_step is multiplied by this number to get the real time\n between measurements)\n smoothed (str): Whether to smooth the MSD, and what mode to smooth.\n Supported modes are:\n\n i. \"max\", which tries to use the maximum #\n of data points for each time origin, subject to a\n minimum # of observations given by min_obs, and then\n weights the observations based on the variance\n accordingly. This is the default.\n ii. \"constant\", in which each timestep is averaged over\n the number of time_steps given by min_steps.\n iii. None / False / any other false-like quantity. No\n smoothing.\n\n min_obs (int): Used with smoothed=\"max\". Minimum number of\n observations to have before including in the MSD vs dt\n calculation. E.g. If a structure has 10 diffusing atoms,\n and min_obs = 30, the MSD vs dt will be\n calculated up to dt = total_run_time / 3, so that each\n diffusing atom is measured at least 3 uncorrelated times.\n Only applies in smoothed=\"max\".\n avg_nsteps (int): Used with smoothed=\"constant\". Determines the\n number of time steps to average over to get the msd for each\n timestep. Default of 1000 is usually pretty good.\n lattices (array): Numpy array of lattice matrix of every step. Used\n for NPT-AIMD. 
For NVT-AIMD, the lattice at each time step is\n set to the lattice in the \"structure\" argument.\n \"\"\"\n self.structure = structure\n self.disp = displacements\n self.specie = specie\n self.temperature = temperature\n self.time_step = time_step\n self.step_skip = step_skip\n self.min_obs = min_obs\n self.smoothed = smoothed\n self.avg_nsteps = avg_nsteps\n self.lattices = lattices\n\n if lattices is None:\n self.lattices = np.array([structure.lattice.matrix.tolist()])\n\n indices = []\n framework_indices = []\n for i, site in enumerate(structure):\n if site.specie.symbol == specie:\n indices.append(i)\n else:\n framework_indices.append(i)\n if self.disp.shape[1] < 2:\n self.diffusivity = 0.0\n self.conductivity = 0.0\n self.diffusivity_components = np.array([0.0, 0.0, 0.0])\n self.conductivity_components = np.array([0.0, 0.0, 0.0])\n self.max_framework_displacement = 0\n else:\n framework_disp = self.disp[framework_indices]\n drift = np.average(framework_disp, axis=0)[None, :, :]\n\n # drift corrected position\n dc = self.disp - drift\n\n nions, nsteps, dim = dc.shape\n\n if not smoothed:\n timesteps = np.arange(0, nsteps)\n elif smoothed == \"constant\":\n if nsteps <= avg_nsteps:\n raise ValueError(\"Not enough data to calculate diffusivity\")\n timesteps = np.arange(0, nsteps - avg_nsteps)\n else:\n # limit the number of sampled timesteps to 200\n min_dt = int(1000 / (self.step_skip * self.time_step))\n max_dt = min(len(indices) * nsteps // self.min_obs, nsteps)\n if min_dt >= max_dt:\n raise ValueError(\"Not enough data to calculate diffusivity\")\n timesteps = np.arange(\n min_dt, max_dt, max(int((max_dt - min_dt) / 200), 1)\n )\n\n dt = timesteps * self.time_step * self.step_skip\n\n # calculate the smoothed msd values\n msd = np.zeros_like(dt, dtype=np.double)\n sq_disp_ions = np.zeros((len(dc), len(dt)), dtype=np.double)\n msd_components = np.zeros(dt.shape + (3,))\n\n # calculate mean square charge displacement\n mscd = np.zeros_like(msd, dtype=np.double)\n\n for i, n in enumerate(timesteps):\n if not smoothed:\n dx = dc[:, i : i + 1, :]\n dcomponents = dc[:, i : i + 1, :]\n elif smoothed == \"constant\":\n dx = dc[:, i : i + avg_nsteps, :] - dc[:, 0:avg_nsteps, :]\n dcomponents = dc[:, i : i + avg_nsteps, :] - dc[:, 0:avg_nsteps, :]\n else:\n dx = dc[:, n:, :] - dc[:, :-n, :]\n dcomponents = dc[:, n:, :] - dc[:, :-n, :]\n\n # Get msd\n sq_disp = dx ** 2\n sq_disp_ions[:, i] = np.average(np.sum(sq_disp, axis=2), axis=1)\n msd[i] = np.average(sq_disp_ions[:, i][indices])\n\n msd_components[i] = np.average(dcomponents[indices] ** 2, axis=(0, 1))\n\n # Get mscd\n sq_chg_disp = np.sum(dx[indices, :, :], axis=0) ** 2\n mscd[i] = np.average(np.sum(sq_chg_disp, axis=1), axis=0) / len(indices)\n\n def weighted_lstsq(a, b):\n if smoothed == \"max\":\n # For max smoothing, we need to weight by variance.\n w_root = (1 / dt) ** 0.5\n return np.linalg.lstsq(a * w_root[:, None], b * w_root, rcond=None)\n return np.linalg.lstsq(a, b, rcond=None)\n\n # Get self diffusivity\n m_components = np.zeros(3)\n m_components_res = np.zeros(3)\n a = np.ones((len(dt), 2))\n a[:, 0] = dt\n for i in range(3):\n (m, c), res, rank, s = weighted_lstsq(a, msd_components[:, i])\n m_components[i] = max(m, 1e-15)\n m_components_res[i] = res[0]\n\n (m, c), res, rank, s = weighted_lstsq(a, msd)\n # m shouldn't be negative\n m = max(m, 1e-15)\n\n # Get also the charge diffusivity\n (m_chg, c_chg), res_chg, _, _ = weighted_lstsq(a, mscd)\n # m shouldn't be negative\n m_chg = max(m_chg, 1e-15)\n\n # factor of 10 is 
to convert from A^2/fs to cm^2/s\n # factor of 6 is for dimensionality\n conv_factor = get_conversion_factor(\n self.structure, self.specie, self.temperature\n )\n self.diffusivity = m / 60\n self.chg_diffusivity = m_chg / 60\n\n # Calculate the error in the diffusivity using the error in the\n # slope from the lst sq.\n # Variance in slope = n * Sum Squared Residuals / (n * Sxx - Sx\n # ** 2) / (n-2).\n n = len(dt)\n\n # Pre-compute the denominator since we will use it later.\n # We divide dt by 1000 to avoid overflow errors in some systems (\n # e.g., win). This is subsequently corrected where denom is used.\n denom = (n * np.sum((dt / 1000) ** 2) - np.sum(dt / 1000) ** 2) * (n - 2)\n self.diffusivity_std_dev = np.sqrt(n * res[0] / denom) / 60 / 1000\n self.chg_diffusivity_std_dev = np.sqrt(n * res_chg[0] / denom) / 60 / 1000\n self.conductivity = self.diffusivity * conv_factor\n self.chg_conductivity = self.chg_diffusivity * conv_factor\n self.conductivity_std_dev = self.diffusivity_std_dev * conv_factor\n\n self.diffusivity_components = m_components / 20\n self.diffusivity_components_std_dev = (\n np.sqrt(n * m_components_res / denom) / 20 / 1000\n )\n self.conductivity_components = self.diffusivity_components * conv_factor\n self.conductivity_components_std_dev = (\n self.diffusivity_components_std_dev * conv_factor\n )\n\n # Drift and displacement information.\n self.drift = drift\n self.corrected_displacements = dc\n self.max_ion_displacements = np.max(np.sum(dc ** 2, axis=-1) ** 0.5, axis=1)\n self.max_framework_displacement = np.max(\n self.max_ion_displacements[framework_indices]\n )\n self.msd = msd\n self.mscd = mscd\n self.haven_ratio = self.diffusivity / self.chg_diffusivity\n self.sq_disp_ions = sq_disp_ions\n self.msd_components = msd_components\n self.dt = dt\n self.indices = indices\n self.framework_indices = framework_indices\n\n def get_drift_corrected_structures(self, start=None, stop=None, step=None):\n \"\"\"\n Returns an iterator for the drift-corrected structures. Use of\n iterator is to reduce memory usage as # of structures in MD can be\n huge. 
You don't often need all the structures all at once.\n\n Args:\n start, stop, step (int): applies a start/stop/step to the iterator.\n Faster than applying it after generation, as it reduces the\n number of structures created.\n \"\"\"\n coords = np.array(self.structure.cart_coords)\n species = self.structure.species_and_occu\n lattices = self.lattices\n nsites, nsteps, dim = self.corrected_displacements.shape\n\n for i in range(start or 0, stop or nsteps, step or 1):\n latt = lattices[0] if len(lattices) == 1 else lattices[i]\n yield Structure(\n latt,\n species,\n coords + self.corrected_displacements[:, i, :],\n coords_are_cartesian=True,\n )\n\n def get_summary_dict(self, include_msd_t=False, include_mscd_t=False):\n \"\"\"\n Provides a summary of diffusion information.\n\n Args:\n include_msd_t (bool): Whether to include mean square displace and\n time data with the data.\n include_msd_t (bool): Whether to include mean square charge displace and\n time data with the data.\n\n Returns:\n (dict) of diffusion and conductivity data.\n \"\"\"\n d = {\n \"D\": self.diffusivity,\n \"D_sigma\": self.diffusivity_std_dev,\n \"D_charge\": self.chg_diffusivity,\n \"D_charge_sigma\": self.chg_diffusivity_std_dev,\n \"S\": self.conductivity,\n \"S_sigma\": self.conductivity_std_dev,\n \"S_charge\": self.chg_conductivity,\n \"D_components\": self.diffusivity_components.tolist(),\n \"S_components\": self.conductivity_components.tolist(),\n \"D_components_sigma\": self.diffusivity_components_std_dev.tolist(),\n \"S_components_sigma\": self.conductivity_components_std_dev.tolist(),\n \"specie\": str(self.specie),\n \"step_skip\": self.step_skip,\n \"time_step\": self.time_step,\n \"temperature\": self.temperature,\n \"max_framework_displacement\": self.max_framework_displacement,\n \"Haven_ratio\": self.haven_ratio,\n }\n if include_msd_t:\n d[\"msd\"] = self.msd.tolist()\n d[\"msd_components\"] = self.msd_components.tolist()\n d[\"dt\"] = self.dt.tolist()\n if include_mscd_t:\n d[\"mscd\"] = self.mscd.tolist()\n return d\n\n def get_framework_rms_plot(self, plt=None, granularity=200, matching_s=None):\n \"\"\"\n Get the plot of rms framework displacement vs time. Useful for checking\n for melting, especially if framework atoms can move via paddle-wheel\n or similar mechanism (which would show up in max framework displacement\n but doesn't constitute melting).\n\n Args:\n plt (matplotlib.pyplot): If plt is supplied, changes will be made\n to an existing plot. Otherwise, a new plot will be created.\n granularity (int): Number of structures to match\n matching_s (Structure): Optionally match to a disordered structure\n instead of the first structure in the analyzer. 
Required when\n a secondary mobile ion is present.\n Notes:\n The method doesn't apply to NPT-AIMD simulation analysis.\n \"\"\"\n from pymatgen.util.plotting import pretty_plot\n\n if self.lattices is not None and len(self.lattices) > 1:\n warnings.warn(\n \"Note the method doesn't apply to NPT-AIMD \" \"simulation analysis!\"\n )\n\n plt = pretty_plot(12, 8, plt=plt)\n step = (self.corrected_displacements.shape[1] - 1) // (granularity - 1)\n f = (matching_s or self.structure).copy()\n f.remove_species([self.specie])\n sm = StructureMatcher(\n primitive_cell=False,\n stol=0.6,\n comparator=OrderDisorderElementComparator(),\n allow_subset=True,\n )\n rms = []\n for s in self.get_drift_corrected_structures(step=step):\n s.remove_species([self.specie])\n d = sm.get_rms_dist(f, s)\n if d:\n rms.append(d)\n else:\n rms.append((1, 1))\n max_dt = (len(rms) - 1) * step * self.step_skip * self.time_step\n if max_dt > 100000:\n plot_dt = np.linspace(0, max_dt / 1000, len(rms))\n unit = \"ps\"\n else:\n plot_dt = np.linspace(0, max_dt, len(rms))\n unit = \"fs\"\n rms = np.array(rms)\n plt.plot(plot_dt, rms[:, 0], label=\"RMS\")\n plt.plot(plot_dt, rms[:, 1], label=\"max\")\n plt.legend(loc=\"best\")\n plt.xlabel(\"Timestep ({})\".format(unit))\n plt.ylabel(\"normalized distance\")\n plt.tight_layout()\n return plt\n\n def get_msd_plot(self, plt=None, mode=\"specie\"):\n \"\"\"\n Get the plot of the smoothed msd vs time graph. Useful for\n checking convergence. This can be written to an image file.\n\n Args:\n plt: A plot object. Defaults to None, which means one will be\n generated.\n mode (str): Determines type of msd plot. By \"species\", \"sites\",\n or direction (default). If mode = \"mscd\", the smoothed mscd vs.\n time will be plotted.\n \"\"\"\n from pymatgen.util.plotting import pretty_plot\n\n plt = pretty_plot(12, 8, plt=plt)\n if np.max(self.dt) > 100000:\n plot_dt = self.dt / 1000\n unit = \"ps\"\n else:\n plot_dt = self.dt\n unit = \"fs\"\n\n if mode == \"species\":\n for sp in sorted(self.structure.composition.keys()):\n indices = [\n i for i, site in enumerate(self.structure) if site.specie == sp\n ]\n sd = np.average(self.sq_disp_ions[indices, :], axis=0)\n plt.plot(plot_dt, sd, label=sp.__str__())\n plt.legend(loc=2, prop={\"size\": 20})\n elif mode == \"sites\":\n for i, site in enumerate(self.structure):\n sd = self.sq_disp_ions[i, :]\n plt.plot(plot_dt, sd, label=\"%s - %d\" % (site.specie.__str__(), i))\n plt.legend(loc=2, prop={\"size\": 20})\n elif mode == \"mscd\":\n plt.plot(plot_dt, self.mscd, \"r\")\n plt.legend([\"Overall\"], loc=2, prop={\"size\": 20})\n else:\n # Handle default / invalid mode case\n plt.plot(plot_dt, self.msd, \"k\")\n plt.plot(plot_dt, self.msd_components[:, 0], \"r\")\n plt.plot(plot_dt, self.msd_components[:, 1], \"g\")\n plt.plot(plot_dt, self.msd_components[:, 2], \"b\")\n plt.legend([\"Overall\", \"a\", \"b\", \"c\"], loc=2, prop={\"size\": 20})\n\n plt.xlabel(\"Timestep ({})\".format(unit))\n if mode == \"mscd\":\n plt.ylabel(\"MSCD ($\\\\AA^2$)\")\n else:\n plt.ylabel(\"MSD ($\\\\AA^2$)\")\n plt.tight_layout()\n return plt\n\n def plot_msd(self, mode=\"default\"):\n \"\"\"\n Plot the smoothed msd vs time graph. 
Useful for checking convergence.\n\n Args:\n mode (str): Can be \"default\" (the default, shows only the MSD for\n the diffusing specie, and its components), \"ions\" (individual\n square displacements of all ions), \"species\" (mean square\n displacement by specie), or \"mscd\" (overall mean square charge\n displacement for diffusing specie).\n \"\"\"\n self.get_msd_plot(mode=mode).show()\n\n def export_msdt(self, filename):\n \"\"\"\n Writes MSD data to a csv file that can be easily plotted in other\n software.\n\n Args:\n filename (str): Filename. Supported formats are csv and dat. If\n the extension is csv, a csv file is written. Otherwise,\n a dat format is assumed.\n \"\"\"\n fmt = \"csv\" if filename.lower().endswith(\".csv\") else \"dat\"\n delimiter = \", \" if fmt == \"csv\" else \" \"\n with open(filename, \"wt\") as f:\n if fmt == \"dat\":\n f.write(\"# \")\n f.write(delimiter.join([\"t\", \"MSD\", \"MSD_a\", \"MSD_b\", \"MSD_c\", \"MSCD\"]))\n f.write(\"\\n\")\n for dt, msd, msdc, mscd in zip(\n self.dt, self.msd, self.msd_components, self.mscd\n ):\n f.write(\n delimiter.join([\"%s\" % v for v in [dt, msd] + list(msdc) + [mscd]])\n )\n f.write(\"\\n\")\n\n @classmethod\n def from_structures(\n cls,\n structures,\n specie,\n temperature,\n time_step,\n step_skip,\n initial_disp=None,\n initial_structure=None,\n **kwargs\n ):\n r\"\"\"\n Convenient constructor that takes in a list of Structure objects to\n perform diffusion analysis.\n\n Args:\n structures ([Structure]): list of Structure objects (must be\n ordered in sequence of run). E.g., you may have performed\n sequential VASP runs to obtain sufficient statistics.\n specie (Element/Species): Species to calculate diffusivity for as a\n String. E.g., \"Li\".\n temperature (float): Temperature of the diffusion run in Kelvin.\n time_step (int): Time step between measurements.\n step_skip (int): Sampling frequency of the displacements (\n time_step is multiplied by this number to get the real time\n between measurements)\n initial_disp (np.ndarray): Sometimes, you need to iteratively\n compute estimates of the diffusivity. This supplies an\n initial displacement that will be added on to the initial\n displacements. Note that this makes sense only when\n smoothed=False.\n initial_structure (Structure): Like initial_disp, this is used\n for iterative computations of estimates of the diffusivity. You\n typically need to supply both variables. 
This stipulates the\n initial structure from which the current set of displacements\n are computed.\n \\\\*\\\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_.\n Examples include smoothed, min_obs, avg_nsteps.\n \"\"\"\n p, l = [], []\n for i, s in enumerate(structures):\n if i == 0:\n structure = s\n p.append(np.array(s.frac_coords)[:, None])\n l.append(s.lattice.matrix)\n if initial_structure is not None:\n p.insert(0, np.array(initial_structure.frac_coords)[:, None])\n l.insert(0, initial_structure.lattice.matrix)\n else:\n p.insert(0, p[0])\n l.insert(0, l[0])\n\n p = np.concatenate(p, axis=1)\n dp = p[:, 1:] - p[:, :-1]\n dp = dp - np.round(dp)\n f_disp = np.cumsum(dp, axis=1)\n c_disp = []\n for i in f_disp:\n c_disp.append([np.dot(d, m) for d, m in zip(i, l[1:])])\n disp = np.array(c_disp)\n\n # If is NVT-AIMD, clear lattice data.\n if np.array_equal(l[0], l[-1]):\n l = np.array([l[0]])\n else:\n l = np.array(l)\n if initial_disp is not None:\n disp += initial_disp[:, None, :]\n\n return cls(\n structure,\n disp,\n specie,\n temperature,\n time_step,\n step_skip=step_skip,\n lattices=l,\n **kwargs\n )\n\n @classmethod\n def from_vaspruns(\n cls, vaspruns, specie, initial_disp=None, initial_structure=None, **kwargs\n ):\n r\"\"\"\n Convenient constructor that takes in a list of Vasprun objects to\n perform diffusion analysis.\n\n Args:\n vaspruns ([Vasprun]): List of Vaspruns (must be ordered in\n sequence of MD simulation). E.g., you may have performed\n sequential VASP runs to obtain sufficient statistics.\n specie (Element/Species): Species to calculate diffusivity for as a\n String. E.g., \"Li\".\n initial_disp (np.ndarray): Sometimes, you need to iteratively\n compute estimates of the diffusivity. This supplies an\n initial displacement that will be added on to the initial\n displacements. Note that this makes sense only when\n smoothed=False.\n initial_structure (Structure): Like initial_disp, this is used\n for iterative computations of estimates of the diffusivity. You\n typically need to supply both variables. 
This stipulates the\n initial stricture from which the current set of displacements\n are computed.\n \\\\*\\\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_.\n Examples include smoothed, min_obs, avg_nsteps.\n \"\"\"\n\n def get_structures(vaspruns):\n for i, vr in enumerate(vaspruns):\n if i == 0:\n step_skip = vr.ionic_step_skip or 1\n final_structure = vr.initial_structure\n temperature = vr.parameters[\"TEEND\"]\n time_step = vr.parameters[\"POTIM\"]\n yield step_skip, temperature, time_step\n # check that the runs are continuous\n fdist = pbc_diff(\n vr.initial_structure.frac_coords, final_structure.frac_coords\n )\n if np.any(fdist > 0.001):\n raise ValueError(\"initial and final structures do not \" \"match.\")\n final_structure = vr.final_structure\n\n assert (vr.ionic_step_skip or 1) == step_skip\n for s in vr.ionic_steps:\n yield s[\"structure\"]\n\n s = get_structures(vaspruns)\n step_skip, temperature, time_step = next(s)\n\n return cls.from_structures(\n structures=list(s),\n specie=specie,\n temperature=temperature,\n time_step=time_step,\n step_skip=step_skip,\n initial_disp=initial_disp,\n initial_structure=initial_structure,\n **kwargs\n )\n\n @classmethod\n def from_files(\n cls,\n filepaths,\n specie,\n step_skip=10,\n ncores=None,\n initial_disp=None,\n initial_structure=None,\n **kwargs\n ):\n r\"\"\"\n Convenient constructor that takes in a list of vasprun.xml paths to\n perform diffusion analysis.\n\n Args:\n filepaths ([str]): List of paths to vasprun.xml files of runs. (\n must be ordered in sequence of MD simulation). For example,\n you may have done sequential VASP runs and they are in run1,\n run2, run3, etc. You should then pass in\n [\"run1/vasprun.xml\", \"run2/vasprun.xml\", ...].\n specie (Element/Species): Species to calculate diffusivity for as a\n String. E.g., \"Li\".\n step_skip (int): Sampling frequency of the displacements (\n time_step is multiplied by this number to get the real time\n between measurements)\n ncores (int): Numbers of cores to use for multiprocessing. Can\n speed up vasprun parsing considerably. Defaults to None,\n which means serial. It should be noted that if you want to\n use multiprocessing, the number of ionic steps in all vasprun\n .xml files should be a multiple of the ionic_step_skip.\n Otherwise, inconsistent results may arise. Serial mode has no\n such restrictions.\n initial_disp (np.ndarray): Sometimes, you need to iteratively\n compute estimates of the diffusivity. This supplies an\n initial displacement that will be added on to the initial\n displacements. Note that this makes sense only when\n smoothed=False.\n initial_structure (Structure): Like initial_disp, this is used\n for iterative computations of estimates of the diffusivity. You\n typically need to supply both variables. 
This stipulates the\n initial structure from which the current set of displacements\n are computed.\n \\\\*\\\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_.\n Examples include smoothed, min_obs, avg_nsteps.\n \"\"\"\n if ncores is not None and len(filepaths) > 1:\n p = multiprocessing.Pool(ncores)\n vaspruns = p.imap(_get_vasprun, [(fp, step_skip) for fp in filepaths])\n analyzer = cls.from_vaspruns(\n vaspruns,\n specie=specie,\n initial_disp=initial_disp,\n initial_structure=initial_structure,\n **kwargs\n )\n p.close()\n p.join()\n return analyzer\n\n def vr(filepaths):\n offset = 0\n for p in filepaths:\n v = Vasprun(p, ionic_step_offset=offset, ionic_step_skip=step_skip)\n yield v\n # Recompute offset.\n offset = (-(v.nionic_steps - offset)) % step_skip\n\n return cls.from_vaspruns(\n vr(filepaths),\n specie=specie,\n initial_disp=initial_disp,\n initial_structure=initial_structure,\n **kwargs\n )\n\n def as_dict(self):\n \"\"\"\n Returns: MSONable dict\n \"\"\"\n return {\n \"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": self.structure.as_dict(),\n \"displacements\": self.disp.tolist(),\n \"specie\": self.specie,\n \"temperature\": self.temperature,\n \"time_step\": self.time_step,\n \"step_skip\": self.step_skip,\n \"min_obs\": self.min_obs,\n \"smoothed\": self.smoothed,\n \"avg_nsteps\": self.avg_nsteps,\n \"lattices\": self.lattices.tolist(),\n }\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Args:\n d (dict): Dict representation\n\n Returns: DiffusionAnalyzer\n \"\"\"\n structure = Structure.from_dict(d[\"structure\"])\n return cls(\n structure,\n np.array(d[\"displacements\"]),\n specie=d[\"specie\"],\n temperature=d[\"temperature\"],\n time_step=d[\"time_step\"],\n step_skip=d[\"step_skip\"],\n min_obs=d[\"min_obs\"],\n smoothed=d.get(\"smoothed\", \"max\"),\n avg_nsteps=d.get(\"avg_nsteps\", 1000),\n lattices=np.array(d.get(\"lattices\", [d[\"structure\"][\"lattice\"][\"matrix\"]])),\n )\n\n\ndef get_conversion_factor(structure, species, temperature):\n \"\"\"\n Conversion factor to convert between cm^2/s diffusivity measurements and\n mS/cm conductivity measurements based on number of atoms of diffusing\n species. Note that the charge is based on the oxidation state of the\n species (where available), or else the number of valence electrons\n (usually a good guess, esp for main group ions).\n\n Args:\n structure (Structure): Input structure.\n species (Element/Species): Diffusing species.\n temperature (float): Temperature of the diffusion run in Kelvin.\n\n Returns:\n Conversion factor.\n Conductivity (in mS/cm) = Conversion Factor * Diffusivity (in cm^2/s)\n \"\"\"\n df_sp = get_el_sp(species)\n if hasattr(df_sp, \"oxi_state\"):\n z = df_sp.oxi_state\n else:\n z = df_sp.full_electronic_structure[-1][2]\n\n n = structure.composition[species]\n\n vol = structure.volume * 1e-24 # units cm^3\n return (\n 1000\n * n\n / (vol * const.N_A)\n * z ** 2\n * (const.N_A * const.e) ** 2\n / (const.R * temperature)\n )\n\n\ndef _get_vasprun(args):\n \"\"\"\n Internal method to support multiprocessing.\n \"\"\"\n return Vasprun(args[0], ionic_step_skip=args[1], parse_dos=False, parse_eigen=False)\n\n\ndef fit_arrhenius(temps, diffusivities):\n \"\"\"\n Returns Ea, c, standard error of Ea from the Arrhenius fit:\n D = c * exp(-Ea/kT)\n\n Args:\n temps ([float]): A sequence of temperatures. units: K\n diffusivities ([float]): A sequence of diffusivities (e.g.,\n from DiffusionAnalyzer.diffusivity). 
units: cm^2/s\n \"\"\"\n t_1 = 1 / np.array(temps)\n logd = np.log(diffusivities)\n # Do a least squares regression of log(D) vs 1/T\n a = np.array([t_1, np.ones(len(temps))]).T\n w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)\n w = np.array(w)\n n = len(temps)\n if n > 2:\n std_Ea = (res[0] / (n - 2) / (n * np.var(t_1))) ** 0.5 * const.k / const.e\n else:\n std_Ea = None\n return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea\n\n\ndef get_extrapolated_diffusivity(temps, diffusivities, new_temp):\n \"\"\"\n Returns (Arrhenius) extrapolated diffusivity at new_temp\n\n Args:\n temps ([float]): A sequence of temperatures. units: K\n diffusivities ([float]): A sequence of diffusivities (e.g.,\n from DiffusionAnalyzer.diffusivity). units: cm^2/s\n new_temp (float): desired temperature. units: K\n\n Returns:\n (float) Diffusivity at extrapolated temp in mS/cm.\n \"\"\"\n Ea, c, _ = fit_arrhenius(temps, diffusivities)\n return c * np.exp(-Ea / (const.k / const.e * new_temp))\n\n\ndef get_extrapolated_conductivity(temps, diffusivities, new_temp, structure, species):\n \"\"\"\n Returns extrapolated mS/cm conductivity.\n\n Args:\n temps ([float]): A sequence of temperatures. units: K\n diffusivities ([float]): A sequence of diffusivities (e.g.,\n from DiffusionAnalyzer.diffusivity). units: cm^2/s\n new_temp (float): desired temperature. units: K\n structure (structure): Structure used for the diffusivity calculation\n species (string/Species): conducting species\n\n Returns:\n (float) Conductivity at extrapolated temp in mS/cm.\n \"\"\"\n return get_extrapolated_diffusivity(\n temps, diffusivities, new_temp\n ) * get_conversion_factor(structure, species, new_temp)\n\n\ndef get_arrhenius_plot(temps, diffusivities, diffusivity_errors=None, **kwargs):\n r\"\"\"\n Returns an Arrhenius plot.\n\n Args:\n temps ([float]): A sequence of temperatures.\n diffusivities ([float]): A sequence of diffusivities (e.g.,\n from DiffusionAnalyzer.diffusivity).\n diffusivity_errors ([float]): A sequence of errors for the\n diffusivities. If None, no error bar is plotted.\n \\\\*\\\\*kwargs:\n Any keyword args supported by matplotlib.pyplot.plot.\n\n Returns:\n A matplotlib.pyplot object. Do plt.show() to show the plot.\n \"\"\"\n Ea, c, _ = fit_arrhenius(temps, diffusivities)\n\n from pymatgen.util.plotting import pretty_plot\n\n plt = pretty_plot(12, 8)\n\n # log10 of the arrhenius fit\n arr = c * np.exp(-Ea / (const.k / const.e * np.array(temps)))\n\n t_1 = 1000 / np.array(temps)\n\n plt.plot(t_1, diffusivities, \"ko\", t_1, arr, \"k--\", markersize=10, **kwargs)\n if diffusivity_errors is not None:\n n = len(diffusivity_errors)\n plt.errorbar(\n t_1[0:n],\n diffusivities[0:n],\n yerr=diffusivity_errors,\n fmt=\"ko\",\n ecolor=\"k\",\n capthick=2,\n linewidth=2,\n )\n ax = plt.axes()\n ax.set_yscale(\"log\")\n plt.text(\n 0.6,\n 0.85,\n \"E$_a$ = {:.0f} meV\".format(Ea * 1000),\n fontsize=30,\n transform=plt.axes().transAxes,\n )\n plt.ylabel(\"D (cm$^2$/s)\")\n plt.xlabel(\"1000/T (K$^{-1}$)\")\n plt.tight_layout()\n return plt\n"
] | [
[
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.array_equal",
"numpy.arange",
"numpy.cumsum",
"numpy.var",
"numpy.concatenate",
"numpy.linalg.lstsq",
"numpy.max",
"numpy.round",
"numpy.zeros_like",
"numpy.any",
"numpy.average",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
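The `fit_arrhenius` helper in the record above fits log(D) against 1/T by least squares to recover the activation energy in D = c * exp(-Ea/kT). Below is a minimal numpy/scipy sketch of that regression (illustrative only; the temperatures and diffusivities are made-up sample values, not data from the record):

import numpy as np
import scipy.constants as const

# Made-up sample points: three temperatures (K) and diffusivities (cm^2/s).
temps = np.array([600.0, 800.0, 1000.0])
diffusivities = np.array([1e-8, 1e-6, 1e-5])

# Least-squares fit of log(D) = log(c) - (Ea/k) * (1/T).
t_inv = 1.0 / temps
a = np.column_stack([t_inv, np.ones_like(t_inv)])
(slope, intercept), *_ = np.linalg.lstsq(a, np.log(diffusivities), rcond=None)

Ea = -slope * const.k / const.e  # activation energy, converted from J to eV
c = np.exp(intercept)            # pre-exponential factor, cm^2/s
print('Ea = %.3f eV, c = %.3e cm^2/s' % (Ea, c))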
fengredrum/hands-on-es | [
"3432b818e4a522448516f2e84342d72a0eaa6531"
] | [
"es_to_df.py"
] | [
"from elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Q, Search\nimport pandas as pd\n\nes = Elasticsearch([{'host': '10.10.10.10', 'port': 9200}])\n\ndelete_docs = False\n\nquery = Q('range', update_time={'gte': \"2021-06-01T01:31:00\"}) | Q('range', title={'lte': 10})\ns = Search(using=es, index='s2').query(query)\nif delete_docs:\n s.delete()\ndocuments = [hit.to_dict() for hit in s.scan()]\ndf = pd.DataFrame.from_records(documents)\nnum_rows = len(df.index.values)\nprint(df)\nprint(f'nomber of rows: {num_rows}')\n"
] | [
[
"pandas.DataFrame.from_records"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
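The script in the record above funnels Elasticsearch hits into `pandas.DataFrame.from_records`. Below is a minimal sketch of that call with hard-coded stand-in records, so it runs without an Elasticsearch cluster (the field names are illustrative, not taken from a real index):

import pandas as pd

# Hard-coded stand-ins for the dicts produced by hit.to_dict() in the record.
documents = [
    {'title': 3, 'update_time': '2021-06-01T02:00:00'},
    {'title': 7, 'update_time': '2021-06-02T09:30:00'},
]
df = pd.DataFrame.from_records(documents)
print(df)
print(f'number of rows: {len(df.index)}')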
avani17101/trimesh | [
"e9fcff42384734b2d13dc4c8f66eb9b319945995",
"2115c0d393bbb75443cdd42cc18ab0f99bf1d081"
] | [
"trimesh/path/path.py",
"trimesh/primitives.py"
] | [
"\"\"\"\npath.py\n-----------\n\nA module designed to work with vector paths such as\nthose stored in a DXF or SVG file.\n\"\"\"\nimport numpy as np\n\nimport copy\nimport collections\n\nfrom ..points import plane_fit\nfrom ..geometry import plane_transform\nfrom ..visual import to_rgba\nfrom ..constants import log\nfrom ..constants import tol_path as tol\n\nfrom .util import concatenate\n\nfrom .. import util\nfrom .. import units\nfrom .. import bounds\nfrom .. import caching\nfrom .. import grouping\nfrom .. import exceptions\nfrom .. import transformations as tf\n\nfrom . import raster\nfrom . import repair\nfrom . import simplify\nfrom . import creation # NOQA\nfrom . import polygons\nfrom . import segments # NOQA\nfrom . import traversal\n\nfrom .exchange.export import export_path\n\nfrom scipy.spatial import cKDTree\nfrom shapely.geometry import Polygon\n\ntry:\n import networkx as nx\nexcept BaseException as E:\n # create a dummy module which will raise the ImportError\n # or other exception only when someone tries to use networkx\n nx = exceptions.ExceptionModule(E)\n\n\nclass Path(object):\n \"\"\"\n A Path object consists of vertices and entities. Vertices\n are a simple (n, dimension) float array of points in space.\n\n Entities are a list of objects representing geometric\n primitives, such as Lines, Arcs, BSpline, etc. All entities\n reference vertices by index, so any transform applied to the\n simple vertex array is applied to the entity.\n \"\"\"\n\n def __init__(self,\n entities=None,\n vertices=None,\n metadata=None,\n process=True,\n colors=None,\n **kwargs):\n \"\"\"\n Instantiate a path object.\n\n Parameters\n -----------\n entities : (m,) trimesh.path.entities.Entity\n Contains geometric entities\n vertices : (n, dimension) float\n The vertices referenced by entities\n metadata : dict\n Any metadata about the path\n process : bool\n Run simple cleanup or not\n \"\"\"\n\n self.entities = entities\n self.vertices = vertices\n\n # assign each color to each entity\n self.colors = colors\n # collect metadata into new dictionary\n self.metadata = dict()\n if metadata.__class__.__name__ == 'dict':\n self.metadata.update(metadata)\n\n # cache will dump whenever self.crc changes\n self._cache = caching.Cache(id_function=self.crc)\n\n if process:\n # literally nothing will work if vertices\n # aren't merged properly\n self.merge_vertices()\n\n def __repr__(self):\n \"\"\"\n Print a quick summary of the number of vertices and entities.\n \"\"\"\n return '<trimesh.{}(vertices.shape={}, len(entities)={})>'.format(\n type(self).__name__,\n self.vertices.shape,\n len(self.entities))\n\n def process(self):\n \"\"\"\n Apply basic cleaning functions to the Path object in- place.\n \"\"\"\n with self._cache:\n for func in self._process_functions():\n func()\n return self\n\n @property\n def colors(self):\n \"\"\"\n Colors are stored per-entity.\n\n Returns\n ------------\n colors : (len(entities), 4) uint8\n RGBA colors for each entity\n \"\"\"\n # start with default colors\n colors = np.ones((len(self.entities), 4))\n colors = (colors * [100, 100, 100, 255]).astype(np.uint8)\n # collect colors from entities\n for i, e in enumerate(self.entities):\n if hasattr(e, 'color') and e.color is not None:\n colors[i] = to_rgba(e.color)\n # don't allow parts of the color array to be written\n colors.flags['WRITEABLE'] = False\n return colors\n\n @colors.setter\n def colors(self, values):\n \"\"\"\n Set the color for every entity in the Path.\n\n Parameters\n ------------\n values : 
(len(entities), 4) uint8\n Color of each entity\n \"\"\"\n # if not set return\n if values is None:\n return\n # make sure colors are RGBA\n colors = to_rgba(values)\n if len(colors) != len(self.entities):\n raise ValueError('colors must be per-entity!')\n # otherwise assign each color to the entity\n for c, e in zip(colors, self.entities):\n e.color = c\n\n @property\n def vertices(self):\n return self._vertices\n\n @vertices.setter\n def vertices(self, values):\n self._vertices = caching.tracked_array(\n values, dtype=np.float64)\n\n @property\n def entities(self):\n \"\"\"\n The actual entities making up the path.\n\n Returns\n -----------\n entities : (n,) trimesh.path.entities.Entity\n Entities such as Line, Arc, or BSpline curves\n \"\"\"\n return self._entities\n\n @entities.setter\n def entities(self, values):\n if values is None:\n self._entities = np.array([])\n else:\n self._entities = np.asanyarray(values)\n\n @property\n def layers(self):\n \"\"\"\n Get a list of the layer for every entity.\n\n Returns\n ---------\n layers : (len(entities), ) any\n Whatever is stored in each `entity.layer`\n \"\"\"\n # layer is a required property for entities\n layers = [e.layer for e in self.entities]\n return layers\n\n def crc(self):\n \"\"\"\n A CRC of the current vertices and entities.\n\n Returns\n ------------\n crc : int\n CRC of entity points and vertices\n \"\"\"\n # first CRC the points in every entity\n target = caching.crc32(bytes().join(\n e._bytes() for e in self.entities))\n # XOR the CRC for the vertices\n target ^= self.vertices.fast_hash()\n return target\n\n def md5(self):\n \"\"\"\n An MD5 hash of the current vertices and entities.\n\n Returns\n ------------\n md5 : str\n Appended MD5 hashes\n \"\"\"\n # first MD5 the points in every entity\n target = '{}{}'.format(\n util.md5_object(bytes().join(\n e._bytes() for e in self.entities)),\n self.vertices.md5())\n\n return target\n\n @caching.cache_decorator\n def paths(self):\n \"\"\"\n Sequence of closed paths, encoded by entity index.\n\n Returns\n ---------\n paths : (n,) sequence of (*,) int\n Referencing self.entities\n \"\"\"\n paths = traversal.closed_paths(self.entities,\n self.vertices)\n return paths\n\n @caching.cache_decorator\n def dangling(self):\n \"\"\"\n List of entities that aren't included in a closed path\n\n Returns\n ----------\n dangling : (n,) int\n Index of self.entities\n \"\"\"\n if len(self.paths) == 0:\n return np.arange(len(self.entities))\n else:\n included = np.hstack(self.paths)\n dangling = np.setdiff1d(np.arange(len(self.entities)),\n included)\n return dangling\n\n @caching.cache_decorator\n def kdtree(self):\n \"\"\"\n A KDTree object holding the vertices of the path.\n\n Returns\n ----------\n kdtree : scipy.spatial.cKDTree\n Object holding self.vertices\n \"\"\"\n kdtree = cKDTree(self.vertices.view(np.ndarray))\n return kdtree\n\n @property\n def scale(self):\n \"\"\"\n What is a representitive number that reflects the magnitude\n of the world holding the paths, for numerical comparisons.\n\n Returns\n ----------\n scale : float\n Approximate size of the world holding this path\n \"\"\"\n # use vertices peak-peak rather than exact extents\n scale = float((self.vertices.ptp(axis=0) ** 2).sum() ** .5)\n return scale\n\n @caching.cache_decorator\n def length(self):\n \"\"\"\n The total discretized length of every entity.\n\n Returns\n --------\n length: float, summed length of every entity\n \"\"\"\n length = float(sum(i.length(self.vertices)\n for i in self.entities))\n return length\n\n 
@caching.cache_decorator\n def bounds(self):\n \"\"\"\n Return the axis aligned bounding box of the current path.\n\n Returns\n ----------\n bounds : (2, dimension) float\n AABB with (min, max) coordinates\n \"\"\"\n # get the exact bounds of each entity\n # some entities (aka 3- point Arc) have bounds that can't\n # be generated from just bound box of vertices\n points = np.array([e.bounds(self.vertices)\n for e in self.entities],\n dtype=np.float64)\n # flatten bound extrema into (n, dimension) array\n points = points.reshape((-1, self.vertices.shape[1]))\n # get the max and min of all bounds\n return np.array([points.min(axis=0),\n points.max(axis=0)],\n dtype=np.float64)\n\n @caching.cache_decorator\n def centroid(self):\n \"\"\"\n Return the centroid of axis aligned bounding box enclosing\n all entities of the path object.\n\n Returns\n -----------\n centroid : (d,) float\n Approximate centroid of the path\n \"\"\"\n centroid = self.bounds.mean(axis=0)\n return centroid\n\n @property\n def extents(self):\n \"\"\"\n The size of the axis aligned bounding box.\n\n Returns\n ---------\n extents : (dimension,) float\n Edge length of AABB\n \"\"\"\n return self.bounds.ptp(axis=0)\n\n @property\n def units(self):\n \"\"\"\n If there are units defined in self.metadata return them.\n\n Returns\n -----------\n units : str\n Current unit system\n \"\"\"\n if 'units' in self.metadata:\n return self.metadata['units']\n else:\n return None\n\n @units.setter\n def units(self, units):\n self.metadata['units'] = units\n\n def convert_units(self, desired, guess=False):\n \"\"\"\n Convert the units of the current drawing in place.\n\n Parameters\n -----------\n desired : str\n Unit system to convert to\n guess : bool\n If True will attempt to guess units\n \"\"\"\n units._convert_units(self,\n desired=desired,\n guess=guess)\n\n def explode(self):\n \"\"\"\n Turn every multi- segment entity into single segment\n entities in- place.\n \"\"\"\n new_entities = []\n for entity in self.entities:\n # explode into multiple entities\n new_entities.extend(entity.explode())\n # avoid setter and assign new entities\n self._entities = np.array(new_entities)\n # explicitly clear cache\n self._cache.clear()\n\n def fill_gaps(self, distance=0.025):\n \"\"\"\n Find vertices without degree 2 and try to connect to\n other vertices. 
Operations are done in-place.\n\n Parameters\n ----------\n distance : float\n Connect vertices up to this distance\n \"\"\"\n repair.fill_gaps(self, distance=distance)\n\n @property\n def is_closed(self):\n \"\"\"\n Are all entities connected to other entities.\n\n Returns\n -----------\n closed : bool\n Every entity is connected at its ends\n \"\"\"\n closed = all(i == 2 for i in\n dict(self.vertex_graph.degree()).values())\n\n return closed\n\n @property\n def is_empty(self):\n \"\"\"\n Are any entities defined for the current path.\n\n Returns\n ----------\n empty : bool\n True if no entities are defined\n \"\"\"\n return len(self.entities) == 0\n\n @caching.cache_decorator\n def vertex_graph(self):\n \"\"\"\n Return a networkx.Graph object for the entity connectiviy\n\n graph : networkx.Graph\n Holds vertex indexes\n \"\"\"\n graph, closed = traversal.vertex_graph(self.entities)\n return graph\n\n @caching.cache_decorator\n def vertex_nodes(self):\n \"\"\"\n Get a list of which vertex indices are nodes,\n which are either endpoints or points where the\n entity makes a direction change.\n\n Returns\n --------------\n nodes : (n, 2) int\n Indexes of self.vertices which are nodes\n \"\"\"\n nodes = np.vstack([e.nodes for e in self.entities])\n return nodes\n\n def apply_transform(self, transform):\n \"\"\"\n Apply a transformation matrix to the current path in- place\n\n Parameters\n -----------\n transform : (d+1, d+1) float\n Homogeneous transformations for vertices\n \"\"\"\n dimension = self.vertices.shape[1]\n transform = np.asanyarray(transform, dtype=np.float64)\n\n if transform.shape != (dimension + 1, dimension + 1):\n raise ValueError('transform is incorrect shape!')\n elif np.abs(transform - np.eye(dimension + 1)).max() < 1e-8:\n # if we've been passed an identity matrix do nothing\n return\n\n # make sure cache is up to date\n self._cache.verify()\n # new cache to transfer items\n cache = {}\n # apply transform to discretized paths\n if 'discrete' in self._cache.cache:\n cache['discrete'] = np.array([\n tf.transform_points(\n d, matrix=transform)\n for d in self.discrete])\n\n # things we can just straight up copy\n # as they are topological not geometric\n for key in ['root',\n 'paths',\n 'path_valid',\n 'dangling',\n 'vertex_graph',\n 'enclosure',\n 'enclosure_shell',\n 'enclosure_directed']:\n # if they're in cache save them from the purge\n if key in self._cache.cache:\n cache[key] = self._cache.cache[key]\n\n # transform vertices in place\n self.vertices = tf.transform_points(\n self.vertices,\n matrix=transform)\n # explicitly clear the cache\n self._cache.clear()\n self._cache.id_set()\n\n # populate the things we wangled\n self._cache.cache.update(cache)\n return self\n\n def apply_scale(self, scale):\n \"\"\"\n Apply a transformation matrix to the current path in- place\n\n Parameters\n -----------\n scale : float or (3,) float\n Scale to be applied to mesh\n \"\"\"\n dimension = self.vertices.shape[1]\n matrix = np.eye(dimension + 1)\n matrix[:dimension, :dimension] *= scale\n return self.apply_transform(matrix)\n\n def apply_translation(self, offset):\n \"\"\"\n Apply a transformation matrix to the current path in- place\n\n Parameters\n -----------\n offset : float or (3,) float\n Translation to be applied to mesh\n \"\"\"\n # work on 2D and 3D paths\n dimension = self.vertices.shape[1]\n # make sure offset is correct length and type\n offset = np.array(\n offset, dtype=np.float64).reshape(dimension)\n # create a homogeneous transform\n matrix = np.eye(dimension + 
1)\n # apply the offset\n matrix[:dimension, dimension] = offset\n\n return self.apply_transform(matrix)\n\n def apply_layer(self, name):\n \"\"\"\n Apply a layer name to every entity in the path.\n\n Parameters\n ------------\n name : str\n Apply layer name to every entity\n \"\"\"\n for e in self.entities:\n e.layer = name\n\n def rezero(self):\n \"\"\"\n Translate so that every vertex is positive in the current\n mesh is positive.\n\n Returns\n -----------\n matrix : (dimension + 1, dimension + 1) float\n Homogeneous transformations that was applied\n to the current Path object.\n \"\"\"\n # transform to the lower left corner\n matrix = tf.translation_matrix(-self.bounds[0])\n # cleanly apply trransformation matrix\n self.apply_transform(matrix)\n\n return matrix\n\n def merge_vertices(self, digits=None):\n \"\"\"\n Merges vertices which are identical and replace references\n by altering `self.entities` and `self.vertices`\n\n Parameters\n --------------\n digits : None, or int\n How many digits to consider when merging vertices\n \"\"\"\n if len(self.vertices) == 0:\n return\n if digits is None:\n digits = util.decimal_to_digits(\n tol.merge * self.scale,\n min_digits=1)\n\n unique, inverse = grouping.unique_rows(self.vertices,\n digits=digits)\n self.vertices = self.vertices[unique]\n\n entities_ok = np.ones(len(self.entities), dtype=np.bool)\n\n for index, entity in enumerate(self.entities):\n # what kind of entity are we dealing with\n kind = type(entity).__name__\n\n # entities that don't need runs merged\n # don't screw up control- point- knot relationship\n if kind in 'BSpline Bezier Text':\n entity.points = inverse[entity.points]\n continue\n # if we merged duplicate vertices, the entity may\n # have multiple references to the same vertex\n points = grouping.merge_runs(inverse[entity.points])\n # if there are three points and two are identical fix it\n if kind == 'Line':\n if len(points) == 3 and points[0] == points[-1]:\n points = points[:2]\n elif len(points) < 2:\n # lines need two or more vertices\n entities_ok[index] = False\n elif kind == 'Arc' and len(points) != 3:\n # three point arcs need three points\n entities_ok[index] = False\n\n # store points in entity\n entity.points = points\n\n # remove degenerate entities\n self.entities = self.entities[entities_ok]\n\n def replace_vertex_references(self, mask):\n \"\"\"\n Replace the vertex index references in every entity.\n\n Parameters\n ------------\n mask : (len(self.vertices), ) int\n Contains new vertex indexes\n\n Alters\n ------------\n entity.points in self.entities\n Replaced by mask[entity.points]\n \"\"\"\n for entity in self.entities:\n entity.points = mask[entity.points]\n\n def remove_entities(self, entity_ids):\n \"\"\"\n Remove entities by index.\n\n Parameters\n -----------\n entity_ids : (n,) int\n Indexes of self.entities to remove\n \"\"\"\n if len(entity_ids) == 0:\n return\n keep = np.ones(len(self.entities))\n keep[entity_ids] = False\n self.entities = self.entities[keep]\n\n def remove_invalid(self):\n \"\"\"\n Remove entities which declare themselves invalid\n\n Alters\n ----------\n self.entities: shortened\n \"\"\"\n valid = np.array([i.is_valid for i in self.entities],\n dtype=np.bool)\n self.entities = self.entities[valid]\n\n def remove_duplicate_entities(self):\n \"\"\"\n Remove entities that are duplicated\n\n Alters\n -------\n self.entities: length same or shorter\n \"\"\"\n entity_hashes = np.array([hash(i) for i in self.entities])\n unique, inverse = grouping.unique_rows(entity_hashes)\n 
if len(unique) != len(self.entities):\n self.entities = self.entities[unique]\n\n @caching.cache_decorator\n def referenced_vertices(self):\n \"\"\"\n Which vertices are referenced by an entity.\n\n Returns\n -----------\n referenced_vertices: (n,) int, indexes of self.vertices\n \"\"\"\n # no entities no reference\n if len(self.entities) == 0:\n return np.array([], dtype=np.int64)\n referenced = np.concatenate([e.points for e in self.entities])\n referenced = np.unique(referenced.astype(np.int64))\n\n return referenced\n\n def remove_unreferenced_vertices(self):\n \"\"\"\n Removes all vertices which aren't used by an entity.\n\n Alters\n ---------\n self.vertices: reordered and shortened\n self.entities: entity.points references updated\n \"\"\"\n\n unique = self.referenced_vertices\n\n mask = np.ones(len(self.vertices), dtype=np.int64) * -1\n mask[unique] = np.arange(len(unique), dtype=np.int64)\n\n self.replace_vertex_references(mask=mask)\n self.vertices = self.vertices[unique]\n\n def discretize_path(self, path):\n \"\"\"\n Given a list of entities, return a list of connected points.\n\n Parameters\n -----------\n path: (n,) int, indexes of self.entities\n\n Returns\n -----------\n discrete: (m, dimension)\n \"\"\"\n discrete = traversal.discretize_path(self.entities,\n self.vertices,\n path,\n scale=self.scale)\n return discrete\n\n @caching.cache_decorator\n def discrete(self):\n \"\"\"\n A sequence of connected vertices in space, corresponding to\n self.paths.\n\n Returns\n ---------\n discrete : (len(self.paths),)\n A sequence of (m*, dimension) float\n \"\"\"\n discrete = np.array([self.discretize_path(i)\n for i in self.paths])\n return discrete\n\n def export(self,\n file_obj=None,\n file_type=None,\n **kwargs):\n \"\"\"\n Export the path to a file object or return data.\n\n Parameters\n ---------------\n file_obj : None, str, or file object\n File object or string to export to\n file_type : None or str\n Type of file: dxf, dict, svg\n\n Returns\n ---------------\n exported : bytes or str\n Exported as specified type\n \"\"\"\n return export_path(self,\n file_type=file_type,\n file_obj=file_obj,\n **kwargs)\n\n def to_dict(self):\n export_dict = self.export(file_type='dict')\n return export_dict\n\n def copy(self):\n \"\"\"\n Get a copy of the current mesh\n\n Returns\n ---------\n copied : Path object\n Copy of self\n \"\"\"\n\n metadata = {}\n # grab all the keys into a list so if something is added\n # in another thread it probably doesn't stomp on our loop\n for key in list(self.metadata.keys()):\n try:\n metadata[key] = copy.deepcopy(self.metadata[key])\n except RuntimeError:\n # multiple threads\n log.warning('key {} changed during copy'.format(key))\n\n # copy the core data\n copied = type(self)(entities=copy.deepcopy(self.entities),\n vertices=copy.deepcopy(self.vertices),\n metadata=metadata,\n process=False)\n\n cache = {}\n # try to copy the cache over to the new object\n try:\n # save dict keys before doing slow iteration\n keys = list(self._cache.cache.keys())\n # run through each key and copy into new cache\n for k in keys:\n cache[k] = copy.deepcopy(self._cache.cache[k])\n except RuntimeError:\n # if we have multiple threads this may error and is NBD\n log.debug('unable to copy cache')\n except BaseException:\n # catch and log errors we weren't expecting\n log.error('unable to copy cache', exc_info=True)\n copied._cache.cache = cache\n copied._cache.id_set()\n\n return copied\n\n def scene(self):\n \"\"\"\n Get a scene object containing the current Path3D 
object.\n\n Returns\n --------\n scene: trimesh.scene.Scene object containing current path\n \"\"\"\n from ..scene import Scene\n scene = Scene(self)\n return scene\n\n def __add__(self, other):\n \"\"\"\n Concatenate two Path objects by appending vertices and\n reindexing point references.\n\n Parameters\n -----------\n other: Path object\n\n Returns\n -----------\n concat: Path object, appended from self and other\n \"\"\"\n concat = concatenate([self, other])\n return concat\n\n\nclass Path3D(Path):\n \"\"\"\n Hold multiple vector curves (lines, arcs, splines, etc) in 3D.\n \"\"\"\n\n def _process_functions(self):\n return [self.merge_vertices,\n self.remove_duplicate_entities,\n self.remove_unreferenced_vertices]\n\n def to_planar(self,\n to_2D=None,\n normal=None,\n check=True):\n \"\"\"\n Check to see if current vectors are all coplanar.\n\n If they are, return a Path2D and a transform which will\n transform the 2D representation back into 3 dimensions\n\n Parameters\n -----------\n to_2D: (4,4) float\n Homogeneous transformation matrix to apply,\n If not passed a plane will be fitted to vertices.\n normal: (3,) float, or None\n Approximate normal of direction of plane\n If to_2D is not specified sign\n will be applied to fit plane normal\n check: bool\n If True: Raise a ValueError if\n points aren't coplanar\n\n Returns\n -----------\n planar : trimesh.path.Path2D\n Current path transformed onto plane\n to_3D : (4,4) float\n Homeogenous transformations to move planar\n back into 3D space\n \"\"\"\n # which vertices are actually referenced\n referenced = self.referenced_vertices\n # if nothing is referenced return an empty path\n if len(referenced) == 0:\n return Path2D(), np.eye(4)\n\n # no explicit transform passed\n if to_2D is None:\n # fit a plane to our vertices\n C, N = plane_fit(self.vertices[referenced])\n # apply the normal sign hint\n if normal is not None:\n normal = np.asanyarray(normal, dtype=np.float64)\n if normal.shape == (3,):\n N *= np.sign(np.dot(N, normal))\n N = normal\n else:\n log.warning(\n \"passed normal not used: {}\".format(\n normal.shape))\n # create a transform from fit plane to XY\n to_2D = plane_transform(origin=C,\n normal=N)\n\n # make sure we've extracted a transform\n to_2D = np.asanyarray(to_2D, dtype=np.float64)\n if to_2D.shape != (4, 4):\n raise ValueError('unable to create transform!')\n\n # transform all vertices to 2D plane\n flat = tf.transform_points(self.vertices,\n to_2D)\n\n # Z values of vertices which are referenced\n heights = flat[referenced][:, 2]\n # points are not on a plane because Z varies\n if heights.ptp() > tol.planar:\n # since Z is inconsistent set height to zero\n height = 0.0\n if check:\n raise ValueError('points are not flat!')\n else:\n # if the points were planar store the height\n height = heights.mean()\n\n # the transform from 2D to 3D\n to_3D = np.linalg.inv(to_2D)\n\n # if the transform didn't move the path to\n # exactly Z=0 adjust it so the returned transform does\n if np.abs(height) > tol.planar:\n # adjust to_3D transform by height\n adjust = tf.translation_matrix(\n [0, 0, height])\n # apply the height adjustment to_3D\n to_3D = np.dot(to_3D, adjust)\n\n # copy metadata to new object\n metadata = copy.deepcopy(self.metadata)\n # store transform we used to move it onto the plane\n metadata['to_3D'] = to_3D\n\n # create the Path2D with the same entities\n # and XY values of vertices projected onto the plane\n planar = Path2D(entities=copy.deepcopy(self.entities),\n vertices=flat[:, :2],\n 
metadata=metadata,\n process=False)\n\n return planar, to_3D\n\n def show(self, **kwargs):\n \"\"\"\n Show the current Path3D object.\n \"\"\"\n scene = self.scene()\n return scene.show(**kwargs)\n\n def plot_discrete(self, show=False):\n \"\"\"\n Plot closed curves\n\n Parameters\n ------------\n show : bool\n If False will not execute matplotlib.pyplot.show\n \"\"\"\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D # NOQA\n fig = plt.figure()\n axis = fig.add_subplot(111, projection='3d')\n for discrete in self.discrete:\n axis.plot(*discrete.T)\n if show:\n plt.show()\n\n def plot_entities(self, show=False):\n \"\"\"\n Plot discrete version of entities without regards\n for connectivity.\n\n Parameters\n -------------\n show : bool\n If False will not execute matplotlib.pyplot.show\n \"\"\"\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D # NOQA\n fig = plt.figure()\n axis = fig.add_subplot(111, projection='3d')\n for entity in self.entities:\n vertices = entity.discrete(self.vertices)\n axis.plot(*vertices.T)\n if show:\n plt.show()\n\n\nclass Path2D(Path):\n \"\"\"\n Hold multiple vector curves (lines, arcs, splines, etc) in 3D.\n \"\"\"\n\n def show(self, annotations=True):\n \"\"\"\n Plot the current Path2D object using matplotlib.\n \"\"\"\n if self.is_closed:\n self.plot_discrete(show=True, annotations=annotations)\n else:\n self.plot_entities(show=True, annotations=annotations)\n\n def _process_functions(self):\n \"\"\"\n Return a list of functions to clean up a Path2D\n \"\"\"\n return [self.merge_vertices,\n self.remove_duplicate_entities,\n self.remove_unreferenced_vertices]\n\n def apply_obb(self):\n \"\"\"\n Transform the current path so that its OBB is axis aligned\n and OBB center is at the origin.\n\n Returns\n -----------\n obb : (3, 3) float\n Homogeneous transformation matrix\n \"\"\"\n matrix = self.obb\n self.apply_transform(matrix)\n return matrix\n\n @caching.cache_decorator\n def obb(self):\n \"\"\"\n Get a transform that centers and aligns the OBB of the\n referenced vertices with the XY axis.\n\n Returns\n -----------\n obb : (3, 3) float\n Homogeneous transformation matrix\n \"\"\"\n matrix = bounds.oriented_bounds_2D(\n self.vertices[self.referenced_vertices])[0]\n return matrix\n\n def rasterize(self,\n pitch,\n origin,\n resolution=None,\n fill=True,\n width=None,\n **kwargs):\n \"\"\"\n Rasterize a Path2D object into a boolean image (\"mode 1\").\n\n Parameters\n ------------\n pitch: float, length in model space of a pixel edge\n origin: (2,) float, origin position in model space\n resolution: (2,) int, resolution in pixel space\n fill: bool, if True will return closed regions as filled\n width: int, if not None will draw outline this wide (pixels)\n\n Returns\n ------------\n raster: PIL.Image object, mode 1\n \"\"\"\n image = raster.rasterize(self,\n pitch=pitch,\n origin=origin,\n resolution=resolution,\n fill=fill,\n width=width)\n return image\n\n def sample(self, count, **kwargs):\n \"\"\"\n Use rejection sampling to generate random points inside a\n polygon.\n\n Parameters\n -----------\n count : int\n Number of points to return\n If there are multiple bodies, there will\n be up to count * bodies points returned\n factor : float\n How many points to test per loop\n IE, count * factor\n max_iter : int,\n Maximum number of intersection loops\n to run, total points sampled is\n count * factor * max_iter\n\n Returns\n -----------\n hit : (n, 2) float\n Random points inside polygon\n \"\"\"\n\n poly = 
self.polygons_full\n if len(poly) == 0:\n samples = np.array([])\n elif len(poly) == 1:\n samples = polygons.sample(poly[0], count=count, **kwargs)\n else:\n samples = util.vstack_empty([\n polygons.sample(i, count=count, **kwargs)\n for i in poly])\n\n return samples\n\n @property\n def body_count(self):\n return len(self.root)\n\n def to_3D(self, transform=None):\n \"\"\"\n Convert 2D path to 3D path on the XY plane.\n\n Parameters\n -------------\n transform : (4, 4) float\n If passed, will transform vertices.\n If not passed and 'to_3D' is in metadata\n that transform will be used.\n\n Returns\n -----------\n path_3D: Path3D version of current path\n \"\"\"\n # if there is a stored 'to_3D' transform in metadata use it\n if transform is None and 'to_3D' in self.metadata:\n transform = self.metadata['to_3D']\n\n # copy vertices and stack with zeros from (n, 2) to (n, 3)\n vertices = np.column_stack((copy.deepcopy(self.vertices),\n np.zeros(len(self.vertices))))\n if transform is not None:\n vertices = tf.transform_points(vertices,\n transform)\n # make sure everything is deep copied\n path_3D = Path3D(entities=copy.deepcopy(self.entities),\n vertices=vertices,\n metadata=copy.deepcopy(self.metadata))\n return path_3D\n\n @caching.cache_decorator\n def polygons_closed(self):\n \"\"\"\n Cycles in the vertex graph, as shapely.geometry.Polygons.\n These are polygon objects for every closed circuit, with no notion\n of whether a polygon is a hole or an area. Every polygon in this\n list will have an exterior, but NO interiors.\n\n Returns\n ---------\n polygons_closed: (n,) list of shapely.geometry.Polygon objects\n \"\"\"\n # will attempt to recover invalid garbage geometry\n # and will be None if geometry is unrecoverable\n polys = polygons.paths_to_polygons(self.discrete)\n return polys\n\n @caching.cache_decorator\n def polygons_full(self):\n \"\"\"\n A list of shapely.geometry.Polygon objects with interiors created\n by checking which closed polygons enclose which other polygons.\n\n Returns\n ---------\n full : (len(self.root),) shapely.geometry.Polygon\n Polygons containing interiors\n \"\"\"\n # pre- allocate the list to avoid indexing problems\n full = [None] * len(self.root)\n # store the graph to avoid cache thrashing\n enclosure = self.enclosure_directed\n # store closed polygons to avoid cache hits\n closed = self.polygons_closed\n\n # loop through root curves\n for i, root in enumerate(self.root):\n # a list of multiple Polygon objects that\n # are fully contained by the root curve\n children = [closed[child]\n for child in enclosure[root].keys()]\n # all polygons_closed are CCW, so for interiors reverse them\n holes = [np.array(p.exterior.coords)[::-1]\n for p in children]\n # a single Polygon object\n shell = closed[root].exterior\n # create a polygon with interiors\n full[i] = polygons.repair_invalid(Polygon(shell=shell,\n holes=holes))\n # so we can use advanced indexing\n full = np.array(full)\n\n return full\n\n @caching.cache_decorator\n def area(self):\n \"\"\"\n Return the area of the polygons interior.\n\n Returns\n ---------\n area: float, total area of polygons minus interiors\n \"\"\"\n area = float(sum(i.area for i in self.polygons_full))\n return area\n\n def extrude(self, height, **kwargs):\n \"\"\"\n Extrude the current 2D path into a 3D mesh.\n\n Parameters\n ----------\n height: float, how far to extrude the profile\n kwargs: passed directly to meshpy.triangle.build:\n triangle.build(mesh_info,\n verbose=False,\n refinement_func=None,\n attributes=False,\n 
volume_constraints=True,\n max_volume=None,\n allow_boundary_steiner=True,\n allow_volume_steiner=True,\n quality_meshing=True,\n generate_edges=None,\n generate_faces=False,\n min_angle=None)\n Returns\n --------\n mesh: trimesh object representing extruded polygon\n \"\"\"\n from ..primitives import Extrusion\n result = [Extrusion(polygon=i, height=height, **kwargs)\n for i in self.polygons_full]\n if len(result) == 1:\n return result[0]\n return result\n\n def triangulate(self, **kwargs):\n \"\"\"\n Create a region- aware triangulation of the 2D path.\n\n Parameters\n -------------\n **kwargs : dict\n Passed to trimesh.creation.triangulate_polygon\n\n Returns\n -------------\n vertices : (n, 2) float\n 2D vertices of triangulation\n faces : (n, 3) int\n Indexes of vertices for triangles\n \"\"\"\n from ..creation import triangulate_polygon\n\n # append vertices and faces into sequence\n v_seq = []\n f_seq = []\n\n # loop through polygons with interiors\n for polygon in self.polygons_full:\n v, f = triangulate_polygon(polygon, **kwargs)\n v_seq.append(v)\n f_seq.append(f)\n\n return util.append_faces(v_seq, f_seq)\n\n def medial_axis(self, resolution=None, clip=None):\n \"\"\"\n Find the approximate medial axis based\n on a voronoi diagram of evenly spaced points on the\n boundary of the polygon.\n\n Parameters\n ----------\n resolution : None or float\n Distance between each sample on the polygon boundary\n clip : None, or (2,) float\n Min, max number of samples\n\n Returns\n ----------\n medial : Path2D object\n Contains only medial axis of Path\n \"\"\"\n if resolution is None:\n resolution = self.scale / 1000.0\n\n # convert the edges to Path2D kwargs\n from .exchange.misc import edges_to_path\n\n # edges and vertices\n edge_vert = [polygons.medial_axis(i, resolution, clip)\n for i in self.polygons_full]\n # create a Path2D object for each region\n medials = [Path2D(**edges_to_path(\n edges=e, vertices=v)) for e, v in edge_vert]\n\n # get a single Path2D of medial axis\n medial = concatenate(medials)\n\n return medial\n\n def connected_paths(self, path_id, include_self=False):\n \"\"\"\n Given an index of self.paths find other paths which\n overlap with that path.\n\n Parameters\n -----------\n path_id : int\n Index of self.paths\n include_self : bool\n Should the result include path_id or not\n\n Returns\n -----------\n path_ids : (n, ) int\n Indexes of self.paths that overlap input path_id\n \"\"\"\n if len(self.root) == 1:\n path_ids = np.arange(len(self.polygons_closed))\n else:\n path_ids = list(nx.node_connected_component(\n self.enclosure,\n path_id))\n if include_self:\n return np.array(path_ids)\n return np.setdiff1d(path_ids, [path_id])\n\n def simplify(self, **kwargs):\n \"\"\"\n Return a version of the current path with colinear segments\n merged, and circles entities replacing segmented circular paths.\n\n Returns\n ---------\n simplified : Path2D object\n \"\"\"\n return simplify.simplify_basic(self, **kwargs)\n\n def simplify_spline(self, smooth=.0002, verbose=False):\n \"\"\"\n Convert paths into b-splines.\n\n Parameters\n -----------\n smooth : float\n How much the spline should smooth the curve\n verbose : bool\n Print detailed log messages\n\n Returns\n ------------\n simplified : Path2D\n Discrete curves replaced with splines\n \"\"\"\n return simplify.simplify_spline(self,\n smooth=smooth,\n verbose=verbose)\n\n def split(self, **kwargs):\n \"\"\"\n If the current Path2D consists of n 'root' curves,\n split them into a list of n Path2D objects\n\n Returns\n 
----------\n split: (n,) list of Path2D objects\n Each connected region and interiors\n \"\"\"\n return traversal.split(self)\n\n def plot_discrete(self, show=False, annotations=True):\n \"\"\"\n Plot the closed curves of the path.\n \"\"\"\n import matplotlib.pyplot as plt\n axis = plt.axes()\n axis.set_aspect('equal', 'datalim')\n\n for i, points in enumerate(self.discrete):\n color = ['g', 'k'][i in self.root]\n axis.plot(*points.T, color=color)\n\n if annotations:\n for e in self.entities:\n if not hasattr(e, 'plot'):\n continue\n e.plot(self.vertices)\n\n if show:\n plt.show()\n return axis\n\n def plot_entities(self, show=False, annotations=True, color=None):\n \"\"\"\n Plot the entities of the path, with no notion of topology\n \"\"\"\n import matplotlib.pyplot as plt\n # keep plot axis scaled the same\n plt.axes().set_aspect('equal', 'datalim')\n # hardcode a format for each entity type\n eformat = {'Line0': {'color': 'g', 'linewidth': 1},\n 'Line1': {'color': 'y', 'linewidth': 1},\n 'Arc0': {'color': 'r', 'linewidth': 1},\n 'Arc1': {'color': 'b', 'linewidth': 1},\n 'Bezier0': {'color': 'k', 'linewidth': 1},\n 'Bezier1': {'color': 'k', 'linewidth': 1},\n 'BSpline0': {'color': 'm', 'linewidth': 1},\n 'BSpline1': {'color': 'm', 'linewidth': 1}}\n for entity in self.entities:\n # if the entity has it's own plot method use it\n if annotations and hasattr(entity, 'plot'):\n entity.plot(self.vertices)\n continue\n # otherwise plot the discrete curve\n discrete = entity.discrete(self.vertices)\n # a unique key for entities\n e_key = entity.__class__.__name__ + str(int(entity.closed))\n\n fmt = eformat[e_key].copy()\n if color is not None:\n # passed color will override other optons\n fmt['color'] = color\n elif hasattr(entity, 'color'):\n # if entity has specified color use it\n fmt['color'] = entity.color\n plt.plot(*discrete.T, **fmt)\n if show:\n plt.show()\n\n @property\n def identifier(self):\n \"\"\"\n A unique identifier for the path.\n\n Returns\n ---------\n identifier: (5,) float, unique identifier\n \"\"\"\n if len(self.polygons_full) != 1:\n raise TypeError('Identifier only valid for single body')\n return polygons.polygon_hash(self.polygons_full[0])\n\n @caching.cache_decorator\n def identifier_md5(self):\n \"\"\"\n Return an MD5 of the identifier\n \"\"\"\n as_int = (self.identifier * 1e4).astype(np.int64)\n hashed = util.md5_object(as_int.tostring(order='C'))\n return hashed\n\n @property\n def path_valid(self):\n \"\"\"\n Returns\n ----------\n path_valid: (n,) bool, indexes of self.paths self.polygons_closed\n which are valid polygons\n \"\"\"\n valid = [i is not None for i in self.polygons_closed]\n valid = np.array(valid, dtype=np.bool)\n return valid\n\n @caching.cache_decorator\n def root(self):\n \"\"\"\n Which indexes of self.paths/self.polygons_closed are root curves.\n Also known as 'shell' or 'exterior.\n\n Returns\n ---------\n root: (n,) int, list of indexes\n \"\"\"\n populate = self.enclosure_directed # NOQA\n return self._cache['root']\n\n @caching.cache_decorator\n def enclosure(self):\n \"\"\"\n Networkx Graph object of polygon enclosure.\n \"\"\"\n with self._cache:\n undirected = self.enclosure_directed.to_undirected()\n return undirected\n\n @caching.cache_decorator\n def enclosure_directed(self):\n \"\"\"\n Networkx DiGraph of polygon enclosure\n \"\"\"\n root, enclosure = polygons.enclosure_tree(self.polygons_closed)\n self._cache['root'] = root\n return enclosure\n\n @caching.cache_decorator\n def enclosure_shell(self):\n \"\"\"\n A dictionary of 
'shell' path indexes mapped to the indexes\n        of their 'hole' paths.\n\n        Returns\n        ----------\n        corresponding: dict, {index of shell path in self.paths : [indexes of hole paths]}\n        \"\"\"\n        pairs = [(r, self.connected_paths(r, include_self=False))\n                 for r in self.root]\n        # OrderedDict to maintain corresponding order\n        corresponding = collections.OrderedDict(pairs)\n        return corresponding\n",
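For orientation, a short illustrative usage sketch of the Path2D API stored above (sample, extrude, to_3D); it is not part of the dataset row. It assumes trimesh and shapely are installed and that trimesh.load_path accepts a shapely Polygon:

import trimesh
from shapely.geometry import Point

# build a Path2D from a shapely polygon (a buffered point, i.e. a discretized circle)
path = trimesh.load_path(Point([0.0, 0.0]).buffer(1.0))

points = path.sample(100)         # rejection-sampled points inside the polygon
solid = path.extrude(height=2.0)  # returns an Extrusion primitive (see primitives.py below)
path_3d = path.to_3D()            # same entities lifted onto the XY plane in 3D
print(path.area, len(points))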
"\"\"\"\nprimitives.py\n----------------\n\nSubclasses of Trimesh objects that are parameterized as primitives.\n\nUseful because you can move boxes and spheres around, and then use\ntrimesh operations on them at any point.\n\"\"\"\nimport numpy as np\nimport pprint\nimport copy\n\nfrom . import util\nfrom . import sample\nfrom . import caching\nfrom . import inertia\nfrom . import creation\nfrom . import triangles\nfrom . import transformations as tf\n\nfrom .base import Trimesh\nfrom .constants import log, tol\n\n\nclass _Primitive(Trimesh):\n \"\"\"\n Geometric _Primitives which are a subclass of Trimesh.\n Mesh is generated lazily when vertices or faces are requested.\n \"\"\"\n\n # ignore superclass copy directives\n __copy__ = None\n __deepcopy__ = None\n\n def __init__(self, *args, **kwargs):\n super(_Primitive, self).__init__(*args, **kwargs)\n self._data.clear()\n self._validate = False\n\n def __repr__(self):\n return '<trimesh.primitives.{}>'.format(type(self).__name__)\n\n @property\n def faces(self):\n stored = self._cache['faces']\n if util.is_shape(stored, (-1, 3)):\n return stored\n self._create_mesh()\n return self._cache['faces']\n\n @faces.setter\n def faces(self, values):\n log.warning('Primitive faces are immutable! Not setting!')\n\n @property\n def vertices(self):\n stored = self._cache['vertices']\n if util.is_shape(stored, (-1, 3)):\n return stored\n\n self._create_mesh()\n return self._cache['vertices']\n\n @vertices.setter\n def vertices(self, values):\n if values is not None:\n log.warning('Primitive vertices are immutable! Not setting!')\n\n @property\n def face_normals(self):\n # we need to avoid the logic in the superclass that\n # is specific to the data model prioritizing faces\n stored = self._cache['face_normals']\n if util.is_shape(stored, (-1, 3)):\n return stored\n # just calculate if not stored\n unit, valid = triangles.normals(self.triangles)\n normals = np.zeros((len(valid), 3))\n normals[valid] = unit\n # store and return\n self._cache['face_normals'] = normals\n return normals\n\n @face_normals.setter\n def face_normals(self, values):\n if values is not None:\n log.warning('Primitive face normals are immutable! 
Not setting!')\n\n def copy(self, **kwargs):\n \"\"\"\n Return a copy of the Primitive object.\n\n Returns\n -------------\n copied : object\n Copy of current primitive\n \"\"\"\n return copy.deepcopy(self)\n\n def to_mesh(self):\n \"\"\"\n Return a copy of the Primitive object as a Trimesh object.\n \"\"\"\n result = Trimesh(vertices=self.vertices.copy(),\n faces=self.faces.copy(),\n face_normals=self.face_normals.copy(),\n process=False)\n return result\n\n def apply_transform(self, matrix):\n \"\"\"\n Apply a transform to the current primitive by\n setting self.transform\n\n Parameters\n ------------\n matrix: (4,4) float\n Homogeneous transformation\n \"\"\"\n matrix = np.asanyarray(matrix, order='C', dtype=np.float64)\n if matrix.shape != (4, 4):\n raise ValueError('Transformation matrix must be (4,4)!')\n\n if util.allclose(matrix, np.eye(4), 1e-8):\n log.debug('apply_transform received identity matrix')\n return\n\n new_transform = np.dot(matrix, self.primitive.transform)\n self.primitive.transform = new_transform\n return self\n\n def _create_mesh(self):\n raise ValueError('Primitive doesn\\'t define mesh creation!')\n\n\nclass _PrimitiveAttributes(object):\n \"\"\"\n Hold the mutable data which defines a primitive.\n \"\"\"\n\n def __init__(self, parent, defaults, kwargs):\n self._data = parent._data\n self._defaults = defaults\n self._parent = parent\n self._data.update(defaults)\n self._mutable = True\n for key, value in kwargs.items():\n if key in defaults:\n self._data[key] = util.convert_like(\n value, defaults[key])\n # if configured as immutable, apply setting after instantiation values\n # are set\n if 'mutable' in kwargs:\n self._mutable = bool(kwargs['mutable'])\n\n @property\n def __doc__(self):\n # this is generated dynamically as the format operation can be surprisingly\n # slow and if generated in __init__ it is called a lot of times\n # when we didn't really need to generate it\n\n doc = (\n 'Store the attributes of a {name} object.\\n\\n' +\n 'When these values are changed, the mesh geometry will \\n' +\n 'automatically be updated to reflect the new values.\\n\\n' +\n 'Available properties and their default values are:\\n {defaults}' +\n '\\n\\nExample\\n---------------\\n' +\n 'p = trimesh.primitives.{name}()\\n' +\n 'p.primitive.radius = 10\\n' +\n '\\n').format(\n name=self._parent.__class__.__name__,\n defaults=pprint.pformat(\n self._defaults,\n width=-1)[1:-1])\n return doc\n\n def __getattr__(self, key):\n if '_' in key:\n return super(_PrimitiveAttributes, self).__getattr__(key)\n elif key in self._defaults:\n return util.convert_like(self._data[key], self._defaults[key])\n return super(_PrimitiveAttributes, self).__getattr__(key)\n\n def __setattr__(self, key, value):\n if '_' in key:\n return super(_PrimitiveAttributes, self).__setattr__(key, value)\n elif key in self._defaults:\n if self._mutable:\n self._data[key] = util.convert_like(value,\n self._defaults[key])\n else:\n raise ValueError(\n 'Primitive is configured as immutable! 
Cannot set attribute!')\n else:\n keys = list(self._defaults.keys())\n raise ValueError(\n 'Only default attributes {} can be set!'.format(keys))\n\n def to_kwargs(self):\n \"\"\"\n Return a dict with copies of kwargs for the current\n Primitive.\n\n Returns\n ------------\n kwargs : dict\n Arguments to reconstruct current PrimitiveAttributes\n \"\"\"\n return {k: copy.deepcopy(self._data[k])\n for k in self._defaults.keys()}\n\n def __dir__(self):\n result = sorted(dir(type(self)) +\n list(self._defaults.keys()))\n return result\n\n\nclass Cylinder(_Primitive):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create a Cylinder Primitive, a subclass of Trimesh.\n\n Parameters\n -------------\n radius : float\n Radius of cylinder\n height : float\n Height of cylinder\n transform : (4, 4) float\n Homogeneous transformation matrix\n sections : int\n Number of facets in circle\n \"\"\"\n super(Cylinder, self).__init__(*args, **kwargs)\n\n defaults = {'height': 10.0,\n 'radius': 1.0,\n 'transform': np.eye(4),\n 'sections': 32}\n self.primitive = _PrimitiveAttributes(self,\n defaults,\n kwargs)\n\n @caching.cache_decorator\n def volume(self):\n \"\"\"\n The analytic volume of the cylinder primitive.\n\n Returns\n ---------\n volume : float\n Volume of the cylinder\n \"\"\"\n volume = ((np.pi * self.primitive.radius ** 2) *\n self.primitive.height)\n return volume\n\n @caching.cache_decorator\n def moment_inertia(self):\n \"\"\"\n The analytic inertia tensor of the cylinder primitive.\n\n Returns\n ----------\n tensor: (3,3) float, 3D inertia tensor\n \"\"\"\n\n tensor = inertia.cylinder_inertia(\n mass=self.volume,\n radius=self.primitive.radius,\n height=self.primitive.height,\n transform=self.primitive.transform)\n return tensor\n\n @caching.cache_decorator\n def direction(self):\n \"\"\"\n The direction of the cylinder's axis.\n\n Returns\n --------\n axis: (3,) float, vector along the cylinder axis\n \"\"\"\n axis = np.dot(self.primitive.transform, [0, 0, 1, 0])[:3]\n return axis\n\n @property\n def segment(self):\n \"\"\"\n A line segment which if inflated by cylinder radius\n would represent the cylinder primitive.\n\n Returns\n -------------\n segment : (2, 3) float\n Points representing a single line segment\n \"\"\"\n # half the height\n half = self.primitive.height / 2.0\n # apply the transform to the Z- aligned segment\n points = np.dot(\n self.primitive.transform,\n np.transpose([[0, 0, -half, 1], [0, 0, half, 1]])).T[:, :3]\n return points\n\n def buffer(self, distance):\n \"\"\"\n Return a cylinder primitive which covers the source cylinder\n by distance: radius is inflated by distance, height by twice\n the distance.\n\n Parameters\n ------------\n distance : float\n Distance to inflate cylinder radius and height\n\n Returns\n -------------\n buffered : Cylinder\n Cylinder primitive inflated by distance\n \"\"\"\n distance = float(distance)\n\n buffered = Cylinder(\n height=self.primitive.height + distance * 2,\n radius=self.primitive.radius + distance,\n transform=self.primitive.transform.copy())\n return buffered\n\n def _create_mesh(self):\n log.debug('creating mesh for Cylinder primitive')\n mesh = creation.cylinder(radius=self.primitive.radius,\n height=self.primitive.height,\n sections=self.primitive.sections,\n transform=self.primitive.transform)\n\n self._cache['vertices'] = mesh.vertices\n self._cache['faces'] = mesh.faces\n self._cache['face_normals'] = mesh.face_normals\n\n\nclass Capsule(_Primitive):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create a 
Capsule Primitive, a subclass of Trimesh.\n\n Parameters\n ----------\n radius: float, radius of cylinder\n height: float, height of cylinder\n transform: (4,4) float, transformation matrix\n sections: int, number of facets in circle\n \"\"\"\n super(Capsule, self).__init__(*args, **kwargs)\n\n defaults = {'height': 1.0,\n 'radius': 1.0,\n 'transform': np.eye(4),\n 'sections': 32}\n self.primitive = _PrimitiveAttributes(self,\n defaults,\n kwargs)\n\n @caching.cache_decorator\n def direction(self):\n \"\"\"\n The direction of the capsule's axis.\n\n Returns\n --------\n axis: (3,) float, vector along the cylinder axis\n \"\"\"\n axis = np.dot(self.primitive.transform, [0, 0, 1, 0])[:3]\n return axis\n\n def _create_mesh(self):\n log.debug('creating mesh for Capsule primitive')\n\n mesh = creation.capsule(radius=self.primitive.radius,\n height=self.primitive.height)\n mesh.apply_transform(self.primitive.transform)\n\n self._cache['vertices'] = mesh.vertices\n self._cache['faces'] = mesh.faces\n self._cache['face_normals'] = mesh.face_normals\n\n\nclass Sphere(_Primitive):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create a Sphere Primitive, a subclass of Trimesh.\n\n Parameters\n ----------\n radius: float, radius of sphere\n center: (3,) float, center of sphere\n subdivisions: int, number of subdivisions for icosphere. Default is 3\n \"\"\"\n\n super(Sphere, self).__init__(*args, **kwargs)\n\n defaults = {'radius': 1.0,\n 'center': np.zeros(3, dtype=np.float64),\n 'subdivisions': 3}\n\n self.primitive = _PrimitiveAttributes(self,\n defaults,\n kwargs)\n\n def apply_transform(self, matrix):\n \"\"\"\n Apply a transform to the sphere primitive\n\n Parameters\n ------------\n matrix: (4,4) float, homogeneous transformation\n \"\"\"\n matrix = np.asanyarray(matrix, dtype=np.float64)\n if matrix.shape != (4, 4):\n raise ValueError('shape must be 4,4')\n\n center = np.dot(matrix,\n np.append(self.primitive.center, 1.0))[:3]\n self.primitive.center = center\n\n @property\n def bounds(self):\n # no docstring so will inherit Trimesh docstring\n # return exact bounds from primitive center and radius (rather than faces)\n # self.extents will also use this information\n bounds = np.array([self.primitive.center - self.primitive.radius,\n self.primitive.center + self.primitive.radius])\n return bounds\n\n @property\n def bounding_box_oriented(self):\n # for a sphere the oriented bounding box is the same as the axis aligned\n # bounding box, and a sphere is the absolute slowest case for the OBB calculation\n # as it is a convex surface with a ton of face normals that all need to\n # be checked\n return self.bounding_box\n\n @caching.cache_decorator\n def area(self):\n \"\"\"\n Surface area of the current sphere primitive.\n\n Returns\n --------\n area: float, surface area of the sphere Primitive\n \"\"\"\n\n area = 4.0 * np.pi * (self.primitive.radius ** 2)\n return area\n\n @caching.cache_decorator\n def volume(self):\n \"\"\"\n Volume of the current sphere primitive.\n\n Returns\n --------\n volume: float, volume of the sphere Primitive\n \"\"\"\n\n volume = (4.0 * np.pi * (self.primitive.radius ** 3)) / 3.0\n return volume\n\n @caching.cache_decorator\n def moment_inertia(self):\n \"\"\"\n The analytic inertia tensor of the sphere primitive.\n\n Returns\n ----------\n tensor: (3,3) float, 3D inertia tensor\n \"\"\"\n tensor = inertia.sphere_inertia(mass=self.volume,\n radius=self.primitive.radius)\n return tensor\n\n def _create_mesh(self):\n log.debug('creating mesh for Sphere primitive')\n 
unit = creation.icosphere(subdivisions=self.primitive.subdivisions)\n unit.vertices *= self.primitive.radius\n unit.vertices += self.primitive.center\n\n self._cache['vertices'] = unit.vertices\n self._cache['faces'] = unit.faces\n self._cache['face_normals'] = unit.face_normals\n\n\nclass Box(_Primitive):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create a Box Primitive, a subclass of Trimesh\n\n Parameters\n ----------\n extents: (3,) float, size of box\n transform: (4,4) float, transformation matrix for box center\n \"\"\"\n super(Box, self).__init__(*args, **kwargs)\n\n defaults = {'transform': np.eye(4),\n 'extents': np.ones(3)}\n self.primitive = _PrimitiveAttributes(self,\n defaults,\n kwargs)\n\n def sample_volume(self, count):\n \"\"\"\n Return random samples from inside the volume of the box.\n\n Parameters\n -------------\n count : int\n Number of samples to return\n\n Returns\n ----------\n samples : (count, 3) float\n Points inside the volume\n \"\"\"\n samples = sample.volume_rectangular(\n extents=self.primitive.extents,\n count=count,\n transform=self.primitive.transform)\n return samples\n\n def sample_grid(self, count=None, step=None):\n \"\"\"\n Return a 3D grid which is contained by the box.\n Samples are either 'step' distance apart, or there are\n 'count' samples per box side.\n\n Parameters\n -----------\n count : int or (3,) int\n If specified samples are spaced with np.linspace\n step : float or (3,) float\n If specified samples are spaced with np.arange\n\n Returns\n -----------\n grid : (n, 3) float\n Points inside the box\n \"\"\"\n\n if (count is not None and\n step is not None):\n raise ValueError('only step OR count can be specified!')\n\n # create pre- transform bounds from extents\n bounds = np.array([-self.primitive.extents,\n self.primitive.extents]) * .5\n\n if step is not None:\n grid = util.grid_arange(bounds, step=step)\n elif count is not None:\n grid = util.grid_linspace(bounds, count=count)\n else:\n raise ValueError('either count or step must be specified!')\n\n transformed = tf.transform_points(\n grid, matrix=self.primitive.transform)\n return transformed\n\n @property\n def is_oriented(self):\n \"\"\"\n Returns whether or not the current box is rotated at all.\n \"\"\"\n if util.is_shape(self.primitive.transform, (4, 4)):\n return not np.allclose(self.primitive.transform[\n 0:3, 0:3], np.eye(3))\n else:\n return False\n\n @caching.cache_decorator\n def volume(self):\n \"\"\"\n Volume of the box Primitive.\n\n Returns\n --------\n volume: float, volume of box\n \"\"\"\n volume = float(np.product(self.primitive.extents))\n return volume\n\n def _create_mesh(self):\n log.debug('creating mesh for Box primitive')\n box = creation.box(extents=self.primitive.extents,\n transform=self.primitive.transform)\n\n self._cache['vertices'] = box.vertices\n self._cache['faces'] = box.faces\n self._cache['face_normals'] = box.face_normals\n\n def as_outline(self):\n \"\"\"\n Return a Path3D containing the outline of the box.\n\n Returns\n -----------\n outline : trimesh.path.Path3D\n Outline of box primitive\n \"\"\"\n # do the import in function to keep soft dependency\n from .path.creation import box_outline\n # return outline with same size as primitive\n return box_outline(\n extents=self.primitive.extents,\n transform=self.primitive.transform)\n\n\nclass Extrusion(_Primitive):\n\n def __init__(self, triangle_args=None, *args, **kwargs):\n \"\"\"\n Create an Extrusion primitive, which\n is a subclass of Trimesh.\n\n Parameters\n ----------\n 
polygon : shapely.geometry.Polygon\n Polygon to extrude\n transform : (4,4) float\n Transform to apply after extrusion\n height : float\n Height to extrude polygon by\n triangle_args : str\n Arguments to pass to triangle\n \"\"\"\n # do the import here, fail early if Shapely isn't installed\n from shapely.geometry import Point\n super(Extrusion, self).__init__(*args, **kwargs)\n # save arguments for triangulation\n self.triangle_args = triangle_args\n # set default values\n defaults = {'polygon': Point([0, 0]).buffer(1.0),\n 'transform': np.eye(4),\n 'height': 1.0}\n self.primitive = _PrimitiveAttributes(self,\n defaults,\n kwargs)\n\n @caching.cache_decorator\n def area(self):\n \"\"\"\n The surface area of the primitive extrusion.\n\n Calculated from polygon and height to avoid mesh creation.\n\n Returns\n ----------\n area: float\n Surface area of 3D extrusion\n \"\"\"\n # area of the sides of the extrusion\n area = abs(self.primitive.height *\n self.primitive.polygon.length)\n # area of the two caps of the extrusion\n area += self.primitive.polygon.area * 2\n return area\n\n @caching.cache_decorator\n def volume(self):\n \"\"\"\n The volume of the Extrusion primitive.\n Calculated from polygon and height to avoid mesh creation.\n\n Returns\n ----------\n volume : float\n Volume of 3D extrusion\n \"\"\"\n # height may be negative\n volume = abs(self.primitive.polygon.area *\n self.primitive.height)\n return volume\n\n @caching.cache_decorator\n def direction(self):\n \"\"\"\n Based on the extrudes transform what is the\n vector along which the polygon will be extruded.\n\n Returns\n ---------\n direction : (3,) float\n Unit direction vector\n \"\"\"\n # only consider rotation and signed height\n direction = np.dot(\n self.primitive.transform[:3, :3],\n [0.0, 0.0, np.sign(self.primitive.height)])\n return direction\n\n @property\n def origin(self):\n \"\"\"\n Based on the extrude transform what is the\n origin of the plane it is extruded from.\n\n Returns\n -----------\n origin : (3,) float\n Origin of extrusion plane\n \"\"\"\n return self.primitive.transform[:3, 3]\n\n @caching.cache_decorator\n def bounding_box_oriented(self):\n # no docstring for inheritance\n # calculate OBB using 2D polygon and known axis\n from . 
import bounds\n # find the 2D bounding box using the polygon\n to_origin, box = bounds.oriented_bounds_2D(\n self.primitive.polygon.exterior.coords)\n # 3D extents\n extents = np.append(box, abs(self.primitive.height))\n # calculate to_3D transform from 2D obb\n rotation_Z = np.linalg.inv(tf.planar_matrix_to_3D(to_origin))\n rotation_Z[2, 3] = self.primitive.height / 2.0\n # combine the 2D OBB transformation with the 2D projection transform\n to_3D = np.dot(self.primitive.transform, rotation_Z)\n obb = Box(transform=to_3D,\n extents=extents,\n mutable=False)\n return obb\n\n def slide(self, distance):\n \"\"\"\n Alter the transform of the current extrusion to slide it\n along its extrude_direction vector\n\n Parameters\n -----------\n distance : float\n Distance along self.extrude_direction to move\n \"\"\"\n distance = float(distance)\n translation = np.eye(4)\n translation[2, 3] = distance\n new_transform = np.dot(self.primitive.transform.copy(),\n translation.copy())\n self.primitive.transform = new_transform\n\n def buffer(self, distance, distance_height=None, **kwargs):\n \"\"\"\n Return a new Extrusion object which is expanded in profile\n and in height by a specified distance.\n\n Parameters\n --------------\n distance : float\n Distance to buffer polygon\n distance_height : float\n Distance to buffer above and below extrusion\n kwargs : dict\n Passed to Extrusion constructor\n\n Returns\n ----------\n buffered : primitives.Extrusion\n Extrusion object with new values\n \"\"\"\n distance = float(distance)\n # if not specified use same distance for everything\n if distance_height is None:\n distance_height = distance\n\n # start with current height\n height = self.primitive.height\n # if current height is negative offset by negative amount\n height += np.sign(height) * 2.0 * distance_height\n\n # create a new extrusion with a buffered polygon\n # use type(self) vs Extrusion to handle subclasses\n buffered = type(self)(\n transform=self.primitive.transform.copy(),\n polygon=self.primitive.polygon.buffer(distance),\n height=height,\n **kwargs)\n\n # slide the stock along the axis\n buffered.slide(-np.sign(height) * distance_height)\n\n return buffered\n\n def _create_mesh(self):\n log.debug('creating mesh for Extrusion primitive')\n # extrude the polygon along Z\n mesh = creation.extrude_polygon(\n polygon=self.primitive.polygon,\n height=self.primitive.height,\n transform=self.primitive.transform,\n triangle_args=self.triangle_args)\n\n # check volume here in unit tests\n if tol.strict and mesh.volume < 0.0:\n raise ValueError('matrix inverted mesh!')\n\n # cache mesh geometry in the primitive\n self._cache['vertices'] = mesh.vertices\n self._cache['faces'] = mesh.faces\n"
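The _PrimitiveAttributes docstring above already sketches the intended workflow (p = trimesh.primitives.Sphere(); p.primitive.radius = 10). A slightly fuller, still illustrative sketch under the same assumptions:

import trimesh

sphere = trimesh.primitives.Sphere(radius=2.0)
box = trimesh.primitives.Box(extents=[1.0, 2.0, 3.0])

# analytic properties are computed from the primitive parameters, not from a mesh
print(sphere.volume)          # (4/3) * pi * 2**3
print(box.volume)             # 1 * 2 * 3 = 6.0
print(sphere.moment_inertia)

# changing a primitive attribute regenerates the lazily built mesh on next access
sphere.primitive.radius = 10.0
mesh = sphere.to_mesh()       # plain Trimesh copy of the generated geometry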
] | [
[
"numpy.hstack",
"numpy.dot",
"numpy.abs",
"numpy.linalg.inv",
"numpy.eye",
"matplotlib.pyplot.show",
"numpy.setdiff1d",
"numpy.concatenate",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"numpy.asanyarray",
"numpy.array",
"numpy.vstack",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"numpy.product",
"numpy.eye",
"numpy.ones",
"numpy.sign",
"numpy.append",
"numpy.asanyarray",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
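Each row pairs a source file with the fully qualified library calls found in it (the apis column) and with candidate library versions. As a minimal sketch of how such a call list could be recovered with only the standard library; the function name and the plain import-alias handling are illustrative assumptions, not this dataset's actual extraction pipeline:

import ast

def list_api_calls(source: str) -> list:
    """Collect dotted call names such as 'numpy.linalg.inv' from source code."""
    tree = ast.parse(source)
    # map import aliases ('np' -> 'numpy'); handles only 'import x as y' style
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name
    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            # unwind attribute chains like np.linalg.inv -> ['inv', 'linalg']
            parts, func = [], node.func
            while isinstance(func, ast.Attribute):
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name) and func.id in aliases:
                parts.append(aliases[func.id])
                calls.add('.'.join(reversed(parts)))
    return sorted(calls)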
tyIceStream/Kats | [
"abb507615b8ee2470461e7c368226bbb9a634065"
] | [
"kats/detectors/cusum_detection.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\n\"\"\"\nCUSUM stands for cumulative sum, it is a changepoint detection algorithm.\n\nIn the Kats implementation, it has two main components:\n\n 1. Locate the change point: The algorithm iteratively estimates the means\n before and after the change point and finds the change point\n maximizing/minimizing the cusum value until the change point has\n converged. The starting point for the change point is at the middle.\n\n 2. Hypothesis testing: Conducting log likelihood ratio test where the null\n hypothesis has no change point with one mean and the alternative\n hypothesis has a change point with two means.\n\nAnd here are a few things worth mentioning:\n\n * We assume there is only one increase/decrease change point;\n * We use Gaussian distribution as the underlying model to calculate the cusum\n value and conduct the hypothesis test;\n\nTypical usage example:\n\n>>> # Univariate CUSUM\n>>> timeseries = TimeSeriesData(...)\n>>> detector = CusumDetector(timeseries)\n>>> #Run detector\n>>> changepoints = detector.detector()\n>>> # Plot the results\n>>> detector.plot(changepoints)\n\nThe usage is the same for multivariate CUSUM except that the time series needs\nto be multivariate and that the plotting functions are not yet supported for\nthis use case.\n\"\"\"\n\nimport logging\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom kats.consts import (\n TimeSeriesChangePoint,\n TimeSeriesData,\n)\nfrom kats.detectors.detector import Detector\nfrom scipy.stats import chi2 # @manual\n\n\npd.options.plotting.matplotlib.register_converters = True\n\n# Constants\nCUSUM_DEFAULT_ARGS = {\n \"threshold\": 0.01,\n \"max_iter\": 10,\n \"delta_std_ratio\": 1.0,\n \"min_abs_change\": 0,\n \"start_point\": None,\n \"change_directions\": None,\n \"interest_window\": None,\n \"magnitude_quantile\": None,\n \"magnitude_ratio\": 1.3,\n \"magnitude_comparable_day\": 0.5,\n \"return_all_changepoints\": False,\n \"remove_seasonality\": False,\n}\n\n\ndef _get_arg(name: str, **kwargs) -> Any:\n return kwargs.get(name, CUSUM_DEFAULT_ARGS[name])\n\n\nclass CUSUMMetadata:\n \"\"\"CUSUM metadata\n\n This is the metadata of the changepoint returned by CusumDetectors\n\n Attributes:\n direction: a str stand for the changepoint change direction 'increase'\n or 'decrease'.\n cp_index: an int for changepoint index.\n mu0: a float indicates the mean before changepoint.\n mu1: a float indicates the mean after changepoint.\n delta: mu1 - mu0.\n llr: log likelihood ratio.\n llr_int: log likelihood ratio in the interest window.\n regression_detected: a bool indicates if regression detected.\n stable_changepoint: a bool indicates if we have a stable changepoint\n when locating the changepoint.\n p_value: p_value of the changepoint.\n p_value_int: p_value of the changepoint in the interest window.\n \"\"\"\n\n def __init__(\n self,\n direction: str,\n cp_index: int,\n mu0: Union[float, np.ndarray],\n mu1: Union[float, np.ndarray],\n delta: Union[float, np.ndarray],\n llr_int: float,\n llr: float,\n regression_detected: bool,\n stable_changepoint: bool,\n p_value: float,\n p_value_int: float,\n ):\n self._direction = direction\n self._cp_index = cp_index\n self._mu0 = mu0\n self._mu1 = mu1\n self._delta = delta\n self._llr_int = llr_int\n 
self._llr = llr\n self._regression_detected = regression_detected\n self._stable_changepoint = stable_changepoint\n self._p_value = p_value\n self._p_value_int = p_value_int\n\n @property\n def direction(self) -> str:\n return self._direction\n\n @property\n def cp_index(self) -> int:\n return self._cp_index\n\n @property\n def mu0(self) -> Union[float, np.ndarray]:\n return self._mu0\n\n @property\n def mu1(self) -> Union[float, np.ndarray]:\n return self._mu1\n\n @property\n def delta(self) -> Union[float, np.ndarray]:\n return self._delta\n\n @property\n def llr(self) -> float:\n return self._llr\n\n @property\n def llr_int(self) -> float:\n return self._llr_int\n\n @property\n def regression_detected(self) -> bool:\n return self._regression_detected\n\n @property\n def stable_changepoint(self) -> bool:\n return self._stable_changepoint\n\n @property\n def p_value(self) -> float:\n return self._p_value\n\n @property\n def p_value_int(self) -> float:\n return self._p_value_int\n\n def __str__(self) -> str:\n return (\n f\"CUSUMMetadata(direction: {self.direction}, index: {self.cp_index}\"\n f\", delta: {self.delta}, regression_detected: \"\n f\"{self.regression_detected}, stable_changepoint: \"\n f\"{self.stable_changepoint})\"\n )\n\n\nclass CUSUMDetector(Detector):\n interest_window: Optional[Tuple[int, int]] = None\n magnitude_quantile: Optional[float] = None\n magnitude_ratio: Optional[float] = None\n changes_meta: Optional[Dict[str, Dict[str, Any]]] = None\n\n def __init__(self, data: TimeSeriesData, is_multivariate: bool = False) -> None:\n \"\"\"Univariate CUSUM detector for level shifts\n\n Use cusum to detect changes, the algorithm is based on likelihood ratio\n cusum. See https://www.fs.isy.liu.se/Edu/Courses/TSFS06/PDFs/Basseville.pdf\n for details. This detector is used to detect mean changes in Normal\n Distribution.\n\n Args:\n\n data: :class:`kats.consts.TimeSeriesData`; The input time series data.\n is_multivariate: Optional; bool; should be False unless running\n MultiCUSUMDetector,\n \"\"\"\n super(CUSUMDetector, self).__init__(data=data)\n if not self.data.is_univariate() and not is_multivariate:\n msg = (\n \"CUSUMDetector only supports univariate time series, but got \"\n f\"{type(self.data.value)}. 
For multivariate time series, use \"\n \"MultiCUSUMDetector\"\n )\n logging.error(msg)\n raise ValueError(msg)\n\n def _get_change_point(\n self, ts: np.ndarray, max_iter: int, start_point: int, change_direction: str\n ) -> Dict[str, Any]:\n \"\"\"\n Find change point in the timeseries.\n \"\"\"\n interest_window = self.interest_window\n\n # locate the change point using cusum method\n if change_direction == \"increase\":\n changepoint_func = np.argmin\n logging.debug(\"Detecting increase changepoint.\")\n else:\n assert change_direction == \"decrease\"\n changepoint_func = np.argmax\n logging.debug(\"Detecting decrease changepoint.\")\n n = 0\n # use the middle point as initial change point to estimate mu0 and mu1\n if interest_window is not None:\n ts_int = ts[interest_window[0] : interest_window[1]]\n else:\n ts_int = ts\n\n if start_point is None:\n cusum_ts = np.cumsum(ts_int - np.mean(ts_int))\n changepoint = min(changepoint_func(cusum_ts), len(ts_int) - 2)\n else:\n changepoint = start_point\n\n mu0 = mu1 = None\n # iterate until the changepoint converage\n while n < max_iter:\n n += 1\n mu0 = np.mean(ts_int[: (changepoint + 1)])\n mu1 = np.mean(ts_int[(changepoint + 1) :])\n mean = (mu0 + mu1) / 2\n # here is where cusum is happening\n cusum_ts = np.cumsum(ts_int - mean)\n next_changepoint = max(1, min(changepoint_func(cusum_ts), len(ts_int) - 2))\n if next_changepoint == changepoint:\n break\n changepoint = next_changepoint\n\n if n == max_iter:\n logging.info(\"Max iteration reached and no stable changepoint found.\")\n stable_changepoint = False\n else:\n stable_changepoint = True\n\n # llr in interest window\n if interest_window is None:\n llr_int = np.inf\n pval_int = np.NaN\n delta_int = None\n else:\n llr_int = self._get_llr(\n ts_int,\n {\"mu0\": mu0, \"mu1\": mu1, \"changepoint\": changepoint},\n )\n pval_int = 1 - chi2.cdf(llr_int, 2)\n delta_int = mu1 - mu0\n changepoint += interest_window[0]\n\n # full time changepoint and mean\n mu0 = np.mean(ts[: (changepoint + 1)])\n mu1 = np.mean(ts[(changepoint + 1) :])\n return {\n \"changepoint\": changepoint,\n \"mu0\": mu0,\n \"mu1\": mu1,\n \"changetime\": self.data.time[changepoint],\n \"stable_changepoint\": stable_changepoint,\n \"delta\": mu1 - mu0,\n \"llr_int\": llr_int,\n \"p_value_int\": pval_int,\n \"delta_int\": delta_int,\n }\n\n def _get_llr(self, ts: np.ndarray, change_meta: Dict[str, Any]):\n \"\"\"\n Calculate the log likelihood ratio\n \"\"\"\n mu0: float = change_meta[\"mu0\"]\n mu1: float = change_meta[\"mu1\"]\n changepoint: int = change_meta[\"changepoint\"]\n scale = np.sqrt(\n (\n np.sum((ts[: (changepoint + 1)] - mu0) ** 2)\n + np.sum((ts[(changepoint + 1) :] - mu1) ** 2)\n )\n / (len(ts) - 2)\n )\n mu_tilde, sigma_tilde = np.mean(ts), np.std(ts)\n\n if scale == 0:\n scale = sigma_tilde * 0.01\n\n llr = -2 * (\n self._log_llr(ts[: (changepoint + 1)], mu_tilde, sigma_tilde, mu0, scale)\n + self._log_llr(ts[(changepoint + 1) :], mu_tilde, sigma_tilde, mu1, scale)\n )\n return llr\n\n def _log_llr(\n self, x: np.ndarray, mu0: float, sigma0: float, mu1: float, sigma1: float\n ) -> float:\n \"\"\"Helper function to calculate log likelihood ratio.\n\n This function calculate the log likelihood ratio of two Gaussian\n distribution log(l(0)/l(1)).\n\n Args:\n x: the data value.\n mu0: mean of model 0.\n sigma0: std of model 0.\n mu1: mean of model 1.\n sigma1: std of model 1.\n\n Returns:\n the value of log likelihood ratio.\n \"\"\"\n\n return np.sum(\n np.log(sigma1 / sigma0)\n + 0.5 * (((x - mu1) / sigma1) ** 
2 - ((x - mu0) / sigma0) ** 2)\n )\n\n def _magnitude_compare(self, ts: np.ndarray) -> float:\n \"\"\"\n Compare daily magnitude to avoid daily seasonality false positives.\n \"\"\"\n time = self.data.time\n interest_window = self.interest_window\n magnitude_ratio = self.magnitude_ratio\n if interest_window is None:\n raise ValueError(\"detect must be called first\")\n assert magnitude_ratio is not None\n\n # get number of days in historical window\n days = (time.max() - time.min()).days\n\n # get interest window magnitude\n mag_int = self._get_time_series_magnitude(\n ts[interest_window[0] : interest_window[1]]\n )\n\n comparable_mag = 0\n\n for i in range(days):\n start_time = time[interest_window[0]] - pd.Timedelta(f\"{i}D\")\n end_time = time[interest_window[1]] - pd.Timedelta(f\"{i}D\")\n start_idx = time[time == start_time].index[0]\n end_idx = time[time == end_time].index[0]\n\n hist_int = self._get_time_series_magnitude(ts[start_idx:end_idx])\n if mag_int / hist_int >= magnitude_ratio:\n comparable_mag += 1\n\n return comparable_mag / days\n\n def _get_time_series_magnitude(self, ts: np.ndarray) -> float:\n \"\"\"\n Calculate the magnitude of a time series.\n \"\"\"\n magnitude = np.quantile(ts, self.magnitude_quantile, interpolation=\"nearest\")\n return magnitude\n\n # pyre-fixme[14]: `detector` overrides method defined in `Detector` inconsistently.\n def detector(self, **kwargs) -> List[Tuple[TimeSeriesChangePoint, CUSUMMetadata]]:\n \"\"\"\n Find the change point and calculate related statistics.\n\n Args:\n\n threshold: Optional; float; significance level, default: 0.01.\n max_iter: Optional; int, maximum iteration in finding the\n changepoint.\n delta_std_ratio: Optional; float; the mean delta have to larger than\n this parameter times std of the data to be consider as a change.\n min_abs_change: Optional; int; minimal absolute delta between mu0\n and mu1.\n start_point: Optional; int; the start idx of the changepoint, if\n None means the middle of the time series.\n change_directions: Optional; list<str>; a list contain either or\n both 'increase' and 'decrease' to specify what type of change\n want to detect.\n interest_window: Optional; list<int, int>, a list containing the\n start and end of interest windows where we will look for change\n points. 
Note that llr will still be calculated using all data\n points.\n magnitude_quantile: Optional; float; the quantile for magnitude\n comparison, if none, will skip the magnitude comparison.\n magnitude_ratio: Optional; float; comparable ratio.\n magnitude_comparable_day: Optional; float; maximal percentage of\n days can have comparable magnitude to be considered as\n regression.\n return_all_changepoints: Optional; bool; return all the changepoints\n found, even the insignificant ones.\n\n Returns:\n A list of tuple of TimeSeriesChangePoint and CUSUMMetadata.\n \"\"\"\n # Extract all arg values or assign defaults from default vals constant\n threshold = _get_arg(\"threshold\", **kwargs)\n max_iter = _get_arg(\"max_iter\", **kwargs)\n delta_std_ratio = _get_arg(\"delta_std_ratio\", **kwargs)\n min_abs_change = _get_arg(\"min_abs_change\", **kwargs)\n start_point = _get_arg(\"start_point\", **kwargs)\n change_directions = _get_arg(\"change_directions\", **kwargs)\n interest_window = _get_arg(\"interest_window\", **kwargs)\n magnitude_quantile = _get_arg(\"magnitude_quantile\", **kwargs)\n magnitude_ratio = _get_arg(\"magnitude_ratio\", **kwargs)\n magnitude_comparable_day = _get_arg(\"magnitude_comparable_day\", **kwargs)\n return_all_changepoints = _get_arg(\"return_all_changepoints\", **kwargs)\n\n self.interest_window = interest_window\n self.magnitude_quantile = magnitude_quantile\n self.magnitude_ratio = magnitude_ratio\n\n # Use array to store the data\n ts = self.data.value.to_numpy()\n ts = ts.astype(\"float64\")\n changes_meta = {}\n\n if change_directions is None:\n change_directions = [\"increase\", \"decrease\"]\n\n for change_direction in change_directions:\n if change_direction not in {\"increase\", \"decrease\"}:\n raise ValueError(\n \"Change direction must be 'increase' or 'decrease.' \"\n f\"Got {change_direction}\"\n )\n\n change_meta = self._get_change_point(\n ts,\n max_iter=max_iter,\n start_point=start_point,\n change_direction=change_direction,\n )\n change_meta[\"llr\"] = self._get_llr(ts, change_meta)\n change_meta[\"p_value\"] = 1 - chi2.cdf(change_meta[\"llr\"], 2)\n\n # compare magnitude on interest_window and historical_window\n if np.min(ts) >= 0:\n if magnitude_quantile and interest_window:\n change_ts = ts if change_direction == \"increase\" else -ts\n mag_change = (\n self._magnitude_compare(change_ts) >= magnitude_comparable_day\n )\n else:\n mag_change = True\n else:\n mag_change = True\n if magnitude_quantile:\n logging.warning(\n (\n \"The minimal value is less than 0. 
Cannot perform \"\n \"magnitude comparison.\"\n )\n )\n\n if_significant = change_meta[\"llr\"] > chi2.ppf(1 - threshold, 2)\n if_significant_int = change_meta[\"llr_int\"] > chi2.ppf(1 - threshold, 2)\n if change_direction == \"increase\":\n larger_than_min_abs_change = (\n change_meta[\"mu0\"] + min_abs_change < change_meta[\"mu1\"]\n )\n else:\n larger_than_min_abs_change = (\n change_meta[\"mu0\"] > change_meta[\"mu1\"] + min_abs_change\n )\n larger_than_std = (\n np.abs(change_meta[\"delta\"])\n > np.std(ts[: change_meta[\"changepoint\"]]) * delta_std_ratio\n )\n\n change_meta[\"regression_detected\"] = (\n if_significant\n and if_significant_int\n and larger_than_min_abs_change\n and larger_than_std\n and mag_change\n )\n changes_meta[change_direction] = change_meta\n\n self.changes_meta = changes_meta\n\n return self._convert_cusum_changepoints(changes_meta, return_all_changepoints)\n\n def _convert_cusum_changepoints(\n self,\n cusum_changepoints: Dict[str, Dict[str, Any]],\n return_all_changepoints: bool,\n ) -> List[Tuple[TimeSeriesChangePoint, CUSUMMetadata]]:\n \"\"\"\n Convert the output from the other kats cusum algorithm into\n TimeSeriesChangePoint type.\n \"\"\"\n converted = []\n detected_cps = cusum_changepoints\n\n for direction in detected_cps:\n dir_cps = detected_cps[direction]\n if dir_cps[\"regression_detected\"] or return_all_changepoints:\n # we have a change point\n change_point = TimeSeriesChangePoint(\n start_time=dir_cps[\"changetime\"],\n end_time=dir_cps[\"changetime\"],\n confidence=1 - dir_cps[\"p_value\"],\n )\n metadata = CUSUMMetadata(\n direction=direction,\n cp_index=dir_cps[\"changepoint\"],\n mu0=dir_cps[\"mu0\"],\n mu1=dir_cps[\"mu1\"],\n delta=dir_cps[\"delta\"],\n llr_int=dir_cps[\"llr_int\"],\n llr=dir_cps[\"llr\"],\n regression_detected=dir_cps[\"regression_detected\"],\n stable_changepoint=dir_cps[\"stable_changepoint\"],\n p_value=dir_cps[\"p_value\"],\n p_value_int=dir_cps[\"p_value_int\"],\n )\n converted.append((change_point, metadata))\n\n return converted\n\n def plot(\n self, change_points: List[Tuple[TimeSeriesChangePoint, CUSUMMetadata]]\n ) -> None:\n \"\"\"Plot detection results from CUSUM.\n\n Args:\n change_points: A list of tuple of TimeSeriesChangePoint and\n CUSUMMetadata.\n \"\"\"\n time_col_name = self.data.time.name\n val_col_name = self.data.value.name\n\n data_df = self.data.to_dataframe()\n\n plt.plot(data_df[time_col_name].to_numpy(), data_df[val_col_name].to_numpy())\n\n if len(change_points) == 0:\n logging.warning(\"No change points detected!\")\n\n for change in change_points:\n if change[1].regression_detected:\n # pyre-fixme[6]: Expected `int` for 1st param but got `Timestamp`.\n plt.axvline(x=change[0].start_time, color=\"red\")\n\n interest_window = self.interest_window\n if interest_window is not None:\n plt.axvspan(\n pd.to_datetime(self.data.time)[interest_window[0]],\n pd.to_datetime(self.data.time)[interest_window[1] - 1],\n alpha=0.3,\n label=\"interets_window\",\n )\n\n plt.show()\n\n\nclass MultiCUSUMDetector(CUSUMDetector):\n \"\"\"\n MultiCUSUM is similar to univariate CUSUM, but we use MultiCUSUM to find a\n changepoint in multivariate time series. The detector is used to detect\n changepoints in the multivariate mean of the time series. 
The cusum values\n and likelihood ratio test calculations assume the underlying distribution\n has a Multivariate Guassian distriubtion.\n\n Attributes:\n data: The input time series data from TimeSeriesData\n \"\"\"\n\n def __init__(self, data: TimeSeriesData) -> None:\n super(MultiCUSUMDetector, self).__init__(data=data, is_multivariate=True)\n\n def detector(self, **kwargs) -> List[Tuple[TimeSeriesChangePoint, CUSUMMetadata]]:\n \"\"\"\n Overwrite the detector method for MultiCUSUMDetector.\n\n Args:\n threshold: Optional; float; significance level, default: 0.01.\n max_iter: Optional; int, maximum iteration in finding the\n changepoint.\n start_point: Optional; int; the start idx of the changepoint, if\n None means the middle of the time series.\n \"\"\"\n\n # Extract all arg values or assign defaults from default vals constant\n threshold = _get_arg(\"threshold\", **kwargs)\n max_iter = _get_arg(\"max_iter\", **kwargs)\n start_point = _get_arg(\"start_point\", **kwargs)\n\n # TODO: Add support for interest windows\n\n return_all_changepoints = _get_arg(\"return_all_changepoints\", **kwargs)\n\n # Use array to store the data\n ts = self.data.value.to_numpy()\n ts = ts.astype(\"float64\")\n changes_meta = {}\n\n # We will always be looking for increases in the CUSUM values for\n # multivariate detection. We keep using change_direction = \"increase\"\n # here to have consistent CUSUMMetadata with the univariate detector.\n for change_direction in [\"increase\"]:\n\n change_meta = self._get_change_point(\n ts,\n max_iter=max_iter,\n start_point=start_point,\n )\n change_meta[\"llr\"] = self._get_llr(ts, change_meta)\n change_meta[\"p_value\"] = 1 - chi2.cdf(change_meta[\"llr\"], ts.shape[1] + 1)\n\n if_significant = change_meta[\"llr\"] > chi2.ppf(\n 1 - threshold, ts.shape[1] + 1\n )\n\n change_meta[\"regression_detected\"] = if_significant\n changes_meta[change_direction] = change_meta\n\n self.changes_meta = changes_meta\n\n return self._convert_cusum_changepoints(changes_meta, return_all_changepoints)\n\n def _get_llr(self, ts: np.ndarray, change_meta: Dict[str, Any]):\n mu0: float = change_meta[\"mu0\"]\n mu1: float = change_meta[\"mu1\"]\n sigma0: float = change_meta[\"sigma0\"]\n sigma1: float = change_meta[\"sigma1\"]\n changepoint: int = change_meta[\"changepoint\"]\n\n mu_tilde = np.mean(ts, axis=0)\n sigma_pooled = np.cov(ts, rowvar=False)\n llr = -2 * (\n self._log_llr_multi(\n ts[: (changepoint + 1)],\n mu_tilde,\n sigma_pooled,\n mu0,\n sigma0,\n )\n - self._log_llr_multi(\n ts[(changepoint + 1) :],\n mu_tilde,\n sigma_pooled,\n mu1,\n sigma1,\n )\n )\n return llr\n\n def _log_llr_multi(\n self,\n x: np.ndarray,\n mu0: Union[float, np.ndarray],\n sigma0: Union[float, np.ndarray],\n mu1: Union[float, np.ndarray],\n sigma1: Union[float, np.ndarray],\n ):\n try:\n sigma0_inverse = np.linalg.inv(sigma0)\n sigma1_inverse = np.linalg.inv(sigma1)\n log_det_sigma0 = np.log(np.linalg.det(sigma0))\n log_det_sigma1 = np.log(np.linalg.det(sigma1))\n except np.linalg.linalg.LinAlgError:\n msg = \"One or more covariance matrix is singular.\"\n logging.error(msg)\n raise ValueError(msg)\n\n return len(x) / 2 * (log_det_sigma0 - log_det_sigma1) + np.sum(\n -np.matmul(np.matmul(x[i] - mu1, sigma1_inverse), (x[i] - mu1).T)\n + np.matmul(np.matmul(x[i] - mu0, sigma0_inverse), (x[i] - mu0).T)\n for i in range(len(x))\n )\n\n def _get_change_point(\n self,\n ts: np.ndarray,\n max_iter: int,\n start_point: int,\n change_direction: str = \"increase\",\n ) -> Dict[str, Any]:\n\n # locate the change 
point using cusum method\n changepoint_func = np.argmin\n n = 0\n ts_int = ts\n\n if start_point is None:\n start_point = len(ts_int) // 2\n changepoint = start_point\n\n # iterate until the changepoint converage\n while n < max_iter:\n n += 1\n data_before_changepoint = ts_int[: (changepoint + 1)]\n data_after_changepoint = ts_int[(changepoint + 1) :]\n\n mu0 = np.mean(data_before_changepoint, axis=0)\n mu1 = np.mean(data_after_changepoint, axis=0)\n\n # TODO: replace pooled variance with sample variances before and\n # after changepoint.\n # sigma0 = np.cov(data_before_changepoint, rowvar=False)\n # sigma1 = np.cov(data_after_changepoint, rowvar=False)\n sigma0 = sigma1 = np.cov(ts_int, rowvar=False)\n\n try:\n log_det_sigma0 = np.log(np.linalg.det(sigma0))\n log_det_sigma1 = np.log(np.linalg.det(sigma1))\n sigma0_inverse = np.linalg.inv(sigma0)\n sigma1_inverse = np.linalg.inv(sigma1)\n except np.linalg.linalg.LinAlgError:\n msg = \"One or more covariance matrix is singular.\"\n logging.error(msg)\n raise ValueError(msg)\n\n si_values = np.diag(\n -(1 / 2) * log_det_sigma1\n - np.matmul(np.matmul(ts_int - mu1, sigma1_inverse), (ts_int - mu1).T)\n + (1 / 2) * log_det_sigma0\n + np.matmul(np.matmul(ts_int - mu0, sigma0_inverse), (ts_int - mu0).T)\n )\n\n cusum_ts = np.cumsum(si_values)\n next_changepoint = max(\n 1, min(changepoint_func(cusum_ts), len(cusum_ts) - 2)\n )\n\n if next_changepoint == changepoint:\n break\n else:\n changepoint = next_changepoint\n\n if n == max_iter:\n logging.info(\"Max iteration reached and no stable changepoint found.\")\n stable_changepoint = False\n else:\n stable_changepoint = True\n\n llr_int = np.inf\n pval_int = np.NaN\n delta_int = None\n\n # full time changepoint and mean\n mu0 = np.mean(ts[: (changepoint + 1)], axis=0)\n mu1 = np.mean(ts[(changepoint + 1) :], axis=0)\n sigma0 = sigma1 = np.cov(ts, rowvar=False)\n\n return {\n \"changepoint\": changepoint,\n \"mu0\": mu0,\n \"mu1\": mu1,\n \"sigma0\": sigma0,\n \"sigma1\": sigma1,\n \"changetime\": self.data.time[changepoint],\n \"stable_changepoint\": stable_changepoint,\n \"delta\": mu1 - mu0,\n \"llr_int\": llr_int,\n \"p_value_int\": pval_int,\n \"delta_int\": delta_int,\n }\n"
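The module docstring above gives the intended entry point; expanded here into a self-contained run on synthetic data with a known level shift at the midpoint. Illustrative only; the 'time' column name follows the TimeSeriesData convention:

import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.detectors.cusum_detection import CUSUMDetector

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'time': pd.date_range('2021-01-01', periods=100, freq='D'),
    # mean 0 for 50 points, then mean 5: one 'increase' change point
    'value': np.r_[rng.normal(0, 1, 50), rng.normal(5, 1, 50)],
})

detector = CUSUMDetector(TimeSeriesData(df))
changepoints = detector.detector(change_directions=['increase'])
for cp, meta in changepoints:
    print(cp.start_time, meta.direction, meta.delta)
detector.plot(changepoints)
# MultiCUSUMDetector is used the same way on multivariate TimeSeriesData,
# but plotting is not supported for that case (see module docstring).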
] | [
[
"scipy.stats.chi2.ppf",
"numpy.log",
"matplotlib.pyplot.axvline",
"pandas.to_datetime",
"numpy.abs",
"numpy.min",
"numpy.linalg.inv",
"numpy.quantile",
"numpy.cumsum",
"pandas.Timedelta",
"numpy.matmul",
"numpy.linalg.det",
"numpy.std",
"numpy.cov",
"numpy.mean",
"scipy.stats.chi2.cdf",
"matplotlib.pyplot.show",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WAzaizeh/Halal_o_Meter | [
"4e452a135aef1de72a17b8c18910d6e0bc0fc4e9"
] | [
"src/features/target_feature/zabiha_list_DEPRECATED.py"
] | [
"''' A script to scrape a list of 744 confirmed Halal restaurants in\n NYC area from Zabiha.com\n As well as, requesting 338 halal tagged restaurants in NYC from Zomato.com\n'''\n\nimport review_scraper\nimport pandas as pd\nimport os, requests, json\nfrom dotenv import load_dotenv\n\n\ndef _zabiha_to_csv(url_dict):\n\n webdriver = review_scraper._get_webdriver()\n\n res_names_xpath = '//div[@class=\"titleBS\"]'\n res_address_xpath = '//div[@class=\"titleBS\"]/../div[@class=\"tinyLink\"]'\n df = pd.DataFrame(columns=['name', 'address', 'borough'])\n\n for key in url_dict:\n print('scraping {} results from Zabiha.com'.format(key))\n webdriver.get(url_dict[key])\n names = webdriver.find_elements_by_xpath(res_names_xpath)\n addresses = webdriver.find_elements_by_xpath(res_address_xpath)\n for name, address in zip(names, addresses):\n row = {'name' : name.text,\n 'address' : address.text,\n 'borough' : key,\n 'source' : 'Zabiha'}\n df = df.append(row, ignore_index=True)\n review_scraper._close_webdriver(webdriver)\n df.to_csv('/Users/wesamazaizeh/Desktop/Projects/halal_o_meter/src/data/data_collection/target_list.csv', mode='a', index=False)\n print('\\n{} rows added from Zabiha\\n'.format(df.shape[0]))\n\ndef _zomato_to_csv(city_id):\n load_dotenv()\n API_KEY = os.getenv('ZOMATO_API_KEY')\n offset = 0\n url = 'https://developers.zomato.com/api/v2.1/search?entity_id='\\\n + str(city_id) + '&entity_type=city&q=halal&start=' + str(offset)\n headers = {'user-key': '488f11265c3bf28f5d563dfd98697ad2'}\n r = requests.request(\"GET\", url, headers=headers)\n response = r.text\n json_obj = json.loads(response)\n\n # get total number of results\n offset_max = json_obj['results_found']\n print('Found {} results in Zomato.com'.format(offset_max))\n\n df = pd.DataFrame(columns=['name', 'address', 'borough'])\n while offset < offset_max:\n # request next page\n r = requests.request(\"GET\", url, headers=headers)\n response = r.text\n json_obj = json.loads(response)\n # get info and append to dataframe\n for restaurant in json_obj['restaurants']:\n restaurant = restaurant['restaurant']\n row = {'name' : restaurant['name'],\n 'address' : restaurant['location']['address'],\n 'borough' : restaurant['location']['city'],\n 'source' : 'Zomato'}\n df = df.append(row, ignore_index=True)\n # advance offset\n print('Progress: {0}/{1}'.format(offset+20, offset_max), end='\\r', flush=True)\n offset += 20\n df.to_csv('/Users/wesamazaizeh/Desktop/Projects/halal_o_meter/src/data/data_collection/target_list.csv', mode='a', index=False)\n print('\\n{} rows added from Zomato\\n'.format(df.shape[0]))\n\n\nif __name__ == \"__main__\":\n borough_urls = {'Manhattan' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Manhattan/NEwhtS6OzN',\n 'Brooklyn' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Brooklyn/3avrh3Cth4',\n 'Queens' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Queens/9Gku594eh7',\n 'The Bronx' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/The-Bronx/eIqsntUUuI',\n 'Staten Island' : 'https://www.zabihah.com/sub/United-States/New-York/New-York-City/Staten-Island/84zPaAaBZd'}\n _zabiha_to_csv(borough_urls)\n _zomato_to_csv(280) # city_id for NYC from Zomato cities API\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
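The zabiha script in the row above grows its DataFrames with `DataFrame.append` inside loops. That method was deprecated in pandas 1.4 and removed in 2.0, both of which appear in this row's version list, so a portable version of the same step accumulates plain dicts and builds the frame once. A minimal sketch of that pattern follows; the sample records are hypothetical stand-ins for what the real script scrapes with Selenium.

```python
import pandas as pd

# Hypothetical scraped records standing in for the name/address pairs
# the real script extracts from the page.
scraped = [("Kebab House", "123 Main St"), ("Shawarma Spot", "456 5th Ave")]

rows = []
for name, address in scraped:
    rows.append({'name': name, 'address': address,
                 'borough': 'Manhattan', 'source': 'Zabiha'})

# Build the DataFrame once instead of calling df.append per row,
# which works on every pandas version listed above, including 2.0.
df = pd.DataFrame(rows, columns=['name', 'address', 'borough', 'source'])
df.to_csv('target_list.csv', mode='a', index=False)
```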
mpmkp2021/pandas | [
"7c40d2c85df03a66268987d14d76ec1421429dde"
] | [
"pandas/core/groupby/ops.py"
] | [
"\"\"\"\nProvide classes to perform the groupby aggregate operations.\n\nThese are not exposed to the user and provide implementations of the grouping\noperations, primarily in cython. These classes (BaseGrouper and BinGrouper)\nare contained *in* the SeriesGroupBy and DataFrameGroupBy objects.\n\"\"\"\n\nimport collections\nfrom typing import List, Optional, Sequence, Tuple, Type\n\nimport numpy as np\n\nfrom pandas._libs import NaT, iNaT, lib\nimport pandas._libs.groupby as libgroupby\nimport pandas._libs.reduction as libreduction\nfrom pandas._typing import F, FrameOrSeries, Label\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n ensure_int64,\n ensure_int_or_float,\n ensure_platform_int,\n is_bool_dtype,\n is_categorical_dtype,\n is_complex_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_extension_array_dtype,\n is_integer_dtype,\n is_numeric_dtype,\n is_period_dtype,\n is_sparse,\n is_timedelta64_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.missing import _maybe_fill, isna\n\nimport pandas.core.algorithms as algorithms\nfrom pandas.core.base import SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.groupby import base, grouper\nfrom pandas.core.indexes.api import Index, MultiIndex, ensure_index\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import (\n compress_group_index,\n decons_obs_group_ids,\n get_flattened_iterator,\n get_group_index,\n get_group_index_sorter,\n get_indexer_dict,\n)\nfrom pandas.core.util.numba_ import (\n NUMBA_FUNC_CACHE,\n check_kwargs_and_nopython,\n get_jit_arguments,\n jit_user_function,\n split_for_numba,\n validate_udf,\n)\n\n\nclass BaseGrouper:\n \"\"\"\n This is an internal Grouper class, which actually holds\n the generated groups\n\n Parameters\n ----------\n axis : Index\n groupings : Sequence[Grouping]\n all the grouping instances to handle in this grouper\n for example for grouper list to groupby, need to pass the list\n sort : bool, default True\n whether this grouper will give sorted result or not\n group_keys : bool, default True\n mutated : bool, default False\n indexer : intp array, optional\n the indexer created by Grouper\n some groupers (TimeGrouper) will sort its axis and its\n group_info is also sorted, so need the indexer to reorder\n\n \"\"\"\n\n def __init__(\n self,\n axis: Index,\n groupings: \"Sequence[grouper.Grouping]\",\n sort: bool = True,\n group_keys: bool = True,\n mutated: bool = False,\n indexer: Optional[np.ndarray] = None,\n ):\n assert isinstance(axis, Index), axis\n\n self._filter_empty_groups = self.compressed = len(groupings) != 1\n self.axis = axis\n self._groupings: List[grouper.Grouping] = list(groupings)\n self.sort = sort\n self.group_keys = group_keys\n self.mutated = mutated\n self.indexer = indexer\n\n @property\n def groupings(self) -> List[\"grouper.Grouping\"]:\n return self._groupings\n\n @property\n def shape(self) -> Tuple[int, ...]:\n return tuple(ping.ngroups for ping in self.groupings)\n\n def __iter__(self):\n return iter(self.indices)\n\n @property\n def nkeys(self) -> int:\n return len(self.groupings)\n\n def get_iterator(self, data: FrameOrSeries, axis: int = 0):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n splitter = self._get_splitter(data, 
axis=axis)\n keys = self._get_group_keys()\n for key, (i, group) in zip(keys, splitter):\n yield key, group\n\n def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> \"DataSplitter\":\n comp_ids, _, ngroups = self.group_info\n return get_splitter(data, comp_ids, ngroups, axis=axis)\n\n def _get_grouper(self):\n \"\"\"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n \"\"\"\n return self.groupings[0].grouper\n\n def _get_group_keys(self):\n if len(self.groupings) == 1:\n return self.levels[0]\n else:\n comp_ids, _, ngroups = self.group_info\n\n # provide \"flattened\" iterator for multi-group setting\n return get_flattened_iterator(comp_ids, ngroups, self.levels, self.codes)\n\n def apply(self, f: F, data: FrameOrSeries, axis: int = 0):\n mutated = self.mutated\n splitter = self._get_splitter(data, axis=axis)\n group_keys = self._get_group_keys()\n result_values = None\n\n sdata: FrameOrSeries = splitter._get_sorted_data()\n if sdata.ndim == 2 and np.any(sdata.dtypes.apply(is_extension_array_dtype)):\n # calling splitter.fast_apply will raise TypeError via apply_frame_axis0\n # if we pass EA instead of ndarray\n # TODO: can we have a workaround for EAs backed by ndarray?\n pass\n\n elif (\n com.get_callable_name(f) not in base.plotting_methods\n and isinstance(splitter, FrameSplitter)\n and axis == 0\n # fast_apply/libreduction doesn't allow non-numpy backed indexes\n and not sdata.index._has_complex_internals\n ):\n try:\n result_values, mutated = splitter.fast_apply(f, sdata, group_keys)\n\n except libreduction.InvalidApply as err:\n # This Exception is raised if `f` triggers an exception\n # but it is preferable to raise the exception in Python.\n if \"Let this error raise above us\" not in str(err):\n # TODO: can we infer anything about whether this is\n # worth-retrying in pure-python?\n raise\n\n else:\n # If the fast apply path could be used we can return here.\n # Otherwise we need to fall back to the slow implementation.\n if len(result_values) == len(group_keys):\n return group_keys, result_values, mutated\n\n for key, (i, group) in zip(group_keys, splitter):\n object.__setattr__(group, \"name\", key)\n\n # result_values is None if fast apply path wasn't taken\n # or fast apply aborted with an unexpected exception.\n # In either case, initialize the result list and perform\n # the slow iteration.\n if result_values is None:\n result_values = []\n\n # If result_values is not None we're in the case that the\n # fast apply loop was broken prematurely but we have\n # already the result for the first group which we can reuse.\n elif i == 0:\n continue\n\n # group might be modified\n group_axes = group.axes\n res = f(group)\n if not _is_indexed_like(res, group_axes):\n mutated = True\n result_values.append(res)\n\n return group_keys, result_values, mutated\n\n @cache_readonly\n def indices(self):\n \"\"\" dict {group name -> group indices} \"\"\"\n if len(self.groupings) == 1:\n return self.groupings[0].indices\n else:\n codes_list = [ping.codes for ping in self.groupings]\n keys = [ping.group_index for ping in self.groupings]\n return get_indexer_dict(codes_list, keys)\n\n @property\n def codes(self) -> List[np.ndarray]:\n return [ping.codes for ping in self.groupings]\n\n @property\n def levels(self) -> List[Index]:\n return [ping.group_index for ping in self.groupings]\n\n @property\n def names(self) -> List[Label]:\n return [ping.name for ping in self.groupings]\n\n def size(self) 
-> Series:\n \"\"\"\n Compute group sizes.\n \"\"\"\n ids, _, ngroup = self.group_info\n ids = ensure_platform_int(ids)\n if ngroup:\n out = np.bincount(ids[ids != -1], minlength=ngroup)\n else:\n out = []\n return Series(out, index=self.result_index, dtype=\"int64\")\n\n @cache_readonly\n def groups(self):\n \"\"\" dict {group name -> group labels} \"\"\"\n if len(self.groupings) == 1:\n return self.groupings[0].groups\n else:\n to_groupby = zip(*(ping.grouper for ping in self.groupings))\n to_groupby = Index(to_groupby)\n return self.axis.groupby(to_groupby)\n\n @cache_readonly\n def is_monotonic(self) -> bool:\n # return if my group orderings are monotonic\n return Index(self.group_info[0]).is_monotonic\n\n @cache_readonly\n def group_info(self):\n comp_ids, obs_group_ids = self._get_compressed_codes()\n\n ngroups = len(obs_group_ids)\n comp_ids = ensure_int64(comp_ids)\n return comp_ids, obs_group_ids, ngroups\n\n @cache_readonly\n def codes_info(self) -> np.ndarray:\n # return the codes of items in original grouped axis\n codes, _, _ = self.group_info\n if self.indexer is not None:\n sorter = np.lexsort((codes, self.indexer))\n codes = codes[sorter]\n return codes\n\n def _get_compressed_codes(self) -> Tuple[np.ndarray, np.ndarray]:\n all_codes = self.codes\n if len(all_codes) > 1:\n group_index = get_group_index(all_codes, self.shape, sort=True, xnull=True)\n return compress_group_index(group_index, sort=self.sort)\n\n ping = self.groupings[0]\n return ping.codes, np.arange(len(ping.group_index))\n\n @cache_readonly\n def ngroups(self) -> int:\n return len(self.result_index)\n\n @property\n def reconstructed_codes(self) -> List[np.ndarray]:\n codes = self.codes\n comp_ids, obs_ids, _ = self.group_info\n return decons_obs_group_ids(comp_ids, obs_ids, self.shape, codes, xnull=True)\n\n @cache_readonly\n def result_index(self) -> Index:\n if not self.compressed and len(self.groupings) == 1:\n return self.groupings[0].result_index.rename(self.names[0])\n\n codes = self.reconstructed_codes\n levels = [ping.result_index for ping in self.groupings]\n result = MultiIndex(\n levels=levels, codes=codes, verify_integrity=False, names=self.names\n )\n return result\n\n def get_group_levels(self) -> List[Index]:\n if not self.compressed and len(self.groupings) == 1:\n return [self.groupings[0].result_index]\n\n name_list = []\n for ping, codes in zip(self.groupings, self.reconstructed_codes):\n codes = ensure_platform_int(codes)\n levels = ping.result_index.take(codes)\n\n name_list.append(levels)\n\n return name_list\n\n # ------------------------------------------------------------\n # Aggregation functions\n\n _cython_functions = {\n \"aggregate\": {\n \"add\": \"group_add\",\n \"prod\": \"group_prod\",\n \"min\": \"group_min\",\n \"max\": \"group_max\",\n \"mean\": \"group_mean\",\n \"median\": \"group_median\",\n \"var\": \"group_var\",\n \"first\": \"group_nth\",\n \"last\": \"group_last\",\n \"ohlc\": \"group_ohlc\",\n },\n \"transform\": {\n \"cumprod\": \"group_cumprod\",\n \"cumsum\": \"group_cumsum\",\n \"cummin\": \"group_cummin\",\n \"cummax\": \"group_cummax\",\n \"rank\": \"group_rank\",\n },\n }\n\n _cython_arity = {\"ohlc\": 4} # OHLC\n\n _name_functions = {\"ohlc\": [\"open\", \"high\", \"low\", \"close\"]}\n\n def _is_builtin_func(self, arg):\n \"\"\"\n if we define a builtin function for this argument, return it,\n otherwise return the arg\n \"\"\"\n return SelectionMixin._builtin_table.get(arg, arg)\n\n def _get_cython_function(\n self, kind: str, how: str, values: 
np.ndarray, is_numeric: bool\n ):\n\n dtype_str = values.dtype.name\n ftype = self._cython_functions[kind][how]\n\n # see if there is a fused-type version of function\n # only valid for numeric\n f = getattr(libgroupby, ftype, None)\n if f is not None and is_numeric:\n return f\n\n # otherwise find dtype-specific version, falling back to object\n for dt in [dtype_str, \"object\"]:\n f2 = getattr(libgroupby, f\"{ftype}_{dt}\", None)\n if f2 is not None:\n return f2\n\n if hasattr(f, \"__signatures__\"):\n # inspect what fused types are implemented\n if dtype_str == \"object\" and \"object\" not in f.__signatures__:\n # disallow this function so we get a NotImplementedError below\n # instead of a TypeError at runtime\n f = None\n\n func = f\n\n if func is None:\n raise NotImplementedError(\n f\"function is not implemented for this dtype: \"\n f\"[how->{how},dtype->{dtype_str}]\"\n )\n\n return func\n\n def _get_cython_func_and_vals(\n self, kind: str, how: str, values: np.ndarray, is_numeric: bool\n ):\n \"\"\"\n Find the appropriate cython function, casting if necessary.\n\n Parameters\n ----------\n kind : str\n how : str\n values : np.ndarray\n is_numeric : bool\n\n Returns\n -------\n func : callable\n values : np.ndarray\n \"\"\"\n try:\n func = self._get_cython_function(kind, how, values, is_numeric)\n except NotImplementedError:\n if is_numeric:\n try:\n values = ensure_float64(values)\n except TypeError:\n if lib.infer_dtype(values, skipna=False) == \"complex\":\n values = values.astype(complex)\n else:\n raise\n func = self._get_cython_function(kind, how, values, is_numeric)\n else:\n raise\n return func, values\n\n def _cython_operation(\n self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs\n ) -> Tuple[np.ndarray, Optional[List[str]]]:\n \"\"\"\n Returns the values of a cython operation as a Tuple of [data, names].\n\n Names is only useful when dealing with 2D results, like ohlc\n (see self._name_functions).\n \"\"\"\n assert kind in [\"transform\", \"aggregate\"]\n orig_values = values\n\n if values.ndim > 2:\n raise NotImplementedError(\"number of dimensions is currently limited to 2\")\n elif values.ndim == 2:\n # Note: it is *not* the case that axis is always 0 for 1-dim values,\n # as we can have 1D ExtensionArrays that we need to treat as 2D\n assert axis == 1, axis\n\n # can we do this operation with our cython functions\n # if not raise NotImplementedError\n\n # we raise NotImplemented if this is an invalid operation\n # entirely, e.g. 
adding datetimes\n\n # categoricals are only 1d, so we\n # are not setup for dim transforming\n if is_categorical_dtype(values) or is_sparse(values):\n raise NotImplementedError(f\"{values.dtype} dtype not supported\")\n elif is_datetime64_any_dtype(values):\n if how in [\"add\", \"prod\", \"cumsum\", \"cumprod\"]:\n raise NotImplementedError(\n f\"datetime64 type does not support {how} operations\"\n )\n elif is_timedelta64_dtype(values):\n if how in [\"prod\", \"cumprod\"]:\n raise NotImplementedError(\n f\"timedelta64 type does not support {how} operations\"\n )\n\n if is_datetime64tz_dtype(values.dtype):\n # Cast to naive; we'll cast back at the end of the function\n # TODO: possible need to reshape?\n # TODO(EA2D):kludge can be avoided when 2D EA is allowed.\n values = values.view(\"M8[ns]\")\n\n is_datetimelike = needs_i8_conversion(values.dtype)\n is_numeric = is_numeric_dtype(values.dtype)\n\n if is_datetimelike:\n values = values.view(\"int64\")\n is_numeric = True\n elif is_bool_dtype(values.dtype):\n values = ensure_float64(values)\n elif is_integer_dtype(values):\n # we use iNaT for the missing value on ints\n # so pre-convert to guard this condition\n if (values == iNaT).any():\n values = ensure_float64(values)\n else:\n values = ensure_int_or_float(values)\n elif is_numeric and not is_complex_dtype(values):\n values = ensure_float64(values)\n else:\n values = values.astype(object)\n\n arity = self._cython_arity.get(how, 1)\n\n vdim = values.ndim\n swapped = False\n if vdim == 1:\n values = values[:, None]\n out_shape = (self.ngroups, arity)\n else:\n if axis > 0:\n swapped = True\n assert axis == 1, axis\n values = values.T\n if arity > 1:\n raise NotImplementedError(\n \"arity of more than 1 is not supported for the 'how' argument\"\n )\n out_shape = (self.ngroups,) + values.shape[1:]\n\n func, values = self._get_cython_func_and_vals(kind, how, values, is_numeric)\n\n if how == \"rank\":\n out_dtype = \"float\"\n else:\n if is_numeric:\n out_dtype = f\"{values.dtype.kind}{values.dtype.itemsize}\"\n else:\n out_dtype = \"object\"\n\n codes, _, _ = self.group_info\n\n if kind == \"aggregate\":\n result = _maybe_fill(\n np.empty(out_shape, dtype=out_dtype), fill_value=np.nan\n )\n counts = np.zeros(self.ngroups, dtype=np.int64)\n result = self._aggregate(result, counts, values, codes, func, min_count)\n elif kind == \"transform\":\n result = _maybe_fill(\n np.empty_like(values, dtype=out_dtype), fill_value=np.nan\n )\n\n # TODO: min_count\n result = self._transform(\n result, values, codes, func, is_datetimelike, **kwargs\n )\n\n if is_integer_dtype(result) and not is_datetimelike:\n mask = result == iNaT\n if mask.any():\n result = result.astype(\"float64\")\n result[mask] = np.nan\n elif (\n how == \"add\"\n and is_integer_dtype(orig_values.dtype)\n and is_extension_array_dtype(orig_values.dtype)\n ):\n # We need this to ensure that Series[Int64Dtype].resample().sum()\n # remains int64 dtype.\n # Two options for avoiding this special case\n # 1. mask-aware ops and avoid casting to float with NaN above\n # 2. 
specify the result dtype when calling this method\n result = result.astype(\"int64\")\n\n if kind == \"aggregate\" and self._filter_empty_groups and not counts.all():\n assert result.ndim != 2\n result = result[counts > 0]\n\n if vdim == 1 and arity == 1:\n result = result[:, 0]\n\n names: Optional[List[str]] = self._name_functions.get(how, None)\n\n if swapped:\n result = result.swapaxes(0, axis)\n\n if is_datetime64tz_dtype(orig_values.dtype) or is_period_dtype(\n orig_values.dtype\n ):\n # We need to use the constructors directly for these dtypes\n # since numpy won't recognize them\n # https://github.com/pandas-dev/pandas/issues/31471\n result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype)\n elif is_datetimelike and kind == \"aggregate\":\n result = result.astype(orig_values.dtype)\n\n return result, names\n\n def aggregate(\n self, values, how: str, axis: int = 0, min_count: int = -1\n ) -> Tuple[np.ndarray, Optional[List[str]]]:\n return self._cython_operation(\n \"aggregate\", values, how, axis, min_count=min_count\n )\n\n def transform(self, values, how: str, axis: int = 0, **kwargs):\n return self._cython_operation(\"transform\", values, how, axis, **kwargs)\n\n def _aggregate(\n self, result, counts, values, comp_ids, agg_func, min_count: int = -1,\n ):\n if agg_func is libgroupby.group_nth:\n # different signature from the others\n # TODO: should we be using min_count instead of hard-coding it?\n agg_func(result, counts, values, comp_ids, rank=1, min_count=-1)\n else:\n agg_func(result, counts, values, comp_ids, min_count)\n\n return result\n\n def _transform(\n self, result, values, comp_ids, transform_func, is_datetimelike: bool, **kwargs\n ):\n\n comp_ids, _, ngroups = self.group_info\n transform_func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)\n\n return result\n\n def agg_series(\n self,\n obj: Series,\n func: F,\n *args,\n engine: str = \"cython\",\n engine_kwargs=None,\n **kwargs,\n ):\n # Caller is responsible for checking ngroups != 0\n assert self.ngroups != 0\n\n if engine == \"numba\":\n return self._aggregate_series_pure_python(\n obj, func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs\n )\n if len(obj) == 0:\n # SeriesGrouper would raise if we were to call _aggregate_series_fast\n return self._aggregate_series_pure_python(obj, func)\n\n elif is_extension_array_dtype(obj.dtype):\n # _aggregate_series_fast would raise TypeError when\n # calling libreduction.Slider\n # In the datetime64tz case it would incorrectly cast to tz-naive\n # TODO: can we get a performant workaround for EAs backed by ndarray?\n return self._aggregate_series_pure_python(obj, func)\n\n elif obj.index._has_complex_internals:\n # Pre-empt TypeError in _aggregate_series_fast\n return self._aggregate_series_pure_python(obj, func)\n\n try:\n return self._aggregate_series_fast(obj, func)\n except ValueError as err:\n if \"Function does not reduce\" in str(err):\n # raised in libreduction\n pass\n else:\n raise\n return self._aggregate_series_pure_python(obj, func)\n\n def _aggregate_series_fast(self, obj: Series, func: F):\n # At this point we have already checked that\n # - obj.index is not a MultiIndex\n # - obj is backed by an ndarray, not ExtensionArray\n # - len(obj) > 0\n # - ngroups != 0\n func = self._is_builtin_func(func)\n\n group_index, _, ngroups = self.group_info\n\n # avoids object / Series creation overhead\n dummy = obj.iloc[:0]\n indexer = get_group_index_sorter(group_index, ngroups)\n obj = obj.take(indexer)\n group_index = 
algorithms.take_nd(group_index, indexer, allow_fill=False)\n grouper = libreduction.SeriesGrouper(obj, func, group_index, ngroups, dummy)\n result, counts = grouper.get_result()\n return result, counts\n\n def _aggregate_series_pure_python(\n self,\n obj: Series,\n func: F,\n *args,\n engine: str = \"cython\",\n engine_kwargs=None,\n **kwargs,\n ):\n\n if engine == \"numba\":\n nopython, nogil, parallel = get_jit_arguments(engine_kwargs)\n check_kwargs_and_nopython(kwargs, nopython)\n validate_udf(func)\n cache_key = (func, \"groupby_agg\")\n numba_func = NUMBA_FUNC_CACHE.get(\n cache_key, jit_user_function(func, nopython, nogil, parallel)\n )\n\n group_index, _, ngroups = self.group_info\n\n counts = np.zeros(ngroups, dtype=int)\n result = None\n\n splitter = get_splitter(obj, group_index, ngroups, axis=0)\n\n for label, group in splitter:\n if engine == \"numba\":\n values, index = split_for_numba(group)\n res = numba_func(values, index, *args)\n if cache_key not in NUMBA_FUNC_CACHE:\n NUMBA_FUNC_CACHE[cache_key] = numba_func\n else:\n res = func(group, *args, **kwargs)\n\n if result is None:\n if isinstance(res, (Series, Index, np.ndarray)):\n if len(res) == 1:\n # e.g. test_agg_lambda_with_timezone lambda e: e.head(1)\n # FIXME: are we potentially losing important res.index info?\n res = res.item()\n else:\n raise ValueError(\"Function does not reduce\")\n result = np.empty(ngroups, dtype=\"O\")\n\n counts[label] = group.shape[0]\n result[label] = res\n\n assert result is not None\n result = lib.maybe_convert_objects(result, try_float=0)\n # TODO: maybe_cast_to_extension_array?\n\n return result, counts\n\n\nclass BinGrouper(BaseGrouper):\n \"\"\"\n This is an internal Grouper class\n\n Parameters\n ----------\n bins : the split index of binlabels to group the item of axis\n binlabels : the label list\n filter_empty : boolean, default False\n mutated : boolean, default False\n indexer : an intp array\n\n Examples\n --------\n bins: [2, 4, 6, 8, 10]\n binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',\n '2005-01-05', '2005-01-07', '2005-01-09'],\n dtype='datetime64[ns]', freq='2D')\n\n the group_info, which contains the label of each item in grouped\n axis, the index of label in label list, group number, is\n\n (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)\n\n means that, the grouped axis has 10 items, can be grouped into 5\n labels, the first and second items belong to the first label, the\n third and fourth items belong to the second label, and so on\n\n \"\"\"\n\n def __init__(\n self,\n bins,\n binlabels,\n filter_empty: bool = False,\n mutated: bool = False,\n indexer=None,\n ):\n self.bins = ensure_int64(bins)\n self.binlabels = ensure_index(binlabels)\n self._filter_empty_groups = filter_empty\n self.mutated = mutated\n self.indexer = indexer\n\n # These lengths must match, otherwise we could call agg_series\n # with empty self.bins, which would raise in libreduction.\n assert len(self.binlabels) == len(self.bins)\n\n @cache_readonly\n def groups(self):\n \"\"\" dict {group name -> group labels} \"\"\"\n # this is mainly for compat\n # GH 3881\n result = {\n key: value\n for key, value in zip(self.binlabels, self.bins)\n if key is not NaT\n }\n return result\n\n @property\n def nkeys(self) -> int:\n return 1\n\n def _get_grouper(self):\n \"\"\"\n We are a grouper as part of another's groupings.\n\n We have a specific method of grouping, so cannot\n convert to a Index for our grouper.\n \"\"\"\n return self\n\n def get_iterator(self, data: FrameOrSeries, 
axis: int = 0):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n if axis == 0:\n slicer = lambda start, edge: data.iloc[start:edge]\n else:\n slicer = lambda start, edge: data.iloc[:, start:edge]\n\n length = len(data.axes[axis])\n\n start = 0\n for edge, label in zip(self.bins, self.binlabels):\n if label is not NaT:\n yield label, slicer(start, edge)\n start = edge\n\n if start < length:\n yield self.binlabels[-1], slicer(start, None)\n\n @cache_readonly\n def indices(self):\n indices = collections.defaultdict(list)\n\n i = 0\n for label, bin in zip(self.binlabels, self.bins):\n if i < bin:\n if label is not NaT:\n indices[label] = list(range(i, bin))\n i = bin\n return indices\n\n @cache_readonly\n def group_info(self):\n ngroups = self.ngroups\n obs_group_ids = np.arange(ngroups)\n rep = np.diff(np.r_[0, self.bins])\n\n rep = ensure_platform_int(rep)\n if ngroups == len(self.bins):\n comp_ids = np.repeat(np.arange(ngroups), rep)\n else:\n comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)\n\n return (\n comp_ids.astype(\"int64\", copy=False),\n obs_group_ids.astype(\"int64\", copy=False),\n ngroups,\n )\n\n @cache_readonly\n def reconstructed_codes(self) -> List[np.ndarray]:\n # get unique result indices, and prepend 0 as groupby starts from the first\n return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]\n\n @cache_readonly\n def result_index(self):\n if len(self.binlabels) != 0 and isna(self.binlabels[0]):\n return self.binlabels[1:]\n\n return self.binlabels\n\n @property\n def levels(self) -> List[Index]:\n return [self.binlabels]\n\n @property\n def names(self) -> List[Label]:\n return [self.binlabels.name]\n\n @property\n def groupings(self) -> \"List[grouper.Grouping]\":\n return [\n grouper.Grouping(lvl, lvl, in_axis=False, level=None, name=name)\n for lvl, name in zip(self.levels, self.names)\n ]\n\n def agg_series(\n self,\n obj: Series,\n func: F,\n *args,\n engine: str = \"cython\",\n engine_kwargs=None,\n **kwargs,\n ):\n # Caller is responsible for checking ngroups != 0\n assert self.ngroups != 0\n assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result\n\n if is_extension_array_dtype(obj.dtype):\n # pre-empt SeriesBinGrouper from raising TypeError\n return self._aggregate_series_pure_python(obj, func)\n\n dummy = obj[:0]\n grouper = libreduction.SeriesBinGrouper(obj, func, self.bins, dummy)\n return grouper.get_result()\n\n\ndef _is_indexed_like(obj, axes) -> bool:\n if isinstance(obj, Series):\n if len(axes) > 1:\n return False\n return obj.index.equals(axes[0])\n elif isinstance(obj, DataFrame):\n return obj.index.equals(axes[0])\n\n return False\n\n\n# ----------------------------------------------------------------------\n# Splitting / application\n\n\nclass DataSplitter:\n def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0):\n self.data = data\n self.labels = ensure_int64(labels)\n self.ngroups = ngroups\n\n self.axis = axis\n assert isinstance(axis, int), axis\n\n @cache_readonly\n def slabels(self):\n # Sorted labels\n return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)\n\n @cache_readonly\n def sort_idx(self):\n # Counting sort indexer\n return get_group_index_sorter(self.labels, self.ngroups)\n\n def __iter__(self):\n sdata = self._get_sorted_data()\n\n if self.ngroups == 0:\n # we are inside a generator, rather than raise StopIteration\n # we merely return signal the end\n return\n\n starts, 
ends = lib.generate_slices(self.slabels, self.ngroups)\n\n for i, (start, end) in enumerate(zip(starts, ends)):\n yield i, self._chop(sdata, slice(start, end))\n\n def _get_sorted_data(self) -> FrameOrSeries:\n return self.data.take(self.sort_idx, axis=self.axis)\n\n def _chop(self, sdata, slice_obj: slice) -> NDFrame:\n raise AbstractMethodError(self)\n\n\nclass SeriesSplitter(DataSplitter):\n def _chop(self, sdata: Series, slice_obj: slice) -> Series:\n return sdata.iloc[slice_obj]\n\n\nclass FrameSplitter(DataSplitter):\n def fast_apply(self, f: F, sdata: FrameOrSeries, names):\n # must return keys::list, values::list, mutated::bool\n starts, ends = lib.generate_slices(self.slabels, self.ngroups)\n return libreduction.apply_frame_axis0(sdata, f, names, starts, ends)\n\n def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:\n if self.axis == 0:\n return sdata.iloc[slice_obj]\n else:\n return sdata.iloc[:, slice_obj]\n\n\ndef get_splitter(\n data: FrameOrSeries, labels: np.ndarray, ngroups: int, axis: int = 0\n) -> DataSplitter:\n if isinstance(data, Series):\n klass: Type[DataSplitter] = SeriesSplitter\n else:\n # i.e. DataFrame\n klass = FrameSplitter\n\n return klass(data, labels, ngroups, axis)\n"
] | [
[
"pandas.core.base.SelectionMixin._builtin_table.get",
"pandas.core.common.get_callable_name",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas._libs.lib.generate_slices",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.sorting.get_group_index_sorter",
"pandas.core.dtypes.common.is_complex_dtype",
"pandas.core.sorting.get_group_index",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.dtypes.common.ensure_float64",
"pandas.core.series.Series",
"pandas.errors.AbstractMethodError",
"numpy.arange",
"numpy.empty_like",
"numpy.lexsort",
"numpy.flatnonzero",
"pandas.core.dtypes.common.ensure_int_or_float",
"numpy.diff",
"pandas.core.dtypes.common.ensure_int64",
"pandas._libs.reduction.apply_frame_axis0",
"numpy.zeros",
"pandas.core.algorithms.take_nd",
"pandas.core.sorting.decons_obs_group_ids",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.util.numba_.jit_user_function",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.sorting.compress_group_index",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.sorting.get_flattened_iterator",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.core.util.numba_.get_jit_arguments",
"pandas.core.indexes.api.Index",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.sorting.get_indexer_dict",
"pandas.core.util.numba_.validate_udf",
"pandas.core.util.numba_.check_kwargs_and_nopython",
"pandas.core.dtypes.common.is_sparse",
"pandas.core.util.numba_.split_for_numba",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas._libs.reduction.SeriesGrouper",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas._libs.reduction.SeriesBinGrouper",
"pandas.core.indexes.api.MultiIndex",
"pandas.core.indexes.api.ensure_index",
"pandas._libs.lib.maybe_convert_objects",
"numpy.bincount",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.infer_dtype",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
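The `pandas/core/groupby/ops.py` snapshot above is internal machinery: `BaseGrouper` backs ordinary `groupby` calls, with `_cython_operation` dispatching to kernels such as `group_add`, while `BinGrouper` backs bin-based grouping as produced by `resample` (its docstring walks through a bins/binlabels example). A minimal sketch of the public calls that exercise each path, using only the public pandas API:

```python
import numpy as np
import pandas as pd

# BaseGrouper path: a plain groupby aggregation ends up in
# BaseGrouper._cython_operation("aggregate", ..., how="add", ...).
df = pd.DataFrame({"key": ["a", "b", "a", "b"], "val": [1, 2, 3, 4]})
print(df.groupby("key")["val"].sum())

# BinGrouper path: resampling a datetime-indexed Series groups by bin
# edges, mirroring the 2005-01-01 / freq="2D" example in its docstring.
ts = pd.Series(np.arange(10),
               index=pd.date_range("2005-01-01", periods=10, freq="D"))
print(ts.resample("2D").sum())
```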
OliverWarrington/nilearn | [
"d42d3b10eb543619ed4189f05b74ef2e75a92068",
"d42d3b10eb543619ed4189f05b74ef2e75a92068",
"d42d3b10eb543619ed4189f05b74ef2e75a92068"
] | [
"nilearn/surface/surface.py",
"nilearn/glm/first_level/first_level.py",
"nilearn/plotting/__init__.py"
] | [
"\"\"\"\nFunctions for surface manipulation.\n\"\"\"\nimport os\nimport warnings\nimport collections\nimport gzip\nfrom distutils.version import LooseVersion\nfrom collections import namedtuple\n\n\nimport numpy as np\nfrom scipy import sparse, interpolate\nimport sklearn.preprocessing\nimport sklearn.cluster\ntry:\n from sklearn.exceptions import EfficiencyWarning\nexcept ImportError:\n class EfficiencyWarning(UserWarning):\n \"\"\"Warning used to notify the user of inefficient computation.\"\"\"\n\nimport nibabel\nfrom nibabel import gifti\nfrom nibabel import freesurfer as fs\n\nfrom nilearn import datasets\nfrom nilearn.image import load_img\nfrom nilearn.image import resampling\nfrom nilearn._utils.path_finding import _resolve_globbing\nfrom nilearn import _utils\nfrom nilearn.image import get_data\n\n# Create a namedtuple object for meshes\nMesh = namedtuple(\"mesh\", [\"coordinates\", \"faces\"])\n\n# Create a namedtuple object for surfaces\nSurface = namedtuple(\"surface\", [\"mesh\", \"data\"])\n\ndef _uniform_ball_cloud(n_points=20, dim=3, n_monte_carlo=50000):\n \"\"\"Get points uniformly spaced in the unit ball.\"\"\"\n rng = np.random.RandomState(0)\n mc_cube = rng.uniform(-1, 1, size=(n_monte_carlo, dim))\n mc_ball = mc_cube[(mc_cube**2).sum(axis=1) <= 1.]\n centroids, assignments, _ = sklearn.cluster.k_means(\n mc_ball, n_clusters=n_points, random_state=0)\n return centroids\n\n\ndef _load_uniform_ball_cloud(n_points=20):\n stored_points = os.path.abspath(\n os.path.join(__file__, '..', 'data',\n 'ball_cloud_{}_samples.csv'.format(n_points)))\n if os.path.isfile(stored_points):\n points = np.loadtxt(stored_points)\n return points\n warnings.warn(\n 'Cached sample positions are provided for '\n 'n_samples = 10, 20, 40, 80, 160. Since the number of samples does '\n 'have a big impact on the result, we strongly recommend using one '\n 'of these values when using kind=\"ball\" for much better performance.',\n EfficiencyWarning)\n return _uniform_ball_cloud(n_points=n_points)\n\n\ndef _face_outer_normals(mesh):\n \"\"\"Get the normal to each triangle in a mesh.\n\n They are the outer normals if the mesh respects the convention that the\n direction given by the direct order of a triangle's vertices (right-hand\n rule) points outwards.\n\n \"\"\"\n vertices, faces = load_surf_mesh(mesh)\n face_vertices = vertices[faces]\n # The right-hand rule gives the direction of the outer normal\n normals = np.cross(face_vertices[:, 1, :] - face_vertices[:, 0, :],\n face_vertices[:, 2, :] - face_vertices[:, 0, :])\n normals = sklearn.preprocessing.normalize(normals)\n return normals\n\n\ndef _surrounding_faces(mesh):\n \"\"\"Get matrix indicating which faces the nodes belong to.\n\n i, j is set if node i is a vertex of triangle j.\n\n \"\"\"\n vertices, faces = load_surf_mesh(mesh)\n n_faces = faces.shape[0]\n return sparse.csr_matrix((np.ones(3 * n_faces), (faces.ravel(), np.tile(\n np.arange(n_faces), (3, 1)).T.ravel())), (vertices.shape[0], n_faces))\n\n\ndef _vertex_outer_normals(mesh):\n \"\"\"Get the normal at each vertex in a triangular mesh.\n\n They are the outer normals if the mesh respects the convention that the\n direction given by the direct order of a triangle's vertices (right-hand\n rule) points outwards.\n\n \"\"\"\n vertices, faces = load_surf_mesh(mesh)\n vertex_faces = _surrounding_faces(mesh)\n face_normals = _face_outer_normals(mesh)\n normals = vertex_faces.dot(face_normals)\n return sklearn.preprocessing.normalize(normals)\n\n\ndef _sample_locations_between_surfaces(\n 
mesh, inner_mesh, affine, n_points=10, depth=None):\n outer_vertices, _ = mesh\n inner_vertices, _ = inner_mesh\n # when we drop support for np 1.5 replace the next 2 lines with\n # sample_locations = np.linspace(inner_vertices, outer_vertices, n_points)\n if depth is None:\n steps = np.linspace(0, 1, n_points)[:, None, None]\n else:\n steps = np.asarray(depth)[:, None, None]\n sample_locations = outer_vertices + steps * (\n inner_vertices - outer_vertices)\n sample_locations = np.rollaxis(sample_locations, 1)\n sample_locations_voxel_space = np.asarray(\n resampling.coord_transform(\n *np.vstack(sample_locations).T,\n affine=np.linalg.inv(affine))).T.reshape(sample_locations.shape)\n return sample_locations_voxel_space\n\n\ndef _ball_sample_locations(\n mesh, affine, ball_radius=3., n_points=20, depth=None):\n \"\"\"Locations to draw samples from to project volume data onto a mesh.\n\n For each mesh vertex, the locations of `n_points` points evenly spread in a\n ball around the vertex are returned.\n\n Parameters\n ----------\n mesh : pair of np arrays.\n `mesh[0]` contains the 3d coordinates of the vertices\n (shape n_vertices, 3)\n `mesh[1]` contains, for each triangle, the indices into `mesh[0]` of its\n vertices (shape n_triangles, 3)\n\n affine : array of shape (4, 4)\n Affine transformation from image voxels to the vertices' coordinate\n space.\n\n ball_radius : float, optional\n Size in mm of the neighbourhood around each vertex in which to draw\n samples. Default=3.0.\n\n n_points : int, optional\n Number of samples to draw for each vertex. Default=20.\n\n depth : None\n Raises a ValueError if not None because incompatible with this sampling\n strategy.\n\n Returns\n -------\n sample_location_voxel_space : numpy array, shape (n_vertices, n_points, 3)\n The locations, in voxel space, from which to draw samples.\n First dimension iterates over mesh vertices, second dimension iterates\n over the sample points associated to a vertex, third dimension is x, y,\n z in voxel space.\n\n \"\"\"\n if depth is not None:\n raise ValueError(\"The 'ball' sampling strategy does not support \"\n \"the 'depth' parameter\")\n vertices, faces = mesh\n offsets_world_space = _load_uniform_ball_cloud(\n n_points=n_points) * ball_radius\n mesh_voxel_space = np.asarray(\n resampling.coord_transform(*vertices.T,\n affine=np.linalg.inv(affine))).T\n linear_map = np.eye(affine.shape[0])\n linear_map[:-1, :-1] = affine[:-1, :-1]\n offsets_voxel_space = np.asarray(\n resampling.coord_transform(*offsets_world_space.T,\n affine=np.linalg.inv(linear_map))).T\n sample_locations_voxel_space = (mesh_voxel_space[:, np.newaxis, :] +\n offsets_voxel_space[np.newaxis, :])\n return sample_locations_voxel_space\n\n\ndef _line_sample_locations(\n mesh, affine, segment_half_width=3., n_points=10, depth=None):\n \"\"\"Locations to draw samples from to project volume data onto a mesh.\n\n For each mesh vertex, the locations of `n_points` points evenly spread in a\n segment of the normal to the vertex are returned. 
The line segment has\n length 2 * `segment_half_width` and is centered at the vertex.\n\n Parameters\n ----------\n mesh : pair of numpy.ndarray\n `mesh[0]` contains the 3d coordinates of the vertices\n (shape n_vertices, 3)\n `mesh[1]` contains, for each triangle, the indices into `mesh[0]` of its\n vertices (shape n_triangles, 3)\n\n affine : numpy.ndarray of shape (4, 4)\n Affine transformation from image voxels to the vertices' coordinate\n space.\n\n segment_half_width : float, optional\n Size in mm of the neighbourhood around each vertex in which to draw\n samples. Default=3.0.\n\n n_points : int, optional\n Number of samples to draw for each vertex. Default=10.\n\n depth : sequence of floats or None, optional\n Cortical depth, expressed as a fraction of segment_half_width.\n Overrides n_points.\n\n Returns\n -------\n sample_location_voxel_space : numpy array, shape (n_vertices, n_points, 3)\n The locations, in voxel space, from which to draw samples.\n First dimension iterates over mesh vertices, second dimension iterates\n over the sample points associated to a vertex, third dimension is x, y,\n z in voxel space.\n\n \"\"\"\n vertices, faces = mesh\n normals = _vertex_outer_normals(mesh)\n if depth is None:\n offsets = np.linspace(\n segment_half_width, -segment_half_width, n_points)\n else:\n offsets = - segment_half_width * np.asarray(depth)\n sample_locations = vertices[\n np.newaxis, :, :] + normals * offsets[:, np.newaxis, np.newaxis]\n sample_locations = np.rollaxis(sample_locations, 1)\n sample_locations_voxel_space = np.asarray(\n resampling.coord_transform(\n *np.vstack(sample_locations).T,\n affine=np.linalg.inv(affine))).T.reshape(sample_locations.shape)\n return sample_locations_voxel_space\n\n\ndef _choose_kind(kind, inner_mesh):\n if kind == \"depth\" and inner_mesh is None:\n raise TypeError(\n \"'inner_mesh' must be provided to use \"\n \"the 'depth' sampling strategy\")\n if kind == \"auto\":\n kind = \"line\" if inner_mesh is None else \"depth\"\n return kind\n\n\ndef _sample_locations(mesh, affine, radius, kind='auto', n_points=None,\n inner_mesh=None, depth=None):\n \"\"\"Get either ball or line sample locations.\"\"\"\n kind = _choose_kind(kind, inner_mesh)\n kwargs = ({} if n_points is None else {'n_points': n_points})\n projectors = {\n 'line': (_line_sample_locations, {\"segment_half_width\": radius}),\n 'ball': (_ball_sample_locations, {\"ball_radius\": radius}),\n 'depth': (_sample_locations_between_surfaces,\n {\"inner_mesh\": inner_mesh})\n }\n if kind not in projectors:\n raise ValueError(\n '\"kind\" must be one of {}'.format(tuple(projectors.keys())))\n projector, extra_kwargs = projectors[kind]\n # let the projector choose the default for n_points\n # (for example a ball probably needs more than a line)\n sample_locations = projector(\n mesh=mesh, affine=affine, depth=depth, **kwargs, **extra_kwargs)\n return sample_locations\n\n\ndef _masked_indices(sample_locations, img_shape, mask=None):\n \"\"\"Get the indices of sample points which should be ignored.\n\n Parameters:\n -----------\n sample_locations : array, shape(n_sample_locations, 3)\n The coordinates of candidate interpolation points.\n\n img_shape : tuple\n The dimensions of the image to be sampled.\n\n mask : array of shape img_shape or None, optional\n Part of the image to be masked. 
If None, don't apply any mask.\n\n Returns\n -------\n array of shape (n_sample_locations,)\n True if this particular location should be ignored (outside of image or\n masked).\n\n \"\"\"\n kept = (sample_locations >= 0).all(axis=1)\n for dim, size in enumerate(img_shape):\n kept = np.logical_and(kept, sample_locations[:, dim] < size)\n if mask is not None:\n indices = np.asarray(np.floor(sample_locations[kept]), dtype=int)\n kept[kept] = mask[\n indices[:, 0], indices[:, 1], indices[:, 2]] != 0\n return ~kept\n\n\ndef _projection_matrix(mesh, affine, img_shape, kind='auto', radius=3.,\n n_points=None, mask=None, inner_mesh=None, depth=None):\n \"\"\"Get a sparse matrix that projects volume data onto a mesh.\n\n Parameters\n ----------\n mesh : str or numpy.ndarray\n Either a file containing surface mesh geometry (valid formats\n are .gii or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or a list of two Numpy arrays,\n the first containing the x-y-z coordinates of the mesh\n vertices, the second containing the indices (into coords)\n of the mesh faces.\n\n affine : array of shape (4, 4)\n Affine transformation from image voxels to the vertices' coordinate\n space.\n\n img_shape : 3-tuple of integers\n The shape of the image to be projected.\n\n kind : {'auto', 'depth', 'line', 'ball'}, optional\n The strategy used to sample image intensities around each vertex.\n Ignored if `inner_mesh` is not None. Default='auto'.\n\n - 'auto':\n 'depth' if `inner_mesh` is not `None`, otherwise 'line.\n - 'depth':\n Sampled at the specified cortical depths between corresponding\n nodes of `mesh` and `inner_mesh`.\n - 'line':\n Samples are placed along the normal to the mesh.\n - 'ball':\n Samples are regularly spaced inside a ball centered at the mesh\n vertex.\n\n radius : float, optional\n The size (in mm) of the neighbourhood from which samples are drawn\n around each node. Ignored if `inner_mesh` is not None.\n Default=3.0.\n\n n_points : int or None, optional\n How many samples are drawn around each vertex and averaged. If None,\n use a reasonable default for the chosen sampling strategy (20 for\n 'ball' or 10 for lines ie using `line` or an `inner_mesh`).\n For performance reasons, if using kind=\"ball\", choose `n_points` in\n [10, 20, 40, 80, 160] (default is 20), because cached positions are\n available.\n\n mask : array of shape img_shape or None, optional\n Part of the image to be masked. If None, don't apply any mask.\n\n inner_mesh : str or numpy.ndarray, optional\n Either a file containing surface mesh or a pair of ndarrays\n (coordinates, triangles). If provided this is an inner surface that is\n nested inside the one represented by `mesh` -- e.g. `mesh` is a pial\n surface and `inner_mesh` a white matter surface. In this case nodes in\n both meshes must correspond: node i in `mesh` is just across the gray\n matter thickness from node i in `inner_mesh`. Image values for index i\n are then sampled along the line joining these two points (if `kind` is\n 'auto' or 'depth').\n\n depth : sequence of floats or None, optional\n Cortical depth, expressed as a fraction of segment_half_width.\n overrides n_points. Should be None if kind is 'ball'\n\n Returns\n -------\n proj : scipy.sparse.csr_matrix\n Shape (n_voxels, n_mesh_vertices). 
The dot product of this matrix with\n an image (represented as a column vector) gives the projection onto mesh\n vertices.\n\n See Also\n --------\n nilearn.surface.vol_to_surf\n Compute the projection for one or several images.\n\n \"\"\"\n # A user might want to call this function directly so check mask size.\n if mask is not None and tuple(mask.shape) != img_shape:\n raise ValueError('mask should have shape img_shape')\n mesh = load_surf_mesh(mesh)\n sample_locations = _sample_locations(\n mesh, affine, kind=kind, radius=radius, n_points=n_points,\n inner_mesh=inner_mesh, depth=depth)\n sample_locations = np.asarray(np.round(sample_locations), dtype=int)\n n_vertices, n_points, img_dim = sample_locations.shape\n masked = _masked_indices(np.vstack(sample_locations), img_shape, mask=mask)\n sample_locations = np.rollaxis(sample_locations, -1)\n sample_indices = np.ravel_multi_index(\n sample_locations, img_shape, mode='clip').ravel()\n row_indices, _ = np.mgrid[:n_vertices, :n_points]\n row_indices = row_indices.ravel()\n row_indices = row_indices[~masked]\n sample_indices = sample_indices[~masked]\n weights = np.ones(len(row_indices))\n proj = sparse.csr_matrix(\n (weights, (row_indices, sample_indices.ravel())),\n shape=(n_vertices, np.prod(img_shape)))\n proj = sklearn.preprocessing.normalize(proj, axis=1, norm='l1')\n return proj\n\n\ndef _nearest_voxel_sampling(images, mesh, affine, kind='auto', radius=3.,\n n_points=None, mask=None, inner_mesh=None,\n depth=None):\n \"\"\"In each image, measure the intensity at each node of the mesh.\n\n Image intensity at each sample point is that of the nearest voxel.\n A 2-d array is returned, where each row corresponds to an image and each\n column to a mesh vertex.\n See documentation of vol_to_surf for details.\n\n \"\"\"\n proj = _projection_matrix(\n mesh, affine, images[0].shape, kind=kind, radius=radius,\n n_points=n_points, mask=mask, inner_mesh=inner_mesh, depth=depth)\n data = np.asarray(images).reshape(len(images), -1).T\n texture = proj.dot(data)\n # if all samples around a mesh vertex are outside the image,\n # there is no reasonable value to assign to this vertex.\n # in this case we return NaN for this vertex.\n texture[np.asarray(proj.sum(axis=1) == 0).ravel()] = np.nan\n return texture.T\n\n\ndef _interpolation_sampling(images, mesh, affine, kind='auto', radius=3,\n n_points=None, mask=None, inner_mesh=None,\n depth=None):\n \"\"\"In each image, measure the intensity at each node of the mesh.\n\n Image intensity at each sample point is computed with trilinear\n interpolation.\n A 2-d array is returned, where each row corresponds to an image and each\n column to a mesh vertex.\n See documentation of vol_to_surf for details.\n\n \"\"\"\n sample_locations = _sample_locations(\n mesh, affine, kind=kind, radius=radius, n_points=n_points,\n inner_mesh=inner_mesh, depth=depth)\n n_vertices, n_points, img_dim = sample_locations.shape\n grid = [np.arange(size) for size in images[0].shape]\n interp_locations = np.vstack(sample_locations)\n masked = _masked_indices(interp_locations, images[0].shape, mask=mask)\n # loop over images rather than building a big array to use less memory\n all_samples = []\n for img in images:\n interpolator = interpolate.RegularGridInterpolator(\n grid, img,\n bounds_error=False, method='linear', fill_value=None)\n samples = interpolator(interp_locations)\n # if all samples around a mesh vertex are outside the image,\n # there is no reasonable value to assign to this vertex.\n # in this case we return NaN for 
this vertex.\n samples[masked] = np.nan\n all_samples.append(samples)\n all_samples = np.asarray(all_samples)\n all_samples = all_samples.reshape((len(images), n_vertices, n_points))\n texture = np.nanmean(all_samples, axis=2)\n return texture\n\n\ndef vol_to_surf(img, surf_mesh,\n radius=3., interpolation='linear', kind='auto',\n n_samples=None, mask_img=None, inner_mesh=None, depth=None):\n \"\"\"Extract surface data from a Nifti image.\n\n .. versionadded:: 0.4.0\n\n Parameters\n ----------\n img : Niimg-like object, 3d or 4d.\n See http://nilearn.github.io/manipulating_images/input_output.html\n\n surf_mesh : str or numpy.ndarray or Mesh\n Either a file containing surface mesh geometry (valid formats\n are .gii or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or two Numpy arrays organized in a list,\n tuple or a namedtuple with the fields \"coordinates\" and \"faces\", or\n a Mesh object with \"coordinates\" and \"faces\" attributes.\n\n radius : float, optional\n The size (in mm) of the neighbourhood from which samples are drawn\n around each node. Ignored if `inner_mesh` is provided.\n Default=3.0.\n\n interpolation : {'linear', 'nearest'}, optional\n How the image intensity is measured at a sample point.\n Default='linear'.\n\n - 'linear':\n Use a trilinear interpolation of neighboring voxels.\n - 'nearest':\n Use the intensity of the nearest voxel.\n\n For one image, the speed difference is small, 'linear' takes about x1.5\n more time. For many images, 'nearest' scales much better, up to x20\n faster.\n\n kind : {'auto', 'depth', 'line', 'ball'}, optional\n The strategy used to sample image intensities around each vertex.\n Default='auto'.\n\n - 'auto':\n Chooses 'depth' if `inner_mesh` is provided and 'line' otherwise.\n - 'depth':\n `inner_mesh` must be a mesh whose nodes correspond to those in\n `surf_mesh`. For example, `inner_mesh` could be a white matter\n surface mesh and `surf_mesh` a pial surface mesh. Samples are\n placed between each pair of corresponding nodes at the specified\n cortical depths (regularly spaced by default, see `depth`\n parameter).\n - 'line':\n Samples are placed along the normal to the mesh, at the positions\n specified by `depth`, or by default regularly spaced over the\n interval [- `radius`, + `radius`].\n - 'ball':\n Samples are regularly spaced inside a ball centered at the mesh\n vertex.\n\n n_samples : int or None, optional\n How many samples are drawn around each vertex and averaged. If\n ``None``, use a reasonable default for the chosen sampling strategy\n (20 for 'ball' or 10 for 'line').\n For performance reasons, if using `kind` =\"ball\", choose `n_samples` in\n [10, 20, 40, 80, 160] (default is 20), because cached positions are\n available.\n\n mask_img : Niimg-like object or None, optional\n Samples falling out of this mask or out of the image are ignored.\n If ``None``, don't apply any mask.\n\n inner_mesh : str or numpy.ndarray, optional\n Either a file containing a surface mesh or a pair of ndarrays\n (coordinates, triangles). If provided this is an inner surface that is\n nested inside the one represented by `surf_mesh` -- e.g. `surf_mesh` is\n a pial surface and `inner_mesh` a white matter surface. In this case\n nodes in both meshes must correspond: node i in `surf_mesh` is just\n across the gray matter thickness from node i in `inner_mesh`. 
Image\n values for index i are then sampled along the line joining these two\n points (if `kind` is 'auto' or 'depth').\n\n depth : sequence of floats or None, optional\n The cortical depth of samples. If provided, n_samples is ignored.\n When `inner_mesh` is provided, each element of `depth` is a fraction of\n the distance from `mesh` to `inner_mesh`: 0 is exactly on the outer\n surface, .5 is halfway, 1. is exactly on the inner surface. `depth`\n entries can be negative or greater than 1.\n When `inner_mesh` is not provided and `kind` is \"line\", each element of\n `depth` is a fraction of `radius` along the inwards normal at each mesh\n node. For example if `radius==1` and `depth==[-.5, 0.]`, for each node\n values will be sampled .5 mm outside of the surface and exactly at the\n node position.\n This parameter is not supported for the \"ball\" strategy so passing\n `depth` when `kind==\"ball\"` results in a `ValueError`.\n\n Returns\n -------\n texture : numpy.ndarray, 1d or 2d.\n If 3D image is provided, a 1d vector is returned, containing one value\n for each mesh node.\n If 4D image is provided, a 2d array is returned, where each row\n corresponds to a mesh node.\n\n Notes\n -----\n This function computes a value for each vertex of the mesh. In order to do\n so, it selects a few points in the volume surrounding that vertex,\n interpolates the image intensities at these sampling positions, and\n averages the results.\n\n Three strategies are available to select these positions.\n\n - with 'depth', data is sampled at various cortical depths between\n corresponding nodes of `surface_mesh` and `inner_mesh` (which can be,\n for example, a pial surface and a white matter surface).\n - 'ball' uses points regularly spaced in a ball centered at the mesh\n vertex. The radius of the ball is controlled by the parameter\n `radius`.\n - 'line' starts by drawing the normal to the mesh passing through this\n vertex. It then selects a segment of this normal, centered at the\n vertex, of length 2 * `radius`. Image intensities are measured at\n points regularly spaced on this normal segment, or at positions\n determined by `depth`.\n - ('auto' chooses 'depth' if `inner_mesh` is provided and 'line'\n otherwise)\n\n You can control how many samples are drawn by setting `n_samples`, or their\n position by setting `depth`.\n\n Once the sampling positions are chosen, those that fall outside of the 3d\n image (or outside of the mask if you provided one) are discarded. If all\n sample positions are discarded (which can happen, for example, if the\n vertex itself is outside of the support of the image), the projection at\n this vertex will be ``numpy.nan``.\n\n The 3d image then needs to be interpolated at each of the remaining points.\n Two options are available: 'nearest' selects the value of the nearest\n voxel, and 'linear' performs trilinear interpolation of neighbouring\n voxels. 'linear' may give better results - for example, the projected\n values are more stable when resampling the 3d image or applying affine\n transformations to it. For one image, the speed difference is small,\n 'linear' takes about x1.5 more time. 
For many images, 'nearest' scales much\n better, up to x20 faster.\n\n Once the 3d image has been interpolated at each sample point, the\n interpolated values are averaged to produce the value associated to this\n particular mesh vertex.\n\n Warnings\n --------\n This function is experimental and details such as the interpolation method\n are subject to change.\n\n \"\"\"\n sampling_schemes = {'linear': _interpolation_sampling,\n 'nearest': _nearest_voxel_sampling}\n if interpolation not in sampling_schemes:\n raise ValueError('\"interpolation\" should be one of {}'.format(\n tuple(sampling_schemes.keys())))\n img = load_img(img)\n if mask_img is not None:\n mask_img = _utils.check_niimg(mask_img)\n mask = get_data(resampling.resample_to_img(\n mask_img, img, interpolation='nearest', copy=False))\n else:\n mask = None\n original_dimension = len(img.shape)\n img = _utils.check_niimg(img, atleast_4d=True)\n frames = np.rollaxis(get_data(img), -1)\n mesh = load_surf_mesh(surf_mesh)\n if inner_mesh is not None:\n inner_mesh = load_surf_mesh(inner_mesh)\n sampling = sampling_schemes[interpolation]\n texture = sampling(\n frames, mesh, img.affine, radius=radius, kind=kind,\n n_points=n_samples, mask=mask, inner_mesh=inner_mesh, depth=depth)\n if original_dimension == 3:\n texture = texture[0]\n return texture.T\n\n\ndef _load_surf_files_gifti_gzip(surf_file):\n \"\"\"Load surface data Gifti files which are gzipped. This\n function is used by load_surf_mesh and load_surf_data for\n extracting gzipped files.\n\n Part of the code can be removed while bumping nibabel 2.0.2\n\n \"\"\"\n with gzip.open(surf_file) as f:\n as_bytes = f.read()\n if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):\n parser = gifti.GiftiImage.parser()\n parser.parse(as_bytes)\n gifti_img = parser.img\n else:\n from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter\n parser = ParserCreate()\n parser.buffer_text = True\n out = Outputter()\n parser.StartElementHandler = out.StartElementHandler\n parser.EndElementHandler = out.EndElementHandler\n parser.CharacterDataHandler = out.CharacterDataHandler\n parser.Parse(as_bytes)\n gifti_img = out.img\n return gifti_img\n\n\ndef _gifti_img_to_data(gifti_img):\n \"\"\"Load surface image e.g. 
sulcal depth or statistical map in\n nibabel.gifti.GiftiImage to data\n\n Used by load_surf_data function in common to surface sulcal data\n acceptable to .gii or .gii.gz\n\n \"\"\"\n if not gifti_img.darrays:\n raise ValueError('Gifti must contain at least one data array')\n return np.asarray([arr.data for arr in gifti_img.darrays]).T.squeeze()\n\n\n# function to figure out datatype and load data\ndef load_surf_data(surf_data):\n \"\"\"Loading data to be represented on a surface mesh.\n\n Parameters\n ----------\n surf_data : str or numpy.ndarray\n Either a file containing surface data (valid format are .gii,\n .gii.gz, .mgz, .nii, .nii.gz, or Freesurfer specific files such as\n .thickness, .curv, .sulc, .annot, .label), lists of 1D data files are\n returned as 2D arrays, or a Numpy array containing surface data.\n\n Returns\n -------\n data : numpy.ndarray\n An array containing surface data\n\n \"\"\"\n # if the input is a filename, load it\n if isinstance(surf_data, str):\n\n # resolve globbing\n file_list = _resolve_globbing(surf_data)\n # _resolve_globbing handles empty lists\n\n for f in range(len(file_list)):\n surf_data = file_list[f]\n if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or\n surf_data.endswith('mgz')):\n data_part = np.squeeze(get_data(nibabel.load(surf_data)))\n elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or\n surf_data.endswith('thickness')):\n data_part = fs.io.read_morph_data(surf_data)\n elif surf_data.endswith('annot'):\n data_part = fs.io.read_annot(surf_data)[0]\n elif surf_data.endswith('label'):\n data_part = fs.io.read_label(surf_data)\n elif surf_data.endswith('gii'):\n if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):\n gii = nibabel.load(surf_data)\n else:\n gii = gifti.read(surf_data)\n data_part = _gifti_img_to_data(gii)\n elif surf_data.endswith('gii.gz'):\n gii = _load_surf_files_gifti_gzip(surf_data)\n data_part = _gifti_img_to_data(gii)\n else:\n raise ValueError(('The input type is not recognized. %r was '\n 'given while valid inputs are a Numpy array '\n 'or one of the following file formats: .gii,'\n ' .gii.gz, .mgz, .nii, .nii.gz, Freesurfer '\n 'specific files such as .curv, .sulc, '\n '.thickness, .annot, .label') % surf_data)\n\n if len(data_part.shape) == 1:\n data_part = data_part[:, np.newaxis]\n if f == 0:\n data = data_part\n elif f > 0:\n try:\n data = np.concatenate((data, data_part), axis=1)\n except ValueError:\n raise ValueError('When more than one file is input, all '\n 'files must contain data with the same '\n 'shape in axis=0')\n\n # if the input is a numpy array\n elif isinstance(surf_data, np.ndarray):\n data = surf_data\n else:\n raise ValueError('The input type is not recognized. '\n 'Valid inputs are a Numpy array or one of the '\n 'following file formats: .gii, .gii.gz, .mgz, .nii, '\n '.nii.gz, Freesurfer specific files such as .curv, '\n '.sulc, .thickness, .annot, .label')\n return np.squeeze(data)\n\n\ndef _gifti_img_to_mesh(gifti_img):\n \"\"\"Load surface image in nibabel.gifti.GiftiImage to data\n\n Used by load_surf_mesh function in common to surface mesh\n acceptable to .gii or .gii.gz\n\n \"\"\"\n error_message = ('The surf_mesh input is not recognized. Valid Freesurfer '\n 'surface mesh inputs are .pial, .inflated, .sphere, '\n '.orig, .white. 
You provided input which have no '\n '{0} or of empty value={1}')\n if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):\n try:\n coords = gifti_img.get_arrays_from_intent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data\n except IndexError:\n raise ValueError(error_message.format(\n 'NIFTI_INTENT_POINTSET', gifti_img.get_arrays_from_intent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])))\n try:\n faces = gifti_img.get_arrays_from_intent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data\n except IndexError:\n raise ValueError(error_message.format(\n 'NIFTI_INTENT_TRIANGLE', gifti_img.get_arrays_from_intent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])))\n else:\n try:\n coords = gifti_img.getArraysFromIntent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data\n except IndexError:\n raise ValueError(error_message.format(\n 'NIFTI_INTENT_POINTSET', gifti_img.getArraysFromIntent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])))\n try:\n faces = gifti_img.getArraysFromIntent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data\n except IndexError:\n raise ValueError(error_message.format(\n 'NIFTI_INTENT_TRIANGLE', gifti_img.getArraysFromIntent(\n nibabel.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])))\n\n return coords, faces\n\n\n# function to figure out datatype and load data\ndef load_surf_mesh(surf_mesh):\n \"\"\"Loading a surface mesh geometry\n\n Parameters\n ----------\n surf_mesh : str or numpy.ndarray or Mesh\n Either a file containing surface mesh geometry (valid formats\n are .gii .gii.gz or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or two Numpy arrays organized in a list,\n tuple or a namedtuple with the fields \"coordinates\" and \"faces\", or a\n Mesh object with \"coordinates\" and \"faces\" attributes.\n\n Returns\n --------\n mesh : Mesh\n With the fields \"coordinates\" and \"faces\", each containing a\n numpy.ndarray\n\n \"\"\"\n\n # if input is a filename, try to load it\n if isinstance(surf_mesh, str):\n # resolve globbing\n file_list = _resolve_globbing(surf_mesh)\n if len(file_list) == 1:\n surf_mesh = file_list[0]\n elif len(file_list) > 1:\n # empty list is handled inside _resolve_globbing function\n raise ValueError((\"More than one file matching path: %s \\n\"\n \"load_surf_mesh can only load one file at a time\")\n % surf_mesh)\n\n if (surf_mesh.endswith('orig') or surf_mesh.endswith('pial') or\n surf_mesh.endswith('white') or surf_mesh.endswith('sphere') or\n surf_mesh.endswith('inflated')):\n coords, faces = fs.io.read_geometry(surf_mesh)\n mesh = Mesh(coordinates=coords, faces=faces)\n elif surf_mesh.endswith('gii'):\n if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):\n gifti_img = nibabel.load(surf_mesh)\n else:\n gifti_img = gifti.read(surf_mesh)\n coords, faces = _gifti_img_to_mesh(gifti_img)\n mesh = Mesh(coordinates=coords, faces=faces)\n elif surf_mesh.endswith('.gii.gz'):\n gifti_img = _load_surf_files_gifti_gzip(surf_mesh)\n coords, faces = _gifti_img_to_mesh(gifti_img)\n mesh = Mesh(coordinates=coords, faces=faces)\n else:\n raise ValueError(('The input type is not recognized. 
%r was given '\n 'while valid inputs are one of the following '\n 'file formats: .gii, .gii.gz, Freesurfer '\n 'specific files such as .orig, .pial, .sphere, '\n '.white, .inflated or two Numpy arrays organized '\n 'in a list, tuple or a namedtuple with the '\n 'fields \"coordinates\" and \"faces\"'\n ) % surf_mesh)\n elif isinstance(surf_mesh, (list, tuple)):\n try:\n coords, faces = surf_mesh\n mesh = Mesh(coordinates=coords, faces=faces)\n except Exception:\n raise ValueError(('If a list or tuple is given as input, '\n 'it must have two elements, the first is '\n 'a Numpy array containing the x-y-z coordinates '\n 'of the mesh vertices, the second is a Numpy '\n 'array containing the indices (into coords) of '\n 'the mesh faces. The input was a list with '\n '%r elements.') % len(surf_mesh))\n elif (hasattr(surf_mesh, \"faces\") and hasattr(surf_mesh, \"coordinates\")):\n coords, faces = surf_mesh.coordinates, surf_mesh.faces\n mesh = Mesh(coordinates=coords, faces=faces)\n\n else:\n raise ValueError('The input type is not recognized. '\n 'Valid inputs are one of the following file '\n 'formats: .gii, .gii.gz, Freesurfer specific files '\n 'such as .orig, .pial, .sphere, .white, .inflated '\n 'or two Numpy arrays organized in a list, tuple or '\n 'a namedtuple with the fields \"coordinates\" and '\n '\"faces\"')\n\n return mesh\n\n\ndef load_surface(surface):\n \"\"\"Loads a surface.\n\n Parameters\n ----------\n surface : Surface-like (see description)\n The surface to be loaded.\n A surface can be:\n - a nilearn.surface.Surface\n - a sequence (mesh, data) where:\n - mesh can be:\n - a nilearn.surface.Mesh\n - a path to .gii or .gii.gz etc.\n - a sequence of two numpy arrays,\n the first containing vertex coordinates\n and the second containing triangles.\n - data can be:\n - a path to .gii or .gii.gz etc.\n - a numpy array with shape (n_vertices,)\n or (n_time_points, n_vertices)\n\n Returns\n --------\n surface : Surface\n With the fields \"mesh\" (Mesh object) and \"data\" (numpy.ndarray).\n\n \"\"\"\n # Handle the case where we received a Surface\n # object with mesh and data attributes\n if hasattr(surface, \"mesh\") and hasattr(surface, \"data\"):\n mesh = load_surf_mesh(surface.mesh)\n data = load_surf_data(surface.data)\n # Handle the case where we received a sequence\n # (mesh, data)\n elif isinstance(surface, (list, tuple, np.ndarray)):\n if len(surface) == 2:\n mesh = load_surf_mesh(surface[0])\n data = load_surf_data(surface[1])\n else:\n raise ValueError(\"`load_surface` accepts iterables \"\n \"of length 2 to define a surface. \"\n \"You provided a {} of length {}.\".format(\n type(surface), len(surface)))\n else:\n raise ValueError(\"Wrong parameter `surface` in `load_surface`. 
\"\n \"Please refer to the documentation for more information.\")\n return Surface(mesh, data)\n\n\ndef _check_mesh(mesh):\n \"\"\"Check that mesh data is either a str, or a dict with sufficient\n entries.\n\n Used by plotting.surf_plotting.plot_img_on_surf and\n plotting.html_surface.full_brain_info\n\n \"\"\"\n if isinstance(mesh, str):\n return datasets.fetch_surf_fsaverage(mesh)\n if not isinstance(mesh, collections.Mapping):\n raise TypeError(\"The mesh should be a str or a dictionary, \"\n \"you provided: {}.\".format(type(mesh).__name__))\n missing = {'pial_left', 'pial_right', 'sulc_left', 'sulc_right',\n 'infl_left', 'infl_right'}.difference(mesh.keys())\n if missing:\n raise ValueError(\n \"{} {} missing from the provided mesh dictionary\".format(\n missing, ('are' if len(missing) > 1 else 'is')))\n return mesh\n\n\ndef check_mesh_and_data(mesh, data):\n \"\"\"Load surface mesh and data, check that they have compatible shapes.\n\n Parameters\n ----------\n mesh : str or numpy.ndarray or Mesh\n Either a file containing surface mesh geometry (valid formats\n are .gii .gii.gz or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or two Numpy arrays organized in a list,\n tuple or a namedtuple with the fields \"coordinates\" and \"faces\", or a\n Mesh object with \"coordinates\" and \"faces\" attributes.\n\n data : str or numpy.ndarray\n Either a file containing surface data (valid format are .gii,\n .gii.gz, .mgz, .nii, .nii.gz, or Freesurfer specific files such as\n .thickness, .curv, .sulc, .annot, .label), lists of 1D data files are\n returned as 2D arrays, or a Numpy array containing surface data.\n\n Returns\n -------\n mesh : Mesh\n Checked mesh.\n\n data : numpy.ndarray\n Checked data.\n\n \"\"\"\n mesh = load_surf_mesh(mesh)\n data = load_surf_data(data)\n # Check that mesh coordinates has a number of nodes\n # equal to the size of the data.\n if len(data) != len(mesh.coordinates):\n raise ValueError(\n 'Mismatch between number of nodes in mesh ({}) and '\n 'size of surface data ({})'.format(len(mesh.coordinates), len(data)))\n # Check that the indices of faces are consistent with the\n # mesh coordinates. That is, we shouldn't have an index\n # larger or equal to the length of the coordinates array.\n if mesh.faces.max() >= len(mesh.coordinates):\n raise ValueError(\n \"Mismatch between the indices of faces and the number of nodes. \"\n \"Maximum face index is {} while coordinates array has length {}.\".format(\n mesh.faces.max(), len(mesh.coordinates)))\n return mesh, data\n\n\ndef check_surface(surface):\n \"\"\"Load a surface as a Surface object.\n This function will make sure that the surfaces's\n mesh and data have compatible shapes.\n\n Parameters\n ----------\n surface : Surface-like (see description)\n The surface to be loaded.\n A surface can be:\n - a nilearn.surface.Surface\n - a sequence (mesh, data) where:\n - mesh can be:\n - a nilearn.surface.Mesh\n - a path to .gii or .gii.gz etc.\n - a sequence of two numpy arrays,\n the first containing vertex coordinates\n and the second containing triangles.\n - data can be:\n - a path to .gii or .gii.gz etc.\n - a numpy array with shape (n_vertices,)\n or (n_time_points, n_vertices)\n\n Returns\n -------\n surface : Surface\n Checked surface object.\n\n \"\"\"\n surface = load_surface(surface)\n mesh, data = check_mesh_and_data(surface.mesh,\n surface.data)\n return Surface(mesh, data)\n",
"\"\"\"\nThis module presents an interface to use the glm implemented in\nnistats.regression.\n\nIt contains the GLM and contrast classes that are meant to be the main objects\nof fMRI data analyses.\n\nAuthor: Bertrand Thirion, Martin Perez-Guevara, 2016\n\n\"\"\"\nimport glob\nimport json\nimport os\nimport sys\nimport time\nfrom warnings import warn\n\nimport numpy as np\nimport pandas as pd\nfrom joblib import Memory, Parallel, delayed\nfrom nibabel import Nifti1Image\nfrom nibabel.onetime import auto_attr\nfrom sklearn.base import clone\n\nfrom nilearn._utils.glm import (_check_events_file_uses_tab_separators,\n _check_run_tables, get_bids_files,\n parse_bids_filename)\nfrom nilearn._utils.niimg_conversions import check_niimg\nfrom nilearn.glm.contrasts import (_compute_fixed_effect_contrast,\n expression_to_contrast_vector)\nfrom nilearn.glm.first_level.design_matrix import \\\n make_first_level_design_matrix\nfrom nilearn.image import get_data\nfrom nilearn.glm.regression import (ARModel, OLSModel, RegressionResults,\n SimpleRegressionResults)\nfrom nilearn.glm._base import BaseGLM\n\n\ndef mean_scaling(Y, axis=0):\n \"\"\"Scaling of the data to have percent of baseline change along the\n specified axis\n\n Parameters\n ----------\n Y : array of shape (n_time_points, n_voxels)\n The input data.\n\n axis : int, optional\n Axis along which the scaling mean should be calculated. Default=0.\n\n Returns\n -------\n Y : array of shape (n_time_points, n_voxels),\n The data after mean-scaling, de-meaning and multiplication by 100.\n\n mean : array of shape (n_voxels,)\n The data mean.\n\n \"\"\"\n mean = Y.mean(axis=axis)\n if (mean == 0).any():\n warn('Mean values of 0 observed.'\n 'The data have probably been centered.'\n 'Scaling might not work as expected')\n mean = np.maximum(mean, 1)\n Y = 100 * (Y / mean - 1)\n return Y, mean\n\n\ndef _ar_model_fit(X, val, Y):\n \"\"\"Wrapper for fit method of ARModel to allow joblib parallelization\"\"\"\n return ARModel(X, val).fit(Y)\n\n\ndef run_glm(Y, X, noise_model='ar1', bins=100, n_jobs=1, verbose=0):\n \"\"\" GLM fit for an fMRI data matrix\n\n Parameters\n ----------\n Y : array of shape (n_time_points, n_voxels)\n The fMRI data.\n\n X : array of shape (n_time_points, n_regressors)\n The design matrix.\n\n noise_model : {'ar1', 'ols'}, optional\n The temporal variance model. Default='ar1'.\n\n bins : int, optional\n Maximum number of discrete bins for the AR(1) coef histogram.\n Default=100.\n\n n_jobs : int, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs'. Default=1.\n\n verbose : int, optional\n The verbosity level. Defaut=0.\n\n Returns\n -------\n labels : array of shape (n_voxels,),\n A map of values on voxels used to identify the corresponding model.\n\n results : dict,\n Keys correspond to the different labels values\n values are RegressionResults instances corresponding to the voxels.\n\n \"\"\"\n acceptable_noise_models = ['ar1', 'ols']\n if noise_model not in acceptable_noise_models:\n raise ValueError(\n \"Acceptable noise models are {0}. 
You provided \"\n \"'noise_model={1}'\".format(acceptable_noise_models,\n noise_model)\n )\n if Y.shape[0] != X.shape[0]:\n raise ValueError('The number of rows of Y '\n 'should match the number of rows of X.'\n ' You provided X with shape {0} '\n 'and Y with shape {1}'.\n format(X.shape, Y.shape))\n\n # Create the model\n ols_result = OLSModel(X).fit(Y)\n\n if noise_model == 'ar1':\n # compute and discretize the AR1 coefs\n ar1 = (\n (ols_result.residuals[1:]\n * ols_result.residuals[:-1]).sum(axis=0)\n / (ols_result.residuals ** 2).sum(axis=0)\n )\n del ols_result\n ar1 = (ar1 * bins).astype(np.int) * 1. / bins\n # Fit the AR model acccording to current AR(1) estimates\n results = {}\n labels = ar1\n # Parallelize by creating a job per ARModel\n vals = np.unique(ar1)\n ar_result = Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_ar_model_fit)(X, val, Y[:, labels == val])\n for val in vals)\n for val, result in zip(vals, ar_result):\n results[val] = result\n del vals\n del ar_result\n\n else:\n labels = np.zeros(Y.shape[1])\n results = {0.0: ols_result}\n\n return labels, results\n\n\nclass FirstLevelModel(BaseGLM):\n \"\"\" Implementation of the General Linear Model\n for single session fMRI data.\n\n Parameters\n ----------\n t_r : float\n This parameter indicates repetition times of the experimental runs.\n In seconds. It is necessary to correctly consider times in the design\n matrix. This parameter is also passed to nilearn.signal.clean.\n Please see the related documentation for details.\n\n slice_time_ref : float, optional\n This parameter indicates the time of the reference slice used in the\n slice timing preprocessing step of the experimental runs. It is\n expressed as a percentage of the t_r (time repetition), so it can have\n values between 0. and 1. Default=0.\n\n hrf_model : {'glover', 'spm', 'spm + derivative', 'spm + derivative + dispersion',\n 'glover + derivative', 'glover + derivative + dispersion', 'fir', None}, optional\n String that specifies the hemodynamic response function.\n Default='glover'.\n\n drift_model : string, optional\n This parameter specifies the desired drift model for the design\n matrices. It can be 'polynomial', 'cosine' or None.\n Default='cosine'.\n\n high_pass : float, optional\n This parameter specifies the cut frequency of the high-pass filter in\n Hz for the design matrices. Used only if drift_model is 'cosine'.\n Default=0.01.\n\n drift_order : int, optional\n This parameter specifices the order of the drift model (in case it is\n polynomial) for the design matrices. Default=1.\n\n fir_delays : array of shape(n_onsets) or list, optional\n In case of FIR design, yields the array of delays used in the FIR\n model, in scans. Default=[0].\n\n min_onset : float, optional\n This parameter specifies the minimal onset relative to the design\n (in seconds). Events that start before (slice_time_ref * t_r +\n min_onset) are not considered. Default=-24.\n\n mask_img : Niimg-like, NiftiMasker object or False, optional\n Mask to be used on data. If an instance of masker is passed,\n then its mask will be used. If no mask is given,\n it will be computed automatically by a NiftiMasker with default\n parameters. 
If False is given then the data will not be masked.\n\n target_affine : 3x3 or 4x4 matrix, optional\n This parameter is passed to nilearn.image.resample_img.\n Please see the related documentation for details.\n\n target_shape : 3-tuple of integers, optional\n This parameter is passed to nilearn.image.resample_img.\n Please see the related documentation for details.\n\n smoothing_fwhm : float, optional\n If smoothing_fwhm is not None, it gives the size in millimeters of\n the spatial smoothing to apply to the signal.\n\n memory : string, optional\n Path to the directory used to cache the masking process and the glm\n fit. By default, no caching is done.\n Creates instance of joblib.Memory.\n\n memory_level : integer, optional\n Rough estimator of the amount of memory used by caching. Higher value\n means more memory for caching.\n\n standardize : boolean, optional\n If standardize is True, the time-series are centered and normed:\n their variance is put to 1 in the time dimension. Default=False.\n\n signal_scaling : False, int or (int, int), optional\n If not False, fMRI signals are\n scaled to the mean value of scaling_axis given,\n which can be 0, 1 or (0, 1).\n 0 refers to mean scaling each voxel with respect to time,\n 1 refers to mean scaling each time point with respect to all voxels &\n (0, 1) refers to scaling with respect to voxels and time,\n which is known as grand mean scaling.\n Incompatible with standardize (standardize=False is enforced when\n signal_scaling is not False).\n Default=0.\n\n noise_model : {'ar1', 'ols'}, optional\n The temporal variance model. Default='ar1'.\n\n verbose : integer, optional\n Indicate the level of verbosity. By default, nothing is printed.\n If 0 prints nothing. If 1 prints progress by computation of\n each run. If 2 prints timing details of masker and GLM. If 3\n prints masker computation details. Default=0.\n\n n_jobs : integer, optional\n The number of CPUs to use to do the computation. -1 means\n 'all CPUs', -2 'all CPUs but one', and so on.\n Default=1.\n\n minimize_memory : boolean, optional\n Gets rid of some variables on the model fit results that are not\n necessary for contrast computation and would only be useful for\n further inspection of model details. This has an important impact\n on memory consumption. 
Default=True.\n\n subject_label : string, optional\n This id will be used to identify a `FirstLevelModel` when passed to\n a `SecondLevelModel` object.\n\n Attributes\n ----------\n labels_ : array of shape (n_voxels,),\n a map of values on voxels used to identify the corresponding model\n\n results_ : dict,\n with keys corresponding to the different labels values.\n Values are SimpleRegressionResults corresponding to the voxels,\n if minimize_memory is True,\n RegressionResults if minimize_memory is False\n\n Notes\n -----\n This class is experimental.\n It may change in any future release of Nilearn.\n\n \"\"\"\n def __init__(self, t_r=None, slice_time_ref=0., hrf_model='glover',\n drift_model='cosine', high_pass=.01, drift_order=1,\n fir_delays=[0], min_onset=-24, mask_img=None,\n target_affine=None, target_shape=None, smoothing_fwhm=None,\n memory=Memory(None), memory_level=1, standardize=False,\n signal_scaling=0, noise_model='ar1', verbose=0, n_jobs=1,\n minimize_memory=True, subject_label=None):\n # design matrix parameters\n self.t_r = t_r\n self.slice_time_ref = slice_time_ref\n self.hrf_model = hrf_model\n self.drift_model = drift_model\n self.high_pass = high_pass\n self.drift_order = drift_order\n self.fir_delays = fir_delays\n self.min_onset = min_onset\n # glm parameters\n self.mask_img = mask_img\n self.target_affine = target_affine\n self.target_shape = target_shape\n self.smoothing_fwhm = smoothing_fwhm\n if isinstance(memory, str):\n self.memory = Memory(memory)\n else:\n self.memory = memory\n self.memory_level = memory_level\n self.standardize = standardize\n if signal_scaling is False:\n self.signal_scaling = signal_scaling\n elif signal_scaling in [0, 1, (0, 1)]:\n self.scaling_axis = signal_scaling\n self.signal_scaling = True\n self.standardize = False\n else:\n raise ValueError('signal_scaling must be \"False\", \"0\", \"1\"'\n ' or \"(0, 1)\"')\n\n self.noise_model = noise_model\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.minimize_memory = minimize_memory\n # attributes\n self.labels_ = None\n self.results_ = None\n self.subject_label = subject_label\n\n def fit(self, run_imgs, events=None, confounds=None,\n design_matrices=None):\n \"\"\"Fit the GLM\n\n For each run:\n 1. create design matrix X\n 2. do a masker job: fMRI_data -> Y\n 3. fit regression to (Y, X)\n\n Parameters\n ----------\n run_imgs : Niimg-like object or list of Niimg-like objects,\n Data on which the GLM will be fitted. If this is a list,\n the affine is considered the same for all.\n\n events : pandas Dataframe or string or list of pandas DataFrames or strings, optional\n fMRI events used to build design matrices. One events object\n expected per run_img. Ignored in case designs is not None.\n If string, then a path to a csv file is expected.\n\n confounds : pandas Dataframe, numpy array or string or\n list of pandas DataFrames, numpy arays or strings, optional\n Each column in a DataFrame corresponds to a confound variable\n to be included in the regression model of the respective run_img.\n The number of rows must match the number of volumes in the\n respective run_img. Ignored in case designs is not None.\n If string, then a path to a csv file is expected.\n\n design_matrices : pandas DataFrame or list of pandas DataFrames, optional\n Design matrices that will be used to fit the GLM. 
If given it\n takes precedence over events and confounds.\n\n \"\"\"\n # Initialize masker_ to None such that attribute exists\n self.masker_ = None\n\n # Raise a warning if both design_matrices and confounds are provided\n if design_matrices is not None and (confounds is not None or events is not None):\n warn('If design matrices are supplied, confounds and events will be ignored.')\n # Local import to prevent circular imports\n from nilearn.input_data import NiftiMasker # noqa\n\n # Check arguments\n # Check imgs type\n if events is not None:\n _check_events_file_uses_tab_separators(events_files=events)\n if not isinstance(run_imgs, (list, tuple)):\n run_imgs = [run_imgs]\n if design_matrices is None:\n if events is None:\n raise ValueError('events or design matrices must be provided')\n if self.t_r is None:\n raise ValueError('t_r not given to FirstLevelModel object'\n ' to compute design from events')\n else:\n design_matrices = _check_run_tables(run_imgs, design_matrices,\n 'design_matrices')\n # Check that number of events and confound files match number of runs\n # Also check that events and confound files can be loaded as DataFrame\n if events is not None:\n events = _check_run_tables(run_imgs, events, 'events')\n if confounds is not None:\n confounds = _check_run_tables(run_imgs, confounds, 'confounds')\n\n # Learn the mask\n if self.mask_img is False:\n # We create a dummy mask to preserve functionality of api\n ref_img = check_niimg(run_imgs[0])\n self.mask_img = Nifti1Image(np.ones(ref_img.shape[:3]),\n ref_img.affine)\n if not isinstance(self.mask_img, NiftiMasker):\n self.masker_ = NiftiMasker(mask_img=self.mask_img,\n smoothing_fwhm=self.smoothing_fwhm,\n target_affine=self.target_affine,\n standardize=self.standardize,\n mask_strategy='epi',\n t_r=self.t_r,\n memory=self.memory,\n verbose=max(0, self.verbose - 2),\n target_shape=self.target_shape,\n memory_level=self.memory_level\n )\n self.masker_.fit(run_imgs[0])\n else:\n # Make sure masker has been fitted otherwise no attribute mask_img_\n self.mask_img._check_fitted()\n if self.mask_img.mask_img_ is None and self.masker_ is None:\n self.masker_ = clone(self.mask_img)\n for param_name in ['target_affine', 'target_shape',\n 'smoothing_fwhm', 't_r', 'memory',\n 'memory_level']:\n our_param = getattr(self, param_name)\n if our_param is None:\n continue\n if getattr(self.masker_, param_name) is not None:\n warn('Parameter %s of the masker'\n ' overriden' % param_name)\n setattr(self.masker_, param_name, our_param)\n self.masker_.fit(run_imgs[0])\n else:\n self.masker_ = self.mask_img\n\n # For each run fit the model and keep only the regression results.\n self.labels_, self.results_, self.design_matrices_ = [], [], []\n n_runs = len(run_imgs)\n t0 = time.time()\n for run_idx, run_img in enumerate(run_imgs):\n # Report progress\n if self.verbose > 0:\n percent = float(run_idx) / n_runs\n percent = round(percent * 100, 2)\n dt = time.time() - t0\n # We use a max to avoid a division by zero\n if run_idx == 0:\n remaining = 'go take a coffee, a big one'\n else:\n remaining = (100. 
- percent) / max(0.01, percent) * dt\n remaining = '%i seconds remaining' % remaining\n\n sys.stderr.write(\n \"Computing run %d out of %d runs (%s)\\n\"\n % (run_idx + 1, n_runs, remaining))\n\n # Build the experimental design for the glm\n run_img = check_niimg(run_img, ensure_ndim=4)\n if design_matrices is None:\n n_scans = get_data(run_img).shape[3]\n if confounds is not None:\n confounds_matrix = confounds[run_idx].values\n if confounds_matrix.shape[0] != n_scans:\n raise ValueError('Rows in confounds does not match'\n 'n_scans in run_img at index %d'\n % (run_idx,))\n confounds_names = confounds[run_idx].columns.tolist()\n else:\n confounds_matrix = None\n confounds_names = None\n start_time = self.slice_time_ref * self.t_r\n end_time = (n_scans - 1 + self.slice_time_ref) * self.t_r\n frame_times = np.linspace(start_time, end_time, n_scans)\n design = make_first_level_design_matrix(frame_times,\n events[run_idx],\n self.hrf_model,\n self.drift_model,\n self.high_pass,\n self.drift_order,\n self.fir_delays,\n confounds_matrix,\n confounds_names,\n self.min_onset\n )\n else:\n design = design_matrices[run_idx]\n self.design_matrices_.append(design)\n\n # Mask and prepare data for GLM\n if self.verbose > 1:\n t_masking = time.time()\n sys.stderr.write('Starting masker computation \\r')\n\n Y = self.masker_.transform(run_img)\n del run_img # Delete unmasked image to save memory\n\n if self.verbose > 1:\n t_masking = time.time() - t_masking\n sys.stderr.write('Masker took %d seconds \\n'\n % t_masking)\n\n if self.signal_scaling:\n Y, _ = mean_scaling(Y, self.scaling_axis)\n if self.memory:\n mem_glm = self.memory.cache(run_glm, ignore=['n_jobs'])\n else:\n mem_glm = run_glm\n\n # compute GLM\n if self.verbose > 1:\n t_glm = time.time()\n sys.stderr.write('Performing GLM computation\\r')\n labels, results = mem_glm(Y, design.values,\n noise_model=self.noise_model,\n bins=100, n_jobs=self.n_jobs)\n if self.verbose > 1:\n t_glm = time.time() - t_glm\n sys.stderr.write('GLM took %d seconds \\n' % t_glm)\n\n self.labels_.append(labels)\n # We save memory if inspecting model details is not necessary\n if self.minimize_memory:\n for key in results:\n results[key] = SimpleRegressionResults(results[key])\n self.results_.append(results)\n del Y\n\n # Report progress\n if self.verbose > 0:\n sys.stderr.write(\"\\nComputation of %d runs done in %i seconds\\n\\n\"\n % (n_runs, time.time() - t0))\n return self\n\n def compute_contrast(self, contrast_def, stat_type=None,\n output_type='z_score'):\n \"\"\"Generate different outputs corresponding to\n the contrasts provided e.g. z_map, t_map, effects and variance.\n In multi-session case, outputs the fixed effects map.\n\n Parameters\n ----------\n contrast_def : str or array of shape (n_col) or list of (string or\n array of shape (n_col))\n\n where ``n_col`` is the number of columns of the design matrix,\n (one array per run). If only one array is provided when there\n are several runs, it will be assumed that the same contrast is\n desired for all runs. The string can be a formula compatible with\n `pandas.DataFrame.eval`. Basically one can use the name of the\n conditions as they appear in the design matrix of the fitted model\n combined with operators +- and combined with numbers\n with operators +-`*`/.\n\n stat_type : {'t', 'F'}, optional\n type of the contrast\n\n output_type : str, optional\n Type of the output map. 
Can be 'z_score', 'stat', 'p_value',\n 'effect_size', 'effect_variance' or 'all'.\n Default='z-score'.\n\n Returns\n -------\n output : Nifti1Image or dict\n The desired output image(s). If ``output_type == 'all'``, then\n the output is a dictionary of images, keyed by the type of image.\n\n \"\"\"\n if self.labels_ is None or self.results_ is None:\n raise ValueError('The model has not been fit yet')\n\n if isinstance(contrast_def, (np.ndarray, str)):\n con_vals = [contrast_def]\n elif isinstance(contrast_def, (list, tuple)):\n con_vals = contrast_def\n else:\n raise ValueError('contrast_def must be an array or str or list of'\n ' (array or str)')\n\n n_runs = len(self.labels_)\n n_contrasts = len(con_vals)\n if n_contrasts == 1 and n_runs > 1:\n warn('One contrast given, assuming it for all %d runs' % n_runs)\n con_vals = con_vals * n_runs\n elif n_contrasts != n_runs:\n raise ValueError('%n contrasts given, while there are %n runs' %\n (n_contrasts, n_runs))\n\n # Translate formulas to vectors\n for cidx, (con, design_mat) in enumerate(zip(con_vals,\n self.design_matrices_)\n ):\n design_columns = design_mat.columns.tolist()\n if isinstance(con, str):\n con_vals[cidx] = expression_to_contrast_vector(\n con, design_columns)\n\n valid_types = ['z_score', 'stat', 'p_value', 'effect_size',\n 'effect_variance']\n valid_types.append('all') # ensuring 'all' is the final entry.\n if output_type not in valid_types:\n raise ValueError(\n 'output_type must be one of {}'.format(valid_types))\n contrast = _compute_fixed_effect_contrast(self.labels_, self.results_,\n con_vals, stat_type)\n output_types = (valid_types[:-1]\n if output_type == 'all' else [output_type])\n outputs = {}\n for output_type_ in output_types:\n estimate_ = getattr(contrast, output_type_)()\n # Prepare the returned images\n output = self.masker_.inverse_transform(estimate_)\n contrast_name = str(con_vals)\n output.header['descrip'] = (\n '%s of contrast %s' % (output_type_, contrast_name))\n outputs[output_type_] = output\n\n return outputs if output_type == 'all' else output\n\n def _get_voxelwise_model_attribute(self, attribute,\n result_as_time_series):\n \"\"\"Transform RegressionResults instances within a dictionary\n (whose keys represent the autoregressive coefficient under the 'ar1'\n noise model or only 0.0 under 'ols' noise_model and values are the\n RegressionResults instances) into input nifti space.\n\n Parameters\n ----------\n attribute : str\n an attribute of a RegressionResults instance.\n possible values include: resid, norm_resid, predicted,\n SSE, r_square, MSE.\n\n result_as_time_series : bool\n whether the RegressionResult attribute has a value\n per timepoint of the input nifti image.\n\n Returns\n -------\n output : list\n A list of Nifti1Image(s).\n\n \"\"\"\n # check if valid attribute is being accessed.\n all_attributes = dict(vars(RegressionResults)).keys()\n possible_attributes = [prop\n for prop in all_attributes\n if '__' not in prop\n ]\n if attribute not in possible_attributes:\n msg = (\"attribute must be one of: \"\n \"{attr}\".format(attr=possible_attributes)\n )\n raise ValueError(msg)\n\n if self.minimize_memory:\n raise ValueError(\n 'To access voxelwise attributes like '\n 'R-squared, residuals, and predictions, '\n 'the `FirstLevelModel`-object needs to store '\n 'there attributes. 
'\n 'To do so, set `minimize_memory` to `False` '\n 'when initializing the `FirstLevelModel`-object.')\n\n if self.labels_ is None or self.results_ is None:\n raise ValueError('The model has not been fit yet')\n\n output = []\n\n for design_matrix, labels, results in zip(self.design_matrices_,\n self.labels_,\n self.results_\n ):\n if result_as_time_series:\n voxelwise_attribute = np.zeros((design_matrix.shape[0],\n len(labels))\n )\n else:\n voxelwise_attribute = np.zeros((1, len(labels)))\n\n for label_ in results:\n label_mask = labels == label_\n voxelwise_attribute[:, label_mask] = getattr(results[label_],\n attribute)\n\n output.append(self.masker_.inverse_transform(voxelwise_attribute))\n\n return output\n\n @auto_attr\n def residuals(self):\n \"\"\"Transform voxelwise residuals to the same shape\n as the input Nifti1Image(s)\n\n Returns\n -------\n output : list\n A list of Nifti1Image(s).\n\n \"\"\"\n return self._get_voxelwise_model_attribute('resid',\n result_as_time_series=True)\n\n @auto_attr\n def predicted(self):\n \"\"\"Transform voxelwise predicted values to the same shape\n as the input Nifti1Image(s)\n\n Returns\n -------\n output : list\n A list of Nifti1Image(s).\n\n \"\"\"\n return self._get_voxelwise_model_attribute('predicted',\n result_as_time_series=True)\n\n @auto_attr\n def r_square(self):\n \"\"\"Transform voxelwise r-squared values to the same shape\n as the input Nifti1Image(s)\n\n Returns\n -------\n output : list\n A list of Nifti1Image(s).\n\n \"\"\"\n return self._get_voxelwise_model_attribute('r_square',\n result_as_time_series=False\n )\n\n\ndef first_level_from_bids(dataset_path, task_label, space_label=None,\n img_filters=None, t_r=None, slice_time_ref=0.,\n hrf_model='glover', drift_model='cosine',\n high_pass=.01, drift_order=1, fir_delays=[0],\n min_onset=-24, mask_img=None,\n target_affine=None, target_shape=None,\n smoothing_fwhm=None, memory=Memory(None),\n memory_level=1, standardize=False,\n signal_scaling=0, noise_model='ar1',\n verbose=0, n_jobs=1,\n minimize_memory=True,\n derivatives_folder='derivatives'):\n \"\"\"Create FirstLevelModel objects and fit arguments from a BIDS dataset.\n\n It t_r is not specified this function will attempt to load it from a\n bold.json file alongside slice_time_ref. Otherwise t_r and slice_time_ref\n are taken as given.\n\n Parameters\n ----------\n dataset_path : str\n Directory of the highest level folder of the BIDS dataset. Should\n contain subject folders and a derivatives folder.\n\n task_label : str\n Task_label as specified in the file names like _task-<task_label>_.\n\n space_label : str, optional\n Specifies the space label of the preprocessed bold.nii images.\n As they are specified in the file names like _space-<space_label>_.\n\n img_filters : list of tuples (str, str), optional\n Filters are of the form (field, label). Only one filter per field\n allowed. A file that does not match a filter will be discarded.\n Possible filters are 'acq', 'ce', 'dir', 'rec', 'run', 'echo', 'res',\n 'den', and 'desc'. Filter examples would be ('desc', 'preproc'),\n ('dir', 'pa') and ('run', '10').\n\n derivatives_folder : str, optional\n derivatives and app folder path containing preprocessed files.\n Like \"derivatives/FMRIPREP\". Default=\"derivatives\".\n\n All other parameters correspond to a `FirstLevelModel` object, which\n contains their documentation. 
The subject label of the model will be\n determined directly from the BIDS dataset.\n\n Returns\n -------\n models : list of `FirstLevelModel` objects\n Each FirstLevelModel object corresponds to a subject. All runs from\n different sessions are considered together for the same subject to run\n a fixed effects analysis on them.\n\n models_run_imgs : list of list of Niimg-like objects,\n Items for the FirstLevelModel fit function of their respective model.\n\n models_events : list of list of pandas DataFrames,\n Items for the FirstLevelModel fit function of their respective model.\n\n models_confounds : list of list of pandas DataFrames or None,\n Items for the FirstLevelModel fit function of their respective model.\n\n \"\"\"\n # check arguments\n img_filters = img_filters if img_filters else []\n if not isinstance(dataset_path, str):\n raise TypeError(\n 'dataset_path must be a string, instead %s was given' %\n type(task_label))\n if not os.path.exists(dataset_path):\n raise ValueError('given path do not exist: %s' % dataset_path)\n if not isinstance(task_label, str):\n raise TypeError('task_label must be a string, instead %s was given' %\n type(task_label))\n if space_label is not None and not isinstance(space_label, str):\n raise TypeError('space_label must be a string, instead %s was given' %\n type(space_label))\n if not isinstance(img_filters, list):\n raise TypeError('img_filters must be a list, instead %s was given' %\n type(img_filters))\n for img_filter in img_filters:\n if (not isinstance(img_filter[0], str)\n or not isinstance(img_filter[1], str)):\n raise TypeError('filters in img filters must be (str, str), '\n 'instead %s was given' % type(img_filter))\n if img_filter[0] not in ['acq', 'ce', 'dir', 'rec', 'run',\n 'echo', 'desc', 'res', 'den',\n ]:\n raise ValueError(\n \"field %s is not a possible filter. Only \"\n \"'acq', 'ce', 'dir', 'rec', 'run', 'echo', \"\n \"'desc', 'res', 'den' are allowed.\" % img_filter[0])\n\n # check derivatives folder is present\n derivatives_path = os.path.join(dataset_path, derivatives_folder)\n if not os.path.exists(derivatives_path):\n raise ValueError('derivatives folder does not exist in given dataset')\n\n # Get acq specs for models. RepetitionTime and SliceTimingReference.\n # Throw warning if no bold.json is found\n if t_r is not None:\n warn('RepetitionTime given in model_init as %d' % t_r)\n warn('slice_time_ref is %d percent of the repetition '\n 'time' % slice_time_ref)\n else:\n filters = [('task', task_label)]\n for img_filter in img_filters:\n if img_filter[0] in ['acq', 'rec', 'run']:\n filters.append(img_filter)\n\n img_specs = get_bids_files(derivatives_path, modality_folder='func',\n file_tag='bold', file_type='json',\n filters=filters)\n # If we dont find the parameter information in the derivatives folder\n # we try to search in the raw data folder\n if not img_specs:\n img_specs = get_bids_files(dataset_path, modality_folder='func',\n file_tag='bold', file_type='json',\n filters=filters)\n if not img_specs:\n warn('No bold.json found in derivatives folder or '\n 'in dataset folder. t_r can not be inferred and will need to'\n ' be set manually in the list of models, otherwise their fit'\n ' will throw an exception')\n else:\n specs = json.load(open(img_specs[0], 'r'))\n if 'RepetitionTime' in specs:\n t_r = float(specs['RepetitionTime'])\n else:\n warn('RepetitionTime not found in file %s. t_r can not be '\n 'inferred and will need to be set manually in the '\n 'list of models. 
Otherwise their fit will throw an '\n ' exception' % img_specs[0])\n if 'SliceTimingRef' in specs:\n slice_time_ref = float(specs['SliceTimingRef'])\n else:\n warn('SliceTimingRef not found in file %s. It will be assumed'\n ' that the slice timing reference is 0.0 percent of the '\n 'repetition time. If it is not the case it will need to '\n 'be set manually in the generated list of models' %\n img_specs[0])\n\n # Infer subjects in dataset\n sub_folders = glob.glob(os.path.join(derivatives_path, 'sub-*/'))\n sub_labels = [os.path.basename(s[:-1]).split('-')[1] for s in sub_folders]\n sub_labels = sorted(list(set(sub_labels)))\n\n # Build fit_kwargs dictionaries to pass to their respective models fit\n # Events and confounds files must match number of imgs (runs)\n models = []\n models_run_imgs = []\n models_events = []\n models_confounds = []\n for sub_label in sub_labels:\n # Create model\n model = FirstLevelModel(\n t_r=t_r, slice_time_ref=slice_time_ref, hrf_model=hrf_model,\n drift_model=drift_model, high_pass=high_pass,\n drift_order=drift_order, fir_delays=fir_delays,\n min_onset=min_onset, mask_img=mask_img,\n target_affine=target_affine, target_shape=target_shape,\n smoothing_fwhm=smoothing_fwhm, memory=memory,\n memory_level=memory_level, standardize=standardize,\n signal_scaling=signal_scaling, noise_model=noise_model,\n verbose=verbose, n_jobs=n_jobs,\n minimize_memory=minimize_memory, subject_label=sub_label)\n models.append(model)\n\n # Get preprocessed imgs\n if space_label is None:\n filters = [('task', task_label)] + img_filters\n else:\n filters = [('task', task_label),\n ('space', space_label)] + img_filters\n imgs = get_bids_files(derivatives_path, modality_folder='func',\n file_tag='bold', file_type='nii*',\n sub_label=sub_label, filters=filters)\n # If there is more than one file for the same (ses, run), likely we\n # have an issue of underspecification of filters.\n run_check_list = []\n # If more than one run is present the run field is mandatory in BIDS\n # as well as the ses field if more than one session is present.\n if len(imgs) > 1:\n for img in imgs:\n img_dict = parse_bids_filename(img)\n if (\n '_ses-' in img_dict['file_basename']\n and '_run-' in img_dict['file_basename']\n ):\n if (img_dict['ses'], img_dict['run']) in run_check_list:\n raise ValueError(\n 'More than one nifti image found '\n 'for the same run %s and session %s. '\n 'Please verify that the '\n 'desc_label and space_label labels '\n 'corresponding to the BIDS spec '\n 'were correctly specified.' %\n (img_dict['run'], img_dict['ses']))\n else:\n run_check_list.append((img_dict['ses'],\n img_dict['run']))\n\n elif '_ses-' in img_dict['file_basename']:\n if img_dict['ses'] in run_check_list:\n raise ValueError(\n 'More than one nifti image '\n 'found for the same ses %s, while '\n 'no additional run specification present'\n '. Please verify that the desc_label and '\n 'space_label labels '\n 'corresponding to the BIDS spec '\n 'were correctly specified.' %\n img_dict['ses'])\n else:\n run_check_list.append(img_dict['ses'])\n\n elif '_run-' in img_dict['file_basename']:\n if img_dict['run'] in run_check_list:\n raise ValueError(\n 'More than one nifti image '\n 'found for the same run %s. '\n 'Please verify that the desc_label and '\n 'space_label labels '\n 'corresponding to the BIDS spec '\n 'were correctly specified.' 
%\n img_dict['run'])\n else:\n run_check_list.append(img_dict['run'])\n models_run_imgs.append(imgs)\n\n # Get events and extra confounds\n filters = [('task', task_label)]\n for img_filter in img_filters:\n if img_filter[0] in ['acq', 'rec', 'run']:\n filters.append(img_filter)\n\n # Get events files\n events = get_bids_files(dataset_path, modality_folder='func',\n file_tag='events', file_type='tsv',\n sub_label=sub_label, filters=filters)\n if events:\n if len(events) != len(imgs):\n raise ValueError('%d events.tsv files found for %d bold '\n 'files. Same number of event files as '\n 'the number of runs is expected' %\n (len(events), len(imgs)))\n events = [pd.read_csv(event, sep='\\t', index_col=None)\n for event in events]\n models_events.append(events)\n else:\n raise ValueError('No events.tsv files found')\n\n # Get confounds. If not found it will be assumed there are none.\n # If there are confounds, they are assumed to be present for all runs.\n confounds = get_bids_files(derivatives_path, modality_folder='func',\n file_tag='desc-confounds*',\n file_type='tsv', sub_label=sub_label,\n filters=filters)\n\n if confounds:\n if len(confounds) != len(imgs):\n raise ValueError('%d confounds.tsv files found for %d bold '\n 'files. Same number of confound files as '\n 'the number of runs is expected' %\n (len(events), len(imgs)))\n confounds = [pd.read_csv(c, sep='\\t', index_col=None)\n for c in confounds]\n models_confounds.append(confounds)\n\n return models, models_run_imgs, models_events, models_confounds\n",
"\"\"\"\nPlotting code for nilearn\n\"\"\"\n# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux\nimport os\nimport sys\nimport importlib\n\n\n###############################################################################\n# Make sure that we don't get DISPLAY problems when running without X on\n# unices\ndef _set_mpl_backend():\n # We are doing local imports here to avoid polluting our namespace\n try:\n import matplotlib\n except ImportError:\n if importlib.util.find_spec(\"pytest\") is not None:\n from .._utils.testing import skip_if_running_tests\n # No need to fail when running tests\n skip_if_running_tests('matplotlib not installed')\n raise\n else:\n from ..version import (_import_module_with_version_check,\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n # When matplotlib was successfully imported we need to check\n # that the version is greater that the minimum required one\n _import_module_with_version_check('matplotlib',\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n current_backend = matplotlib.get_backend().lower()\n\n if 'inline' in current_backend or 'nbagg' in current_backend:\n return\n # Set the backend to a non-interactive one for unices without X\n # (see gh-2560)\n if (sys.platform not in ('darwin', 'win32') and\n 'DISPLAY' not in os.environ):\n matplotlib.use('Agg')\n\n\n_set_mpl_backend()\n\n###############################################################################\nfrom . import cm\nfrom .img_plotting import (\n plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,\n plot_glass_brain, plot_connectome, plot_connectome_strength,\n plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)\nfrom .find_cuts import find_xyz_cut_coords, find_cut_slices, \\\n find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\nfrom .matrix_plotting import (plot_matrix, plot_contrast_matrix,\n plot_design_matrix, plot_event)\nfrom .html_surface import view_surf, view_img_on_surf\nfrom .html_stat_map import view_img\nfrom .html_connectome import view_connectome, view_markers\nfrom .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,\n plot_img_on_surf, plot_surf_contours)\n\n__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',\n 'plot_roi', 'plot_stat_map', 'plot_glass_brain',\n 'plot_markers', 'plot_connectome', 'plot_prob_atlas',\n 'find_xyz_cut_coords', 'find_cut_slices',\n 'plot_img_comparison',\n 'show', 'plot_matrix',\n 'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',\n 'view_surf', 'view_img_on_surf',\n 'view_img', 'view_connectome', 'view_markers',\n 'find_parcellation_cut_coords',\n 'find_probabilistic_atlas_cut_coords',\n 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',\n 'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',\n 'plot_surf_contours']\n"
] | [
[
"numpy.rollaxis",
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"numpy.vstack",
"numpy.round",
"numpy.concatenate",
"numpy.nanmean",
"numpy.ravel_multi_index",
"numpy.cross",
"numpy.arange",
"numpy.eye",
"scipy.interpolate.RegularGridInterpolator",
"numpy.linalg.inv",
"numpy.floor",
"numpy.logical_and",
"numpy.random.RandomState",
"numpy.ones",
"numpy.prod",
"numpy.loadtxt"
],
[
"pandas.read_csv",
"numpy.maximum",
"numpy.linspace",
"numpy.unique",
"numpy.ones",
"sklearn.base.clone",
"numpy.zeros"
],
[
"matplotlib.get_backend",
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
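Note on the row above: its `run_glm` helper fits an OLS model to every voxel and, under the 'ar1' noise model, discretizes the per-voxel AR(1) coefficients into bins and fits one ARModel per unique bin value, returning a `labels` array (one model key per voxel) and a `results` dict keyed by those values. A minimal usage sketch follows; it assumes `run_glm` is importable from `nilearn.glm.first_level` (its public home), and the synthetic design matrix, shapes, and random seed are illustrative, not part of the dataset row.

import numpy as np
from nilearn.glm.first_level import run_glm  # public location of the run_glm defined above

rng = np.random.RandomState(0)
n_scans, n_voxels, n_regressors = 50, 10, 3
X = rng.standard_normal((n_scans, n_regressors))         # design matrix
B = rng.standard_normal((n_regressors, n_voxels))        # ground-truth effects
Y = X.dot(B) + rng.standard_normal((n_scans, n_voxels))  # noisy fMRI-like data

# With 'ols', every voxel shares one model: labels are all 0.0 and
# results holds a single regression-results object under key 0.0.
labels, results = run_glm(Y, X, noise_model='ols')
assert set(results) == {0.0} and labels.shape == (n_voxels,)

# With 'ar1', voxels are grouped by their discretized AR(1) coefficient,
# and one ARModel is fitted per unique coefficient value.
labels_ar, results_ar = run_glm(Y, X, noise_model='ar1', bins=10)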
yu-frank/Few-shot-Scene-adaptive-Anomaly-Detection | [
"702dfbdeb6abf235397de45aaa97d3194d0547f3"
] | [
"train.py"
] | [
"from __future__ import print_function\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\nimport torch\r\nimport torch.utils.data\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import Dataset,DataLoader\r\nfrom torchvision import datasets, transforms, models\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport ast\r\nfrom torch.nn import functional as F\r\nimport os\r\nimport random\r\nimport torch.utils.data\r\nimport torchvision.utils as vutils\r\nimport torch.backends.cudnn as cudn\r\nfrom torch.nn import functional as F\r\nfrom unet_parts import *\r\nfrom scipy.misc import imsave\r\nfrom torch.nn import BCELoss as adversarial_loss\r\nimport ast\r\n\r\nfrom rGAN import Generator, Discriminator\r\nfrom dataset import TrainingDataset\r\nfrom utils import createEpochData, roll_axis, loss_function, create_folder, prep_data\r\n\r\ndef Load_Dataloader(train_path_list, tf, batch_size):\r\n train_data = TrainingDataset(train_path_list, tf)\r\n train_dataloader = DataLoader(train_data,batch_size=batch_size)\r\n return train_dataloader\r\n\r\ndef overall_generator_pass(generator, discriminator, img, gt, valid):\r\n recon_batch = generator(img)\r\n msssim, f1, psnr = loss_function(recon_batch, gt)\r\n\r\n imgs = recon_batch.data.cpu().numpy()[0, :]\r\n imgs = roll_axis(imgs)\r\n loss= msssim+f1\r\n G_loss = adversarial_loss(discriminator(recon_batch),valid)\r\n g_loss = adversarial_loss(discriminator(recon_batch),valid) + loss\r\n return imgs, g_loss, recon_batch, loss, msssim\r\n\r\ndef overall_discriminator_pass(discriminator, recon_batch, gt, valid, fake):\r\n real_loss = adversarial_loss(discriminator(gt), valid)\r\n fake_loss = adversarial_loss(discriminator(recon_batch.detach()), fake)\r\n d_loss = (real_loss + fake_loss) / 2\r\n return d_loss\r\n\r\ndef meta_update_model(model, optimizer, loss, gradients):\r\n # Register a hook on each parameter in the net that replaces the current dummy grad\r\n # with our grads accumulated across the meta-batch\r\n # GENERATOR\r\n hooks = []\r\n for (k,v) in model.named_parameters():\r\n def get_closure():\r\n key = k\r\n def replace_grad(grad):\r\n return gradients[key]\r\n return replace_grad\r\n hooks.append(v.register_hook(get_closure()))\r\n\r\n # Compute grads for current step, replace with summed gradients as defined by hook\r\n optimizer.zero_grad()\r\n loss.backward()\r\n\r\n # Update the net parameters with the accumulated gradient according to optimizer\r\n optimizer.step()\r\n\r\n # Remove the hooks before next training phase\r\n for h in hooks:\r\n h.remove()\r\n\r\n\"\"\"MAIN TRAINING SCRIPT\"\"\"\r\ndef main(k_shots, num_tasks, adam_betas, gen_lr, dis_lr, total_epochs, model_folder_path, frame_path):\r\n torch.manual_seed(1)\r\n # Initialize generator and discriminator\r\n batch_size = 1\r\n generator = Generator(batch_size=batch_size) \r\n discriminator = Discriminator()\r\n generator.cuda()\r\n discriminator.cuda()\r\n\r\n # Training the Model\r\n\r\n # optimizer\r\n optimizer_G = optim.Adam (generator.parameters(), lr= gen_lr, betas=adam_betas)\r\n optimizer_D = optim.Adam (discriminator.parameters(), lr= dis_lr, betas=adam_betas) \r\n\r\n # define dataloader\r\n tf = transforms.Compose([transforms.Resize((256,256)),transforms.ToTensor()])\r\n\r\n create_folder(model_folder_path)\r\n \r\n generator_path = os.path.join(model_folder_path, str.format(\"Generator_previous.pt\"))\r\n discriminator_path = os.path.join(model_folder_path, 
str.format(\"Discriminator_previous.pt\"))\r\n\r\n torch.save(generator.state_dict(), generator_path)\r\n torch.save(discriminator.state_dict(), discriminator_path)\r\n previous_generator = generator_path\r\n previous_discriminator = discriminator_path\r\n\r\n\r\n # Set Up Training Loop\r\n for epoch in range(total_epochs):\r\n train_path_list = createEpochData(frame_path, num_tasks, k_shots)\r\n train_dataloader = Load_Dataloader(train_path_list, tf, batch_size)\r\n for _, epoch_of_tasks in enumerate(train_dataloader):\r\n \r\n # Create folder for saving results\r\n epoch_results = 'results'.format(epoch+1)\r\n create_folder(epoch_results)\r\n\r\n gen_epoch_grads = []\r\n dis_epoch_grads = []\r\n\r\n print(\"Epoch: \", epoch+1)\r\n\r\n # Meta-Training\r\n for tidx, task in enumerate(epoch_of_tasks):\r\n # Copy rGAN\r\n print ('\\n Meta Training \\n')\r\n print(\"Memory Allocated: \",torch.cuda.memory_allocated()/1e9)\r\n generator.load_state_dict(torch.load(previous_generator))\r\n discriminator.load_state_dict(torch.load(previous_discriminator))\r\n inner_optimizer_G = optim.Adam(generator.parameters(), lr=1e-4)\r\n inner_optimizer_D = optim.Adam(discriminator.parameters(), lr=1e-4)\r\n print(\"Task: \", tidx)\r\n for kidx, frame_sequence in enumerate(task[:k_shots]):\r\n print('k-Shot Training: ', kidx)\r\n # Configure input\r\n img = frame_sequence[0]\r\n gt = frame_sequence[1]\r\n img, gt, valid, fake = prep_data(img, gt)\r\n\r\n # Train Generator\r\n inner_optimizer_G.zero_grad()\r\n imgs, g_loss, recon_batch, loss, msssim = overall_generator_pass(generator, discriminator, img, gt, valid)\r\n img_path = os.path.join(epoch_results,'{}-fig-train{}.png'.format(tidx+1, kidx+1))\r\n imsave(img_path , imgs)\r\n g_loss.backward()\r\n inner_optimizer_G.step()\r\n\r\n # Train Discriminator\r\n inner_optimizer_D.zero_grad()\r\n # Measure discriminator's ability to classify real from generated samples\r\n d_loss = overall_discriminator_pass(discriminator, recon_batch, gt, valid, fake)\r\n d_loss.backward()\r\n inner_optimizer_D.step()\r\n print ('Epoch [{}/{}], Step [{}/{}], Reconstruction_Loss: {:.4f}, G_Loss: {:.4f}, D_loss: {:.4f}, msssim:{:.4f} '.format(epoch+1, total_epochs, tidx+1, 5, loss.item(), g_loss, d_loss, msssim))\r\n \r\n # Meta-Validation\r\n print ('\\n Meta Validation \\n')\r\n # Store Loss Values\r\n gen_validation_loss_store = 0.0\r\n dis_validation_loss_store = 0.0\r\n gen_validation_loss = 0.0\r\n dis_validation_loss = 0.0\r\n \r\n dummy_frame_sequence = []\r\n # forward pass\r\n for vidx, val_frame_sequence in enumerate(task[-k_shots:]):\r\n print(vidx)\r\n if vidx == 0:\r\n dummy_frame_sequence = val_frame_sequence\r\n \r\n img = val_frame_sequence[0]\r\n gt = val_frame_sequence[1]\r\n img, gt, valid, fake = prep_data(img, gt)\r\n \r\n # k-Validation Generator\r\n imgs, g_loss, recon_batch, loss, msssim = overall_generator_pass(generator, discriminator, img, gt, valid)\r\n img_path = os.path.join(epoch_results,'{}-fig-val{}.png'.format(tidx+1, vidx+1))\r\n imsave(img_path , imgs)\r\n\r\n # k-Validation Discriminator\r\n d_loss = overall_discriminator_pass(discriminator, recon_batch, gt, valid, fake)\r\n \r\n # Store Loss Items to reduce memory usage\r\n gen_validation_loss_store += g_loss.item()\r\n dis_validation_loss_store += d_loss.item()\r\n\r\n if (vidx == k_shots-1):\r\n # Store the loss\r\n gen_validation_loss = g_loss\r\n dis_validation_loss = d_loss\r\n gen_validation_loss.data = torch.FloatTensor([gen_validation_loss_store/k_shots]).cuda()\r\n 
dis_validation_loss.data = torch.FloatTensor([dis_validation_loss_store/k_shots]).cuda()\r\n\r\n\r\n print(\"Generator Validation Loss: \", gen_validation_loss_store)\r\n print(\"Discriminator Validation Loss: \", dis_validation_loss_store)\r\n print ('Epoch [{}/{}], Step [{}/{}], G_Loss: {:.4f}, D_loss: {:.4f}'.format(epoch+1, total_epochs, tidx+1, 5, loss.item(), g_loss, d_loss))\r\n print(\"Memory Allocated: \",torch.cuda.memory_allocated()/1e9)\r\n\r\n # Compute Validation Grad\r\n print(\"Memory Allocated: \",torch.cuda.memory_allocated()/1e9)\r\n\r\n generator.load_state_dict(torch.load(previous_generator))\r\n discriminator.load_state_dict(torch.load(previous_discriminator))\r\n\r\n gen_grads = torch.autograd.grad(gen_validation_loss, generator.parameters())\r\n dis_grads = torch.autograd.grad(dis_validation_loss, discriminator.parameters())\r\n \r\n gen_meta_grads = {name:g for ((name, _), g) in zip(generator.named_parameters(), gen_grads)}\r\n dis_meta_grads = {name:g for ((name, _), g) in zip(discriminator.named_parameters(), dis_grads)}\r\n \r\n gen_epoch_grads.append(gen_meta_grads)\r\n dis_epoch_grads.append(dis_meta_grads)\r\n\r\n\r\n # Meta Update\r\n print('\\n Meta update \\n')\r\n\r\n generator.load_state_dict(torch.load(previous_generator))\r\n discriminator.load_state_dict(torch.load(previous_discriminator))\r\n \r\n # Configure input\r\n img = dummy_frame_sequence[0]\r\n gt = dummy_frame_sequence[1]\r\n img, gt, valid, fake = prep_data(img, gt)\r\n\r\n # Dummy Forward Pass\r\n imgs, g_loss, recon_batch, loss, msssim = overall_generator_pass(generator, discriminator, img, gt, valid)\r\n d_loss = overall_discriminator_pass(discriminator, recon_batch, gt, valid, fake)\r\n\r\n # Unpack the list of grad dicts\r\n gen_gradients = {k: sum(d[k] for d in gen_epoch_grads) for k in gen_epoch_grads[0].keys()}\r\n dis_gradients = {k: sum(d[k] for d in dis_epoch_grads) for k in dis_epoch_grads[0].keys()}\r\n \r\n meta_update_model(generator, optimizer_G, g_loss, gen_gradients)\r\n meta_update_model(discriminator, optimizer_D, d_loss, dis_gradients)\r\n\r\n # Save the Model\r\n torch.save(generator.state_dict(), previous_generator)\r\n torch.save(discriminator.state_dict(), previous_discriminator)\r\n if (epoch % 10 == 0):\r\n gen_path = os.path.join(model_folder_path, str.format(\"Generator_{}.pt\", epoch+1))\r\n dis_path = os.path.join(model_folder_path, str.format(\"Discriminator_{}.pt\", epoch+1))\r\n torch.save(generator.state_dict(), gen_path)\r\n torch.save(discriminator.state_dict(), dis_path)\r\n \r\n\r\n print(\"Training Complete\")\r\n \r\n gen_path = os.path.join(model_folder_path, str.format(\"Generator_Final.pt\"))\r\n dis_path = os.path.join(model_folder_path, str.format(\"Discriminator_Final.pt\"))\r\n torch.save(generator.state_dict(), gen_path)\r\n torch.save(discriminator.state_dict(), dis_path)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if (len(sys.argv) == 9):\r\n \"\"\"SYS ARG ORDER: \r\n K_shots, num_tasks, adam_betas, generator lr, discriminator lr, total epochs, save model path\r\n \"\"\"\r\n k_shots = int(sys.argv[1])\r\n num_tasks = int(sys.argv[2])\r\n adam_betas = ast.literal_eval(sys.argv[3])\r\n gen_lr = float(sys.argv[4])\r\n dis_lr = float(sys.argv[5])\r\n total_epochs = int(sys.argv[6])\r\n model_folder_path = sys.argv[7]\r\n frame_path = sys.argv[8]\r\n else:\r\n k_shots = 1 \r\n num_tasks = 6\r\n adam_betas = (0.5, 0.999)\r\n gen_lr = 2e-4\r\n dis_lr = 1e-5\r\n total_epochs = 2000\r\n model_folder_path = \"model\"\r\n frame_path = 
'/mnt/creeper/grad/luy2/Meta-Learning/data/shanghaitech-5tasks/training/frames/' \r\n main(k_shots, num_tasks, adam_betas, gen_lr, dis_lr, total_epochs, model_folder_path, frame_path)\r\n"
] | [
[
"torch.load",
"scipy.misc.imsave",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"torch.cuda.memory_allocated"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
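Note: the record above trains a video-prediction rGAN with a first-order, MAML-style loop: per-task validation gradients are collected into name-keyed dicts, summed across tasks, and applied to the shared initialisation through a dummy forward pass and a meta optimizer. A minimal sketch of that gradient-accumulation pattern follows; the toy linear model and random data are assumptions, and writing .grad directly stands in for the repo's meta_update_model helper.

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                       # stand-in for the generator
meta_opt = torch.optim.Adam(model.parameters(), lr=2e-4)

task_grads = []                               # one grad dict per task
for _task in range(3):
    x, y = torch.randn(8, 4), torch.randn(8, 2)
    loss = nn.functional.mse_loss(model(x), y)
    grads = torch.autograd.grad(loss, model.parameters())
    task_grads.append({name: g for (name, _), g in zip(model.named_parameters(), grads)})

# Sum the per-task gradients, write them into .grad, take one meta step.
summed = {k: sum(d[k] for d in task_grads) for k in task_grads[0]}
meta_opt.zero_grad()
for name, p in model.named_parameters():
    p.grad = summed[name]
meta_opt.step()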
johanlahti/reinforcement-algorithms | [
"d861b67bd2d3e48e949f30d11a70d072586c4c62"
] | [
"q-learning/q_learn_linear_func_approx.py"
] | [
"import numpy as np\nfrom numpy.core.numeric import Infinity\nimport gym\nfrom collections import namedtuple\nfrom time import sleep\nimport plotting\n\n\nDEFAULT_CONFIG = {\n 'MAX_EPISODES': 2000,\n 'MAX_STEPS_PER_EPISODE': 1000,\n 'LEARNING_RATE': 0.01,\n 'DISCOUNT_RATE': 0.99,\n 'EXPLORATION_DECAY': 0.01,\n 'MIN_EXPLORATION_RATE': 0.1,\n 'DO_RENDER': False,\n 'SHOW_STATS_PLOT': True,\n}\n\n# Named storage of experiences\nExperience = namedtuple('Experience', ('state', 'action', 'next_state', 'reward', 'step'))\n\n\ndef setup_environment(name):\n env = gym.make(name)\n return env\n\nclass Agent():\n exploration_rate = 1.0\n experiences = []\n highest_env_reward = -Infinity\n\n def __init__(self, env, reward_function = None, **config):\n self.env = env\n self.config = config\n\n self.calculate_reward = reward_function if reward_function is not None else lambda *args: args[0]\n self.reset()\n\n # Since we have a discrete and small action space, we can create\n # one set of weights per action.\n nbr_of_weights, nbr_of_actions = env.observation_space.shape[0], env.action_space.n\n\n # Start with random weights.\n self.W = np.random.uniform(low=0.0, high=1.0, size=(nbr_of_weights, nbr_of_actions))\n\n def reset(self):\n ''' Get and set initial state. Decay exploration rate. '''\n self.state = self.env.reset()\n self.step = 0\n\n # Decrease chance of exploration according to an epsilon-greedy strategy.\n self.exploration_rate -= self.config['EXPLORATION_DECAY']\n self.exploration_rate = np.max([self.exploration_rate, self.config['MIN_EXPLORATION_RATE']])\n \n def get_best_action(self, state):\n ''' Calculate the \"quality\" of each action as the weighted sum (dot-product) of \n features and weights and return the (index of the) best action. '''\n # \n action_values = self.W.T.dot(state)\n best_action = np.argmax(action_values)\n return best_action\n \n def observation_to_state(self, observation):\n ''' Convert observations to a new state (add logic for this here if needed…).\n Compare e.g. how our senses convert detected light-waves (the observation) into \"objects\" (the state).'''\n state = observation.copy()\n return state\n \n def should_explore(self):\n do_explore = np.random.uniform() < self.exploration_rate\n return do_explore\n\n def take_action(self):\n self.step += 1\n do_explore = self.should_explore()\n\n if do_explore:\n action = self.env.action_space.sample()\n else:\n action = self.get_best_action(self.state)\n\n observation, env_reward, done, _info = self.env.step(action) \n self.highest_env_reward = env_reward if env_reward > self.highest_env_reward else self.highest_env_reward\n\n next_state = self.observation_to_state(observation)\n\n # Allow creating home-made rewards instead of environment's reward (if any) to facilitate learning.\n our_reward = self.calculate_reward(env_reward, self.state.copy(), next_state.copy(), self.step)\n\n experience = Experience(self.state.copy(), action, observation, our_reward, self.step)\n self.experiences.append(experience)\n self.state = next_state.copy()\n return experience, done\n \n def learn_from(self, experiences):\n errors = []\n\n learning_rate = self.config['LEARNING_RATE']\n discount_rate = self.config['DISCOUNT_RATE']\n\n for exp in experiences:\n reward = exp.reward\n\n # Calculate the error (i.e. 
value difference) between the action we took and the actual value.\n action_value_step0 = np.dot(self.W[:, exp.action], exp.state.T)\n action_value_step1 = np.max(np.dot(self.W.T, exp.next_state))\n \n estimated_value = action_value_step0\n target = reward + discount_rate * action_value_step1\n\n error = np.abs(target - estimated_value)\n \n # Normalise errors as a fraction of the target value (since we don't normalise action values).\n norm_error = error / np.abs(target)\n norm_sqr_error = error ** 2 / np.abs(target)\n\n # Change the weights for this action by an amount proportional to how much \n # each state component (feature) contributes to the action value. The higher the\n # value of a feature – the more will its weights adjust towards the target.\n delta = norm_error * exp.state\n\n # Use gradient value clipping to prevent the error from bouncing away too much.\n # delta = np.clip(delta, -1.0, 1.0)\n\n delta = learning_rate * delta\n self.W[:, exp.action] += delta\n\n errors.append(norm_sqr_error)\n\n return errors\n\n\n\n\ndef execute(env_name, reward_function = None, **config_overrides):\n config = { **DEFAULT_CONFIG, **config_overrides }\n env = setup_environment(env_name)\n agent = Agent(env, reward_function=reward_function, **config)\n\n errors = []\n rewards = []\n avg_rewards = []\n\n for episode in range(config['MAX_EPISODES']):\n agent.reset()\n episode_experiences = []\n episode_rewards = []\n episode_rewards = []\n for _step in range(config['MAX_STEPS_PER_EPISODE']):\n if config['DO_RENDER'] and episode % 200 == 0:\n env.render()\n sleep(0.002)\n experience, done = agent.take_action()\n episode_rewards.append(experience.reward)\n episode_experiences.append(experience)\n if done:\n break\n \n # Fit weights to randomly picked experiences.\n nbr_samples = np.min([len(agent.experiences), 500])\n indices = np.random.choice(nbr_samples, size=nbr_samples, replace=False)\n learning_experiences = [agent.experiences[i] for i in indices]\n episode_avg_error = np.mean(agent.learn_from(learning_experiences))\n episode_max_reward = np.max(episode_rewards)\n episode_avg_reward = np.mean(episode_rewards)\n\n # Store statistics.\n errors.append(episode_avg_error)\n rewards.append(episode_max_reward)\n avg_rewards.append(episode_avg_reward)\n\n if episode % 100 == 0:\n print(f'Episode {episode}:\\nError: {episode_avg_error}\\nMax reward: {episode_max_reward}')\n \n if config['SHOW_STATS_PLOT']:\n plotting.show_stats_plot(rewards, errors, avg_rewards)\n return rewards\n \n\nif __name__ == '__main__':\n # optimise_hyper_params()\n _rewards = execute('CartPole-v1')"
] | [
[
"numpy.dot",
"numpy.abs",
"numpy.random.choice",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
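Note: the record above keeps one weight column per action, so Q(s, a) is the dot product of the state features with column a; its learn_from step, though, scales the update by a normalised absolute error rather than the signed TD error. A minimal sketch of the textbook signed TD(0) update on the same per-action layout (shapes and hyperparameters are illustrative, not the repo's values):

import numpy as np

n_features, n_actions = 4, 2
W = np.random.uniform(size=(n_features, n_actions))  # one column per action
gamma, lr = 0.99, 0.01

s = np.random.rand(n_features)        # current state features
s_next = np.random.rand(n_features)   # next state features
a, r = 1, 1.0                         # action taken, reward received

estimate = W[:, a] @ s                       # Q(s, a)
target = r + gamma * np.max(W.T @ s_next)    # r + gamma * max_a' Q(s', a')
W[:, a] += lr * (target - estimate) * s      # signed TD(0) step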
XuelongSun/InsectNavigationToolkitModelling | [
"454daaee19cb0f18d6f194a2fa79669c07c0f3f3"
] | [
"source/insect_navigation.py"
] | [
"# @File: insect_navigation.py\r\n# @Info: to create an agent of insect navigation based on the insect brain model in insect_brain_model.py\r\n# @Author: Xuelong Sun, UoL, UK\r\n# @Time: 2020-02-17\r\n\r\nimport numpy as np\r\nfrom scipy.special import expit\r\nfrom image_processing import visual_sense\r\nfrom insect_brain_model import CentralComplexModel, MushroomBodyModel\r\nfrom insect_brain_model import SuperiorMedialProtocerebrumModel, RingAttractorModel, AOTuVisualPathwayModel\r\n\r\n\r\nclass InsectNavigationAgent(object):\r\n def __init__(self, world, route_mem, home_mem, zm_n_max,\r\n learning_rate, kc_tau, num_pn, num_kc,\r\n tun_k, sn_thr,\r\n ann_num_neurons,\r\n pi_initial_memory):\r\n self.mb = MushroomBodyModel(learning_rate, kc_tau, num_pn=num_pn, num_kc=num_kc)\r\n self.cx = CentralComplexModel()\r\n self.smp = SuperiorMedialProtocerebrumModel(tun_k, sn_thr)\r\n self.ra = RingAttractorModel()\r\n\r\n # simulated 3D world, an array with size Nx3\r\n self.world = world\r\n # a dictionary with keys: ['imgs', 'h', 'ZM_Ps', 'pos', 'ZM_As']\r\n self.route_mem = route_mem\r\n # a dictionary with keys: ['imgs', 'h', 'ZM_Ps', 'pos', 'ZM_As']\r\n self.home_mem = home_mem\r\n # frequency encoding parameters\r\n self.zm_n_max = zm_n_max\r\n if self.zm_n_max % 2:\r\n self.zm_coeff_num = int(((1 + zm_n_max) / 2) * ((3 + zm_n_max) / 2))\r\n else:\r\n self.zm_coeff_num = int((zm_n_max / 2.0 + 1) ** 2)\r\n\r\n # re arrange the memory\r\n mem_scene = self.route_mem['ZM_As'][:, :self.zm_coeff_num].copy()\r\n mem_phase = self.route_mem['ZM_Ps'][:, 16].copy()\r\n\r\n mem_phase_ring = np.zeros([len(mem_phase), 8])\r\n for i in range(len(mem_phase)):\r\n mem_scene[i, :] = (mem_scene[i, :] - np.min(mem_scene[i, :])) / np.max(mem_scene[i, :])\r\n mem_phase_ring[i, :] = np.cos(np.deg2rad(mem_phase[i]) - self.cx.phase_prefs)\r\n mem_phase_ring_sig = 1 / (1 + np.exp(-mem_phase_ring * 3 - 1.0))\r\n\r\n x = mem_scene\r\n y = mem_phase_ring_sig\r\n\r\n # ann for route following network\r\n self.ann = AOTuVisualPathwayModel(x, y, ann_num_neurons)\r\n\r\n # path integration extra connection\r\n # integrating the CelestialCurrentHeading (TB1) and speed (TN1,TN2)\r\n self.W_TB1_CPU4 = np.tile(np.eye(8), (2, 1))\r\n self.W_TN_CPU4 = np.array([\r\n [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]\r\n ]).T\r\n\r\n self.tn1 = 0\r\n self.tn2 = 0\r\n\r\n self.cpu4_mem_gain = 0.0025\r\n\r\n self.cpu4_memory = np.ones(16) * pi_initial_memory\r\n\r\n def train_mb_network(self):\r\n self.mb.reward = True\r\n print(\"$-Start training MB network with lr=%4.4s kc_tau=%4.4s\" % (self.mb.learning_rate, self.mb.tau))\r\n en_t = []\r\n for stimuli in self.route_mem['ZM_As']:\r\n en_t.append(self.mb.run(stimuli[:self.zm_coeff_num]))\r\n self.mb.reward = False\r\n print(\"$-Finish training MB network\")\r\n return en_t\r\n\r\n def train_ann_network(self, step=500, learning_rate=1.0, dyna_lr=True):\r\n print(\"$-Start training ANN network with lr=%4.4s\" % learning_rate)\r\n for t in range(step):\r\n self.ann.forward_propagation()\r\n temp = np.mean(np.abs(self.ann.output - self.ann.y))\r\n self.ann.error.append(temp)\r\n if dyna_lr:\r\n self.ann.learning_rate.append(learning_rate*temp/self.ann.error[0])\r\n else:\r\n self.ann.learning_rate.append(learning_rate)\r\n\r\n self.ann.back_propagation(learning_rate=self.ann.learning_rate[t])\r\n print(\"$-Finish training ANN network for %s steps\" % step)\r\n return self.ann.error\r\n\r\n def _update_pi_neuron_activation(self, 
heading, velocity):\r\n # update the celestial current heading\r\n self.cx.global_current_heading(heading)\r\n\r\n # optic flow and the activation of TN1 and TN2 neurons\r\n flow = get_flow(heading, velocity)\r\n output = (1.0 - flow) / 2.0\r\n if self.cx.noise > 0.0:\r\n output += np.random.normal(scale=self.cx.noise, size=flow.shape)\r\n self.tn1 = np.clip(output, 0.0, 1.0)\r\n output = flow\r\n if self.cx.noise > 0.0:\r\n output += np.random.normal(scale=self.cx.noise, size=flow.shape)\r\n self.tn2 = np.clip(output, 0.0, 1.0)\r\n\r\n # CPU4\r\n cpu4_mem_reshaped = self.cpu4_memory.reshape(2, -1)\r\n mem_update = (0.5 - self.tn1.reshape(2, 1)) * (1.0 - self.cx.I_tb1)\r\n mem_update -= 0.5 * (0.5 - self.tn1.reshape(2, 1))\r\n cpu4_mem_reshaped += self.cpu4_mem_gain * mem_update\r\n self.cpu4_memory = np.clip(cpu4_mem_reshaped.reshape(-1), 0.0, 1.0)\r\n\r\n return self.cpu4_memory\r\n\r\n def generate_pi_memory(self, pi_len, pi_dir, initial_memory):\r\n \"\"\"\r\n generate PI memory (population coding of CPU4)\r\n :param pi_len: the length of the home vector in meters\r\n :param pi_dir: the direction of the home vector in degree\r\n :param initial_memory: initial memory\r\n :return: CPU4 activation with size 16x1\r\n \"\"\"\r\n\r\n # outbound route parameters\r\n route_length = pi_len * 100.0 # m->cm\r\n pi_dir = np.deg2rad(pi_dir)\r\n velocity = 1.0 # cm/s\r\n dtt = 1 # s\r\n\r\n T_out = int(route_length / velocity / dtt)\r\n v_out = np.zeros([T_out, 2])\r\n v_out[:, 0] = np.ones(T_out) * velocity * np.cos(pi_dir)\r\n v_out[:, 1] = np.ones(T_out) * velocity * np.sin(pi_dir)\r\n\r\n movement_angle = np.arctan2(v_out[:, 1], v_out[:, 0])\r\n h_out = movement_angle * np.ones(T_out)\r\n pos_out = np.cumsum(v_out * dtt, axis=0)\r\n\r\n # reset neuron activation\r\n self.cpu4_memory = np.ones(16) * initial_memory\r\n self.cx.tb1 = np.zeros(8)\r\n\r\n T = len(pos_out)\r\n for t in range(T):\r\n self._update_pi_neuron_activation(h_out[t], v_out[t])\r\n\r\n return self.cpu4_memory\r\n\r\n def coordination_output(self, mb_delta, vh_k, ann_output):\r\n # shifted TB1 to obtain VH desired heading\r\n shift = np.min([np.max([int(mb_delta * vh_k), 0]), 3])\r\n vh = np.roll(self.cx.I_tb1, shift)\r\n vh = np.hstack([vh, vh])\r\n # ann output for RF\r\n rf = np.hstack([ann_output, ann_output])\r\n # MB->SMP TN neuron tuned cpu4 memory for PI\r\n pi = self.cpu4_memory * self.smp.tun\r\n # RA optimally integrate VH and PI\r\n self.ra.cue_integration_output(pi, vh)\r\n # non-linear integration of RF and optimally integrated PI_VH (output of RA)\r\n current_heading = self.cx.I_tb1 * self.smp.sn2 + self.cx.II_tb1 * self.smp.sn1\r\n desired_heading = self.ra.integration_neuron * self.smp.sn2 + rf * self.smp.sn1\r\n return vh, current_heading, desired_heading\r\n\r\n def homing(self, start_pos, start_h, time_out, vh_k, sn_thr, tn_k, motor_k, step_size=4):\r\n pos = np.zeros([time_out, 2])\r\n velocity = np.zeros([time_out, 2])\r\n h = np.zeros(time_out)\r\n pos[0] = start_pos\r\n h[0] = start_h\r\n\r\n dis = 0\r\n\r\n # output of neuron networks\r\n mb_out = np.zeros(time_out)\r\n mb_delta = np.zeros(time_out)\r\n ann_out = np.zeros([time_out, 8])\r\n pi_memory = np.zeros([time_out, 16])\r\n vh_memory = np.zeros([time_out, 16])\r\n ra_memory = np.zeros([time_out, 16])\r\n\r\n # output of SMP neurons\r\n sn1 = np.zeros(time_out)\r\n sn2 = np.zeros(time_out)\r\n tn = np.zeros(time_out)\r\n\r\n # parameter for coordination\r\n self.smp.sn_thr = sn_thr\r\n self.smp.tn_k = tn_k\r\n\r\n print(\"$-Start homing...\")\r\n 
for t in range(time_out - 1):\r\n # frequency coding info\r\n zm_a, p_temp = visual_sense(self.world, pos[t, 0] / 100.0, pos[t, 1] / 100.0, h[t], nmax=self.zm_n_max)\r\n\r\n # update celestial current heading - TB1 neurons and path integration - PI\r\n self._update_pi_neuron_activation(h[t], velocity[t])\r\n pi_memory[t, :] = self.cpu4_memory\r\n\r\n # update terrestrial current heading - Frequency phase sensitive neurons in PB\r\n self.cx.local_current_heading(p_temp[16])\r\n\r\n # MB output - VH\r\n mb_out[t] = self.mb.run(zm_a)\r\n # change of MBON via SMP\r\n if t == 0:\r\n mb_delta[t] = 0\r\n else:\r\n mb_delta[t] = mb_out[t] - mb_out[t - 1]\r\n\r\n # ANN output - RF\r\n input_temp = zm_a.copy()\r\n input_temp = (input_temp - np.min(input_temp)) / np.max(input_temp)\r\n ann_out[t, :] = self.ann.nn_output(input_temp)\r\n\r\n # SMP for guidance coordination\r\n self.smp.neurons_output(mb_out[t])\r\n\r\n tn[t] = self.smp.tun\r\n sn1[t] = self.smp.sn1\r\n sn2[t] = self.smp.sn2\r\n\r\n # get the integrated output\r\n vh_memory[t, :], current_heading, desired_heading = self.coordination_output(mb_delta[t], vh_k, ann_out[t])\r\n\r\n # store the output of ring attractor\r\n ra_memory[t, :] = self.ra.integration_neuron\r\n\r\n # steering circuit for motor command\r\n self.cx.desired_heading_memory = desired_heading\r\n self.cx.current_heading_memory = current_heading\r\n self.cx.steering_circuit_out()\r\n\r\n # moving forward\r\n h[t + 1] = (h[t] + self.cx.motor_value * motor_k + np.pi) % (2.0 * np.pi) - np.pi\r\n velocity[t + 1, :] = np.array([np.cos(h[t + 1]), np.sin(h[t + 1])]) * step_size\r\n pos[t + 1, :] = pos[t, :] + velocity[t + 1, :]\r\n\r\n dis = np.sqrt(np.sum(pos[t+1, 0]**2 + pos[t+1, 1]**2))\r\n if dis < 20:\r\n break\r\n\r\n print(\"$-End homing with nest distance %4.4s m\" % (dis/100.0))\r\n return t, pos, h, velocity, mb_out, mb_delta, ann_out, vh_memory, pi_memory, ra_memory, tn, sn1, sn2\r\n\r\n\r\ndef noisy_sigmoid(v, slope=1.0, bias=0.5, noise=0.01):\r\n \"\"\"Takes a vector v as input, puts through sigmoid and\r\n adds Gaussian noise. Results are clipped to return rate\r\n between 0 and 1\"\"\"\r\n sig = expit(v * slope - bias)\r\n if noise > 0:\r\n sig += np.random.normal(scale=noise, size=len(v))\r\n return np.clip(sig, 0, 1)\r\n\r\n\r\ndef get_flow(heading, velocity, tn_prefs=np.pi / 4.0, filter_steps=0):\r\n \"\"\"Calculate optic flow depending on preference angles. [L, R]\"\"\"\r\n\r\n A = np.array([[np.cos(heading + tn_prefs),\r\n np.sin(heading + tn_prefs)],\r\n [np.cos(heading - tn_prefs),\r\n np.sin(heading - tn_prefs)]])\r\n flow = np.dot(A, velocity)\r\n\r\n return flow\r\n"
] | [
[
"numpy.dot",
"numpy.cumsum",
"numpy.arctan2",
"numpy.max",
"numpy.exp",
"numpy.roll",
"numpy.hstack",
"numpy.clip",
"numpy.eye",
"numpy.sin",
"numpy.zeros",
"numpy.min",
"numpy.deg2rad",
"numpy.array",
"numpy.sum",
"numpy.abs",
"scipy.special.expit",
"numpy.cos",
"numpy.ones",
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
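Note: the record above encodes remembered headings as an eight-cell ring with cosine tuning, squashed through the fixed sigmoid seen in mem_phase_ring_sig. A standalone sketch of that population code, assuming evenly spaced preferred directions for cx.phase_prefs (the repo does not show their exact values):

import numpy as np

phase_prefs = np.linspace(0, 2 * np.pi, 8, endpoint=False)  # assumed spacing
heading = np.deg2rad(40.0)

ring = np.cos(heading - phase_prefs)               # cosine tuning curve
ring_sig = 1.0 / (1.0 + np.exp(-ring * 3 - 1.0))   # squashing used in the record
print(ring_sig.round(3))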
HELLORPG/GraduationProjec | [
"5d3925502626e7179598d447d0a07c3c28a499e2"
] | [
"model/encoder.py"
] | [
"\"\"\"\nencoder是使用了学长文中TRecgNet中的构建\n使用ResNet18卷积部分的改版来构建\n\"\"\"\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom config.secret_config import SECRET_CONFIG\nimport os\nimport torch\n\n\nclass Encoder(nn.Module):\n def __init__(self, config):\n \"\"\"\n 用于初始化Encoder网络\n :param config: 传递一个配置信息,一般可以进行修改的参数均在这之中定义,以一个独立Class文件的形式存在\n \"\"\"\n super(Encoder, self).__init__()\n\n # 这里需要按照类似ResNet18的形式建立一个编码网络,也就是TRecgNet论文中描述的Encoder Net(E Net)。\n # 这个过程最终的网络输出是一个batch_size * out_channels * hight * weight\n # 这个输出最终会传递给分类网络Classifier(C Net)\n # 然而与ResNet18的平凡模式不同,每个残差层(ResLayer)都要将结果单独输出\n # 根据ResNet18的特性,一共会使用四层残差层(ResLayer)\n\n # 数据:\n # input: batch_size * 3 * 224 * 224\n # output: batch_size * 512 * 14 * 14\n # 后续在代码段中对于Data Size的描述,略去Batch Size,只讨论剩余的3维Size\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(num_features=64),\n nn.ReLU()\n )\n # Size to 64 × 112 × 112\n # 注意,这里和ResNet18的结构有一定的区别,ResNet中在这样的一个卷积层之后,还存在一个不改变Channels的卷积层\n # 详情可见本仓库代码resnet18_rgb/model.py\n\n self.layer2 = ResLayer(in_channels=64, out_channels=128, stride=2)\n # Size to 128 × 56 × 56\n\n self.layer3 = ResLayer(in_channels=128, out_channels=256, stride=2)\n # Size to 256 × 28 × 28\n\n self.layer4 = ResLayer(in_channels=256, out_channels=512, stride=2)\n # Size to 512 × 14 × 14\n\n def forward(self, x, layer_output=True):\n \"\"\"\n 前向传播函数,分情况进行不同的输出\n :param x: 输入数据X\n :param layer_output: 是否对每一层进行输出,默认对每一层的数据都进行输出\n :return: 数据类型是字典,其中[\"layer1\"]到[\"layer4\"]是每一层的对应输出,[\"final\"]则是最终输出。\n \"\"\"\n output = {}\n if layer_output:\n # 此时需要输出每一层的output\n output[\"layer1\"] = self.layer1(x)\n output[\"layer2\"] = self.layer2(output[\"layer1\"])\n output[\"layer3\"] = self.layer3(output[\"layer2\"])\n output[\"layer4\"] = self.layer4(output[\"layer3\"])\n output[\"final\"] = output[\"layer4\"]\n\n else:\n # 此时不需要\n output[\"final\"] = self.layer1(x)\n output[\"final\"] = self.layer2(output[\"final\"])\n output[\"final\"] = self.layer3(output[\"final\"])\n output[\"final\"] = self.layer4(output[\"final\"])\n\n return output\n\n\nclass PreEncoder(nn.Module):\n \"\"\"\n 预训练的Encoder网络\n \"\"\"\n def __init__(self, config=None, pretrained=True, data_used=\"ImageNet\"):\n super(PreEncoder, self).__init__()\n\n if data_used == \"ImageNet\":\n resnet18 = models.resnet18(pretrained=pretrained)\n elif data_used == \"Places\":\n path = os.path.join(SECRET_CONFIG.PROJECT_PATH, \"checkpoints/resnet18_places365.pth\")\n resnet18 = models.__dict__[\"resnet18\"](num_classes=365)\n checkpoint = torch.load(path, map_location=lambda storage, loc: storage)\n state_dict = {str.replace(k, 'module.', ''): v for k, v in checkpoint['state_dict'].items()}\n resnet18.load_state_dict(state_dict)\n else:\n assert 0 # 发生了错误,未定义的data_used\n\n self.conv1 = resnet18.conv1\n self.bn1 = resnet18.bn1\n self.relu = resnet18.relu\n self.maxpool = resnet18.maxpool\n self.layer1 = resnet18.layer1\n self.layer2 = resnet18.layer2\n self.layer3 = resnet18.layer3\n self.layer4 = resnet18.layer4\n\n def forward(self, x, layer_output=True):\n output = {}\n if layer_output:\n output[\"layer1\"] = self.relu(self.bn1(self.conv1(x)))\n output[\"layer1\"] = self.layer1(output[\"layer1\"])\n output[\"layer2\"] = self.layer2(output[\"layer1\"])\n output[\"layer3\"] = self.layer3(output[\"layer2\"])\n output[\"final\"] = self.layer4(output[\"layer3\"])\n\n else:\n output[\"final\"] = self.relu(self.bn1(self.conv1(x)))\n output[\"final\"] = 
self.layer1(output[\"final\"])\n output[\"final\"] = self.layer2(output[\"final\"])\n output[\"final\"] = self.layer3(output[\"final\"])\n output[\"final\"] = self.layer4(output[\"final\"])\n\n return output\n\n\nclass ResBlock(nn.Module):\n \"\"\"\n 该类是一个残差块\n 两个Conv组成一个残差基本块,而每两个残差基本块组成一个ResLayer(残差层)\n \"\"\"\n def __init__(self, in_channels, out_channels, stride=1):\n \"\"\"\n 初始化网络结构,也传入了网络所需要的全部参数\n :param in_channels: 输入的通道数\n :param out_channels: 输出的通道数\n :param stride: 卷积的步长,用于DownSample图像,\n \"\"\"\n super(ResBlock, self).__init__()\n\n # 残差块分成学习路径和直接绕过学习而部分传递的路径\n # 学习路径,learn_way如下:\n self.learn_way = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, bias=False), # 该层,也仅有该层使用外部传入的Stride,以此来缩小图的大小\n nn.BatchNorm2d(num_features=out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False), # 该层则不进行特征图的大小的改变,所以Stride=1\n nn.BatchNorm2d(num_features=out_channels)\n )\n\n # 非学习路径,shortcut_way如下:\n if in_channels != out_channels or stride != 1:\n # 在这种情况下,还是需要通过网络转变,得到新的输出规格\n self.shortcut_way = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(num_features=out_channels)\n )\n else:\n self.shortcut_way = nn.Sequential() # 空操作\n\n def forward(self, x):\n \"\"\"\n :param x: 残差块的输入数据\n :return: 前向传播的输出\n \"\"\"\n output = self.learn_way(x) + self.shortcut_way(x)\n output = F.relu(output, inplace=True)\n # 关于inplace参数的说明:\n # 文档中的说明:inplace=True means that it will modify the input directly, without allocating any additional output. It can sometimes slightly decrease the memory usage, but may not always be a valid operation (because the original input is destroyed). However, if you don’t see an error, it means that your use case is valid.\n # 即inplace=True是可以一定程度上减少内存或者显存的占用,但是并不是每一次都有效,实际上只要没有报错,都可以使用这个参数设置。\n return output\n\n\nclass ResLayer(nn.Module):\n \"\"\"\n 该类就是对TRecgNet文中所标注的,Encoder的每一层进行抽象。\n 基本上,一个ResLayer残差层是由两个ResBlock残差块组成的。\n \"\"\"\n def __init__(self, in_channels, out_channels, stride=1):\n \"\"\"\n 用来初始化残差层,每个层内包括了两个残差块,使用尽可能少的参数进行这些操作,根据网络特性,当stride≠1的时候,只有第一个残差块承担DownSample的职责。\n :param in_channels:\n :param out_channels:\n :param stride:\n \"\"\"\n super(ResLayer, self).__init__()\n\n # 残差层由两个阶段的残差块组成,首先是第一层:\n self.res_layer = nn.Sequential(\n ResBlock(in_channels=in_channels, out_channels=out_channels, stride=stride),\n ResBlock(in_channels=out_channels, out_channels=out_channels, stride=1)\n )\n\n def forward(self, x):\n output = self.res_layer(x)\n return output\n\n\nif __name__ == '__main__':\n # 直接运行这个文件,可以测试该文件中的网络结构的部分正确性。\n encoder = Encoder(None)\n print(encoder)"
] | [
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
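Note: the comments in the record above are in Chinese; their gist is that the encoder mirrors ResNet-18, that each ResLayer exposes its output separately for the downstream classifier, and that the shortcut path needs a 1x1 projection whenever the channel count or stride changes. A condensed, self-contained version of that residual block (layer sizes here are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyResBlock(nn.Module):
    def __init__(self, c_in, c_out, stride=1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(c_in, c_out, 3, stride, 1, bias=False),
            nn.BatchNorm2d(c_out), nn.ReLU(inplace=True),
            nn.Conv2d(c_out, c_out, 3, 1, 1, bias=False),
            nn.BatchNorm2d(c_out))
        # Identity shortcut unless shape changes, then a 1x1 projection.
        self.shortcut = (nn.Sequential()
                         if c_in == c_out and stride == 1 else
                         nn.Sequential(nn.Conv2d(c_in, c_out, 1, stride, bias=False),
                                       nn.BatchNorm2d(c_out)))

    def forward(self, x):
        return F.relu(self.body(x) + self.shortcut(x))

print(TinyResBlock(64, 128, stride=2)(torch.randn(1, 64, 56, 56)).shape)
# torch.Size([1, 128, 28, 28])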
telent/luaradio | [
"c1cb47325e4eb2886915f810fff5324571aeb59d"
] | [
"tests/blocks/signal/multiplyconjugate_spec.py"
] | [
"import numpy\nfrom generate import *\n\n\ndef generate():\n vectors = []\n\n x, y = random_complex64(256), random_complex64(256)\n vectors.append(TestVector([], [x, y], [x * numpy.conj(y)], \"2 256 ComplexFloat32 inputs, 256 ComplexFloat32 output\"))\n\n return BlockSpec(\"MultiplyConjugateBlock\", vectors, 1e-6)\n"
] | [
[
"numpy.conj"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
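Note: the test vector above checks x * conj(y). The identity being exercised, magnitudes multiply while phases subtract, is why multiply-conjugate blocks recover phase offsets between signals. A two-line check of that identity:

import numpy as np

x = 2.0 * np.exp(1j * 0.7)   # magnitude 2.0, phase 0.7 rad
y = 0.5 * np.exp(1j * 0.3)   # magnitude 0.5, phase 0.3 rad
z = x * np.conj(y)

assert np.isclose(abs(z), 1.0)        # 2.0 * 0.5
assert np.isclose(np.angle(z), 0.4)   # 0.7 - 0.3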
gmshashank/pytorch_vision | [
"54367b83e9780fe14c6f8b93157091ffdf7266eb",
"54367b83e9780fe14c6f8b93157091ffdf7266eb"
] | [
"CIFAR10/Session3/dl_vision/utils/config.py",
"CIFAR10/Session5/dl_vision/model/resnet.py"
] | [
"import os\nimport numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nimport yaml\n\nfrom typing import Any, List, Tuple, Dict\nfrom types import ModuleType\nfrom utils.logger import setup_logger\n\nlogger = setup_logger(__name__)\n\n\ndef get_instance(module: ModuleType, name: str, config: Dict, *args: Any) -> Any:\n # creae instance from constructor name and module name\n constr_name = config[name][\"type\"]\n logger.info(f\"Building: {module.__name__}.{constr_name}\")\n return getattr(module, constr_name)(*args, **config[name][\"args\"])\n\n\ndef load_config(file_name: str) -> dict:\n # Loading a configuration YAML file\n with open(file_name) as file_handle:\n config = yaml.safe_load(file_handle)\n\n return config\n\n\ndef setup_device(\n model: nn.Module, target_device: str\n) -> Tuple[torch.device, List[int]]:\n\n device = torch.device(f\"{target_device}\")\n try:\n model = model.to(device)\n except:\n device = torch.device(\"cpu\")\n model = model.to(device)\n\n return model, device\n\n\ndef setup_seed(seed: int) -> None:\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\ndef setup_model_params(model: nn.Module, config: Dict) -> List:\n return [{\"params\": model.parameters(), **config}]\n",
"\"\"\"PyTorch ResNet \n\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n in_channels=in_planes,\n out_channels=planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False,\n )\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n in_channels=planes,\n out_channels=planes,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False,\n )\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_channels=in_planes,\n out_channels=self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(self.expansion * planes),\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(\n in_channels=in_planes, out_channels=planes, kernel_size=1, bias=False\n )\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n in_channels=planes,\n out_channels=planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False,\n )\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(\n in_channels=planes,\n out_channels=self.expansion * planes,\n kernel_size=1,\n bias=False,\n )\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_channels=in_planes,\n out_channels=self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(self.expansion * planes),\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n\n self.in_planes = 64\n self.conv1 = nn.Conv2d(\n in_channels=3,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False,\n )\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 
2], 10)\n\n\ndef ResNet34():\n return ResNet(BasicBlock, [3, 4, 6, 3], 10)\n\n\ndef ResNet50():\n return ResNet(Bottleneck, [3, 4, 6, 3], 10)\n\n\ndef ResNet101():\n return ResNet(Bottleneck, [3, 4, 23, 3], 10)\n\n\ndef test():\n model = ResNet18()\n y = model(torch.randn(1, 3, 32, 32))\n print(y.size())\n\n\n# test()\n"
] | [
[
"torch.device",
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.random.seed"
],
[
"torch.nn.Sequential",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
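Note: the setup_seed helper in the record above pins every RNG a training run touches (Python's random, hash seeding, NumPy, and torch on both CPU and CUDA). The same recipe, runnable on its own; torch.cuda.manual_seed is a safe no-op on CPU-only machines:

import os
import random
import numpy as np
import torch

def seed_everything(seed: int) -> None:
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)   # ignored when CUDA is unavailable

seed_everything(42)
print(torch.rand(1), np.random.rand())   # reproducible across runs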
icewing1996/uis-rnn | [
"3fd6a73dad1f999a4bcd2013171e041176812b26"
] | [
"demo.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A demo script showing how to use the uisrnn package on toy data.\"\"\"\n\nimport numpy as np\n\nimport uisrnn\n\n\nSAVED_MODEL_NAME = 'saved_model.uisrnn'\n\n\ndef diarization_experiment(model_args, training_args, inference_args):\n \"\"\"Experiment pipeline.\n\n Load data --> train model --> test model --> output result\n\n Args:\n model_args: model configurations\n training_args: training configurations\n inference_args: inference configurations\n \"\"\"\n predicted_cluster_ids = []\n test_record = []\n \n # train_data = np.load('./data/toy_training_data.npz')\n # test_data = np.load('./data/toy_testing_data.npz')\n # train_sequence = train_data['train_sequence']\n # train_cluster_id = train_data['train_cluster_id']\n # test_sequences = test_data['test_sequences'].tolist()\n # test_cluster_ids = test_data['test_cluster_ids'].tolist()\n orig_train_sequences = np.load('data/train_sequence.npy').astype(np.float64)\n orig_train_cluster_ids = np.array(np.load('data/train_cluster_id.npy'))\n orig_test_sequences = np.load('data/test_sequence.npy').astype(np.float64)\n orig_test_cluster_ids = np.array(np.load('data/test_cluster_id.npy'))\n\n test_chunk_size = orig_test_sequences.shape[0] // 1000\n test_left_over = orig_test_sequences.shape[0] % test_chunk_size\n test_new_len = orig_test_sequences.shape[0] - test_left_over\n print(orig_test_sequences.shape)\n print(orig_test_cluster_ids.shape)\n print(test_chunk_size)\n print(test_left_over)\n print(test_new_len)\n\n test_sequences = np.split(orig_test_sequences[:test_new_len], test_chunk_size)\n test_cluster_ids = np.split(orig_test_cluster_ids[:test_new_len], test_chunk_size)\n\n\n model = uisrnn.UISRNN(model_args)\n # model.fit(orig_train_sequences, orig_train_cluster_ids, training_args)\n\n\n\n train_chunk_size = orig_train_sequences.shape[0] // 10000\n train_left_over = orig_train_sequences.shape[0] % train_chunk_size\n train_new_len = orig_train_sequences.shape[0] - train_left_over\n\n train_sequences = np.split(orig_train_sequences[:train_new_len], train_chunk_size)\n train_cluster_ids = np.split(orig_train_cluster_ids[:train_new_len], train_chunk_size)\n\n train_sequences = np.array(train_sequences)\n train_cluster_ids = np.array(train_cluster_ids)\n\n d = vars(training_args)\n # training\n for i in range(train_sequences.shape[0]):\n train_sequence = train_sequences[i]\n train_cluster_id = train_cluster_ids[i]\n train_cluster_id = train_cluster_id.tolist()\n d['learning_rate'] = 1e-3\n model.fit(train_sequence, train_cluster_id, training_args)\n\n # Take care of leftovers\n train_sequence = orig_train_sequences[train_new_len:]\n train_cluster_id = orig_train_cluster_ids[train_new_len:]\n d['learning_rate'] = 1e-3\n model.fit(train_sequence, train_cluster_id, training_args)\n model.save(SAVED_MODEL_NAME)\n\n # we can also skip training by calling:\n # model.load(SAVED_MODEL_NAME)\n \n\n # testing\n # Take care of leftover\n # test_sequence = 
orig_test_sequences[test_new_len:]\n # test_cluster_id = orig_test_cluster_ids[test_new_len:].tolist()\n # predicted_cluster_id = model.predict(test_sequence, inference_args)\n # predicted_cluster_ids.append(predicted_cluster_id)\n # accuracy = uisrnn.compute_sequence_match_accuracy(\n # test_cluster_id, predicted_cluster_id)\n # test_record.append((accuracy, len(test_cluster_id)))\n\n # Then the rest\n for (test_sequence, test_cluster_id) in zip(test_sequences, test_cluster_ids):\n test_cluster_id = test_cluster_id.tolist()\n predicted_cluster_id = model.predict(test_sequence, inference_args)\n predicted_cluster_ids.append(predicted_cluster_id)\n accuracy = uisrnn.compute_sequence_match_accuracy(\n test_cluster_id, predicted_cluster_id)\n test_record.append((accuracy, len(test_cluster_id)))\n print('Ground truth labels:')\n print(test_cluster_id)\n print('Predicted labels:')\n print(predicted_cluster_id)\n print('-' * 80)\n\n \n print('Ground truth labels:')\n print(test_cluster_id)\n print('Predicted labels:')\n print(predicted_cluster_id)\n print('-' * 80)\n\n output_string = uisrnn.output_result(model_args, training_args, test_record)\n\n print('Finished diarization experiment')\n print(output_string)\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n model_args, training_args, inference_args = uisrnn.parse_arguments()\n diarization_experiment(model_args, training_args, inference_args)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.load",
"numpy.split",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
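Note: the demo above splits long embedding arrays with np.split, which raises unless the array divides evenly, hence the trim-then-leftover bookkeeping around new_len (and note the variable named chunk_size there is actually the number of chunks, each of roughly 1000 rows). The pattern in miniature:

import numpy as np

seq = np.arange(17)
n_chunks = seq.shape[0] // 5          # 3 full chunks of 5
new_len = n_chunks * 5                # 15 items fit evenly
pieces = np.split(seq[:new_len], n_chunks)
leftover = seq[new_len:]              # the 2 trailing items, handled separately
print([p.tolist() for p in pieces], leftover.tolist())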
ryul99/mmsr | [
"f2649d155f00478b96c7629bbe6f310e8f2549af"
] | [
"codes/models/SRGAN_model.py"
] | [
"import logging\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nimport models.networks as networks\nimport models.lr_scheduler as lr_scheduler\nfrom .base_model import BaseModel\nfrom models.loss import GANLoss\nimport wandb\nfrom utils import util\n\nlogger = logging.getLogger('base')\n\n\nclass SRGANModel(BaseModel):\n def __init__(self, opt):\n super(SRGANModel, self).__init__(opt)\n if opt['dist']:\n self.rank = torch.distributed.get_rank()\n else:\n self.rank = -1 # non dist training\n train_opt = opt['train']\n\n # define networks and load pretrained models\n self.netG = networks.define_G(opt).to(self.device)\n if opt['dist']:\n self.netG = DistributedDataParallel(self.netG, device_ids=[torch.cuda.current_device()])\n else:\n self.netG = DataParallel(self.netG)\n if self.is_train:\n self.netD = networks.define_D(opt).to(self.device)\n if opt['dist']:\n self.netD = DistributedDataParallel(self.netD,\n device_ids=[torch.cuda.current_device()])\n else:\n self.netD = DataParallel(self.netD)\n\n self.netG.train()\n self.netD.train()\n\n # define losses, optimizer and scheduler\n if self.is_train:\n # G pixel loss\n if train_opt['pixel_weight'] > 0:\n l_pix_type = train_opt['pixel_criterion']\n if l_pix_type == 'l1':\n self.cri_pix = nn.L1Loss().to(self.device)\n elif l_pix_type == 'l2':\n self.cri_pix = nn.MSELoss().to(self.device)\n else:\n raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_pix_type))\n self.l_pix_w = train_opt['pixel_weight']\n else:\n logger.info('Remove pixel loss.')\n self.cri_pix = None\n\n # G feature loss\n if train_opt['feature_weight'] > 0:\n l_fea_type = train_opt['feature_criterion']\n if l_fea_type == 'l1':\n self.cri_fea = nn.L1Loss().to(self.device)\n elif l_fea_type == 'l2':\n self.cri_fea = nn.MSELoss().to(self.device)\n else:\n raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_fea_type))\n self.l_fea_w = train_opt['feature_weight']\n else:\n logger.info('Remove feature loss.')\n self.cri_fea = None\n if self.cri_fea: # load VGG perceptual loss\n self.netF = networks.define_F(opt, use_bn=False).to(self.device)\n if opt['dist']:\n pass # do not need to use DistributedDataParallel for netF\n else:\n self.netF = DataParallel(self.netF)\n\n # GD gan loss\n self.cri_gan = GANLoss(train_opt['gan_type'], 1.0, 0.0).to(self.device)\n self.l_gan_w = train_opt['gan_weight']\n # D_update_ratio and D_init_iters\n self.D_update_ratio = train_opt['D_update_ratio'] if train_opt['D_update_ratio'] else 1\n self.D_init_iters = train_opt['D_init_iters'] if train_opt['D_init_iters'] else 0\n\n # optimizers\n # G\n wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0\n optim_params = []\n for k, v in self.netG.named_parameters(): # can optimize for a part of the model\n if v.requires_grad:\n optim_params.append(v)\n else:\n if self.rank <= 0:\n logger.warning('Params [{:s}] will not optimize.'.format(k))\n self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'],\n weight_decay=wd_G,\n betas=(train_opt['beta1_G'], train_opt['beta2_G']))\n self.optimizers.append(self.optimizer_G)\n # D\n wd_D = train_opt['weight_decay_D'] if train_opt['weight_decay_D'] else 0\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=train_opt['lr_D'],\n weight_decay=wd_D,\n betas=(train_opt['beta1_D'], train_opt['beta2_D']))\n self.optimizers.append(self.optimizer_D)\n\n # schedulers\n if train_opt['lr_scheme'] == 
'MultiStepLR':\n for optimizer in self.optimizers:\n self.schedulers.append(\n lr_scheduler.MultiStepLR_Restart(optimizer, train_opt['lr_steps'],\n restarts=train_opt['restarts'],\n weights=train_opt['restart_weights'],\n gamma=train_opt['lr_gamma'],\n clear_state=train_opt['clear_state']))\n elif train_opt['lr_scheme'] == 'CosineAnnealingLR_Restart':\n for optimizer in self.optimizers:\n self.schedulers.append(\n lr_scheduler.CosineAnnealingLR_Restart(\n optimizer, train_opt['T_period'], eta_min=train_opt['eta_min'],\n restarts=train_opt['restarts'], weights=train_opt['restart_weights']))\n else:\n raise NotImplementedError('MultiStepLR learning rate scheme is enough.')\n\n self.log_dict = OrderedDict()\n\n self.print_network() # print network\n self.load() # load G and D if needed\n if self.opt['use_wandb_logger'] and 'debug' not in self.opt['name']:\n wandb.watch(self.netG)\n wandb.watch(self.netD)\n\n def feed_data(self, data, need_GT=True, noise_mode=None, noise_rate=0.0):\n self.var_L = data['LQ'].to(self.device) # LQ\n if need_GT:\n self.var_H = data['GT'].to(self.device) # GT\n input_ref = data['ref'] if 'ref' in data else data['GT']\n self.var_ref = input_ref.to(self.device)\n if noise_mode is not None and torch.rand(1) < noise_rate:\n self.var_L = util.add_noise(self.var_L, noise_mode=noise_mode)\n\n def optimize_parameters(self, step):\n # G\n for p in self.netD.parameters():\n p.requires_grad = False\n\n self.optimizer_G.zero_grad()\n self.fake_H = self.netG(self.var_L)\n\n l_g_total = 0\n if step % self.D_update_ratio == 0 and step > self.D_init_iters:\n if self.cri_pix: # pixel loss\n l_g_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.var_H)\n l_g_total += l_g_pix\n if self.cri_fea: # feature loss\n real_fea = self.netF(self.var_H).detach()\n fake_fea = self.netF(self.fake_H)\n l_g_fea = self.l_fea_w * self.cri_fea(fake_fea, real_fea)\n l_g_total += l_g_fea\n\n if self.opt['train']['gan_type'] == 'gan':\n pred_g_fake = self.netD(self.fake_H)\n l_g_gan = self.l_gan_w * self.cri_gan(pred_g_fake, True)\n elif self.opt['train']['gan_type'] == 'ragan':\n pred_d_real = self.netD(self.var_ref).detach()\n pred_g_fake = self.netD(self.fake_H)\n l_g_gan = self.l_gan_w * (\n self.cri_gan(pred_d_real - torch.mean(pred_g_fake), False) +\n self.cri_gan(pred_g_fake - torch.mean(pred_d_real), True)) / 2\n l_g_total += l_g_gan\n\n l_g_total.backward()\n self.optimizer_G.step()\n\n # D\n for p in self.netD.parameters():\n p.requires_grad = True\n\n self.optimizer_D.zero_grad()\n if self.opt['train']['gan_type'] == 'gan':\n # need to forward and backward separately, since batch norm statistics differ\n # real\n pred_d_real = self.netD(self.var_ref)\n l_d_real = self.cri_gan(pred_d_real, True)\n l_d_real.backward()\n # fake\n pred_d_fake = self.netD(self.fake_H.detach()) # detach to avoid BP to G\n l_d_fake = self.cri_gan(pred_d_fake, False)\n l_d_fake.backward()\n elif self.opt['train']['gan_type'] == 'ragan':\n # pred_d_real = self.netD(self.var_ref)\n # pred_d_fake = self.netD(self.fake_H.detach()) # detach to avoid BP to G\n # l_d_real = self.cri_gan(pred_d_real - torch.mean(pred_d_fake), True)\n # l_d_fake = self.cri_gan(pred_d_fake - torch.mean(pred_d_real), False)\n # l_d_total = (l_d_real + l_d_fake) / 2\n # l_d_total.backward()\n pred_d_fake = self.netD(self.fake_H.detach()).detach()\n pred_d_real = self.netD(self.var_ref)\n l_d_real = self.cri_gan(pred_d_real - torch.mean(pred_d_fake), True) * 0.5\n l_d_real.backward()\n pred_d_fake = self.netD(self.fake_H.detach())\n l_d_fake = 
self.cri_gan(pred_d_fake - torch.mean(pred_d_real.detach()), False) * 0.5\n l_d_fake.backward()\n self.optimizer_D.step()\n\n # set log\n if step % self.D_update_ratio == 0 and step > self.D_init_iters:\n if self.cri_pix:\n self.log_dict['l_g_pix'] = l_g_pix.item()\n if self.cri_fea:\n self.log_dict['l_g_fea'] = l_g_fea.item()\n self.log_dict['l_g_gan'] = l_g_gan.item()\n\n self.log_dict['l_d_real'] = l_d_real.item()\n self.log_dict['l_d_fake'] = l_d_fake.item()\n self.log_dict['D_real'] = torch.mean(pred_d_real.detach())\n self.log_dict['D_fake'] = torch.mean(pred_d_fake.detach())\n\n def test(self):\n self.netG.eval()\n with torch.no_grad():\n self.fake_H = self.netG(self.var_L)\n self.netG.train()\n\n def get_current_log(self):\n return self.log_dict\n\n def get_current_visuals(self, need_GT=True):\n out_dict = OrderedDict()\n out_dict['LQ'] = self.var_L.detach()[0].float().cpu()\n out_dict['rlt'] = self.fake_H.detach()[0].float().cpu()\n if need_GT:\n out_dict['GT'] = self.var_H.detach()[0].float().cpu()\n return out_dict\n\n def print_network(self):\n # Generator\n s, n = self.get_network_description(self.netG)\n if isinstance(self.netG, nn.DataParallel) or isinstance(self.netG, DistributedDataParallel):\n net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,\n self.netG.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.netG.__class__.__name__)\n if self.rank <= 0:\n logger.info('Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))\n logger.info(s)\n if self.is_train:\n # Discriminator\n s, n = self.get_network_description(self.netD)\n if isinstance(self.netD, nn.DataParallel) or isinstance(self.netD,\n DistributedDataParallel):\n net_struc_str = '{} - {}'.format(self.netD.__class__.__name__,\n self.netD.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.netD.__class__.__name__)\n if self.rank <= 0:\n logger.info('Network D structure: {}, with parameters: {:,d}'.format(\n net_struc_str, n))\n logger.info(s)\n\n if self.cri_fea: # F, Perceptual Network\n s, n = self.get_network_description(self.netF)\n if isinstance(self.netF, nn.DataParallel) or isinstance(\n self.netF, DistributedDataParallel):\n net_struc_str = '{} - {}'.format(self.netF.__class__.__name__,\n self.netF.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.netF.__class__.__name__)\n if self.rank <= 0:\n logger.info('Network F structure: {}, with parameters: {:,d}'.format(\n net_struc_str, n))\n logger.info(s)\n\n def load(self):\n load_path_G = self.opt['path']['pretrain_model_G']\n if load_path_G is not None:\n logger.info('Loading model for G [{:s}] ...'.format(load_path_G))\n self.load_network(load_path_G, self.netG, self.opt['path']['strict_load'],\n wandb_load_run_path=self.opt['path']['wandb_load_run_path'])\n load_path_D = self.opt['path']['pretrain_model_D']\n if self.opt['is_train'] and load_path_D is not None:\n logger.info('Loading model for D [{:s}] ...'.format(load_path_D))\n self.load_network(load_path_D, self.netD, self.opt['path']['strict_load'],\n wandb_load_run_path=self.opt['path']['wandb_load_run_path'])\n\n def save(self, iter_step):\n self.save_network(self.netG, 'G', iter_step)\n self.save_network(self.netD, 'D', iter_step)\n"
] | [
[
"torch.optim.Adam",
"torch.mean",
"torch.nn.MSELoss",
"torch.nn.parallel.DataParallel",
"torch.cuda.current_device",
"torch.no_grad",
"torch.rand",
"torch.distributed.get_rank",
"torch.nn.L1Loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
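Note: the 'ragan' branch in the record above scores each batch relative to the mean score of the opposite batch (the relativistic average GAN formulation). A minimal sketch of both losses with stand-in logits; GANLoss is replaced here by BCE-with-logits, which is assumed to be what it wraps for these gan types:

import torch
import torch.nn.functional as F

d_real = torch.randn(8, 1)            # D(x_real), stand-in logits
d_fake = torch.randn(8, 1)            # D(G(z)), stand-in logits

bce = F.binary_cross_entropy_with_logits
ones, zeros = torch.ones_like(d_real), torch.zeros_like(d_real)

# Discriminator: real should beat the average fake, and vice versa.
l_d = (bce(d_real - d_fake.mean(), ones) +
       bce(d_fake - d_real.mean(), zeros)) / 2
# Generator: mirror image, with the real-side scores detached as in the record.
l_g = (bce(d_real.detach() - d_fake.mean(), zeros) +
       bce(d_fake - d_real.detach().mean(), ones)) / 2
print(l_d.item(), l_g.item())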
jalayrupera/Sentiment-analysis-on-amazon-product | [
"f04d77769f9c9b533d530ce5b217d741c09c93ee"
] | [
"code/code/load.py"
] | [
"import numpy as np\nfrom os import path\n\nfrom Tree import Tree\n\nDATASET = '../data'\n\n\ndef load():\n print('Load Trees...')\n with open(path.join(DATASET, 'STree.txt')) as f:\n trees = []\n for line in f.readlines():\n tree = line.split('|')\n tree = np.array(tree).astype(int)\n trees.append(tree)\n\n print('Load Sentences...')\n with open(path.join(DATASET, 'SOStr.txt')) as f:\n sentences = []\n lexicon = set()\n for line in f.readlines():\n sent = line.strip().split('|')\n sentences.append(sent)\n lexicon = lexicon.union(sent)\n\n print('Load data split')\n with open(path.join(DATASET, 'datasetSplit.txt')) as f:\n whichSet = []\n f.readline()\n for line in f.readlines():\n whichSet.append(int(line.strip().split(',')[1]))\n\n print('Load Index...')\n with open(path.join(DATASET, 'dictionary.txt')) as f:\n index = {}\n for line in f.readlines():\n phrase = line.split('|')\n index[int(phrase[1])] = phrase[0]\n\n print('Load Labels...')\n with open(path.join(DATASET, 'sentiment_labels.txt')) as f:\n f.readline()\n labels = {}\n for line in f.readlines():\n id_p, y = line.split('|')\n labels[index[int(id_p)]] = float(y)\n\n print('Build Trees...')\n X_trees_train = []\n X_trees_dev = []\n X_trees_test = []\n for s, t, k in zip(sentences, trees, whichSet):\n if k == 1:\n X_trees_train.append(Tree(s, t, labels))\n elif k == 2:\n X_trees_test.append(Tree(s, t, labels))\n elif k == 3:\n X_trees_dev.append(Tree(s, t, labels))\n else:\n raise(Exception('Erreur dans le parsing train/test/dev'))\n return lexicon, X_trees_train, X_trees_dev, X_trees_test, labels\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
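Note: the loader above reads pipe-delimited parent-pointer trees from STree.txt and routes sentences into train/test/dev by the split file (the final raise message is French for "error in the train/test/dev parsing"). The per-line parsing step it relies on, in isolation, with a hypothetical input row:

import numpy as np

line = '5|5|4|0|0\n'                               # hypothetical STree.txt row
parents = np.array(line.strip().split('|')).astype(int)
print(parents)                                     # [5 5 4 0 0]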
marcelroed/equivariant-MLP | [
"453d948055936aab3e718c36863c2db318c038ce"
] | [
"emlp/reps/linear_operator_base.py"
] | [
"\"\"\"Abstract linear algebra library.\nThis module defines a class hierarchy that implements a kind of \"lazy\"\nmatrix representation, called the ``LinearOperator``. It can be used to do\nlinear algebra with extremely large sparse or structured matrices, without\nrepresenting those explicitly in memory. Such matrices can be added,\nmultiplied, transposed, etc.\nAs a motivating example, suppose you want have a matrix where almost all of\nthe elements have the value one. The standard sparse matrix representation\nskips the storage of zeros, but not ones. By contrast, a LinearOperator is\nable to represent such matrices efficiently. First, we need a compact way to\nrepresent an all-ones matrix::\n >>> import numpy as np\n >>> class Ones(LinearOperator):\n ... def __init__(self, shape):\n ... super(Ones, self).__init__(dtype=None, shape=shape)\n ... def _matvec(self, x):\n ... return np.repeat(x.sum(), self.shape[0])\nInstances of this class emulate ``np.ones(shape)``, but using a constant\namount of storage, independent of ``shape``. The ``_matvec`` method specifies\nhow this linear operator multiplies with (operates on) a vector. We can now\nadd this operator to a sparse matrix that stores only offsets from one::\n >>> from scipy.sparse import csr_matrix\n >>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])\n >>> A = aslinearoperator(offsets) + Ones(offsets.shape)\n >>> A.dot([1, 2, 3])\n array([13, 4, 15])\nThe result is the same as that given by its dense, explicitly-stored\ncounterpart::\n >>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])\n array([13, 4, 15])\nSeveral algorithms in the ``scipy.sparse`` library are able to operate on\n``LinearOperator`` instances.\n\"\"\"\n\nimport warnings\n\nimport jax.numpy as np\nimport numpy as onp\n\n__all__ = ['LinearOperator', 'aslinearoperator']\n\nimport torch\n\nfrom emlp.utils import dbg\n\n\nclass LinearOperator(object):\n \"\"\"Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. 
Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.sparse.linalg import LinearOperator\n >>> def mv(v):\n ... return np.array([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(np.ones(2))\n array([ 2., 3.])\n >>> A * np.ones(2)\n array([ 2., 3.])\n \"\"\"\n\n ndim = 2\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n else:\n obj = super(LinearOperator, cls).__new__(cls)\n\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n\n return obj\n\n def __init__(self, dtype, shape):\n \"\"\"Initialize this LinearOperator.\n To be called by subclasses. 
``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n \"\"\"\n # if dtype is not None:\n # dtype = np.dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(\"invalid shape %r (must be 2-d)\" % (shape,))\n\n self.dtype = np.dtype('float32') # force float 32\n self.shape = shape\n\n def _init_dtype(self):\n \"\"\"Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if self.dtype is None:\n # v = np.zeros(self.shape[-1])\n self.dtype = np.dtype('float32') # self.matvec(v).dtype #force float 32\n\n def _matmat(self, X):\n \"\"\"Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n\n return np.hstack([self.matvec(col.reshape(-1, 1)) for col in X.T])\n\n def _matvec(self, x):\n \"\"\"Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(x.reshape(-1, 1))\n\n def matvec(self, x):\n \"\"\"Matrix-vector multiplication.\n Performs the operation y=A*x where A is an MxN linear\n operator and x is a column vector or 1-d array.\n Parameters\n ----------\n x : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n\n M, N = self.shape\n if x.shape != (N,) and x.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(x)\n\n if x.ndim == 1:\n y = y.reshape(M)\n elif x.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, x):\n \"\"\"Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * x where A is an MxN linear\n operator and x is a column vector or 1-d array.\n Parameters\n ----------\n x : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (N,) or (N,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if x.shape != (M,) and x.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(x)\n\n if x.ndim == 1:\n y = y.reshape(N)\n elif x.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, x):\n \"\"\"Default implementation of _rmatvec; defers to adjoint.\"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n else:\n return self.H.matvec(x)\n\n def matmat(self, X):\n \"\"\"Matrix-matrix multiplication.\n Performs the operation y=A*X where A is an MxN linear\n operator and X dense N*K matrix or ndarray.\n Parameters\n ----------\n X : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n 
-------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the X argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n\n if X.ndim != 2:\n raise ValueError('expected 2-d ndarray or matrix, not %d-d'\n % X.ndim)\n\n if X.shape[0] != self.shape[1]:\n raise ValueError('dimension mismatch: %r, %r'\n % (self.shape, X.shape))\n\n Y = self._matmat(X)\n return Y\n\n def rmatmat(self, X):\n \"\"\"Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * x where A is an MxN linear\n operator and x is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n X : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n\n if X.ndim != 2:\n raise ValueError('expected 2-d ndarray or matrix, not %d-d'\n % X.ndim)\n\n if X.shape[0] != self.shape[0]:\n raise ValueError('dimension mismatch: %r, %r'\n % (self.shape, X.shape))\n\n Y = self._rmatmat(X)\n return Y\n\n def _rmatmat(self, X):\n \"\"\"Default implementation of _rmatmat defers to rmatvec or adjoint.\"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])\n else:\n return self.H.matmat(X)\n\n def __call__(self, x):\n return self * x\n\n def __mul__(self, x):\n return self.dot(x)\n\n def dot(self, x):\n \"\"\"Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n x : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Ax : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(x, LinearOperator):\n return _ProductLinearOperator(self, x)\n elif np.isscalar(x):\n return _ScaledLinearOperator(self, x)\n else:\n if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:\n return self.matvec(x)\n elif x.ndim == 2:\n return self.matmat(x)\n else:\n raise ValueError('expected 1-d or 2-d array or matrix, got %r'\n % x)\n\n def __matmul__(self, other):\n if np.isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, \"\n \"use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if np.isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, \"\n \"use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if np.isscalar(x):\n return _ScaledLinearOperator(self, x)\n else:\n return NotImplemented\n\n def __pow__(self, p):\n if np.isscalar(p):\n return _PowerLinearOperator(self, p)\n else:\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n elif isinstance(x, np.ndarray) and len(x.shape) == 2:\n return _SumLinearOperator(self, Lazy(x))\n else:\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dt = 'unspecified dtype'\n else:\n dt = 'dtype=' + str(self.dtype)\n\n return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)\n\n def adjoint(self):\n \"\"\"Hermitian adjoint.\n Returns the Hermitian adjoint of 
self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n H = property(adjoint)\n\n def transpose(self):\n \"\"\"Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n T = property(transpose)\n\n def _adjoint(self):\n \"\"\"Default implementation of _adjoint; defers to rmatvec.\"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return self @ np.eye(self.shape[-1])\n\n\nclass _CustomLinearOperator(LinearOperator):\n \"\"\"Linear operator defined in terms of user-specified operations.\"\"\"\n\n def __init__(self, shape, matvec, rmatvec=None, matmat=None,\n dtype=None, rmatmat=None):\n super(_CustomLinearOperator, self).__init__(dtype, shape)\n\n self.args = ()\n\n self.__matvec_impl = matvec\n self.__rmatvec_impl = rmatvec\n self.__rmatmat_impl = rmatmat\n self.__matmat_impl = matmat\n\n self._init_dtype()\n\n def _matmat(self, X):\n if self.__matmat_impl is not None:\n return self.__matmat_impl(X)\n else:\n return super(_CustomLinearOperator, self)._matmat(X)\n\n def _matvec(self, x):\n return self.__matvec_impl(x)\n\n def _rmatvec(self, x):\n func = self.__rmatvec_impl\n if func is None:\n raise NotImplementedError(\"rmatvec is not defined\")\n return self.__rmatvec_impl(x)\n\n def _rmatmat(self, X):\n if self.__rmatmat_impl is not None:\n return self.__rmatmat_impl(X)\n else:\n return super(_CustomLinearOperator, self)._rmatmat(X)\n\n def _adjoint(self):\n return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),\n matvec=self.__rmatvec_impl,\n rmatvec=self.__matvec_impl,\n matmat=self.__rmatmat_impl,\n rmatmat=self.__matmat_impl,\n dtype=self.dtype)\n\n\nclass _AdjointLinearOperator(LinearOperator):\n \"\"\"Adjoint of arbitrary Linear Operator\"\"\"\n\n def __init__(self, A):\n shape = (A.shape[1], A.shape[0])\n super(_AdjointLinearOperator, self).__init__(dtype=A.dtype, shape=shape)\n self.A = A\n self.args = (A,)\n\n def _matvec(self, x):\n return self.A._rmatvec(x)\n\n def _rmatvec(self, x):\n return self.A._matvec(x)\n\n def _matmat(self, x):\n return self.A._rmatmat(x)\n\n def _rmatmat(self, x):\n return self.A._matmat(x)\n\n\nclass _TransposedLinearOperator(LinearOperator):\n \"\"\"Transposition of arbitrary Linear Operator\"\"\"\n\n def __init__(self, A):\n shape = (A.shape[1], A.shape[0])\n super(_TransposedLinearOperator, self).__init__(dtype=A.dtype, shape=shape)\n self.A = A\n self.args = (A,)\n\n def _matvec(self, x):\n # NB. np.conj works also on sparse matrices\n return np.conj(self.A._rmatvec(np.conj(x)))\n\n def _rmatvec(self, x):\n return np.conj(self.A._matvec(np.conj(x)))\n\n def _matmat(self, x):\n # NB. 
np.conj works also on sparse matrices\n return np.conj(self.A._rmatmat(np.conj(x)))\n\n def _rmatmat(self, x):\n return np.conj(self.A._matmat(np.conj(x)))\n\n\ndef _get_dtype(operators, dtypes=None):\n if dtypes is None:\n dtypes = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'dtype'):\n dtypes.append(obj.dtype)\n return dtypes[0] # removed find_common_dtypes because not supported in jax\n\n\nclass _SumLinearOperator(LinearOperator):\n def __init__(self, A, B):\n if not isinstance(A, LinearOperator) or \\\n not isinstance(B, LinearOperator):\n raise ValueError('both operands have to be a LinearOperator')\n if A.shape != B.shape:\n raise ValueError('cannot add %r and %r: shape mismatch'\n % (A, B))\n self.args = (A, B)\n super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape)\n\n def _matvec(self, x):\n return self.args[0].matvec(x) + self.args[1].matvec(x)\n\n def _rmatvec(self, x):\n return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)\n\n def _rmatmat(self, x):\n return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)\n\n def _matmat(self, x):\n return self.args[0].matmat(x) + self.args[1].matmat(x)\n\n def _adjoint(self):\n A, B = self.args\n return A.H + B.H\n\n def invT(self):\n A, B = self.args\n return A.invT() + B.invT()\n\n\nclass _ProductLinearOperator(LinearOperator):\n def __init__(self, A, B):\n if not isinstance(A, LinearOperator) or \\\n not isinstance(B, LinearOperator):\n raise ValueError('both operands have to be a LinearOperator')\n if A.shape[1] != B.shape[0]:\n raise ValueError('cannot multiply %r and %r: shape mismatch'\n % (A, B))\n super(_ProductLinearOperator, self).__init__(_get_dtype([A, B]),\n (A.shape[0], B.shape[1]))\n self.args = (A, B)\n\n def _matvec(self, x):\n return self.args[0].matvec(self.args[1].matvec(x))\n\n def _rmatvec(self, x):\n return self.args[1].rmatvec(self.args[0].rmatvec(x))\n\n def _rmatmat(self, x):\n return self.args[1].rmatmat(self.args[0].rmatmat(x))\n\n def _matmat(self, x):\n return self.args[0].matmat(self.args[1].matmat(x))\n\n def _adjoint(self):\n A, B = self.args\n return B.H * A.H\n\n def invT(self):\n A, B = self.args\n return A.invT() * B.invT()\n\n def to_dense(self):\n A, B = self.args\n A = A.to_dense() if isinstance(A, LinearOperator) else A\n B = B.to_dense() if isinstance(B, LinearOperator) else B\n return A @ B\n\n\nclass _ScaledLinearOperator(LinearOperator):\n def __init__(self, A, alpha):\n if not isinstance(A, LinearOperator):\n raise ValueError('LinearOperator expected as A')\n if not np.isscalar(alpha):\n raise ValueError('scalar expected as alpha')\n dtype = _get_dtype([A], [type(alpha)])\n super(_ScaledLinearOperator, self).__init__(dtype, A.shape)\n self.args = (A, alpha)\n\n def _matvec(self, x):\n return self.args[1] * self.args[0].matvec(x)\n\n def _rmatvec(self, x):\n return np.conj(self.args[1]) * self.args[0].rmatvec(x)\n\n def _rmatmat(self, x):\n return np.conj(self.args[1]) * self.args[0].rmatmat(x)\n\n def _matmat(self, x):\n return self.args[1] * self.args[0].matmat(x)\n\n def _adjoint(self):\n A, alpha = self.args\n return A.H * np.conj(alpha)\n\n def invT(self):\n A, alpha = self.args\n return (1 / alpha) * A.T\n\n def to_dense(self):\n A, alpha = self.args\n return alpha * A.to_dense()\n\n\nclass _PowerLinearOperator(LinearOperator):\n def __init__(self, A, p):\n if not isinstance(A, LinearOperator):\n raise ValueError('LinearOperator expected as A')\n if A.shape[0] != A.shape[1]:\n raise ValueError('square LinearOperator expected, got %r' % A)\n if not 
isinstance(p, int) or p < 0:\n raise ValueError('non-negative integer expected as p')\n\n super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape)\n self.args = (A, p)\n\n def _power(self, fun, x):\n res = np.array(x, copy=True)\n for i in range(self.args[1]):\n res = fun(res)\n return res\n\n def _matvec(self, x):\n return self._power(self.args[0].matvec, x)\n\n def _rmatvec(self, x):\n return self._power(self.args[0].rmatvec, x)\n\n def _rmatmat(self, x):\n return self._power(self.args[0].rmatmat, x)\n\n def _matmat(self, x):\n return self._power(self.args[0].matmat, x)\n\n def _adjoint(self):\n A, p = self.args\n return A.H ** p\n\n def invT(self):\n A, p = self.args\n return A.invT() ** p\n\n\nclass MatrixLinearOperator(LinearOperator):\n def __init__(self, A):\n super(MatrixLinearOperator, self).__init__(A.dtype, A.shape)\n self.A = A\n self.__adj = None\n self.args = (A,)\n\n def _matmat(self, X):\n return self.A.dot(X)\n\n def _adjoint(self):\n if self.__adj is None:\n self.__adj = _AdjointMatrixOperator(self)\n return self.__adj\n\n\nclass _AdjointMatrixOperator(MatrixLinearOperator):\n def __init__(self, adjoint):\n self.A = adjoint.A.T.conj()\n self.__adjoint = adjoint\n self.args = (adjoint,)\n self.shape = adjoint.shape[1], adjoint.shape[0]\n\n @property\n def dtype(self):\n return self.__adjoint.dtype\n\n def _adjoint(self):\n return self.__adjoint\n\n\nclass IdentityOperator(LinearOperator):\n def __init__(self, shape, dtype=None):\n super(IdentityOperator, self).__init__(dtype, shape)\n\n def _matvec(self, x):\n return x\n\n def _rmatvec(self, x):\n return x\n\n def _rmatmat(self, x):\n return x\n\n def _matmat(self, x):\n return x\n\n def _adjoint(self):\n return self\n\n\nclass Lazy(LinearOperator):\n def __init__(self, dense_matrix):\n self.A = dense_matrix # Retain also the dense array\n self._A_torch = None\n super().__init__(self.A.dtype, self.A.shape)\n\n def get_A_torch(self, device):\n if self._A_torch is not None:\n return self._A_torch\n if isinstance(self.A, onp.ndarray):\n dbg('Lazy: dense_matrix is a numpy array, converting to Tensor')\n self._A_torch = torch.tensor(self.A, device=device)\n elif isinstance(self.A, np.DeviceArray):\n dbg('Lazy: dense_matrix is a DeviceArray, converting to Tensor')\n self._A_torch = torch.tensor(onp.asarray(self.A), device=device)\n\n return self._A_torch\n\n def as_torch_lazy(self):\n if isinstance(self.A, np.ndarray):\n return Lazy(torch.from_numpy(onp.asarray(self.A)))\n else:\n raise NotImplementedError(f'Not implemented for {type(self.A)}')\n\n def _matmat(self, V):\n if isinstance(V, torch.Tensor):\n return self.get_A_torch(V.device) @ V\n return self.A @ V\n\n def _matvec(self, v):\n if isinstance(v, torch.Tensor):\n return self.get_A_torch(v.device) @ v\n return self.A @ v\n\n def _rmatmat(self, V):\n if isinstance(V, torch.Tensor):\n return self.get_A_torch(V.device).T @ V\n return self.A.T @ V\n\n def _rmatvec(self, v):\n if isinstance(v, torch.Tensor):\n return self.get_A_torch(v.device).T @ v\n return self.A.T @ v\n\n def to_dense(self):\n return self.A\n\n def invT(self):\n return Lazy(np.linalg.inv(self.A).T)\n\n\n# def aslinearoperator(A):\n# \"\"\"Return A as a LinearOperator.\n# 'A' may be any of the following types:\n# - ndarray\n# - matrix\n# - sparse matrix (e.g. 
csr_matrix, lil_matrix, etc.)\n# - LinearOperator\n# - An object with .shape and .matvec attributes\n# See the LinearOperator documentation for additional information.\n# Notes\n# -----\n# If 'A' has no .dtype attribute, the data type is determined by calling\n# :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this\n# call upon the linear operator creation.\n# Examples\n# --------\n# >>> from scipy.sparse.linalg import aslinearoperator\n# >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)\n# >>> aslinearoperator(M)\n# <2x3 MatrixLinearOperator with dtype=int32>\n# \"\"\"\n# if isinstance(A, LinearOperator):\n# return A\n\n# elif isinstance(A, np.ndarray):\n# if A.ndim > 2:\n# raise ValueError('array must have ndim <= 2')\n# return MatrixLinearOperator(A)\n\n# else:\n# if hasattr(A, 'shape') and hasattr(A, 'matvec'):\n# rmatvec = None\n# rmatmat = None\n# dtype = None\n\n# if hasattr(A, 'rmatvec'):\n# rmatvec = A.rmatvec\n# if hasattr(A, 'rmatmat'):\n# rmatmat = A.rmatmat\n# if hasattr(A, 'dtype'):\n# dtype = A.dtype\n# return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,\n# rmatmat=rmatmat, dtype=dtype)\n\n# else:\n# raise TypeError('type not understood')\n\n\ndef isintlike(x):\n return isinstance(x, int)\n\n\ndef isshape(x, nonneg=False):\n \"\"\"Is x a valid 2-tuple of dimensions?\n If nonneg, also checks that the dimensions are non-negative.\n \"\"\"\n try:\n # Assume it's a tuple of matrix dimensions (M, N)\n (M, N) = x\n except Exception:\n return False\n else:\n if isintlike(M) and isintlike(N):\n if np.ndim(M) == 0 and np.ndim(N) == 0:\n if not nonneg or (M >= 0 and N >= 0):\n return True\n return False\n"
] | [
[
"numpy.asarray",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
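The record above vendors a scipy-style LinearOperator (lazy matvec/rmatvec dispatch, a float32-forcing dtype, and a torch-aware Lazy wrapper). As a minimal sketch of the lazy-composition behaviour it mirrors, the snippet below uses the upstream scipy.sparse.linalg.LinearOperator that the record's own docstring references; the operators mv and swap are illustrative assumptions, not part of the record.

import numpy as np
from scipy.sparse.linalg import LinearOperator

def mv(v):
    # diag(2, 3) applied to a length-2 vector
    return np.array([2 * v[0], 3 * v[1]])

def swap(v):
    # permutation that reverses the two entries
    return v[::-1]

A = LinearOperator((2, 2), matvec=mv)
B = LinearOperator((2, 2), matvec=swap)

# Scaling, products and sums all stay lazy: each builds a new composite
# operator that defers to the operands' matvec at call time.
C = 2 * (A @ B) + A
print(C.matvec(np.ones(2)))  # [6. 9.]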
ruyuanzhang/ccnss2018_students | [
"978b2414ade6116da01c19a945304f9c514fb93f"
] | [
"module2/2_model_fitting_and_model_comparison/untitled0.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 13 05:16:40 2018\n\n@author: J-Y Moon\n\"\"\"\nimport matplotlib.pyplot as plt # import matplotlib\nimport numpy as np # import numpy\nimport scipy as sp # import scipy\nfrom scipy import sparse # import sparse module from scipy\nimport networkx as nx # import networkx \n\n\n\n# code for generating connectivity matrix from a connectivity list, for small or near-full network\n\ndef calculate_network_mat(net_list):\n len_net = np.amax(net_list)+1\n len_list = len(net_list)\n net_mat = np.zeros((len_net,len_net))\n for i in range(len_list):\n net_mat[ net_list[i,0] , net_list[i,1] ] = net_list[i,2]\n \n return net_mat \n\n\n# code for generating connectivity matrix from a connectivity list, for large yet sparse network\n\ndef calculate_network_mat2(net_list):\n len_net = np.amax(net_list)+1\n net_mat = sp.sparse.coo_matrix((net_list[:,2], (net_list[:,0],net_list[:,1])), shape=(len_net,len_net) )\n # net_mat_csc = sp.sparse.csc_matrix(net_mat)\n \n return net_mat\n\n\n\n# computes degree of input network,\n# and also the cumulative probability distribution for the degree, and prints the resulting figures\n\ndef net_degree_plot(net_mat,net_name):\n\n net_degree = np.sum(net_mat,axis=0)\n net_degree_unique, net_degree_counts = np.unique(net_degree, return_counts=True)\n net_degree_cumul = np.zeros(len(net_degree_unique))\n\n #print(net_degree_unique)\n #print(net_degree_counts)\n\n net_degree_cumul[-1]=net_degree_counts[-1]\n for i in range(len(net_degree_unique)-2,-1,-1):\n net_degree_cumul[i] = net_degree_cumul[i+1]+net_degree_counts[i]\n\n \n plt.figure(figsize=(15,5))\n \n plt.subplot( 1, 2, 1 )\n plt.plot(net_degree_unique, net_degree_cumul,'C0o')\n plt.xlabel('degree')\n plt.ylabel('cumulative dist.')\n plt.title(net_name)\n \n plt.subplot( 1, 2, 2 )\n plt.loglog(net_degree_unique, net_degree_cumul,'C0o')\n plt.xlabel('degree')\n plt.ylabel('cumulative dist.')\n plt.title(net_name)\n plt.show \n \n \n \n# calculates clustering coefficient of a given network and a node\n\ndef clustering_coeff(net_mat, node_number):\n \n neighbors = np.nonzero(net_mat[node_number])[0]\n neighbors_N = neighbors.shape[0]\n\n if neighbors_N == 1: return 0\n links = 0\n for w in neighbors:\n for u in neighbors:\n neighbors_neighbors = np.nonzero(net_mat[w])[0]\n if u in neighbors_neighbors: links += 0.5\n \n return 2.0*links/(neighbors_N *(neighbors_N -1)) \n\n\n\n# calculate distance matrix from a given connectivity matrix\n\ndef mat_distance(net_mat):\n net_mat = np.matrix(net_mat)\n net_mat_N = len(net_mat)\n net_mat_distance = np.zeros((net_mat_N,net_mat_N))\n net_mat_product = net_mat.copy()\n D = net_mat.copy()\n T = net_mat.copy()\n i=3\n\n for k in range(net_mat_N):\n net_mat_product = net_mat_product*net_mat\n net_where = np.where(net_mat_product > 0) \n D[net_where]=1\n T = T+D \n net_mat_distance = i*D - T\n i = i+1\n if len(np.where(net_mat_distance==0)[0]) == 0:\n break\n \n return net_mat_distance\n\n\n# calculate characteristic path length and efficiency from a given distance matrix\n\ndef l_e_distance(net_mat_d):\n net_mat_d = np.matrix(net_mat_d)\n N = net_mat_d.shape[0]\n \n l = 1/N * 1/(N-1) * (np.sum(net_mat_d)-np.trace(net_mat_d))\n e = 1/N * 1/(N-1) * (np.sum(1/net_mat_d)-np.trace(1/net_mat_d))\n \n return l, e\n\n\n# calculates betweenness centrality from a given connectivity matrix\n\ndef calculate_bc(net_mat):\n net_mat = np.matrix(net_mat)\n graph = nx.to_networkx_graph(net_mat)\n bc = nx.betweenness_centrality(graph) # dictionary 
where key = node\n bc = np.array([bc[i] for i in range(len(bc))])\n return bc\n\n\n"
] | [
[
"numpy.matrix",
"scipy.sparse.coo_matrix",
"numpy.amax",
"matplotlib.pyplot.title",
"numpy.unique",
"numpy.nonzero",
"numpy.trace",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
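The module in the record above converts a (source, target, weight) edge list into a connectivity matrix, with a scipy.sparse.coo_matrix path for large networks. Below is a self-contained sketch of that construction using a hypothetical toy edge list; note that the node count here is taken from the two index columns only, since np.amax over the whole list (as in the record) would be skewed whenever a weight exceeds the largest node id.

import numpy as np
import scipy.sparse as sp

# hypothetical (source, target, weight) edge list
net_list = np.array([[0, 1, 1.0],
                     [1, 2, 2.0],
                     [2, 0, 0.5]])

n = int(net_list[:, :2].max()) + 1            # node count, from index columns
rows = net_list[:, 0].astype(int)
cols = net_list[:, 1].astype(int)
net_mat = sp.coo_matrix((net_list[:, 2], (rows, cols)), shape=(n, n))
print(net_mat.toarray())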
Ruhen-Bhuiyan/Logistic-regression-vs-SVM-vs-Decision-Tree-vs-Random-Forest | [
"6908d197bae1d7f8d7083b007aba9f71739822d2",
"6908d197bae1d7f8d7083b007aba9f71739822d2"
] | [
"Value count for replacing missing values.py",
"Heatmap.py"
] | [
"import pandas as pd\nd = pd.read_csv(\"D:\\\\445\\\\13.csv\")\n\nd['Smoke'].value_counts()\n",
"import pandas as pd \nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import svm\nimport itertools\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport seaborn\nget_ipython().run_line_magic('matplotlib', 'inline')\ndata = pd.read_csv(\"D:\\\\445\\\\KhidmahActuall.csv\")\ndf = pd.DataFrame(data)\ndf_corr = df.corr()\nplt.figure(figsize=(15,10))\nseaborn.heatmap(df_corr, cmap=\"YlGnBu\") # Displaying the Heatmap\nseaborn.set(font_scale=2,style='white')\nplt.title('Heatmap correlation')\nplt.show()\n\n\n\n\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
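Heatmap.py in the record above reads a local CSV that is not part of this dump. A self-contained sketch of the same correlation-heatmap recipe follows, substituting synthetic data for the missing file; the column names and random seed are arbitrary assumptions.

import numpy as np
import pandas as pd
import seaborn
import matplotlib.pyplot as plt

seaborn.set(font_scale=2, style='white')       # set the style before drawing

rng = np.random.default_rng(0)                 # synthetic stand-in for the CSV
df = pd.DataFrame(rng.normal(size=(100, 4)), columns=list('ABCD'))

plt.figure(figsize=(15, 10))
seaborn.heatmap(df.corr(), cmap='YlGnBu')      # correlation heatmap
plt.title('Heatmap correlation')
plt.show()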
FHaase/pandas | [
"dc8d35aa53d496b651f5e1ab4cb2604e9f7236c7"
] | [
"pandas/tests/sparse/frame/test_frame.py"
] | [
"# pylint: disable-msg=E1101,W0612\n\nimport operator\n\nimport pytest\nfrom numpy import nan\nimport numpy as np\nimport pandas as pd\n\nfrom pandas import Series, DataFrame, bdate_range, Panel\nfrom pandas.errors import PerformanceWarning\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.tseries.offsets import BDay\nfrom pandas.util import testing as tm\nfrom pandas.compat import lrange\nfrom pandas import compat\nfrom pandas.core.sparse import frame as spf\n\nfrom pandas._libs.sparse import BlockIndex, IntIndex\nfrom pandas.core.sparse.api import (\n SparseSeries, SparseDataFrame, SparseArray, SparseDtype\n)\nfrom pandas.tests.frame.test_api import SharedWithSparse\n\n\nclass TestSparseDataFrame(SharedWithSparse):\n klass = SparseDataFrame\n\n # SharedWithSparse tests use generic, klass-agnostic assertion\n _assert_frame_equal = staticmethod(tm.assert_sp_frame_equal)\n _assert_series_equal = staticmethod(tm.assert_sp_series_equal)\n\n def test_iterrows(self, float_frame, float_string_frame):\n # Same as parent, but we don't ensure the sparse kind is the same.\n for k, v in float_frame.iterrows():\n exp = float_frame.loc[k]\n tm.assert_sp_series_equal(v, exp, check_kind=False)\n\n for k, v in float_string_frame.iterrows():\n exp = float_string_frame.loc[k]\n tm.assert_sp_series_equal(v, exp, check_kind=False)\n\n def test_itertuples(self, float_frame):\n for i, tup in enumerate(float_frame.itertuples()):\n s = self.klass._constructor_sliced(tup[1:])\n s.name = tup[0]\n expected = float_frame.iloc[i, :].reset_index(drop=True)\n tm.assert_sp_series_equal(s, expected, check_kind=False)\n\n def test_fill_value_when_combine_const(self):\n # GH12723\n dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')\n df = SparseDataFrame({'foo': dat}, index=range(6))\n\n exp = df.fillna(0).add(2)\n res = df.add(2, fill_value=0)\n tm.assert_sp_frame_equal(res, exp)\n\n def test_values(self, empty_frame, float_frame):\n empty = empty_frame.values\n assert empty.shape == (0, 0)\n\n no_cols = SparseDataFrame(index=np.arange(10))\n mat = no_cols.values\n assert mat.shape == (10, 0)\n\n no_index = SparseDataFrame(columns=np.arange(10))\n mat = no_index.values\n assert mat.shape == (0, 10)\n\n def test_copy(self, float_frame):\n cp = float_frame.copy()\n assert isinstance(cp, SparseDataFrame)\n tm.assert_sp_frame_equal(cp, float_frame)\n\n # as of v0.15.0\n # this is now identical (but not is_a )\n assert cp.index.identical(float_frame.index)\n\n def test_constructor(self, float_frame, float_frame_int_kind,\n float_frame_fill0):\n for col, series in compat.iteritems(float_frame):\n assert isinstance(series, SparseSeries)\n\n assert isinstance(float_frame_int_kind['A'].sp_index, IntIndex)\n\n # constructed zframe from matrix above\n assert float_frame_fill0['A'].fill_value == 0\n # XXX: changed asarray\n expected = pd.SparseArray([0, 0, 0, 0, 1., 2., 3., 4., 5., 6.],\n fill_value=0, kind='block')\n tm.assert_sp_array_equal(expected,\n float_frame_fill0['A'].values)\n tm.assert_numpy_array_equal(np.array([0., 0., 0., 0., 1., 2.,\n 3., 4., 5., 6.]),\n float_frame_fill0['A'].to_dense().values)\n\n # construct no data\n sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))\n for col, series in compat.iteritems(sdf):\n assert isinstance(series, SparseSeries)\n\n # construct from nested dict\n data = {c: s.to_dict() for c, s in compat.iteritems(float_frame)}\n\n sdf = SparseDataFrame(data)\n tm.assert_sp_frame_equal(sdf, float_frame)\n\n # TODO: test data is copied from inputs\n\n # 
init dict with different index\n idx = float_frame.index[:5]\n cons = SparseDataFrame(\n float_frame, index=idx, columns=float_frame.columns,\n default_fill_value=float_frame.default_fill_value,\n default_kind=float_frame.default_kind, copy=True)\n reindexed = float_frame.reindex(idx)\n\n tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)\n\n # assert level parameter breaks reindex\n with pytest.raises(TypeError):\n float_frame.reindex(idx, level=0)\n\n repr(float_frame)\n\n def test_constructor_dict_order(self):\n # GH19018\n # initialization ordering: by insertion order if python>= 3.6, else\n # order by value\n d = {'b': [2, 3], 'a': [0, 1]}\n frame = SparseDataFrame(data=d)\n if compat.PY36:\n expected = SparseDataFrame(data=d, columns=list('ba'))\n else:\n expected = SparseDataFrame(data=d, columns=list('ab'))\n tm.assert_sp_frame_equal(frame, expected)\n\n def test_constructor_ndarray(self, float_frame):\n # no index or columns\n sp = SparseDataFrame(float_frame.values)\n\n # 1d\n sp = SparseDataFrame(float_frame['A'].values, index=float_frame.index,\n columns=['A'])\n tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=['A']))\n\n # raise on level argument\n pytest.raises(TypeError, float_frame.reindex, columns=['A'],\n level=1)\n\n # wrong length index / columns\n with pytest.raises(ValueError, match=\"^Index length\"):\n SparseDataFrame(float_frame.values, index=float_frame.index[:-1])\n\n with pytest.raises(ValueError, match=\"^Column length\"):\n SparseDataFrame(float_frame.values,\n columns=float_frame.columns[:-1])\n\n # GH 9272\n def test_constructor_empty(self):\n sp = SparseDataFrame()\n assert len(sp.index) == 0\n assert len(sp.columns) == 0\n\n def test_constructor_dataframe(self, float_frame):\n dense = float_frame.to_dense()\n sp = SparseDataFrame(dense)\n tm.assert_sp_frame_equal(sp, float_frame)\n\n def test_constructor_convert_index_once(self):\n arr = np.array([1.5, 2.5, 3.5])\n sdf = SparseDataFrame(columns=lrange(4), index=arr)\n assert sdf[0].index is sdf[1].index\n\n def test_constructor_from_series(self):\n\n # GH 2873\n x = Series(np.random.randn(10000), name='a')\n x = x.to_sparse(fill_value=0)\n assert isinstance(x, SparseSeries)\n df = SparseDataFrame(x)\n assert isinstance(df, SparseDataFrame)\n\n x = Series(np.random.randn(10000), name='a')\n y = Series(np.random.randn(10000), name='b')\n x2 = x.astype(float)\n x2.loc[:9998] = np.NaN\n # TODO: x_sparse is unused...fix\n x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa\n\n # Currently fails too with weird ufunc error\n # df1 = SparseDataFrame([x_sparse, y])\n\n y.loc[:9998] = 0\n # TODO: y_sparse is unsused...fix\n y_sparse = y.to_sparse(fill_value=0) # noqa\n # without sparse value raises error\n # df2 = SparseDataFrame([x2_sparse, y])\n\n def test_constructor_from_dense_series(self):\n # GH 19393\n # series with name\n x = Series(np.random.randn(10000), name='a')\n result = SparseDataFrame(x)\n expected = x.to_frame().to_sparse()\n tm.assert_sp_frame_equal(result, expected)\n\n # series with no name\n x = Series(np.random.randn(10000))\n result = SparseDataFrame(x)\n expected = x.to_frame().to_sparse()\n tm.assert_sp_frame_equal(result, expected)\n\n def test_constructor_from_unknown_type(self):\n # GH 19393\n class Unknown(object):\n pass\n with pytest.raises(TypeError,\n message='SparseDataFrame called with unknown type '\n '\"Unknown\" for data argument'):\n SparseDataFrame(Unknown())\n\n def test_constructor_preserve_attr(self):\n # GH 13866\n arr = pd.SparseArray([1, 0, 3, 0], 
dtype=np.int64, fill_value=0)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n df = pd.SparseDataFrame({'x': arr})\n assert df['x'].dtype == SparseDtype(np.int64)\n assert df['x'].fill_value == 0\n\n s = pd.SparseSeries(arr, name='x')\n assert s.dtype == SparseDtype(np.int64)\n assert s.fill_value == 0\n\n df = pd.SparseDataFrame(s)\n assert df['x'].dtype == SparseDtype(np.int64)\n assert df['x'].fill_value == 0\n\n df = pd.SparseDataFrame({'x': s})\n assert df['x'].dtype == SparseDtype(np.int64)\n assert df['x'].fill_value == 0\n\n def test_constructor_nan_dataframe(self):\n # GH 10079\n trains = np.arange(100)\n thresholds = [10, 20, 30, 40, 50, 60]\n tuples = [(i, j) for i in trains for j in thresholds]\n index = pd.MultiIndex.from_tuples(tuples,\n names=['trains', 'thresholds'])\n matrix = np.empty((len(index), len(trains)))\n matrix.fill(np.nan)\n df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float)\n result = df.to_sparse()\n expected = pd.SparseDataFrame(matrix, index=index, columns=trains,\n dtype=float)\n tm.assert_sp_frame_equal(result, expected)\n\n def test_type_coercion_at_construction(self):\n # GH 15682\n result = pd.SparseDataFrame(\n {'a': [1, 0, 0], 'b': [0, 1, 0], 'c': [0, 0, 1]}, dtype='uint8',\n default_fill_value=0)\n expected = pd.SparseDataFrame(\n {'a': pd.SparseSeries([1, 0, 0], dtype='uint8'),\n 'b': pd.SparseSeries([0, 1, 0], dtype='uint8'),\n 'c': pd.SparseSeries([0, 0, 1], dtype='uint8')},\n default_fill_value=0)\n tm.assert_sp_frame_equal(result, expected)\n\n def test_dtypes(self):\n df = DataFrame(np.random.randn(10000, 4))\n df.loc[:9998] = np.nan\n sdf = df.to_sparse()\n\n result = sdf.get_dtype_counts()\n expected = Series({'Sparse[float64, nan]': 4})\n tm.assert_series_equal(result, expected)\n\n def test_shape(self, float_frame, float_frame_int_kind,\n float_frame_fill0, float_frame_fill2):\n # see gh-10452\n assert float_frame.shape == (10, 4)\n assert float_frame_int_kind.shape == (10, 4)\n assert float_frame_fill0.shape == (10, 4)\n assert float_frame_fill2.shape == (10, 4)\n\n def test_str(self):\n df = DataFrame(np.random.randn(10000, 4))\n df.loc[:9998] = np.nan\n\n sdf = df.to_sparse()\n str(sdf)\n\n def test_array_interface(self, float_frame):\n res = np.sqrt(float_frame)\n dres = np.sqrt(float_frame.to_dense())\n tm.assert_frame_equal(res.to_dense(), dres)\n\n def test_pickle(self, float_frame, float_frame_int_kind, float_frame_dense,\n float_frame_fill0, float_frame_fill0_dense,\n float_frame_fill2, float_frame_fill2_dense):\n\n def _test_roundtrip(frame, orig):\n result = tm.round_trip_pickle(frame)\n tm.assert_sp_frame_equal(frame, result)\n tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)\n\n _test_roundtrip(SparseDataFrame(), DataFrame())\n _test_roundtrip(float_frame, float_frame_dense)\n _test_roundtrip(float_frame_int_kind, float_frame_dense)\n _test_roundtrip(float_frame_fill0, float_frame_fill0_dense)\n _test_roundtrip(float_frame_fill2, float_frame_fill2_dense)\n\n def test_dense_to_sparse(self):\n df = DataFrame({'A': [nan, nan, nan, 1, 2],\n 'B': [1, 2, nan, nan, nan]})\n sdf = df.to_sparse()\n assert isinstance(sdf, SparseDataFrame)\n assert np.isnan(sdf.default_fill_value)\n assert isinstance(sdf['A'].sp_index, BlockIndex)\n tm.assert_frame_equal(sdf.to_dense(), df)\n\n sdf = df.to_sparse(kind='integer')\n assert isinstance(sdf['A'].sp_index, IntIndex)\n\n df = DataFrame({'A': [0, 0, 0, 1, 2],\n 'B': [1, 2, 0, 0, 0]}, dtype=float)\n sdf = df.to_sparse(fill_value=0)\n 
assert sdf.default_fill_value == 0\n tm.assert_frame_equal(sdf.to_dense(), df)\n\n def test_density(self):\n df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])\n assert df.density == 0.7\n\n df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],\n 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],\n 'C': np.arange(10),\n 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})\n\n assert df.density == 0.75\n\n def test_sparse_to_dense(self):\n pass\n\n def test_sparse_series_ops(self, float_frame):\n self._check_frame_ops(float_frame)\n\n def test_sparse_series_ops_i(self, float_frame_int_kind):\n self._check_frame_ops(float_frame_int_kind)\n\n def test_sparse_series_ops_z(self, float_frame_fill0):\n self._check_frame_ops(float_frame_fill0)\n\n def test_sparse_series_ops_fill(self, float_frame_fill2):\n self._check_frame_ops(float_frame_fill2)\n\n def _check_frame_ops(self, frame):\n\n def _compare_to_dense(a, b, da, db, op):\n sparse_result = op(a, b)\n dense_result = op(da, db)\n\n fill = sparse_result.default_fill_value\n dense_result = dense_result.to_sparse(fill_value=fill)\n tm.assert_sp_frame_equal(sparse_result, dense_result,\n exact_indices=False)\n\n if isinstance(a, DataFrame) and isinstance(db, DataFrame):\n mixed_result = op(a, db)\n assert isinstance(mixed_result, SparseDataFrame)\n tm.assert_sp_frame_equal(mixed_result, sparse_result,\n exact_indices=False)\n\n opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']\n ops = [getattr(operator, name) for name in opnames]\n\n fidx = frame.index\n\n # time series operations\n\n series = [frame['A'], frame['B'], frame['C'], frame['D'],\n frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),\n SparseSeries(\n [], index=[])]\n\n for op in opnames:\n _compare_to_dense(frame, frame[::2], frame.to_dense(),\n frame[::2].to_dense(), getattr(operator, op))\n\n # 2304, no auto-broadcasting\n for i, s in enumerate(series):\n f = lambda a, b: getattr(a, op)(b, axis='index')\n _compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)\n\n # rops are not implemented\n # _compare_to_dense(s, frame, s.to_dense(),\n # frame.to_dense(), f)\n\n # cross-sectional operations\n series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),\n frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]\n\n for op in ops:\n for s in series:\n _compare_to_dense(frame, s, frame.to_dense(), s, op)\n _compare_to_dense(s, frame, s, frame.to_dense(), op)\n\n # it works!\n result = frame + frame.loc[:, ['A', 'B']] # noqa\n\n def test_op_corners(self, float_frame, empty_frame):\n empty = empty_frame + empty_frame\n assert empty.empty\n\n foo = float_frame + empty_frame\n assert isinstance(foo.index, DatetimeIndex)\n tm.assert_frame_equal(foo, float_frame * np.nan)\n\n foo = empty_frame + float_frame\n tm.assert_frame_equal(foo, float_frame * np.nan)\n\n def test_scalar_ops(self):\n pass\n\n def test_getitem(self):\n # 1585 select multiple columns\n sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])\n\n result = sdf[['a', 'b']]\n exp = sdf.reindex(columns=['a', 'b'])\n tm.assert_sp_frame_equal(result, exp)\n\n pytest.raises(Exception, sdf.__getitem__, ['a', 'd'])\n\n def test_iloc(self, float_frame):\n\n # GH 2227\n result = float_frame.iloc[:, 0]\n assert isinstance(result, SparseSeries)\n tm.assert_sp_series_equal(result, float_frame['A'])\n\n # preserve sparse index type. 
#2251\n data = {'A': [0, 1]}\n iframe = SparseDataFrame(data, default_kind='integer')\n tm.assert_class_equal(iframe['A'].sp_index,\n iframe.iloc[:, 0].sp_index)\n\n def test_set_value(self, float_frame):\n\n # ok, as the index gets converted to object\n frame = float_frame.copy()\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n res = frame.set_value('foobar', 'B', 1.5)\n assert res.index.dtype == 'object'\n\n res = float_frame\n res.index = res.index.astype(object)\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n res = float_frame.set_value('foobar', 'B', 1.5)\n assert res is not float_frame\n assert res.index[-1] == 'foobar'\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n assert res.get_value('foobar', 'B') == 1.5\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n res2 = res.set_value('foobar', 'qux', 1.5)\n assert res2 is not res\n tm.assert_index_equal(res2.columns,\n pd.Index(list(float_frame.columns) + ['qux']))\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n assert res2.get_value('foobar', 'qux') == 1.5\n\n def test_fancy_index_misc(self, float_frame):\n # axis = 0\n sliced = float_frame.iloc[-2:, :]\n expected = float_frame.reindex(index=float_frame.index[-2:])\n tm.assert_sp_frame_equal(sliced, expected)\n\n # axis = 1\n sliced = float_frame.iloc[:, -2:]\n expected = float_frame.reindex(columns=float_frame.columns[-2:])\n tm.assert_sp_frame_equal(sliced, expected)\n\n def test_getitem_overload(self, float_frame):\n # slicing\n sl = float_frame[:20]\n tm.assert_sp_frame_equal(sl,\n float_frame.reindex(float_frame.index[:20]))\n\n # boolean indexing\n d = float_frame.index[5]\n indexer = float_frame.index > d\n\n subindex = float_frame.index[indexer]\n subframe = float_frame[indexer]\n\n tm.assert_index_equal(subindex, subframe.index)\n pytest.raises(Exception, float_frame.__getitem__, indexer[:-1])\n\n def test_setitem(self, float_frame, float_frame_int_kind,\n float_frame_dense,\n float_frame_fill0, float_frame_fill0_dense,\n float_frame_fill2, float_frame_fill2_dense):\n\n def _check_frame(frame, orig):\n N = len(frame)\n\n # insert SparseSeries\n frame['E'] = frame['A']\n assert isinstance(frame['E'], SparseSeries)\n tm.assert_sp_series_equal(frame['E'], frame['A'],\n check_names=False)\n\n # insert SparseSeries differently-indexed\n to_insert = frame['A'][::2]\n frame['E'] = to_insert\n expected = to_insert.to_dense().reindex(frame.index)\n result = frame['E'].to_dense()\n tm.assert_series_equal(result, expected, check_names=False)\n assert result.name == 'E'\n\n # insert Series\n frame['F'] = frame['A'].to_dense()\n assert isinstance(frame['F'], SparseSeries)\n tm.assert_sp_series_equal(frame['F'], frame['A'],\n check_names=False)\n\n # insert Series differently-indexed\n to_insert = frame['A'].to_dense()[::2]\n frame['G'] = to_insert\n expected = to_insert.reindex(frame.index)\n expected.name = 'G'\n tm.assert_series_equal(frame['G'].to_dense(), expected)\n\n # insert ndarray\n frame['H'] = np.random.randn(N)\n assert isinstance(frame['H'], SparseSeries)\n\n to_sparsify = np.random.randn(N)\n to_sparsify[N // 2:] = frame.default_fill_value\n frame['I'] = to_sparsify\n assert len(frame['I'].sp_values) == N // 2\n\n # insert ndarray wrong size\n pytest.raises(Exception, frame.__setitem__, 'foo',\n np.random.randn(N - 1))\n\n # scalar value\n frame['J'] = 5\n assert len(frame['J'].sp_values) == N\n assert (frame['J'].sp_values == 
5).all()\n\n frame['K'] = frame.default_fill_value\n assert len(frame['K'].sp_values) == 0\n\n _check_frame(float_frame, float_frame_dense)\n _check_frame(float_frame_int_kind, float_frame_dense)\n _check_frame(float_frame_fill0, float_frame_fill0_dense)\n _check_frame(float_frame_fill2, float_frame_fill2_dense)\n\n @pytest.mark.parametrize('values', [\n [True, False],\n [0, 1],\n [1, None],\n ['a', 'b'],\n [pd.Timestamp('2017'), pd.NaT],\n [pd.Timedelta('10s'), pd.NaT],\n ])\n def test_setitem_more(self, values):\n df = pd.DataFrame({\"A\": values})\n df['A'] = pd.SparseArray(values)\n expected = pd.DataFrame({'A': pd.SparseArray(values)})\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_corner(self, float_frame):\n float_frame['a'] = float_frame['B']\n tm.assert_sp_series_equal(float_frame['a'], float_frame['B'],\n check_names=False)\n\n def test_setitem_array(self, float_frame):\n arr = float_frame['B']\n\n float_frame['E'] = arr\n tm.assert_sp_series_equal(float_frame['E'], float_frame['B'],\n check_names=False)\n\n float_frame['F'] = arr[:-1]\n index = float_frame.index[:-1]\n tm.assert_sp_series_equal(float_frame['E'].reindex(index),\n float_frame['F'].reindex(index),\n check_names=False)\n\n def test_setitem_chained_no_consolidate(self):\n # https://github.com/pandas-dev/pandas/pull/19268\n # issuecomment-361696418\n # chained setitem used to cause consolidation\n sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])\n with pd.option_context('mode.chained_assignment', None):\n sdf[0][1] = 2\n assert len(sdf._data.blocks) == 2\n\n def test_delitem(self, float_frame):\n A = float_frame['A']\n C = float_frame['C']\n\n del float_frame['B']\n assert 'B' not in float_frame\n tm.assert_sp_series_equal(float_frame['A'], A)\n tm.assert_sp_series_equal(float_frame['C'], C)\n\n del float_frame['D']\n assert 'D' not in float_frame\n\n del float_frame['A']\n assert 'A' not in float_frame\n\n def test_set_columns(self, float_frame):\n float_frame.columns = float_frame.columns\n pytest.raises(Exception, setattr, float_frame, 'columns',\n float_frame.columns[:-1])\n\n def test_set_index(self, float_frame):\n float_frame.index = float_frame.index\n pytest.raises(Exception, setattr, float_frame, 'index',\n float_frame.index[:-1])\n\n def test_ctor_reindex(self):\n idx = pd.Index([0, 1, 2, 3])\n with pytest.raises(ValueError, match=''):\n pd.SparseDataFrame({\"A\": [1, 2]}, index=idx)\n\n def test_append(self, float_frame):\n a = float_frame[:5]\n b = float_frame[5:]\n\n appended = a.append(b)\n tm.assert_sp_frame_equal(appended, float_frame, exact_indices=False)\n\n a = float_frame.iloc[:5, :3]\n b = float_frame.iloc[5:]\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # Stacklevel is set for pd.concat, not append\n appended = a.append(b)\n tm.assert_sp_frame_equal(appended.iloc[:, :3], float_frame.iloc[:, :3],\n exact_indices=False)\n\n a = a[['B', 'C', 'A']].head(2)\n b = b.head(2)\n\n expected = pd.SparseDataFrame({\n \"B\": [0., 1, None, 3],\n \"C\": [0., 1, 5, 6],\n \"A\": [None, None, 2, 3],\n \"D\": [None, None, 5, None],\n }, index=a.index | b.index, columns=['B', 'C', 'A', 'D'])\n with tm.assert_produces_warning(None):\n appended = a.append(b, sort=False)\n\n tm.assert_frame_equal(appended, expected)\n\n with tm.assert_produces_warning(None):\n appended = a.append(b, sort=True)\n\n tm.assert_sp_frame_equal(appended, expected[['A', 'B', 'C', 'D']],\n consolidate_block_indices=True,\n check_kind=False)\n\n def test_astype(self):\n sparse = 
pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4],\n dtype=np.int64),\n 'B': SparseArray([4, 5, 6, 7],\n dtype=np.int64)})\n assert sparse['A'].dtype == SparseDtype(np.int64)\n assert sparse['B'].dtype == SparseDtype(np.int64)\n\n # retain fill_value\n res = sparse.astype(np.float64)\n exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],\n fill_value=0,\n kind='integer'),\n 'B': SparseArray([4., 5., 6., 7.],\n fill_value=0,\n kind='integer')},\n default_fill_value=np.nan)\n tm.assert_sp_frame_equal(res, exp)\n assert res['A'].dtype == SparseDtype(np.float64, 0)\n assert res['B'].dtype == SparseDtype(np.float64, 0)\n\n # update fill_value\n res = sparse.astype(SparseDtype(np.float64, np.nan))\n exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],\n fill_value=np.nan,\n kind='integer'),\n 'B': SparseArray([4., 5., 6., 7.],\n fill_value=np.nan,\n kind='integer')},\n default_fill_value=np.nan)\n tm.assert_sp_frame_equal(res, exp)\n assert res['A'].dtype == SparseDtype(np.float64, np.nan)\n assert res['B'].dtype == SparseDtype(np.float64, np.nan)\n\n def test_astype_bool(self):\n sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],\n fill_value=0,\n dtype=np.int64),\n 'B': SparseArray([0, 5, 0, 7],\n fill_value=0,\n dtype=np.int64)},\n default_fill_value=0)\n assert sparse['A'].dtype == SparseDtype(np.int64)\n assert sparse['B'].dtype == SparseDtype(np.int64)\n\n res = sparse.astype(SparseDtype(bool, False))\n exp = pd.SparseDataFrame({'A': SparseArray([False, True, False, True],\n dtype=np.bool,\n fill_value=False,\n kind='integer'),\n 'B': SparseArray([False, True, False, True],\n dtype=np.bool,\n fill_value=False,\n kind='integer')},\n default_fill_value=False)\n tm.assert_sp_frame_equal(res, exp)\n assert res['A'].dtype == SparseDtype(np.bool)\n assert res['B'].dtype == SparseDtype(np.bool)\n\n def test_astype_object(self):\n # This may change in GH-23125\n df = pd.DataFrame({\"A\": SparseArray([0, 1]),\n \"B\": SparseArray([0, 1])})\n result = df.astype(object)\n dtype = SparseDtype(object, 0)\n expected = pd.DataFrame({\"A\": SparseArray([0, 1], dtype=dtype),\n \"B\": SparseArray([0, 1], dtype=dtype)})\n tm.assert_frame_equal(result, expected)\n\n def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):\n df = float_frame_fill0.reindex(lrange(5))\n dense = float_frame_fill0_dense.reindex(lrange(5))\n\n result = df.fillna(0)\n expected = dense.fillna(0)\n tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),\n exact_indices=False)\n tm.assert_frame_equal(result.to_dense(), expected)\n\n result = df.copy()\n result.fillna(0, inplace=True)\n expected = dense.fillna(0)\n\n tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),\n exact_indices=False)\n tm.assert_frame_equal(result.to_dense(), expected)\n\n result = df.copy()\n result = df['A']\n result.fillna(0, inplace=True)\n\n expected = dense['A'].fillna(0)\n # this changes internal SparseArray repr\n # tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))\n tm.assert_series_equal(result.to_dense(), expected)\n\n def test_fillna_fill_value(self):\n df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]})\n\n sparse = pd.SparseDataFrame(df)\n tm.assert_frame_equal(sparse.fillna(-1).to_dense(),\n df.fillna(-1), check_dtype=False)\n\n sparse = pd.SparseDataFrame(df, default_fill_value=0)\n tm.assert_frame_equal(sparse.fillna(-1).to_dense(),\n df.fillna(-1), check_dtype=False)\n\n def test_sparse_frame_pad_backfill_limit(self):\n index = np.arange(10)\n df = 
DataFrame(np.random.randn(10, 4), index=index)\n sdf = df.to_sparse()\n\n result = sdf[:2].reindex(index, method='pad', limit=5)\n\n with tm.assert_produces_warning(PerformanceWarning):\n expected = sdf[:2].reindex(index).fillna(method='pad')\n expected = expected.to_dense()\n expected.values[-3:] = np.nan\n expected = expected.to_sparse()\n tm.assert_frame_equal(result, expected)\n\n result = sdf[-2:].reindex(index, method='backfill', limit=5)\n\n with tm.assert_produces_warning(PerformanceWarning):\n expected = sdf[-2:].reindex(index).fillna(method='backfill')\n expected = expected.to_dense()\n expected.values[:3] = np.nan\n expected = expected.to_sparse()\n tm.assert_frame_equal(result, expected)\n\n def test_sparse_frame_fillna_limit(self):\n index = np.arange(10)\n df = DataFrame(np.random.randn(10, 4), index=index)\n sdf = df.to_sparse()\n\n result = sdf[:2].reindex(index)\n with tm.assert_produces_warning(PerformanceWarning):\n result = result.fillna(method='pad', limit=5)\n\n with tm.assert_produces_warning(PerformanceWarning):\n expected = sdf[:2].reindex(index).fillna(method='pad')\n expected = expected.to_dense()\n expected.values[-3:] = np.nan\n expected = expected.to_sparse()\n tm.assert_frame_equal(result, expected)\n\n result = sdf[-2:].reindex(index)\n with tm.assert_produces_warning(PerformanceWarning):\n result = result.fillna(method='backfill', limit=5)\n\n with tm.assert_produces_warning(PerformanceWarning):\n expected = sdf[-2:].reindex(index).fillna(method='backfill')\n expected = expected.to_dense()\n expected.values[:3] = np.nan\n expected = expected.to_sparse()\n tm.assert_frame_equal(result, expected)\n\n def test_rename(self, float_frame):\n result = float_frame.rename(index=str)\n expected = SparseDataFrame(float_frame.values,\n index=float_frame.index.strftime(\n \"%Y-%m-%d %H:%M:%S\"),\n columns=list('ABCD'))\n tm.assert_sp_frame_equal(result, expected)\n\n result = float_frame.rename(columns=lambda x: '%s%d' % (x, 1))\n data = {'A1': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],\n 'B1': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],\n 'C1': np.arange(10, dtype=np.float64),\n 'D1': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}\n expected = SparseDataFrame(data, index=float_frame.index)\n tm.assert_sp_frame_equal(result, expected)\n\n def test_corr(self, float_frame):\n res = float_frame.corr()\n # XXX: this stays sparse\n tm.assert_frame_equal(res, float_frame.to_dense().corr().to_sparse())\n\n def test_describe(self, float_frame):\n float_frame['foo'] = np.nan\n float_frame.get_dtype_counts()\n str(float_frame)\n desc = float_frame.describe() # noqa\n\n def test_join(self, float_frame):\n left = float_frame.loc[:, ['A', 'B']]\n right = float_frame.loc[:, ['C', 'D']]\n joined = left.join(right)\n tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False)\n\n right = float_frame.loc[:, ['B', 'D']]\n pytest.raises(Exception, left.join, right)\n\n with pytest.raises(ValueError, match='Other Series must have a name'):\n float_frame.join(Series(\n np.random.randn(len(float_frame)), index=float_frame.index))\n\n def test_reindex(self, float_frame, float_frame_int_kind,\n float_frame_fill0, float_frame_fill2):\n\n def _check_frame(frame):\n index = frame.index\n sidx = index[::2]\n sidx2 = index[:5] # noqa\n\n sparse_result = frame.reindex(sidx)\n dense_result = frame.to_dense().reindex(sidx)\n tm.assert_frame_equal(sparse_result.to_dense(), dense_result)\n\n tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(),\n dense_result)\n\n sparse_result2 = 
sparse_result.reindex(index)\n dense_result2 = dense_result.reindex(index)\n tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)\n\n # propagate CORRECT fill value\n tm.assert_almost_equal(sparse_result.default_fill_value,\n frame.default_fill_value)\n tm.assert_almost_equal(sparse_result['A'].fill_value,\n frame['A'].fill_value)\n\n # length zero\n length_zero = frame.reindex([])\n assert len(length_zero) == 0\n assert len(length_zero.columns) == len(frame.columns)\n assert len(length_zero['A']) == 0\n\n # frame being reindexed has length zero\n length_n = length_zero.reindex(index)\n assert len(length_n) == len(frame)\n assert len(length_n.columns) == len(frame.columns)\n assert len(length_n['A']) == len(frame)\n\n # reindex columns\n reindexed = frame.reindex(columns=['A', 'B', 'Z'])\n assert len(reindexed.columns) == 3\n tm.assert_almost_equal(reindexed['Z'].fill_value,\n frame.default_fill_value)\n assert np.isnan(reindexed['Z'].sp_values).all()\n\n _check_frame(float_frame)\n _check_frame(float_frame_int_kind)\n _check_frame(float_frame_fill0)\n _check_frame(float_frame_fill2)\n\n # with copy=False\n reindexed = float_frame.reindex(float_frame.index, copy=False)\n reindexed['F'] = reindexed['A']\n assert 'F' in float_frame\n\n reindexed = float_frame.reindex(float_frame.index)\n reindexed['G'] = reindexed['A']\n assert 'G' not in float_frame\n\n def test_reindex_fill_value(self, float_frame_fill0,\n float_frame_fill0_dense):\n rng = bdate_range('20110110', periods=20)\n\n result = float_frame_fill0.reindex(rng, fill_value=0)\n exp = float_frame_fill0_dense.reindex(rng, fill_value=0)\n exp = exp.to_sparse(float_frame_fill0.default_fill_value)\n tm.assert_sp_frame_equal(result, exp)\n\n def test_reindex_method(self):\n\n sparse = SparseDataFrame(data=[[11., 12., 14.],\n [21., 22., 24.],\n [41., 42., 44.]],\n index=[1, 2, 4],\n columns=[1, 2, 4],\n dtype=float)\n\n # Over indices\n\n # default method\n result = sparse.reindex(index=range(6))\n expected = SparseDataFrame(data=[[nan, nan, nan],\n [11., 12., 14.],\n [21., 22., 24.],\n [nan, nan, nan],\n [41., 42., 44.],\n [nan, nan, nan]],\n index=range(6),\n columns=[1, 2, 4],\n dtype=float)\n tm.assert_sp_frame_equal(result, expected)\n\n # method='bfill'\n result = sparse.reindex(index=range(6), method='bfill')\n expected = SparseDataFrame(data=[[11., 12., 14.],\n [11., 12., 14.],\n [21., 22., 24.],\n [41., 42., 44.],\n [41., 42., 44.],\n [nan, nan, nan]],\n index=range(6),\n columns=[1, 2, 4],\n dtype=float)\n tm.assert_sp_frame_equal(result, expected)\n\n # method='ffill'\n result = sparse.reindex(index=range(6), method='ffill')\n expected = SparseDataFrame(data=[[nan, nan, nan],\n [11., 12., 14.],\n [21., 22., 24.],\n [21., 22., 24.],\n [41., 42., 44.],\n [41., 42., 44.]],\n index=range(6),\n columns=[1, 2, 4],\n dtype=float)\n tm.assert_sp_frame_equal(result, expected)\n\n # Over columns\n\n # default method\n result = sparse.reindex(columns=range(6))\n expected = SparseDataFrame(data=[[nan, 11., 12., nan, 14., nan],\n [nan, 21., 22., nan, 24., nan],\n [nan, 41., 42., nan, 44., nan]],\n index=[1, 2, 4],\n columns=range(6),\n dtype=float)\n tm.assert_sp_frame_equal(result, expected)\n\n # method='bfill'\n with pytest.raises(NotImplementedError):\n sparse.reindex(columns=range(6), method='bfill')\n\n # method='ffill'\n with pytest.raises(NotImplementedError):\n sparse.reindex(columns=range(6), method='ffill')\n\n def test_take(self, float_frame):\n result = float_frame.take([1, 0, 2], axis=1)\n expected = 
float_frame.reindex(columns=['B', 'A', 'C'])\n tm.assert_sp_frame_equal(result, expected)\n\n def test_to_dense(self, float_frame, float_frame_int_kind,\n float_frame_dense,\n float_frame_fill0, float_frame_fill0_dense,\n float_frame_fill2, float_frame_fill2_dense):\n def _check(frame, orig):\n dense_dm = frame.to_dense()\n # Sparse[float] != float\n tm.assert_frame_equal(frame, dense_dm, check_dtype=False)\n tm.assert_frame_equal(dense_dm, orig, check_dtype=False)\n\n _check(float_frame, float_frame_dense)\n _check(float_frame_int_kind, float_frame_dense)\n _check(float_frame_fill0, float_frame_fill0_dense)\n _check(float_frame_fill2, float_frame_fill2_dense)\n\n @pytest.mark.filterwarnings(\"ignore:\\\\nPanel:FutureWarning\")\n def test_stack_sparse_frame(self, float_frame, float_frame_int_kind,\n float_frame_fill0, float_frame_fill2):\n def _check(frame):\n dense_frame = frame.to_dense() # noqa\n\n wp = Panel.from_dict({'foo': frame})\n from_dense_lp = wp.to_frame()\n\n from_sparse_lp = spf.stack_sparse_frame(frame)\n\n tm.assert_numpy_array_equal(from_dense_lp.values,\n from_sparse_lp.values)\n\n _check(float_frame)\n _check(float_frame_int_kind)\n\n # for now\n pytest.raises(Exception, _check, float_frame_fill0)\n pytest.raises(Exception, _check, float_frame_fill2)\n\n def test_transpose(self, float_frame, float_frame_int_kind,\n float_frame_dense,\n float_frame_fill0, float_frame_fill0_dense,\n float_frame_fill2, float_frame_fill2_dense):\n\n def _check(frame, orig):\n transposed = frame.T\n untransposed = transposed.T\n tm.assert_sp_frame_equal(frame, untransposed)\n\n tm.assert_frame_equal(frame.T.to_dense(), orig.T)\n tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)\n tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)\n\n _check(float_frame, float_frame_dense)\n _check(float_frame_int_kind, float_frame_dense)\n _check(float_frame_fill0, float_frame_fill0_dense)\n _check(float_frame_fill2, float_frame_fill2_dense)\n\n def test_shift(self, float_frame, float_frame_int_kind, float_frame_dense,\n float_frame_fill0, float_frame_fill0_dense,\n float_frame_fill2, float_frame_fill2_dense):\n\n def _check(frame, orig):\n shifted = frame.shift(0)\n exp = orig.shift(0)\n tm.assert_frame_equal(shifted.to_dense(), exp)\n\n shifted = frame.shift(1)\n exp = orig.shift(1)\n tm.assert_frame_equal(shifted.to_dense(), exp)\n\n shifted = frame.shift(-2)\n exp = orig.shift(-2)\n tm.assert_frame_equal(shifted.to_dense(), exp)\n\n shifted = frame.shift(2, freq='B')\n exp = orig.shift(2, freq='B')\n exp = exp.to_sparse(frame.default_fill_value,\n kind=frame.default_kind)\n tm.assert_frame_equal(shifted, exp)\n\n shifted = frame.shift(2, freq=BDay())\n exp = orig.shift(2, freq=BDay())\n exp = exp.to_sparse(frame.default_fill_value,\n kind=frame.default_kind)\n tm.assert_frame_equal(shifted, exp)\n\n _check(float_frame, float_frame_dense)\n _check(float_frame_int_kind, float_frame_dense)\n _check(float_frame_fill0, float_frame_fill0_dense)\n _check(float_frame_fill2, float_frame_fill2_dense)\n\n def test_count(self, float_frame):\n dense_result = float_frame.to_dense().count()\n\n result = float_frame.count()\n tm.assert_series_equal(result.to_dense(), dense_result)\n\n result = float_frame.count(axis=None)\n tm.assert_series_equal(result.to_dense(), dense_result)\n\n result = float_frame.count(axis=0)\n tm.assert_series_equal(result.to_dense(), dense_result)\n\n result = float_frame.count(axis=1)\n dense_result = float_frame.to_dense().count(axis=1)\n\n # win32 don't check dtype\n 
tm.assert_series_equal(result, dense_result, check_dtype=False)\n\n def test_numpy_transpose(self):\n sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a'])\n result = np.transpose(np.transpose(sdf))\n tm.assert_sp_frame_equal(result, sdf)\n\n msg = \"the 'axes' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.transpose(sdf, axes=1)\n\n def test_combine_first(self, float_frame):\n df = float_frame\n\n result = df[::2].combine_first(df)\n\n expected = df[::2].to_dense().combine_first(df.to_dense())\n expected = expected.to_sparse(fill_value=df.default_fill_value)\n\n tm.assert_sp_frame_equal(result, expected)\n\n @pytest.mark.xfail(reason=\"No longer supported.\", strict=True)\n def test_combine_first_with_dense(self):\n # We could support this if we allow\n # pd.core.dtypes.cast.find_common_type to special case SparseDtype\n # but I don't think that's worth it.\n df = self.frame\n\n result = df[::2].combine_first(df.to_dense())\n expected = df[::2].to_dense().combine_first(df.to_dense())\n expected = expected.to_sparse(fill_value=df.default_fill_value)\n\n tm.assert_sp_frame_equal(result, expected)\n\n def test_combine_add(self, float_frame):\n df = float_frame.to_dense()\n df2 = df.copy()\n df2['C'][:3] = np.nan\n df['A'][:3] = 5.7\n\n result = df.to_sparse().add(df2.to_sparse(), fill_value=0)\n expected = df.add(df2, fill_value=0).to_sparse()\n tm.assert_sp_frame_equal(result, expected)\n\n def test_isin(self):\n sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)\n xp = sparse_df[sparse_df.flag == 1.]\n rs = sparse_df[sparse_df.flag.isin([1.])]\n tm.assert_frame_equal(xp, rs)\n\n def test_sparse_pow_issue(self):\n # 2220\n df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})\n\n # note : no error without nan\n df = SparseDataFrame({'A': [nan, 0, 1]})\n\n # note that 2 ** df works fine, also df ** 1\n result = 1 ** df\n\n r1 = result.take([0], 1)['A']\n r2 = result['A']\n\n assert len(r2.sp_values) == len(r1.sp_values)\n\n def test_as_blocks(self):\n df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},\n dtype='float64')\n\n # deprecated 0.21.0\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n df_blocks = df.blocks\n assert list(df_blocks.keys()) == ['Sparse[float64, nan]']\n tm.assert_frame_equal(df_blocks['Sparse[float64, nan]'], df)\n\n @pytest.mark.xfail(reason='nan column names in _init_dict problematic '\n '(GH#16894)',\n strict=True)\n def test_nan_columnname(self):\n # GH 8822\n nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])\n nan_colname_sparse = nan_colname.to_sparse()\n assert np.isnan(nan_colname_sparse.columns[0])\n\n def test_isna(self):\n # GH 8276\n df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],\n 'B': [0, np.nan, np.nan, 2, np.nan]})\n\n res = df.isna()\n exp = pd.SparseDataFrame({'A': [True, True, False, False, True],\n 'B': [False, True, True, False, True]},\n default_fill_value=True)\n exp._default_fill_value = np.nan\n tm.assert_sp_frame_equal(res, exp)\n\n # if fill_value is not nan, True can be included in sp_values\n df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],\n 'B': [0, np.nan, 0, 2, np.nan]},\n default_fill_value=0.)\n res = df.isna()\n assert isinstance(res, pd.SparseDataFrame)\n exp = pd.DataFrame({'A': [False, False, False, False, True],\n 'B': [False, True, False, False, True]})\n tm.assert_frame_equal(res.to_dense(), exp)\n\n def test_notna(self):\n # GH 8276\n df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],\n 
'B': [0, np.nan, np.nan, 2, np.nan]})\n\n res = df.notna()\n exp = pd.SparseDataFrame({'A': [False, False, True, True, False],\n 'B': [True, False, False, True, False]},\n default_fill_value=False)\n exp._default_fill_value = np.nan\n tm.assert_sp_frame_equal(res, exp)\n\n # if fill_value is not nan, True can be included in sp_values\n df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],\n 'B': [0, np.nan, 0, 2, np.nan]},\n default_fill_value=0.)\n res = df.notna()\n assert isinstance(res, pd.SparseDataFrame)\n exp = pd.DataFrame({'A': [True, True, True, True, False],\n 'B': [True, False, True, True, False]})\n tm.assert_frame_equal(res.to_dense(), exp)\n\n\nclass TestSparseDataFrameArithmetic(object):\n\n def test_numeric_op_scalar(self):\n df = pd.DataFrame({'A': [nan, nan, 0, 1, ],\n 'B': [0, 1, 2, nan],\n 'C': [1., 2., 3., 4.],\n 'D': [nan, nan, nan, nan]})\n sparse = df.to_sparse()\n\n tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())\n\n def test_comparison_op_scalar(self):\n # GH 13001\n df = pd.DataFrame({'A': [nan, nan, 0, 1, ],\n 'B': [0, 1, 2, nan],\n 'C': [1., 2., 3., 4.],\n 'D': [nan, nan, nan, nan]})\n sparse = df.to_sparse()\n\n # comparison changes internal repr, compare with dense\n res = sparse > 1\n assert isinstance(res, pd.SparseDataFrame)\n tm.assert_frame_equal(res.to_dense(), df > 1)\n\n res = sparse != 0\n assert isinstance(res, pd.SparseDataFrame)\n tm.assert_frame_equal(res.to_dense(), df != 0)\n\n\nclass TestSparseDataFrameAnalytics(object):\n\n def test_cumsum(self, float_frame):\n expected = SparseDataFrame(float_frame.to_dense().cumsum())\n\n result = float_frame.cumsum()\n tm.assert_sp_frame_equal(result, expected)\n\n result = float_frame.cumsum(axis=None)\n tm.assert_sp_frame_equal(result, expected)\n\n result = float_frame.cumsum(axis=0)\n tm.assert_sp_frame_equal(result, expected)\n\n def test_numpy_cumsum(self, float_frame):\n result = np.cumsum(float_frame)\n expected = SparseDataFrame(float_frame.to_dense().cumsum())\n tm.assert_sp_frame_equal(result, expected)\n\n msg = \"the 'dtype' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.cumsum(float_frame, dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.cumsum(float_frame, out=result)\n\n def test_numpy_func_call(self, float_frame):\n # no exception should be raised even though\n # numpy passes in 'axis=None' or `axis=-1'\n funcs = ['sum', 'cumsum', 'var',\n 'mean', 'prod', 'cumprod',\n 'std', 'min', 'max']\n for func in funcs:\n getattr(np, func)(float_frame)\n\n @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH 17386)',\n strict=True)\n def test_quantile(self):\n # GH 17386\n data = [[1, 1], [2, 10], [3, 100], [nan, nan]]\n q = 0.1\n\n sparse_df = SparseDataFrame(data)\n result = sparse_df.quantile(q)\n\n dense_df = DataFrame(data)\n dense_expected = dense_df.quantile(q)\n sparse_expected = SparseSeries(dense_expected)\n\n tm.assert_series_equal(result, dense_expected)\n tm.assert_sp_series_equal(result, sparse_expected)\n\n @pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH 17386)',\n strict=True)\n def test_quantile_multi(self):\n # GH 17386\n data = [[1, 1], [2, 10], [3, 100], [nan, nan]]\n q = [0.1, 0.5]\n\n sparse_df = SparseDataFrame(data)\n result = sparse_df.quantile(q)\n\n dense_df = DataFrame(data)\n dense_expected = dense_df.quantile(q)\n sparse_expected = SparseDataFrame(dense_expected)\n\n tm.assert_frame_equal(result, dense_expected)\n 
tm.assert_sp_frame_equal(result, sparse_expected)\n\n def test_assign_with_sparse_frame(self):\n # GH 19163\n df = pd.DataFrame({\"a\": [1, 2, 3]})\n res = df.to_sparse(fill_value=False).assign(newcol=False)\n exp = df.assign(newcol=False).to_sparse(fill_value=False)\n\n tm.assert_sp_frame_equal(res, exp)\n\n for column in res.columns:\n assert type(res[column]) is SparseSeries\n\n @pytest.mark.parametrize(\"inplace\", [True, False])\n @pytest.mark.parametrize(\"how\", [\"all\", \"any\"])\n def test_dropna(self, inplace, how):\n # Tests regression #21172.\n expected = pd.SparseDataFrame({\"F2\": [0, 1]})\n input_df = pd.SparseDataFrame(\n {\"F1\": [float('nan'), float('nan')], \"F2\": [0, 1]}\n )\n result_df = input_df.dropna(axis=1, inplace=inplace, how=how)\n if inplace:\n result_df = input_df\n tm.assert_sp_frame_equal(expected, result_df)\n"
] | [
[
"pandas.util.testing.assert_sp_frame_equal",
"pandas.util.testing.assert_class_equal",
"pandas.Series",
"numpy.sqrt",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.SparseDataFrame",
"pandas.DataFrame",
"pandas.core.sparse.api.SparseDataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.SparseSeries",
"pandas.util.testing.assert_index_equal",
"pandas.core.sparse.api.SparseDtype",
"numpy.cumsum",
"pandas.compat.iteritems",
"numpy.random.randn",
"pandas.util.testing.round_trip_pickle",
"pandas.util.testing.assert_sp_series_equal",
"pandas.tseries.offsets.BDay",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.core.sparse.frame.stack_sparse_frame",
"pandas.core.sparse.api.SparseArray",
"pandas.bdate_range",
"numpy.isnan",
"pandas.option_context",
"pandas.util.testing.assert_almost_equal",
"pandas.Timedelta",
"pandas.util.testing.assert_sp_array_equal",
"pandas.Panel.from_dict",
"numpy.transpose",
"numpy.array",
"pandas.SparseArray",
"pandas.Timestamp",
"pandas.compat.lrange",
"pandas.core.sparse.api.SparseSeries"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24",
"0.23",
"0.21",
"0.20"
],
"scipy": [],
"tensorflow": []
}
] |
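A minimal sketch of the fill-value behavior the sparse tests above exercise, assuming an old pandas (the 0.20-0.24 range listed for this entry); SparseDataFrame and DataFrame.to_sparse() were removed in pandas 1.0, so this will not run on current releases:

    import numpy as np
    import pandas as pd

    # With default_fill_value=0., zeros are stored implicitly, so isna() can
    # stay sparse while still flagging the one explicit NaN.
    df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan]}, default_fill_value=0.)
    mask = df.isna()                       # still a SparseDataFrame
    print(mask.to_dense()['A'].tolist())   # [False, False, False, False, True]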
GiscardBiamby/geo | [
"54f54ea1c41c1d573eb59f67d1b4338f936414d9"
] | [
"geoscreens/data/splitting.py"
] | [
"from pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union, cast\n\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\nfrom pycocotools.helpers import CocoClassDistHelper as COCO\nfrom pycocotools.helpers import CocoJsonBuilder\nfrom tqdm.auto import tqdm\n\nfrom .metadata import get_all_metadata\n\n\ndef get_images_with_metadata(coco: COCO, df_meta: pd.DataFrame) -> List[Dict[str, Any]]:\n \"\"\"\n Gets list of all images in the given COCO object, combined with metadata info\n from df_meta.\n\n Returns::\n\n (Example)\n [\n {\n 'file_name': 'screen_samples_auto/DZ9JablpbhQ/frame_00000012.jpg',\n 'height': 720,\n 'id': 0,\n 'width': 1280,\n 'video_id': 'DZ9JablpbhQ',\n 'author': 'Mister Blue Geoguessr',\n 'ann_count': 6\n }, ...\n ]\n \"\"\"\n img_data = []\n for img in coco.dataset[\"images\"]:\n video_id = Path(img[\"file_name\"]).parent.name\n img_data.append(\n {\n **img,\n \"video_id\": video_id,\n \"author\": df_meta.loc[video_id][\"uploader\"] if video_id in df_meta.index else None,\n \"ann_count\": sum(\n [1 for ann in coco.dataset[\"annotations\"] if ann[\"image_id\"] == img[\"id\"]]\n ),\n \"meta_split\": df_meta.loc[video_id][\"split\"] if video_id in df_meta.index else None,\n }\n )\n return img_data\n\n\ndef get_metadata_df(all_metadata) -> pd.DataFrame:\n df_meta = pd.DataFrame(all_metadata).set_index(\"id\")\n df_meta = df_meta[~(df_meta.split == \"None\")].copy(deep=True)\n\n return cast(pd.DataFrame, df_meta)\n\n\ndef generate_train_val_splits(\n coco_path: Path,\n split_by: str = \"author\",\n train_pct: float = 0.8,\n):\n if isinstance(coco_path, str):\n coco_path = Path(coco_path)\n # Prepare metadata about the images;\n df_meta = cast(pd.DataFrame, get_metadata_df(get_all_metadata()))\n\n # Get image data from the source COCO dataset:\n coco = COCO(coco_path)\n img_data = get_images_with_metadata(coco, df_meta)\n df_images = pd.DataFrame(img_data)\n df_images.rename(columns={\"id\": \"image_id\"}, inplace=True)\n\n # Generate the grouping (e.g., group by author|video_id|etc first and then all images under a\n # group all go into exactly one split)\n df_groupings = pd.DataFrame(\n df_images.groupby([split_by])\n .agg(img_count=(\"image_id\", \"count\"), ann_count=(\"ann_count\", \"sum\"))\n .sort_values([\"img_count\"], ascending=False)\n ) # noqa\n df_groupings = df_groupings.reset_index()\n df_groupings = df_groupings.set_index(split_by)\n df_groupings = (\n df_groupings.merge(\n pd.DataFrame(df_groupings.img_count.cumsum())\n .reset_index()\n .set_index(split_by)\n .rename(columns={\"img_count\": \"img_count_cum\"}),\n left_index=True,\n right_index=True,\n )\n .reset_index()\n .set_index(split_by)\n )\n\n # Compute which groups go into which split, according to the train_pct setting:\n df_groupings[\"split\"] = df_groupings.apply(\n lambda row: \"train\" if row.img_count_cum <= int(train_pct * len(df_images)) else \"val\",\n axis=1,\n )\n\n display(df_groupings)\n\n # Generate and save the splits:\n train_images = df_images[\n df_images.author.isin(\n df_groupings[df_groupings.split == \"train\"].reset_index().author,\n )\n ]\n val_images = df_images[~df_images.image_id.isin(train_images.image_id)]\n generate_coco_split(\n coco_path,\n \"train\",\n train_images.image_id.values.tolist(),\n )\n generate_coco_split(\n coco_path,\n \"val\",\n val_images.image_id.values.tolist(),\n )\n\n\ndef generate_coco_split(input_file: Path, split: str, img_ids: List[int]):\n coco = COCO(input_file)\n output_path = 
input_file.with_name(f\"{input_file.stem}_{split}.json\")\n print(\"input_path: \", input_file)\n print(\"output_path: \", output_path)\n\n coco_builder = CocoJsonBuilder(\n coco.cats, dest_path=output_path.parent, dest_name=output_path.name\n )\n for img_id in img_ids:\n coco_builder.add_image(coco.imgs[img_id], coco.imgToAnns[img_id])\n coco_builder.save()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
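The split logic in generate_train_val_splits above reduces to: order the groups by image count, then send a group to "train" while the cumulative image count stays under train_pct of all images. A standalone sketch of that rule (the column values here are made up); note, as a side observation, that the original filters train images by `author` even when `split_by` names a different column, which looks like a latent bug:

    import pandas as pd

    df_images = pd.DataFrame({"image_id": range(10),
                              "author": list("aaaabbbccd")})
    train_pct = 0.8
    counts = (df_images.groupby("author").size()
              .sort_values(ascending=False).rename("img_count").to_frame())
    counts["img_count_cum"] = counts.img_count.cumsum()
    counts["split"] = (counts.img_count_cum <= int(train_pct * len(df_images))) \
        .map({True: "train", False: "val"})
    print(counts)  # authors a, b -> train (cum 4, 7); c, d -> val (cum 9, 10)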
TamerMograbi/ShadowNet | [
"99a9fb4522546e58817bbdd373f63d6996685e21"
] | [
"data/aligned_dataset.py"
] | [
"import os.path\nfrom data.base_dataset import BaseDataset, get_params, get_transform\nfrom data.image_folder import make_dataset\nfrom PIL import Image\nimport cv2\nimport numpy as np\n\nclass AlignedDataset(BaseDataset):\n \"\"\"A dataset class for paired image dataset.\n\n It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.\n During test time, you need to prepare a directory '/path/to/data/test'.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize this dataset class.\n\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseDataset.__init__(self, opt)\n self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory\n self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths\n assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image\n self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc\n self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc\n \"\"\"\n def __getitem__(self, index):\n Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns a dictionary that contains A, B, A_paths and B_paths\n A (tensor) - - an image in the input domain\n B (tensor) - - its corresponding image in the target domain\n A_paths (str) - - image paths\n B_paths (str) - - image paths (same as A_paths)\n \n # read a image given a random integer index\n AB_path = self.AB_paths[index]\n AB = Image.open(AB_path).convert('RGB')\n # split AB image into A and B\n w, h = AB.size\n w2 = int(w / 2)\n A = AB.crop((0, 0, w2, h))\n B = AB.crop((w2, 0, w, h))\n\n print(\"before: \", A.size)\n\n # apply the same transform to both A and B\n transform_params = get_params(self.opt, A.size)\n A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))\n B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))\n\n A = A_transform(A)\n B = B_transform(B)\n\n print(\"after: \", A.shape)\n\n return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}\n \"\"\"\n def __getitem__(self, index):\n \"\"\"Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns a dictionary that contains A, B, A_paths and B_paths\n A (tensor) - - an image in the input domain\n B (tensor) - - its corresponding image in the target domain\n A_paths (str) - - image paths\n B_paths (str) - - image paths (same as A_paths)\n \"\"\"\n # read a image given a random integer index\n AB_path = self.AB_paths[index]\n AB = cv2.imread(AB_path, cv2.IMREAD_COLOR)\n # split AB image into A {noShadow, lightMap, depthMap} and B {groundTruth}\n height = AB.shape[0]\n width = AB.shape[1]\n #print(\"AB shape: \", AB.shape)\n\n \"\"\"\n noShadow = AB[:, :int(width/4), :]\n lightMap = AB[:, int(width/4):int(width/2), :]\n depthMap = AB[:, int(width/2):3 * int(width/4), :]\n \n A = np.stack((noShadow[:,:,0], noShadow[:,:,1], noShadow[:,:,2], \\\n lightMap[:,:,0], lightMap[:,:,1], lightMap[:,:,2], \\\n depthMap[:,:,0]), axis=2)\n # A has 7 channels - 3, 3, 1\n\n #print(\"A before: \", A.shape)\n # B has 3 channels\n B = AB[:, 3*int(width/4):width, :]\n \"\"\"\n\n noShadow = AB[:, :int(width/11), :]\n lightMap = AB[:, int(width/11):2 * int(width/11), :]\n depthMap0 = AB[:, 2 * int(width/11):3 * 
int(width/11), :]\n depthMap1 = AB[:, 3 * int(width / 11):4 * int(width / 11), :]\n depthMap2 = AB[:, 4 * int(width / 11):5 * int(width / 11), :]\n depthMap3 = AB[:, 5 * int(width / 11):6 * int(width / 11), :]\n depthMap4 = AB[:, 6 * int(width / 11):7 * int(width / 11), :]\n depthMap5 = AB[:, 7 * int(width / 11):8 * int(width / 11), :]\n depthMap6 = AB[:, 8 * int(width / 11):9 * int(width / 11), :]\n depthMap7 = AB[:, 9 * int(width / 11):10 * int(width / 11), :]\n\n A = np.stack((noShadow[:,:,0], noShadow[:,:,1], noShadow[:,:,2], \\\n lightMap[:,:,0], lightMap[:,:,1], lightMap[:,:,2], \\\n depthMap0[:,:,0], depthMap1[:,:,0], depthMap2[:,:,0], \\\n depthMap3[:,:,0], depthMap4[:,:,0], depthMap5[:,:,0], \\\n depthMap6[:,:,0], depthMap7[:,:,0]), axis=2)\n # A has 14 channels - 3, 3, 8\n\n #print(\"A before: \", A.shape)\n # B has 3 channels\n B = AB[:, 10*int(width/11):width, :]\n\n A_transform = get_transform(self.opt, None, A.shape[2]) # third argument is number of channels\n B_transform = get_transform(self.opt, None, B.shape[2]) # third argument is number of channels\n\n #A_transform = get_transform(self.opt, None, grayscale=(self.input_nc == 1)) # third argument is number of channels\n #B_transform = get_transform(self.opt, None, grayscale=(self.input_nc == 1)) # third argument is number of channels\n\n A = A_transform(A)\n B = B_transform(B)\n #print(\"A after: \", A.shape)\n return {'A':A, 'B':B, 'A_paths': AB_path, 'B_paths': AB_path}\n\n def __len__(self):\n \"\"\"Return the total number of images in the dataset.\"\"\"\n return len(self.AB_paths)\n"
] | [
[
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
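The depthMap0..depthMap7 slices in __getitem__ above are eight copies of the same arithmetic; a hedged numpy sketch of an equivalent loop (behavior matches when width is an exact multiple of 11 — in the original, the final `:width` slice also lets tile B absorb any remainder):

    import numpy as np

    AB = np.zeros((256, 11 * 64, 3), dtype=np.uint8)  # stand-in for cv2.imread output
    w = AB.shape[1] // 11
    tiles = [AB[:, i * w:(i + 1) * w, :] for i in range(11)]
    noShadow, lightMap, depthMaps, B = tiles[0], tiles[1], tiles[2:10], tiles[10]
    A = np.stack([noShadow[..., c] for c in range(3)]
                 + [lightMap[..., c] for c in range(3)]
                 + [d[..., 0] for d in depthMaps], axis=2)
    assert A.shape[2] == 14  # 3 + 3 + 8 channels, as in the original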
hershg/ray | [
"a1744f67fe954d8408c5b84e28ecccc130157f8e",
"a1744f67fe954d8408c5b84e28ecccc130157f8e",
"a1744f67fe954d8408c5b84e28ecccc130157f8e",
"a1744f67fe954d8408c5b84e28ecccc130157f8e",
"a1744f67fe954d8408c5b84e28ecccc130157f8e",
"a1744f67fe954d8408c5b84e28ecccc130157f8e",
"2e30f7ba386e716bf80f019dcd473b67d83abb95"
] | [
"rllib/models/catalog.py",
"rllib/examples/custom_keras_rnn_model.py",
"rllib/tests/test_nested_spaces.py",
"python/ray/experimental/sgd/tests/test_tensorflow.py",
"rllib/tests/test_reproducibility.py",
"rllib/policy/rnn_sequencing.py",
"rllib/offline/mixed_input.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gym\nimport logging\nimport numpy as np\nfrom functools import partial\n\nfrom ray.tune.registry import RLLIB_MODEL, RLLIB_PREPROCESSOR, \\\n RLLIB_ACTION_DIST, _global_registry\n\nfrom ray.rllib.models.extra_spaces import Simplex\nfrom ray.rllib.models.torch.torch_action_dist import (TorchCategorical,\n TorchDiagGaussian)\nfrom ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork as FCNetV2\nfrom ray.rllib.models.tf.visionnet_v2 import VisionNetwork as VisionNetV2\nfrom ray.rllib.models.tf.tf_action_dist import (\n Categorical, MultiCategorical, Deterministic, DiagGaussian,\n MultiActionDistribution, Dirichlet)\nfrom ray.rllib.models.preprocessors import get_preprocessor\nfrom ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork\nfrom ray.rllib.models.tf.lstm_v1 import LSTM\nfrom ray.rllib.models.tf.modelv1_compat import make_v1_wrapper\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.models.tf.visionnet_v1 import VisionNetwork\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.utils import try_import_tf\nfrom ray.rllib.utils.annotations import DeveloperAPI, PublicAPI\nfrom ray.rllib.utils.error import UnsupportedSpaceException\n\ntf = try_import_tf()\n\nlogger = logging.getLogger(__name__)\n\n# yapf: disable\n# __sphinx_doc_begin__\nMODEL_DEFAULTS = {\n # === Built-in options ===\n # Filter config. List of [out_channels, kernel, stride] for each filter\n \"conv_filters\": None,\n # Nonlinearity for built-in convnet\n \"conv_activation\": \"relu\",\n # Nonlinearity for fully connected net (tanh, relu)\n \"fcnet_activation\": \"tanh\",\n # Number of hidden layers for fully connected net\n \"fcnet_hiddens\": [256, 256],\n # For control envs, documented in ray.rllib.models.Model\n \"free_log_std\": False,\n # Whether to skip the final linear layer used to resize the hidden layer\n # outputs to size `num_outputs`. 
If True, then the last hidden layer\n # should already match num_outputs.\n \"no_final_linear\": False,\n # Whether layers should be shared for the value function.\n \"vf_share_layers\": True,\n\n # == LSTM ==\n # Whether to wrap the model with a LSTM\n \"use_lstm\": False,\n # Max seq len for training the LSTM, defaults to 20\n \"max_seq_len\": 20,\n # Size of the LSTM cell\n \"lstm_cell_size\": 256,\n # Whether to feed a_{t-1}, r_{t-1} to LSTM\n \"lstm_use_prev_action_reward\": False,\n # When using modelv1 models with a modelv2 algorithm, you may have to\n # define the state shape here (e.g., [256, 256]).\n \"state_shape\": None,\n\n # == Atari ==\n # Whether to enable framestack for Atari envs\n \"framestack\": True,\n # Final resized frame dimension\n \"dim\": 84,\n # (deprecated) Converts ATARI frame to 1 Channel Grayscale image\n \"grayscale\": False,\n # (deprecated) Changes frame to range from [-1, 1] if true\n \"zero_mean\": True,\n\n # === Options for custom models ===\n # Name of a custom preprocessor to use\n \"custom_preprocessor\": None,\n # Name of a custom model to use\n \"custom_model\": None,\n # Name of a custom action distribution to use\n \"custom_action_dist\": None,\n # Extra options to pass to the custom classes\n \"custom_options\": {},\n}\n# __sphinx_doc_end__\n# yapf: enable\n\n\n@PublicAPI\nclass ModelCatalog(object):\n \"\"\"Registry of models, preprocessors, and action distributions for envs.\n\n Examples:\n >>> prep = ModelCatalog.get_preprocessor(env)\n >>> observation = prep.transform(raw_observation)\n\n >>> dist_class, dist_dim = ModelCatalog.get_action_dist(\n env.action_space, {})\n >>> model = ModelCatalog.get_model(inputs, dist_dim, options)\n >>> dist = dist_class(model.outputs, model)\n >>> action = dist.sample()\n \"\"\"\n\n @staticmethod\n @DeveloperAPI\n def get_action_dist(action_space, config, dist_type=None, torch=False):\n \"\"\"Returns action distribution class and size for the given action space.\n\n Args:\n action_space (Space): Action space of the target gym env.\n config (dict): Optional model config.\n dist_type (str): Optional identifier of the action distribution.\n torch (bool): Optional whether to return PyTorch distribution.\n\n Returns:\n dist_class (ActionDistribution): Python class of the distribution.\n dist_dim (int): The size of the input vector to the distribution.\n \"\"\"\n\n config = config or MODEL_DEFAULTS\n if config.get(\"custom_action_dist\"):\n action_dist_name = config[\"custom_action_dist\"]\n logger.debug(\n \"Using custom action distribution {}\".format(action_dist_name))\n dist = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name)\n\n elif isinstance(action_space, gym.spaces.Box):\n if len(action_space.shape) > 1:\n raise UnsupportedSpaceException(\n \"Action space has multiple dimensions \"\n \"{}. 
\".format(action_space.shape) +\n \"Consider reshaping this into a single dimension, \"\n \"using a custom action distribution, \"\n \"using a Tuple action space, or the multi-agent API.\")\n if dist_type is None:\n dist = TorchDiagGaussian if torch else DiagGaussian\n elif dist_type == \"deterministic\":\n dist = Deterministic\n elif isinstance(action_space, gym.spaces.Discrete):\n dist = TorchCategorical if torch else Categorical\n elif isinstance(action_space, gym.spaces.Tuple):\n if torch:\n raise NotImplementedError(\"Tuple action spaces not supported \"\n \"for Pytorch.\")\n child_dist = []\n input_lens = []\n for action in action_space.spaces:\n dist, action_size = ModelCatalog.get_action_dist(\n action, config)\n child_dist.append(dist)\n input_lens.append(action_size)\n return partial(\n MultiActionDistribution,\n child_distributions=child_dist,\n action_space=action_space,\n input_lens=input_lens), sum(input_lens)\n elif isinstance(action_space, Simplex):\n if torch:\n raise NotImplementedError(\"Simplex action spaces not \"\n \"supported for Pytorch.\")\n dist = Dirichlet\n elif isinstance(action_space, gym.spaces.MultiDiscrete):\n if torch:\n raise NotImplementedError(\"MultiDiscrete action spaces not \"\n \"supported for Pytorch.\")\n return partial(MultiCategorical, input_lens=action_space.nvec), \\\n int(sum(action_space.nvec))\n\n return dist, dist.required_model_output_shape(action_space, config)\n\n raise NotImplementedError(\"Unsupported args: {} {}\".format(\n action_space, dist_type))\n\n @staticmethod\n @DeveloperAPI\n def get_action_shape(action_space):\n \"\"\"Returns action tensor dtype and shape for the action space.\n\n Args:\n action_space (Space): Action space of the target gym env.\n Returns:\n (dtype, shape): Dtype and shape of the actions tensor.\n \"\"\"\n\n if isinstance(action_space, gym.spaces.Discrete):\n return (tf.int64, (None, ))\n elif isinstance(action_space, (gym.spaces.Box, Simplex)):\n return (tf.float32, (None, ) + action_space.shape)\n elif isinstance(action_space, gym.spaces.MultiDiscrete):\n return (tf.as_dtype(action_space.dtype),\n (None, ) + action_space.shape)\n elif isinstance(action_space, gym.spaces.Tuple):\n size = 0\n all_discrete = True\n for i in range(len(action_space.spaces)):\n if isinstance(action_space.spaces[i], gym.spaces.Discrete):\n size += 1\n else:\n all_discrete = False\n size += np.product(action_space.spaces[i].shape)\n return (tf.int64 if all_discrete else tf.float32, (None, size))\n else:\n raise NotImplementedError(\"action space {}\"\n \" not supported\".format(action_space))\n\n @staticmethod\n @DeveloperAPI\n def get_action_placeholder(action_space):\n \"\"\"Returns an action placeholder consistent with the action space\n\n Args:\n action_space (Space): Action space of the target gym env.\n Returns:\n action_placeholder (Tensor): A placeholder for the actions\n \"\"\"\n\n dtype, shape = ModelCatalog.get_action_shape(action_space)\n\n return tf.placeholder(dtype, shape=shape, name=\"action\")\n\n @staticmethod\n @DeveloperAPI\n def get_model_v2(obs_space,\n action_space,\n num_outputs,\n model_config,\n framework,\n name=\"default_model\",\n model_interface=None,\n default_model=None,\n **model_kwargs):\n \"\"\"Returns a suitable model compatible with given spaces and output.\n\n Args:\n obs_space (Space): Observation space of the target gym env. 
This\n may have an `original_space` attribute that specifies how to\n unflatten the tensor into a ragged tensor.\n action_space (Space): Action space of the target gym env.\n num_outputs (int): The size of the output vector of the model.\n framework (str): Either \"tf\" or \"torch\".\n name (str): Name (scope) for the model.\n model_interface (cls): Interface required for the model\n default_model (cls): Override the default class for the model. This\n only has an effect when not using a custom model\n model_kwargs (dict): args to pass to the ModelV2 constructor\n\n Returns:\n model (ModelV2): Model to use for the policy.\n \"\"\"\n\n if model_config.get(\"custom_model\"):\n model_cls = _global_registry.get(RLLIB_MODEL,\n model_config[\"custom_model\"])\n if issubclass(model_cls, ModelV2):\n if model_interface and not issubclass(model_cls,\n model_interface):\n raise ValueError(\"The given model must subclass\",\n model_interface)\n\n if framework == \"tf\":\n created = set()\n\n # Track and warn if vars were created but not registered\n def track_var_creation(next_creator, **kw):\n v = next_creator(**kw)\n created.add(v)\n return v\n\n with tf.variable_creator_scope(track_var_creation):\n instance = model_cls(obs_space, action_space,\n num_outputs, model_config, name,\n **model_kwargs)\n registered = set(instance.variables())\n not_registered = set()\n for var in created:\n if var not in registered:\n not_registered.add(var)\n if not_registered:\n raise ValueError(\n \"It looks like variables {} were created as part \"\n \"of {} but does not appear in model.variables() \"\n \"({}). Did you forget to call \"\n \"model.register_variables() on the variables in \"\n \"question?\".format(not_registered, instance,\n registered))\n else:\n # no variable tracking\n instance = model_cls(obs_space, action_space, num_outputs,\n model_config, name, **model_kwargs)\n return instance\n elif tf.executing_eagerly():\n raise ValueError(\n \"Eager execution requires a TFModelV2 model to be \"\n \"used, however you specified a custom model {}\".format(\n model_cls))\n\n if framework == \"tf\":\n v2_class = None\n # try to get a default v2 model\n if not model_config.get(\"custom_model\"):\n v2_class = default_model or ModelCatalog._get_v2_model(\n obs_space, model_config)\n # fallback to a default v1 model\n if v2_class is None:\n if tf.executing_eagerly():\n raise ValueError(\n \"Eager execution requires a TFModelV2 model to be \"\n \"used, however there is no default V2 model for this \"\n \"observation space: {}, use_lstm={}\".format(\n obs_space, model_config.get(\"use_lstm\")))\n v2_class = make_v1_wrapper(ModelCatalog.get_model)\n # wrap in the requested interface\n wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)\n return wrapper(obs_space, action_space, num_outputs, model_config,\n name, **model_kwargs)\n elif framework == \"torch\":\n if default_model:\n return default_model(obs_space, action_space, num_outputs,\n model_config, name)\n return ModelCatalog._get_default_torch_model_v2(\n obs_space, action_space, num_outputs, model_config, name)\n else:\n raise NotImplementedError(\n \"Framework must be 'tf' or 'torch': {}\".format(framework))\n\n @staticmethod\n @DeveloperAPI\n def get_preprocessor(env, options=None):\n \"\"\"Returns a suitable preprocessor for the given env.\n\n This is a wrapper for get_preprocessor_for_space().\n \"\"\"\n\n return ModelCatalog.get_preprocessor_for_space(env.observation_space,\n options)\n\n @staticmethod\n @DeveloperAPI\n def 
get_preprocessor_for_space(observation_space, options=None):\n \"\"\"Returns a suitable preprocessor for the given observation space.\n\n Args:\n observation_space (Space): The input observation space.\n options (dict): Options to pass to the preprocessor.\n\n Returns:\n preprocessor (Preprocessor): Preprocessor for the observations.\n \"\"\"\n\n options = options or MODEL_DEFAULTS\n for k in options.keys():\n if k not in MODEL_DEFAULTS:\n raise Exception(\"Unknown config key `{}`, all keys: {}\".format(\n k, list(MODEL_DEFAULTS)))\n\n if options.get(\"custom_preprocessor\"):\n preprocessor = options[\"custom_preprocessor\"]\n logger.info(\"Using custom preprocessor {}\".format(preprocessor))\n prep = _global_registry.get(RLLIB_PREPROCESSOR, preprocessor)(\n observation_space, options)\n else:\n cls = get_preprocessor(observation_space)\n prep = cls(observation_space, options)\n\n logger.debug(\"Created preprocessor {}: {} -> {}\".format(\n prep, observation_space, prep.shape))\n return prep\n\n @staticmethod\n @PublicAPI\n def register_custom_preprocessor(preprocessor_name, preprocessor_class):\n \"\"\"Register a custom preprocessor class by name.\n\n The preprocessor can be later used by specifying\n {\"custom_preprocessor\": preprocesor_name} in the model config.\n\n Args:\n preprocessor_name (str): Name to register the preprocessor under.\n preprocessor_class (type): Python class of the preprocessor.\n \"\"\"\n _global_registry.register(RLLIB_PREPROCESSOR, preprocessor_name,\n preprocessor_class)\n\n @staticmethod\n @PublicAPI\n def register_custom_model(model_name, model_class):\n \"\"\"Register a custom model class by name.\n\n The model can be later used by specifying {\"custom_model\": model_name}\n in the model config.\n\n Args:\n model_name (str): Name to register the model under.\n model_class (type): Python class of the model.\n \"\"\"\n _global_registry.register(RLLIB_MODEL, model_name, model_class)\n\n @staticmethod\n @PublicAPI\n def register_custom_action_dist(action_dist_name, action_dist_class):\n \"\"\"Register a custom action distribution class by name.\n\n The model can be later used by specifying\n {\"custom_action_dist\": action_dist_name} in the model config.\n\n Args:\n model_name (str): Name to register the action distribution under.\n model_class (type): Python class of the action distribution.\n \"\"\"\n _global_registry.register(RLLIB_ACTION_DIST, action_dist_name,\n action_dist_class)\n\n @staticmethod\n def _wrap_if_needed(model_cls, model_interface):\n assert issubclass(model_cls, TFModelV2), model_cls\n\n if not model_interface or issubclass(model_cls, model_interface):\n return model_cls\n\n class wrapper(model_interface, model_cls):\n pass\n\n name = \"{}_as_{}\".format(model_cls.__name__, model_interface.__name__)\n wrapper.__name__ = name\n wrapper.__qualname__ = name\n\n return wrapper\n\n @staticmethod\n def _get_default_torch_model_v2(obs_space, action_space, num_outputs,\n model_config, name):\n from ray.rllib.models.torch.fcnet import (FullyConnectedNetwork as\n PyTorchFCNet)\n from ray.rllib.models.torch.visionnet import (VisionNetwork as\n PyTorchVisionNet)\n\n model_config = model_config or MODEL_DEFAULTS\n\n if model_config.get(\"use_lstm\"):\n raise NotImplementedError(\n \"LSTM auto-wrapping not implemented for torch\")\n\n if isinstance(obs_space, gym.spaces.Discrete):\n obs_rank = 1\n else:\n obs_rank = len(obs_space.shape)\n\n if obs_rank > 2:\n return PyTorchVisionNet(obs_space, action_space, num_outputs,\n model_config, name)\n\n return 
PyTorchFCNet(obs_space, action_space, num_outputs, model_config,\n name)\n\n @staticmethod\n def get_model(input_dict,\n obs_space,\n action_space,\n num_outputs,\n options,\n state_in=None,\n seq_lens=None):\n \"\"\"Deprecated: use get_model_v2() instead.\"\"\"\n\n assert isinstance(input_dict, dict)\n options = options or MODEL_DEFAULTS\n model = ModelCatalog._get_model(input_dict, obs_space, action_space,\n num_outputs, options, state_in,\n seq_lens)\n\n if options.get(\"use_lstm\"):\n copy = dict(input_dict)\n copy[\"obs\"] = model.last_layer\n feature_space = gym.spaces.Box(\n -1, 1, shape=(model.last_layer.shape[1], ))\n model = LSTM(copy, feature_space, action_space, num_outputs,\n options, state_in, seq_lens)\n\n logger.debug(\n \"Created model {}: ({} of {}, {}, {}, {}) -> {}, {}\".format(\n model, input_dict, obs_space, action_space, state_in, seq_lens,\n model.outputs, model.state_out))\n\n model._validate_output_shape()\n return model\n\n @staticmethod\n def _get_model(input_dict, obs_space, action_space, num_outputs, options,\n state_in, seq_lens):\n if options.get(\"custom_model\"):\n model = options[\"custom_model\"]\n logger.debug(\"Using custom model {}\".format(model))\n return _global_registry.get(RLLIB_MODEL, model)(\n input_dict,\n obs_space,\n action_space,\n num_outputs,\n options,\n state_in=state_in,\n seq_lens=seq_lens)\n\n obs_rank = len(input_dict[\"obs\"].shape) - 1 # drops batch dim\n\n if obs_rank > 2:\n return VisionNetwork(input_dict, obs_space, action_space,\n num_outputs, options)\n\n return FullyConnectedNetwork(input_dict, obs_space, action_space,\n num_outputs, options)\n\n @staticmethod\n def _get_v2_model(obs_space, options):\n options = options or MODEL_DEFAULTS\n obs_rank = len(obs_space.shape)\n\n if options.get(\"use_lstm\"):\n return None # TODO: default LSTM v2 not implemented\n\n if obs_rank > 2:\n return VisionNetV2\n\n return FCNetV2\n\n @staticmethod\n def get_torch_model(obs_space,\n num_outputs,\n options=None,\n default_model_cls=None):\n raise DeprecationWarning(\"Please use get_model_v2() instead.\")\n",
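ModelCatalog.get_action_shape above flattens Tuple spaces by giving each Discrete member one slot and each Box member np.product(shape) slots. A standalone sketch of just that size computation (np.product mirrors the catalog code; np.prod is the modern spelling):

    import gym
    import numpy as np

    space = gym.spaces.Tuple((
        gym.spaces.Discrete(3),
        gym.spaces.Box(low=0.0, high=1.0, shape=(2, 2)),
    ))
    size = 0
    for sub in space.spaces:
        if isinstance(sub, gym.spaces.Discrete):
            size += 1                           # one integer slot
        else:
            size += int(np.product(sub.shape))  # flattened Box
    print(size)  # 5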
"\"\"\"Example of using a custom RNN keras model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gym\nfrom gym.spaces import Discrete\nimport numpy as np\nimport random\nimport argparse\n\nimport ray\nfrom ray import tune\nfrom ray.tune.registry import register_env\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--run\", type=str, default=\"PPO\")\nparser.add_argument(\"--env\", type=str, default=\"RepeatAfterMeEnv\")\nparser.add_argument(\"--stop\", type=int, default=90)\n\n\nclass MyKerasRNN(RecurrentTFModelV2):\n \"\"\"Example of using the Keras functional API to define a RNN model.\"\"\"\n\n def __init__(self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n hiddens_size=256,\n cell_size=64):\n super(MyKerasRNN, self).__init__(obs_space, action_space, num_outputs,\n model_config, name)\n self.cell_size = cell_size\n\n # Define input layers\n input_layer = tf.keras.layers.Input(\n shape=(None, obs_space.shape[0]), name=\"inputs\")\n state_in_h = tf.keras.layers.Input(shape=(cell_size, ), name=\"h\")\n state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name=\"c\")\n seq_in = tf.keras.layers.Input(shape=(), name=\"seq_in\", dtype=tf.int32)\n\n # Preprocess observation with a hidden layer and send to LSTM cell\n dense1 = tf.keras.layers.Dense(\n hiddens_size, activation=tf.nn.relu, name=\"dense1\")(input_layer)\n lstm_out, state_h, state_c = tf.keras.layers.LSTM(\n cell_size, return_sequences=True, return_state=True, name=\"lstm\")(\n inputs=dense1,\n mask=tf.sequence_mask(seq_in),\n initial_state=[state_in_h, state_in_c])\n\n # Postprocess LSTM output with another hidden layer and compute values\n logits = tf.keras.layers.Dense(\n self.num_outputs,\n activation=tf.keras.activations.linear,\n name=\"logits\")(lstm_out)\n values = tf.keras.layers.Dense(\n 1, activation=None, name=\"values\")(lstm_out)\n\n # Create the RNN model\n self.rnn_model = tf.keras.Model(\n inputs=[input_layer, seq_in, state_in_h, state_in_c],\n outputs=[logits, values, state_h, state_c])\n self.register_variables(self.rnn_model.variables)\n self.rnn_model.summary()\n\n @override(RecurrentTFModelV2)\n def forward_rnn(self, inputs, state, seq_lens):\n model_out, self._value_out, h, c = self.rnn_model([inputs, seq_lens] +\n state)\n return model_out, [h, c]\n\n @override(ModelV2)\n def get_initial_state(self):\n return [\n np.zeros(self.cell_size, np.float32),\n np.zeros(self.cell_size, np.float32),\n ]\n\n @override(ModelV2)\n def value_function(self):\n return tf.reshape(self._value_out, [-1])\n\n\nclass RepeatInitialEnv(gym.Env):\n \"\"\"Simple env in which the policy learns to repeat the initial observation\n seen at timestep 0.\"\"\"\n\n def __init__(self):\n self.observation_space = Discrete(2)\n self.action_space = Discrete(2)\n self.token = None\n self.num_steps = 0\n\n def reset(self):\n self.token = random.choice([0, 1])\n self.num_steps = 0\n return self.token\n\n def step(self, action):\n if action == self.token:\n reward = 1\n else:\n reward = -1\n self.num_steps += 1\n done = self.num_steps > 100\n return 0, reward, done, {}\n\n\nclass RepeatAfterMeEnv(gym.Env):\n \"\"\"Simple env in which the policy learns to 
repeat a previous observation\n token after a given delay.\"\"\"\n\n def __init__(self, config):\n self.observation_space = Discrete(2)\n self.action_space = Discrete(2)\n self.delay = config[\"repeat_delay\"]\n assert self.delay >= 1, \"delay must be at least 1\"\n self.history = []\n\n def reset(self):\n self.history = [0] * self.delay\n return self._next_obs()\n\n def step(self, action):\n if action == self.history[-(1 + self.delay)]:\n reward = 1\n else:\n reward = -1\n done = len(self.history) > 100\n return self._next_obs(), reward, done, {}\n\n def _next_obs(self):\n token = random.choice([0, 1])\n self.history.append(token)\n return token\n\n\nif __name__ == \"__main__\":\n ray.init()\n args = parser.parse_args()\n ModelCatalog.register_custom_model(\"rnn\", MyKerasRNN)\n register_env(\"RepeatAfterMeEnv\", lambda c: RepeatAfterMeEnv(c))\n register_env(\"RepeatInitialEnv\", lambda _: RepeatInitialEnv())\n tune.run(\n args.run,\n stop={\"episode_reward_mean\": args.stop},\n config={\n \"env\": args.env,\n \"env_config\": {\n \"repeat_delay\": 2,\n },\n \"gamma\": 0.9,\n \"num_workers\": 0,\n \"num_envs_per_worker\": 20,\n \"entropy_coeff\": 0.001,\n \"num_sgd_iter\": 5,\n \"vf_loss_coeff\": 1e-5,\n \"model\": {\n \"custom_model\": \"rnn\",\n \"max_seq_len\": 20,\n },\n })\n",
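The LSTM call in MyKerasRNN relies on tf.sequence_mask to turn the seq_in lengths into a boolean mask over padded timesteps; a minimal sketch of that one call:

    import tensorflow as tf

    mask = tf.sequence_mask([1, 3], maxlen=4)
    # [[ True, False, False, False],
    #  [ True,  True,  True, False]]  -> padded steps are masked out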
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\n\nfrom gym import spaces\nfrom gym.envs.registration import EnvSpec\nimport gym\nimport torch.nn as nn\nimport unittest\n\nimport ray\nfrom ray.rllib.agents.a3c import A2CTrainer\nfrom ray.rllib.agents.pg import PGTrainer\nfrom ray.rllib.agents.pg.pg_policy import PGTFPolicy\nfrom ray.rllib.env import MultiAgentEnv\nfrom ray.rllib.env.base_env import BaseEnv\nfrom ray.rllib.env.vector_env import VectorEnv\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.model import Model\nfrom ray.rllib.models.torch.fcnet import FullyConnectedNetwork\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.rollout import rollout\nfrom ray.rllib.tests.test_external_env import SimpleServing\nfrom ray.tune.registry import register_env\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\nDICT_SPACE = spaces.Dict({\n \"sensors\": spaces.Dict({\n \"position\": spaces.Box(low=-100, high=100, shape=(3, )),\n \"velocity\": spaces.Box(low=-1, high=1, shape=(3, )),\n \"front_cam\": spaces.Tuple(\n (spaces.Box(low=0, high=1, shape=(10, 10, 3)),\n spaces.Box(low=0, high=1, shape=(10, 10, 3)))),\n \"rear_cam\": spaces.Box(low=0, high=1, shape=(10, 10, 3)),\n }),\n \"inner_state\": spaces.Dict({\n \"charge\": spaces.Discrete(100),\n \"job_status\": spaces.Dict({\n \"task\": spaces.Discrete(5),\n \"progress\": spaces.Box(low=0, high=100, shape=()),\n })\n })\n})\n\nDICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]\n\nTUPLE_SPACE = spaces.Tuple([\n spaces.Box(low=-100, high=100, shape=(3, )),\n spaces.Tuple((spaces.Box(low=0, high=1, shape=(10, 10, 3)),\n spaces.Box(low=0, high=1, shape=(10, 10, 3)))),\n spaces.Discrete(5),\n])\n\nTUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]\n\n\ndef one_hot(i, n):\n out = [0.0] * n\n out[i] = 1.0\n return out\n\n\nclass NestedDictEnv(gym.Env):\n def __init__(self):\n self.action_space = spaces.Discrete(2)\n self.observation_space = DICT_SPACE\n self._spec = EnvSpec(\"NestedDictEnv-v0\")\n self.steps = 0\n\n def reset(self):\n self.steps = 0\n return DICT_SAMPLES[0]\n\n def step(self, action):\n self.steps += 1\n return DICT_SAMPLES[self.steps], 1, self.steps >= 5, {}\n\n\nclass NestedTupleEnv(gym.Env):\n def __init__(self):\n self.action_space = spaces.Discrete(2)\n self.observation_space = TUPLE_SPACE\n self._spec = EnvSpec(\"NestedTupleEnv-v0\")\n self.steps = 0\n\n def reset(self):\n self.steps = 0\n return TUPLE_SAMPLES[0]\n\n def step(self, action):\n self.steps += 1\n return TUPLE_SAMPLES[self.steps], 1, self.steps >= 5, {}\n\n\nclass NestedMultiAgentEnv(MultiAgentEnv):\n def __init__(self):\n self.steps = 0\n\n def reset(self):\n return {\n \"dict_agent\": DICT_SAMPLES[0],\n \"tuple_agent\": TUPLE_SAMPLES[0],\n }\n\n def step(self, actions):\n self.steps += 1\n obs = {\n \"dict_agent\": DICT_SAMPLES[self.steps],\n \"tuple_agent\": TUPLE_SAMPLES[self.steps],\n }\n rew = {\n \"dict_agent\": 0,\n \"tuple_agent\": 0,\n }\n dones = {\"__all__\": self.steps >= 5}\n infos = {\n \"dict_agent\": {},\n \"tuple_agent\": {},\n }\n return obs, rew, dones, infos\n\n\nclass InvalidModel(Model):\n def _build_layers_v2(self, input_dict, num_outputs, options):\n return \"not\", \"valid\"\n\n\nclass InvalidModel2(Model):\n def _build_layers_v2(self, input_dict, num_outputs, options):\n return tf.constant(0), tf.constant(0)\n\n\nclass TorchSpyModel(TorchModelV2, nn.Module):\n capture_index = 0\n\n 
def __init__(self, obs_space, action_space, num_outputs, model_config,\n name):\n TorchModelV2.__init__(self, obs_space, action_space, num_outputs,\n model_config, name)\n nn.Module.__init__(self)\n self.fc = FullyConnectedNetwork(\n obs_space.original_space.spaces[\"sensors\"].spaces[\"position\"],\n action_space, num_outputs, model_config, name)\n\n def forward(self, input_dict, state, seq_lens):\n pos = input_dict[\"obs\"][\"sensors\"][\"position\"].numpy()\n front_cam = input_dict[\"obs\"][\"sensors\"][\"front_cam\"][0].numpy()\n task = input_dict[\"obs\"][\"inner_state\"][\"job_status\"][\"task\"].numpy()\n ray.experimental.internal_kv._internal_kv_put(\n \"torch_spy_in_{}\".format(TorchSpyModel.capture_index),\n pickle.dumps((pos, front_cam, task)),\n overwrite=True)\n TorchSpyModel.capture_index += 1\n return self.fc({\n \"obs\": input_dict[\"obs\"][\"sensors\"][\"position\"]\n }, state, seq_lens)\n\n def value_function(self):\n return self.fc.value_function()\n\n\nclass DictSpyModel(Model):\n capture_index = 0\n\n def _build_layers_v2(self, input_dict, num_outputs, options):\n def spy(pos, front_cam, task):\n # TF runs this function in an isolated context, so we have to use\n # redis to communicate back to our suite\n ray.experimental.internal_kv._internal_kv_put(\n \"d_spy_in_{}\".format(DictSpyModel.capture_index),\n pickle.dumps((pos, front_cam, task)),\n overwrite=True)\n DictSpyModel.capture_index += 1\n return 0\n\n spy_fn = tf.py_func(\n spy, [\n input_dict[\"obs\"][\"sensors\"][\"position\"],\n input_dict[\"obs\"][\"sensors\"][\"front_cam\"][0],\n input_dict[\"obs\"][\"inner_state\"][\"job_status\"][\"task\"]\n ],\n tf.int64,\n stateful=True)\n\n with tf.control_dependencies([spy_fn]):\n output = tf.layers.dense(input_dict[\"obs\"][\"sensors\"][\"position\"],\n num_outputs)\n return output, output\n\n\nclass TupleSpyModel(Model):\n capture_index = 0\n\n def _build_layers_v2(self, input_dict, num_outputs, options):\n def spy(pos, cam, task):\n # TF runs this function in an isolated context, so we have to use\n # redis to communicate back to our suite\n ray.experimental.internal_kv._internal_kv_put(\n \"t_spy_in_{}\".format(TupleSpyModel.capture_index),\n pickle.dumps((pos, cam, task)),\n overwrite=True)\n TupleSpyModel.capture_index += 1\n return 0\n\n spy_fn = tf.py_func(\n spy, [\n input_dict[\"obs\"][0],\n input_dict[\"obs\"][1][0],\n input_dict[\"obs\"][2],\n ],\n tf.int64,\n stateful=True)\n\n with tf.control_dependencies([spy_fn]):\n output = tf.layers.dense(input_dict[\"obs\"][0], num_outputs)\n return output, output\n\n\nclass NestedSpacesTest(unittest.TestCase):\n def testInvalidModel(self):\n ModelCatalog.register_custom_model(\"invalid\", InvalidModel)\n self.assertRaises(ValueError, lambda: PGTrainer(\n env=\"CartPole-v0\", config={\n \"model\": {\n \"custom_model\": \"invalid\",\n },\n }))\n\n def testInvalidModel2(self):\n ModelCatalog.register_custom_model(\"invalid2\", InvalidModel2)\n self.assertRaisesRegexp(\n ValueError, \"Expected output.*\",\n lambda: PGTrainer(\n env=\"CartPole-v0\", config={\n \"model\": {\n \"custom_model\": \"invalid2\",\n },\n }))\n\n def doTestNestedDict(self, make_env, test_lstm=False):\n ModelCatalog.register_custom_model(\"composite\", DictSpyModel)\n register_env(\"nested\", make_env)\n pg = PGTrainer(\n env=\"nested\",\n config={\n \"num_workers\": 0,\n \"sample_batch_size\": 5,\n \"train_batch_size\": 5,\n \"model\": {\n \"custom_model\": \"composite\",\n \"use_lstm\": test_lstm,\n },\n })\n pg.train()\n\n # Check that the 
model sees the correct reconstructed observations\n for i in range(4):\n seen = pickle.loads(\n ray.experimental.internal_kv._internal_kv_get(\n \"d_spy_in_{}\".format(i)))\n pos_i = DICT_SAMPLES[i][\"sensors\"][\"position\"].tolist()\n cam_i = DICT_SAMPLES[i][\"sensors\"][\"front_cam\"][0].tolist()\n task_i = one_hot(\n DICT_SAMPLES[i][\"inner_state\"][\"job_status\"][\"task\"], 5)\n self.assertEqual(seen[0][0].tolist(), pos_i)\n self.assertEqual(seen[1][0].tolist(), cam_i)\n self.assertEqual(seen[2][0].tolist(), task_i)\n\n def doTestNestedTuple(self, make_env):\n ModelCatalog.register_custom_model(\"composite2\", TupleSpyModel)\n register_env(\"nested2\", make_env)\n pg = PGTrainer(\n env=\"nested2\",\n config={\n \"num_workers\": 0,\n \"sample_batch_size\": 5,\n \"train_batch_size\": 5,\n \"model\": {\n \"custom_model\": \"composite2\",\n },\n })\n pg.train()\n\n # Check that the model sees the correct reconstructed observations\n for i in range(4):\n seen = pickle.loads(\n ray.experimental.internal_kv._internal_kv_get(\n \"t_spy_in_{}\".format(i)))\n pos_i = TUPLE_SAMPLES[i][0].tolist()\n cam_i = TUPLE_SAMPLES[i][1][0].tolist()\n task_i = one_hot(TUPLE_SAMPLES[i][2], 5)\n self.assertEqual(seen[0][0].tolist(), pos_i)\n self.assertEqual(seen[1][0].tolist(), cam_i)\n self.assertEqual(seen[2][0].tolist(), task_i)\n\n def testNestedDictGym(self):\n self.doTestNestedDict(lambda _: NestedDictEnv())\n\n def testNestedDictGymLSTM(self):\n self.doTestNestedDict(lambda _: NestedDictEnv(), test_lstm=True)\n\n def testNestedDictVector(self):\n self.doTestNestedDict(\n lambda _: VectorEnv.wrap(lambda i: NestedDictEnv()))\n\n def testNestedDictServing(self):\n self.doTestNestedDict(lambda _: SimpleServing(NestedDictEnv()))\n\n def testNestedDictAsync(self):\n self.doTestNestedDict(lambda _: BaseEnv.to_base_env(NestedDictEnv()))\n\n def testNestedTupleGym(self):\n self.doTestNestedTuple(lambda _: NestedTupleEnv())\n\n def testNestedTupleVector(self):\n self.doTestNestedTuple(\n lambda _: VectorEnv.wrap(lambda i: NestedTupleEnv()))\n\n def testNestedTupleServing(self):\n self.doTestNestedTuple(lambda _: SimpleServing(NestedTupleEnv()))\n\n def testNestedTupleAsync(self):\n self.doTestNestedTuple(lambda _: BaseEnv.to_base_env(NestedTupleEnv()))\n\n def testMultiAgentComplexSpaces(self):\n ModelCatalog.register_custom_model(\"dict_spy\", DictSpyModel)\n ModelCatalog.register_custom_model(\"tuple_spy\", TupleSpyModel)\n register_env(\"nested_ma\", lambda _: NestedMultiAgentEnv())\n act_space = spaces.Discrete(2)\n pg = PGTrainer(\n env=\"nested_ma\",\n config={\n \"num_workers\": 0,\n \"sample_batch_size\": 5,\n \"train_batch_size\": 5,\n \"multiagent\": {\n \"policies\": {\n \"tuple_policy\": (\n PGTFPolicy, TUPLE_SPACE, act_space,\n {\"model\": {\"custom_model\": \"tuple_spy\"}}),\n \"dict_policy\": (\n PGTFPolicy, DICT_SPACE, act_space,\n {\"model\": {\"custom_model\": \"dict_spy\"}}),\n },\n \"policy_mapping_fn\": lambda a: {\n \"tuple_agent\": \"tuple_policy\",\n \"dict_agent\": \"dict_policy\"}[a],\n },\n })\n pg.train()\n\n for i in range(4):\n seen = pickle.loads(\n ray.experimental.internal_kv._internal_kv_get(\n \"d_spy_in_{}\".format(i)))\n pos_i = DICT_SAMPLES[i][\"sensors\"][\"position\"].tolist()\n cam_i = DICT_SAMPLES[i][\"sensors\"][\"front_cam\"][0].tolist()\n task_i = one_hot(\n DICT_SAMPLES[i][\"inner_state\"][\"job_status\"][\"task\"], 5)\n self.assertEqual(seen[0][0].tolist(), pos_i)\n self.assertEqual(seen[1][0].tolist(), cam_i)\n self.assertEqual(seen[2][0].tolist(), task_i)\n\n 
for i in range(4):\n seen = pickle.loads(\n ray.experimental.internal_kv._internal_kv_get(\n \"t_spy_in_{}\".format(i)))\n pos_i = TUPLE_SAMPLES[i][0].tolist()\n cam_i = TUPLE_SAMPLES[i][1][0].tolist()\n task_i = one_hot(TUPLE_SAMPLES[i][2], 5)\n self.assertEqual(seen[0][0].tolist(), pos_i)\n self.assertEqual(seen[1][0].tolist(), cam_i)\n self.assertEqual(seen[2][0].tolist(), task_i)\n\n def testRolloutDictSpace(self):\n register_env(\"nested\", lambda _: NestedDictEnv())\n agent = PGTrainer(env=\"nested\")\n agent.train()\n path = agent.save()\n agent.stop()\n\n # Test train works on restore\n agent2 = PGTrainer(env=\"nested\")\n agent2.restore(path)\n agent2.train()\n\n # Test rollout works on restore\n rollout(agent2, \"nested\", 100)\n\n def testPyTorchModel(self):\n ModelCatalog.register_custom_model(\"composite\", TorchSpyModel)\n register_env(\"nested\", lambda _: NestedDictEnv())\n a2c = A2CTrainer(\n env=\"nested\",\n config={\n \"num_workers\": 0,\n \"use_pytorch\": True,\n \"sample_batch_size\": 5,\n \"train_batch_size\": 5,\n \"model\": {\n \"custom_model\": \"composite\",\n },\n })\n\n a2c.train()\n\n # Check that the model sees the correct reconstructed observations\n for i in range(4):\n seen = pickle.loads(\n ray.experimental.internal_kv._internal_kv_get(\n \"torch_spy_in_{}\".format(i)))\n pos_i = DICT_SAMPLES[i][\"sensors\"][\"position\"].tolist()\n cam_i = DICT_SAMPLES[i][\"sensors\"][\"front_cam\"][0].tolist()\n task_i = one_hot(\n DICT_SAMPLES[i][\"inner_state\"][\"job_status\"][\"task\"], 5)\n self.assertEqual(seen[0][0].tolist(), pos_i)\n self.assertEqual(seen[1][0].tolist(), cam_i)\n self.assertEqual(seen[2][0].tolist(), task_i)\n\n\nif __name__ == \"__main__\":\n ray.init(num_cpus=5)\n unittest.main(verbosity=2)\n",
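The spy models above smuggle observations out of the TF graph by wrapping a Python callback in tf.py_func and hanging a control dependency on its result. A stripped-down sketch of that pattern, assuming a TF1 graph-mode runtime as in these tests (the internal_kv side channel is replaced by a print):

    import numpy as np
    import tensorflow as tf  # assumes TF1; tf.py_func/tf.placeholder are gone in TF2

    def spy(x):
        print("captured batch of shape", x.shape)  # stand-in for internal_kv put
        return np.int64(0)                         # dummy value, matches tf.int64 below

    inp = tf.placeholder(tf.float32, [None, 3])
    spy_op = tf.py_func(spy, [inp], tf.int64, stateful=True)
    with tf.control_dependencies([spy_op]):        # forces the spy to run
        out = tf.identity(inp)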
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pytest\nimport tempfile\nimport numpy as np\nimport shutil\n\nfrom ray import tune\nfrom ray.tests.conftest import ray_start_2_cpus # noqa: F401\nfrom ray.experimental.sgd.tf import TFTrainer, TFTrainable\n\nfrom ray.experimental.sgd.examples.tensorflow_train_example import (\n simple_model, simple_dataset)\n\nSIMPLE_CONFIG = {\n \"batch_size\": 128,\n \"fit_config\": {\n \"steps_per_epoch\": 3,\n },\n \"evaluate_config\": {\n \"steps\": 3,\n }\n}\n\n\[email protected]( # noqa: F811\n \"num_replicas\", [1, 2])\ndef test_train(ray_start_2_cpus, num_replicas): # noqa: F811\n trainer = TFTrainer(\n model_creator=simple_model,\n data_creator=simple_dataset,\n num_replicas=num_replicas,\n config=SIMPLE_CONFIG)\n\n train_stats1 = trainer.train()\n train_stats1.update(trainer.validate())\n\n train_stats2 = trainer.train()\n train_stats2.update(trainer.validate())\n\n\[email protected]( # noqa: F811\n \"num_replicas\", [1, 2])\ndef test_tune_train(ray_start_2_cpus, num_replicas): # noqa: F811\n\n config = {\n \"model_creator\": tune.function(simple_model),\n \"data_creator\": tune.function(simple_dataset),\n \"num_replicas\": num_replicas,\n \"use_gpu\": False,\n \"trainer_config\": SIMPLE_CONFIG\n }\n\n tune.run(\n TFTrainable,\n num_samples=2,\n config=config,\n stop={\"training_iteration\": 2},\n verbose=1)\n\n\[email protected]( # noqa: F811\n \"num_replicas\", [1, 2])\ndef test_save_and_restore(ray_start_2_cpus, num_replicas): # noqa: F811\n trainer1 = TFTrainer(\n model_creator=simple_model,\n data_creator=simple_dataset,\n num_replicas=num_replicas,\n config=SIMPLE_CONFIG)\n trainer1.train()\n\n tmpdir = tempfile.mkdtemp()\n filename = os.path.join(tmpdir, \"checkpoint\")\n trainer1.save(filename)\n\n model1 = trainer1.get_model()\n trainer1.shutdown()\n\n trainer2 = TFTrainer(\n model_creator=simple_model,\n data_creator=simple_dataset,\n num_replicas=num_replicas,\n config=SIMPLE_CONFIG)\n trainer2.restore(filename)\n\n model2 = trainer2.get_model()\n trainer2.shutdown()\n\n shutil.rmtree(tmpdir)\n\n model1_config = model1.get_config()\n model2_config = model2.get_config()\n assert _compare(model1_config, model2_config, skip_keys=[\"name\"])\n\n model1_weights = model1.get_weights()\n model2_weights = model2.get_weights()\n assert _compare(model1_weights, model2_weights)\n\n model1_opt_weights = model1.optimizer.get_weights()\n model2_opt_weights = model2.optimizer.get_weights()\n assert _compare(model1_opt_weights, model2_opt_weights)\n\n\ndef _compare(d1, d2, skip_keys=None):\n \"\"\"Compare two lists or dictionaries or array\"\"\"\n if type(d1) != type(d2):\n return False\n\n if isinstance(d1, dict):\n if set(d1) != set(d2):\n return False\n\n for key in d1:\n if skip_keys is not None and key in skip_keys:\n continue\n\n if not _compare(d1[key], d2[key], skip_keys=skip_keys):\n return False\n\n elif isinstance(d1, list):\n for i, _ in enumerate(d1):\n if not _compare(d1[i], d2[i], skip_keys=skip_keys):\n return False\n\n elif isinstance(d1, np.ndarray):\n if not np.array_equal(d1, d2):\n return False\n else:\n if d1 != d2:\n return False\n\n return True\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nimport ray\nfrom ray.rllib.agents.dqn import DQNTrainer\nfrom ray.tune.registry import register_env\nimport numpy as np\nimport gym\n\n\nclass TestReproducibility(unittest.TestCase):\n def testReproducingTrajectory(self):\n class PickLargest(gym.Env):\n def __init__(self):\n self.observation_space = gym.spaces.Box(\n low=float(\"-inf\"), high=float(\"inf\"), shape=(4, ))\n self.action_space = gym.spaces.Discrete(4)\n\n def reset(self, **kwargs):\n self.obs = np.random.randn(4)\n return self.obs\n\n def step(self, action):\n reward = self.obs[action]\n return self.obs, reward, True, {}\n\n def env_creator(env_config):\n return PickLargest()\n\n trajs = list()\n for trial in range(3):\n ray.init()\n register_env(\"PickLargest\", env_creator)\n agent = DQNTrainer(\n env=\"PickLargest\",\n config={\"seed\": 666 if trial in [0, 1] else 999})\n\n trajectory = list()\n for _ in range(8):\n r = agent.train()\n trajectory.append(r[\"episode_reward_max\"])\n trajectory.append(r[\"episode_reward_min\"])\n trajs.append(trajectory)\n\n ray.shutdown()\n\n # trial0 and trial1 use same seed and thus\n # expect identical trajectories.\n all_same = True\n for v0, v1 in zip(trajs[0], trajs[1]):\n if v0 != v1:\n all_same = False\n self.assertTrue(all_same)\n\n # trial1 and trial2 use different seeds and thus\n # most rewards tend to be different.\n diff_cnt = 0\n for v1, v2 in zip(trajs[1], trajs[2]):\n if v1 != v2:\n diff_cnt += 1\n self.assertTrue(diff_cnt > 8)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\"\"\"RNN utils for RLlib.\n\nThe main trick here is that we add the time dimension at the last moment.\nThe non-LSTM layers of the model see their inputs as one flat batch. Before\nthe LSTM cell, we reshape the input to add the expected time dimension. During\npostprocessing, we dynamically pad the experience batches so that this\nreshaping is possible.\n\nNote that this padding strategy only works out if we assume zero inputs don't\nmeaningfully affect the loss function. This happens to be true for all the\ncurrent algorithms: https://github.com/ray-project/ray/issues/2992\n\"\"\"\n\nimport numpy as np\n\nfrom ray.rllib.utils.annotations import DeveloperAPI\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\n\n@DeveloperAPI\ndef add_time_dimension(padded_inputs, seq_lens):\n \"\"\"Adds a time dimension to padded inputs.\n\n Arguments:\n padded_inputs (Tensor): a padded batch of sequences. That is,\n for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where\n A, B, C are sequence elements and * denotes padding.\n seq_lens (Tensor): the sequence lengths within the input batch,\n suitable for passing to tf.nn.dynamic_rnn().\n\n Returns:\n Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].\n \"\"\"\n\n # Sequence lengths have to be specified for LSTM batch inputs. The\n # input batch must be padded to the max seq length given here. That is,\n # batch_size == len(seq_lens) * max(seq_lens)\n padded_batch_size = tf.shape(padded_inputs)[0]\n max_seq_len = padded_batch_size // tf.shape(seq_lens)[0]\n\n # Dynamically reshape the padded batch to introduce a time dimension.\n new_batch_size = padded_batch_size // max_seq_len\n new_shape = ([new_batch_size, max_seq_len] +\n padded_inputs.get_shape().as_list()[1:])\n return tf.reshape(padded_inputs, new_shape)\n\n\n@DeveloperAPI\ndef chop_into_sequences(episode_ids,\n unroll_ids,\n agent_indices,\n feature_columns,\n state_columns,\n max_seq_len,\n dynamic_max=True,\n shuffle=False,\n _extra_padding=0):\n \"\"\"Truncate and pad experiences into fixed-length sequences.\n\n Arguments:\n episode_ids (list): List of episode ids for each step.\n unroll_ids (list): List of identifiers for the sample batch. This is\n used to make sure sequences are cut between sample batches.\n agent_indices (list): List of agent ids for each step. Note that this\n has to be combined with episode_ids for uniqueness.\n feature_columns (list): List of arrays containing features.\n state_columns (list): List of arrays containing LSTM state values.\n max_seq_len (int): Max length of sequences before truncation.\n dynamic_max (bool): Whether to dynamically shrink the max seq len.\n For example, if max len is 20 and the actual max seq len in the\n data is 7, it will be shrunk to 7.\n shuffle (bool): Whether to shuffle the sequence outputs.\n _extra_padding (int): Add extra padding to the end of sequences.\n\n Returns:\n f_pad (list): Padded feature columns. 
These will be of shape\n [NUM_SEQUENCES * MAX_SEQ_LEN, ...].\n s_init (list): Initial states for each sequence, of shape\n [NUM_SEQUENCES, ...].\n seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].\n\n Examples:\n >>> f_pad, s_init, seq_lens = chop_into_sequences(\n episode_ids=[1, 1, 5, 5, 5, 5],\n unroll_ids=[4, 4, 4, 4, 4, 4],\n agent_indices=[0, 0, 0, 0, 0, 0],\n feature_columns=[[4, 4, 8, 8, 8, 8],\n [1, 1, 0, 1, 1, 0]],\n state_columns=[[4, 5, 4, 5, 5, 5]],\n max_seq_len=3)\n >>> print(f_pad)\n [[4, 4, 0, 8, 8, 8, 8, 0, 0],\n [1, 1, 0, 0, 1, 1, 0, 0, 0]]\n >>> print(s_init)\n [[4, 4, 5]]\n >>> print(seq_lens)\n [2, 3, 1]\n \"\"\"\n\n prev_id = None\n seq_lens = []\n seq_len = 0\n unique_ids = np.add(\n np.add(episode_ids, agent_indices),\n np.array(unroll_ids) << 32)\n for uid in unique_ids:\n if (prev_id is not None and uid != prev_id) or \\\n seq_len >= max_seq_len:\n seq_lens.append(seq_len)\n seq_len = 0\n seq_len += 1\n prev_id = uid\n if seq_len:\n seq_lens.append(seq_len)\n assert sum(seq_lens) == len(unique_ids)\n seq_lens = np.array(seq_lens)\n\n # Dynamically shrink max len as needed to optimize memory usage\n if dynamic_max:\n max_seq_len = max(seq_lens) + _extra_padding\n\n feature_sequences = []\n for f in feature_columns:\n f = np.array(f)\n f_pad = np.zeros((len(seq_lens) * max_seq_len, ) + np.shape(f)[1:])\n seq_base = 0\n i = 0\n for l in seq_lens:\n for seq_offset in range(l):\n f_pad[seq_base + seq_offset] = f[i]\n i += 1\n seq_base += max_seq_len\n assert i == len(unique_ids), f\n feature_sequences.append(f_pad)\n\n initial_states = []\n for s in state_columns:\n s = np.array(s)\n s_init = []\n i = 0\n for l in seq_lens:\n s_init.append(s[i])\n i += l\n initial_states.append(np.array(s_init))\n\n if shuffle:\n permutation = np.random.permutation(len(seq_lens))\n for i, f in enumerate(feature_sequences):\n orig_shape = f.shape\n f = np.reshape(f, (len(seq_lens), -1) + f.shape[1:])\n f = f[permutation]\n f = np.reshape(f, orig_shape)\n feature_sequences[i] = f\n for i, s in enumerate(initial_states):\n s = s[permutation]\n initial_states[i] = s\n seq_lens = seq_lens[permutation]\n\n return feature_sequences, initial_states, seq_lens\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom ray.rllib.offline.input_reader import InputReader\nfrom ray.rllib.offline.json_reader import JsonReader\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\n\n\n@DeveloperAPI\nclass MixedInput(InputReader):\n \"\"\"Mixes input from a number of other input sources.\n\n Examples:\n >>> MixedInput({\n \"sampler\": 0.4,\n \"/tmp/experiences/*.json\": 0.4,\n \"s3://bucket/expert.json\": 0.2,\n }, ioctx)\n \"\"\"\n\n @DeveloperAPI\n def __init__(self, dist, ioctx):\n \"\"\"Initialize a MixedInput.\n\n Arguments:\n dist (dict): dict mapping JSONReader paths or \"sampler\" to\n probabilities. The probabilities must sum to 1.0.\n ioctx (IOContext): current IO context object.\n \"\"\"\n if sum(dist.values()) != 1.0:\n raise ValueError(\"Values must sum to 1.0: {}\".format(dist))\n self.choices = []\n self.p = []\n for k, v in dist.items():\n if k == \"sampler\":\n self.choices.append(ioctx.default_sampler_input())\n else:\n self.choices.append(JsonReader(k))\n self.p.append(v)\n\n @override(InputReader)\n def next(self):\n source = np.random.choice(self.choices, p=self.p)\n return source.next()\n"
] | [
[
"numpy.product"
],
[
"numpy.zeros"
],
[
"torch.nn.Module.__init__"
],
[
"numpy.array_equal"
],
[
"numpy.random.randn"
],
[
"numpy.reshape",
"numpy.add",
"numpy.array",
"numpy.shape"
],
[
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
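The chop_into_sequences docstring above works through how flat step data is padded into [NUM_SEQUENCES * MAX_SEQ_LEN, ...] blocks. A minimal standalone sketch of that padding idea in plain numpy (a toy reimplementation for illustration, not RLlib's API):

import numpy as np

def pad_sequences(features, seq_lens, max_seq_len):
    # Pad a flat feature array into num_seqs blocks of max_seq_len rows each.
    features = np.asarray(features, dtype=float)
    out = np.zeros((len(seq_lens) * max_seq_len,) + features.shape[1:])
    i = 0
    for seq_idx, length in enumerate(seq_lens):
        start = seq_idx * max_seq_len
        out[start:start + length] = features[i:i + length]
        i += length
    return out

# seq_lens [2, 3, 1] over six steps, matching the docstring example above
print(pad_sequences([4, 4, 8, 8, 8, 8], [2, 3, 1], max_seq_len=3))
# -> [4. 4. 0. 8. 8. 8. 8. 0. 0.]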
artofimagination/py-algo-trading-client | [
"4459e53cf184cd2cdda402ef903561d8df7ce3bc"
] | [
"src/trade_platforms/platform_wrapper_base.py"
] | [
"from datetime import datetime\nfrom typing import List\nfrom enum import Enum\nimport pandas as pd\nimport time\nimport plotly.graph_objects as go\n\n\n## Enum to identify platforms\nclass Platforms(Enum):\n FTX = \"FTX\"\n\n\n## Base class for platfrom wrappers.\n# Implements all common functionalities\n# for platforms and platform simulators\nclass PlatformWrapper():\n def __init__(self, name):\n self.name = name\n ## Wait time between cycles.\n self.sleep_time = 2\n ## Storing the current price, to reduce the number of REST gets.\n # Value is updated once per cycle.\n self.current_price = 0\n ## Cycle count.\n self.cycle = 0\n ## Enables cycle progress print\n self.allow_cycle_progress_print = True\n ## Stores the current cycle timestamp\n self.cycle_timestamp = datetime.now()\n ## Holds the current market price.\n self.current_price = 0\n ## Holds the current bids of the orderbook.\n self.current_bids = None\n ## holds the current asks of the orderbook.\n self.current_asks = None\n ## Stores the start time of the run\n self.start_time = datetime.now()\n ## Message showing details every cycle.\n self.cyclic_message_appendix = ''\n\n # Interface to place_order.\n def place_order(self):\n pass\n\n ## Get plotting historical data\n # @param start_time starting date of the data\n # @param end_time end date of the data\n # @param resolution of the data\n def plot_historical(self, start_time=None, end_time=None, resolution=None):\n df = self.historical_data(start_time, end_time, resolution)\n # Convert time to date\n df['date'] = pd.to_datetime(\n df['time'] / 1000, unit='s', origin='unix'\n )\n\n fig = go.Figure()\n fig.update_layout(\n title={\n 'text': \"Result\",\n 'x': 0.5,\n 'xanchor': 'center'\n },\n xaxis_title=\"Date\",\n yaxis_title=\"Price\",\n xaxis_rangeslider_visible=False\n )\n fig.add_trace(\n go.Candlestick(\n x=df['startTime'],\n open=df['open'],\n high=df['high'],\n low=df['low'],\n close=df['close']\n )\n )\n fig.show()\n\n ## Interface for cancel_order.\n def cancel_order(self):\n pass\n\n ## Returns the fetched price.\n def get_current_price(self):\n return self.current_price\n\n ## Fetches the current market price from remote.\n # Implemented on real platform wrappers.\n def fetch_current_price(self):\n pass\n\n ## Returns the orderbook.\n def get_orderbook(self):\n return (self.current_bids, self.current_asks)\n\n ## Fetches the orderbook from remote.\n # Implemented on real platform wrappers.\n def fetch_orderbook(self, depth):\n pass\n\n ## Returns the current cycle timestamp.\n def get_cycle_timestamp(self):\n return self.cycle_timestamp\n\n ## Appends additional info to the cyclic print\n def append_to_cyclic_message(self, message):\n self.cyclic_message_appendix = message\n\n ## Updates the current cycle timestamp.\n # Implementation differs in Test, Validation and Production modes.\n def update_cycle_timestamp(self):\n self.cycle_timestamp = datetime.now()\n\n ## Evaluates processes common for all type of platforms.\n def evaluate(self, trade):\n self.update_cycle_timestamp()\n (self.current_bids, self.current_asks) = self.fetch_orderbook(100)\n if self.allow_cycle_progress_print:\n print(f\"Cycle: {self.cycle}, \\\ntime: {self.cycle_timestamp}, orders: {len(self.get_order_history())} \\\n{self.cyclic_message_appendix}\")\n self.cycle += 1\n self.current_price = self.fetch_current_price()\n if self.sleep_time > 0:\n time.sleep(self.sleep_time)\n running = trade()\n return (running, self.cycle_timestamp)\n\n ## Interface for get_order_history\n def 
get_order_history(\n self,\n side=None,\n order_type=None,\n start_time=None,\n end_time=None) -> List[dict]:\n return None\n\n def get_account_info(self):\n return None\n\n def get_balances(self):\n return None\n\n def cleanup_iteration(self):\n pass\n\n def get_start_timestamp(self):\n return None\n\n def set_data_interval(self, start_time, end_time):\n pass\n\n def set_start_balance(self, balance_USD):\n pass\n\n ## Sets the wait time between cycles when running the platform.\n def set_wait_time(self, sleep_seconds):\n self.sleep_time = sleep_seconds\n"
] | [
[
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
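PlatformWrapper.evaluate() drives one trading cycle: refresh the timestamp, pull the orderbook and price, then hand control to a trade callback that reports whether to keep running. A hedged usage sketch; FakePlatform, its constant price, and the trivial strategy are illustrative assumptions, not part of the library:

class FakePlatform(PlatformWrapper):
    def __init__(self):
        super().__init__(name='fake')
        self.sleep_time = 0  # no throttling needed in a simulation

    def fetch_current_price(self):
        return 100.0  # toy constant price (assumption)

    def fetch_orderbook(self, depth):
        return ([], [])  # empty bids and asks

    def get_order_history(self, side=None, order_type=None, start_time=None, end_time=None):
        return []  # evaluate() calls len() on this, so return a list

platform = FakePlatform()

def trade():
    # Trivial strategy: stop as soon as a price has been fetched.
    return platform.get_current_price() is None

running, timestamp = platform.evaluate(trade)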
jairotunior/gym_suppy | [
"71eb58c9e40723e9474d20b7439a50cedea3e085"
] | [
"deeplog/wrappers/model.py"
] | [
"import gym\nimport gym.spaces\nfrom deeplog.environments import SupplyEnv\nimport pandas as pd\nimport numpy as np\n\nfrom deeplog.wrappers import Base\n\nfrom abc import ABC, abstractmethod\n\n\nclass Model(Base):\n\n def __init__(self, env):\n Base.__init__(self, env)\n\n self.series: pd.DataFrame = pd.DataFrame({'date': self.env.range_date})\n self.series = self.series.set_index('date')\n\n self.series_info: dict = {}\n\n self.legends = []\n\n def step(self, action):\n self._plot_series()\n return self.env.step(action)\n\n def reset(self):\n return self.env.reset()\n\n def render(self, mode='human'):\n self.env.render(mode=mode)\n\n # Plot all series\n self._plot()\n\n def _plot_series(self):\n pass\n\n def _plot(self):\n if self.unwrapped.iterator > 0:\n window_size = 20\n window_start = max(self.unwrapped.iterator - window_size, 1)\n step_range = slice(window_start, self.unwrapped.iterator + 1)\n\n # Plot all series\n for serie in self.series.columns:\n self.unwrapped.chart.history.plot(self.series.iloc[window_start:self.unwrapped.iterator][serie].index.values,\n self.series.iloc[window_start:self.unwrapped.iterator][serie].values,\n color=self.series_info[serie]['color'])\n\n self.unwrapped.chart.history.legend(self.unwrapped.chart.legends + self.legends)\n\n\n def add_serie(self, serie_name, color='r'):\n if serie_name in self.series_info.keys():\n assert ValueError(\"El nombre de serie '{}' ya ha sido asignado, seleccione un nombre unico.\".format(serie_name))\n\n self.series_info[serie_name] = {\n #'type': type,\n 'color': color\n }\n\n self.legends.append(serie_name)\n\n self.series[serie_name] = 0\n #self.series[serie_name] = self.series[serie_name].astype(type)\n #self.env.history[serie_name] = initial_value\n\n\n def add_point(self, serie_name, value):\n #self.env.history.at[self.env.current_date, serie_name] = value\n\n if serie_name not in self.series:\n assert ValueError(\"La serie nombre {} no existe.\".format(serie_name))\n\n self.series.at[self.unwrapped.current_date, serie_name] = value"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
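The Model wrapper's add_serie()/add_point() pair boils down to simple bookkeeping on a date-indexed DataFrame: each registered serie becomes a zero-filled column, and points are written by date. A standalone sketch of just that bookkeeping (toy dates; no SupplyEnv needed):

import pandas as pd

dates = pd.date_range('2021-01-01', periods=5)
series = pd.DataFrame({'date': dates}).set_index('date')

series['inventory'] = 0                  # what add_serie('inventory') does
series.at[dates[2], 'inventory'] = 42.0  # what add_point('inventory', 42.0) does
print(series)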
rafelafrance/traiter_butterflynet | [
"a96300d43ef855ef06b8d15196d39ca5628b0480"
] | [
"costa_rica_downloader.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Download files from UPenn Butterflies of Costa Rica Web Site.\"\"\"\n\nimport argparse\nimport re\nimport socket\nimport urllib.request\nfrom urllib.error import HTTPError\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nfrom src.pylib.consts import DATA_DIR\n\n# Make a few attempts to download a page\nERROR_SLEEP = 120\nERROR_RETRY = 10\n\n# Set a timeout for requests\nTIMEOUT = 30\nsocket.setdefaulttimeout(TIMEOUT)\n\n# Just throw everything in here\nOUT_DIR = DATA_DIR / 'costa_rica'\n\n# URL parts for the crawl\nSITE = 'http://janzen.sas.upenn.edu/caterpillars/'\nFAMILY = SITE + 'checklists/{}list.htm'\nGENUS = SITE + 'dblinks/cklistfd.lasso?{}herbsp={}'\nSKIP = '-SkipRecords={}&'\n\n# Used to figure out how many pages for each genus\nCOUNTS = re.compile(\n r'Displaying records (\\d+) to (\\d+) of (\\d+) records',\n flags=re.IGNORECASE)\n\n\ndef main(args):\n \"\"\"Download the data.\"\"\"\n print(args.family)\n family_page = get_family_page(args)\n genera = get_genera(family_page)\n\n if args.reverse:\n genera.reverse()\n\n for genus in genera:\n print(genus)\n genus_links = get_genus_links(args, genus)\n print(genus_links[0][1].name)\n for url, path in genus_links[1:]:\n print(path.name)\n download_page(url, path)\n\n if args.csv_file:\n write_results(args)\n\n\ndef write_results(args):\n \"\"\"Output the concatenated tables to a CSV file.\"\"\"\n paths = [p for p in OUT_DIR.glob(f'{args.family}_*.html')]\n dfs = [pd.read_html(str(p), header=0)[0].fillna('') for p in paths]\n df = pd.concat(dfs, ignore_index=True)\n df.to_csv(args.csv_file, index=False)\n\n\ndef get_genus_links(args, genus):\n \"\"\"Get the first page for the genus.\"\"\"\n links = []\n\n url = GENUS.format('', genus)\n path = OUT_DIR / f'{args.family}_{genus}_1.html'\n links.append((url, path))\n download_page(url, path)\n\n with open(path) as in_file:\n page = in_file.read()\n soup = BeautifulSoup(page, features='lxml')\n\n for font in soup.findAll('font'):\n text = font.get_text()\n if match := COUNTS.search(text):\n _, step, total = [int(g) for g in match.groups()]\n if step == 0 or total == 0:\n continue\n for page_no, skip in enumerate(range(step, total, step), 2):\n skip = SKIP.format(skip)\n url = GENUS.format(skip, genus)\n path = OUT_DIR / f'{args.family}_{genus}_{page_no}.html'\n links.append((url, path))\n break\n\n print(f'Count: {len(links)}')\n return links\n\n\ndef get_genera(family_page):\n \"\"\"Get all genera for the family.\"\"\"\n genera = []\n with open(family_page) as in_file:\n page = in_file.read()\n\n soup = BeautifulSoup(page, features='lxml')\n for tr in soup.findAll('tr'):\n tds = tr.findAll('td')\n genus = tds[0].get_text() if tds else ''\n if len(genus.split()) == 1:\n genera.append(genus)\n\n return genera\n\n\ndef get_family_page(args):\n \"\"\"Download the master list of checklists.\"\"\"\n url = FAMILY.format(args.family)\n path = OUT_DIR / f'{args.family}.html'\n download_page(url, path)\n return path\n\n\ndef download_page(url, path):\n \"\"\"Download a page if it does not exist.\"\"\"\n if path.exists():\n return\n\n for attempt in range(ERROR_RETRY):\n if attempt > 0:\n print(f'Attempt {attempt + 1}')\n try:\n urllib.request.urlretrieve(url, path)\n # time.sleep(random.randint(SLEEP_RANGE[0], SLEEP_RANGE[1]))\n break\n except (TimeoutError, socket.timeout, HTTPError):\n pass\n\n\ndef parse_args():\n \"\"\"Process command-line arguments.\"\"\"\n description = \"\"\"Download files from Butterflies of Costa Rica Web Site.\"\"\"\n arg_parser = 
argparse.ArgumentParser(description=description)\n\n arg_parser.add_argument(\n '--family', '-f', default='hesperiidae',\n help=\"\"\"The family (or superfamily) to download.\"\"\")\n\n arg_parser.add_argument(\n '--reverse', '-r', action='store_true',\n help=\"\"\"Go through the genus list backwards.\"\"\")\n\n arg_parser.add_argument(\n '--csv-file', '-C',\n help=\"\"\"Output the results to this CSV file.\"\"\")\n\n args = arg_parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n ARGS = parse_args()\n main(ARGS)\n"
] | [
[
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
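The downloader's pagination hinges on parsing "Displaying records X to Y of Z records" and turning the step/total pair into skip offsets for pages 2 and up. The same arithmetic in isolation, using a made-up page fragment:

import re

COUNTS = re.compile(r'Displaying records (\d+) to (\d+) of (\d+) records', flags=re.IGNORECASE)

text = 'Displaying records 1 to 50 of 230 records'  # toy fragment (assumption)
_, step, total = [int(g) for g in COUNTS.search(text).groups()]

# Page 1 is already fetched; later pages skip multiples of the step size.
offsets = list(enumerate(range(step, total, step), 2))
print(offsets)  # [(2, 50), (3, 100), (4, 150), (5, 200)]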
katherineedgley/model-stats-extension | [
"1e983c3588ccedc1b6faf2531312ee0b45e30ba1"
] | [
"linear_model_extension/model_stats.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\n\nclass ModelStats:\n '''\n General class (not to be called outside of RegressionStats) for classes\n that generate model statistics, like RegressionStats\n Input:\n fitted_model - a scikit-learn LinearRegression fitted model\n X - feature matrix used to fit the sklearn model, can be \n numpy array or pandas df\n y - target array used to fit the sklearn model, \n np array or pandas series\n colnames - default None, only for supplying a list of the column\n names when inputting X as a numpy array, which\n does not include the names of the columns\n If colnames=None and X is numpy array, \n will name the variables in order.\n '''\n def __init__(self, fitted_model, X, y, colnames = None):\n self.X = np.asarray(X)\n self.y = np.asarray(y)\n self.fitted_model = fitted_model\n if (self.X.shape[0] != self.y.shape[0]):\n raise ValueError('X and y different number of samples')\n else:\n if isinstance(X, pd.DataFrame):\n self.colnames = ['Intercept'] + list(X.columns)\n elif colnames == None:\n self.colnames = ['Intercept'] + ['Variable_' + str(x+1) \n for x in range(X.shape[1])]\n else:\n self.colnames = ['Intercept'] + colnames\n\n\n "
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
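ModelStats resolves column names three ways: from a DataFrame's columns, from an explicit colnames list, or by auto-generating 'Variable_1', 'Variable_2', ... for a bare numpy array. A hedged usage sketch exercising the auto-generated branch (random toy data; assumes scikit-learn is installed):

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.random.randn(20, 2)
y = X @ np.array([1.5, -0.5]) + 0.1

model = LinearRegression().fit(X, y)
stats = ModelStats(model, X, y)  # no colnames and X is a numpy array
print(stats.colnames)  # ['Intercept', 'Variable_1', 'Variable_2']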
tkaya94/UdemyDataScience | [
"83fe006bace0e91a273006546df3f3ee408b7797",
"83fe006bace0e91a273006546df3f3ee408b7797"
] | [
"4_Matplotlib/PandasVisualisation.py",
"3_Pandas/ConcatAppendJoin.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nnp.random.seed(0)\nimport pandas as pd\n\nnum_samples = 100\nts = pd.Series(np.random.randn(num_samples), index=pd.date_range(\"1/1/2000\", periods=num_samples))\n# # print(ts.head())\n# ts.plot()\n# plt.show()\n\ndata = np.concatenate([np.random.randn(num_samples, 2),\n np.random.randint(low=0, high=10, size=(num_samples, 1))], axis=1)\n\ndf = pd.DataFrame(data, index=ts.index, columns=[\"A\", \"B\", \"C\"]).astype(\n {\"A\": np.float32, \"B\": np.float32, \"C\": np.uint8})\n# print(df.head())\ndf.plot()\nplt.show()\n",
"import numpy as np\nnp.random.seed(0)\nimport pandas as pd\n\ndf1 = pd.DataFrame(\n {\n \"A\": [\"A0\", \"A1\", \"A2\", \"A3\"],\n \"B\": [\"B0\", \"B1\", \"B2\", \"B3\"],\n },\n index=[0, 1, 2, 3]\n)\n\ndf2 = pd.DataFrame(\n {\n \"A\": [\"A4\", \"A5\", \"A6\", \"A7\"],\n \"B\": [\"B4\", \"B5\", \"B6\", \"B7\"],\n },\n index=[0, 1, 2, 5]\n)\n\ndf12_append = df1.append(df2)\nprint(df12_append)\n\ndf_concat = pd.concat([df1, df2], join=\"inner\", ignore_index=True)\nprint(\"concatenated:\", df_concat)\n\nleft = pd.DataFrame(\n {\n \"key\": [\"K0\", \"K1\", \"K2\"],\n \"A\": [\"A0\", \"A1\", \"A2\"]\n }\n)\n\nright = pd.DataFrame(\n {\n \"key\": [\"K0\", \"K1\", \"K2\"],\n \"B\": [\"B0\", \"B1\", \"B2\"]\n }\n)\nprint(left)\nprint(right)\nprint(pd.merge(left, right, on=\"key\"))\n\nleft = pd.DataFrame(\n {\n \"A\": [\"A0\", \"A1\", \"A2\"],\n },\n index=[\"K0\", \"K1\", \"K2\"]\n)\n\nright = pd.DataFrame(\n {\n \"B\": [\"B0\", \"B1\", \"B2\"],\n },\n index=[\"K0\", \"K1\", \"K3\"]\n)\n\nprint(left)\nprint(right)\nprint(left.join(right, how=\"outer\"))\n"
] | [
[
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.randn",
"pandas.date_range",
"matplotlib.pyplot.show",
"numpy.random.randint"
],
[
"pandas.concat",
"pandas.merge",
"numpy.random.seed",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
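One caution on the ConcatAppendJoin.py example above: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, so the df1.append(df2) call only runs on the older versions listed. pd.concat is the drop-in replacement:

import pandas as pd

df1 = pd.DataFrame({'A': ['A0', 'A1']}, index=[0, 1])
df2 = pd.DataFrame({'A': ['A2', 'A3']}, index=[0, 1])

# Equivalent to the removed df1.append(df2)
stacked = pd.concat([df1, df2])
print(stacked)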
beantowel/librosa | [
"f54cc970e6089b3d1254cdb430a8747e5e68a940"
] | [
"tests/test_sequence.py"
] | [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport numpy as np\n\nimport pytest\nfrom test_core import srand\n\nimport librosa\n\n\n# Core viterbi tests\ndef test_viterbi_example():\n # Example from https://en.wikipedia.org/wiki/Viterbi_algorithm#Example\n\n # States: 0 = healthy, 1 = fever\n p_init = np.asarray([0.6, 0.4])\n\n # state 0 = hi, state 1 = low\n transition = np.asarray([[0.7, 0.3], [0.4, 0.6]])\n\n # emission likelihoods\n emit_p = [dict(normal=0.5, cold=0.4, dizzy=0.1), dict(normal=0.1, cold=0.3, dizzy=0.6)]\n\n obs = [\"normal\", \"cold\", \"dizzy\"]\n\n prob = np.asarray([np.asarray([ep[o] for o in obs]) for ep in emit_p])\n\n path, logp = librosa.sequence.viterbi(prob, transition, p_init, return_logp=True)\n\n # True maximum likelihood state\n assert np.array_equal(path, [0, 0, 1])\n assert np.isclose(logp, np.log(0.01512))\n\n # And check the second execution path\n path2 = librosa.sequence.viterbi(prob, transition, p_init, return_logp=False)\n\n assert np.array_equal(path, path2)\n\n\ndef test_viterbi_init():\n # Example from https://en.wikipedia.org/wiki/Viterbi_algorithm#Example\n\n # States: 0 = healthy, 1 = fever\n p_init = np.asarray([0.5, 0.5])\n\n # state 0 = hi, state 1 = low\n transition = np.asarray([[0.7, 0.3], [0.4, 0.6]])\n\n # emission likelihoods\n emit_p = [dict(normal=0.5, cold=0.4, dizzy=0.1), dict(normal=0.1, cold=0.3, dizzy=0.6)]\n\n obs = [\"normal\", \"cold\", \"dizzy\"]\n\n prob = np.asarray([np.asarray([ep[o] for o in obs]) for ep in emit_p])\n\n path1, logp1 = librosa.sequence.viterbi(prob, transition, p_init, return_logp=True)\n\n path2, logp2 = librosa.sequence.viterbi(prob, transition, return_logp=True)\n\n assert np.array_equal(path1, path2)\n assert logp1 == logp2\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"x\", [np.random.random(size=(3, 5))])\[email protected](\n \"trans\",\n [\n np.ones((3, 3), dtype=float),\n np.ones((3, 2), dtype=float),\n np.ones((2, 2), dtype=float),\n np.asarray([[1, 1, -1], [1, 1, -1], [1, 1, -1]], dtype=float),\n ],\n ids=[\"sum!=1\", \"not square\", \"too small\", \"negative\"],\n)\ndef test_viterbi_bad_transition(trans, x):\n librosa.sequence.viterbi(x, trans)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"x\", [np.random.random(size=(3, 5))])\[email protected](\"trans\", [np.ones((3, 3), dtype=float) / 3.0])\[email protected](\n \"p_init\",\n [np.ones(3, dtype=float), np.ones(4, dtype=float) / 4.0, np.asarray([1, 1, -1], dtype=float)],\n ids=[\"sum!=1\", \"wrong size\", \"negative\"],\n)\ndef test_viterbi_bad_init(x, trans, p_init):\n librosa.sequence.viterbi(x, trans, p_init=p_init)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"trans\", [np.ones((3, 3), dtype=float) / 3])\[email protected](\n \"x\", [np.random.random(size=(3, 5)) + 2, np.random.random(size=(3, 5)) - 1], ids=[\"p>1\", \"p<0\"]\n)\ndef test_viterbi_bad_obs(trans, x):\n librosa.sequence.viterbi(x, trans)\n\n\n# Discriminative viterbi\ndef test_viterbi_discriminative_example():\n # A pre-baked example with coin tosses\n\n transition = np.asarray([[0.75, 0.25], [0.25, 0.75]])\n\n # Joint XY model\n p_joint = np.asarray([[0.25, 0.25], [0.1, 0.4]])\n\n # marginals\n p_obs_marginal = p_joint.sum(axis=0)\n p_state_marginal = p_joint.sum(axis=1)\n\n p_init = p_state_marginal\n\n # Make the Y|X distribution\n p_state_given_obs = (p_joint / p_obs_marginal).T\n\n # Let's make a test observation sequence\n seq = np.asarray([1, 1, 0, 1, 1, 1, 0, 0])\n\n # Then our 
conditional probability table can be constructed directly as\n prob_d = np.asarray([p_state_given_obs[i] for i in seq]).T\n\n path, logp = librosa.sequence.viterbi_discriminative(\n prob_d, transition, p_state=p_state_marginal, p_init=p_init, return_logp=True\n )\n\n # Pre-computed optimal path, determined by brute-force search\n assert np.array_equal(path, [1, 1, 1, 1, 1, 1, 0, 0])\n\n # And check the second code path\n path2 = librosa.sequence.viterbi_discriminative(\n prob_d, transition, p_state=p_state_marginal, p_init=p_init, return_logp=False\n )\n assert np.array_equal(path, path2)\n\n\ndef test_viterbi_discriminative_example_init():\n # A pre-baked example with coin tosses\n\n transition = np.asarray([[0.75, 0.25], [0.25, 0.75]])\n\n # Joint XY model\n p_joint = np.asarray([[0.25, 0.25], [0.1, 0.4]])\n\n # marginals\n p_obs_marginal = p_joint.sum(axis=0)\n p_state_marginal = p_joint.sum(axis=1)\n\n p_init = np.asarray([0.5, 0.5])\n\n # Make the Y|X distribution\n p_state_given_obs = (p_joint / p_obs_marginal).T\n\n # Let's make a test observation sequence\n seq = np.asarray([1, 1, 0, 1, 1, 1, 0, 0])\n\n # Then our conditional probability table can be constructed directly as\n prob_d = np.asarray([p_state_given_obs[i] for i in seq]).T\n\n path, logp = librosa.sequence.viterbi_discriminative(\n prob_d, transition, p_state=p_state_marginal, p_init=p_init, return_logp=True\n )\n path2, logp2 = librosa.sequence.viterbi_discriminative(\n prob_d, transition, p_state=p_state_marginal, return_logp=True\n )\n assert np.array_equal(path, path2)\n assert np.allclose(logp, logp2)\n\n\[email protected](scope=\"module\")\ndef x_disc():\n srand()\n x = np.random.random(size=(3, 5)) ** 2\n x /= x.sum(axis=0, keepdims=True)\n return x\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\n \"trans\",\n [\n np.ones((3, 3), dtype=float),\n np.ones((3, 2), dtype=float) * 0.5,\n np.ones((2, 2), dtype=float) * 0.5,\n np.asarray([[1, 1, -1], [1, 1, -1], [1, 1, -1]], dtype=float),\n ],\n ids=[\"sum>1\", \"bad shape\", \"too small\", \"negative\"],\n)\ndef test_viterbi_discriminative_bad_transition(x_disc, trans):\n librosa.sequence.viterbi_discriminative(x_disc, trans)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"trans\", [np.ones((3, 3), dtype=float) / 3])\[email protected](\n \"p_init\",\n [np.ones(3, dtype=float), np.ones(4, dtype=float) / 4.0, np.asarray([1, 1, -1], dtype=float)],\n ids=[\"sum>1\", \"too many states\", \"negative\"],\n)\ndef test_viterbi_discriminative_bad_init(p_init, trans, x_disc):\n librosa.sequence.viterbi_discriminative(x_disc, trans, p_init=p_init)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"trans\", [np.ones((3, 3), dtype=float) / 3])\[email protected](\n \"p_state\",\n [np.ones(3, dtype=float), np.ones(4, dtype=float) / 4.0, np.asarray([1, 1, -1], dtype=float)],\n ids=[\"sum>1\", \"too many states\", \"negative\"],\n)\ndef test_viterbi_discriminative_bad_marginal(x_disc, trans, p_state):\n librosa.sequence.viterbi_discriminative(x_disc, trans, p_state=p_state)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"trans\", [np.ones((3, 3), dtype=float) / 3])\[email protected](\n \"x\",\n [\n np.zeros((3, 5), dtype=float),\n np.ones((3, 5), dtype=float),\n np.asarray([[1, 1, -1], [0, 0, 1], [0, 0, 0]], dtype=float),\n ],\n ids=[\"zeros\", \"ones\", \"neg\"],\n)\ndef test_viterbi_discriminative_bad_obs(x, trans):\n librosa.sequence.viterbi_discriminative(x, trans)\n\n\n# Multi-label 
viterbi\ndef test_viterbi_binary_example():\n\n # 0 stays 0,\n # 1 is uninformative\n transition = np.asarray([[0.9, 0.1], [0.5, 0.5]])\n\n # Initial state distribution\n p_init = np.asarray([0.25, 0.75])\n\n p_binary = np.asarray([0.25, 0.5, 0.75, 0.1, 0.1, 0.8, 0.9])\n\n p_full = np.vstack((1 - p_binary, p_binary))\n\n # Compute the viterbi_binary result for one class\n path, logp = librosa.sequence.viterbi_binary(\n p_binary, transition, p_state=p_init[1:], p_init=p_init[1:], return_logp=True\n )\n\n # And the full multi-label result\n path_c, logp_c = librosa.sequence.viterbi_binary(\n p_full, transition, p_state=p_init, p_init=p_init, return_logp=True\n )\n path_c2 = librosa.sequence.viterbi_binary(p_full, transition, p_state=p_init, p_init=p_init, return_logp=False)\n\n # Check that the single and multilabel cases agree\n assert np.allclose(logp, logp_c[1])\n assert np.array_equal(path[0], path_c[1])\n assert np.array_equal(path_c, path_c2)\n\n # And do an explicit multi-class comparison\n path_d, logp_d = librosa.sequence.viterbi_discriminative(\n p_full, transition, p_state=p_init, p_init=p_init, return_logp=True\n )\n assert np.allclose(logp[0], logp_d)\n assert np.array_equal(path[0], path_d)\n\n\ndef test_viterbi_binary_example_init():\n\n # 0 stays 0,\n # 1 is uninformative\n transition = np.asarray([[0.9, 0.1], [0.5, 0.5]])\n\n # Initial state distribution\n p_init = np.asarray([0.5, 0.5])\n\n p_binary = np.asarray([0.25, 0.5, 0.75, 0.1, 0.1, 0.8, 0.9])\n\n p_full = np.vstack((1 - p_binary, p_binary))\n\n # And the full multi-label result\n path_c, logp_c = librosa.sequence.viterbi_binary(\n p_full, transition, p_state=p_init, p_init=p_init, return_logp=True\n )\n path_c2, logp_c2 = librosa.sequence.viterbi_binary(p_full, transition, p_state=p_init, return_logp=True)\n\n # Check that the single and multilabel cases agree\n assert np.allclose(logp_c, logp_c2)\n assert np.array_equal(path_c, path_c2)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"x\", [np.random.random(size=(3, 5)) ** 2])\[email protected](\n \"trans\",\n [\n np.ones((2, 2), dtype=float),\n np.ones((3, 3), dtype=float) / 3,\n np.ones((3, 5, 5), dtype=float),\n np.asarray([[2, -1], [2, -1]]),\n ],\n ids=[\"sum>1\", \"wrong size\", \"wrong shape\", \"negative\"],\n)\ndef test_viterbi_binary_bad_transition(x, trans):\n librosa.sequence.viterbi_binary(x, trans)\n\n\[email protected](\"x\", [np.random.random(size=(3, 5)) ** 2])\[email protected](\"trans\", [np.ones((2, 2), dtype=float) * 0.5])\[email protected](\n \"p_init\",\n [2 * np.ones(3, dtype=float), np.ones(4, dtype=float), -np.ones(3, dtype=float)],\n ids=[\"too big\", \"wrong shape\", \"negative\"],\n)\[email protected](raises=librosa.ParameterError)\ndef test_viterbi_binary_bad_init(x, trans, p_init):\n librosa.sequence.viterbi_binary(x, trans, p_init=p_init)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"x\", [np.random.random(size=(3, 5)) ** 2])\[email protected](\"trans\", [np.ones((2, 2), dtype=float) * 0.5])\[email protected](\n \"p_state\",\n [2 * np.ones(3, dtype=float), np.ones(4, dtype=float), -np.ones(3, dtype=float)],\n ids=[\"too big\", \"bad shape\", \"negative\"],\n)\ndef test_viterbi_binary_bad_marginal(p_state, trans, x):\n librosa.sequence.viterbi_binary(x, trans, p_state=p_state)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"trans\", [np.ones((2, 2), dtype=float) * 0.5])\[email protected](\n \"x\", [-np.ones((3, 5), dtype=float), 2 * np.ones((3, 5), 
dtype=float)], ids=[\"non-positive\", \"too big\"]\n)\ndef test_viterbi_binary_bad_obs(x, trans):\n librosa.sequence.viterbi_binary(x, trans)\n\n\n# Transition operator constructors\[email protected](\"n\", range(1, 4))\ndef test_trans_uniform(n):\n A = librosa.sequence.transition_uniform(n)\n assert A.shape == (n, n)\n assert np.allclose(A, 1.0 / n)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"n\", [0, None])\ndef test_trans_uniform_badshape(n):\n librosa.sequence.transition_uniform(n)\n\n\[email protected](\"n,p\", [(2, 0.5), (3, 0.5), (3, [0.8, 0.7, 0.5])])\ndef test_trans_loop(n, p):\n A = librosa.sequence.transition_loop(n, p)\n\n # Right shape\n assert A.shape == (n, n)\n # diag is correct\n assert np.allclose(np.diag(A), p)\n\n # we have well-formed distributions\n assert np.all(A >= 0)\n assert np.allclose(A.sum(axis=1), 1)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\n \"n,p\",\n [(1, 0.5), (None, 0.5), (3, 1.5), (3, -0.25), (3, [0.5, 0.2])],\n ids=[\"missing states\", \"wrong states\", \"not probability\", \"neg prob\", \"shape mismatch\"],\n)\ndef test_trans_loop_fail(n, p):\n librosa.sequence.transition_loop(n, p)\n\n\[email protected](\"n,p\", [(2, 0.5), (3, 0.5), (3, [0.8, 0.7, 0.5])])\ndef test_trans_cycle(n, p):\n A = librosa.sequence.transition_cycle(n, p)\n\n # Right shape\n assert A.shape == (n, n)\n # diag is correct\n assert np.allclose(np.diag(A), p)\n\n for i in range(n):\n assert A[i, np.mod(i + 1, n)] == 1 - A[i, i]\n\n # we have well-formed distributions\n assert np.all(A >= 0)\n assert np.allclose(A.sum(axis=1), 1)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\n \"n,p\",\n [(1, 0.5), (None, 0.5), (3, 1.5), (3, -0.25), (3, [0.5, 0.2])],\n ids=[\"too few states\", \"wrong n_states\", \"p>1\", \"p<0\", \"shape mismatch\"],\n)\ndef test_trans_cycle_fail(n, p):\n librosa.sequence.transition_cycle(n, p)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"n\", [1.5, 0])\ndef test_trans_local_nstates_fail(n):\n librosa.sequence.transition_local(n, 3)\n\n\[email protected](raises=librosa.ParameterError)\[email protected](\"width\", [-1, 0, [2, 3]])\ndef test_trans_local_width_fail(width):\n librosa.sequence.transition_local(5, width)\n\n\ndef test_trans_local_wrap_const():\n\n A = librosa.sequence.transition_local(5, 3, window=\"triangle\", wrap=True)\n\n A_true = np.asarray(\n [\n [0.5, 0.25, 0.0, 0.0, 0.25],\n [0.25, 0.5, 0.25, 0.0, 0.0],\n [0.0, 0.25, 0.5, 0.25, 0.0],\n [0.0, 0.0, 0.25, 0.5, 0.25],\n [0.25, 0.0, 0.0, 0.25, 0.5],\n ]\n )\n\n assert np.allclose(A, A_true)\n\n\ndef test_trans_local_nowrap_const():\n\n A = librosa.sequence.transition_local(5, 3, window=\"triangle\", wrap=False)\n\n A_true = np.asarray(\n [\n [2.0 / 3, 1.0 / 3, 0.0, 0.0, 0.0],\n [0.25, 0.5, 0.25, 0.0, 0.0],\n [0.0, 0.25, 0.5, 0.25, 0.0],\n [0.0, 0.0, 0.25, 0.5, 0.25],\n [0.0, 0.0, 0.0, 1.0 / 3, 2.0 / 3],\n ]\n )\n\n assert np.allclose(A, A_true)\n\n\ndef test_trans_local_wrap_var():\n\n A = librosa.sequence.transition_local(5, [2, 1, 3, 3, 2], window=\"ones\", wrap=True)\n\n A_true = np.asarray(\n [\n [0.5, 0.0, 0.0, 0.0, 0.5],\n [0.0, 1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0 / 3, 1.0 / 3, 1.0 / 3, 0.0],\n [0.0, 0.0, 1.0 / 3, 1.0 / 3, 1.0 / 3],\n [0.0, 0.0, 0.0, 0.5, 0.5],\n ]\n )\n\n assert np.allclose(A, A_true)\n\n\ndef test_trans_local_nowrap_var():\n\n A = librosa.sequence.transition_local(5, [2, 1, 3, 3, 2], window=\"ones\", wrap=False)\n\n A_true = np.asarray(\n [\n [1.0, 0.0, 0.0, 0.0, 
0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0 / 3, 1.0 / 3, 1.0 / 3, 0.0],\n [0.0, 0.0, 1.0 / 3, 1.0 / 3, 1.0 / 3],\n [0.0, 0.0, 0.0, 0.5, 0.5],\n ]\n )\n\n assert np.allclose(A, A_true)\n\n\[email protected](\"gap_onset\", [1, np.inf])\[email protected](\"gap_extend\", [1, np.inf])\[email protected](\"knight\", [False, True])\[email protected](\"backtrack\", [False, True])\ndef test_rqa_edge(gap_onset, gap_extend, knight, backtrack):\n\n rec = np.asarray([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]])\n\n out = librosa.sequence.rqa(\n rec, gap_onset=gap_onset, gap_extend=gap_extend, knight_moves=knight, backtrack=backtrack\n )\n\n if backtrack:\n score, path = out\n __validate_rqa_results(rec, score, path, gap_onset, gap_extend, backtrack, knight)\n assert len(path) == 3\n else:\n # without backtracking, make sure the output is just the score matrix\n assert out.shape == rec.shape\n\n\[email protected](\"gap_onset\", [1, np.inf])\[email protected](\"gap_extend\", [1, np.inf])\[email protected](\"knight\", [False, True])\ndef test_rqa_empty(gap_onset, gap_extend, knight):\n rec = np.zeros((5, 5))\n\n score, path = librosa.sequence.rqa(\n rec, gap_onset=gap_onset, gap_extend=gap_extend, knight_moves=knight, backtrack=True\n )\n\n assert score.shape == rec.shape\n assert np.allclose(score, 0)\n assert path.shape == (0, 2)\n\n\[email protected](\"gap_onset\", [1, np.inf])\[email protected](\"gap_extend\", [1, np.inf])\[email protected](\"knight\", [False, True])\[email protected](\"backtrack\", [False, True])\ndef test_rqa_interior(gap_onset, gap_extend, knight, backtrack):\n rec = np.asarray([[0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]])\n\n out = librosa.sequence.rqa(\n rec, gap_onset=gap_onset, gap_extend=gap_extend, knight_moves=knight, backtrack=backtrack\n )\n\n if backtrack:\n score, path = out\n __validate_rqa_results(rec, score, path, gap_onset, gap_extend, backtrack, knight)\n assert len(path) == 2\n else:\n # without backtracking, make sure the output is just the score matrix\n assert out.shape == rec.shape\n\n\[email protected](\"gap_onset\", [1, np.inf])\[email protected](\"gap_extend\", [1, np.inf])\ndef test_rqa_gaps(gap_onset, gap_extend):\n rec = np.ones((5, 5))\n librosa.sequence.rqa(rec, gap_onset=gap_onset, gap_extend=gap_extend)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_rqa_bad_onset():\n rec = np.ones((5, 5))\n librosa.sequence.rqa(rec, gap_onset=-1)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_rqa_bad_extend():\n rec = np.ones((5, 5))\n librosa.sequence.rqa(rec, gap_extend=-1)\n\n\ndef __validate_rqa_results(rec, score, path, gap_onset, gap_extend, backtrack, knight):\n # Test maximal end-point\n assert np.all(score[tuple(path[-1])] >= score)\n\n # Test non-zero start point\n assert rec[tuple(path[0])] > 0\n\n # If we can't have gaps, then all values must be nonzero\n if not np.isfinite(gap_onset) and not np.isfinite(gap_extend):\n assert np.all([rec[tuple(i)] > 0 for i in path])\n\n path_diff = np.diff(path, axis=0)\n if knight:\n for d in path_diff:\n assert np.allclose(d, (1, 1)) or np.allclose(d, (1, 2)) or np.allclose(d, (2, 1))\n else:\n # Without knight moves, only diagonal steps are allowed\n assert np.allclose(path_diff, 1)\n"
] | [
[
"numpy.diag",
"numpy.log",
"numpy.random.random",
"numpy.allclose",
"numpy.array_equal",
"numpy.isfinite",
"numpy.asarray",
"numpy.ones",
"numpy.all",
"numpy.diff",
"numpy.mod",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
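The viterbi tests above encode the classic healthy/fever example; the same call works outside the test harness. A short sketch running it directly, with the same inputs as test_viterbi_example:

import numpy as np
import librosa

p_init = np.asarray([0.6, 0.4])                    # healthy, fever
transition = np.asarray([[0.7, 0.3], [0.4, 0.6]])
emit_p = [dict(normal=0.5, cold=0.4, dizzy=0.1),
          dict(normal=0.1, cold=0.3, dizzy=0.6)]
obs = ['normal', 'cold', 'dizzy']
prob = np.asarray([[ep[o] for o in obs] for ep in emit_p])

path = librosa.sequence.viterbi(prob, transition, p_init)
print(path)  # [0 0 1]: healthy, healthy, fever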
WeiXuanChan/medImgProc | [
"d3f6da63d426993e1a5bccd322313e9e79ab039c"
] | [
"medImgProc/GUI.py"
] | [
"'''\nFile: GUI.py\nDescription: load all class for medImgProc\n Contains externally usable class\nHistory:\n Date Programmer SAR# - Description\n ---------- ---------- ----------------------------\n Author: [email protected] 12JAN2019 - Created\nAuthor: [email protected] 08OCT2019 - v1.4.0\n -added colortoggler\nAuthor: [email protected] 08OCT2019 - v1.5.2\n -added Intensity scaling\n - lower slider\nAuthor: [email protected] 10Jan2020 - v2.3.9\n -added cubic spline line drawing\n -removed latex dependency\n -debug function show() in image2DGUI\nAuthor: [email protected] 12Jan2020 - v2.3.10\n -debug keypress switch frame of rgb image\nAuthor: [email protected] 23MAR2020 - v2.6.4\n -added color contour \nAuthor: [email protected] 24MAR2020 - v2.6.13\n -allow save image\nAuthor: [email protected] 29APR2020 - v2.6.19\n -allow switch line\nRequirements:\n numpy.py\n matplotlib.py\n imageio.py\n\nKnown Bug:\n HSV color format not supported\nAll rights reserved.\n'''\n_version='2.6.19'\nimport logging\nlogger = logging.getLogger(__name__)\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib.widgets import Slider\nfrom matplotlib.widgets import Button\nfrom scipy import interpolate\n#matplotlib.rc('text', usetex=True)\ntry:\n import tkinter\n from tkinter import filedialog\n import os\nexcept:\n pass\n'''\nvariables\n'''\nSTD_INSTRUCTIONS='Press Enter to save and exit, Esc to exit\\n '\n'''\nInternal Use functions\n'''\ndef getLastTwoDimArray(imageArray,dimLoad,color=0):\n outputArray=np.copy(imageArray)\n if len(outputArray.shape)>(2+color):\n outputArray=getLastTwoDimArray(outputArray[dimLoad[0]],dimLoad[1:],color=color)\n return outputArray\ndef getFramePts(pts,dimLoad):\n newPts=np.array(pts)\n for n in range(len(dimLoad)-2):\n if len(newPts)!=0:\n filterFrame=(newPts[:,n]==dimLoad[n])\n newPts=newPts[filterFrame,:]\n else:\n break\n return newPts\ndef dimToTitle(dimension,showIndex):\n titleOutput=''\n for n in range(-1,-len(dimension)-1,-1):\n titleOutput+=dimension[n]+':'+str(showIndex[n])\n if n==-1:\n titleOutput+='^v ' #r'$\\updownarrow$'\n \n else:\n titleOutput+='<>'#r'$\\leftrightarrow$ \\hspace{1cm} '\n break\n #titleOutput+='\\n '+r'$\\leftarrow$ $\\rightarrow$ \\hspace{1cm} $\\uparrow$ $\\downarrow$'\n \n return titleOutput\n\n'''\nMain GUI class\n'''\nclass image2DGUI:\n def __init__(self,imageClass,addInstruct='',disable=[],initPointList=None,initLineList=None,manualToleranceRatio=0.05,showNow=True,contourImageArray=None):\n self.title=None\n self.addInstruct=STD_INSTRUCTIONS\n if addInstruct!='':\n self.addInstruct+=addInstruct+'\\n '\n if type(imageClass)==str:\n self.image=medImgProc.imread(imageClass)\n self.image=imageClass.clone()\n self.image.data=np.maximum(0,self.image.data)\n if contourImageArray is None:\n self.contourImage=None\n else:\n self.contourImage=imageClass.clone()\n self.contourImage.data[:]=contourImageArray\n self.contourImage.data=self.contourImage.data.astype(int)\n self.color=0\n if 'RGB' in self.image.dim:\n self.image.rearrangeDim('RGB',False)\n if self.contourImage is not None:\n self.contourImage.rearrangeDim('RGB',False)\n self.color=1\n elif 'RGBA' in self.image.dim:\n self.image.rearrangeDim('RGBA',False)\n if self.contourImage is not None:\n self.contourImage.rearrangeDim('RGBA',False)\n self.color=1\n if self.color==1:\n self.addInstruct+='press 1,2,3,.. 
to toggler color channel and 0 to show all.\\n '\n self.colorToggler=[]\n self.fig=plt.figure(1)\n self.showIndex=[]\n for n in range(len(self.image.data.shape)-self.color):\n self.showIndex.append(0)\n self.disable=disable\n self.sliderLoc=0.02\n self.connectionID=[]\n if 'click' not in self.disable or 'line' not in self.disable:\n self.connectionID.append(self.fig.canvas.mpl_connect('button_press_event', self.onclick))\n if 'line' not in self.disable:\n self.sliderLoc+=0.06\n self.connectionID.append(self.fig.canvas.mpl_connect('button_release_event', self.onrelease))\n self.connectionID.append(self.fig.canvas.mpl_connect('motion_notify_event', self.onmotion))\n \n self.connectionID.append(self.fig.canvas.mpl_connect('key_press_event',self.onKeypress)) \n self.enter=False\n self.scaleVisual=1.\n self.logVisual=0.\n self.line_selected=-1\n self.point_selected=-1\n self.show_line=False\n '''\n Return parameters\n '''\n if initPointList is None:\n self.points=np.empty((0,len(self.image.dim)))\n else:\n '''sanitize to show'''\n initPointList=np.copy(initPointList)\n for n in range(len(initPointList)):\n for m in range(len(initPointList[n])-2):\n initPointList[n,m]=np.round(initPointList[n,m])\n self.points=initPointList\n \n \n self.lineplt=[]\n self.lines=[]\n if initLineList is not None:\n for line in initLineList:\n self.lines.append(line.copy())\n self.show_line=True\n \n self.axslide=[]\n self.sSlide=[]\n \n self.manualToleranceRatio=manualToleranceRatio\n self.manualTolerance=(max(*self.image.data.shape[-2:])*self.manualToleranceRatio)**2.\n \n self.loadImageFrame()\n if showNow:\n plt.show()\n def sliderUpdate(self,val):\n self.line_selected=-1\n for n in range(len(self.showIndex)-2):\n self.showIndex[n]=int(self.sSlide[n].val)\n self.scaleVisual=self.sSlide[-2].val\n self.logVisual=self.sSlide[-1].val\n self.showNewFrame()\n if self.show_line:\n for nline in range(len(self.lines)):\n self.showNewLine(nline)\n def getLineIndex(self,y,x):\n chosen=None\n distance=float('inf')\n if self.line_selected<0:\n for nline in range(len(self.lines)):\n points=getFramePts(self.lines[nline],self.showIndex)\n if len(points)>1:\n tck,temp = interpolate.splprep([points[:,-1], points[:,-2]], s=0,k=min(4,len(points))-1)\n cspline_detectline = np.array(interpolate.splev(np.arange(0, 1.01, 0.01), tck)).T\n distance_temp=np.min(np.sum((cspline_detectline-np.array([[x,y]]))**2.,axis=1))\n if distance_temp<distance:\n distance=distance_temp\n chosen=nline\n elif len(points)==1:\n distance_temp=np.min(np.sum((np.array([points[:,-1], points[:,-2]])-np.array([[x,y]]))**2.,axis=1))\n if distance_temp<distance:\n distance=distance_temp\n chosen=nline\n elif len(self.lines[self.line_selected])>0:\n points=getFramePts(self.lines[self.line_selected],self.showIndex)\n distance=np.sum((points[:,-2:]-np.array([[y,x]]))**2.,axis=1)\n chosen=np.argmin(distance)\n return (chosen,distance)\n def onclick(self,event):\n if not(event.dblclick) and event.button==1 and event.inaxes==self.ax:\n newPt=np.array([*self.showIndex[:-2],event.ydata,event.xdata])\n if 'click' not in self.disable:\n self.points=np.vstack((self.points,newPt))\n self.showNewPoints()\n if self.show_line:\n lineIndex,distance=self.getLineIndex(event.ydata,event.xdata)\n if self.line_selected>=0:\n addlinept=len(self.lines[self.line_selected])\n if lineIndex is not None:\n if distance.min() < self.manualTolerance:\n self.point_selected=lineIndex\n addlinept=None\n elif addlinept>1:\n detectline=getFramePts(self.lines[self.line_selected],self.showIndex)\n 
tck,temp = interpolate.splprep([detectline[:,-1], detectline[:,-2]], s=0,k=min(4,len(detectline))-1)\n cspline_detectline = np.array(interpolate.splev(np.arange(0, 1.+1./100./(addlinept-1), 1./100./(addlinept-1)), tck)).T\n detectdistance=np.sum((cspline_detectline-np.array([[event.xdata,event.ydata]]))**2.,axis=1)\n if detectdistance.min()<self.manualTolerance:\n splineIndex=detectdistance.argmin()\n for npoint in range(1,addlinept):\n if splineIndex<np.argmin(np.sum((cspline_detectline-np.array([[self.lines[self.line_selected][npoint][-1],self.lines[self.line_selected][npoint][-2]]]))**2.,axis=1)):\n addlinept=npoint\n break\n if addlinept is not None:\n self.point_selected=-1\n self.lines[self.line_selected].insert(addlinept,newPt)\n self.showNewLine(self.line_selected)\n elif lineIndex is not None:\n if self.line_selected==-1:\n self.line_selected=lineIndex\n self.showNewLine(lineIndex)\n elif self.line_selected==-2:\n self.lines.pop(lineIndex)\n self.line_selected=-1\n self.loadImageFrame()\n \n def onrelease(self,event):\n self.point_selected=-1\n if self.line_selected>=0:\n if len(self.lines[self.line_selected])>0:\n self.showNewLine(self.line_selected)\n return\n def onmotion(self,event):\n if self.line_selected<0 or self.point_selected<0:\n return\n if event.xdata is None or event.ydata is None:\n if self.line_selected>=0 and self.point_selected>=0:\n self.lines[self.line_selected].pop(self.point_selected)\n self.point_selected=-1\n if len(self.lines[self.line_selected])>0:\n self.showNewLine(self.line_selected)\n else:\n self.lines.pop(self.line_selected)\n self.line_selected=-1\n self.loadImageFrame()\n return\n newPt=np.array([*self.showIndex[:-2],event.ydata,event.xdata])\n self.lines[self.line_selected][self.point_selected]=newPt.copy()\n self.showNewLine(self.line_selected)\n return\n def onKeypress(self,event):\n if event.key == 'escape':#return values and quit\n for connection in self.connectionID:\n self.fig.canvas.mpl_disconnect(connection)\n plt.close(event.canvas.figure)\n elif event.key == 'enter':\n self.enter=True\n for connection in self.connectionID:\n self.fig.canvas.mpl_disconnect(connection)\n plt.close(event.canvas.figure)\n elif event.key=='up':\n self.switchFrame(-3,1)\n elif event.key=='down':\n self.switchFrame(-3,-1)\n elif event.key=='right':\n self.switchFrame(-4,1)\n elif event.key=='left':\n self.switchFrame(-4,-1)\n elif event.key=='ctrl+z':\n self.removeLastPoint()\n elif self.color==1 and event.key=='0':\n self.colorToggler=[]\n self.showNewFrame()\n elif self.color==1 and event.key in ['1','2','3','4','5','6','7','8','9']:\n if (int(event.key)-1) in self.colorToggler:\n self.colorToggler.remove(int(event.key)-1)\n elif self.image.data.shape[-1] > (int(event.key)-1):\n self.colorToggler+=[int(event.key)-1]\n self.showNewFrame()\n elif event.key in self.image.dim:\n self.swapFrame(event.key)\n else:\n logger.info(event.key)\n def save_image(self,event):\n self.ax.axis('off')\n extent = self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())\n maxdpi=max(self.image.data.shape[-2-self.color]/extent.height,self.image.data.shape[-1-self.color]/extent.width)\n root = tkinter.Tk()\n fileName=root.withdraw()\n fileName=''.join(self.image.dim[(-2-self.color):][:2])\n for dimN in range(len(self.image.dim)-2-self.color):\n fileName+='_'+self.image.dim[dimN]+str(self.showIndex[dimN])\n tempdir = filedialog.asksaveasfilename(parent=root, initialdir=os.getcwd(), title='Save Image '+fileName+' as')\n if isinstance(tempdir,str):\n if 
tempdir[-4:]!='.png':\n tempdir+='.png'\n self.fig.savefig(tempdir, bbox_inches=extent,dpi=maxdpi)\n root.destroy()\n self.ax.axis('on')\n def togger_line(self,event):\n self.show_line=not(self.show_line)\n if not(self.show_line):\n self.line_selected=-1\n self.loadImageFrame()\n def new_line(self,event):\n self.show_line=True\n if len(self.lineplt)>self.line_selected and self.line_selected>=0:\n if len(self.lineplt[self.line_selected])>1:\n self.lineplt[self.line_selected][1].set_color('b')\n self.line_selected=len(self.lines)\n self.lines.append([])\n def edit_line(self,event):\n if len(self.lineplt)>self.line_selected:\n if len(self.lineplt[self.line_selected])>1:\n self.lineplt[self.line_selected][1].set_color('b')\n self.line_selected=-1\n def switch_line(self,event):\n temp_select=self.line_selected\n if (len(self.lines)-1)<=self.line_selected:\n self.line_selected=-1\n if temp_select!=-1:\n self.showNewLine(temp_select)\n else:\n self.line_selected+=1\n if temp_select!=-1:\n self.showNewLine(temp_select)\n self.showNewLine(self.line_selected)\n def del_line(self,event):\n self.line_selected=-2\n def switchFrame(self,index,val=1):\n self.line_selected=-1\n if len(self.showIndex)>=(-index):\n self.showIndex[index]+=val\n if self.showIndex[index]>(self.image.data.shape[len(self.showIndex)+index]-1):\n self.showIndex[index]=0\n elif self.showIndex[index]<0:\n self.showIndex[index]=self.image.data.shape[len(self.showIndex)+index]-1\n self.sSlide[len(self.showIndex)+index].set_val(self.showIndex[index])\n if self.show_line:\n self.loadImageFrame()\n else:\n self.showNewFrame()\n def swapFrame(self,axis):\n self.line_selected=-1\n if 'swap' not in self.disable:\n if self.color:\n transposeIndex=self.image.rearrangeDim([axis,self.image.dim[-1]],arrangeFront=False)\n if self.contourImage is not None:\n self.contourImage.rearrangeDim([axis,self.image.dim[-1]],arrangeFront=False)\n else:\n transposeIndex=self.image.rearrangeDim([axis],arrangeFront=False)\n if self.contourImage is not None:\n self.contourImage.rearrangeDim([axis],arrangeFront=False)\n newShowIndex=[]\n if self.color:\n transposeIndex=transposeIndex[:-1]\n for n in range(len(transposeIndex)):\n newShowIndex.append(self.showIndex[transposeIndex[n]])\n self.showIndex=newShowIndex\n self.points=self.points[:,transposeIndex]\n for nline in range(len(self.lines)):\n self.lines[nline]=self.lines[nline][:,transposeIndex]\n self.manualTolerance=(max(*self.image.data.shape[-2:])*self.manualToleranceRatio)**2.\n self.loadImageFrame()\n self.showNewFrame()\n def showNewFrame(self):\n newShowImage=getLastTwoDimArray(self.image.data,self.showIndex,color=self.color)\n if self.color:\n newShowImage[...,tuple(self.colorToggler)]=0\n newShowImage=np.maximum(0,np.minimum(255,(newShowImage*self.scaleVisual)**(10.**self.logVisual))).astype('uint8')\n self.main.set_data(newShowImage)\n self.ax.set_aspect(self.image.dimlen[self.image.dim[-2-self.color]]/self.image.dimlen[self.image.dim[-1-self.color]])\n if self.contourImage is not None:\n if self.contour is not None:\n for coll in self.contour.collections:\n coll.remove()\n showContour=getLastTwoDimArray(self.contourImage.data,self.showIndex,color=0)\n getlevels=np.arange(0.5+showContour.min(),showContour.max(),1)\n if len(getlevels)>0:\n self.contour=self.ax.contour(showContour,getlevels,linewidths=1.2)\n else:\n self.contour=None\n self.showNewPoints()\n \n pp=plt.setp(self.title,text=self.addInstruct+dimToTitle(self.image.dim[:-2-self.color],self.showIndex[:-2]))\n self.fig.canvas.draw()\n def 
showNewPoints(self):\n showpoints=getFramePts(self.points,self.showIndex)\n self.ptplt.set_offsets(showpoints[:,[-1,-2]])\n self.fig.canvas.draw()\n def showNewLine(self,lineIndex):\n temp_color='b'\n if lineIndex==self.line_selected:\n temp_color='r'\n showline=getFramePts(self.lines[lineIndex],self.showIndex)\n if len(showline)<=0:\n self.lineplt[lineIndex][0].set_visible(False)\n self.lineplt[lineIndex][1].set_visible(False)\n else:\n if len(self.lineplt)<=lineIndex:\n self.loadImageFrame()\n return;\n elif type(self.lineplt[lineIndex])!=list:\n self.loadImageFrame()\n return;\n self.lineplt[lineIndex][0].set_offsets(showline[:,[-1,-2]])\n self.lineplt[lineIndex][0].set_visible(True)\n self.lineplt[lineIndex][1].set_visible(True)\n if len(showline)>1:\n self.lineplt[lineIndex][0].set_color('b')\n tck,temp = interpolate.splprep([showline[:,-1], showline[:,-2]], s=0,k=min(4,len(showline))-1)\n cspline_line = interpolate.splev(np.arange(0, 1.01, 0.01), tck)\n self.lineplt[lineIndex][1].set_xdata(cspline_line[0])\n self.lineplt[lineIndex][1].set_ydata(cspline_line[1])\n self.lineplt[lineIndex][1].set_color(temp_color)\n #self.lineplt[lineIndex][1].set_offsets(np.array(cspline_line[0],cspline_line[1]).T)\n else:\n self.lineplt[lineIndex][0].set_color('r')\n self.lineplt[lineIndex][1].set_xdata(np.array([showline[0,-1],showline[0,-1]]))\n self.lineplt[lineIndex][1].set_ydata(np.array([showline[0,-2],showline[0,-2]]))\n self.lineplt[lineIndex][1].set_color(temp_color)\n #self.lineplt[lineIndex][1].set_offsets(np.array([showline[0,[-1,-2]],showline[0,[-1,-2]]]))\n if self.line_selected>=0 and self.point_selected>=0:\n if type(self.lineplt[-1])==list:\n self.lineplt.append(self.ax.scatter([self.lines[self.line_selected][self.point_selected][-1]],[self.lines[self.line_selected][self.point_selected][-2]],color='r',marker='x'))\n else:\n self.lineplt[-1].set_offsets([self.lines[self.line_selected][self.point_selected][-1],self.lines[self.line_selected][self.point_selected][-2]])\n self.lineplt[-1].set_visible(True)\n else:\n if type(self.lineplt[-1])!=list:\n self.lineplt[-1].set_visible(False)\n self.fig.canvas.draw()\n def loadImageFrame(self):\n plt.clf()\n self.ax = self.fig.add_subplot(111)\n self.fig.subplots_adjust(bottom=(len(self.showIndex)+3)*0.04)\n showImage=getLastTwoDimArray(self.image.data,self.showIndex,color=self.color)\n if self.color:\n showImage[...,tuple(self.colorToggler)]=0\n showImage=np.maximum(0,np.minimum(255,(showImage*self.scaleVisual)**(10.**self.logVisual))).astype('uint8')\n self.main=self.ax.imshow(showImage,cmap=matplotlib.cm.gray, vmin=0, vmax=255)\n self.ax.set_aspect(self.image.dimlen[self.image.dim[-2-self.color]]/self.image.dimlen[self.image.dim[-1-self.color]])\n if self.contourImage is not None:\n showContour=getLastTwoDimArray(self.contourImage.data,self.showIndex,color=0)\n getlevels=np.arange(0.5+showContour.min(),showContour.max(),1)\n if len(getlevels)>0:\n self.contour=self.ax.contour(showContour,getlevels,linewidths=1.2)\n else:\n self.contour=None\n showpoints=getFramePts(self.points,self.showIndex)\n self.ptplt=self.ax.scatter(showpoints[:,-1],showpoints[:,-2],color='r',marker='x')\n if self.show_line:\n self.lineplt=[]\n for nline in range(len(self.lines)):\n temp_color='b'\n if nline==self.line_selected:\n temp_color='r'\n if len(self.lines[nline])>0:\n self.lineplt.append([self.ax.scatter(np.array(self.lines[nline])[:,-1],np.array(self.lines[nline])[:,-2],color='b',marker='x')])\n if len(self.lines[nline])>1:\n tck,temp = 
interpolate.splprep([np.array(self.lines[nline])[:,-1], np.array(self.lines[nline])[:,-2]], s=0,k=min(4,len(self.lines[nline]))-1)\n cspline_line = interpolate.splev(np.arange(0, 1.01, 0.01), tck)\n self.lineplt[-1]+=self.ax.plot(cspline_line[0].copy(),cspline_line[1].copy(),color=temp_color)\n else:\n self.lineplt[-1]+=self.ax.plot([self.lines[nline][0][-1],self.lines[nline][0][-1]],[self.lines[nline][0][-2],self.lines[nline][0][-2]],color=temp_color)\n self.lineplt[-1][0].set_color('r')\n if len(self.showIndex)>2:\n if np.any(self.lines[nline][0][:(len(self.showIndex)-2)]!=self.showIndex[:-2]):\n self.lineplt[-1][0].set_visible(False)\n self.lineplt[-1][1].set_visible(False)\n if self.line_selected>=0 and self.point_selected>=0:\n self.lineplt.append(self.ax.scatter([self.lines[self.line_selected][self.point_selected][-1]],[self.lines[self.line_selected][self.point_selected][-2]],color='r',marker='x'))\n self.title=plt.title(self.addInstruct+dimToTitle(self.image.dim[:-2-self.color],self.showIndex[:-2]))\n plt.ylabel(self.image.dim[-2-self.color])\n plt.xlabel(self.image.dim[-1-self.color])\n\n '''set slider for image control'''\n axcolor = 'lightgoldenrodyellow'\n self.saveImageButton=Button(self.fig.add_axes([0.75, 0.02, 0.1, 0.05]), 'Save')\n self.saveImageButton.on_clicked(self.save_image)\n \n self.lineControl=[]\n if 'line' not in self.disable:\n self.lineControl.append(Button(self.fig.add_axes([0.05, 0.02, 0.1, 0.05]), 'Line'))\n self.lineControl[0].on_clicked(self.togger_line)\n self.lineControl.append(Button(self.fig.add_axes([0.2, 0.02, 0.1, 0.05]), 'new'))\n self.lineControl[1].on_clicked(self.new_line)\n self.lineControl.append(Button(self.fig.add_axes([0.35, 0.02, 0.1, 0.05]), 'edit...'))\n self.lineControl[2].on_clicked(self.edit_line)\n self.lineControl.append(Button(self.fig.add_axes([0.5, 0.02, 0.1, 0.05]), 'del...'))\n self.lineControl[3].on_clicked(self.del_line)\n self.lineControl.append(Button(self.fig.add_axes([0.65, 0.02, 0.1, 0.05]), 'switch'))\n self.lineControl[4].on_clicked(self.switch_line)\n \n self.axslide=[]\n self.sSlide=[]\n for n in range(len(self.showIndex)-2):\n self.axslide.append(self.fig.add_axes([0.1, self.sliderLoc+n*0.04, 0.65, 0.03], facecolor=axcolor))\n self.sSlide.append(Slider(self.axslide[-1], self.image.dim[n], 0, self.image.data.shape[n]-1, valinit=self.showIndex[n],valfmt=\"%i\"))\n self.sSlide[-1].on_changed(self.sliderUpdate)\n self.axslide.append(self.fig.add_axes([0.1, self.sliderLoc+(len(self.showIndex)-2)*0.04, 0.65, 0.03], facecolor=axcolor))\n self.sSlide.append(Slider(self.axslide[-1], 'Iscale', 0.1, 10., valinit=self.scaleVisual,valstep=0.1))\n self.sSlide[-1].on_changed(self.sliderUpdate)\n self.axslide.append(self.fig.add_axes([0.1, self.sliderLoc+(len(self.showIndex)-1)*0.04, 0.65, 0.03], facecolor=axcolor))\n self.sSlide.append(Slider(self.axslide[-1], 'logPOW', -1., 1., valinit=self.logVisual,valstep=0.02))\n self.sSlide[-1].on_changed(self.sliderUpdate)\n \n def removeLastPoint(self):\n self.points=self.points[:-1,:]\n self.showNewPoints()\n def show(self):\n self.fig=plt.figure(1)\n self.sliderLoc=0.02\n self.connectionID=[]\n if 'click' not in self.disable or 'line' not in self.disable:\n self.connectionID.append(self.fig.canvas.mpl_connect('button_press_event', self.onclick))\n if 'line' not in self.disable:\n self.sliderLoc+=0.06\n self.connectionID.append(self.fig.canvas.mpl_connect('button_release_event', self.onrelease))\n self.connectionID.append(self.fig.canvas.mpl_connect('motion_notify_event', self.onmotion))\n 
\n self.connectionID.append(self.fig.canvas.mpl_connect('key_press_event',self.onKeypress)) \n self.enter=False\n self.loadImageFrame()\n plt.show()\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.arange",
"numpy.vstack",
"numpy.round",
"matplotlib.widgets.Slider",
"numpy.copy",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylabel",
"numpy.argmin",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
datactive/bigbang | [
"ea2e9aab156490d1af965409adb60b68291281dc",
"ea2e9aab156490d1af965409adb60b68291281dc",
"ea2e9aab156490d1af965409adb60b68291281dc"
] | [
"bigbang/listserv.py",
"tests/analysis/test_listserv.py",
"bigbang/visualisation/stackedareachart.py"
] | [
"import datetime\nimport email\nimport logging\nimport os\nimport re\nimport subprocess\nimport time\nimport warnings\nimport mailbox\nfrom mailbox import mboxMessage\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple, Union\nfrom urllib.parse import urljoin, urlparse\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport yaml\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\nfrom config.config import CONFIG\n\nfrom bigbang.bigbang_io import (\n ListservMessageIO,\n ListservListIO,\n ListservArchiveIO,\n)\nfrom bigbang.utils import (\n get_paths_to_files_in_directory,\n get_paths_to_dirs_in_directory,\n)\n\nfilepath_auth = CONFIG.config_path + \"authentication.yaml\"\ndirectory_project = str(Path(os.path.abspath(__file__)).parent.parent)\nlogging.basicConfig(\n filename=directory_project + \"/listserv.scraping.log\",\n filemode=\"w\",\n level=logging.INFO,\n format=\"%(asctime)s %(message)s\",\n)\nlogger = logging.getLogger(__name__)\n\n\nclass ListservMessageParserWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservListWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservArchiveWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservMessageParser(email.parser.Parser):\n \"\"\"\n This class handles the creation of an mailbox.mboxMessage object\n (using the from_*() methods) and its storage in various other file formats\n (using the to_*() methods) that can be saved on the local memory.\n\n Parameters\n ----------\n website : Set 'True' if messages are going to be scraped from websites,\n otherwise 'False' if read from local memory.\n url_login : URL to the 'Log In' page.\n url_pref : URL to the 'Preferences'/settings page.\n login : Login credentials (username and password) that were used to set\n up AuthSession. 
You can create your own for the 3GPP archive.\n session : requests.Session() object for the Email archive website.\n\n Methods\n -------\n from_url()\n from_listserv_file()\n _get_header_from_html()\n _get_body_from_html()\n _get_header_from_listserv_file()\n _get_body_from_listserv_file()\n get_datetime()\n\n Example\n -------\n To create a Email message parser object, use the following syntax:\n >>> msg_parser = ListservMessageParser(\n >>> website=True,\n >>> login={\"username\": <your_username>, \"password\": <your_password>},\n >>> )\n\n To obtain the Email message content and return it as `email.message.EmailMessage`\n object, you need to do the following:\n >>> msg = msg_parser.from_url(\n >>> list_name=\"3GPP_TSG_RAN_DRAFTS\",\n >>> url=\"https://list.etsi.org/scripts/wa.exe?A2=ind2010B&L=3GPP_TSG_RAN_DRAFTS&O=D&P=29883\",\n >>> fields=\"total\",\n >>> )\n \"\"\"\n\n empty_header = {}\n\n def __init__(\n self,\n website=False,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n url_pref: str = \"https://list.etsi.org/scripts/wa.exe?PREF\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[requests.Session] = None,\n ):\n if website:\n if session is None:\n session = get_auth_session(url_login, **login)\n session = set_website_preference_for_header(url_pref, session)\n self.session = session\n\n def create_email_message(\n self,\n archived_at: str,\n body: str,\n **header,\n ) -> mboxMessage:\n \"\"\"\n Parameters\n ----------\n archived_at : URL to the Email message.\n body : String that contains the body of the message.\n header : Dictionary that contains all available header fields of the\n message.\n \"\"\"\n # crea EmailMessage\n msg = email.message.EmailMessage()\n if body is not None:\n try:\n msg.set_content(body) # don't use charset=\"utf-16\"\n except Exception:\n # UnicodeEncodeError: 'utf-16' codec can't encode character\n # '\\ud83d' in position 8638: surrogates not allowed\n pass\n for key, value in header.items():\n if \"content-type\" == key:\n msg.set_param(\"Content-Type\", value)\n elif \"mime-version\" == key:\n msg.set_param(\"MIME-Version\", value)\n elif \"content-transfer-encoding\" == key:\n msg.set_param(\"Content-Transfer-Encoding\", value)\n else:\n try:\n # TODO: find out why it sometimes raises\n # email/_header_value_parser.py\n # IndexError: list index out of range.\n # Also look into UTF-8 encoding.\n msg[key] = value\n except Exception:\n pass\n if (\n (msg[\"Message-ID\"] is None)\n and (msg[\"Date\"] is not None)\n and (msg[\"From\"] is not None)\n ):\n msg[\"Message-ID\"] = archived_at.split(\"/\")[-1]\n # convert to EmailMessage to mboxMessage\n mbox_msg = mboxMessage(msg)\n mbox_msg.add_header(\"Archived-At\", \"<\" + archived_at + \">\")\n return mbox_msg\n\n def from_url(\n self,\n list_name: str,\n url: str,\n fields: str = \"total\",\n ) -> mboxMessage:\n \"\"\"\n Parameters\n ----------\n list_name : The name of the LISTSERV Email list.\n url : URL of this Email\n fields : Indicates whether to return 'header', 'body' or 'total'/both or\n the Email. 
The latter is the default.\n \"\"\"\n soup = get_website_content(url, session=self.session)\n if soup == \"RequestException\":\n body = \"RequestException\"\n header = self.empty_header\n else:\n if fields in [\"header\", \"total\"]:\n header = self._get_header_from_html(soup)\n else:\n header = self.empty_header\n if fields in [\"body\", \"total\"]:\n body = self._get_body_from_html(list_name, url, soup)\n else:\n body = None\n return self.create_email_message(url, body, **header)\n\n def from_listserv_file(\n self,\n list_name: str,\n file_path: str,\n header_start_line_nr: int,\n fields: str = \"total\",\n ) -> mboxMessage:\n \"\"\"\n This method is required if the message is inside a file that was directly\n exported from LISTSERV 16.5 (e.g. by a member of 3GPP). Such files have\n an extension starting with LOG and ending with five digits.\n\n Parameters\n ----------\n list_name : The name of the LISTSERV Email list.\n file_path : Path to file that contains the Email list.\n header_start_line_nr : Line number in the file on which a new message starts.\n fields : Indicates whether to return 'header', 'body' or 'total'/both or\n the Email.\n \"\"\"\n file = open(file_path, \"r\", errors=\"replace\")\n fcontent = file.readlines()\n file.close()\n header_end_line_nr = self._get_header_end_line_nr(\n fcontent, header_start_line_nr\n )\n if fields in [\"header\", \"total\"]:\n header = self._get_header_from_listserv_file(\n fcontent, header_start_line_nr, header_end_line_nr\n )\n else:\n header = self.empty_header\n if fields in [\"body\", \"total\"]:\n body = self._get_body_from_listserv_file(\n fcontent, header_end_line_nr\n )\n else:\n body = None\n return self.create_email_message(file_path, body, **header)\n\n def _get_header_end_line_nr(\n self,\n content: List[str],\n header_start_line_nr: int,\n ) -> List[int]:\n \"\"\"\n The header ends with the first empty line encountered.\n\n Parameters\n ----------\n content : The content of one LISTSERV file.\n header_start_line_nr : Line number in the file on which a new message starts.\n \"\"\"\n for lnr, lcont in enumerate(content[header_start_line_nr:]):\n if len(lcont) <= 1:\n header_end_line_nr = header_start_line_nr + lnr\n break\n return header_end_line_nr\n\n def _get_header_from_listserv_file(\n self,\n content: List[str],\n header_start_line_nr: int,\n header_end_line_nr: int,\n ) -> Dict[str, str]:\n \"\"\"\n Lexer for the message header.\n\n Parameters\n ----------\n content : The content of one LISTSERV-file.\n header_start_line_nr : Line number in the file on which a new message starts.\n header_end_line_nr : Line number in the file on which a new message ends.\n \"\"\"\n # TODO re-write using email.parser.Parser\n content = content[header_start_line_nr:header_end_line_nr]\n # collect important info from LISTSERV header\n header = {}\n for lnr in range(len(content)):\n line = content[lnr]\n # get header keyword and value\n if re.match(r\"\\S+:\\s+\\S+\", line):\n key = line.split(\":\")[0]\n value = line.replace(key + \":\", \"\").strip().rstrip(\"\\n\")\n # if not at the end of header\n if lnr < len(content) - 1:\n # if header-keyword value is split over two lines\n if not re.match(r\"\\S+:\\s+\\S+\", content[lnr + 1]):\n value += \" \" + content[lnr + 1].strip().rstrip(\"\\n\")\n header[key.lower()] = value\n return header\n\n def _get_body_from_listserv_file(\n self,\n content: List[str],\n header_end_line_nr: int,\n ) -> str:\n \"\"\"\n Lexer for the message body/payload.\n\n Parameters\n ----------\n content : The content of 
one LISTSERV-file.\n header_end_line_nr : Line number in the file on which a new message ends.\n \"\"\"\n # TODO re-write using email.parser.Parser\n found = False\n # find body 'position' in file\n for line_nr, line in enumerate(content[header_end_line_nr:]):\n if \"=\" * 73 in line:\n body_end_line_nr = line_nr + header_end_line_nr\n found = True\n break\n if not found:\n body_end_line_nr = -1\n # get body content\n body = content[header_end_line_nr:body_end_line_nr]\n # remove empty lines and join into one string\n body = (\"\").join([line for line in body if len(line) > 1])\n return body\n\n def _get_header_from_html(self, soup: BeautifulSoup) -> Dict[str, str]:\n \"\"\"\n Lexer for the message header.\n\n Parameters\n ----------\n soup : HTML code from which the Email header can be obtained.\n\n Note\n ----\n Currently, this module encodes Chinese characters in UTF-8.\n This should be avoided. When improving this, you can use\n https://list.etsi.org/scripts/wa.exe?A2=3GPP_TSG_CT_WG4;d2c3487b.2106A&S=\n to test.\n \"\"\"\n try:\n for string in [\"Subject\", \"SUBJECT\"]:\n try:\n _text = soup.find(\n \"b\",\n text=re.compile(r\"^\\b%s\\b\" % string),\n ) # Sometimes this returns None!\n text = _text.parent.parent.parent.parent # .text\n break\n except Exception:\n continue\n # collect important info from LISTSERV header\n header = {}\n for line in text.find_all(\"tr\"):\n key = str(line.find_all(re.compile(\"^b\"))[0])\n key = re.search(r\"<b>(.*?)<\\/b>\", key).group(1).lower()\n key = re.sub(r\":\", \"\", key).strip()\n if \"subject\" in key:\n value = repr(\n str(line.find_all(re.compile(\"^a\"))[0].text).strip()\n )\n else:\n try: # Listserv 17\n value = repr(str(line.find_all(re.compile(\"^div\"))[0]))\n value = re.search(r'\">(.*)<\\/div>', value).group(1)\n if \"content-type\" in key:\n value = value.split(\";\")[0]\n except Exception: # Listserv 16.5\n value = repr(str(line.find_all(re.compile(\"^p\"))[1]))\n value = re.search(r\"<p>(.*)<\\/p>\", value).group(1)\n value = value.split(\" <\")[0]\n value = re.sub(r\">\", \">\", value).strip()\n value = re.sub(r\"<\", \"<\", value).strip()\n # remove Carriage return\n value = re.sub(r\"\\\\r\", \"\", value).strip()\n # remove Linefeed\n value = re.sub(r\"\\\\n\", \"\", value).strip()\n if \"parts/attachments\" in key:\n break\n elif \"comments\" in key:\n key = \"comments-to\"\n value = re.sub(r\"To:\", \"\", value).strip()\n header[key] = value\n except Exception:\n header = self.empty_header\n return header\n\n def _get_body_from_html(\n self, list_name: str, url: str, soup: BeautifulSoup\n ) -> Union[str, None]:\n \"\"\"\n Lexer for the message body/payload.\n This methods look first whether the body is available in text/plain,\n before it looks for the text/html option. 
If neither is available it\n returns None.\n\n Therefore this method does not try to return the richest information\n content, but simply the ascii format.\n\n Parameters\n ----------\n list_name : The name of the LISTSERV Email list.\n url : URL to the Email.\n soup : HTML code from which the Email body can be obtained.\n \"\"\"\n # TODO re-write using email.parser.Parser\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n a_tags = soup.select(f'a[href*=\"A3=\"][href*=\"{list_name}\"]')\n href_plain_text = [\n tag.get(\"href\") for tag in a_tags if \"Fplain\" in tag.get(\"href\")\n ]\n href_html_text = [\n tag.get(\"href\") for tag in a_tags if \"Fhtml\" in tag.get(\"href\")\n ]\n try:\n if href_plain_text:\n body_soup = get_website_content(\n urljoin(url_root, href_plain_text[0])\n )\n if body_soup == \"RequestException\":\n return body_soup\n else:\n return body_soup.find(\"pre\").text\n elif href_html_text:\n body_soup = get_website_content(\n urljoin(url_root, href_html_text[0])\n )\n if body_soup == \"RequestException\":\n return body_soup\n else:\n return body_soup.get_text(strip=True)\n except Exception:\n logger.info(\n f\"The message body of {url} which is part of the \"\n f\"list {list_name} could not be loaded.\"\n )\n return None\n\n @staticmethod\n def get_datetime(line: str) -> str:\n \"\"\"\n Parameters\n ----------\n line : String that contains date and time.\n \"\"\"\n line = (\" \").join(line.split(\" \")[:-1]).lstrip()\n # convert format to local version of date and time\n date_time_obj = datetime.datetime.strptime(\n line, \"%a, %d %b %Y %H:%M:%S\"\n )\n return date_time_obj.strftime(\"%c\")\n\n @staticmethod\n def create_message_id(date: str, from_address: str) -> str:\n \"\"\"\n Parameters\n ----------\n date : Date and time of Email.\n from_address : The sender address of the Email.\n \"\"\"\n message_id = (\".\").join([date, from_address])\n # remove special characters\n message_id = re.sub(r\"[^a-zA-Z0-9]+\", \"\", message_id)\n return message_id\n\n @staticmethod\n def to_dict(msg: mboxMessage) -> Dict[str, List[str]]:\n \"\"\"Convert mboxMessage to a Dictionary\"\"\"\n return ListservMessageIO.to_dict(msg)\n\n @staticmethod\n def to_pandas_dataframe(msg: mboxMessage) -> pd.DataFrame:\n \"\"\"Convert mboxMessage to a pandas.DataFrame\"\"\"\n return ListservMessageIO.to_pandas_dataframe(msg)\n\n @staticmethod\n def to_mbox(msg: mboxMessage, filepath: str):\n \"\"\"\n Parameters\n ----------\n msg : The Email.\n filepath : Path to file in which the Email will be stored.\n \"\"\"\n return ListservMessageIO.to_mbox(msg, filepath)\n\n\nclass ListservList(ListservListIO):\n \"\"\"\n This class handles the scraping of a single mailing list of a public archive\n in the LISTSERV 16.5 format.\n\n Parameters\n ----------\n name : The of whom the list (e.g. 
3GPP_COMMON_IMS_XFER, IEEESCO-DIFUSION, ...)\n source : Contains the information of the location of the mailing list.\n It can be either an URL where the list or a path to the file(s).\n msgs : List of mboxMessage objects\n\n Methods\n -------\n from_url()\n from_messages()\n from_mbox()\n from_listserv_files()\n from_listserv_directories()\n get_messages_from_url()\n get_message_urls()\n get_period_urls()\n get_line_numbers_of_header_starts()\n get_index_of_elements_in_selection()\n to_dict()\n to_pandas_dataframe()\n to_mbox()\n\n\n Example\n -------\n To scrape a Listserv mailing list from an URL and store it in\n run-time memory, we do the following\n >>> mlist = ListservList.from_url(\n >>> name=\"IEEE-TEST\",\n >>> url=\"https://listserv.ieee.org/cgi-bin/wa?A0=IEEE-TEST\",\n >>> select={\n >>> \"years\": 2015,\n >>> \"months\": \"November\",\n >>> \"weeks\": 4,\n >>> \"fields\": \"header\",\n >>> },\n >>> login={\"username\": <your_username>, \"password\": <your_password>},\n >>> )\n\n To save it as *.mbox file we do the following\n >>> mlist.to_mbox(path_to_file)\n \"\"\"\n\n def __init__(\n self,\n name: str,\n source: Union[List[str], str],\n msgs: List[mboxMessage],\n ):\n self.name = name\n self.source = source\n self.messages = msgs\n\n def __len__(self) -> int:\n \"\"\"Get number of messsages within the mailing list.\"\"\"\n return len(self.messages)\n\n def __iter__(self):\n \"\"\"Iterate over each message within the mailing list.\"\"\"\n return iter(self.messages)\n\n def __getitem__(self, index) -> mboxMessage:\n \"\"\"Get specific message at position `index` within the mailing list.\"\"\"\n return self.messages[index]\n\n @classmethod\n def from_url(\n cls,\n name: str,\n url: str,\n select: Optional[dict] = None,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n url_pref: str = \"https://list.etsi.org/scripts/wa.exe?PREF\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[requests.Session] = None,\n ) -> \"ListservList\":\n \"\"\"\n Parameters\n ----------\n name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url : URL to the LISTSERV list.\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n url_login : URL to the 'Log In' page\n url_pref : URL to the 'Preferences'/settings page\n login : Login credentials (username and password) that were used to set\n up AuthSession. You can create your own for the 3GPP archive.\n session : requests.Session() object for the Email archive website.\n \"\"\"\n if session is None:\n session = get_auth_session(url_login, **login)\n session = set_website_preference_for_header(url_pref, session)\n if select is None:\n select = {\"fields\": \"total\"}\n elif \"fields\" not in list(select.keys()):\n select[\"fields\"] = \"total\"\n msgs = cls.get_messages_from_url(name, url, select, session)\n return cls.from_messages(name, url, msgs)\n\n @classmethod\n def from_messages(\n cls,\n name: str,\n url: str,\n messages: List[Union[str, mboxMessage]],\n fields: str = \"total\",\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n url_pref: str = \"https://list.etsi.org/scripts/wa.exe?PREF\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservList\":\n \"\"\"\n Parameters\n ----------\n name : Name of the list of messages, e.g. 
'3GPP_TSG_SA_WG2_UPCON'\n url : URL to the LISTSERV Email list.\n messages : Can either be a list of URLs to specific LISTSERV messages\n or a list of `mboxMessage` objects.\n url_login : URL to the 'Log In' page.\n url_pref : URL to the 'Preferences'/settings page.\n login : Login credentials (username and password) that were used to set\n up AuthSession. You can create your own for the 3GPP archive.\n session : requests.Session() object for the LISTSERV Email archive website.\n \"\"\"\n if not messages:\n # create empty ListservList for ListservArchive\n msgs = messages\n elif isinstance(messages[0], str):\n # create ListservList from message URLs\n msgs = []\n msg_parser = ListservMessageParser(\n website=True,\n url_login=url_login,\n login=login,\n )\n for msg_url in tqdm(messages, ascii=True, desc=name):\n msg = msg_parser.from_url(\n list_name=name,\n url=msg_url,\n fields=fields,\n )\n if msg.get_payload() == \"RequestException\":\n time.sleep(30)\n else:\n msgs.append(msg)\n logger.info(f\"Recorded the message {msg_url}.\")\n else:\n # create ListservList from list of mboxMessage\n msgs = messages\n return cls(name, url, msgs)\n\n @classmethod\n def from_mbox(cls, name: str, filepath: str) -> \"ListservList\":\n \"\"\"\n Parameters\n ----------\n name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'.\n filepath : Path to file in which mailing list is stored.\n \"\"\"\n msgs = ListservListIO.from_mbox(filepath)\n return cls(name, filepath, msgs)\n\n @classmethod\n def from_listserv_directories(\n cls,\n name: str,\n directorypaths: List[str],\n filedsc: str = \"*.LOG?????\",\n select: Optional[dict] = None,\n ) -> \"ListservList\":\n \"\"\"\n This method is required if the files that contain the list messages\n were directly exported from LISTSERV 16.5 (e.g. by a member of 3GPP).\n Each mailing list has its own directory and is split over multiple\n files with an extension starting with LOG and ending with five digits.\n\n Parameters\n ----------\n name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'.\n directorypaths : List of directory paths where LISTSERV formatted\n messages are.\n filedsc : A description of the relevant files, e.g. *.LOG?????\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n _filepaths = []\n # run through directories and collect all filepaths\n for directorypath in directorypaths:\n _filepaths.append(\n get_paths_to_files_in_directory(directorypath, filedsc)\n )\n # flatten list of lists\n filepaths = [fp for li in _filepaths for fp in li]\n return cls.from_listserv_files(name, filepaths, select)\n\n @classmethod\n def from_listserv_files(\n cls,\n name: str,\n filepaths: List[str],\n select: Optional[dict] = None,\n ) -> \"ListservList\":\n \"\"\"\n This method is required if the files that contain the list messages\n were directly exported from LISTSERV 16.5 (e.g. by a member of 3GPP).\n Each mailing list has its own directory and is split over multiple\n files with an extension starting with LOG and ending with five digits.\n Compared to `ListservList.from_listserv_directories()`, this method\n reads messages from single files, instead of all the files contained in\n a directory.\n\n Parameters\n ----------\n name : Name of the list of messages, e.g. 
'3GPP_TSG_SA_WG2_UPCON'\n filepaths : List of file paths where LISTSERV formatted messages are.\n Such files can have a file extension of the form: *.LOG1405D\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n if select is None:\n select = {\"fields\": \"total\"}\n msgs = []\n for filepath in filepaths:\n # TODO: implement selection filter\n file = open(filepath, \"r\", errors=\"replace\")\n fcontent = file.readlines()\n # get positions of all Emails in file\n header_start_line_nrs = cls.get_line_numbers_of_header_starts(\n fcontent\n )\n file.close()\n # run through all messages in file\n msg_parser = ListservMessageParser(website=False)\n for msg_nr in header_start_line_nrs:\n msgs.append(\n msg_parser.from_listserv_file(\n name,\n filepath,\n msg_nr,\n select[\"fields\"],\n )\n )\n return cls(name, filepaths, msgs)\n\n @classmethod\n def get_messages_from_url(\n cls,\n name: str,\n url: str,\n select: Optional[dict] = None,\n session: Optional[dict] = None,\n ) -> List[mboxMessage]:\n \"\"\"\n Generator that returns all messages within a certain period\n (e.g. January 2021, Week 5).\n\n Parameters\n ----------\n name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url : URL to the LISTSERV list.\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n session : requests.Session() object for the LISTSERV Email archive website.\n \"\"\"\n # get all message URLs\n msg_urls = cls.get_message_urls(name, url, select)\n msg_parser = ListservMessageParser(website=True, session=session)\n # get all message contents\n msgs = []\n for msg_url in tqdm(msg_urls, ascii=True, desc=name):\n msg = msg_parser.from_url(\n list_name=name,\n url=msg_url,\n fields=select[\"fields\"],\n )\n if msg.get_payload() == \"RequestException\":\n time.sleep(30)\n else:\n msgs.append(msg)\n logger.info(f\"Recorded the message {msg_url}.\")\n # wait between loading messages, for politeness\n time.sleep(1)\n return msgs\n\n @classmethod\n def get_message_urls(\n cls,\n name: str,\n url: str,\n select: Optional[dict] = None,\n ) -> List[str]:\n \"\"\"\n Parameters\n ----------\n name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url : URL to the LISTSERV list.\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n\n Returns\n -------\n List of all selected URLs of the messages in the mailing list.\n \"\"\"\n if select is None:\n select = {\"fields\": \"total\"}\n msg_urls = []\n # run through periods\n for period_url in ListservList.get_period_urls(url, select):\n # run through messages within period\n for msg_url in ListservList.get_messages_urls(name, period_url):\n msg_urls.append(msg_url)\n return msg_urls\n\n @classmethod\n def get_period_urls(\n cls, url: str, select: Optional[dict] = None\n ) -> List[str]:\n \"\"\"\n All messages within a certain period\n (e.g. January 2021, Week 5).\n\n Parameters\n ----------\n url : URL to the LISTSERV list.\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. 
written in a certain year, month, week-of-month\n \"\"\"\n # create dictionary with key indicating period and values the url\n periods, urls_of_periods = cls.get_all_periods_and_their_urls(url)\n\n if any(\n period in list(select.keys())\n for period in [\"years\", \"months\", \"weeks\"]\n ):\n for key, value in select.items():\n if key == \"years\":\n cond = lambda x: int(re.findall(r\"\\d{4}\", x)[0])\n elif key == \"months\":\n cond = lambda x: x.split(\" \")[0]\n elif key == \"weeks\":\n cond = lambda x: int(x.split(\" \")[-1])\n else:\n continue\n\n periodquants = [cond(period) for period in periods]\n\n indices = ListservList.get_index_of_elements_in_selection(\n periodquants,\n urls_of_periods,\n value,\n )\n\n periods = [periods[idx] for idx in indices]\n urls_of_periods = [urls_of_periods[idx] for idx in indices]\n return urls_of_periods\n\n @staticmethod\n def get_all_periods_and_their_urls(\n url: str,\n ) -> Tuple[List[str], List[str]]:\n \"\"\"\n LISTSERV groups messages into weekly time bundles. This method\n obtains all the URLs that lead to the messages of each time bundle.\n \"\"\"\n # wait between loading messages, for politeness\n time.sleep(0.5)\n\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n soup = get_website_content(url)\n periods = [list_tag.find(\"a\").text for list_tag in soup.find_all(\"li\")]\n urls_of_periods = [\n urljoin(url_root, list_tag.find(\"a\").get(\"href\"))\n for list_tag in soup.find_all(\"li\")\n ]\n return periods, urls_of_periods\n\n @staticmethod\n def get_index_of_elements_in_selection(\n times: List[Union[int, str]],\n urls: List[str],\n filtr: Union[tuple, list, int, str],\n ) -> List[int]:\n \"\"\"\n Filter out messages that where in a specific period. Period here is a set\n containing units of year, month, and week-of-month which can have the following\n example elements:\n - years: (1992, 2010), [2000, 2008], 2021\n - months: [\"January\", \"July\"], \"November\"\n - weeks: (1, 4), [1, 5], 2\n\n Parameters\n ----------\n times : A list containing information of the period for each\n group of mboxMessage.\n urls : Corresponding URLs of each group of mboxMessage of which the\n period info is contained in `times`.\n filtr : Containing info on what should be filtered.\n\n Returns\n -------\n Indices of to the elements in `times`/`ursl`.\n \"\"\"\n if isinstance(filtr, tuple):\n # filter year or week in range\n cond = lambda x: (np.min(filtr) <= x <= np.max(filtr))\n if isinstance(filtr, list):\n # filter in year, week, or month in list\n cond = lambda x: x in filtr\n if isinstance(filtr, int):\n # filter specific year or week\n cond = lambda x: x == filtr\n if isinstance(filtr, str):\n # filter specific month\n cond = lambda x: x == filtr\n return [idx for idx, time in enumerate(times) if cond(time)]\n\n @staticmethod\n def get_name_from_url(url: str) -> str:\n \"\"\"Get name of mailing list.\"\"\"\n return url.split(\"A0=\")[-1]\n\n @classmethod\n def get_messages_urls(cls, name: str, url: str) -> List[str]:\n \"\"\"\n Parameters\n ----------\n name : Name of the `ListservList`\n url : URL to group of messages that are within the same period.\n\n Returns\n -------\n List to URLs from which `mboxMessage` can be initialized.\n \"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n soup = get_website_content(url)\n a_tags = soup.select(f'a[href*=\"A2=\"][href*=\"{name}\"]')\n if a_tags:\n a_tags = [urljoin(url_root, url.get(\"href\")) for url in a_tags]\n return a_tags\n\n @classmethod\n def get_line_numbers_of_header_starts(\n cls, 
content: List[str]\n ) -> List[int]:\n \"\"\"\n By definition LISTSERV logs separate new messages by a row\n of 73 equal signs.\n\n Parameters\n ----------\n content : The content of one LISTSERV file.\n\n Returns\n -------\n List of line numbers where header starts\n \"\"\"\n return [\n line_nr for line_nr, line in enumerate(content) if \"=\" * 73 in line\n ]\n\n def to_dict(self, include_body: bool = True) -> Dict[str, List[str]]:\n \"\"\"\n Parameters\n ----------\n include_body : A boolean that indicates whether the message body should\n be included or not.\n\n Returns\n -------\n A Dictionary with the first key layer being the header field names and\n the \"body\" key. Each value field is a list containing the respective\n header field contents arranged by the order as they were scraped from\n the web. This format makes the conversion to a pandas.DataFrame easier.\n \"\"\"\n return ListservListIO.to_dict(self.messages, include_body)\n\n def to_pandas_dataframe(self, include_body: bool = True) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n include_body : A boolean that indicates whether the message body should\n be included or not.\n\n Returns\n -------\n Converts the mailing list into a pandas.DataFrame object in which each\n row represents an Email.\n \"\"\"\n return ListservListIO.to_pandas_dataframe(self.messages, include_body)\n\n def to_mbox(self, dir_out: str, filename: Optional[str] = None):\n \"\"\"Save mailing list to .mbox files.\"\"\"\n if filename is None:\n ListservListIO.to_mbox(self.messages, dir_out, self.name)\n else:\n ListservListIO.to_mbox(self.messages, dir_out, filename)\n\n\nclass ListservArchive(object):\n \"\"\"\n This class handles the scraping of a public mailing list archive that uses\n the LISTSERV 16.5 and 17 format.\n An archive is a list of ListservList elements.\n\n Parameters\n ----------\n name : The name of the archive (e.g. 
3GPP, IEEE, ...)\n url : The URL where the archive lives\n lists : A list containing the mailing lists as `ListservList` types\n\n Methods\n -------\n from_url()\n from_mbox()\n from_mailing_lists()\n from_listserv_directory()\n get_lists()\n get_sections()\n to_dict()\n to_pandas_dataframe()\n to_mbox()\n\n Example\n -------\n To scrape a Listserv mailing list archive from an URL and store it in\n run-time memory, we do the following\n >>> arch = ListservArchive.from_url(\n >>> name=\"IEEE\",\n >>> url_root=\"https://listserv.ieee.org/cgi-bin/wa?\",\n >>> url_home=\"https://listserv.ieee.org/cgi-bin/wa?HOME\",\n >>> select={\n >>> \"years\": 2015,\n >>> \"months\": \"November\",\n >>> \"weeks\": 4,\n >>> \"fields\": \"header\",\n >>> },\n >>> login={\"username\": <your_username>, \"password\": <your_password>},\n >>> instant_save=False,\n >>> only_mlist_urls=False,\n >>> )\n\n To save it as *.mbox file we do the following\n >>> arch.to_mbox(path_to_directory)\n \"\"\"\n\n def __init__(\n self, name: str, url: str, lists: List[Union[ListservList, str]]\n ):\n self.name = name\n self.url = url\n self.lists = lists\n\n def __len__(self):\n \"\"\"Get number of mailing lists within the mailing archive.\"\"\"\n return len(self.lists)\n\n def __iter__(self):\n \"\"\"Iterate over each mailing list within the mailing archive.\"\"\"\n return iter(self.lists)\n\n def __getitem__(self, index):\n \"\"\"Get specific mailing list at position `index` from the mailing archive.\"\"\"\n return self.lists[index]\n\n @classmethod\n def from_url(\n cls,\n name: str,\n url_root: str,\n url_home: str,\n select: Optional[dict] = None,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n url_pref: str = \"https://list.etsi.org/scripts/wa.exe?PREF\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n instant_save: bool = True,\n only_mlist_urls: bool = True,\n ) -> \"ListservArchive\":\n \"\"\"\n Create ListservArchive from a given URL.\n\n Parameters\n ----------\n name : Email archive name, such that multiple instances of `ListservArchive`\n can easily be distinguished.\n url_root : The invariant root URL that does not change no matter what\n part of the LISTSERV archive we access.\n url_home : The 'home' space of the LISTSERV archive. This is required as\n it contains the different sections which we obtain using `get_sections()`.\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n url_login : URL to the 'Log In' page.\n url_pref : URL to the 'Preferences'/settings page.\n login : Login credentials (username and password) that were used to set\n up AuthSession. You can create your own for the 3GPP archive.\n session : requests.Session() object for the LISTSERV Email archive website.\n instant_save : Boolean giving the choice to save a `ListservList` as\n soon as it is completely scraped or collect entire archive. 
The\n prior is recommended if a large number of mailing lists are\n scraped which can require a lot of memory and time.\n only_list_urls : Boolean giving the choice to collect only `ListservList`\n URLs or also their contents.\n \"\"\"\n if session is None:\n session = get_auth_session(url_login, **login)\n session = set_website_preference_for_header(url_pref, session)\n lists = cls.get_lists_from_url(\n url_root,\n url_home,\n select,\n session,\n instant_save,\n only_mlist_urls,\n )\n return cls.from_mailing_lists(\n name,\n url_root,\n lists,\n select,\n session,\n only_mlist_urls,\n )\n\n @classmethod\n def from_mailing_lists(\n cls,\n name: str,\n url_root: str,\n url_mailing_lists: Union[List[str], List[ListservList]],\n select: Optional[dict] = None,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n url_pref: str = \"https://list.etsi.org/scripts/wa.exe?PREF\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n only_mlist_urls: bool = True,\n instant_save: Optional[bool] = True,\n ) -> \"ListservArchive\":\n \"\"\"\n Create ListservArchive from a given list of 'ListservList'.\n\n Parameters\n ----------\n name : Email archive name, such that multiple instances of `ListservArchive`\n can easily be distinguished.\n url_root : The invariant root URL that does not change no matter what\n part of the LISTSERV archive we access.\n url_mailing_lists : This argument can either be a list of `ListservList`\n objects or a list of string containing the URLs to the LISTSERV\n Email lists of interest.\n url_login : URL to the 'Log In' page.\n url_pref : URL to the 'Preferences'/settings page.\n login : Login credentials (username and password) that were used to set\n up AuthSession. You can create your own for the 3GPP archive.\n session : requests.Session() object for the LISTSERV Email archive website.\n only_list_urls : Boolean giving the choice to collect only `ListservList`\n URLs or also their contents.\n instant_save : Boolean giving the choice to save a `ListservList` as\n soon as it is completely scraped or collect entire archive. The\n prior is recommended if a large number of mailing lists are\n scraped which can require a lot of memory and time.\n \"\"\"\n if isinstance(url_mailing_lists[0], str) and only_mlist_urls is False:\n if session is None:\n session = get_auth_session(url_login, **login)\n session = set_website_preference_for_header(url_pref, session)\n lists = []\n for url in url_mailing_lists:\n mlist_name = url.split(\"A0=\")[-1]\n mlist = ListservList.from_url(\n name=mlist_name,\n url=url,\n select=select,\n session=session,\n )\n if len(mlist) != 0:\n if instant_save:\n dir_out = CONFIG.mail_path + name\n Path(dir_out).mkdir(parents=True, exist_ok=True)\n mlist.to_mbox(dir_out=dir_out)\n else:\n logger.info(f\"Recorded the list {mlist.name}.\")\n lists.append(mlist)\n else:\n lists = url_mailing_lists\n return cls(name, url_root, lists)\n\n @classmethod\n def from_listserv_directory(\n cls,\n name: str,\n directorypath: str,\n folderdsc: str = \"*\",\n filedsc: str = \"*.LOG?????\",\n select: Optional[dict] = None,\n ) -> \"ListservArchive\":\n \"\"\"\n This method is required if the files that contain the archive messages\n were directly exported from LISTSERV 16.5 (e.g. 
by a member of 3GPP).\n Each mailing list has its own subdirectory and is split over multiple\n files with an extension starting with LOG and ending with five digits.\n\n Parameters\n ----------\n name : Email archive name, such that multiple instances of `ListservArchive`\n can easily be distinguished.\n directorypath : Where the `ListservArchive` can be initialised.\n folderdsc : A description of the relevant folders\n filedsc : A description of the relevant files, e.g. *.LOG?????\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n lists = []\n _dirpaths_to_lists = get_paths_to_dirs_in_directory(\n directorypath, folderdsc\n )\n # run through directories and collect all filepaths\n for dirpath in _dirpaths_to_lists:\n _filepaths = get_paths_to_files_in_directory(dirpath, filedsc)\n mlist = ListservList.from_listserv_files(\n dirpath.split(\"/\")[-2],\n _filepaths,\n select,\n )\n lists.append(mlist)\n return cls(name, directorypath, lists)\n\n @classmethod\n def from_mbox(\n cls,\n name: str,\n directorypath: str,\n filedsc: str = \"*.mbox\",\n ) -> \"ListservArchive\":\n \"\"\"\n Parameters\n ----------\n name : Email archive name, such that multiple instances of `ListservArchive`\n can easily be distinguished.\n directorypath : Path to the folder in which `ListservArchive` is stored.\n filedsc : Optional filter that only reads files matching the description.\n By default all files with an mbox extension are read.\n \"\"\"\n filepaths = get_paths_to_files_in_directory(directorypath, filedsc)\n lists = []\n for filepath in filepaths:\n name = filepath.split(\"/\")[-1].split(\".\")[0]\n lists.append(ListservList.from_mbox(name, filepath))\n return cls(name, directorypath, lists)\n\n @staticmethod\n def get_lists_from_url(\n url_root: str,\n url_home: str,\n select: dict,\n session: Optional[str] = None,\n instant_save: bool = True,\n only_mlist_urls: bool = True,\n ) -> List[Union[ListservList, str]]:\n \"\"\"\n Created dictionary of all lists in the archive.\n\n Parameters\n ----------\n url_root : The invariant root URL that does not change no matter what\n part of the LISTSERV archive we access.\n url_home : The 'home' space of the LISTSERV archive. This is required as\n it contains the different sections which we obtain using `get_sections()`.\n select : Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n session : requests.Session() object for the LISTSERV Email archive website.\n instant_save : Boolean giving the choice to save a `ListservList` as\n soon as it is completely scraped or collect entire archive. 
The\n prior is recommended if a large number of mailing lists are\n scraped which can require a lot of memory and time.\n only_list_urls : Boolean giving the choice to collect only `ListservList`\n URLs or also their contents.\n\n Returns\n -------\n archive_dict : the keys are the names of the lists and the value their url\n \"\"\"\n archive = []\n # run through archive sections\n for url in list(\n ListservArchive.get_sections(url_root, url_home).keys()\n ):\n soup = get_website_content(url)\n a_tags_in_section = soup.select(\n f'a[href^=\"{urlparse(url).path}?A0=\"]',\n )\n\n mlist_urls = [\n urljoin(url_root, a_tag.get(\"href\"))\n for a_tag in a_tags_in_section\n ]\n mlist_urls = list(set(mlist_urls)) # remove duplicates\n\n if only_mlist_urls:\n # collect mailing-list urls\n for mlist_url in mlist_urls:\n name = ListservList.get_name_from_url(mlist_url)\n # check if mailing list contains messages in period\n _period_urls = ListservList.get_all_periods_and_their_urls(\n mlist_url\n )[1]\n # check if mailing list is public\n if len(_period_urls) > 0:\n loops = 0\n for _period_url in _period_urls:\n loops += 1\n nr_msgs = len(\n ListservList.get_messages_urls(\n name=name, url=_period_url\n )\n )\n if nr_msgs > 0:\n archive.append(mlist_url)\n break\n else:\n # collect mailing-list contents\n for mlist_url in mlist_urls:\n name = ListservList.get_name_from_url(mlist_url)\n mlist = ListservList.from_url(\n name=name,\n url=mlist_url,\n select=select,\n session=session,\n )\n if len(mlist) != 0:\n if instant_save:\n dir_out = CONFIG.mail_path + name\n Path(dir_out).mkdir(parents=True, exist_ok=True)\n mlist.to_mbox(dir_out=CONFIG.mail_path)\n archive.append(mlist.name)\n else:\n logger.info(f\"Recorded the list {mlist.name}.\")\n archive.append(mlist)\n return archive\n\n def get_sections(url_root: str, url_home: str) -> int:\n \"\"\"\n Get different sections of archive.\n On the Listserv 16.5 website they look like:\n [3GPP] [3GPP–AT1] [AT2–CONS] [CONS–EHEA] [EHEA–ERM_] ...\n On the Listserv 17 website they look like:\n [<<][<]1-50(798)[>][>>]\n\n Returns\n -------\n If sections exist, it returns their urls and names. Otherwise it returns\n the url_home.\n \"\"\"\n soup = get_website_content(url_home)\n sections = soup.select(\n 'a[href*=\"INDEX=\"][href*=\"p=\"]',\n )\n archive_sections_dict = {}\n if sections:\n for sec in sections:\n key = urljoin(url_root, sec.get(\"href\"))\n value = sec.text\n if value in [\"Next\", \"Previous\"]:\n continue\n archive_sections_dict[key] = value\n archive_sections_dict[re.sub(r\"p=[0-9]+\", \"p=1\", key)] = \"FIRST\"\n else:\n archive_sections_dict[url_home] = \"Home\"\n return archive_sections_dict\n\n def to_conversationkg_dict(self) -> Dict[str, List[str]]:\n \"\"\"\n Place all message in all lists into a dictionary of the form:\n dic = {\n \"message_ID1\": {\n \"body\": ...,\n \"subject\": ...,\n ... ,\n }\n \"message_ID2\": {\n \"body\": ...,\n \"subject\": ...,\n ... 
,\n }\n }\n \"\"\"\n # initialize dictionary\n dic = {}\n msg_nr = 0\n # run through lists\n for mlist in self.lists:\n # run through messages\n for msg in mlist.messages:\n dic[f\"ID{msg_nr}\"] = msg.to_dict()\n msg_nr += 1\n return dic\n\n def to_dict(self, include_body: bool = True) -> Dict[str, List[str]]:\n \"\"\"\n Concatenates mailing list dictionaries created using\n `ListservList.to_dict()`.\n \"\"\"\n return ListservArchiveIO.to_dict(self.lists, include_body)\n\n def to_pandas_dataframe(self, include_body: bool = True) -> pd.DataFrame:\n \"\"\"\n Concatenates mailing list pandas.DataFrames created using\n `ListservList.to_pandas_dataframe()`.\n \"\"\"\n return ListservArchiveIO.to_pandas_dataframe(self.lists, include_body)\n\n def to_mbox(self, dir_out: str):\n \"\"\"\n Save Archive content to .mbox files\n \"\"\"\n ListservArchiveIO.to_mbox(self.lists, dir_out)\n\n\ndef set_website_preference_for_header(\n url_pref: str,\n session: requests.Session,\n) -> requests.Session:\n \"\"\"\n Set the 'Email Headers' of the 'Archive Preferences' for the auth session\n to 'Show All Headers'. Otherwise only a restricted list of header fields is\n shown.\n \"\"\"\n url_archpref = url_pref + \"&TAB=2\"\n payload = {\n \"Email Headers\": \"b\",\n }\n session.post(url_archpref, data=payload)\n return session\n\n\ndef get_auth_session(\n url_login: str, username: str, password: str\n) -> requests.Session:\n \"\"\"\n Create AuthSession.\n\n There are three ways to create an AuthSession:\n - pass username & password directly into the method\n - create a /bigbang/config/authentication.yaml file that contains keys\n - type them into the terminal when the method 'get_login_from_terminal'\n is raised\n \"\"\"\n if os.path.isfile(filepath_auth):\n # read from /config/authentication.yaml\n with open(filepath_auth, \"r\") as stream:\n auth_key = yaml.safe_load(stream)\n username = auth_key[\"username\"]\n password = auth_key[\"password\"]\n else:\n # ask user for login keys\n username, password = get_login_from_terminal(username, password)\n\n if username is None or password is None:\n # continue without authentication\n return None\n else:\n # Start the AuthSession\n session = requests.Session()\n # Create the payload\n payload = {\n \"LOGIN1\": \"\",\n \"Y\": username,\n \"p\": password,\n \"X\": \"\",\n }\n # Post the payload to the site to log in\n session.post(url_login, data=payload)\n return session\n\n\ndef get_login_from_terminal(\n username: Union[str, None],\n password: Union[str, None],\n file_auth: str = directory_project + \"/config/authentication.yaml\",\n) -> Tuple[Union[str, None], Union[str, None]]:\n \"\"\"\n Get login key from user during run time if 'username' and/or 'password' is 'None'.\n Return 'None' if no reply within 15 sec.\n \"\"\"\n if username is None or password is None:\n record = True\n else:\n record = False\n if username is None:\n username = ask_for_input(\"Enter your Email: \")\n if password is None:\n password = ask_for_input(\"Enter your Password: \")\n if record and isinstance(username, str) and isinstance(password, str):\n loginkey_to_file(username, password, file_auth)\n return username, password\n\n\ndef ask_for_input(request: str) -> Union[str, None]:\n timeout = 15\n end_time = time.time() + timeout\n while time.time() < end_time:\n reply = input(request)\n try:\n assert isinstance(reply, str)\n break\n except Exception:\n reply = None\n continue\n return reply\n\n\ndef loginkey_to_file(\n username: str,\n password: str,\n file_auth: str,\n) -> None:\n \"\"\"Save login key to 
yaml\"\"\"\n file = open(file_auth, \"w\")\n file.write(f\"username: '{username}'\\n\")\n file.write(f\"password: '{password}'\")\n file.close()\n\n\ndef get_website_content(\n url: str,\n session: Optional[requests.Session] = None,\n) -> Union[str, BeautifulSoup]:\n \"\"\"\n Get HTML code from website\n\n Note\n ----\n Servers don't like it when one is sending too many requests from same\n ip address in short period of time. Therefore we need to:\n a) catch 'requests.exceptions.RequestException' errors\n (includes all possible errors to be on the safe side),\n b) safe intermediate results,\n c) continue where we left off at a later stage.\n \"\"\"\n # TODO: include option to change BeautifulSoup args\n try:\n if session is None:\n sauce = requests.get(url)\n assert sauce.status_code == 200\n soup = BeautifulSoup(sauce.content, \"html.parser\")\n else:\n sauce = session.get(url)\n soup = BeautifulSoup(sauce.text, \"html.parser\")\n return soup\n except requests.exceptions.RequestException as e:\n if \"A2=\" in url:\n # if URL of mboxMessage\n logger.info(f\"{e} for {url}.\")\n return \"RequestException\"\n else:\n SystemExit()\n",
"import os\nimport tempfile\nfrom pathlib import Path\nfrom unittest import mock\nfrom unittest import TestCase\n\nimport numpy as np\nimport pytest\nimport yaml\n\nfrom bigbang.analysis.listserv import ListservArchive\nfrom bigbang.analysis.listserv import ListservList\n\nfrom config.config import CONFIG\n\ndir_temp = tempfile.gettempdir()\nfile_temp_mbox = dir_temp + \"/listserv.mbox\"\nfile_auth = CONFIG.config_path + \"authentication.yaml\"\nauth_key_mock = {\"username\": \"bla\", \"password\": \"bla\"}\n\n\[email protected](name=\"march\", scope=\"module\")\ndef get_mailingarchive():\n march = ListservArchive.from_mbox(\n name=\"3GPP\",\n directorypath=CONFIG.test_data_path + \"3GPP_mbox/\",\n filedsc=\"3GPP_TSG_*\",\n )\n return march\n\n\[email protected](name=\"mlist\", scope=\"module\")\ndef get_mailinglist():\n mlist = ListservList.from_mbox(\n name=\"3GPP_TSG_SA_WG4_EVS\",\n filepath=CONFIG.test_data_path + \"3GPP_mbox/3GPP_TSG_SA_WG4_EVS.mbox\",\n )\n return mlist\n\n\nclass TestListservList:\n def test__to_percentage(self):\n abso = np.array([1, 3])\n perc = ListservList.to_percentage(abso)\n np.testing.assert_array_equal(perc, np.array([0.25, 0.75]))\n\n def test__get_name_localpart_domain(self):\n addr = '\"gabin frederic\" <[email protected]>'\n name, localpart, domain = ListservList.get_name_localpart_domain(addr)\n assert name == \"gabin frederic\"\n assert localpart == \"frederic.gabin\"\n assert domain == \"dolby.com\"\n\n def test__period_of_activity(self, mlist):\n datetimes = mlist.period_of_activity()\n years = [dt.year for dt in datetimes]\n assert years == [2020, 2021]\n\n def test__crop_by_year(self, mlist):\n _mlist = mlist.crop_by_year(2020)\n assert len(_mlist.df.index.values) == 25\n datetimes = _mlist.period_of_activity()\n years = [dt.year for dt in datetimes]\n assert years == [2020, 2020]\n\n def test__crop_by_address(self, mlist):\n _mlist = mlist.crop_by_address(\n header_field=\"from\",\n per_address_field={\"domain\": [\"samsung.com\"]},\n )\n assert len(_mlist.df.index.values) == 1\n\n def test__crop_by_subject(self, mlist):\n _mlist = mlist.crop_by_subject(match=\"EVS SWG Sessions\", place=0)\n assert len(_mlist.df.index.values) == 3\n\n def test__get_domains(self, mlist):\n domains = mlist.get_domains(\n header_fields=[\"comments-to\"], return_msg_counts=True\n )\n domains_comp = [\n \"ericsson.com\",\n \"qti.qualcomm.com\",\n \"list.etsi.org\",\n \"usherbrooke.ca\",\n \"philips.com\",\n ]\n for domain in domains[\"comments-to\"]:\n assert domain[0] in domains_comp\n if domain[0] == \"qti.qualcomm.com\":\n assert domain[1] == 8\n domains = mlist.get_domains(\n header_fields=[\"from\"], return_msg_counts=False\n )\n domains_comp = [\n \"samsung.com\",\n \"qti.qualcomm.com\",\n \"philips.com\",\n \"iis.fraunhofer.de\",\n \"ericsson.com\",\n \"usherbrooke.ca\",\n \"3gpp.org\",\n \"dolby.com\",\n \"qosound.com\",\n ]\n assert set(domains[\"from\"]) == set(domains_comp)\n\n def test__get_domainscount(self, mlist):\n domains = mlist.get_domainscount(\n header_fields=[\"comments-to\"],\n per_year=True,\n )\n assert domains[\"comments-to\"][2020] == 2\n assert domains[\"comments-to\"][2021] == 4\n domains = mlist.get_domainscount(\n header_fields=[\"from\"],\n per_year=False,\n )\n assert domains[\"from\"] == 9\n\n def test__get_localparts(self, mlist):\n localparts = mlist.get_localparts(\n header_fields=[\"comments-to\"],\n per_domain=True,\n return_msg_counts=False,\n )\n assert localparts[\"comments-to\"][\"ericsson.com\"] == [\"tomas.toftgard\"]\n 
assert set(localparts[\"comments-to\"][\"qti.qualcomm.com\"]) == set(\n [\"nleung\", \"ivarga\"]\n )\n localparts = mlist.get_localparts(\n header_fields=[\"comments-to\"],\n per_domain=False,\n return_msg_counts=True,\n )\n localparts = list(map(list, zip(*localparts[\"comments-to\"])))\n assert \"3gpp_tsg_sa_wg4_video\" in localparts[0]\n assert \"ivarga\" in localparts[0]\n assert \"milan.jelinek\" in localparts[0]\n assert set(localparts[1]) == {1, 2, 3, 4, 6, 7}\n\n def test__get_localpartscount(self, mlist):\n localparts = mlist.get_localpartscount(\n header_fields=[\"comments-to\"],\n per_domain=True,\n per_year=False,\n )\n assert localparts[\"comments-to\"][\"list.etsi.org\"] == 5\n assert localparts[\"comments-to\"][\"usherbrooke.ca\"] == 1\n assert localparts[\"comments-to\"][\"qti.qualcomm.com\"] == 2\n localparts = mlist.get_localpartscount(\n header_fields=[\"from\"],\n per_domain=False,\n per_year=True,\n )\n assert localparts[\"from\"][2020] == 6\n assert localparts[\"from\"][2021] == 9\n\n def test__get_threadsroot(self, mlist):\n subjects = mlist.get_threadsroot()\n subjects_true = {\n \"Draft EVS-8a\": 6,\n \"IVAS-1 v0.4.0 available in the Inbox\": 7,\n \"Updated CRs to 26.442/443/444/452 in Inbox\": 8,\n \"Draft IVAS-8a in Draft folder\": 9,\n \"Revised IVAS-1 in Draft folder\": 10,\n \"Draft LS reply to SG12 on P.SUPPL800 & draft IVAS call for labs\": 11,\n \"Information related to EVS SWG Sessions during SA4#115e meeting\": 13,\n \"Draft IVAS-8a (IVAS test plan skeleton with Appendix with example test designs)\": 14,\n \"Information related to EVS SWG Sessions during SA4#114e meeting\": 15,\n \"IVAS-1 is in the Inbox now\": 16,\n \"Information for #113e EVS SWG participants\": 19,\n \"IVAS-7a_v.0.2.0 available in S4-210315\": 20,\n \"Final IVAS-1 is available in the Inbox\": 21,\n \"Rev1 of S4-210133 (IVAS-1) is available in the draft folder\": 22,\n \"FW: [11.5, S4-210129, Block A, 3 Feb 16:00 CET] Update to: Audio mixing of multiple streaming in ITT4RT\": 23,\n \"Draft revised agenda and report template\": 34,\n \"FW: [11.5; 1451; 18 Nov 1600 CET] Audio mixing of multiple streaming in ITT4RT - for agreement\": 27,\n \"Information related to EVS SWG sessions\": 28,\n \"3GPP SA4#110-e SQ SWG\": 29,\n \"Update on the Tohru Hand raising Tool\": 30,\n \"Wednesday meeting\": 31,\n \"FW: Updated Draft Schedule of MTSI SWG Telco sessions at SA4#110-e\": 33,\n \"3GPP SA4#110-e EVS SWG\": 41,\n \"EVS SWG on 28th May: cancelled\": 42,\n \"GTM Links A/B/C: SA4#109-e SWG Sessions\": 44,\n \"subscribe\": 45,\n \"SQA4 Breakout Sessions: =?utf-8?q?Today=E2=80=99s?= link for the online sessions\": 46,\n \"Hosted: Agenda for SA4#108-e meeting\": 47,\n \"test mail -\": 49,\n }\n for sl in subjects.keys():\n assert subjects[sl] == subjects_true[sl]\n\n def test__get_threadsrootcount(self, mlist):\n count = mlist.get_threadsrootcount()\n assert count == 29 # as they are all replies\n\n def test__get_messagescount(self, mlist):\n msgcount = mlist.get_messagescount()\n assert msgcount == 50\n msgcount = mlist.get_messagescount(\n header_fields=[\"comments-to\"],\n per_address_field=\"domain\",\n per_year=False,\n )\n assert msgcount[\"comments-to\"][\"list.etsi.org\"] == 17\n assert msgcount[\"comments-to\"][\"usherbrooke.ca\"] == 3\n assert msgcount[\"comments-to\"][\"qti.qualcomm.com\"] == 8\n msgcount = mlist.get_messagescount(\n header_fields=[\"from\"],\n per_address_field=\"localpart\",\n per_year=True,\n )\n assert msgcount[\"from\"][2020][\"milan.jelinek\"] == 1\n assert 
msgcount[\"from\"][2021][\"milan.jelinek\"] == 3\n assert msgcount[\"from\"][2021][\"markus.multrus\"] == 2\n\n def test__get_messagescount_per_timezone(self, mlist):\n msgcount = mlist.get_messagescount_per_timezone()\n assert msgcount[\"+00:00\"] == 38\n assert msgcount[\"+08:00\"] == 6\n assert msgcount[\"-04:00\"] == 3\n assert msgcount[\"-05:00\"] == 1\n\n def test__get_sender_receiver_dict(self, mlist):\n dic = mlist.get_sender_receiver_dict()\n dic_true = {\n \"ericsson.com\": {\"usherbrooke.ca\": 1, \"qti.qualcomm.com\": 1},\n \"usherbrooke.ca\": {\"ericsson.com\": 1, \"qti.qualcomm.com\": 2},\n \"qti.qualcomm.com\": {\"usherbrooke.ca\": 2},\n \"philips.com\": {\"qti.qualcomm.com\": 1, \"philips.com\": 1},\n \"iis.fraunhofer.de\": {\"qti.qualcomm.com\": 2},\n \"3gpp.org\": {\"list.etsi.org\": 15, \"qti.qualcomm.com\": 1},\n \"samsung.com\": {\"list.etsi.org\": 2},\n \"qosound.com\": {\"qti.qualcomm.com\": 1},\n \"dolby.com\": {},\n \"list.etsi.org\": {},\n }\n for key1, value1 in dic.items():\n for key2, value2 in value1.items():\n assert dic_true[key1][key2] == value2\n",
"from typing import Dict, List, Optional, Tuple, Union\nimport numpy as np\n\nimport pylab\nfrom colour import Color\nfrom pylab import cm\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nfrom matplotlib.pyplot import figure\n\nfrom bigbang.visualisation import utils\n\n\ndef get_ylabels(data) -> List[str]:\n ylabels = list(\n set([ykey for ydic in data.values() for ykey in ydic.keys()])\n )\n return ylabels\n\n\ndef data_transformation(\n idata: dict,\n ylabels: Optional[List[str]] = None,\n percentage: bool = False,\n) -> np.ndarray:\n \"\"\"\n Parameters\n ----------\n idata :\n ylabels :\n percentage :\n\n Returns\n -------\n odata : array with the format (# of ylabels, # of xlabels)\n \"\"\"\n if ylabels is None:\n # collect all ylabels\n ylabels = get_ylabels(idata)\n # create numpy array and fill sparse matrix\n odata = np.zeros((len(ylabels), len(idata.keys())))\n for iy, ylab in enumerate(ylabels):\n for ix, ydic in enumerate(idata.values()):\n if ylab in list(ydic.keys()):\n odata[iy, ix] = ydic[ylab]\n if percentage:\n odata = odata / np.sum(odata, axis=0)\n return odata\n\n\ndef stacked_area_chart(\n data: dict,\n ax: mpl.axes,\n domains_in_focus: Optional[list] = None,\n percentage: bool = False,\n colormap: Optional[mpl.colors.LinearSegmentedColormap] = None,\n color_default: Optional[np.array] = None,\n):\n \"\"\"\n Parameters\n ----------\n data : Dictionary with a format {'x_axis_labels': {'y_axis_labels': y_values}}\n domains_in_focus :\n percentage :\n \"\"\"\n x = list(data.keys())\n y = data_transformation(data, percentage)\n ylabels = get_ylabels(data)\n colors = utils.create_color_palette(\n ylabels,\n domains_in_focus,\n colormap,\n include_dof=True,\n return_dict=False,\n )\n ax.stackplot(\n x,\n y,\n colors=colors,\n )\n ax.set_xlim(np.min(x), np.max(x))\n ax.set_ylim(np.min(y), np.max(y))\n return ax\n"
] | [
[
"numpy.max",
"numpy.min"
],
[
"numpy.array"
],
[
"numpy.max",
"numpy.sum",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
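A note on the stacking transform in the bigbang visualisation module above: data_transformation flattens a nested mapping {x_label: {y_label: value}} into a dense matrix of shape (len(ylabels), len(xlabels)), filling absent (x, y) pairs with zero, and optionally normalises each column to shares. The sketch below is an illustrative, self-contained re-implementation of that idea, not bigbang's API; the names counts and to_matrix are invented for the example.

    import numpy as np

    # toy input: messages per domain (y) per year (x); invented example data
    counts = {
        2020: {"list.etsi.org": 3, "qti.qualcomm.com": 1},
        2021: {"list.etsi.org": 2, "usherbrooke.ca": 4},
    }

    def to_matrix(idata, percentage=False):
        # fix the row order so the matrix layout is reproducible
        ylabels = sorted({y for ydic in idata.values() for y in ydic})
        odata = np.zeros((len(ylabels), len(idata)))
        for iy, ylab in enumerate(ylabels):
            for ix, ydic in enumerate(idata.values()):
                odata[iy, ix] = ydic.get(ylab, 0.0)
        if percentage:
            # each column then sums to one, i.e. per-x shares
            odata = odata / odata.sum(axis=0)
        return ylabels, odata

    labels, m = to_matrix(counts, percentage=True)
    print(labels)         # ['list.etsi.org', 'qti.qualcomm.com', 'usherbrooke.ca']
    print(m.sum(axis=0))  # [1. 1.]

Each row of the matrix is one y-series, which is exactly the layout matplotlib's ax.stackplot expects.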
steipatr/EMAworkbench | [
"3438b7a48d7104a121d0d28a9cbadc8219ddb74c"
] | [
"ema_workbench/em_framework/outcomes.py"
] | [
"'''\nModule for outcome classes\n\n'''\nimport abc\nimport collections\nimport numbers\nimport six\n\nimport pandas\n\nfrom .util import Variable\nfrom ema_workbench.util.ema_exceptions import EMAError\nfrom ..util import get_module_logger\n\n# Created on 24 mei 2011\n#\n# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>\n\n__all__ = ['Outcome', 'ScalarOutcome', 'ArrayOutcome', 'TimeSeriesOutcome',\n 'Constraint']\n_logger = get_module_logger(__name__)\n\n\nclass AbstractOutcome(Variable):\n '''\n Base Outcome class\n\n Parameters\n ----------\n name : str\n Name of the outcome.\n kind : {INFO, MINIMZE, MAXIMIZE}, optional\n variable_name : str, optional\n if the name of the outcome in the underlying model\n is different from the name of the outcome, you can\n supply the variable name as an optional argument,\n if not provided, defaults to name\n function : callable, optional\n a callable to perform postprocessing on data retrieved\n from model\n expected_range : 2 tuple, optional\n expected min and max value for outcome,\n used by HyperVolume convergence metric\n shape : {tuple, None} optional\n\n Attributes\n ----------\n name : str\n kind : int\n variable_name : str\n function : callable\n shape : tuple\n\n '''\n __metaclass__ = abc.ABCMeta\n\n MINIMIZE = -1\n MAXIMIZE = 1\n INFO = 0\n\n def __init__(self, name, kind=INFO, variable_name=None,\n function=None, expected_range=None,\n shape=None):\n super(AbstractOutcome, self).__init__(name)\n\n if function is not None and not callable(function):\n raise ValueError('function must be a callable')\n if variable_name:\n if (not isinstance(variable_name, six.string_types)) and (\n not all(isinstance(elem, six.string_types) for elem in variable_name)):\n raise ValueError(\n 'variable name must be a string or list of strings')\n if expected_range is not None and len(expected_range) != 2:\n raise ValueError('expected_range must be a min-max tuple')\n self.kind = kind\n self.variable_name = variable_name\n self.function = function\n self._expected_range = expected_range\n self.shape = shape\n\n def process(self, values):\n if self.function:\n var_names = self.variable_name\n\n n_variables = len(var_names)\n try:\n n_values = len(values)\n except TypeError:\n n_values = None\n\n if (n_values is None) and (n_variables == 1):\n return self.function(values)\n elif n_variables != n_values:\n raise ValueError(\n ('number of variables is {}, '\n 'number of outputs is {}').format(\n n_variables, n_values))\n else:\n return self.function(*values)\n else:\n if len(values) > 1:\n raise EMAError(('more than one value returned without '\n 'processing function'))\n\n return values[0]\n\n def __eq__(self, other):\n comparison = [all(hasattr(self, key) == hasattr(other, key) and\n getattr(self, key) == getattr(other, key) for key\n in self.__dict__.keys())]\n comparison.append(self.__class__ == other.__class__)\n return all(comparison)\n\n def __repr__(self, *args, **kwargs):\n klass = self.__class__.__name__\n name = self.name\n\n rep = '{}(\\'{}\\''.format(klass, name)\n\n if self.variable_name != [self.name]:\n rep += ', variable_name={}'.format(self.variable_name)\n if self.function:\n rep += ', function={}'.format(self.function)\n\n rep += ')'\n\n return rep\n\n\nclass ScalarOutcome(AbstractOutcome):\n '''\n Scalar Outcome class\n\n Parameters\n ----------\n name : str\n Name of the outcome.\n kind : {INFO, MINIMZE, MAXIMIZE}, optional\n variable_name : str, optional\n if the name of the outcome in the underlying model\n is different from the 
name of the outcome, you can\n supply the variable name as an optional argument,\n if not provided, defaults to name\n function : callable, optional\n a callable to perform postprocessing on data retrieved\n from model\n expected_range : 2 tuple, optional\n expected min and max value for outcome,\n used by HyperVolume convergence metric\n\n Attributes\n ----------\n name : str\n kind : int\n variable_name : str\n function : callable\n shape : tuple\n expected_range : tuple\n\n '''\n\n @property\n def expected_range(self):\n if self._expected_range is None:\n raise ValueError(\n 'no expected_range is set for {}'.format(\n self.variable_name))\n return self._expected_range\n\n @expected_range.setter\n def expected_range(self, expected_range):\n self._expected_range = expected_range\n\n def __init__(self, name, kind=AbstractOutcome.INFO, variable_name=None,\n function=None, expected_range=None):\n super(ScalarOutcome, self).__init__(name, kind,\n variable_name=variable_name,\n function=function)\n self.expected_range = expected_range\n\n def process(self, values):\n values = super(ScalarOutcome, self).process(values)\n if not isinstance(values, numbers.Number):\n raise EMAError(\n f\"outcome {self.name} should be a scalar, but is {type(values)}: {values}\")\n return values\n\n\nclass ArrayOutcome(AbstractOutcome):\n '''Array Outcome class for n-dimensional collections\n\n Parameters\n ----------\n name : str\n Name of the outcome.\n variable_name : str, optional\n if the name of the outcome in the underlying model\n is different from the name of the outcome, you can\n supply the variable name as an optional argument,\n if not provided, defaults to name\n function : callable, optional\n a callable to perform postprocessing on data retrieved\n from model\n expected_range : 2 tuple, optional\n expected min and max value for outcome,\n used by HyperVolume convergence metric\n shape : {tuple, None}, optional\n\n Attributes\n ----------\n name : str\n kind : int\n variable_name : str\n function : callable\n shape : tuple\n expected_range : tuple\n\n\n '''\n\n def __init__(self, name, variable_name=None,\n function=None, expected_range=None,\n shape=None):\n super(\n ArrayOutcome,\n self).__init__(\n name,\n kind=AbstractOutcome.INFO,\n variable_name=variable_name,\n function=function,\n expected_range=expected_range,\n shape=shape)\n\n def process(self, values):\n values = super(ArrayOutcome, self).process(values)\n if not isinstance(values, collections.abc.Iterable):\n raise EMAError(\n \"outcome {} should be a collection\".format(self.name))\n return values\n\n\nclass TimeSeriesOutcome(ArrayOutcome):\n '''\n TimeSeries Outcome class\n\n Parameters\n ----------\n name : str\n Name of the outcome.\n variable_name : str, optional\n if the name of the outcome in the underlying model\n is different from the name of the outcome, you can\n supply the variable name as an optional argument,\n if not provided, defaults to name\n function : callable, optional\n a callable to perform postprocessing on data retrieved\n from model\n expected_range : 2 tuple, optional\n expected min and max value for outcome,\n used by HyperVolume convergence metric\n shape : {tuple, None}, optional\n\n Attributes\n ----------\n name : str\n kind : int\n variable_name : str\n function : callable\n shape : tuple\n expected_range : tuple\n\n '''\n\n def __init__(self, name, variable_name=None,\n function=None, expected_range=None,\n shape=None):\n super(\n TimeSeriesOutcome,\n self).__init__(\n name,\n 
variable_name=variable_name,\n function=function,\n expected_range=expected_range,\n shape=shape)\n\n\nclass Constraint(ScalarOutcome):\n '''Constraints class that can be used when defining constrained\n optimization problems.\n\n Parameters\n ----------\n name : str\n parameter_names : str or collection of str\n outcome_names : str or collection of str\n function : callable\n\n Attributes\n ----------\n name : str\n parameter_names : str, list of str\n name(s) of the uncertain parameter(s) and/or\n lever parameter(s) to which the constraint applies\n outcome_names : str, list of str\n name(s) of the outcome(s) to which the constraint applies\n function : callable\n The function should return the distance from the feasibility\n threshold, given the model outputs with a variable name. The\n distance should be 0 if the constraint is met.\n\n '''\n\n def __init__(self, name, parameter_names=None, outcome_names=None,\n function=None):\n assert callable(function)\n if not parameter_names:\n parameter_names = []\n elif isinstance(parameter_names, six.string_types):\n parameter_names = [parameter_names]\n\n if not outcome_names:\n outcome_names = []\n elif isinstance(outcome_names, six.string_types):\n outcome_names = [outcome_names]\n\n variable_names = parameter_names + outcome_names\n\n super(Constraint, self).__init__(name, kind=AbstractOutcome.INFO,\n variable_name=variable_names,\n function=function)\n\n self.parameter_names = parameter_names\n self.outcome_names = outcome_names\n\n def process(self, values):\n value = super(Constraint, self).process(values)\n assert value >= 0\n return value\n\n\ndef create_outcomes(outcomes, **kwargs):\n '''Helper function for creating multiple outcomes\n\n Parameters\n ----------\n outcomes : DataFrame, or something convertible to a DataFrame\n in case of a string, the string will be passed to pandas.read_csv\n\n Returns\n -------\n list\n\n '''\n\n if isinstance(outcomes, six.string_types):\n outcomes = pandas.read_csv(outcomes, **kwargs)\n elif not isinstance(outcomes, pandas.DataFrame):\n outcomes = pandas.DataFrame.from_dict(outcomes)\n\n for entry in ['name', 'type']:\n if entry not in outcomes.columns:\n raise ValueError('no {} column in dataframe'.format(entry))\n\n temp_outcomes = []\n for _, row in outcomes.iterrows():\n name = row['name']\n kind = row['type']\n\n if kind == 'scalar':\n outcome = ScalarOutcome(name)\n elif kind == 'timeseries':\n outcome = TimeSeriesOutcome(name)\n else:\n raise ValueError('unknown type for ' + name)\n temp_outcomes.append(outcome)\n return temp_outcomes\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
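For context on create_outcomes in the outcomes module above: it accepts a CSV path, a DataFrame, or anything pandas.DataFrame.from_dict understands, requires name and type columns, and maps each row to a ScalarOutcome or TimeSeriesOutcome. A hedged usage sketch, assuming ema_workbench is installed; the three outcome names below are invented example data, not taken from the library's documentation.

    import pandas as pd
    from ema_workbench.em_framework.outcomes import create_outcomes

    # invented spec: two scalar outcomes and one time series
    spec = pd.DataFrame(
        {"name": ["cost", "deaths", "infected"],
         "type": ["scalar", "scalar", "timeseries"]}
    )

    outcomes = create_outcomes(spec)
    for o in outcomes:
        # e.g. "ScalarOutcome cost", ..., "TimeSeriesOutcome infected"
        print(type(o).__name__, o.name)

Any type other than 'scalar' or 'timeseries' raises ValueError, so the spec table is validated up front.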
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching | [
"b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9",
"b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9",
"b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9",
"b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9"
] | [
"trash/train_ncnet_adap_pixelCT1.py",
"trash/train_pixelCT_both_negative_normalized_WTA01.py",
"trash/train_ncnet_adap_pixelCT_L2norm2.py",
"lib/model_eval/model_eval_ncnet_adap.py"
] | [
"from __future__ import print_function, division\nimport os\nimport numpy as np\nimport numpy.random\nimport datetime\nimport torch\nimport torch.optim as optim\nfrom torch.nn.functional import relu\n\nfrom lib.dataloader import DataLoader # modified dataloader\nfrom lib.model_train.model_pixelCT_ncnet_adap import ImMatchNet\nfrom lib.matching_model import unNormMap1D_to_NormMap2D\nfrom lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig\n\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nimport torch.nn.functional as F\nimport argparse\nfrom tensorboardX import SummaryWriter\nimport random\nuse_cuda = torch.cuda.is_available()\nprint(\"use_cuda:\",use_cuda)\nGPU_NUM = 2\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\n\nprint('Available devices', torch.cuda.device_count())\nprint('Current cuda device', torch.cuda.current_device())\nprint(torch.cuda.get_device_name(device))\ntorch.cuda.set_device(device)\nprint('Current cuda device', torch.cuda.current_device())\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nprint('ImMatchNet training script')\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/',\n help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help='training batch size')\nparser.add_argument('--lr', type=float, default=0.00005, help='learning rate')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5, 5, 5],\n help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, 16, 1], help='channels in neigh. 
cons')\n# parser.add_argument('--result_model_fn', type=str, default='checkpoint_pixelCT_ncnet_adap_lr55_temp007_1', help='trained model filename')\nparser.add_argument('--result_model_fn', type=str, default='checkpoint_pixelCT_ncnet_adap_lr55_temp10_noNorm', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default='trained_models', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')\n# parser.add_argument('--temperature', type=float, default=0.07, help='pixelCT_temperature')\nparser.add_argument('--temperature', type=float, default=10, help='pixelCT_temperature')\n\nargs = parser.parse_args()\nprint(args)\n# save_path = 'trainLog_pixelCT_ncnet_adap_lr54_temp007_1'\nsave_path = 'trainLog_pixelCT_ncnet_adap_lr55_temp10_noNorm'\nif not os.path.isdir(save_path):\n os.mkdir(save_path)\nwriter = SummaryWriter(os.path.join(save_path, ''))\n# Create model\nprint('Creating CNN model...')\ntorch.manual_seed(1)\nif use_cuda:\n print('torch cuda manual seed used =========================')\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nrandom.seed(1)\nmodel = ImMatchNet(use_cuda=use_cuda,\n checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes,\n ncons_channels=args.ncons_channels)\ntorch.manual_seed(1)\nif use_cuda:\n print('torch cuda manual seed used =========================')\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nrandom.seed(1)\n# Set which parts of the model to train\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.FeatureExtraction.model[-1][-(i + 1)].parameters():\n p.requires_grad = True\n\nprint('Trainable parameters:')\nfor i, p in enumerate(filter(lambda p: p.requires_grad, model.parameters())):\n print(str(i + 1) + \": \" + str(p.shape))\n\n# Optimizer\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)\n\ncnn_image_size = (args.image_size, args.image_size)\n\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\ntest_csv = 'val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image', 'target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\n\n# Dataset and dataloader\ndataset = Dataset(transform=normalization_tnf,\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=train_csv,\n output_size=cnn_image_size)\n\ndataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=True,\n num_workers=0)\n\ndataset_test = Dataset(transform=normalization_tnf,\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=test_csv,\n output_size=cnn_image_size)\n\ndataloader_test = DataLoader(dataset_test, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n\n# Train\nbest_test_loss = float(\"inf\")\ndef featureL2Norm(feature):\n epsilon = 1e-6\n norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)\n return torch.div(feature, norm)\ndef writer_grad_flow(named_parameters, writer, writer_position):\n ave_grads = []\n layers = []\n for n, p in named_parameters:\n if(p.requires_grad) and (\"bias\" not in n):\n if p.grad is None:\n continue\n print(n, \"p.grad is None\")\n writer.add_scalar('gradient_flow/{}'.format(n), p.grad.abs().mean().data.cpu().numpy(), writer_position)\n\ndef calc_pixelCT(nc_A_Bvec, index_NET, temperature):\n 
batch_size, _, feature_size, feature_size = nc_A_Bvec.size() #(B, B_S * B_S, A_S, A_S)\n nc_BSS = nc_A_Bvec.contiguous().view(batch_size * feature_size * feature_size, feature_size * feature_size)\n nc_BSS_numpy = nc_BSS.detach().cpu().numpy()\n index1D_NET = index_NET.view(batch_size * feature_size * feature_size, 1)\n index1D_NET_numpy = index1D_NET.detach().cpu().numpy()\n # (B * tgt_s * tgt_s, src_s * src_s)\n mask_pixelCT = torch.zeros(batch_size * feature_size * feature_size, feature_size * feature_size).bool()\n\n mask_pixelCT[torch.arange(batch_size * feature_size * feature_size), index1D_NET.detach().squeeze(1)] = True\n mask_pixelCT_numpy = mask_pixelCT.detach().cpu().numpy()\n # positive = scores_WTA_B.view(batch_size * feature_size * feature_size, -1)\n positive = nc_BSS[mask_pixelCT].view(batch_size * feature_size * feature_size, -1)\n print(\"-------------calc----------------------\")\n print(\"index_NET\", index1D_NET[15*25 + 15])\n print(\"nc_BSS_score\", nc_BSS[15*25 + 15, index1D_NET[15*25 + 15]])\n print(\"positive\", positive[15*25 + 15])\n print(\"-----------------------------------\")\n\n positive_numpy = positive.detach().cpu().numpy()\n negative = nc_BSS[~mask_pixelCT].view(batch_size * feature_size * feature_size, -1)\n negative_numpy = negative.detach().cpu().numpy()\n eps_temp = 1e-6\n logits = torch.cat([positive, negative], dim=1)\n logits = (logits / temperature) + eps_temp\n labels = torch.zeros(batch_size * feature_size * feature_size, device=device, dtype=torch.int64)\n loss_pixelCT = F.cross_entropy(logits, labels, reduction='sum')\n loss_pixelCT = loss_pixelCT / (batch_size * feature_size * feature_size)\n return loss_pixelCT\n\n\ndef weak_loss(model, batch, writer_position, mode, normalization='softmax', alpha=30):\n if normalization is None:\n normalize = lambda x: x\n elif normalization == 'softmax':\n normalize = lambda x: torch.nn.functional.softmax(x, 1)\n elif normalization == 'l1':\n normalize = lambda x: x / (torch.sum(x, dim=1, keepdim=True) + 0.0001)\n\n b = batch['source_image'].size(0)\n # positive\n # corr4d = model({'source_image':batch['source_image'], 'target_image':batch['target_image']})\n corr4d = model(batch, writer, writer_position, mode, label='pos')\n\n batch_size = corr4d.size(0)\n feature_size = corr4d.size(2)\n\n nc_B_Avec = corr4d.view(batch_size, feature_size * feature_size, feature_size,\n feature_size) # [batch_idx,k_A,i_B,j_B]\n nc_A_Bvec = corr4d.view(batch_size, feature_size, feature_size, feature_size * feature_size).permute(0, 3, 1, 2) #\n # nc_A_Bvec = featureL2Norm(nc_A_Bvec)\n nc_B_Avec_norm = normalize(nc_B_Avec)\n scores_B, index_B = torch.max(nc_B_Avec_norm, dim=1)\n\n #check\n # print(\"before_L2norm(NET)\",nc_B_Avec[0, int(index_B[0, 15, 15]), 15, 15])\n # nc_B_Avec = featureL2Norm(nc_B_Avec)\n # print(\"after_L2norm(NET)\",nc_B_Avec[0, int(index_B[0, 15, 15]), 15, 15])\n \n\n # print(\"------------------pos-----------------\")\n # print(\"score\", scores_B[0, 15, 15])\n # print(\"index_NET_B\", index_B[0, 15, 15])\n loss_pixelCT_NET_B_Avec_by_NET_pos = calc_pixelCT(nc_A_Bvec, index_B, args.temperature)\n\n\n # negative\n batch['source_image'] = batch['source_image'][np.roll(np.arange(b), -1), :] # roll\n corr4d = model(batch, writer, writer_position, mode, label = 'neg')\n # corr4d = model({'source_image':batch['source_image'], 'target_image':batch['negative_image']})\n\n batch_size = corr4d.size(0)\n feature_size = corr4d.size(2)\n nc_B_Avec = corr4d.view(batch_size, feature_size * feature_size, 
feature_size,\n feature_size) # [batch_idx,k_A,i_B,j_B]\n nc_A_Bvec = corr4d.view(batch_size, feature_size, feature_size, feature_size * feature_size).permute(0, 3, 1, 2) #\n # nc_A_Bvec = featureL2Norm(nc_A_Bvec)\n nc_B_Avec_norm = normalize(nc_B_Avec)\n scores_B, index_B = torch.max(nc_B_Avec_norm, dim=1)\n\n # print(\"score\", scores_B[0, 15, 15])\n # print(\"index_NET_B\", index_B[0, 15, 15])\n loss_pixelCT_NET_B_Avec_by_NET_neg = calc_pixelCT(nc_A_Bvec, index_B, args.temperature)\n\n # loss\n\n return loss_pixelCT_NET_B_Avec_by_NET_pos, loss_pixelCT_NET_B_Avec_by_NET_neg\n\n\nloss_fn = lambda model, batch, writer_position, mode : weak_loss(model, batch, writer_position, mode, normalization='softmax')\n\n\n# define epoch function\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader, batch_preprocessing_fn, writer, use_cuda=True,\n log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n if mode == 'train':\n optimizer.zero_grad()\n writer_position = (epoch -1) * len(dataloader) + batch_idx\n tnf_batch = batch_preprocessing_fn(batch)\n loss_pixelCT_NET_B_Avec_by_NET_pos, loss_pixelCT_NET_B_Avec_by_NET_neg = loss_fn(model, tnf_batch, writer_position, mode)\n loss = loss_pixelCT_NET_B_Avec_by_NET_pos - loss_pixelCT_NET_B_Avec_by_NET_neg\n loss_np = loss.data.cpu().numpy()\n if writer_position % 9 == 0:\n writer.add_scalar('Loss_{}/loss_pixelCT_NET_B_Avec_by_NET_pos'.format(mode), loss_pixelCT_NET_B_Avec_by_NET_pos, writer_position)\n writer.add_scalar('Loss_{}/loss_pixelCT_NET_B_Avec_by_NET_neg'.format(mode), loss_pixelCT_NET_B_Avec_by_NET_neg,\n writer_position)\n writer.add_scalar('Loss_{}/loss_nc'.format(mode), loss, writer_position)\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n writer_grad_flow(model.named_parameters(), writer,writer_position)\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() + ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.6f}'.format(\n epoch, batch_idx, len(dataloader),\n 100. * batch_idx / len(dataloader), loss_np))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.4f}'.format(epoch_loss))\n return epoch_loss\n\n\ntrain_loss = np.zeros(args.num_epochs)\ntest_loss = np.zeros(args.num_epochs)\n\nprint('Starting training...')\n\nmodel.FeatureExtraction.eval()\n\nfor epoch in range(1, args.num_epochs + 1):\n train_loss[epoch - 1] = process_epoch('train', epoch, model, loss_fn, optimizer, dataloader, batch_preprocessing_fn,\n writer, log_interval=1)\n test_loss[epoch - 1] = process_epoch('test', epoch, model, loss_fn, optimizer, dataloader_test,\n batch_preprocessing_fn, writer, log_interval=1)\n\n # remember best loss\n is_best = test_loss[epoch - 1] < best_test_loss\n best_test_loss = min(test_loss[epoch - 1], best_test_loss)\n checkpoint_name = os.path.join(args.result_model_dir,\n datetime.datetime.now().strftime(\n \"%Y-%m-%d_%H:%M\") + '_epoch_' + str(\n epoch) + '_' + args.result_model_fn + '.pth.tar')\n\n print('Checkpoint name: ' + checkpoint_name)\n save_checkpoint({\n 'epoch': epoch,\n 'args': args,\n 'state_dict': model.state_dict(),\n 'best_test_loss': best_test_loss,\n 'optimizer': optimizer.state_dict(),\n 'train_loss': train_loss,\n 'test_loss': test_loss,\n }, is_best, checkpoint_name)\n\nprint('Done!')\n",
"from __future__ import print_function, division\nimport os\nimport numpy as np\nimport numpy.random\nimport datetime\nimport torch\nimport torch.optim as optim\nfrom torch.nn.functional import relu\nfrom lib.dataloader import DataLoader # modified dataloader\nfrom lib.model_train.model_pixelCT_both_normalized import ImMatchNet\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nimport argparse\nfrom lib.matching_model import EPE\nfrom tensorboardX import SummaryWriter\nimport matplotlib.pyplot as plt\nimport random\nfrom lib.matching_model import multiscaleEPE\nimport torch.nn.functional as F\n\n\n# Seed and CUDA\nuse_cuda = torch.cuda.is_available()\ntorch.cuda.manual_seed_all(1) # if use multi-GPU\nos.environ['PYTHONHASHSEED'] = str(1)\n\nprint(\"use_cuda:\",use_cuda)\nGPU_NUM = 1\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\nprint('Available devices', torch.cuda.device_count())\nprint('Current cuda device', torch.cuda.current_device())\nprint(torch.cuda.get_device_name(device))\ntorch.cuda.set_device(device)\nprint('Current cuda device', torch.cuda.current_device())\n\nprint('ImMatchNet training script')\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help='training batch size')\nparser.add_argument('--lr', type=float, default=0.0000001, help='learning rate')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. 
cons')\nparser.add_argument('--result_model_fn', type=str, default='checkpoint_pixelCT_both_neg_fixed_lr7_WTA01', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default='trained_models', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')\nparser.add_argument('--temperature', type=float, default=0.03, help='pixelCT_temperature')\nparser.add_argument('--threshold', type=float, default=0.4, help='pixelCT_threshold')\n\ndef calc_pixelCT_mask(nc_vec, index_NET, mask, temperature):\n batch_size, _, feature_size, feature_size = nc_vec.size()\n nc_BSS = nc_vec.contiguous().view(batch_size * feature_size * feature_size, feature_size * feature_size)\n nc_BSS_numpy = nc_BSS.detach().cpu().numpy()\n index1D_NET = index_NET.view(batch_size * feature_size * feature_size, 1)\n index1D_NET_numpy = index1D_NET.detach().cpu().numpy()\n #(B * tgt_s * tgt_s, src_s * src_s)\n mask_pixelCT = torch.zeros(batch_size * feature_size * feature_size, feature_size * feature_size).bool()\n\n mask_pixelCT[torch.arange(batch_size * feature_size * feature_size), index1D_NET.detach().squeeze(1)] = True\n mask_pixelCT_numpy = mask_pixelCT.detach().cpu().numpy()\n # positive = scores_WTA_B.view(batch_size * feature_size * feature_size, -1)\n\n positive = nc_BSS[mask_pixelCT].view(batch_size * feature_size * feature_size, -1)\n positive_numpy = positive.detach().cpu().numpy()\n negative = nc_BSS[~mask_pixelCT].view(batch_size * feature_size * feature_size, -1)\n negative_numpy = negative.detach().cpu().numpy()\n\n mask1D = torch.zeros(batch_size * feature_size * feature_size, 1).bool()\n mask_label = mask.view(-1, 1).bool()\n mask_label_numpy = mask_label.detach().cpu().numpy()\n mask1D[mask_label] = True\n mask1D_numpy = mask1D.detach().cpu().numpy()\n positive= positive[mask1D.squeeze(1), :]\n positive_numpy2 = positive.detach().cpu().numpy()\n negative = negative[mask1D.squeeze(1), :]\n negative_numpy2 = negative.detach().cpu().numpy()\n masked_logits = torch.cat([positive, negative], dim=1)\n\n\n eps_temp = 1e-6\n masked_logits = masked_logits / (temperature + eps_temp)\n src_num_fgnd = mask.sum(dim=3, keepdim=True).sum(dim=2, keepdim=True).sum(dim=0, keepdim=True)\n src_num_fgnd_label = src_num_fgnd.item()\n labels = torch.zeros(int(src_num_fgnd_label), device=device, dtype=torch.int64)\n\n loss_pixelCT = F.cross_entropy(masked_logits, labels, reduction='sum')\n loss_pixelCT = (loss_pixelCT / src_num_fgnd).sum()\n return loss_pixelCT\ndef writer_grad_flow(named_parameters, writer, writer_position):\n ave_grads = []\n layers = []\n for n, p in named_parameters:\n if(p.requires_grad) and (\"bias\" not in n):\n if p.grad is None:\n continue\n print(n, \"p.grad is None\")\n writer.add_scalar('gradient_flow/{}'.format(n), p.grad.abs().mean().data.cpu().numpy(), writer_position)\n\ndef plot_grad_flow(named_parameters):\n ave_grads = []\n layers = []\n for n, p in named_parameters:\n if(p.requires_grad) and (\"bias\" not in n):\n if p.grad is None:\n continue\n print(n)\n\n print(n)\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n print(p.grad.abs().mean())\n plt.plot(ave_grads, alpha=0.3, color=\"b\")\n plt.hlines(0, 0, len(ave_grads)+1, linewidth=1, color=\"k\" )\n plt.xticks(range(0,len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(xmin=0, xmax=len(ave_grads))\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\nargs = 
parser.parse_args()\nprint(args)\nsave_path = 'trainLog_pixelCT_both_neg_fixed_lr7_WTA01'\nif not os.path.isdir(save_path):\n os.mkdir(save_path)\nwriter = SummaryWriter(os.path.join(save_path, ''))\n\n# Create model\nprint('Creating CNN model...')\nmodel = ImMatchNet(use_cuda=use_cuda,\n\t\t\t\t checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes,\n ncons_channels=args.ncons_channels,\n threshold = args.threshold)\ntorch.manual_seed(1)\nif use_cuda:\n print('torch cuda manual seed used =========================')\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nrandom.seed(1)\n# Set which parts of the model to train\nif args.fe_finetune_params>0:\n for i in range(args.fe_finetune_params):\n for p in model.FeatureExtraction.model[-1][-(i+1)].parameters():\n p.requires_grad=True\n\nprint('Trainable parameters:')\nfor i,p in enumerate(filter(lambda p: p.requires_grad, model.parameters())):\n print(str(i+1)+\": \"+str(p.shape))\nfor i, p in model.named_parameters():\n # print(str(i + 1) + \": \" + str(p.shape))\n writer.add_histogram(i, p.clone().cpu().data.numpy(), 0)\n# Optimizer\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)\n\ncnn_image_size=(args.image_size,args.image_size)\n\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\ntest_csv = 'val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image','target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\n\n# Dataset and dataloader\ndataset = Dataset(transform=normalization_tnf,\n\t dataset_image_path=args.dataset_image_path,\n\t dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file = train_csv,\n output_size=cnn_image_size)\nprint(args.batch_size)\ndataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=True,\n num_workers=0)\n\ndataset_test = Dataset(transform=normalization_tnf,\n\t dataset_image_path=args.dataset_image_path,\n\t dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=test_csv,\n output_size=cnn_image_size)\n\ndataloader_test = DataLoader(dataset_test, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n\n\n# Train\nbest_test_loss = float(\"inf\")\n\n\n\ndef weak_loss(model,batch,writer_position, normalization='softmax',alpha=30):\n if normalization is None:\n normalize = lambda x: x\n elif normalization=='softmax':\n normalize = lambda x: torch.nn.functional.softmax(x,1)\n elif normalization=='l1':\n normalize = lambda x: x/(torch.sum(x,dim=1,keepdim=True)+0.0001)\n\n b = batch['source_image'].size(0)\n # positive\n #corr4d = model({'source_image':batch['source_image'], 'target_image':batch['target_image']})\n corr_WTA , corr_NET, mask_B_Avec, masked_index_B_Avec = model(batch, writer, writer_position)\n\n batch_size = corr_WTA.size(0)\n feature_size = corr_WTA.size(2)\n nc_WTA_B_Avec = corr_WTA.view(batch_size, feature_size * feature_size, feature_size,\n feature_size) # [batch_idx,k_A,i_B,j_B]\n nc_NET_B_Avec = corr_NET.view(batch_size, feature_size * feature_size, feature_size,\n feature_size)\n\n\n #PixelCT\n loss_pixelCT_WTA_B_Avec_pos = calc_pixelCT_mask(nc_WTA_B_Avec, masked_index_B_Avec, mask_B_Avec, args.temperature)\n loss_pixelCT_NET_B_Avec_pos = calc_pixelCT_mask(nc_NET_B_Avec, masked_index_B_Avec, mask_B_Avec, args.temperature)\n score_pos_pixelCT = loss_pixelCT_WTA_B_Avec_pos + loss_pixelCT_NET_B_Avec_pos\n loss_pixelCT_WTA_B_Avec_pos = loss_pixelCT_WTA_B_Avec_pos * 0.1\n #neg\n batch['source_image'] = 
batch['source_image'][np.roll(np.arange(b), -1), :] # roll\n corr_WTA , corr_NET, mask_B_Avec, masked_index_B_Avec = model(batch, writer, writer_position)\n\n batch_size = corr_WTA.size(0)\n feature_size = corr_WTA.size(2)\n nc_WTA_B_Avec = corr_WTA.view(batch_size, feature_size * feature_size, feature_size,\n feature_size) # [batch_idx,k_A,i_B,j_B]\n nc_NET_B_Avec = corr_NET.view(batch_size, feature_size * feature_size, feature_size,\n feature_size)\n\n\n #PixelCT\n loss_pixelCT_WTA_B_Avec_neg = calc_pixelCT_mask(nc_WTA_B_Avec, masked_index_B_Avec, mask_B_Avec, args.temperature)\n loss_pixelCT_NET_B_Avec_neg = calc_pixelCT_mask(nc_NET_B_Avec, masked_index_B_Avec, mask_B_Avec, args.temperature)\n loss_pixelCT_WTA_B_Avec_neg = loss_pixelCT_WTA_B_Avec_neg * 0.1\n return loss_pixelCT_WTA_B_Avec_pos, loss_pixelCT_NET_B_Avec_pos, loss_pixelCT_WTA_B_Avec_neg, loss_pixelCT_NET_B_Avec_neg\nloss_fn = lambda model,batch, writer_position : weak_loss(model,batch,writer_position, normalization='softmax')\n\n# define epoch function\ndef process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn, writer, use_cuda=True,log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n if mode=='train':\n optimizer.zero_grad()\n writer_position = (epoch -1) * len(dataloader) + batch_idx\n tnf_batch = batch_preprocessing_fn(batch)\n # loss_nc, loss_flow, loss_contrastive = loss_fn(model,tnf_batch, batch_idx)\n loss_pixelCT_WTA_B_Avec_pos, loss_pixelCT_NET_B_Avec_pos, loss_pixelCT_WTA_B_Avec_neg, loss_pixelCT_NET_B_Avec_neg = loss_fn(model, tnf_batch, writer_position)\n score_pos_pixelCT = loss_pixelCT_WTA_B_Avec_pos + loss_pixelCT_NET_B_Avec_pos\n score_neg_pixelCT = loss_pixelCT_WTA_B_Avec_neg + loss_pixelCT_NET_B_Avec_neg\n writer.add_scalar('Loss_{}/loss_pixelCT_WTA_B_Avec_pos01'.format(mode), loss_pixelCT_WTA_B_Avec_pos, writer_position)\n writer.add_scalar('Loss_{}/loss_pixelCT_NET_B_Avec_pos'.format(mode), loss_pixelCT_NET_B_Avec_pos, writer_position)\n writer.add_scalar('Loss_{}/loss_pixelCT_WTA_B_Avec_neg01'.format(mode), loss_pixelCT_WTA_B_Avec_neg, writer_position)\n writer.add_scalar('Loss_{}/loss_pixelCT_NET_B_Avec_neg'.format(mode), loss_pixelCT_NET_B_Avec_neg, writer_position)\n writer.add_scalar('Loss_{}/score_pos_pixelCT'.format(mode), score_pos_pixelCT, writer_position)\n writer.add_scalar('Loss_{}/score_neg_pixelCT'.format(mode), score_neg_pixelCT, writer_position)\n\n print(\"score_pos_pixelCT:\"+ str(score_pos_pixelCT.data.cpu().numpy()))\n print(\"score_neg_pixelCT:\" + str(score_neg_pixelCT.data.cpu().numpy()))\n loss = score_pos_pixelCT - score_neg_pixelCT\n writer.add_scalar('Loss_total/score_both_pixelCT', loss, writer_position)\n loss_np = loss.data.cpu().numpy()\n epoch_loss += loss_np\n # save_checkpoint({\n # 'epoch': epoch,\n # 'args': args,\n # 'state_dict': model.state_dict(),\n # 'best_test_loss': best_test_loss,\n # 'optimizer': optimizer.state_dict(),\n # 'train_loss': epoch_loss,\n # 'test_loss': epoch_loss,\n # }, False, os.path.join('./trained_models/',\n # 'baseline_feature_extraction' + '.pth.tar'))\n # print(\"baseline!!\")\n\n if mode=='train':\n loss.backward()\n writer_grad_flow(model.named_parameters(), writer,writer_position)\n # plot_grad_flow(model.named_parameters())\n if writer_position % 100 == 0:\n for i,p in model.named_parameters():\n if(p.requires_grad) and (\"bias\" not in i):\n writer.add_histogram(i, p.clone().cpu().data.numpy(), writer_position)\n optimizer.step()\n else:\n loss=None\n if batch_idx % 
log_interval == 0:\n print(mode.capitalize()+' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.6f}'.format(epoch, batch_idx , len(dataloader), 100. * batch_idx / len(dataloader), loss_np))\n epoch_loss /= len(dataloader)\n print(mode.capitalize()+' set: Average loss: {:.4f}'.format(epoch_loss))\n return epoch_loss\n\ntrain_loss = np.zeros(args.num_epochs)\ntest_loss = np.zeros(args.num_epochs)\n\nprint('Starting training...')\n\nmodel.FeatureExtraction.eval()\n\nfor epoch in range(1, args.num_epochs+1):\n train_loss[epoch-1] = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,writer, log_interval=1)\n test_loss[epoch-1] = process_epoch('test',epoch,model,loss_fn,optimizer,dataloader_test,batch_preprocessing_fn,writer, log_interval=1)\n\n # remember best loss\n is_best = test_loss[epoch-1] < best_test_loss\n best_test_loss = min(test_loss[epoch-1], best_test_loss)\n # Define checkpoint name\n checkpoint_name = os.path.join(args.result_model_dir,\n datetime.datetime.now().strftime(\n \"%Y-%m-%d_%H:%M\") + '_epoch_' + str(epoch) + '_' + args.result_model_fn + '.pth.tar')\n\n print('Checkpoint name: ' + checkpoint_name)\n save_checkpoint({\n 'epoch': epoch,\n 'args': args,\n 'state_dict': model.state_dict(),\n 'best_test_loss': best_test_loss,\n 'optimizer' : optimizer.state_dict(),\n 'train_loss': train_loss,\n 'test_loss': test_loss,\n }, is_best,checkpoint_name)\n\nprint('Done!')\n",
"from __future__ import print_function, division\nimport os\nimport numpy as np\nimport numpy.random\nimport datetime\nimport torch\nimport torch.optim as optim\nfrom torch.nn.functional import relu\n\nfrom lib.dataloader import DataLoader # modified dataloader\nfrom lib.model_train.model_pixelCT_ncnet_adap import ImMatchNet\nfrom lib.matching_model import unNormMap1D_to_NormMap2D\nfrom lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig\n\nfrom lib.im_pair_dataset import ImagePairDataset\nfrom lib.normalization import NormalizeImageDict\nfrom lib.torch_util import save_checkpoint\nfrom lib.torch_util import BatchTensorToVars\nimport torch.nn.functional as F\nimport argparse\nfrom tensorboardX import SummaryWriter\nimport random\nuse_cuda = torch.cuda.is_available()\nprint(\"use_cuda:\",use_cuda)\nGPU_NUM = 1\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\n\nprint('Available devices', torch.cuda.device_count())\nprint('Current cuda device', torch.cuda.current_device())\nprint(torch.cuda.get_device_name(device))\ntorch.cuda.set_device(device)\nprint('Current cuda device', torch.cuda.current_device())\ntorch.manual_seed(1)\nif use_cuda:\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nprint('ImMatchNet training script')\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Compute PF Pascal matches')\nparser.add_argument('--checkpoint', type=str, default='')\nparser.add_argument('--image_size', type=int, default=400)\nparser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')\nparser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/',\n help='path to PF Pascal training csv')\nparser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')\nparser.add_argument('--batch_size', type=int, default=16, help='training batch size')\nparser.add_argument('--lr', type=float, default=0.00005, help='learning rate')\nparser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5, 5, 5],\n help='kernels sizes in neigh. cons.')\nparser.add_argument('--ncons_channels', nargs='+', type=int, default=[16, 16, 1], help='channels in neigh. 
cons')\nparser.add_argument('--result_model_fn', type=str, default='checkpoint_pixelCT_ncnet_adap_lr55_temp1_L2norm', help='trained model filename')\nparser.add_argument('--result-model-dir', type=str, default='trained_models', help='path to trained models folder')\nparser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')\nparser.add_argument('--temperature', type=float, default=1, help='pixelCT_temperature')\n\nargs = parser.parse_args()\nprint(args)\nsave_path = 'trainLog_pixelCT_ncnet_adap_lr55_temp1_L2norm'\nif not os.path.isdir(save_path):\n os.mkdir(save_path)\nwriter = SummaryWriter(os.path.join(save_path, ''))\n# Create model\nprint('Creating CNN model...')\ntorch.manual_seed(1)\nif use_cuda:\n print('torch cuda manual seed used =========================')\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nrandom.seed(1)\nmodel = ImMatchNet(use_cuda=use_cuda,\n checkpoint=args.checkpoint,\n ncons_kernel_sizes=args.ncons_kernel_sizes,\n ncons_channels=args.ncons_channels)\ntorch.manual_seed(1)\nif use_cuda:\n print('torch cuda manual seed used =========================')\n torch.cuda.manual_seed(1)\nnp.random.seed(1)\n\nrandom.seed(1)\n# Set which parts of the model to train\nif args.fe_finetune_params > 0:\n for i in range(args.fe_finetune_params):\n for p in model.FeatureExtraction.model[-1][-(i + 1)].parameters():\n p.requires_grad = True\n\nprint('Trainable parameters:')\nfor i, p in enumerate(filter(lambda p: p.requires_grad, model.parameters())):\n print(str(i + 1) + \": \" + str(p.shape))\n\n# Optimizer\nprint('using Adam optimizer')\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)\n\ncnn_image_size = (args.image_size, args.image_size)\n\nDataset = ImagePairDataset\ntrain_csv = 'train_pairs.csv'\ntest_csv = 'val_pairs.csv'\nnormalization_tnf = NormalizeImageDict(['source_image', 'target_image'])\nbatch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)\n\n# Dataset and dataloader\ndataset = Dataset(transform=normalization_tnf,\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=train_csv,\n output_size=cnn_image_size)\n\ndataloader = DataLoader(dataset, batch_size=args.batch_size,\n shuffle=True,\n num_workers=0)\n\ndataset_test = Dataset(transform=normalization_tnf,\n dataset_image_path=args.dataset_image_path,\n dataset_csv_path=args.dataset_csv_path,\n dataset_csv_file=test_csv,\n output_size=cnn_image_size)\n\ndataloader_test = DataLoader(dataset_test, batch_size=args.batch_size,\n shuffle=True, num_workers=4)\n\n\n# Train\nbest_test_loss = float(\"inf\")\ndef featureL2Norm(feature):\n epsilon = 1e-6\n norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)\n return torch.div(feature, norm)\ndef writer_grad_flow(named_parameters, writer, writer_position):\n ave_grads = []\n layers = []\n for n, p in named_parameters:\n if(p.requires_grad) and (\"bias\" not in n):\n if p.grad is None:\n continue\n print(n, \"p.grad is None\")\n writer.add_scalar('gradient_flow/{}'.format(n), p.grad.abs().mean().data.cpu().numpy(), writer_position)\n\ndef calc_pixelCT(nc_A_Bvec, index_NET, temperature):\n batch_size, _, feature_size, feature_size = nc_A_Bvec.size() #(B, B_S * B_S, A_S, A_S)\n nc_BSS = nc_A_Bvec.contiguous().view(batch_size * feature_size * feature_size, feature_size * feature_size)\n nc_BSS_numpy = nc_BSS.detach().cpu().numpy()\n index1D_NET = index_NET.view(batch_size * feature_size * 
feature_size, 1)\n index1D_NET_numpy = index1D_NET.detach().cpu().numpy()\n # (B * tgt_s * tgt_s, src_s * src_s)\n mask_pixelCT = torch.zeros(batch_size * feature_size * feature_size, feature_size * feature_size).bool()\n\n mask_pixelCT[torch.arange(batch_size * feature_size * feature_size), index1D_NET.detach().squeeze(1)] = True\n mask_pixelCT_numpy = mask_pixelCT.detach().cpu().numpy()\n # positive = scores_WTA_B.view(batch_size * feature_size * feature_size, -1)\n positive = nc_BSS[mask_pixelCT].view(batch_size * feature_size * feature_size, -1)\n print(\"-------------calc----------------------\")\n print(\"index_NET\", index1D_NET[15*25 + 15])\n print(\"nc_BSS_score\", nc_BSS[15*25 + 15, index1D_NET[15*25 + 15]])\n print(\"positive\", positive[15*25 + 15])\n print(\"-----------------------------------\")\n\n positive_numpy = positive.detach().cpu().numpy()\n negative = nc_BSS[~mask_pixelCT].view(batch_size * feature_size * feature_size, -1)\n negative_numpy = negative.detach().cpu().numpy()\n eps_temp = 1e-6\n logits = torch.cat([positive, negative], dim=1)\n logits = (logits / temperature) + eps_temp\n labels = torch.zeros(batch_size * feature_size * feature_size, device=device, dtype=torch.int64)\n loss_pixelCT = F.cross_entropy(logits, labels, reduction='sum')\n loss_pixelCT = loss_pixelCT / (batch_size * feature_size * feature_size)\n return loss_pixelCT\n\n\ndef weak_loss(model, batch, writer_position, mode, normalization='softmax', alpha=30):\n if normalization is None:\n normalize = lambda x: x\n elif normalization == 'softmax':\n normalize = lambda x: torch.nn.functional.softmax(x, 1)\n elif normalization == 'l1':\n normalize = lambda x: x / (torch.sum(x, dim=1, keepdim=True) + 0.0001)\n\n b = batch['source_image'].size(0)\n # positive\n # corr4d = model({'source_image':batch['source_image'], 'target_image':batch['target_image']})\n corr4d = model(batch, writer, writer_position, mode, label='pos')\n\n batch_size = corr4d.size(0)\n feature_size = corr4d.size(2)\n\n nc_B_Avec = corr4d.view(batch_size, feature_size * feature_size, feature_size,\n feature_size) # [batch_idx,k_A,i_B,j_B]\n nc_A_Bvec = corr4d.view(batch_size, feature_size, feature_size, feature_size * feature_size).permute(0, 3, 1, 2) #\n nc_A_Bvec = featureL2Norm(nc_A_Bvec)\n nc_B_Avec_norm = normalize(nc_B_Avec)\n scores_B, index_B = torch.max(nc_B_Avec_norm, dim=1)\n\n #check\n # print(\"before_L2norm(NET)\",nc_B_Avec[0, int(index_B[0, 15, 15]), 15, 15])\n # nc_B_Avec = featureL2Norm(nc_B_Avec)\n # print(\"after_L2norm(NET)\",nc_B_Avec[0, int(index_B[0, 15, 15]), 15, 15])\n \n\n # print(\"------------------pos-----------------\")\n # print(\"score\", scores_B[0, 15, 15])\n # print(\"index_NET_B\", index_B[0, 15, 15])\n loss_pixelCT_NET_B_Avec_by_NET_pos = calc_pixelCT(nc_A_Bvec, index_B, args.temperature)\n\n\n # negative\n batch['source_image'] = batch['source_image'][np.roll(np.arange(b), -1), :] # roll\n corr4d = model(batch, writer, writer_position, mode, label = 'neg')\n # corr4d = model({'source_image':batch['source_image'], 'target_image':batch['negative_image']})\n\n batch_size = corr4d.size(0)\n feature_size = corr4d.size(2)\n nc_B_Avec = corr4d.view(batch_size, feature_size * feature_size, feature_size,\n feature_size) # [batch_idx,k_A,i_B,j_B]\n nc_A_Bvec = corr4d.view(batch_size, feature_size, feature_size, feature_size * feature_size).permute(0, 3, 1, 2) #\n nc_A_Bvec = featureL2Norm(nc_A_Bvec)\n nc_B_Avec_norm = normalize(nc_B_Avec)\n scores_B, index_B = torch.max(nc_B_Avec_norm, dim=1)\n\n # 
print(\"score\", scores_B[0, 15, 15])\n # print(\"index_NET_B\", index_B[0, 15, 15])\n loss_pixelCT_NET_B_Avec_by_NET_neg = calc_pixelCT(nc_A_Bvec, index_B, args.temperature)\n\n # loss\n\n return loss_pixelCT_NET_B_Avec_by_NET_pos, loss_pixelCT_NET_B_Avec_by_NET_neg\n\n\nloss_fn = lambda model, batch, writer_position, mode : weak_loss(model, batch, writer_position, mode, normalization='softmax')\n\n\n# define epoch function\ndef process_epoch(mode, epoch, model, loss_fn, optimizer, dataloader, batch_preprocessing_fn, writer, use_cuda=True,\n log_interval=50):\n epoch_loss = 0\n for batch_idx, batch in enumerate(dataloader):\n if mode == 'train':\n optimizer.zero_grad()\n writer_position = (epoch -1) * len(dataloader) + batch_idx\n tnf_batch = batch_preprocessing_fn(batch)\n loss_pixelCT_NET_B_Avec_by_NET_pos, loss_pixelCT_NET_B_Avec_by_NET_neg = loss_fn(model, tnf_batch, writer_position, mode)\n loss = loss_pixelCT_NET_B_Avec_by_NET_pos - loss_pixelCT_NET_B_Avec_by_NET_neg\n loss_np = loss.data.cpu().numpy()\n if writer_position % 9 == 0:\n writer.add_scalar('Loss_{}/loss_pixelCT_NET_B_Avec_by_NET_pos'.format(mode), loss_pixelCT_NET_B_Avec_by_NET_pos, writer_position)\n writer.add_scalar('Loss_{}/loss_pixelCT_NET_B_Avec_by_NET_neg'.format(mode), loss_pixelCT_NET_B_Avec_by_NET_neg,\n writer_position)\n writer.add_scalar('Loss_{}/loss_nc'.format(mode), loss, writer_position)\n epoch_loss += loss_np\n if mode == 'train':\n loss.backward()\n writer_grad_flow(model.named_parameters(), writer,writer_position)\n optimizer.step()\n else:\n loss = None\n if batch_idx % log_interval == 0:\n print(mode.capitalize() + ' Epoch: {} [{}/{} ({:.0f}%)]\\t\\tLoss: {:.6f}'.format(\n epoch, batch_idx, len(dataloader),\n 100. * batch_idx / len(dataloader), loss_np))\n epoch_loss /= len(dataloader)\n print(mode.capitalize() + ' set: Average loss: {:.4f}'.format(epoch_loss))\n return epoch_loss\n\n\ntrain_loss = np.zeros(args.num_epochs)\ntest_loss = np.zeros(args.num_epochs)\n\nprint('Starting training...')\n\nmodel.FeatureExtraction.eval()\n\nfor epoch in range(1, args.num_epochs + 1):\n train_loss[epoch - 1] = process_epoch('train', epoch, model, loss_fn, optimizer, dataloader, batch_preprocessing_fn,\n writer, log_interval=1)\n test_loss[epoch - 1] = process_epoch('test', epoch, model, loss_fn, optimizer, dataloader_test,\n batch_preprocessing_fn, writer, log_interval=1)\n\n # remember best loss\n is_best = test_loss[epoch - 1] < best_test_loss\n best_test_loss = min(test_loss[epoch - 1], best_test_loss)\n checkpoint_name = os.path.join(args.result_model_dir,\n datetime.datetime.now().strftime(\n \"%Y-%m-%d_%H:%M\") + '_epoch_' + str(\n epoch) + '_' + args.result_model_fn + '.pth.tar')\n\n print('Checkpoint name: ' + checkpoint_name)\n save_checkpoint({\n 'epoch': epoch,\n 'args': args,\n 'state_dict': model.state_dict(),\n 'best_test_loss': best_test_loss,\n 'optimizer': optimizer.state_dict(),\n 'train_loss': train_loss,\n 'test_loss': test_loss,\n }, is_best, checkpoint_name)\n\nprint('Done!')\n",
"from __future__ import print_function, division\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torchvision.models as models\nimport numpy as np\nimport numpy.matlib\nimport pickle\n\nfrom lib.torch_util import Softmax1D\nfrom lib.conv4d import Conv4d\nfrom lib.matching_model import CMDTop\nfrom lib.matching_model import unNormMap1D_to_NormMap2D, NormMap2D_to_unNormMap2D\nfrom lib.showPlot import plot_test_map, plot_test_flow, warpImg_fromMap, warpImg_fromMap2, matplotlib_imshow, return_plot_test_map, get_img_from_fig\nimport torch.nn.functional as F\n\n\ndef featureL2Norm(feature):\n epsilon = 1e-6\n norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)\n return torch.div(feature, norm)\n\n\nclass FeatureExtraction(torch.nn.Module):\n def __init__(self, train_fe=False, feature_extraction_cnn='resnet101', feature_extraction_model_file='',\n normalization=False, last_layer='', use_cuda=True):\n super(FeatureExtraction, self).__init__()\n self.normalization = normalization\n self.feature_extraction_cnn = feature_extraction_cnn\n if feature_extraction_cnn == 'vgg':\n self.model = models.vgg16(pretrained=True)\n # keep feature extraction network up to indicated layer\n vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',\n 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',\n 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',\n 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']\n if last_layer == '':\n last_layer = 'pool4'\n last_layer_idx = vgg_feature_layers.index(last_layer)\n self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx + 1])\n # for resnet below\n resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']\n if feature_extraction_cnn == 'resnet101':\n self.model = models.resnet101(pretrained=True)\n if last_layer == '':\n last_layer = 'layer3'\n resnet_module_list = [getattr(self.model, l) for l in resnet_feature_layers]\n last_layer_idx = resnet_feature_layers.index(last_layer)\n self.model = nn.Sequential(*resnet_module_list[:last_layer_idx + 1])\n\n if feature_extraction_cnn == 'resnet101fpn':\n if feature_extraction_model_file != '':\n resnet = models.resnet101(pretrained=True)\n # swap stride (2,2) and (1,1) in first layers (PyTorch ResNet is slightly different to caffe2 ResNet)\n # this is required for compatibility with caffe2 models\n resnet.layer2[0].conv1.stride = (2, 2)\n resnet.layer2[0].conv2.stride = (1, 1)\n resnet.layer3[0].conv1.stride = (2, 2)\n resnet.layer3[0].conv2.stride = (1, 1)\n resnet.layer4[0].conv1.stride = (2, 2)\n resnet.layer4[0].conv2.stride = (1, 1)\n else:\n resnet = models.resnet101(pretrained=True)\n resnet_module_list = [getattr(resnet, l) for l in resnet_feature_layers]\n conv_body = nn.Sequential(*resnet_module_list)\n self.model = fpn_body(conv_body,\n resnet_feature_layers,\n fpn_layers=['layer1', 'layer2', 'layer3'],\n normalize=normalization,\n hypercols=True)\n if feature_extraction_model_file != '':\n self.model.load_pretrained_weights(feature_extraction_model_file)\n\n if feature_extraction_cnn == 'densenet201':\n self.model = models.densenet201(pretrained=True)\n # keep feature extraction network up to denseblock3\n # self.model = nn.Sequential(*list(self.model.features.children())[:-3])\n # keep feature 
extraction network up to transitionlayer2\n self.model = nn.Sequential(*list(self.model.features.children())[:-4])\n if train_fe == False:\n # freeze parameters\n for param in self.model.parameters():\n param.requires_grad = False\n # move to GPU\n if use_cuda:\n self.model = self.model.cuda()\n\n def forward(self, image_batch):\n features = self.model(image_batch)\n return features\n\n\nclass adap_layer_feat3(nn.Module):\n def __init__(self):\n super(adap_layer_feat3, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(1024),\n nn.ReLU()\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(1024, 1024, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(1024),\n nn.ReLU()\n )\n GPU_NUM = torch.cuda.current_device()\n device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\n print(\"find_correspondence_gpu:\",device)\n use_cuda = torch.cuda.is_available()\n if use_cuda:\n self.conv1.cuda()\n self.conv2.cuda()\n def forward(self, feature):\n feature = feature + self.conv1(feature)\n feature = feature + self.conv2(feature)\n return feature\nclass FeatureCorrelation(torch.nn.Module):\n def __init__(self, shape='3D', normalization=True):\n super(FeatureCorrelation, self).__init__()\n self.normalization = normalization\n self.shape = shape\n self.ReLU = nn.ReLU()\n\n def forward(self, feature_A, feature_B):\n if self.shape == '3D':\n b, c, h, w = feature_A.size()\n # reshape features for matrix multiplication\n feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)\n feature_B = feature_B.view(b, c, h * w).transpose(1, 2)\n # perform matrix mult.\n feature_mul = torch.bmm(feature_B, feature_A)\n # indexed [batch,idx_A=row_A+h*col_A,row_B,col_B]\n correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)\n elif self.shape == '4D':\n b, c, hA, wA = feature_A.size()\n b, c, hB, wB = feature_B.size()\n # reshape features for matrix multiplication\n feature_A = feature_A.view(b, c, hA * wA).transpose(1, 2) # size [b,c,h*w]\n feature_B = feature_B.view(b, c, hB * wB) # size [b,c,h*w]\n # perform matrix mult.\n feature_mul = torch.bmm(feature_A, feature_B)\n # indexed [batch,row_A,col_A,row_B,col_B]\n correlation_tensor = feature_mul.view(b, hA, wA, hB, wB).unsqueeze(1)\n\n if self.normalization:\n correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor))\n\n return correlation_tensor\n\n\nclass NeighConsensus(torch.nn.Module):\n def __init__(self, use_cuda=True, kernel_sizes=[3, 3, 3], channels=[10, 10, 1], symmetric_mode=False):\n super(NeighConsensus, self).__init__()\n self.symmetric_mode = symmetric_mode\n self.kernel_sizes = kernel_sizes\n self.channels = channels\n num_layers = len(kernel_sizes)\n nn_modules = list()\n for i in range(num_layers):\n if i == 0:\n ch_in = 1\n else:\n ch_in = channels[i - 1]\n ch_out = channels[i]\n k_size = kernel_sizes[i]\n nn_modules.append(Conv4d(in_channels=ch_in, out_channels=ch_out, kernel_size=k_size, bias=True))\n nn_modules.append(nn.ReLU(inplace=True))\n self.conv = nn.Sequential(*nn_modules)\n if use_cuda:\n self.conv.cuda()\n\n def forward(self, x):\n if self.symmetric_mode:\n # apply network on the input and its \"transpose\" (swapping A-B to B-A ordering of the correlation tensor),\n # this second result is \"transposed back\" to the A-B ordering to match the first result and be able to add together\n x = self.conv(x) + self.conv(x.permute(0, 1, 4, 5, 2, 3)).permute(0, 1, 4, 5, 2, 3)\n # because of the ReLU 
layers in between linear layers,\n # this operation is different than convolving a single time with the filters+filters^T\n # and therefore it makes sense to do this.\n else:\n x = self.conv(x)\n return x\n\n\ndef MutualMatching(corr4d):\n # mutual matching\n batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()\n\n corr4d_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) # [batch_idx,k_A,i_B,j_B]\n corr4d_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4)\n\n # get max\n corr4d_B_max, _ = torch.max(corr4d_B, dim=1, keepdim=True)\n corr4d_A_max, _ = torch.max(corr4d_A, dim=3, keepdim=True)\n\n eps = 1e-5\n corr4d_B = corr4d_B / (corr4d_B_max + eps)\n corr4d_A = corr4d_A / (corr4d_A_max + eps)\n\n corr4d_B = corr4d_B.view(batch_size, 1, fs1, fs2, fs3, fs4)\n corr4d_A = corr4d_A.view(batch_size, 1, fs1, fs2, fs3, fs4)\n\n corr4d = corr4d * (corr4d_A * corr4d_B) # parenthesis are important for symmetric output\n\n return corr4d\n\n\ndef maxpool4d(corr4d_hres, k_size=4):\n slices = []\n for i in range(k_size):\n for j in range(k_size):\n for k in range(k_size):\n for l in range(k_size):\n slices.append(corr4d_hres[:, 0, i::k_size, j::k_size, k::k_size, l::k_size].unsqueeze(0))\n slices = torch.cat(tuple(slices), dim=1)\n corr4d, max_idx = torch.max(slices, dim=1, keepdim=True)\n max_l = torch.fmod(max_idx, k_size)\n max_k = torch.fmod(max_idx.sub(max_l).div(k_size), k_size)\n max_j = torch.fmod(max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size), k_size)\n max_i = max_idx.sub(max_l).div(k_size).sub(max_k).div(k_size).sub(max_j).div(k_size)\n # i,j,k,l represent the *relative* coords of the max point in the box of size k_size*k_size*k_size*k_size\n return (corr4d, max_i, max_j, max_k, max_l)\n\n\nclass find_correspondence(nn.Module):\n def __init__(self, feature_H, feature_W, beta, kernel_sigma):\n super(find_correspondence, self).__init__()\n GPU_NUM = torch.cuda.current_device()\n device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\n print(\"find_correspondence_gpu:\",device)\n self.beta = beta\n self.kernel_sigma = kernel_sigma\n\n # regular grid / [-1,1] normalized\n self.grid_X, self.grid_Y = np.meshgrid(np.linspace(-1, 1, feature_W),\n np.linspace(-1, 1, feature_H)) # grid_X & grid_Y : feature_H x feature_W\n self.grid_X = torch.tensor(self.grid_X, dtype=torch.float, requires_grad=False).to(device)\n self.grid_Y = torch.tensor(self.grid_Y, dtype=torch.float, requires_grad=False).to(device)\n\n # kernels for computing gradients\n self.dx_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 1, 3).expand(1, 2,\n 1,\n 3).to(\n device)\n self.dy_kernel = torch.tensor([-1, 0, 1], dtype=torch.float, requires_grad=False).view(1, 1, 3, 1).expand(1, 2,\n 3,\n 1).to(\n device)\n\n # 1-d indices for generating Gaussian kernels\n self.x = np.linspace(0, feature_W - 1, feature_W)\n self.x = torch.tensor(self.x, dtype=torch.float, requires_grad=False).to(device)\n self.y = np.linspace(0, feature_H - 1, feature_H)\n self.y = torch.tensor(self.y, dtype=torch.float, requires_grad=False).to(device)\n\n # 1-d indices for kernel-soft-argmax / [-1,1] normalized\n self.x_normal = np.linspace(-1, 1, feature_W)\n self.x_normal = torch.tensor(self.x_normal, dtype=torch.float, requires_grad=False).to(device)\n self.y_normal = np.linspace(-1, 1, feature_H)\n self.y_normal = torch.tensor(self.y_normal, dtype=torch.float, requires_grad=False).to(device)\n\n def apply_gaussian_kernel(self, corr, sigma=5):\n b, hw, h, w = corr.size()\n\n idx = corr.max(dim=1)[1] # 
b x h x w get maximum value along channel\n idx_y = (idx // w).view(b, 1, 1, h, w).float()\n idx_x = (idx % w).view(b, 1, 1, h, w).float()\n\n x = self.x.view(1, 1, w, 1, 1).expand(b, 1, w, h, w)\n y = self.y.view(1, h, 1, 1, 1).expand(b, h, 1, h, w)\n\n gauss_kernel = torch.exp(-((x - idx_x) ** 2 + (y - idx_y) ** 2) / (2 * sigma ** 2))\n gauss_kernel = gauss_kernel.view(b, hw, h, w)\n\n return gauss_kernel * corr\n\n def softmax_with_temperature(self, x, beta, d=1):\n M, _ = x.max(dim=d, keepdim=True)\n x = x - M # subtract maximum value for stability\n exp_x = torch.exp(beta * x)\n exp_x_sum = exp_x.sum(dim=d, keepdim=True)\n return exp_x / exp_x_sum\n\n def kernel_soft_argmax(self, corr):\n b, _, h, w = corr.size()\n # corr = self.apply_gaussian_kernel(corr, sigma=self.kernel_sigma)\n corr = self.softmax_with_temperature(corr, beta=self.beta, d=1)\n corr = corr.view(-1, h, w, h, w) # (target hxw) x (source hxw)\n\n grid_x = corr.sum(dim=1, keepdim=False) # marginalize to x-coord.\n x_normal = self.x_normal.expand(b, w)\n x_normal = x_normal.view(b, w, 1, 1)\n grid_x = (grid_x * x_normal).sum(dim=1, keepdim=True) # b x 1 x h x w\n\n grid_y = corr.sum(dim=2, keepdim=False) # marginalize to y-coord.\n y_normal = self.y_normal.expand(b, h)\n y_normal = y_normal.view(b, h, 1, 1)\n grid_y = (grid_y * y_normal).sum(dim=1, keepdim=True) # b x 1 x h x w\n return grid_x, grid_y\n\n def get_flow_smoothness(self, flow, GT_mask):\n flow_dx = F.conv2d(F.pad(flow, (1, 1, 0, 0)), self.dx_kernel) / 2 # (padLeft, padRight, padTop, padBottom)\n flow_dy = F.conv2d(F.pad(flow, (0, 0, 1, 1)), self.dy_kernel) / 2 # (padLeft, padRight, padTop, padBottom)\n\n flow_dx = torch.abs(flow_dx) * GT_mask # consider foreground regions only\n flow_dy = torch.abs(flow_dy) * GT_mask\n\n smoothness = torch.cat((flow_dx, flow_dy), 1)\n return smoothness\n\n def forward(self, corr, GT_mask=None):\n b, _, h, w = corr.size()\n grid_X = self.grid_X.expand(b, h, w) # x coordinates of a regular grid\n grid_X = grid_X.unsqueeze(1) # b x 1 x h x w\n grid_Y = self.grid_Y.expand(b, h, w) # y coordinates of a regular grid\n grid_Y = grid_Y.unsqueeze(1)\n if self.beta is not None:\n grid_x, grid_y = self.kernel_soft_argmax(corr)\n else: # discrete argmax\n _, idx = torch.max(corr, dim=1)\n grid_x = idx % w\n grid_x = (grid_x.float() / (w - 1) - 0.5) * 2\n grid_y = idx // w\n grid_y = (grid_y.float() / (h - 1) - 0.5) * 2\n grid_x = grid_x.unsqueeze(1) # b x 1 x h x w\n grid_y = grid_y.unsqueeze(1)\n\n grid = torch.cat((grid_x.permute(0, 2, 3, 1), grid_y.permute(0, 2, 3, 1)),\n 3)\n # 2-channels@3rd-dim, first channel for x / second channel for y\n flow = torch.cat((grid_x - grid_X, grid_y - grid_Y),\n 1) # 2-channels@1st-dim, first channel for x / second channel for y\n\n if GT_mask is None: # test\n return grid.permute(0, 3, 1, 2), flow.permute(0, 3, 1, 2)\n else: # train\n smoothness = self.get_flow_smoothness(flow, GT_mask)\n return grid, flow, smoothness\n\n\nclass ImMatchNet(nn.Module):\n def __init__(self,\n feature_extraction_cnn='resnet101',\n feature_extraction_last_layer='',\n feature_extraction_model_file=None,\n return_correlation=False,\n ncons_kernel_sizes=[3, 3, 3],\n ncons_channels=[10, 10, 1],\n normalize_features=True,\n train_fe=False,\n use_cuda=True,\n relocalization_k_size=0,\n half_precision=False,\n checkpoint=None,\n ):\n\n super(ImMatchNet, self).__init__()\n # Load checkpoint\n if checkpoint is not None and checkpoint is not '':\n print('Loading checkpoint...')\n checkpoint = torch.load(checkpoint, 
map_location=lambda storage, loc: storage)\n checkpoint['state_dict'] = OrderedDict(\n [(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])\n # override relevant parameters\n print('Using checkpoint parameters: ')\n ncons_channels = checkpoint['args'].ncons_channels\n print(' ncons_channels: ' + str(ncons_channels))\n ncons_kernel_sizes = checkpoint['args'].ncons_kernel_sizes\n print(' ncons_kernel_sizes: ' + str(ncons_kernel_sizes))\n self.ReLU = nn.ReLU()\n self.use_cuda = use_cuda\n self.normalize_features = normalize_features\n print(\"self.normalize_features\", self.normalize_features)\n self.return_correlation = return_correlation\n self.relocalization_k_size = relocalization_k_size\n self.half_precision = half_precision\n\n self.FeatureExtraction = FeatureExtraction(train_fe=train_fe,\n feature_extraction_cnn=feature_extraction_cnn,\n feature_extraction_model_file=feature_extraction_model_file,\n last_layer=feature_extraction_last_layer,\n normalization=False,\n use_cuda=self.use_cuda)\n\n self.adap_layer_feat3 = adap_layer_feat3()\n self.FeatureCorrelation = FeatureCorrelation(shape='4D', normalization=False)\n\n self.NeighConsensus = NeighConsensus(use_cuda=self.use_cuda,\n kernel_sizes=ncons_kernel_sizes,\n channels=ncons_channels)\n feature_H = 25\n feature_W = 25\n beta = 50\n kernel_sigma = 5\n self.find_correspondence = find_correspondence(feature_H, feature_W, beta, kernel_sigma)\n # nd = 25 * 25 # global correlation\n # od = nd + 2\n # batch_norm = True\n # self.decoder4 = CMDTop(in_channels=od, bn=batch_norm, use_cuda=self.use_cuda)\n # Load weights\n if checkpoint is not None and checkpoint is not '':\n print('Copying weights...')\n for name, param in self.FeatureExtraction.state_dict().items():\n if 'num_batches_tracked' not in name:\n self.FeatureExtraction.state_dict()[name].copy_(\n checkpoint['state_dict']['FeatureExtraction.' + name])\n for name, param in self.NeighConsensus.state_dict().items():\n self.NeighConsensus.state_dict()[name].copy_(checkpoint['state_dict']['NeighConsensus.' + name])\n for name, param in self.adap_layer_feat3.state_dict().items():\n self.adap_layer_feat3.state_dict()[name].copy_(checkpoint['state_dict']['adap_layer_feat3.' 
+ name])\n print('Done!')\n\n self.FeatureExtraction.eval()\n\n if self.half_precision:\n for p in self.NeighConsensus.parameters():\n p.data = p.data.half()\n for l in self.NeighConsensus.conv:\n if isinstance(l, Conv4d):\n l.use_half = True\n\n # used only for foward pass at eval and for training with strong supervision\n def forward(self, tnf_batch, writer, writer_position):\n # feature extraction\n feature_A = self.FeatureExtraction(tnf_batch['source_image'])\n feature_B = self.FeatureExtraction(tnf_batch['target_image'])\n adap_feature_A = self.adap_layer_feat3(feature_A)\n adap_feature_B = self.adap_layer_feat3(feature_B)\n adap_feature_A = featureL2Norm(adap_feature_A)\n adap_feature_B = featureL2Norm(adap_feature_B)\n if self.half_precision:\n feature_A = feature_A.half()\n feature_B = feature_B.half()\n # feature correlation\n corr4d = self.FeatureCorrelation(adap_feature_A, adap_feature_B)\n # corr4d = self.FeatureCorrelation(feature_A, feature_B)\n # do 4d maxpooling for relocalization\n if self.relocalization_k_size > 1:\n corr4d, max_i, max_j, max_k, max_l = maxpool4d(corr4d, k_size=self.relocalization_k_size)\n # WTA\n batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()\n nc_B_Avec_WTA = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) # [batch_idx,k_A,i_B,j_B]\n # nc_B_Avec = featureL2Norm(self.ReLU(nc_B_Avec))\n # compute matching scores\n scores_WTA_B, index_WTA_B = torch.max(nc_B_Avec_WTA, dim=1)\n # warping Map\n index1D_WTA_B = index_WTA_B.view(batch_size, -1)\n Map2D_WTA = unNormMap1D_to_NormMap2D(index1D_WTA_B) # (B,2,S,S)\n # Map2D_WTA_np = Map2D_WTA.detach().cpu().numpy()\n\n # scores_B_np =scores_B.detach().cpu().numpy()\n # grid_np = grid.detach().cpu().numpy()\n # corr4d_Net = corr4d.clone()\n # corr4d_Net = corr4d_Net.detach()\n # run match processing model\n corr4d = MutualMatching(corr4d)\n corr4d_Net = self.NeighConsensus(corr4d.detach())\n corr4d_Net = MutualMatching(corr4d_Net)\n\n nc_B_Avec_NET = corr4d_Net.view(batch_size, fs1 * fs2, fs3, fs4) # [batch_idx,k_A,i_B,j_B]\n # nc_B_Avec2 = featureL2Norm(self.ReLU(nc_B_Avec2))\n\n # nc_B_Avec_NET = torch.nn.functional.softmax(nc_B_Avec_NET, 1)\n Map2D_NET, Flow2D_NET = self.find_correspondence(nc_B_Avec_NET)\n # scores_B2, index_B2 = torch.max(nc_B_Avec2, dim=1)\n # index1D_B2 = index_B2.view(batch_size, -1)\n unNormMap2D_NET = NormMap2D_to_unNormMap2D(Map2D_NET) # (B,2,S,S\n\n # img_grid = return_plot_test_map(tnf_batch['source_image'][0].unsqueeze(0), tnf_batch['target_image'][0].unsqueeze(0), Map2D_WTA[0].unsqueeze(0),\n # Map2D_NET[0].unsqueeze(0), scale_factor=16, plot_name='AtoB_MAP')\n # writer.add_figure('adap_grid/adap_NET_{}'.format(writer_position), img_grid)\n # plot_test_map(tnf_batch['source_image'], tnf_batch['target_image'], MAP2D_NET, Map2D_WTA, scale_factor=16,plot_name='AtoB_MAP' )\n # Flow2D_WTA = F.interpolate(input=Map2D_WTA, scale_factor=16, mode='bilinear', align_corners= True)\n # Flow2D_NET = F.interpolate(input=grid, scale_factor=16, mode='bilinear', align_corners= True)\n #\n # Flow2D_WTA = unnormalise_and_convert_mapping_to_flow(Flow2D_WTA)\n # Flow2D_NET = unnormalise_and_convert_mapping_to_flow(Flow2D_NET)\n # plot_test_flow(tnf_batch['source_image'], tnf_batch['target_image'], Flow2D_NET, Flow2D_WTA, scale_factor=16,plot_name='AtoB_FLOW' )\n\n # Flow2D_WTA = F.interpolate(input = Map2D_WTA, scale_factor = 16, mode = 'bilinear', align_corners= True)\n # grid = F.interpolate(input=grid, scale_factor=16, mode='bilinear', align_corners=True)\n\n # if torch.cuda.is_available():\n # 
init_map = torch.FloatTensor(batch_size, 2, fs3, fs4).zero_().cuda()\n # else:\n # init_map = torch.FloatTensor(batch_size, 2, fs3, fs4).zero_()\n # est_map4 = self.decoder4(x1=nc_B_Avec, x3=init_map)\n # flow4 = unnormalise_and_convert_mapping_to_flow(est_map4) / self.div\n # ratio = 16\n # flow4[:, 0, :, :] = flow4[:, 0, :, :] / ratio\n # flow4[:, 1, :, :] = flow4[:, 1, :, :] / ratio\n\n if self.relocalization_k_size > 1:\n delta4d = (max_i, max_j, max_k, max_l)\n return (corr4d, delta4d)\n else:\n return corr4d_Net\n\n"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.cuda.is_available",
"torch.pow",
"numpy.arange",
"torch.arange",
"numpy.zeros",
"torch.div",
"torch.cuda.current_device",
"torch.cuda.device_count",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.cuda.get_device_name"
],
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.zeros",
"torch.sum",
"matplotlib.pyplot.plot",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"numpy.arange",
"torch.arange",
"numpy.zeros",
"matplotlib.pyplot.title",
"torch.cuda.current_device",
"torch.cuda.device_count",
"matplotlib.pyplot.ylabel",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"matplotlib.pyplot.grid",
"torch.cuda.get_device_name",
"matplotlib.pyplot.xlabel"
],
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.cuda.is_available",
"torch.pow",
"numpy.arange",
"torch.arange",
"numpy.zeros",
"torch.div",
"torch.cuda.current_device",
"torch.cuda.device_count",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.cuda.get_device_name"
],
[
"torch.div",
"torch.fmod",
"torch.nn.Sequential",
"torch.abs",
"torch.max",
"numpy.linspace",
"torch.cuda.current_device",
"torch.cat",
"torch.load",
"torch.nn.Conv2d",
"torch.tensor",
"torch.exp",
"torch.bmm",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.pow",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
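The record above centers on ImMatchNet-style dense matching; its key filtering step is the `MutualMatching` function, which suppresses correlation scores that are not near-maximal in both matching directions. Below is a minimal, self-contained sketch of that step, assuming a single-channel 4D correlation volume of shape (B, 1, hA, wA, hB, wB) as in the record's code; the random input is purely illustrative.

```python
import torch


def mutual_matching(corr4d: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    b, ch, fs1, fs2, fs3, fs4 = corr4d.size()  # ch assumed to be 1
    # max over all source positions (fs1*fs2) for each target cell (i_B, j_B)
    corr_B = corr4d.view(b, fs1 * fs2, fs3, fs4)
    corr_B_max, _ = torch.max(corr_B, dim=1, keepdim=True)
    # max over all target positions (fs3*fs4) for each source cell (i_A, j_A)
    corr_A = corr4d.view(b, fs1, fs2, fs3 * fs4)
    corr_A_max, _ = torch.max(corr_A, dim=3, keepdim=True)
    # score each entry relative to the best competitor in each direction
    ratio_B = (corr_B / (corr_B_max + eps)).view(b, 1, fs1, fs2, fs3, fs4)
    ratio_A = (corr_A / (corr_A_max + eps)).view(b, 1, fs1, fs2, fs3, fs4)
    # an entry stays large only when it (nearly) wins in both directions
    return corr4d * (ratio_A * ratio_B)


corr = torch.rand(2, 1, 5, 5, 5, 5)  # toy correlation volume
print(mutual_matching(corr).shape)   # torch.Size([2, 1, 5, 5, 5, 5])
```

The multiplication by both directional ratios is what makes the output symmetric: a match must be (close to) the argmax from A to B and from B to A to survive.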
yf19970118/OPLD-Pytorch | [
"4939bf62587da4533276fda20db36bb019575511",
"4939bf62587da4533276fda20db36bb019575511",
"4939bf62587da4533276fda20db36bb019575511"
] | [
"utils/events.py",
"rcnn/modeling/rpn/loss.py",
"rcnn/modeling/rpn/anchor_generator.py"
] | [
"import os\nimport json\nimport torch\nimport shutil\nimport logging\nimport datetime\nimport numpy as np\n\nfrom collections import defaultdict, deque\nfrom contextlib import contextmanager\n\n_CURRENT_STORAGE_STACK = []\n\n\ndef get_event_storage():\n assert len(\n _CURRENT_STORAGE_STACK\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\n return _CURRENT_STORAGE_STACK[-1]\n\n\nclass EventWriter:\n \"\"\"\n Base class for writers that obtain events from :class:`EventStorage` and process them.\n \"\"\"\n\n def write(self, **kwargs):\n raise NotImplementedError\n\n def close(self):\n pass\n\n\nclass JSONWriter(EventWriter):\n def __init__(self, json_file, window_size=20):\n \"\"\"\n Args:\n json_file (str): path to the json file. New data will be appended if the file exists.\n window_size (int): the window size of median smoothing for the scalars whose\n `smoothing_hint` are True.\n \"\"\"\n # resume\n # if os.path.exists(json_file):\n # os.remove(json_file)\n self._file_handle = open(json_file, \"a\")\n self._window_size = window_size\n\n def write(self, **kwargs):\n storage = get_event_storage()\n to_save = {\"iteration\": storage.iter + 1}\n to_save.update(storage.latest_with_smoothing_hint(self._window_size))\n self._file_handle.write(json.dumps(to_save, sort_keys=True) + \"\\n\")\n self._file_handle.flush()\n try:\n os.fsync(self._file_handle.fileno())\n except AttributeError:\n pass\n\n def close(self):\n self._file_handle.close()\n\n\nclass TensorboardXWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self, log_dir: str, window_size: int = 20, **kwargs):\n \"\"\"\n Args:\n log_dir (str): The directory to save the output events\n window_size (int): the scalars will be median-smoothed by this window size\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._window_size = window_size\n from torch.utils.tensorboard import SummaryWriter\n\n self._writer = SummaryWriter(log_dir, **kwargs)\n\n def write(self, **kwargs):\n storage = get_event_storage()\n for k, v in storage.latest_with_smoothing_hint(self._window_size).items():\n self._writer.add_scalar(k, v, storage.iter)\n\n def close(self):\n if hasattr(self, \"_writer\"): # doesn't exist when the code fails at import\n self._writer.close()\n\n\nclass CommonMetricPrinter(EventWriter):\n \"\"\"\n Print __common__ metrics to the terminal, including\n iteration time, ETA, memory, all losses, and the learning rate.\n\n To print something different, please implement a similar printer by yourself.\n \"\"\"\n\n def __init__(self, yaml, max_iter):\n \"\"\"\n Args:\n max_iter (int): the maximum number of iterations to train.\n Used to compute ETA.\n \"\"\"\n self.logger = logging.getLogger(__name__)\n self._max_iter = max_iter\n self.yaml = yaml\n\n def write(self, epoch, max_epoch, **kwargs):\n storage = get_event_storage()\n iteration = storage.iter\n\n data_time, time = None, None\n eta_string = \"N/A\"\n try:\n data_time = storage.history(\"data_time\").avg(20)\n time = storage.history(\"time\").global_avg()\n if max_epoch is not None:\n eta_iter = max_epoch * self._max_iter - iteration - 1\n iteration = iteration % self._max_iter\n else:\n eta_iter = self._max_iter - iteration\n eta_seconds = time * (eta_iter)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n except KeyError: # they may not exist in the first few iterations (due to warmup)\n pass\n\n try:\n lr = 
\"{:.6f}\".format(storage.history(\"lr\").latest())\n except KeyError:\n lr = \"N/A\"\n\n if torch.cuda.is_available():\n max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0\n else:\n max_mem_mb = None\n\n losses = [\n \"{}: {:.4f}\".format(k, v.median(20))\n for k, v in storage.histories().items()\n if \"loss\" in k and \"total\" not in k\n ]\n skip_losses = len(losses) == 1\n # NOTE: max_mem is parsed by grep in \"dev/parse_results.sh\"\n print(\n \"\"\"\\\n[Training]|-[{yaml}]-{epoch}[iter: {iter}/{max_iter}]-[lr: {lr}]-[eta: {eta}]\n |-[{memory}]-[{time}]-[{data_time}] \n |-[total loss: {total_loss}]{losses} \\\n\"\"\".format(\n yaml=self.yaml.split('/')[-1] + '.yaml',\n eta=eta_string,\n iter=iteration + 1, # start from iter 1\n epoch='' if epoch is None else '[epoch: {}/{}]-'.format(epoch, max_epoch),\n max_iter=self._max_iter,\n lr=lr,\n memory=\"max_mem: {:.0f}M\".format(max_mem_mb) if max_mem_mb is not None else \"\",\n time=\"iter_time: {:.4f}\".format(time) if time is not None else \"iter_time: N/A\",\n data_time=\"data_time: {:.4f}\".format(data_time) if data_time is not None else \"\",\n total_loss=\"{:.4f}\".format(storage.histories()[\"total_loss\"].median(20)),\n losses=\"-[losses]-[{}]\".format(\" \".join(losses)) if not skip_losses else \"\",\n )\n )\n\n\nclass EventStorage:\n \"\"\"\n The user-facing class that provides metric storage functionalities.\n\n In the future we may add support for storing / logging other types of data if needed.\n \"\"\"\n\n def __init__(self, start_iter=0, log_period=20, iter_per_epoch=-1):\n \"\"\"\n Args:\n start_iter (int): the iteration number to start with\n \"\"\"\n self.window_size = iter_per_epoch if iter_per_epoch != -1 else log_period\n self._history = defaultdict(HistoryBuffer)\n self._smoothing_hints = {}\n self._latest_scalars = {}\n self._iter = start_iter\n self._current_prefix = \"\"\n\n def put_scalar(self, name, value, smoothing_hint=True):\n \"\"\"\n Add a scalar `value` to the `HistoryBuffer` associated with `name`.\n\n Args:\n smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be\n smoothed when logged. The hint will be accessible through\n :meth:`EventStorage.smoothing_hints`. 
A writer may ignore the hint\n and apply custom smoothing rule.\n\n It defaults to True because most scalars we save need to be smoothed to\n provide any useful signal.\n \"\"\"\n name = self._current_prefix + name\n history = self._history[name]\n value = float(value)\n history.update(value, self._iter)\n self._latest_scalars[name] = value\n\n existing_hint = self._smoothing_hints.get(name)\n if existing_hint is not None:\n assert (\n existing_hint == smoothing_hint\n ), \"Scalar {} was put with a different smoothing_hint!\".format(name)\n else:\n self._smoothing_hints[name] = smoothing_hint\n\n def put_scalars(self, *, smoothing_hint=True, **kwargs):\n \"\"\"\n Put multiple scalars from keyword arguments.\n\n Examples:\n\n storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True)\n \"\"\"\n for k, v in kwargs.items():\n self.put_scalar(k, v, smoothing_hint=smoothing_hint)\n\n def history(self, name):\n \"\"\"\n Returns:\n HistoryBuffer: the scalar history for name\n \"\"\"\n ret = self._history.get(name, None)\n if ret is None:\n raise KeyError(\"No history metric available for {}!\".format(name))\n return ret\n\n def histories(self):\n \"\"\"\n Returns:\n dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars\n \"\"\"\n return self._history\n\n def latest(self):\n \"\"\"\n Returns:\n dict[name -> number]: the scalars that's added in the current iteration.\n \"\"\"\n return self._latest_scalars\n\n def latest_with_smoothing_hint(self, window_size=20):\n \"\"\"\n Similar to :meth:`latest`, but the returned values\n are either the un-smoothed original latest value,\n or a median of the given window_size,\n depend on whether the smoothing_hint is True.\n\n This provides a default behavior that other writers can use.\n \"\"\"\n result = {}\n\n for k, v in self._latest_scalars.items():\n result[k] = self._history[k].median(window_size) if self._smoothing_hints[k] else v\n return result\n\n def smoothing_hints(self):\n \"\"\"\n Returns:\n dict[name -> bool]: the user-provided hint on whether the scalar\n is noisy and needs smoothing.\n \"\"\"\n return self._smoothing_hints\n\n def step(self):\n \"\"\"\n User should call this function at the beginning of each iteration, to\n notify the storage of the start of a new iteration.\n The storage will then be able to associate the new data with the\n correct iteration number.\n \"\"\"\n self._iter += 1\n self._latest_scalars = {}\n\n @property\n def iter(self):\n return self._iter\n\n @property\n def iteration(self):\n # for backward compatibility\n return self._iter\n\n def __enter__(self):\n _CURRENT_STORAGE_STACK.append(self)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n assert _CURRENT_STORAGE_STACK[-1] == self\n _CURRENT_STORAGE_STACK.pop()\n\n @contextmanager\n def name_scope(self, name):\n \"\"\"\n Yields:\n A context within which all the events added to this storage\n will be prefixed by the name scope.\n \"\"\"\n old_prefix = self._current_prefix\n self._current_prefix = name.rstrip(\"/\") + \"/\"\n yield\n self._current_prefix = old_prefix\n\n\nclass HistoryBuffer:\n \"\"\"\n Track a series of scalar values and provide access to smoothed values over a\n window or the global average of the series.\n \"\"\"\n\n def __init__(self, max_length: int = 1000000):\n \"\"\"\n Args:\n max_length: maximal number of values that can be stored in the\n buffer. 
When the capacity of the buffer is exhausted, old\n values will be removed.\n \"\"\"\n self._max_length = max_length # int\n self._data = [] # List[Tuple[float, float]] (value, iteration) pairs\n self._count = 0 # int\n self._global_avg = 0 # float\n\n def update(self, value: float, iteration: float = None):\n \"\"\"\n Add a new scalar value produced at certain iteration. If the length\n of the buffer exceeds self._max_length, the oldest element will be\n removed from the buffer.\n \"\"\"\n if iteration is None:\n iteration = self._count\n if len(self._data) == self._max_length:\n self._data.pop(0)\n self._data.append((value, iteration))\n\n self._count += 1\n self._global_avg += (value - self._global_avg) / self._count\n\n def latest(self):\n \"\"\"\n Return the latest scalar value added to the buffer.\n \"\"\"\n return self._data[-1][0]\n\n def median(self, window_size: int):\n \"\"\"\n Return the median of the latest `window_size` values in the buffer.\n \"\"\"\n return np.median([x[0] for x in self._data[-window_size:]])\n\n def avg(self, window_size: int):\n \"\"\"\n Return the mean of the latest `window_size` values in the buffer.\n \"\"\"\n return np.mean([x[0] for x in self._data[-window_size:]])\n\n def global_avg(self):\n \"\"\"\n Return the mean of all the elements in the buffer. Note that this\n includes those getting removed due to limited buffer storage.\n \"\"\"\n return self._global_avg\n\n def values(self):\n \"\"\"\n Returns:\n list[(number, iteration)]: content of the current buffer.\n \"\"\"\n return self._data\n",
"import torch\nfrom torch.nn import functional as F\n\nfrom models.ops import smooth_l1_loss\nfrom utils.data.structures.boxlist_ops import boxlist_iou, cat_boxlist\nfrom rcnn.utils.matcher import Matcher\nfrom rcnn.utils.balanced_positive_negative_sampler import BalancedPositiveNegativeSampler\nfrom rcnn.utils.misc import concat_box_prediction_layers, cat\nfrom rcnn.core.config import cfg\n\n\nclass RPNLossComputation(object):\n \"\"\"\n This class computes the RPN loss.\n \"\"\"\n\n def __init__(self, proposal_matcher, fg_bg_sampler, box_coder, generate_labels_func):\n \"\"\"\n Arguments:\n proposal_matcher (Matcher)\n fg_bg_sampler (BalancedPositiveNegativeSampler)\n box_coder (BoxCoder)\n generate_labels_func (function)\n \"\"\"\n self.proposal_matcher = proposal_matcher\n self.fg_bg_sampler = fg_bg_sampler\n self.box_coder = box_coder\n self.copied_fields = []\n self.generate_labels_func = generate_labels_func\n self.discard_cases = ['not_visibility', 'between_thresholds']\n\n def match_targets_to_anchors(self, anchor, target, copied_fields=[]):\n match_quality_matrix = boxlist_iou(target, anchor)\n matched_idxs = self.proposal_matcher(match_quality_matrix)\n # RPN doesn't need any fields from target\n # for creating the labels, so clear them all\n target = target.copy_with_fields(copied_fields)\n # get the targets corresponding GT for each anchor\n # NB: need to clamp the indices because we can have a single\n # GT in the image, and matched_idxs can be -2, which goes\n # out of bounds\n matched_targets = target[matched_idxs.clamp(min=0)]\n matched_targets.add_field(\"matched_idxs\", matched_idxs)\n return matched_targets\n\n def prepare_targets(self, anchors, targets):\n labels = []\n regression_targets = []\n for anchors_per_image, targets_per_image in zip(anchors, targets):\n matched_targets = self.match_targets_to_anchors(\n anchors_per_image, targets_per_image, self.copied_fields\n )\n\n matched_idxs = matched_targets.get_field(\"matched_idxs\")\n labels_per_image = self.generate_labels_func(matched_targets)\n labels_per_image = labels_per_image.to(dtype=torch.float32)\n\n # Background (negative examples)\n bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD\n labels_per_image[bg_indices] = 0\n\n # discard anchors that go out of the boundaries of the image\n if \"not_visibility\" in self.discard_cases:\n labels_per_image[~anchors_per_image.get_field(\"visibility\")] = -1\n\n # discard indices that are between thresholds\n if \"between_thresholds\" in self.discard_cases:\n inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS\n labels_per_image[inds_to_discard] = -1\n\n # compute regression targets\n regression_targets_per_image = self.box_coder.encode(\n matched_targets.bbox, anchors_per_image.bbox\n )\n\n labels.append(labels_per_image)\n regression_targets.append(regression_targets_per_image)\n\n return labels, regression_targets\n\n def __call__(self, anchors, objectness, box_regression, targets):\n \"\"\"\n Arguments:\n anchors (list[BoxList])\n objectness (list[Tensor])\n box_regression (list[Tensor])\n targets (list[BoxList])\n\n Returns:\n objectness_loss (Tensor)\n box_loss (Tensor\n \"\"\"\n anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]\n labels, regression_targets = self.prepare_targets(anchors, targets)\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)\n sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, 
dim=0)).squeeze(1)\n\n sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n objectness, box_regression = concat_box_prediction_layers(objectness, box_regression)\n objectness = objectness.squeeze()\n\n labels = torch.cat(labels, dim=0)\n regression_targets = torch.cat(regression_targets, dim=0)\n\n box_loss = smooth_l1_loss(\n box_regression[sampled_pos_inds],\n regression_targets[sampled_pos_inds],\n beta=cfg.RPN.SMOOTH_L1_BETA,\n size_average=False,\n ) / (sampled_inds.numel())\n\n objectness_loss = F.binary_cross_entropy_with_logits(\n objectness[sampled_inds], labels[sampled_inds]\n )\n\n return objectness_loss, box_loss\n\n\ndef generate_rpn_labels(matched_targets):\n matched_idxs = matched_targets.get_field(\"matched_idxs\")\n labels_per_image = matched_idxs >= 0\n return labels_per_image\n\n\ndef make_rpn_loss_evaluator(box_coder):\n matcher = Matcher(\n cfg.RPN.FG_IOU_THRESHOLD,\n cfg.RPN.BG_IOU_THRESHOLD,\n allow_low_quality_matches=True,\n )\n fg_bg_sampler = BalancedPositiveNegativeSampler(cfg.RPN.BATCH_SIZE_PER_IMAGE, cfg.RPN.POSITIVE_FRACTION)\n\n loss_evaluator = RPNLossComputation(\n matcher,\n fg_bg_sampler,\n box_coder,\n generate_rpn_labels\n )\n return loss_evaluator\n",
"import math\nimport numpy as np\n\nimport torch\nfrom torch import nn\n\nfrom utils.data.structures.bounding_box import BoxList\n\n\nclass BufferList(nn.Module):\n \"\"\"\n Similar to nn.ParameterList, but for buffers\n \"\"\"\n\n def __init__(self, buffers=None):\n super(BufferList, self).__init__()\n if buffers is not None:\n self.extend(buffers)\n\n def extend(self, buffers):\n offset = len(self)\n for i, buffer in enumerate(buffers):\n self.register_buffer(str(offset + i), buffer)\n return self\n\n def __len__(self):\n return len(self._buffers)\n\n def __iter__(self):\n return iter(self._buffers.values())\n\n\nclass AnchorGenerator(nn.Module):\n \"\"\"\n For a set of image sizes and feature maps, computes a set\n of anchors\n \"\"\"\n\n def __init__(self, sizes=(128, 256, 512), aspect_ratios=(0.5, 1.0, 2.0), anchor_strides=(8, 16, 32),\n straddle_thresh=0):\n super(AnchorGenerator, self).__init__()\n\n if len(anchor_strides) == 1:\n anchor_stride = anchor_strides[0]\n cell_anchors = [\n generate_anchors(anchor_stride, sizes, aspect_ratios).float()\n ]\n else:\n if len(anchor_strides) != len(sizes):\n raise RuntimeError(\"FPN should have #anchor_strides == #sizes\")\n\n cell_anchors = [\n generate_anchors(\n anchor_stride,\n size if isinstance(size, (tuple, list)) else (size,),\n aspect_ratios\n ).float()\n for anchor_stride, size in zip(anchor_strides, sizes)\n ]\n self.strides = anchor_strides\n self.cell_anchors = BufferList(cell_anchors)\n self.straddle_thresh = straddle_thresh\n\n def num_anchors_per_location(self):\n return [len(cell_anchors) for cell_anchors in self.cell_anchors]\n\n def grid_anchors(self, grid_sizes):\n anchors = []\n for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors):\n grid_height, grid_width = size\n device = base_anchors.device\n shifts_x = torch.arange(\n 0, grid_width * stride, step=stride, dtype=torch.float32, device=device\n )\n shifts_y = torch.arange(\n 0, grid_height * stride, step=stride, dtype=torch.float32, device=device\n )\n shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)\n\n anchors.append(\n (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)\n )\n\n return anchors\n\n def add_visibility_to(self, boxlist):\n image_width, image_height = boxlist.size\n anchors = boxlist.bbox\n if self.straddle_thresh >= 0:\n inds_inside = (\n (anchors[..., 0] >= -self.straddle_thresh)\n & (anchors[..., 1] >= -self.straddle_thresh)\n & (anchors[..., 2] < image_width + self.straddle_thresh)\n & (anchors[..., 3] < image_height + self.straddle_thresh)\n )\n else:\n device = anchors.device\n inds_inside = torch.ones(anchors.shape[0], dtype=torch.uint8, device=device)\n boxlist.add_field(\"visibility\", inds_inside)\n\n def forward(self, image_list, feature_maps):\n grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]\n anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)\n anchors = []\n for i, (image_height, image_width) in enumerate(image_list.image_sizes): # 对图像而言\n anchors_in_image = []\n for anchors_per_feature_map in anchors_over_all_feature_maps:\n boxlist = BoxList(\n anchors_per_feature_map, (image_width, image_height), mode=\"xyxy\"\n )\n self.add_visibility_to(boxlist)\n anchors_in_image.append(boxlist)\n anchors.append(anchors_in_image)\n return anchors\n\n\ndef make_anchor_generator(cfg):\n anchor_sizes = cfg.RPN.ANCHOR_SIZES\n aspect_ratios = 
cfg.RPN.ASPECT_RATIOS\n anchor_stride = cfg.RPN.ANCHOR_STRIDE\n straddle_thresh = cfg.RPN.BOUNDARY_THRESH\n\n if cfg.MODEL.FPN_ON:\n assert len(anchor_stride) == len(anchor_sizes), \"FPN should have len(ANCHOR_STRIDE) == len(ANCHOR_SIZES)\"\n else:\n assert len(anchor_stride) == 1, \"Non-FPN should have a single ANCHOR_STRIDE\"\n anchor_generator = AnchorGenerator(\n anchor_sizes, aspect_ratios, anchor_stride, straddle_thresh\n )\n return anchor_generator\n\n# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n#\n# Based on:\n# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\n\n\n# Verify that we compute the same anchors as Shaoqing's matlab implementation:\n#\n# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat\n# >> anchors\n#\n# anchors =\n#\n# -83 -39 100 56\n# -175 -87 192 104\n# -359 -183 376 200\n# -55 -55 72 72\n# -119 -119 136 136\n# -247 -247 264 264\n# -35 -79 52 96\n# -79 -167 96 184\n# -167 -343 184 360\n\n# array([[ -83., -39., 100., 56.],\n# [-175., -87., 192., 104.],\n# [-359., -183., 376., 200.],\n# [ -55., -55., 72., 72.],\n# [-119., -119., 136., 136.],\n# [-247., -247., 264., 264.],\n# [ -35., -79., 52., 96.],\n# [ -79., -167., 96., 184.],\n# [-167., -343., 184., 360.]])\n\n\ndef generate_anchors(stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):\n \"\"\"Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. 
Anchors\n are centered on stride / 2, have (approximate) sqrt areas of the specified\n sizes, and aspect ratios as given.\n \"\"\"\n return _generate_anchors(\n stride,\n np.array(sizes, dtype=np.float) / stride,\n np.array(aspect_ratios, dtype=np.float),\n )\n\n\ndef _generate_anchors(base_size, scales, aspect_ratios):\n \"\"\"Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.\n \"\"\"\n anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1\n anchors = _ratio_enum(anchor, aspect_ratios)\n anchors = np.vstack(\n [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]\n )\n return torch.from_numpy(anchors)\n\n\ndef _whctrs(anchor):\n \"\"\"Return width, height, x center, and y center for an anchor (window).\"\"\"\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack(\n (\n x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1),\n )\n )\n return anchors\n\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"Enumerate a set of anchors for each aspect ratio wrt an anchor.\"\"\"\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\ndef _scale_enum(anchor, scales):\n \"\"\"Enumerate a set of anchors for each scale wrt an anchor.\"\"\"\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n"
] | [
[
"numpy.median",
"torch.cuda.max_memory_allocated",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
],
[
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.cat"
],
[
"numpy.hstack",
"torch.ones",
"numpy.sqrt",
"torch.from_numpy",
"numpy.round",
"torch.arange",
"torch.stack",
"numpy.array",
"torch.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
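The events.py file in the record above smooths noisy training scalars by storing (value, iteration) pairs in a HistoryBuffer and reporting a windowed median instead of the raw latest value. Here is a stripped-down sketch of that pattern — illustrative names, not the record's full class — showing why the median is used: a single loss spike barely moves it, while the running global average tracks everything ever logged.

```python
import numpy as np


class TinyHistoryBuffer:
    """Bounded buffer of (value, iteration) pairs with windowed-median smoothing."""

    def __init__(self, max_length: int = 1_000_000):
        self._max_length = max_length
        self._data = []        # list of (value, iteration) pairs
        self._count = 0
        self._global_avg = 0.0

    def update(self, value: float, iteration=None):
        if iteration is None:
            iteration = self._count
        if len(self._data) == self._max_length:
            self._data.pop(0)  # evict the oldest entry once full
        self._data.append((value, iteration))
        self._count += 1
        # incremental running mean; unlike the buffer, it never forgets
        self._global_avg += (value - self._global_avg) / self._count

    def median(self, window_size: int) -> float:
        return float(np.median([v for v, _ in self._data[-window_size:]]))


buf = TinyHistoryBuffer()
for it, loss in enumerate([1.0, 0.9, 5.0, 0.8, 0.7]):  # 5.0 is an outlier spike
    buf.update(loss, it)
print(buf.median(window_size=5))  # 0.9  -> the spike is ignored
print(buf._global_avg)            # 1.68 -> the raw mean is dragged upward
```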
sakdag/crime-data-analysis | [
"9c95238c6aaf1394f68be59e26e8c6d75f669d7e"
] | [
"src/preprocessing/census_preprocessor.py"
] | [
"import numpy as np\nimport pandas as pd\n\nimport src.utils.crime_classification_utils as utils\nimport src.config.column_names as col_names\n\n\ndef preprocess_and_save(original_file_name: str,\n preprocessed_file_name: str,\n zip_codes_dataset_file_path: str):\n\n census_df = utils.read_dataset(original_file_name)\n zip_codes_df = utils.read_dataset(zip_codes_dataset_file_path)\n\n # Insert latitude and longitude values using zip_codes_df\n insert_geolocation_info(census_df, zip_codes_df)\n print(census_df)\n\n # Save\n census_df.to_csv(preprocessed_file_name, index=False)\n\n\ndef insert_geolocation_info(df: pd.DataFrame, zip_codes_df: pd.DataFrame):\n ca_zip_codes = zip_codes_df[zip_codes_df['state_id'] == 'CA']\n df[col_names.LATITUDE] = np.NAN\n df[col_names.LONGITUDE] = np.NaN\n\n for census_index, census_row in df.iterrows():\n for zip_index, zip_row in ca_zip_codes.iterrows():\n\n if census_row[col_names.ZIP_CODE] == zip_row['zip']:\n df.loc[census_index, col_names.LATITUDE] = zip_row['lat']\n df.loc[census_index, col_names.LONGITUDE] = zip_row['lng']\n break\n\n\ndef categorize_total_population(df):\n low = df.loc[df[col_names.TOTAL_POPULATION] <= 20000]\n low.loc[low[col_names.TOTAL_POPULATION] <= 20000, col_names.TOTAL_POPULATION] = 'Low'\n\n medium = df.loc[np.logical_and(df[col_names.TOTAL_POPULATION] > 20000,\n df[col_names.TOTAL_POPULATION] <= 40000)]\n medium.loc[\n np.logical_and(medium[col_names.TOTAL_POPULATION] > 20000,\n medium[col_names.TOTAL_POPULATION] <= 40000),\n col_names.TOTAL_POPULATION] = 'Medium'\n\n high = df.loc[np.logical_and(df[col_names.TOTAL_POPULATION] > 40000,\n df[col_names.TOTAL_POPULATION] <= 60000)]\n high.loc[np.logical_and(high[col_names.TOTAL_POPULATION] > 40000,\n high[col_names.TOTAL_POPULATION] <= 60000),\n col_names.TOTAL_POPULATION] = 'High'\n\n extreme = df.loc[df[col_names.TOTAL_POPULATION] > 60000]\n extreme.loc[extreme[col_names.TOTAL_POPULATION] > 60000, col_names.TOTAL_POPULATION] = 'Extreme'\n\n low = low.append(medium, ignore_index=True)\n low = low.append(high, ignore_index=True)\n low = low.append(extreme, ignore_index=True)\n\n low = low.rename({col_names.TOTAL_POPULATION: col_names.TOTAL_POPULATION_CATEGORIZED}, axis=1)\n\n return low\n\n\ndef categorize_total_males_and_females(df):\n df[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] = np.nan\n for index, row in df.iterrows():\n total_males = row[col_names.TOTAL_MALES]\n total_females = row[col_names.TOTAL_FEMALES]\n\n female_to_male_ratio = float(total_females + 1) / float(total_males + 1) * 100\n df.loc[index, col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] = female_to_male_ratio\n\n low = df.loc[df[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] <= 48.0]\n low.loc[low[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] <= 48.0,\n col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] = 'Low'\n\n almost_equal = df.loc[np.logical_and(df[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] > 48.0,\n df[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] <= 52.0)]\n almost_equal.loc[\n np.logical_and(almost_equal[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] > 48.0,\n almost_equal[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] <= 52.0),\n col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] = 'AlmostEqual'\n\n high = df.loc[df[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] > 52.0]\n high.loc[high[col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] > 52.0,\n col_names.FEMALE_TO_MALE_RATIO_CATEGORIZED] = 'High'\n\n low = low.append(almost_equal, ignore_index=True)\n low = low.append(high, ignore_index=True)\n\n return low\n\n\ndef 
categorize_median_age(df):\n low = df.loc[df[col_names.MEDIAN_AGE] <= 30.0]\n low.loc[low[col_names.MEDIAN_AGE] <= 30.0, col_names.MEDIAN_AGE] = 'Low'\n\n low_to_medium = df.loc[np.logical_and(df[col_names.MEDIAN_AGE] > 30.0,\n df[col_names.MEDIAN_AGE] <= 35.0)]\n low_to_medium.loc[\n np.logical_and(low_to_medium[col_names.MEDIAN_AGE] > 30.0,\n low_to_medium[col_names.MEDIAN_AGE] <= 35.0),\n col_names.MEDIAN_AGE] = 'LowToMedium'\n\n medium = df.loc[np.logical_and(df[col_names.MEDIAN_AGE] > 35.0,\n df[col_names.MEDIAN_AGE] <= 40.0)]\n medium.loc[\n np.logical_and(medium[col_names.MEDIAN_AGE] > 35.0,\n medium[col_names.MEDIAN_AGE] <= 40.0),\n col_names.MEDIAN_AGE] = 'Medium'\n\n medium_to_high = df.loc[np.logical_and(df[col_names.MEDIAN_AGE] > 40.0,\n df[col_names.MEDIAN_AGE] <= 45.0)]\n medium_to_high.loc[np.logical_and(medium_to_high[col_names.MEDIAN_AGE] > 40.0,\n medium_to_high[col_names.MEDIAN_AGE] <= 45.0),\n col_names.MEDIAN_AGE] = 'MediumToHigh'\n\n high = df.loc[df[col_names.MEDIAN_AGE] > 45.0]\n high.loc[high[col_names.MEDIAN_AGE] > 45.0, col_names.MEDIAN_AGE] = 'High'\n\n low = low.append(low_to_medium, ignore_index=True)\n low = low.append(medium, ignore_index=True)\n low = low.append(medium_to_high, ignore_index=True)\n low = low.append(high, ignore_index=True)\n\n low = low.rename({col_names.MEDIAN_AGE: col_names.MEDIAN_AGE_CATEGORIZED}, axis=1)\n\n return low\n"
] | [
[
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
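The census_preprocessor.py record above builds each categorical column by slicing the frame with four or five boolean masks and stitching the pieces back together with `DataFrame.append` (removed in pandas 2.0). A minimal sketch of the same right-closed binning expressed with `pandas.cut` follows; the column name is illustrative rather than the record's col_names constants.

```python
import pandas as pd

df = pd.DataFrame({"total_population": [5_000, 25_000, 45_000, 80_000]})

# Right-closed bins matching the record's thresholds:
# (-inf, 20000] -> Low, (20000, 40000] -> Medium,
# (40000, 60000] -> High, (60000, inf) -> Extreme
df["total_population_categorized"] = pd.cut(
    df["total_population"],
    bins=[-float("inf"), 20_000, 40_000, 60_000, float("inf")],
    labels=["Low", "Medium", "High", "Extreme"],
)

print(df["total_population_categorized"].tolist())
# ['Low', 'Medium', 'High', 'Extreme']
```

One `pd.cut` call replaces the mask/append dance per column and preserves the original row order, which the record's approach does not.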
Na1an/VIP | [
"2e45617a8b58dd4a4889114592bac3c4786abc15",
"2e45617a8b58dd4a4889114592bac3c4786abc15"
] | [
"vip_hci/pca/pca_fullfr.py",
"vip_hci/negfc/mcmc_sampling.py"
] | [
"#! /usr/bin/env python\n\n\"\"\"\nFull-frame PCA algorithm for ADI, ADI+RDI and ADI+mSDI (IFS data) cubes.\n\"\"\"\n\n__author__ = 'Carlos Alberto Gomez Gonzalez'\n__all__ = ['pca']\n\nimport numpy as np\nfrom multiprocessing import cpu_count\nfrom .svd import svd_wrapper, SVDecomposer\nfrom .utils_pca import pca_incremental, pca_grid\nfrom ..preproc.derotation import _find_indices_adi, _compute_pa_thresh\nfrom ..preproc import cube_rescaling_wavelengths as scwave\nfrom ..preproc import (cube_derotate, cube_collapse, check_pa_vector,\n check_scal_vector, cube_crop_frames, \n cube_subtract_sky_pca)\nfrom ..conf import (timing, time_ini, check_enough_memory, Progressbar,\n check_array)\nfrom ..conf.utils_conf import pool_map, iterable\nfrom ..var import (frame_center, dist, prepare_matrix, reshape_matrix,\n cube_filter_lowpass, mask_circle)\nfrom ..stats import descriptive_stats\n\n\ndef pca(cube, angle_list, cube_ref=None, scale_list=None, ncomp=1,\n svd_mode='lapack', scaling=None, mask_center_px=None, source_xy=None,\n delta_rot=1, fwhm=4, adimsdi='single', crop_ifs=True, imlib='opencv',\n imlib2='opencv', interpolation='lanczos4', collapse='median', \n ifs_collapse_range='all', mask_rdi=None, check_memory=True, batch=None, \n nproc=1, full_output=False, verbose=True, weights=None, conv=False):\n \"\"\" Algorithm where the reference PSF and the quasi-static speckle pattern\n are modeled using Principal Component Analysis. Depending on the input\n parameters this PCA function can work in ADI, RDI or SDI (IFS data) mode.\n\n ADI: the target ``cube`` itself is used to learn the PCs and to obtain a\n low-rank approximation model PSF (star + speckles). Both `cube_ref`` and\n ``scale_list`` must be None. The full-frame ADI-PCA implementation is based\n on Soummer et al. 2012 (http://arxiv.org/abs/1207.4197) and Amara & Quanz\n 2012 (http://arxiv.org/abs/1207.6637). If ``batch`` is provided then the\n cube if processed with incremental PCA as described in Gomez Gonzalez et al.\n 2017 (https://arxiv.org/abs/1705.06184).\n\n ADI + RDI: if a reference cube is provided (``cube_ref``), its PCs are used\n to reconstruct the target frames to obtain the model PSF (star + speckles).\n\n ADI + SDI (IFS data): if a scaling vector is provided (``scale_list``) and\n the cube is a 4d array [# channels, # adi-frames, Y, X], its assumed it\n contains several multi-spectral ADI frames. A single or two stages PCA can\n be performed, depending on ``adimsdi``.\n\n Parameters\n ----------\n cube : str or numpy ndarray, 3d or 4d\n Input cube (ADI or ADI+mSDI). If a string is given, it must correspond\n to the path to the fits file to be opened in memmap mode (for PCA\n incremental of ADI 3d cubes).\n angle_list : numpy ndarray, 1d\n Corresponding parallactic angle for each frame.\n cube_ref : numpy ndarray, 3d, optional\n Reference library cube. For Reference Star Differential Imaging.\n scale_list : numpy ndarray, 1d, optional\n Scaling factors in case of IFS data (ADI+mSDI cube). Usually, the\n scaling factors are the central channel wavelength divided by the\n shortest wavelength in the cube (more thorough approaches can be used\n to get the scaling factors). This scaling factors are used to re-scale\n the spectral channels and align the speckles.\n ncomp : int, float or tuple of int/None, optional\n How many PCs are used as a lower-dimensional subspace to project the\n target frames.\n\n * ADI (``cube`` is a 3d array): if an int is provided, ``ncomp`` is the\n number of PCs extracted from ``cube`` itself. 
If ``ncomp`` is a float\n in the interval (0, 1] then it corresponds to the desired cumulative\n explained variance ratio (the corresponding number of components is\n estimated). If ``ncomp`` is a tuple of two integers, then it\n corresponds to an interval of PCs in which final residual frames are\n computed (optionally, if a tuple of 3 integers is passed, the third\n value is the step). When``source_xy`` is not None, then the S/Ns\n (mean value in a 1xFWHM circular aperture) of the given\n (X,Y) coordinates are computed.\n\n * ADI+RDI (``cube`` and ``cube_ref`` are 3d arrays): ``ncomp`` is the\n number of PCs obtained from ``cube_ref``. If ``ncomp`` is a tuple,\n then it corresponds to an interval of PCs (obtained from ``cube_ref``)\n in which final residual frames are computed. If ``source_xy`` is not\n None, then the S/Ns (mean value in a 1xFWHM circular aperture) of the\n given (X,Y) coordinates are computed.\n\n * ADI+mSDI (``cube`` is a 4d array and ``adimsdi=\"single\"``): ``ncomp``\n is the number of PCs obtained from the whole set of frames\n (n_channels * n_adiframes). If ``ncomp`` is a float in the interval\n (0, 1] then it corresponds to the desired CEVR, and the corresponding\n number of components will be estimated. If ``ncomp`` is a tuple, then\n it corresponds to an interval of PCs in which final residual frames\n are computed. If ``source_xy`` is not None, then the S/Ns (mean value\n in a 1xFWHM circular aperture) of the given (X,Y) coordinates are\n computed.\n\n * ADI+mSDI (``cube`` is a 4d array and ``adimsdi=\"double\"``): ``ncomp``\n must be a tuple, where the first value is the number of PCs obtained\n from each multi-spectral frame (if None then this stage will be\n skipped and the spectral channels will be combined without\n subtraction); the second value sets the number of PCs used in the\n second PCA stage, ADI-like using the residuals of the first stage (if\n None then the second PCA stage is skipped and the residuals are\n de-rotated and combined).\n\n svd_mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy',\n 'randcupy', 'pytorch', 'eigenpytorch', 'randpytorch'}, str optional\n Switch for the SVD method/library to be used.\n\n ``lapack``: uses the LAPACK linear algebra library through Numpy\n and it is the most conventional way of computing the SVD\n (deterministic result computed on CPU).\n\n ``arpack``: uses the ARPACK Fortran libraries accessible through\n Scipy (computation on CPU).\n\n ``eigen``: computes the singular vectors through the\n eigendecomposition of the covariance M.M' (computation on CPU).\n\n ``randsvd``: uses the randomized_svd algorithm implemented in\n Sklearn (computation on CPU).\n\n ``cupy``: uses the Cupy library for GPU computation of the SVD as in\n the LAPACK version. `\n\n `eigencupy``: offers the same method as with the ``eigen`` option\n but on GPU (through Cupy).\n\n ``randcupy``: is an adaptation of the randomized_svd algorithm,\n where all the computations are done on a GPU (through Cupy). `\n\n `pytorch``: uses the Pytorch library for GPU computation of the SVD.\n\n ``eigenpytorch``: offers the same method as with the ``eigen``\n option but on GPU (through Pytorch).\n\n ``randpytorch``: is an adaptation of the randomized_svd algorithm,\n where all the linear algebra computations are done on a GPU\n (through Pytorch).\n\n scaling : {None, \"temp-mean\", spat-mean\", \"temp-standard\",\n \"spat-standard\"}, None or str optional\n Pixel-wise scaling mode using ``sklearn.preprocessing.scale``\n function. 
If set to None, the input matrix is left untouched. Otherwise:\n\n ``temp-mean``: temporal px-wise mean is subtracted.\n\n ``spat-mean``: spatial mean is subtracted.\n\n ``temp-standard``: temporal mean centering plus scaling pixel values\n to unit variance. HIGHLY RECOMMENDED FOR ASDI AND RDI CASES!\n\n ``spat-standard``: spatial mean centering plus scaling pixel values\n to unit variance.\n\n mask_center_px : None or int\n If None, no masking is done. If an integer > 1 then this value is the\n radius of the circular mask.\n source_xy : tuple of int, optional\n For ADI-PCA, this triggers a frame rejection in the PCA library, with\n ``source_xy`` as the coordinates X,Y of the center of the annulus where\n the PA criterion is estimated. When ``ncomp`` is a tuple, a PCA grid is\n computed and the S/Ns (mean value in a 1xFWHM circular aperture) of the\n given (X,Y) coordinates are computed.\n delta_rot : int, optional\n Factor for tunning the parallactic angle threshold, expressed in FWHM.\n Default is 1 (excludes 1xFHWM on each side of the considered frame).\n fwhm : float, optional\n Known size of the FHWM in pixels to be used. Default value is 4. \n adimsdi : {'single', 'double'}, str optional\n Changes the way the 4d cubes (ADI+mSDI) are processed. Basically it\n determines whether a single or double pass PCA is going to be computed.\n\n ``single``: the multi-spectral frames are rescaled wrt the largest\n wavelength to align the speckles and all the frames (n_channels *\n n_adiframes) are processed with a single PCA low-rank approximation.\n\n ``double``: a first stage is run on the rescaled spectral frames, and a\n second PCA frame is run on the residuals in an ADI fashion.\n\n crop_ifs: bool, optional\n [adimsdi='single'] If True cube is cropped at the moment of frame\n rescaling in wavelength. This is recommended for large FOVs such as the\n one of SPHERE, but can remove significant amount of information close to\n the edge of small FOVs (e.g. SINFONI).\n imlib : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n imlib2 : str, optional\n See the documentation of the ``vip_hci.preproc.cube_rescaling_wavelengths`` \n function.\n interpolation : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional\n Sets the way of collapsing the frames for producing a final image.\n ifs_collapse_range: str 'all' or tuple of 2 int\n If a tuple, it should contain the first and last channels where the mSDI \n residual channels will be collapsed (by default collapses all channels).\n check_memory : bool, optional\n If True, it checks that the input cube is smaller than the available\n system memory.\n batch : None, int or float, optional\n When it is not None, it triggers the incremental PCA (for ADI and\n ADI+mSDI cubes). If an int is given, it corresponds to the number of\n frames in each sequential mini-batch. If a float (0, 1] is given, it\n corresponds to the size of the batch is computed wrt the available\n memory in the system.\n nproc : None or int, optional\n Number of processes for parallel computing. If None the number of\n processes will be set to (cpu_count()/2). 
Defaults to ``nproc=1``.\n full_output: bool, optional\n Whether to return the final median combined image only or with other\n intermediate arrays.\n verbose : bool, optional\n If True prints intermediate info and timing.\n weights: 1d numpy array or list, optional\n Weights to be applied for a weighted mean. Need to be provided if \n collapse mode is 'wmean'.\n \n Returns\n -------\n frame : numpy ndarray\n 2D array, median combination of the de-rotated/re-scaled residuals cube.\n Always returned.\n pcs : numpy ndarray\n [full_output=True, source_xy=None] Principal components. Valid for\n ADI cubes (3D). This is also returned when ``batch`` is not None\n (incremental PCA).\n recon_cube, recon : numpy ndarray\n [full_output=True] Reconstructed cube. Valid for ADI cubes (3D).\n residuals_cube : numpy ndarray\n [full_output=True] Residuals cube. Valid for ADI cubes (3D).\n residuals_cube_ : numpy ndarray\n [full_output=True] Derotated residuals cube. Valid for ADI cubes (3D).\n residuals_cube_channels : numpy ndarray\n [full_output=True, adimsdi='double'] Residuals for each multi-spectral\n cube. Valid for ADI+mSDI (4D) cubes.\n residuals_cube_channels_ : numpy ndarray\n [full_output=True, adimsdi='double'] Derotated final residuals. Valid\n for ADI+mSDI (4D) cubes.\n cube_allfr_residuals : numpy ndarray\n [full_output=True, adimsdi='single'] Residuals cube (of the big cube\n with channels and time processed together). Valid for ADI+mSDI (4D)\n cubes.\n cube_adi_residuals : numpy ndarray\n [full_output=True, adimsdi='single'] Residuals cube (of the big cube\n with channels and time processed together) after de-scaling the wls.\n Valid for ADI+mSDI (4D).\n medians : numpy ndarray\n [full_output=True, source_xy=None] This is also returned when ``batch``\n is not None (incremental PCA).\n final_residuals_cube : numpy ndarray\n [ncomp is tuple] The residual final PCA frames for a grid a PCs.\n\n\n \"\"\"\n start_time = time_ini(verbose)\n\n if batch is None:\n check_array(cube, (3, 4), msg='cube')\n else:\n if not isinstance(cube, (str, np.ndarray)):\n raise TypeError('`cube` must be a numpy (3d or 4d) array or a str '\n 'with the full path on disk')\n\n # checking memory (if in-memory numpy array is provided)\n if not isinstance(cube, str):\n input_bytes = cube_ref.nbytes if cube_ref is not None else cube.nbytes\n mem_msg = 'Set check_memory=False to override this memory check or ' \\\n 'set `batch` to run incremental PCA (valid for ADI or ' \\\n 'ADI+mSDI single-pass)'\n check_enough_memory(input_bytes, 1.0, raise_error=check_memory,\n error_msg=mem_msg, verbose=verbose)\n\n if nproc is None:\n nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores\n\n # ADI + mSDI. 
Shape of cube: (n_channels, n_adi_frames, y, x)\n if scale_list is not None: #isinstance(cube, np.ndarray) and cube.ndim == 4:\n if adimsdi == 'double':\n res_pca = _adimsdi_doublepca(cube, angle_list, scale_list, ncomp,\n scaling, mask_center_px, svd_mode,\n imlib, imlib2, interpolation, collapse, \n ifs_collapse_range, verbose, start_time, \n nproc, weights, fwhm, conv)\n residuals_cube_channels, residuals_cube_channels_, frame = res_pca\n elif adimsdi == 'single':\n res_pca = _adimsdi_singlepca(cube, angle_list, scale_list, ncomp,\n fwhm, source_xy, scaling,\n mask_center_px, svd_mode, imlib,\n imlib2, interpolation, collapse, \n ifs_collapse_range, verbose, start_time, \n crop_ifs, batch, full_output=True,\n weights=weights)\n if isinstance(ncomp, (int, float)):\n cube_allfr_residuals, cube_adi_residuals, frame = res_pca\n elif isinstance(ncomp, tuple):\n if source_xy is None:\n final_residuals_cube, pclist = res_pca\n else:\n final_residuals_cube, frame, table, _ = res_pca\n else:\n raise ValueError('`adimsdi` mode not recognized')\n\n # ADI + RDI\n elif cube_ref is not None:\n res_pca = _adi_rdi_pca(cube, cube_ref, angle_list, ncomp, scaling,\n mask_center_px, svd_mode, imlib, interpolation,\n collapse, verbose, start_time, weights, mask_rdi)\n pcs, recon, residuals_cube, residuals_cube_, frame = res_pca\n\n # ADI. Shape of cube: (n_adi_frames, y, x)\n elif cube_ref is None:\n res_pca = _adi_pca(cube, angle_list, ncomp, batch, source_xy, delta_rot,\n fwhm, scaling, mask_center_px, svd_mode, imlib,\n interpolation, collapse, verbose, start_time, True,\n weights)\n\n if batch is None:\n if source_xy is not None:\n # PCA grid, computing S/Ns\n if isinstance(ncomp, tuple):\n if full_output:\n final_residuals_cube, frame, table, _ = res_pca\n else:\n # returning only the optimal residual\n final_residuals_cube = res_pca[1]\n # full-frame PCA with rotation threshold\n else:\n recon_cube, residuals_cube, residuals_cube_, frame = res_pca\n else:\n # PCA grid\n if isinstance(ncomp, tuple):\n final_residuals_cube, pclist = res_pca\n # full-frame standard PCA\n else:\n pcs, recon, residuals_cube, residuals_cube_, frame = res_pca\n # full-frame incremental PCA\n else:\n frame, _, pcs, medians = res_pca\n\n else:\n raise RuntimeError('Only ADI, ADI+RDI and ADI+mSDI observing techniques'\n ' are supported')\n\n # --------------------------------------------------------------------------\n # Returns for each case (ADI, ADI+RDI and ADI+mSDI) and combination of\n # parameters: full_output, source_xy, batch, ncomp\n # --------------------------------------------------------------------------\n if isinstance(cube, np.ndarray) and cube.ndim == 4:\n # ADI+mSDI double-pass PCA\n if adimsdi == 'double':\n if full_output:\n return frame, residuals_cube_channels, residuals_cube_channels_\n else:\n return frame\n\n elif adimsdi == 'single':\n # ADI+mSDI single-pass PCA\n if isinstance(ncomp, (float, int)):\n if full_output:\n return frame, cube_allfr_residuals, cube_adi_residuals\n else:\n return frame\n # ADI+mSDI single-pass PCA grid\n elif isinstance(ncomp, tuple):\n if source_xy is None and full_output:\n return final_residuals_cube, pclist\n elif source_xy is None and not full_output:\n return final_residuals_cube\n elif source_xy is not None and full_output:\n return final_residuals_cube, frame, table\n elif source_xy is not None and not full_output:\n return frame\n\n # ADI and ADI+RDI\n else:\n if source_xy is not None and full_output:\n # PCA grid, computing S/Ns\n if isinstance(ncomp, tuple):\n 
return final_residuals_cube, frame, table\n # full-frame PCA with rotation threshold\n else:\n return frame, recon_cube, residuals_cube, residuals_cube_\n\n elif source_xy is None and full_output:\n # incremental PCA\n if batch is not None:\n return frame, pcs, medians\n else:\n # PCA grid\n if isinstance(ncomp, tuple):\n return final_residuals_cube, pclist\n # full-frame standard PCA or ADI+RDI\n else:\n return frame, pcs, recon, residuals_cube, residuals_cube_\n\n elif not full_output:\n # PCA grid\n if isinstance(ncomp, tuple):\n return final_residuals_cube\n # full-frame standard PCA or ADI+RDI\n else:\n return frame\n\n\ndef _adi_pca(cube, angle_list, ncomp, batch, source_xy, delta_rot, fwhm,\n scaling, mask_center_px, svd_mode, imlib, interpolation, collapse,\n verbose, start_time, full_output, weights=None):\n \"\"\" Handles the ADI PCA post-processing.\n \"\"\"\n # Full/Single ADI processing, incremental PCA\n if batch is not None:\n result = pca_incremental(cube, angle_list, batch=batch, ncomp=ncomp,\n imlib=imlib, interpolation=interpolation,\n collapse=collapse, verbose=verbose,\n full_output=full_output, start_time=start_time,\n weights=weights)\n return result\n\n else:\n # Full/Single ADI processing\n n, y, x = cube.shape\n\n angle_list = check_pa_vector(angle_list)\n if not n == angle_list.shape[0]:\n raise ValueError(\"`angle_list` vector has wrong length. It must \"\n \"equal the number of frames in the cube\")\n\n if not isinstance(ncomp, (int, float, tuple)):\n raise TypeError(\"`ncomp` must be an int, float or a tuple in the \"\n \"ADI case\")\n\n if isinstance(ncomp, (int, float)):\n if isinstance(ncomp, int) and ncomp > n:\n ncomp = min(ncomp, n)\n print('Number of PCs too high (max PCs={}), using {} PCs '\n 'instead.'.format(n, ncomp))\n\n if source_xy is None:\n residuals_result = _project_subtract(cube, None, ncomp, scaling,\n mask_center_px, svd_mode,\n verbose, full_output)\n if verbose:\n timing(start_time)\n if full_output:\n residuals_cube = residuals_result[0]\n reconstructed = residuals_result[1]\n V = residuals_result[2]\n pcs = reshape_matrix(V, y, x)\n recon = reshape_matrix(reconstructed, y, x)\n else:\n residuals_cube = residuals_result\n\n # A rotation threshold is applied\n else:\n if delta_rot is None or fwhm is None:\n msg = 'Delta_rot or fwhm parameters missing. Needed for the'\n msg += 'PA-based rejection of frames from the library'\n raise TypeError(msg)\n nfrslib = []\n residuals_cube = np.zeros_like(cube)\n recon_cube = np.zeros_like(cube)\n yc, xc = frame_center(cube[0], False)\n x1, y1 = source_xy\n ann_center = dist(yc, xc, y1, x1)\n pa_thr = _compute_pa_thresh(ann_center, fwhm, delta_rot)\n mid_range = np.abs(np.amax(angle_list) - np.amin(angle_list))/2\n if pa_thr >= mid_range - mid_range * 0.1:\n new_pa_th = float(mid_range - mid_range * 0.1)\n if verbose:\n msg = 'PA threshold {:.2f} is too big, will be set to '\n msg += '{:.2f}'\n print(msg.format(pa_thr, new_pa_th))\n pa_thr = new_pa_th\n\n for frame in range(n):\n if ann_center > fwhm * 3: # TODO: 3 optimal value? 
new par?\n ind = _find_indices_adi(angle_list, frame, pa_thr,\n truncate=True)\n else:\n ind = _find_indices_adi(angle_list, frame, pa_thr)\n\n res_result = _project_subtract(cube, None, ncomp, scaling,\n mask_center_px, svd_mode,\n verbose, full_output, ind,\n frame)\n if full_output:\n nfrslib.append(res_result[0])\n residual_frame = res_result[1]\n recon_frame = res_result[2]\n residuals_cube[frame] = residual_frame.reshape((y, x))\n recon_cube[frame] = recon_frame.reshape((y, x))\n else:\n nfrslib.append(res_result[0])\n residual_frame = res_result[1]\n residuals_cube[frame] = residual_frame.reshape((y, x))\n\n # number of frames in library printed for each annular quadrant\n if verbose:\n descriptive_stats(nfrslib, verbose=verbose,\n label='Size LIB: ')\n\n residuals_cube_ = cube_derotate(residuals_cube, angle_list,\n imlib=imlib,\n interpolation=interpolation)\n frame = cube_collapse(residuals_cube_, mode=collapse, w=weights)\n if verbose:\n print('Done de-rotating and combining')\n timing(start_time)\n if source_xy is not None:\n return recon_cube, residuals_cube, residuals_cube_, frame\n else:\n return pcs, recon, residuals_cube, residuals_cube_, frame\n\n # When ncomp is a tuple, pca_grid is called\n else:\n gridre = pca_grid(cube, angle_list, fwhm, range_pcs=ncomp,\n source_xy=source_xy, cube_ref=None, mode='fullfr',\n svd_mode=svd_mode, scaling=scaling,\n mask_center_px=mask_center_px, fmerit='mean',\n imlib=imlib, interpolation=interpolation,\n collapse=collapse, verbose=verbose,\n full_output=full_output, debug=False,\n plot=verbose, start_time=start_time, \n weights=weights)\n return gridre\n\n\ndef _adimsdi_singlepca(cube, angle_list, scale_list, ncomp, fwhm, source_xy,\n scaling, mask_center_px, svd_mode, imlib, imlib2, \n interpolation, collapse, ifs_collapse_range, verbose, \n start_time, crop_ifs, batch, full_output, weights=None):\n \"\"\" Handles the full-frame ADI+mSDI single PCA post-processing.\n \"\"\"\n z, n, y_in, x_in = cube.shape\n\n angle_list = check_pa_vector(angle_list)\n if not angle_list.shape[0] == n:\n msg = \"Angle list vector has wrong length. 
It must equal the number\"\n msg += \" frames in the cube\"\n raise ValueError(msg)\n\n if scale_list is None:\n raise ValueError('`scale_list` must be provided')\n else:\n check_array(scale_list, dim=1, msg='scale_list')\n if not scale_list.shape[0] == z:\n raise ValueError('`scale_list` has wrong length')\n\n scale_list = check_scal_vector(scale_list)\n big_cube = []\n\n if verbose:\n print('Rescaling the spectral channels to align the speckles')\n for i in Progressbar(range(n), verbose=verbose):\n cube_resc = scwave(cube[:, i, :, :], scale_list, imlib=imlib2,\n interpolation=interpolation)[0]\n if crop_ifs:\n cube_resc = cube_crop_frames(cube_resc, size=y_in, verbose=False)\n big_cube.append(cube_resc)\n\n big_cube = np.array(big_cube)\n big_cube = big_cube.reshape(z * n, big_cube.shape[2], big_cube.shape[3])\n\n if verbose:\n timing(start_time)\n print('{} total frames'.format(n * z))\n print('Performing single-pass PCA')\n\n if isinstance(ncomp, (int, float)):\n # When ncomp is a int and batch is not None, incremental ADI-PCA is run\n if batch is not None:\n res_cube = pca_incremental(big_cube, angle_list, batch, ncomp, imlib,\n interpolation, collapse, verbose,\n return_residuals=True,\n start_time=start_time, weights=weights)\n # When ncomp is a int/float and batch is None, standard ADI-PCA is run\n else:\n res_cube = _project_subtract(big_cube, None, ncomp, scaling,\n mask_center_px, svd_mode, verbose,\n False)\n\n if verbose:\n timing(start_time)\n\n resadi_cube = np.zeros((n, y_in, x_in))\n\n if verbose:\n print('Descaling the spectral channels')\n if ifs_collapse_range == 'all':\n idx_ini = 0\n idx_fin = z\n else:\n idx_ini = ifs_collapse_range[0]\n idx_fin = ifs_collapse_range[1]\n \n for i in Progressbar(range(n), verbose=verbose):\n frame_i = scwave(res_cube[i*z+idx_ini:i*z+idx_fin, :, :], \n scale_list[idx_ini:idx_fin],\n full_output=False, inverse=True, y_in=y_in,\n x_in=x_in, imlib=imlib2,\n interpolation=interpolation, collapse=collapse)\n resadi_cube[i] = frame_i\n\n if verbose:\n print('De-rotating and combining residuals')\n timing(start_time)\n der_res = cube_derotate(resadi_cube, angle_list, imlib=imlib,\n interpolation=interpolation)\n frame = cube_collapse(der_res, mode=collapse, w=weights)\n cube_allfr_residuals = res_cube\n cube_adi_residuals = resadi_cube\n return cube_allfr_residuals, cube_adi_residuals, frame\n\n # When ncomp is a tuple, pca_grid is called\n elif isinstance(ncomp, tuple):\n gridre = pca_grid(big_cube, angle_list, fwhm, range_pcs=ncomp,\n source_xy=source_xy, cube_ref=None, mode='fullfr',\n svd_mode=svd_mode, scaling=scaling,\n mask_center_px=mask_center_px, fmerit='mean',\n imlib=imlib, interpolation=interpolation,\n collapse=collapse, \n ifs_collapse_range=ifs_collapse_range, \n verbose=verbose, full_output=full_output, debug=False,\n plot=verbose, start_time=start_time,\n scale_list=scale_list, initial_4dshape=cube.shape,\n weights=weights)\n return gridre\n\n else:\n raise TypeError(\"`ncomp` must be an int, float or a tuple for \"\n \"single-pass PCA\")\n\n\ndef _adimsdi_doublepca(cube, angle_list, scale_list, ncomp, scaling,\n mask_center_px, svd_mode, imlib, imlib2, interpolation,\n collapse, ifs_collapse_range, verbose, start_time, nproc,\n weights=None, fwhm=4, conv=False):\n \"\"\"\n Handle the full-frame ADI+mSDI double PCA post-processing.\n\n \"\"\"\n z, n, y_in, x_in = cube.shape\n\n global ARRAY\n ARRAY = cube # to be passed to _adimsdi_doublepca_ifs\n\n if not isinstance(ncomp, tuple):\n raise TypeError(\"`ncomp` must be a tuple 
when a double pass PCA\"\n \" is performed\")\n else:\n ncomp_ifs, ncomp_adi = ncomp\n\n angle_list = check_pa_vector(angle_list)\n if not angle_list.shape[0] == n:\n msg = \"Angle list vector has wrong length. It must equal the number\"\n msg += \" frames in the cube\"\n raise ValueError(msg)\n\n if scale_list is None:\n raise ValueError('Scaling factors vector must be provided')\n else:\n if np.array(scale_list).ndim > 1:\n raise ValueError('Scaling factors vector is not 1d')\n if not scale_list.shape[0] == cube.shape[0]:\n raise ValueError('Scaling factors vector has wrong length')\n\n scale_list = check_scal_vector(scale_list)\n\n if verbose:\n print('{} spectral channels in IFS cube'.format(z))\n if ncomp_ifs is None:\n print('Combining multi-spectral frames (skipping PCA)')\n else:\n print('First PCA stage exploiting spectral variability')\n\n if ncomp_ifs is not None and ncomp_ifs > z:\n ncomp_ifs = min(ncomp_ifs, z)\n msg = 'Number of PCs too high (max PCs={}), using {} PCs instead'\n print(msg.format(z, ncomp_ifs))\n\n res = pool_map(nproc, _adimsdi_doublepca_ifs, iterable(range(n)), ncomp_ifs,\n scale_list, scaling, mask_center_px, svd_mode, imlib2, \n interpolation, collapse, ifs_collapse_range, fwhm, conv)\n residuals_cube_channels = np.array(res)\n\n if verbose:\n timing(start_time)\n\n # de-rotation of the PCA processed channels, ADI fashion\n if ncomp_adi is None:\n if verbose:\n print('{} ADI frames'.format(n))\n print('De-rotating and combining frames (skipping PCA)')\n residuals_cube_channels_ = cube_derotate(residuals_cube_channels,\n angle_list, imlib=imlib,\n interpolation=interpolation)\n frame = cube_collapse(residuals_cube_channels_, mode=collapse, \n w=weights)\n if verbose:\n timing(start_time)\n else:\n if ncomp_adi > n:\n ncomp_adi = n\n print('Number of PCs too high, using maximum of {} PCs '\n 'instead'.format(n))\n if verbose:\n print('{} ADI frames'.format(n))\n print('Second PCA stage exploiting rotational variability')\n\n res_ifs_adi = _project_subtract(residuals_cube_channels, None,\n ncomp_adi, scaling, mask_center_px,\n svd_mode, verbose=False,\n full_output=False)\n if verbose:\n print('De-rotating and combining residuals')\n der_res = cube_derotate(res_ifs_adi, angle_list, imlib=imlib,\n interpolation=interpolation)\n residuals_cube_channels_ = der_res\n frame = cube_collapse(residuals_cube_channels_, mode=collapse, \n w=weights)\n if verbose:\n timing(start_time)\n return residuals_cube_channels, residuals_cube_channels_, frame\n\n\ndef _adimsdi_doublepca_ifs(fr, ncomp, scale_list, scaling, mask_center_px,\n svd_mode, imlib, interpolation, collapse, \n ifs_collapse_range, fwhm, conv):\n \"\"\"\n Called by _adimsdi_doublepca with pool_map.\n \"\"\"\n global ARRAY\n\n z, n, y_in, x_in = ARRAY.shape\n multispec_fr = ARRAY[:, fr, :, :]\n\n if ifs_collapse_range == 'all':\n idx_ini = 0\n idx_fin = z\n else:\n idx_ini = ifs_collapse_range[0]\n idx_fin = ifs_collapse_range[1]\n\n if ncomp is None:\n frame_i = cube_collapse(multispec_fr[idx_ini:idx_fin])\n else:\n cube_resc = scwave(multispec_fr, scale_list, imlib=imlib, \n interpolation=interpolation)[0]\n if conv:\n # convolve all frames with the same kernel\n cube_resc = cube_filter_lowpass(cube_resc, mode='gauss', \n fwhm_size=fwhm, verbose=False)\n residuals = _project_subtract(cube_resc, None, ncomp, scaling,\n mask_center_px, svd_mode, verbose=False,\n full_output=False)\n frame_i = scwave(residuals[idx_ini:idx_fin], scale_list[idx_ini:idx_fin], \n full_output=False, inverse=True, y_in=y_in, 
x_in=x_in,\n                         imlib=imlib, interpolation=interpolation,\n                         collapse=collapse)\n    if mask_center_px:\n        frame_i = mask_circle(frame_i, mask_center_px)\n\n    return frame_i\n\n\ndef _adi_rdi_pca(cube, cube_ref, angle_list, ncomp, scaling, mask_center_px,\n                 svd_mode, imlib, interpolation, collapse, verbose, start_time,\n                 weights=None, mask_rdi=None):\n    \"\"\" Handles the ADI+RDI post-processing.\n    \"\"\"\n    n, y, x = cube.shape\n    n_ref, y_ref, x_ref = cube_ref.shape\n    angle_list = check_pa_vector(angle_list)\n\n    if not isinstance(ncomp, int):\n        raise TypeError(\"`ncomp` must be an int in the ADI+RDI case\")\n    if ncomp > n_ref:\n        msg = ('Number of PCs too high (max PCs={}), using {} PCs (the number '\n               'of frames in the reference cube) instead.')\n        print(msg.format(n_ref, n_ref))\n        ncomp = n_ref\n\n    if not cube_ref.ndim == 3:\n        msg = 'Input reference array is not a cube or 3d array'\n        raise ValueError(msg)\n    if not cube_ref.shape[1] == y:\n        msg = 'Reference and target frames have different shape'\n        raise TypeError(msg)\n\n    if mask_rdi is None:\n        residuals_result = _project_subtract(cube, cube_ref, ncomp, scaling,\n                                             mask_center_px, svd_mode, verbose,\n                                             True)\n        residuals_cube = residuals_result[0]\n        reconstructed = residuals_result[1]\n        V = residuals_result[2]\n        pcs = reshape_matrix(V, y, x)\n        recon = reshape_matrix(reconstructed, y, x)\n    else:\n        residuals_result = cube_subtract_sky_pca(cube, cube_ref, mask_rdi,\n                                                 ncomp=ncomp, full_output=True)\n        residuals_cube = residuals_result[0]\n        pcs = residuals_result[2]\n        recon = residuals_result[-1]\n\n    residuals_cube_ = cube_derotate(residuals_cube, angle_list, imlib=imlib,\n                                    interpolation=interpolation)\n    frame = cube_collapse(residuals_cube_, mode=collapse, w=weights)\n    if mask_center_px:\n        frame = mask_circle(frame, mask_center_px)\n\n    if verbose:\n        print('Done de-rotating and combining')\n        timing(start_time)\n\n    return pcs, recon, residuals_cube, residuals_cube_, frame\n\n\ndef _project_subtract(cube, cube_ref, ncomp, scaling, mask_center_px,\n                      svd_mode, verbose, full_output, indices=None, frame=None):\n    \"\"\"\n    PCA projection and model PSF subtraction. Used as a helper function by\n    each of the PCA modes (ADI, ADI+RDI, ADI+mSDI).\n\n    Parameters\n    ----------\n    cube : numpy ndarray\n        Input cube.\n    cube_ref : numpy ndarray\n        Reference cube.\n    ncomp : int\n        Number of principal components.\n    scaling : str\n        Scaling of pixel values. See ``pca`` docstrings.\n    mask_center_px : int\n        Masking out a centered circular aperture.\n    svd_mode : str\n        Mode for SVD computation. 
See ``pca`` docstrings.\n    verbose : bool\n        Verbosity.\n    full_output : bool\n        Whether to return intermediate arrays or not.\n    indices : list\n        Indices to be used to discard frames (a rotation threshold is used).\n    frame : int\n        Index of the current frame (when indices is a list and a rotation\n        threshold was applied).\n\n    Returns\n    -------\n    ref_lib_shape : int\n        [indices is not None, frame is not None] Number of rows in the\n        reference library for the given frame.\n    residuals : numpy ndarray\n        Residuals, returned in every case.\n    reconstructed : numpy ndarray\n        [full_output=True] The reconstructed array.\n    V : numpy ndarray\n        [full_output=True, indices is None, frame is None] The right singular\n        vectors of the input matrix, as returned by ``svd/svd_wrapper()``.\n    \"\"\"\n    _, y, x = cube.shape\n    if isinstance(ncomp, int):\n        if indices is not None and frame is not None:\n            matrix = prepare_matrix(cube, scaling, mask_center_px,\n                                    mode='fullfr', verbose=False)\n        else:\n            matrix = prepare_matrix(cube, scaling, mask_center_px,\n                                    mode='fullfr', verbose=verbose)\n\n        if cube_ref is not None:\n            ref_lib = prepare_matrix(cube_ref, scaling, mask_center_px,\n                                     mode='fullfr', verbose=verbose)\n        else:\n            ref_lib = matrix\n\n        # a rotation threshold is used (frames are processed one by one)\n        if indices is not None and frame is not None:\n            ref_lib = ref_lib[indices]\n            if ref_lib.shape[0] <= 10:\n                raise RuntimeError('Less than 10 frames left in the PCA '\n                                   'library. Try decreasing the parameter '\n                                   'delta_rot')\n            curr_frame = matrix[frame]  # current frame\n            V = svd_wrapper(ref_lib, svd_mode, ncomp, False)\n            transformed = np.dot(curr_frame, V.T)\n            reconstructed = np.dot(transformed.T, V)\n            residuals = curr_frame - reconstructed\n            if full_output:\n                return ref_lib.shape[0], residuals, reconstructed\n            else:\n                return ref_lib.shape[0], residuals\n\n        # the whole matrix is processed at once\n        else:\n            V = svd_wrapper(ref_lib, svd_mode, ncomp, verbose)\n            transformed = np.dot(V, matrix.T)\n            reconstructed = np.dot(transformed.T, V)\n            residuals = matrix - reconstructed\n            residuals_res = reshape_matrix(residuals, y, x)\n            if full_output:\n                return residuals_res, reconstructed, V\n            else:\n                return residuals_res\n\n    elif isinstance(ncomp, float):\n        if not 1 > ncomp > 0:\n            raise ValueError(\"when `ncomp` is a float, it must lie in the \"\n                             \"interval (0, 1)\")\n\n        svdecomp = SVDecomposer(cube, mode='fullfr', svd_mode=svd_mode,\n                                scaling=scaling, verbose=verbose)\n        _ = svdecomp.get_cevr(plot=False)\n        # in this case ncomp is the desired CEVR\n        cevr = ncomp\n        ncomp = svdecomp.cevr_to_ncomp(cevr)\n        V = svdecomp.v[:ncomp]\n        transformed = np.dot(V, svdecomp.matrix.T)\n        reconstructed = np.dot(transformed.T, V)\n        residuals = svdecomp.matrix - reconstructed\n        residuals_res = reshape_matrix(residuals, y, x)\n\n        if verbose and isinstance(cevr, float):\n            print(\"Components used : {}\".format(V.shape[0]))\n\n        if full_output:\n            return residuals_res, reconstructed, V\n        else:\n            return residuals_res\n",
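# ---------------------------------------------------------------------------\n# Editor's note: a minimal, numpy-only sketch of the low-rank projection and\n# subtraction performed by `_project_subtract` above (ADI case). It is meant\n# to clarify the algorithm, not to reproduce the vip_hci API: `prepare_matrix`\n# and `svd_wrapper` are replaced by a plain reshape and `np.linalg.svd`, and\n# all names are illustrative.\n# ---------------------------------------------------------------------------\nimport numpy as np\n\n\ndef project_subtract_sketch(cube, ncomp):\n    # Flatten the (n, y, x) cube to (n, y*x): one row per frame\n    n, y, x = cube.shape\n    matrix = cube.reshape(n, y * x)\n    matrix = matrix - matrix.mean(axis=0)   # akin to scaling='temp-mean'\n    # The right singular vectors span pixel space; keep the first ncomp\n    _, _, Vt = np.linalg.svd(matrix, full_matrices=False)\n    V = Vt[:ncomp]\n    transformed = np.dot(matrix, V.T)       # PC coefficients, shape (n, ncomp)\n    reconstructed = np.dot(transformed, V)  # low-rank model of the stellar PSF\n    residuals = matrix - reconstructed      # companion signal survives here\n    return residuals.reshape(n, y, x)\n\n\n# Example on synthetic data: residuals of a rank-5 approximation\nrng = np.random.default_rng(0)\nres = project_subtract_sketch(rng.normal(size=(20, 32, 32)), ncomp=5)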
"#! /usr/bin/env python\n\n\"\"\"\nModule with the MCMC (``emcee``) sampling for NEGFC parameter estimation.\n\"\"\"\n\n\n__author__ = 'O. Wertz, Carlos Alberto Gomez Gonzalez, V. Christiaens'\n__all__ = ['mcmc_negfc_sampling',\n 'chain_zero_truncated',\n 'show_corner_plot',\n 'show_walk_plot',\n 'confidence']\nimport numpy as np\nimport os\nimport emcee\nimport inspect\nimport datetime\nimport corner\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport pickle\nfrom scipy.stats import norm\nfrom ..metrics import cube_inject_companions\nfrom ..conf import time_ini, timing\nfrom ..conf.utils_conf import sep\nfrom ..pca import pca_annulus\nfrom .simplex_fmerit import get_values_optimize, get_mu_and_sigma\nfrom .utils_mcmc import gelman_rubin, autocorr_test\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom ..fits import write_fits\n\ndef lnprior(param, bounds):\n \"\"\" Define the prior log-function.\n \n Parameters\n ----------\n param: tuple\n The model parameters.\n bounds: list\n The bounds for each model parameter.\n Ex: bounds = [(10,20),(0,360),(0,5000)]\n \n Returns\n -------\n out: float.\n 0 if all the model parameters satisfy the prior conditions defined here.\n -np.inf if at least one model parameters is out of bounds.\n \"\"\"\n \n try:\n r, theta, flux = param\n except TypeError:\n print('param must be a tuple, {} given'.format(type(param)))\n\n try:\n r_bounds, theta_bounds, flux_bounds = bounds\n except TypeError:\n print('bounds must be a list of tuple, {} given'.format(type(bounds)))\n \n if r_bounds[0] <= r <= r_bounds[1] and \\\n theta_bounds[0] <= theta <= theta_bounds[1] and \\\n flux_bounds[0] <= flux <= flux_bounds[1]:\n return 0.0\n else:\n return -np.inf\n\n\ndef lnlike(param, cube, angs, plsc, psf_norm, fwhm, annulus_width,\n ncomp, aperture_radius, initial_state, cube_ref=None,\n svd_mode='lapack', scaling='temp-mean', algo=pca_annulus,\n delta_rot=1, fmerit='sum', imlib='opencv', interpolation='lanczos4', \n collapse='median', algo_options={}, weights=None, transmission=None, \n mu_sigma=True, debug=False):\n \"\"\" Define the likelihood log-function.\n \n Parameters\n ----------\n param: tuple\n The model parameters, typically (r, theta, flux).\n cube: numpy.array\n The cube of fits images expressed as a numpy.array.\n angs: numpy.array\n The parallactic angle fits image expressed as a numpy.array.\n plsc: float\n The platescale, in arcsec per pixel.\n psf_norm: numpy.array\n The scaled psf expressed as a numpy.array.\n annulus_width: float\n The width of the annulus of interest in pixels.\n ncomp: int or None\n The number of principal components for PCA-based algorithms.\n fwhm : float\n The FHWM in pixels.\n aperture_radius: float\n The radius of the circular aperture in terms of the FWHM.\n initial_state: numpy.array\n The initial guess for the position and the flux of the planet.\n cube_ref: numpy ndarray, 3d, optional\n Reference library cube. For Reference Star Differential Imaging.\n svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional\n Switch for different ways of computing the SVD and selected PCs.\n scaling : {'temp-mean', 'temp-standard'} or None, optional\n With None, no scaling is performed on the input data before SVD. 
With\n \"temp-mean\" then temporal px-wise mean subtraction is done and with\n \"temp-standard\" temporal mean centering plus scaling to unit variance\n is done.\n algo: vip function, optional {pca_annulus, pca_annular}\n Post-processing algorithm used.\n delta_rot: float, optional\n If algo is set to pca_annular, delta_rot is the angular threshold used\n to select frames in the PCA library (see description of pca_annular).\n fmerit : {'sum', 'stddev'}, string optional\n Chooses the figure of merit to be used. stddev works better for close in\n companions sitting on top of speckle noise.\n imlib : str, optional\n See the documentation of the ``vip_hci.preproc.frame_shift`` function.\n interpolation : str, optional\n See the documentation of the ``vip_hci.preproc.frame_shift`` function.\n collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional\n Sets the way of collapsing the frames for producing a final image. If\n None then the cube of residuals is used when measuring the function of\n merit (instead of a single final frame).\n algo_options: dict, opt\n Dictionary with additional parameters related to the algorithm \n (e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip\n routine, this dict should contain all necessary arguments apart from\n the cube and derotation angles. Note: arguments such as ncomp, svd_mode,\n scaling, imlib, interpolation or collapse can also be included in this\n dict (the latter are also kept as function arguments for consistency\n with older versions of vip). \n weights : 1d array, optional\n If provided, the negative fake companion fluxes will be scaled according\n to these weights before injection in the cube. Can reflect changes in \n the observing conditions throughout the sequence.\n transmission: numpy array, optional\n Array with 2 columns. First column is the radial separation in pixels. \n Second column is the off-axis transmission (between 0 and 1) at the \n radial separation given in column 1.\n mu_sigma: tuple of 2 floats or None, opt\n If set to None: not used, and falls back to original version of the \n algorithm, using fmerit. Otherwise, should be a tuple of 2 elements,\n containing the mean and standard deviation of pixel intensities in an \n annulus centered on the location of the companion, excluding the area \n directly adjacent to the companion.\n scale_fac: float\n Factor by which the intensities in the cube are scaled up, to \n increase the residuals. This operation is needed for very low companion \n fluxes. The probability exp(lnprob) would otherwise be ~1 for all flux \n values less or similar to the true value. 
In practice, for companion\n        fluxes > 100, leaving scale_fac at 1 should be fine.\n    debug: boolean\n        If True, the cube is returned along with the likelihood log-function.\n\n    Returns\n    -------\n    out: float\n        The log of the likelihood.\n\n    \"\"\"\n    # Create the cube with the negative fake companion injected\n    if weights is None:\n        flux = -param[2]\n        norm_weights = weights\n    else:\n        flux = -param[2]*weights\n        norm_weights = weights/np.sum(weights)\n    cube_negfc = cube_inject_companions(cube, psf_norm, angs, flevel=flux,\n                                        plsc=plsc, rad_dists=[param[0]],\n                                        n_branches=1, theta=param[1],\n                                        imlib=imlib,\n                                        interpolation=interpolation,\n                                        transmission=transmission,\n                                        verbose=False)\n#    if scale_fac > 1:\n#        cube_negfc *= scale_fac\n    # Perform PCA and extract the zone of interest\n    values = get_values_optimize(cube_negfc, angs, ncomp, annulus_width,\n                                 aperture_radius, fwhm, initial_state[0],\n                                 initial_state[1], cube_ref=cube_ref,\n                                 svd_mode=svd_mode, scaling=scaling,\n                                 algo=algo, delta_rot=delta_rot, imlib=imlib,\n                                 interpolation=interpolation, collapse=collapse,\n                                 algo_options=algo_options,\n                                 weights=norm_weights)\n\n    if isinstance(mu_sigma, tuple):\n        mu = mu_sigma[0]\n        sigma = mu_sigma[1]\n        lnlikelihood = -0.5 * np.sum(np.power(mu-values, 2)/sigma**2)\n    else:\n        # old version - delete?\n        if fmerit == 'sum':\n            lnlikelihood = -0.5 * np.sum(np.abs(values))\n        elif fmerit == 'stddev':\n            values = values[values != 0]\n            lnlikelihood = -np.std(values)*values.size\n        else:\n            raise RuntimeError('fmerit choice not recognized.')\n\n    if debug:\n        return lnlikelihood, cube_negfc\n    else:\n        return lnlikelihood\n\n\ndef lnprob(param, bounds, cube, angs, plsc, psf_norm, fwhm,\n           annulus_width, ncomp, aperture_radius, initial_state, cube_ref=None,\n           svd_mode='lapack', scaling='temp-mean', algo=pca_annulus,\n           delta_rot=1, fmerit='sum', imlib='opencv', interpolation='lanczos4',\n           collapse='median', algo_options={}, weights=None, transmission=None,\n           mu_sigma=True, display=False):\n    \"\"\" Define the probability log-function as the sum of the prior and\n    likelihood log-functions.\n\n    Parameters\n    ----------\n    param: tuple\n        The model parameters.\n    bounds: list\n        The bounds for each model parameter.\n        Ex: bounds = [(10,20),(0,360),(0,5000)]\n    cube: numpy.array\n        The cube of fits images expressed as a numpy.array.\n    angs: numpy.array\n        The parallactic angle fits image expressed as a numpy.array.\n    plsc: float\n        The platescale, in arcsec per pixel.\n    psf_norm: numpy.array\n        The scaled psf expressed as a numpy.array.\n    fwhm : float\n        The FWHM in pixels.\n    annulus_width: float\n        The width in pixels of the annulus on which the PCA is performed.\n    ncomp: int or None\n        The number of principal components for PCA-based algorithms.\n    aperture_radius: float\n        The radius of the circular aperture in FWHM.\n    initial_state: numpy.array\n        The initial guess for the position and the flux of the planet.\n    cube_ref : numpy ndarray, 3d, optional\n        Reference library cube. For Reference Star Differential Imaging.\n    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional\n        Switch for different ways of computing the SVD and selected PCs.\n    scaling : {'temp-mean', 'temp-standard'} or None, optional\n        With None, no scaling is performed on the input data before SVD. With\n        \"temp-mean\" then temporal px-wise mean subtraction is done and with\n        \"temp-standard\" temporal mean centering plus scaling to unit variance\n        is done.\n    fmerit : {'sum', 'stddev'}, string optional\n        Chooses the figure of merit to be used. 
stddev works better for close in\n companions sitting on top of speckle noise.\n imlib : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n interpolation : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n algo_options, : dict, opt\n Dictionary with additional parameters related to the algorithm \n (e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip\n routine, this dict should contain all necessary arguments apart from\n the cube and derotation angles. Note: arguments such as ncomp, svd_mode,\n scaling, imlib, interpolation or collapse can also be included in this\n dict (the latter are also kept as function arguments for consistency\n with older versions of vip). \n collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional\n Sets the way of collapsing the frames for producing a final image. If\n None then the cube of residuals is used when measuring the function of\n merit (instead of a single final frame).\n weights : 1d array, optional\n If provided, the negative fake companion fluxes will be scaled according\n to these weights before injection in the cube. Can reflect changes in \n the observing conditions throughout the sequence.\n transmission: numpy array, optional\n Array with 2 columns. First column is the radial separation in pixels. \n Second column is the off-axis transmission (between 0 and 1) at the \n radial separation given in column 1.\n mu_sigma: tuple of 2 floats or None, opt\n If set to None: not used, and falls back to original version of the \n algorithm, using fmerit. Otherwise, should be a tuple of 2 elements,\n containing the mean and standard deviation of pixel intensities in an \n annulus centered on the location of the companion, excluding the area \n directly adjacent to the companion.\n scale_fac: float\n Factor by which the intensities in the cube are scaled up, to \n increase the residuals. This operation is needed for very low companion \n fluxes. The probability exp(lnprob) would otherwise be ~1 for all flux \n values less or similar to the true value. 
In practice, for companion\n        fluxes > 100, leaving scale_fac at 1 should be fine.\n    display: boolean\n        If True, the cube is displayed with ds9.\n\n    Returns\n    -------\n    out: float\n        The probability log-function.\n\n    \"\"\"\n    if initial_state is None:\n        initial_state = param\n\n    lp = lnprior(param, bounds)\n\n    if np.isinf(lp):\n        return -np.inf\n\n    return lp + lnlike(param, cube, angs, plsc, psf_norm, fwhm, annulus_width,\n                       ncomp, aperture_radius, initial_state, cube_ref,\n                       svd_mode, scaling, algo, delta_rot, fmerit, imlib,\n                       interpolation, collapse, algo_options, weights,\n                       transmission, mu_sigma)\n\n\ndef mcmc_negfc_sampling(cube, angs, psfn, ncomp, plsc, initial_state, fwhm=4,\n                        annulus_width=8, aperture_radius=1, cube_ref=None,\n                        svd_mode='lapack', scaling='temp-mean',\n                        algo=pca_annulus, delta_rot=1, fmerit='sum',\n                        imlib='opencv', interpolation='lanczos4',\n                        collapse='median', algo_options={}, wedge=None,\n                        weights=None, transmission=None, mu_sigma=None,\n                        nwalkers=100, bounds=None, a=2.0, burnin=0.3,\n                        rhat_threshold=1.01, rhat_count_threshold=1,\n                        niteration_min=10, niteration_limit=10000,\n                        niteration_supp=0, check_maxgap=20, conv_test='gb',\n                        ac_c=50, ac_count_thr=3, nproc=1, output_dir='results/',\n                        output_file=None, display=False, verbosity=0,\n                        save=False):\n    r\"\"\" Runs an affine invariant MCMC sampling algorithm in order to determine\n    the position and the flux of the planet using the 'Negative Fake Companion'\n    technique. The result of this procedure is a chain with the samples from the\n    posterior distributions of each of the 3 parameters.\n\n    This technique can be summarized as follows:\n    1) We inject a negative fake companion (one candidate) at a given position\n    and characterized by a given flux, both close to the expected values.\n    2) We run PCA on a full annulus which passes through the initial guess,\n    regardless of the position of the candidate.\n    3) We extract the intensity values of all the pixels contained in a\n    circular aperture centered on the initial guess.\n    4) We calculate the function of merit. The associated chi^2 is given by\n    chi^2 = sum(\\|I_j\\|) where j \\in {1,...,N} with N the total number of pixels\n    contained in the circular aperture.\n    The steps 1) to 4) are looped. At each iteration, the candidate model\n    parameters are defined by the emcee Affine Invariant algorithm.\n\n    Parameters\n    ----------\n    cube: numpy.array\n        ADI fits cube.\n    angs: numpy.array\n        The parallactic angle vector.\n    psfn: numpy 2D or 3D array\n        Normalised PSF template used for negative fake companion injection.\n        The PSF must be centered and the flux in a 1*FWHM aperture must equal 1\n        (use ``vip_hci.metrics.normalize_psf``).\n        If a 3D array is provided, it must match the number of frames of the\n        ADI cube. This can be useful if the cube was unsaturated and conditions\n        were variable.\n    ncomp: int or None\n        The number of principal components for PCA-based algorithms.\n    plsc: float\n        The platescale, in arcsec per pixel.\n    annulus_width: float, optional\n        The width in pixels of the annulus on which the PCA is performed.\n    aperture_radius: float, optional\n        The radius in FWHM of the circular aperture.\n    nwalkers: int, optional\n        The number of Goodman & Weare 'walkers'.\n    initial_state: numpy.array\n        The first guess for the position and flux of the planet, respectively.\n        Each walker will start in a small ball around this preferred position.\n    cube_ref : numpy ndarray, 3d, optional\n        Reference library cube. 
For Reference Star Differential Imaging.\n svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional\n Switch for different ways of computing the SVD and selected PCs.\n 'randsvd' is not recommended for the negative fake companion technique.\n algo : python routine\n Post-processing algorithm used to model and subtract the star. First\n 2 arguments must be input cube and derotation angles. Must return a\n post-processed 2d frame.\n scaling : {'temp-mean', 'temp-standard'} or None, optional\n With None, no scaling is performed on the input data before SVD. With\n \"temp-mean\" then temporal px-wise mean subtraction is done and with\n \"temp-standard\" temporal mean centering plus scaling to unit variance\n is done.\n fmerit : {'sum', 'stddev'}, string optional\n Chooses the figure of merit to be used. stddev works better for close in\n companions sitting on top of speckle noise.\n imlib : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n interpolation : str, optional\n See the documentation of the ``vip_hci.preproc.frame_rotate`` function.\n collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional\n Sets the way of collapsing the frames for producing a final image. If\n None then the cube of residuals is used when measuring the function of\n merit (instead of a single final frame).\n algo_options: dict, opt\n Dictionary with additional parameters related to the algorithm \n (e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip\n routine, this dict should contain all necessary arguments apart from\n the cube and derotation angles. Note: arguments such as ncomp, svd_mode,\n scaling, imlib, interpolation or collapse can also be included in this\n dict (the latter are also kept as function arguments for consistency\n with older versions of vip). \n wedge: tuple, opt\n Range in theta where the mean and standard deviation are computed in an \n annulus defined in the PCA image. If None, it will be calculated \n automatically based on initial guess and derotation angles to avoid.\n If some disc signal is present elsewhere in the annulus, it is \n recommended to provide wedge manually. The provided range should be \n continuous and >0. E.g. provide (270, 370) to consider a PA range \n between [-90,+10].\n weights : 1d array, optional\n If provided, the negative fake companion fluxes will be scaled according\n to these weights before injection in the cube. Can reflect changes in \n the observing conditions throughout the sequence.\n transmission: numpy array, optional\n Array with 2 columns. First column is the radial separation in pixels. \n Second column is the off-axis transmission (between 0 and 1) at the \n radial separation given in column 1.\n mu_sigma: tuple of 2 floats or bool, opt\n If set to None: not used, and falls back to original version of the \n algorithm, using fmerit.\n If a tuple of 2 elements: should be the mean and standard deviation of \n pixel intensities in an annulus centered on the lcoation of the \n companion candidate, excluding the area directly adjacent to the CC.\n If set to anything else, but None/False/tuple: will compute said mean \n and standard deviation automatically.\n These values will then be used in the log-probability of the MCMC.\n bounds: numpy.array or list, default=None, optional\n The prior knowledge on the model parameters. If None, large bounds will\n be automatically estimated from the initial state.\n a: float, default=2.0\n The proposal scale parameter. 
See notes.\n    burnin: float, default=0.3\n        The fraction of a walker chain which is discarded. NOTE: only used for\n        the Gelman-Rubin convergence test - the chains are returned full.\n    rhat_threshold: float, default=1.01\n        The Gelman-Rubin threshold used for the test of non-convergence.\n    rhat_count_threshold: int, optional\n        The Gelman-Rubin test must be satisfied 'rhat_count_threshold' times in\n        a row before claiming that the chain has converged.\n    conv_test: str, optional {'gb','ac'}\n        Method to check for convergence:\n        - 'gb' for the Gelman-Rubin test\n        (http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/305.pdf)\n        - 'ac' for autocorrelation analysis\n        (https://emcee.readthedocs.io/en/stable/tutorials/autocorr/)\n    ac_c: float, optional\n        If the convergence test is made using the auto-correlation, this is the\n        value of C such that tau/N < 1/C is the condition required for tau to be\n        considered a reliable auto-correlation time estimate (for N number of\n        samples). Recommended: C>50.\n        More details here:\n        https://emcee.readthedocs.io/en/stable/tutorials/autocorr/\n    ac_count_thr: int, optional\n        The auto-correlation test must be satisfied ac_count_thr times in a row\n        before claiming that the chain has converged.\n    niteration_min: int, optional\n        Steps per walker lower bound. The simulation will run at least this\n        number of steps per walker.\n    niteration_limit: int, optional\n        Steps per walker upper bound. If the simulation runs up to\n        'niteration_limit' steps without having reached the convergence\n        criterion, the run is stopped.\n    niteration_supp: int, optional\n        Number of iterations to run after having \"reached the convergence\".\n    check_maxgap: int, optional\n        Maximum number of steps per walker between two Gelman-Rubin tests.\n    nproc: int, optional\n        The number of processes to use for parallelization.\n    output_dir: str, optional\n        The name of the output directory which contains the output files in the\n        case ``save`` is True.\n    output_file: str, optional\n        The name of the output file which contains the MCMC results in the case\n        ``save`` is True.\n    display: bool, optional\n        If True, the walk plot is displayed at each evaluation of the Gelman-\n        Rubin test.\n    verbosity: 0, 1, 2 or 3, optional\n        Verbosity level. 0 for no output and 3 for full information.\n        (The only difference between 2 and 3 is that 3 also writes intermediate\n        pickles containing the state of the chain at convergence tests; these\n        can end up taking a lot of space.)\n    save: bool, optional\n        If True, the MCMC results are pickled.\n\n    Returns\n    -------\n    out : numpy.array\n        The MCMC chain.\n\n    Notes\n    -----\n    The parameter ``a`` must be > 1. For more theoretical information concerning\n    this parameter, see Goodman & Weare, 2010, Comm. App. Math. Comp. Sci.,\n    5, 65, Eq. 
[9] p70.\n \n The parameter 'rhat_threshold' can be a numpy.array with individual\n threshold value for each model parameter.\n \"\"\"\n if verbosity >0:\n start_time = time_ini()\n print(\" MCMC sampler for the NEGFC technique \")\n print(sep)\n\n # If required, one create the output folder.\n if save:\n \n output_file_tmp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n \n if output_dir[-1] == '/':\n output_dir = output_dir[:-1]\n try:\n os.makedirs(output_dir)\n except OSError as exc:\n if exc.errno == 17 and os.path.isdir(output_dir):\n # errno.EEXIST == 17 -> File exists\n pass\n else:\n raise\n\n\n if not isinstance(cube, np.ndarray) or cube.ndim != 3:\n raise ValueError('`cube` must be a 3D numpy array')\n\n if cube_ref is not None:\n if not isinstance(cube_ref, np.ndarray) or cube_ref.ndim != 3:\n raise ValueError('`cube_ref` must be a 3D numpy array')\n if weights is not None:\n if not len(weights)==cube.shape[0]:\n raise TypeError(\"Weights should have same length as cube axis 0\")\n norm_weights = weights/np.sum(weights)\n else:\n norm_weights=weights\n \n if psfn.ndim==3:\n if psfn.shape[0] != cube.shape[0]:\n msg = \"If PSF is 3D, number of frames must match cube length\"\n raise TypeError(msg)\n \n # #########################################################################\n # Initialization of the variables\n # #########################################################################\n dim = 3 # There are 3 model parameters: rad, theta, flux\n itermin = niteration_min\n limit = niteration_limit\n supp = niteration_supp\n maxgap = check_maxgap\n initial_state = np.array(initial_state)\n\n # Measure mu and sigma once in the annulus (instead of each MCMC step)\n if isinstance(mu_sigma, tuple):\n if len(mu_sigma) != 2:\n raise TypeError(\"if a tuple, mu_sigma should have 2 elements\")\n elif mu_sigma:\n mu_sigma = get_mu_and_sigma(cube, angs, ncomp, annulus_width, \n aperture_radius, fwhm, initial_state[0], \n initial_state[1], cube_ref=cube_ref, \n wedge=wedge, svd_mode=svd_mode, \n scaling=scaling, algo=algo, \n delta_rot=delta_rot, imlib=imlib, \n interpolation=interpolation,\n collapse=collapse, weights=norm_weights, \n algo_options=algo_options)\n if verbosity >0:\n msg = \"The mean and stddev in the annulus at the radius of the \"\n msg+= \"companion (excluding the PA area directly adjacent to it)\"\n msg+=\" are {:.2f} and {:.2f} respectively.\"\n print(msg.format(mu_sigma[0],mu_sigma[1]))\n# pca_args['mu']=mu\n# pca_args['sigma']=sigma\n # if does not work, activate scale fac\n \n # If companion flux is too low MCMC will not converge. 
Solution: scale up \n # the intensities in the cube after injecting the negfc.\n# if initial_state[2] < 100:\n# scale_fac = 100./initial_state[2]\n# else:\n #scale_fac = 1\n \n if itermin > limit:\n itermin = 0\n\n fraction = 0.3\n geom = 0\n lastcheck = 0\n konvergence = np.inf\n rhat_count = 0\n ac_count = 0\n chain = np.empty([nwalkers, 1, dim])\n isamples = np.empty(0)\n pos = initial_state*(1+np.random.normal(0, 0.01, (nwalkers, 3)))\n nIterations = limit + supp\n rhat = np.zeros(dim)\n stop = np.inf\n\n if bounds is None:\n bounds = [(initial_state[0] - annulus_width/2.,\n initial_state[0] + annulus_width/2.), # radius\n (initial_state[1] - 10, initial_state[1] + 10), # angle\n (0.1* initial_state[2], 2 * initial_state[2])] # flux\n \n sampler = emcee.EnsembleSampler(nwalkers, dim, lnprob, a,\n args=([bounds, cube, angs, plsc, psfn,\n fwhm, annulus_width, ncomp,\n aperture_radius, initial_state,\n cube_ref, svd_mode, scaling, algo,\n delta_rot, fmerit, imlib, \n interpolation, collapse, algo_options, \n weights, transmission, mu_sigma]),\n threads=nproc)\n \n start = datetime.datetime.now()\n\n # #########################################################################\n # Affine Invariant MCMC run\n # #########################################################################\n if verbosity > 1:\n print('\\nStart of the MCMC run ...')\n print('Step | Duration/step (sec) | Remaining Estimated Time (sec)')\n \n for k, res in enumerate(sampler.sample(pos, iterations=nIterations,\n storechain=True)):\n elapsed = (datetime.datetime.now()-start).total_seconds()\n if verbosity > 1:\n if k == 0:\n q = 0.5\n else:\n q = 1\n print('{}\\t\\t{:.5f}\\t\\t\\t{:.5f}'.format(k, elapsed * q,\n elapsed * (limit-k-1) * q))\n \n start = datetime.datetime.now()\n\n # ---------------------------------------------------------------------\n # Store the state manually in order to handle with dynamical sized chain\n # ---------------------------------------------------------------------\n # Check if the size of the chain is long enough.\n s = chain.shape[1]\n if k+1 > s: # if not, one doubles the chain length\n empty = np.zeros([nwalkers, 2*s, dim])\n chain = np.concatenate((chain, empty), axis=1)\n # Store the state of the chain\n chain[:, k] = res[0]\n\n # ---------------------------------------------------------------------\n # If k meets the criterion, one tests the non-convergence.\n # ---------------------------------------------------------------------\n criterion = int(np.amin([np.ceil(itermin*(1+fraction)**geom),\n lastcheck+np.floor(maxgap)]))\n if k == criterion:\n if verbosity > 1:\n print('\\n {} convergence test in progress...'.format(conv_test))\n \n geom += 1\n lastcheck = k\n if display:\n show_walk_plot(chain)\n \n if save and verbosity == 3:\n fname = '{d}/{f}_temp_k{k}'.format(d=output_dir,f=output_file_tmp, k=k)\n data = {'chain': sampler.chain,\n 'lnprob': sampler.lnprobability,\n 'AR': sampler.acceptance_fraction}\n with open(fname, 'wb') as fileSave:\n pickle.dump(data, fileSave)\n \n # We only test the rhat if we have reached the min # of steps\n if (k+1) >= itermin and konvergence == np.inf:\n if conv_test == 'gb':\n thr0 = int(np.floor(burnin*k))\n thr1 = int(np.floor((1-burnin)*k*0.25))\n \n # We calculate the rhat for each model parameter.\n for j in range(dim):\n part1 = chain[:, thr0:thr0 + thr1, j].reshape(-1)\n part2 = chain[:, thr0 + 3 * thr1:thr0 + 4 * thr1, j\n ].reshape(-1)\n series = np.vstack((part1, part2))\n rhat[j] = gelman_rubin(series)\n if verbosity > 0:\n print(' 
r_hat = {}'.format(rhat))\n cond = rhat <= rhat_threshold\n print(' r_hat <= threshold = {} \\n'.format(cond))\n # We test the rhat.\n if (rhat <= rhat_threshold).all():\n rhat_count += 1\n if rhat_count < rhat_count_threshold:\n if verbosity > 0:\n msg = \"Gelman-Rubin test OK {}/{}\"\n print(msg.format(rhat_count, rhat_count_threshold))\n elif rhat_count >= rhat_count_threshold:\n if verbosity > 0 :\n print('... ==> convergence reached')\n konvergence = k\n stop = konvergence + supp\n else:\n rhat_count = 0\n elif conv_test == 'ac':\n # We calculate the auto-corr test for each model parameter.\n if save:\n write_fits(output_dir+\"/TMP_test_chain{:.0f}.fits\".format(k),chain[:,:k])\n for j in range(dim):\n rhat[j] = autocorr_test(chain[:,:k,j])\n thr = 1./ac_c\n if verbosity > 0:\n print('Auto-corr tau/N = {}'.format(rhat))\n print('tau/N <= {} = {} \\n'.format(thr, rhat<thr))\n if (rhat <= thr).all():\n ac_count+=1\n if verbosity > 0:\n msg = \"Auto-correlation test passed for all params!\"\n msg+= \"{}/{}\".format(ac_count,ac_count_thr)\n print(msg)\n if ac_count >= ac_count_thr:\n msg='\\n ... ==> convergence reached'\n print(msg)\n stop = k\n else:\n ac_count = 0\n else:\n raise ValueError('conv_test value not recognized')\n # We have reached the maximum number of steps for our Markov chain.\n if k+1 >= stop:\n if verbosity > 0:\n print('We break the loop because we have reached convergence')\n break\n \n if k == nIterations-1:\n if verbosity > 0:\n print(\"We have reached the limit # of steps without convergence\")\n \n # #########################################################################\n # Construction of the independent samples\n # #########################################################################\n temp = np.where(chain[0, :, 0] == 0.0)[0]\n if len(temp) != 0:\n idxzero = temp[0]\n else:\n idxzero = chain.shape[1]\n \n # commented due to arbitrary cutoffs, rather tweak \"burnin\":\n # idx = int(np.amin([np.floor(2e5/nwalkers), np.floor(0.1*idxzero)]))\n \n idx=0\n \n if idx == 0:\n isamples = chain[:, 0:idxzero, :]\n else:\n isamples = chain[:, idxzero-idx:idxzero, :]\n\n if save:\n frame = inspect.currentframe()\n args, _, _, values = inspect.getargvalues(frame)\n input_parameters = {j: values[j] for j in args[1:]}\n \n output = {'isamples': isamples,\n 'chain': chain_zero_truncated(chain),\n 'input_parameters': input_parameters,\n 'AR': sampler.acceptance_fraction,\n 'lnprobability': sampler.lnprobability}\n \n if output_file is None:\n output_file = 'MCMC_results'\n with open(output_dir+'/'+output_file, 'wb') as fileSave:\n pickle.dump(output, fileSave)\n \n msg = \"\\nThe file MCMC_results has been stored in the folder {}\"\n print(msg.format(output_dir+'/'))\n\n if verbosity > 0:\n timing(start_time)\n \n return chain_zero_truncated(chain)\n\n \ndef chain_zero_truncated(chain):\n \"\"\"\n Return the Markov chain with the dimension: walkers x steps* x parameters,\n where steps* is the last step before having 0 (not yet constructed chain).\n \n Parameters\n ----------\n chain: numpy.array\n The MCMC chain.\n \n Returns\n -------\n out: numpy.array\n The truncated MCMC chain, that is to say, the chain which only contains\n relevant information.\n \"\"\"\n try:\n idxzero = np.where(chain[0, :, 0] == 0.0)[0][0]\n except:\n idxzero = chain.shape[1]\n return chain[:, 0:idxzero, :]\n \n \ndef show_walk_plot(chain, save=False, output_dir='', **kwargs):\n \"\"\"\n Display or save a figure showing the path of each walker during the MCMC run\n \n Parameters\n ----------\n 
chain: numpy.array\n        The Markov chain. The shape of chain must be nwalkers x length x dim.\n        If a part of the chain is filled with zero values, the method will\n        discard these steps.\n    save: boolean, default: False\n        If True, a pdf file is created.\n    kwargs:\n        Additional attributes are passed to the matplotlib plot method.\n\n    Returns\n    -------\n    Display the figure or create a pdf file named walk_plot.pdf in the working\n    directory.\n\n    \"\"\"\n    temp = np.where(chain[0, :, 0] == 0.0)[0]\n    if len(temp) != 0:\n        chain = chain[:, :temp[0], :]\n\n    labels = kwargs.pop('labels', [\"$r$\", r\"$\\theta$\", \"$f$\"])\n    fig, axes = plt.subplots(3, 1, sharex=True,\n                             figsize=kwargs.pop('figsize', (8, 6)))\n    axes[2].set_xlabel(kwargs.pop('xlabel', 'step number'))\n    axes[2].set_xlim(kwargs.pop('xlim', [0, chain.shape[1]]))\n    color = kwargs.pop('color', 'k')\n    alpha = kwargs.pop('alpha', 0.4)\n    for j in range(3):\n        axes[j].plot(chain[:, :, j].T, color=color, alpha=alpha, **kwargs)\n        axes[j].yaxis.set_major_locator(MaxNLocator(5))\n        axes[j].set_ylabel(labels[j])\n    fig.tight_layout(h_pad=0)\n    if save:\n        plt.savefig(output_dir+'walk_plot.pdf')\n        plt.close(fig)\n    else:\n        plt.show()\n\n\ndef show_corner_plot(chain, burnin=0.5, save=False, output_dir='', **kwargs):\n    \"\"\"\n    Display or save a figure showing the corner plot (pdfs + correlation plots).\n\n    Parameters\n    ----------\n    chain: numpy.array\n        The Markov chain. The shape of chain must be nwalkers x length x dim.\n        If a part of the chain is filled with zero values, the method will\n        discard these steps.\n    burnin: float, default: 0.5\n        The fraction of a walker chain we want to discard.\n    save: boolean, default: False\n        If True, a pdf file is created.\n    kwargs:\n        Additional attributes are passed to the corner.corner() method.\n\n    Returns\n    -------\n    Display the figure or create a pdf file named corner_plot.pdf in the working\n    directory.\n\n    Raises\n    ------\n    ImportError\n\n    \"\"\"\n    try:\n        temp = np.where(chain[0, :, 0] == 0.0)[0]\n        if len(temp) != 0:\n            chain = chain[:, :temp[0], :]\n        length = chain.shape[1]\n        indburn = int(np.floor(burnin*(length-1)))\n        chain = chain[:, indburn:length, :].reshape((-1, 3))\n    except IndexError:\n        pass\n\n    if chain.shape[0] == 0:\n        print(\"It seems the chain is empty. 
Have you already run the MCMC?\")\n else:\n labels = kwargs.pop('labels', [\"$r$\", r\"$\\theta$\", \"$f$\"])\n fig = corner.corner(chain, labels=labels, **kwargs)\n if save:\n plt.savefig(output_dir+'corner_plot.pdf')\n plt.close(fig)\n else:\n plt.show()\n\n\ndef confidence(isamples, cfd=68.27, bins=100, gaussian_fit=False, weights=None,\n verbose=True, save=False, output_dir='', force=False, **kwargs):\n \"\"\"\n Determine the highly probable value for each model parameter, as well as\n the 1-sigma confidence interval.\n \n Parameters\n ----------\n isamples: numpy.array\n The independent samples for each model parameter.\n cfd: float, optional\n The confidence level given in percentage.\n bins: int, optional\n The number of bins used to sample the posterior distributions.\n gaussian_fit: boolean, optional\n If True, a gaussian fit is performed in order to determine (\\mu,\\sigma)\n weights : (n, ) numpy ndarray or None, optional\n An array of weights for each sample.\n verbose: boolean, optional\n Display information in the shell.\n save: boolean, optional\n If \"True\", a txt file with the results is saved in the output\n repository.\n output_dir: str, optional\n If save is True, this is the full path to a directory where the results\n are saved.\n force: bool, optional\n If set to True, to force the confidence interval estimate even if too\n many samples fall in a single bin (unreliable CI estimates). If False, \n an error message is raised if the percentile of samples falling in a \n single bin is larger than cfd, suggesting to increase number of bins.\n kwargs: optional\n Additional attributes are passed to the matplotlib hist() method.\n \n Returns\n -------\n out: tuple\n A 2 elements tuple with either 1) the highly probable solution and the \n confidence interval; or 2) (if gaussian_fit is True) the center of the \n best-fit 1d gaussian distribution and its standard deviation, for each\n planet parameter.\n \n \"\"\"\n\n plsc = kwargs.pop('plsc', 0.001)\n title = kwargs.pop('title', None)\n \n output_file = kwargs.pop('filename', 'confidence.txt')\n \n try:\n l = isamples.shape[1]\n if l == 1:\n isamples = isamples[:,0]\n pKey = ['f']\n label_file = ['flux']\n label = [r'$\\Delta f$']\n elif l == 3:\n pKey = ['r', 'theta', 'f']\n label_file = ['r', 'theta', 'flux']\n label = [r'$\\Delta r$', r'$\\Delta \\theta$', r'$\\Delta f$']\n else:\n raise TypeError(\"input shape of isamples not recognized\")\n except:\n l = 1\n pKey = ['f']\n label_file = ['flux']\n label = [r'$\\Delta f$']\n \n confidenceInterval = {}\n val_max = {}\n \n \n if cfd == 100:\n cfd = 99.9\n \n #########################################\n ## Determine the confidence interval ##\n #########################################\n if gaussian_fit:\n mu = np.zeros(l)\n sigma = np.zeros_like(mu)\n \n if gaussian_fit:\n fig, ax = plt.subplots(2, l, figsize=(int(l*4),8))\n else:\n fig, ax = plt.subplots(1, l, figsize=(int(l*4),4))\n \n for j in range(l):\n if l>1:\n if gaussian_fit:\n n, bin_vertices, _ = ax[0][j].hist(isamples[:,j], bins=bins,\n weights=weights, histtype='step',\n edgecolor='gray')\n else:\n n, bin_vertices, _ = ax[j].hist(isamples[:,j], bins=bins,\n weights=weights, histtype='step',\n edgecolor='gray')\n else:\n if gaussian_fit:\n n, bin_vertices, _ = ax[0].hist(isamples[:], bins=bins,\n weights=weights, histtype='step',\n edgecolor='gray')\n else:\n n, bin_vertices, _ = ax.hist(isamples[:], bins=bins,\n weights=weights, histtype='step',\n edgecolor='gray') \n bins_width = 
np.mean(np.diff(bin_vertices))\n        surface_total = np.sum(np.ones_like(n)*bins_width * n)\n        n_arg_sort = np.argsort(n)[::-1]\n        \n        test = 0\n        pourcentage = 0\n        for k, jj in enumerate(n_arg_sort):\n            test = test + bins_width*n[int(jj)]\n            pourcentage = test/surface_total*100\n            if pourcentage > cfd:\n                if verbose:\n                    msg = 'percentage for {}: {}%'\n                    print(msg.format(label_file[j], pourcentage))\n                break\n        if k == 0:\n            msg = \"WARNING: Percentile reached in a single bin. \"\n            msg += \"This may be due to outliers or a small sample. \"\n            msg += \"Uncertainties will be unreliable. Try one of these: \"\n            msg += \"increase bins, or trim outliers, or decrease cfd.\"\n            if not force:\n                raise ValueError(msg)\n            else:\n                print(msg)\n        n_arg_min = int(n_arg_sort[:k+1].min())\n        n_arg_max = int(n_arg_sort[:k+1].max())\n\n        if n_arg_min == 0:\n            n_arg_min += 1\n        if n_arg_max == bins:\n            n_arg_max -= 1\n        \n        val_max[pKey[j]] = bin_vertices[int(n_arg_sort[0])]+bins_width/2.\n        confidenceInterval[pKey[j]] = np.array([bin_vertices[n_arg_min-1],\n                                                bin_vertices[n_arg_max+1]]\n                                               - val_max[pKey[j]])\n        if l>1:\n            arg = (isamples[:, j] >= bin_vertices[n_arg_min - 1]) * \\\n                  (isamples[:, j] <= bin_vertices[n_arg_max + 1])\n            if gaussian_fit:\n                ax[0][j].hist(isamples[arg,j], bins=bin_vertices,\n                              facecolor='gray', edgecolor='darkgray',\n                              histtype='stepfilled', alpha=0.5)\n                ax[0][j].vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n                                linestyles='dashed', color='red')\n                ax[0][j].set_xlabel(label[j])\n                if j == 0:\n                    ax[0][j].set_ylabel('Counts')\n                \n                mu[j], sigma[j] = norm.fit(isamples[:, j])\n                n_fit, bins_fit = np.histogram(isamples[:, j], bins, density=True,\n                                               weights=weights)\n                ax[1][j].hist(isamples[:, j], bins, density=True, weights=weights,\n                              facecolor='gray', edgecolor='darkgray',\n                              histtype='step')\n                y = norm.pdf(bins_fit, mu[j], sigma[j])\n                ax[1][j].plot(bins_fit, y, 'r--', linewidth=2, alpha=0.7)\n                \n                ax[1][j].set_xlabel(label[j])\n                if j == 0:\n                    ax[1][j].set_ylabel('Counts')\n                \n                if title is not None:\n                    msg = r\"{} $\\mu$ = {:.4f}, $\\sigma$ = {:.4f}\"\n                    ax[1][j].set_title(msg.format(title, mu[j], sigma[j]),\n                                       fontsize=10)\n            \n            else:\n                ax[j].hist(isamples[arg,j], bins=bin_vertices, facecolor='gray',\n                           edgecolor='darkgray', histtype='stepfilled',\n                           alpha=0.5)\n                ax[j].vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n                             linestyles='dashed', color='red')\n                ax[j].set_xlabel(label[j])\n                if j == 0:\n                    ax[j].set_ylabel('Counts')\n                \n                if title is not None:\n                    msg = r\"{} - {:.3f} {:.3f} +{:.3f}\"\n                    ax[j].set_title(msg.format(title, val_max[pKey[j]],\n                                               confidenceInterval[pKey[j]][0],\n                                               confidenceInterval[pKey[j]][1]),\n                                    fontsize=10)\n        else:\n            arg = (isamples[:] >= bin_vertices[n_arg_min - 1]) * \\\n                  (isamples[:] <= bin_vertices[n_arg_max + 1])\n            if gaussian_fit:\n                ax[0].hist(isamples[arg], bins=bin_vertices,\n                           facecolor='gray', edgecolor='darkgray',\n                           histtype='stepfilled', alpha=0.5)\n                ax[0].vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n                             linestyles='dashed', color='red')\n                ax[0].set_xlabel(label[j])\n                if j == 0:\n                    ax[0].set_ylabel('Counts')\n                \n                mu[j], sigma[j] = norm.fit(isamples[:])\n                n_fit, bins_fit = np.histogram(isamples[:], bins, density=True,\n                                               weights=weights)\n                ax[1].hist(isamples[:], bins, density=True, weights=weights,\n                           facecolor='gray', edgecolor='darkgray',\n                           histtype='step')\n                y = norm.pdf(bins_fit, mu[j], sigma[j])\n                ax[1].plot(bins_fit, y, 'r--', linewidth=2, alpha=0.7)\n                \n                ax[1].set_xlabel(label[j])\n                if j == 0:\n                    ax[1].set_ylabel('Counts')\n                \n                if title is not None:\n                    msg = r\"{} $\\mu$ = {:.4f}, $\\sigma$ = {:.4f}\"\n                    
ax[1].set_title(msg.format(title, mu[j], sigma[j]),\n fontsize=10)\n \n else:\n ax.hist(isamples[arg],bins=bin_vertices, facecolor='gray',\n edgecolor='darkgray', histtype='stepfilled',\n alpha=0.5)\n ax.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n linestyles='dashed', color='red')\n ax.set_xlabel(label[j])\n if j == 0:\n ax.set_ylabel('Counts')\n \n if title is not None:\n msg = r\"{} - {:.3f} {:.3f} +{:.3f}\"\n ax.set_title(msg.format(title, val_max[pKey[j]], \n confidenceInterval[pKey[j]][0], \n confidenceInterval[pKey[j]][1]),\n fontsize=10) \n\n plt.tight_layout(w_pad=0.1)\n\n if save:\n if gaussian_fit:\n plt.savefig(output_dir+'confi_hist_flux_r_theta_gaussfit.pdf')\n else:\n plt.savefig(output_dir+'confi_hist_flux_r_theta.pdf')\n\n if verbose:\n print('\\n\\nConfidence intervals:')\n if l>1:\n print('r: {} [{},{}]'.format(val_max['r'],\n confidenceInterval['r'][0],\n confidenceInterval['r'][1]))\n print('theta: {} [{},{}]'.format(val_max['theta'],\n confidenceInterval['theta'][0],\n confidenceInterval['theta'][1]))\n print('flux: {} [{},{}]'.format(val_max['f'],\n confidenceInterval['f'][0],\n confidenceInterval['f'][1]))\n if gaussian_fit:\n print()\n print('Gaussian fit results:')\n if l>1:\n print('r: {} +-{}'.format(mu[0], sigma[0]))\n print('theta: {} +-{}'.format(mu[1], sigma[1]))\n print('f: {} +-{}'.format(mu[2], sigma[2]))\n else:\n print('f: {} +-{}'.format(mu[0], sigma[0]))\n \n ##############################################\n ## Write inference results in a text file ##\n ##############################################\n if save:\n with open(output_dir+output_file, \"w\") as f:\n f.write('###########################\\n')\n f.write('#### INFERENCE TEST ###\\n')\n f.write('###########################\\n')\n f.write(' \\n')\n f.write('Results of the MCMC fit\\n')\n f.write('----------------------- \\n')\n f.write(' \\n')\n f.write('>> Position and flux of the planet (highly probable):\\n')\n f.write('{} % confidence interval\\n'.format(cfd))\n f.write(' \\n')\n\n for i in range(l):\n confidenceMax = confidenceInterval[pKey[i]][1]\n confidenceMin = -confidenceInterval[pKey[i]][0]\n if i == 2 or l==1:\n text = '{}: \\t\\t\\t{:.3f} \\t-{:.3f} \\t+{:.3f}\\n'\n else:\n text = '{}: \\t\\t\\t{:.3f} \\t\\t-{:.3f} \\t\\t+{:.3f}\\n'\n \n f.write(text.format(pKey[i], val_max[pKey[i]],\n confidenceMin, confidenceMax))\n if l>1:\n f.write(' ')\n f.write('Platescale = {} mas\\n'.format(plsc*1000))\n f.write('r (mas): \\t\\t{:.2f} \\t\\t-{:.2f} \\t\\t+{:.2f}\\n'.format(\n val_max[pKey[0]]*plsc*1000,\n -confidenceInterval[pKey[0]][0]*plsc*1000,\n confidenceInterval[pKey[0]][1]*plsc*1000))\n\n if gaussian_fit:\n return mu, sigma\n else:\n return val_max, confidenceInterval"
] | [
[
"numpy.dot",
"numpy.amax",
"numpy.amin",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros"
],
[
"numpy.vstack",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.histogram",
"numpy.where",
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"scipy.stats.norm.fit",
"numpy.ceil",
"numpy.std",
"numpy.diff",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.power",
"matplotlib.pyplot.savefig",
"numpy.floor",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sum",
"numpy.abs",
"scipy.stats.norm.pdf",
"numpy.random.normal",
"matplotlib.ticker.MaxNLocator",
"numpy.isinf",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
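A minimal sketch of the interval estimation implemented by the confidence() function recorded in the entry above: histogram bins are ranked by height and their probability mass accumulated until the requested confidence level cfd is reached. Only numpy is assumed, and the helper name interval_from_samples is hypothetical, not part of the source file itself.

import numpy as np

def interval_from_samples(samples, cfd=68.27, bins=100):
    # Histogram the posterior samples; widths handle non-uniform edges.
    n, edges = np.histogram(samples, bins=bins)
    widths = np.diff(edges)
    order = np.argsort(n)[::-1]                      # bins ranked by height
    mass = np.cumsum(n[order] * widths[order])
    mass = mass / mass[-1] * 100.0                   # cumulative mass in percent
    k = int(np.searchsorted(mass, cfd))              # smallest bin set covering cfd%
    kept = order[:k + 1]
    mode = edges[order[0]] + widths[order[0]] / 2.0  # center of the tallest bin
    return mode, edges[kept.min()], edges[kept.max() + 1]

mode, lo, hi = interval_from_samples(np.random.normal(1.0, 0.3, 20000))
# mode should land near 1.0 and [lo, hi] near the one-sigma interval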
leandro-gracia-gil/addons | [
"af6866a2e6d9ddbc79d612d7cb04a8a5befe4a47",
"d981b0f1d1bc23f697d159eb1510c24b3c476d28"
] | [
"tensorflow_addons/image/tests/dense_image_warp_test.py",
"tensorflow_addons/layers/snake.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for dense_image_warp.\"\"\"\n\nimport pytest\nimport math\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.image import dense_image_warp\nfrom tensorflow_addons.image import interpolate_bilinear\n\n\ndef test_interpolate_small_grid_ij():\n grid = tf.constant(\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],\n shape=[1, 4, 3, 1],\n )\n query_points = tf.constant(\n [[0.0, 0.0], [1.0, 0.0], [2.0, 0.5], [1.5, 1.5], [3.0, 2.0]], shape=[1, 5, 2],\n )\n expected_results = np.reshape(np.array([0.0, 3.0, 6.5, 6.0, 11.0]), [1, 5, 1])\n\n interp = interpolate_bilinear(grid, query_points)\n\n np.testing.assert_allclose(expected_results, interp)\n\n\ndef test_interpolate_small_grid_xy():\n grid = tf.constant(\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],\n shape=[1, 4, 3, 1],\n )\n query_points = tf.constant(\n [[0.0, 0.0], [0.0, 1.0], [0.5, 2.0], [1.5, 1.5], [2.0, 3.0]], shape=[1, 5, 2],\n )\n expected_results = np.reshape(np.array([0.0, 3.0, 6.5, 6.0, 11.0]), [1, 5, 1])\n\n interp = interpolate_bilinear(grid, query_points, indexing=\"xy\")\n\n np.testing.assert_allclose(expected_results, interp)\n\n\ndef test_interpolate_small_grid_batched():\n grid = tf.constant(\n [[[0.0, 1.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], shape=[2, 2, 2, 1]\n )\n query_points = tf.constant(\n [[[0.0, 0.0], [1.0, 0.0], [0.5, 0.5]], [[0.5, 0.0], [1.0, 0.0], [1.0, 1.0]]]\n )\n expected_results = np.reshape(\n np.array([[0.0, 3.0, 2.0], [6.0, 7.0, 8.0]]), [2, 3, 1]\n )\n\n interp = interpolate_bilinear(grid, query_points)\n\n np.testing.assert_allclose(expected_results, interp)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_unknown_shape():\n query_points = tf.constant(\n [[0.0, 0.0], [0.0, 1.0], [0.5, 2.0], [1.5, 1.5]], shape=[1, 4, 2]\n )\n fn = interpolate_bilinear.get_concrete_function(\n tf.TensorSpec(shape=None, dtype=tf.float32),\n tf.TensorSpec(shape=None, dtype=tf.float32),\n )\n for shape in (2, 4, 3, 6), (6, 2, 4, 3), (1, 2, 4, 3):\n image = tf.ones(shape=shape)\n res = fn(image, query_points)\n assert res.shape == (shape[0], 4, shape[3])\n\n\ndef _check_zero_flow_correctness(shape, image_type, flow_type):\n \"\"\"Assert using zero flows doesn't change the input image.\"\"\"\n rand_image, rand_flows = _get_random_image_and_flows(shape, image_type, flow_type)\n rand_flows *= 0\n\n interp = dense_image_warp(\n image=tf.convert_to_tensor(rand_image), flow=tf.convert_to_tensor(rand_flows),\n )\n\n np.testing.assert_allclose(rand_image, interp, rtol=1e-6, atol=1e-6)\n\n\ndef test_zero_flows():\n \"\"\"Apply _check_zero_flow_correctness() for a few sizes and types.\"\"\"\n shapes_to_try = [[3, 4, 5, 6], [1, 2, 2, 1]]\n for shape in shapes_to_try:\n _check_zero_flow_correctness(shape, image_type=\"float32\", 
flow_type=\"float32\")\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_gradients_exist():\n \"\"\"Check that backprop can run.\n\n The correctness of the gradients is assumed, since the forward\n propagation is tested to be correct and we only use built-in tf\n ops. However, we perform a simple test to make sure that\n backprop can actually run.\n \"\"\"\n batch_size, height, width, num_channels = [4, 5, 6, 7]\n image_shape = [batch_size, height, width, num_channels]\n image = tf.random.normal(image_shape)\n flow_shape = [batch_size, height, width, 2]\n flows = tf.Variable(tf.random.normal(shape=flow_shape) * 0.25, dtype=tf.float32)\n\n with tf.GradientTape() as t:\n interp = dense_image_warp(image, flows)\n\n grads = t.gradient(interp, flows).numpy()\n assert np.sum(np.abs(grads)) != 0\n\n\ndef _assert_correct_interpolation_value(\n image,\n flows,\n pred_interpolation,\n batch_index,\n y_index,\n x_index,\n low_precision=False,\n):\n \"\"\"Assert that the tf interpolation matches hand-computed value.\"\"\"\n height = image.shape[1]\n width = image.shape[2]\n displacement = flows[batch_index, y_index, x_index, :]\n float_y = y_index - displacement[0]\n float_x = x_index - displacement[1]\n floor_y = max(min(height - 2, math.floor(float_y)), 0)\n floor_x = max(min(width - 2, math.floor(float_x)), 0)\n ceil_y = floor_y + 1\n ceil_x = floor_x + 1\n\n alpha_y = min(max(0.0, float_y - floor_y), 1.0)\n alpha_x = min(max(0.0, float_x - floor_x), 1.0)\n\n floor_y = int(floor_y)\n floor_x = int(floor_x)\n ceil_y = int(ceil_y)\n ceil_x = int(ceil_x)\n\n top_left = image[batch_index, floor_y, floor_x, :]\n top_right = image[batch_index, floor_y, ceil_x, :]\n bottom_left = image[batch_index, ceil_y, floor_x, :]\n bottom_right = image[batch_index, ceil_y, ceil_x, :]\n\n interp_top = alpha_x * (top_right - top_left) + top_left\n interp_bottom = alpha_x * (bottom_right - bottom_left) + bottom_left\n interp = alpha_y * (interp_bottom - interp_top) + interp_top\n atol = 1e-6\n rtol = 1e-6\n if low_precision:\n atol = 1e-2\n rtol = 1e-3\n np.testing.assert_allclose(\n interp,\n pred_interpolation[batch_index, y_index, x_index, :],\n atol=atol,\n rtol=rtol,\n )\n\n\ndef _get_random_image_and_flows(shape, image_type, flow_type):\n batch_size, height, width, num_channels = shape\n image_shape = [batch_size, height, width, num_channels]\n image = np.random.normal(size=image_shape)\n flow_shape = [batch_size, height, width, 2]\n flows = np.random.normal(size=flow_shape) * 3\n return image.astype(image_type), flows.astype(flow_type)\n\n\ndef _check_interpolation_correctness(\n shape, image_type, flow_type, call_with_unknown_shapes=False, num_probes=5\n):\n \"\"\"Interpolate, and then assert correctness for a few query\n locations.\"\"\"\n low_precision = image_type == \"float16\" or flow_type == \"float16\"\n rand_image, rand_flows = _get_random_image_and_flows(shape, image_type, flow_type)\n\n if call_with_unknown_shapes:\n fn = dense_image_warp.get_concrete_function(\n tf.TensorSpec(shape=None, dtype=image_type),\n tf.TensorSpec(shape=None, dtype=flow_type),\n )\n interp = fn(\n image=tf.convert_to_tensor(rand_image),\n flow=tf.convert_to_tensor(rand_flows),\n )\n else:\n interp = dense_image_warp(\n image=tf.convert_to_tensor(rand_image),\n flow=tf.convert_to_tensor(rand_flows),\n )\n\n for _ in range(num_probes):\n batch_index = np.random.randint(0, shape[0])\n y_index = np.random.randint(0, shape[1])\n x_index = np.random.randint(0, shape[2])\n\n _assert_correct_interpolation_value(\n 
rand_image,\n rand_flows,\n interp,\n batch_index,\n y_index,\n x_index,\n low_precision=low_precision,\n )\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_interpolation():\n \"\"\"Apply _check_interpolation_correctness() for a few sizes and\n types.\"\"\"\n shapes_to_try = [[3, 4, 5, 6], [1, 2, 2, 1]]\n for im_type in [\"float32\", \"float64\", \"float16\"]:\n for flow_type in [\"float32\", \"float64\", \"float16\"]:\n for shape in shapes_to_try:\n _check_interpolation_correctness(shape, im_type, flow_type)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_size_exception():\n \"\"\"Make sure it throws an exception for images that are too small.\"\"\"\n shape = [1, 2, 1, 1]\n errors = (ValueError, tf.errors.InvalidArgumentError)\n with pytest.raises(errors) as exception_raised:\n _check_interpolation_correctness(shape, \"float32\", \"float32\")\n assert \"Grid width must be at least 2.\" in str(exception_raised.value)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_unknown_shapes():\n \"\"\"Apply _check_interpolation_correctness() for a few sizes and check\n for tf.Dataset compatibility.\"\"\"\n shapes_to_try = [[3, 4, 5, 6], [1, 2, 2, 1]]\n for shape in shapes_to_try:\n _check_interpolation_correctness(shape, \"float32\", \"float32\", True)\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Snake layer.\"\"\"\n\nimport tensorflow as tf\nfrom typeguard import typechecked\n\nfrom tensorflow_addons.activations.snake import snake\n\nfrom tensorflow_addons.utils import types\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass Snake(tf.keras.layers.Layer):\n \"\"\"Snake layer to learn periodic functions with the trainable `frequency` scalar.\n\n https://arxiv.org/abs/2006.08195\n\n Arguments:\n frequency_initializer: Initializer for the `frequency` scalar.\n \"\"\"\n\n @typechecked\n def __init__(self, frequency_initializer: types.Initializer = \"ones\", **kwargs):\n super().__init__(**kwargs)\n self.frequency_initializer = tf.keras.initializers.get(frequency_initializer)\n self.frequency = self.add_weight(\n initializer=frequency_initializer, trainable=True\n )\n\n def call(self, inputs):\n return snake(inputs, self.frequency)\n\n def get_config(self):\n config = {\n \"frequency_initializer\": tf.keras.initializers.serialize(\n self.frequency_initializer\n ),\n }\n base_config = super().get_config()\n return {**base_config, **config}\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.constant",
"numpy.abs",
"tensorflow.ones",
"numpy.random.normal",
"numpy.random.randint",
"numpy.testing.assert_allclose",
"numpy.array",
"tensorflow.random.normal",
"tensorflow.TensorSpec",
"tensorflow.GradientTape"
],
[
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.initializers.get"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
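A short usage sketch for the dense_image_warp API exercised by the tests in the entry above, assuming tensorflow and tensorflow_addons are installed. Output pixels are pulled against the flow, so a flow of +1 in y reproduces the row above, bilinearly interpolated; queries outside the grid are clamped to its border.

import numpy as np
import tensorflow as tf
from tensorflow_addons.image import dense_image_warp

image = tf.random.normal([1, 4, 4, 1])
# flow[..., 0] is the y displacement, flow[..., 1] the x displacement:
# output[b, y, x] samples image[b, y - flow_y, x - flow_x].
flow = tf.ones([1, 4, 4, 2]) * tf.constant([1.0, 0.0])
warped = dense_image_warp(image, flow)
# Every row except the first is a copy of the row above it.
np.testing.assert_allclose(warped[0, 1:, :, 0], image[0, :-1, :, 0],
                           rtol=1e-6, atol=1e-6)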
ocefpaf/pysal | [
"7e397bdb4c22d4e2442b4ee88bcd691d2421651d",
"7e397bdb4c22d4e2442b4ee88bcd691d2421651d",
"7e397bdb4c22d4e2442b4ee88bcd691d2421651d",
"7e397bdb4c22d4e2442b4ee88bcd691d2421651d"
] | [
"pysal/model/spvcm/abstracts.py",
"pysal/explore/esda/lee.py",
"pysal/lib/weights/set_operations.py",
"pysal/model/spvcm/both_levels/generic/sample.py"
] | [
"import warnings\nfrom datetime import datetime as dt\nimport numpy as np\nimport copy\nimport multiprocessing as mp\nimport pandas as pd\nimport os\n\n\nfrom .sqlite import head_to_sql, start_sql\nfrom .plotting import plot_trace\nfrom collections import OrderedDict\ntry:\n from tqdm import tqdm\n import six\n if not six.PY3:\n range = xrange\nexcept ImportError:\n from .utils import thru_op\n tqdm = thru_op\n\n__all__ = ['Sampler_Mixin', 'Hashmap', 'Trace']\n\n######################\n# SAMPLER MECHANISMS #\n######################\n\n\nclass Sampler_Mixin(object):\n \"\"\"\n A Mixin class designed to facilitate code reuse. This should be the parent class of anything that uses the sampling framework in this package.\n \"\"\"\n def __init__(self):\n super(Sampler_Mixin, self).__init__()\n\n def sample(self, n_samples, n_jobs=1):\n \"\"\"\n Sample from the joint posterior distribution defined by all of the\n parameters in the gibbs sampler.\n\n Parameters\n ----------\n n_samples : int\n number of samples from the joint posterior density to take\n n_jobs : int\n number of parallel chains to run.\n\n Returns\n -------\n Implicitly updates all values in place, returns None\n \"\"\"\n if n_jobs > 1:\n self._parallel_sample(n_samples, n_jobs)\n return\n elif isinstance(self.state, list):\n self._parallel_sample(n_samples, n_jobs=len(self.state))\n return\n _start = dt.now()\n try:\n for _ in tqdm(range(n_samples)):\n if (self._verbose > 1) and (n_samples % 100 == 0):\n print('{} Draws to go'.format(n_samples))\n self.draw()\n except KeyboardInterrupt:\n warnings.warn('Sampling interrupted, drew {} samples'.format(self.cycles))\n finally:\n _stop = dt.now()\n if not hasattr(self, 'total_sample_time'):\n self.total_sample_time = _stop - _start\n else:\n self.total_sample_time += _stop - _start\n\n def draw(self):\n \"\"\"\n Take exactly one sample from the joint posterior distribution.\n \"\"\"\n if self.cycles == 0:\n self._finalize()\n self._iteration()\n self.cycles += 1\n for param in self.traced_params:\n self.trace.chains[0][param].append(self.state[param])\n if self.database is not None:\n head_to_sql(self, self._cur, self._cxn)\n for param in self.traced_params:\n self.trace.chains[0][param] = [self.trace[param,-1]]\n\n def _parallel_sample(self, n_samples, n_jobs):\n \"\"\"\n Run n_jobs parallel samples of a given model. 
\n Not intended to be called directly, and should be called by model.sample.\n \"\"\"\n models = [copy.deepcopy(self) for _ in range(n_jobs)]\n for i, model in enumerate(models):\n if isinstance(model.state, list):\n models[i].state = copy.deepcopy(self.state[i])\n if hasattr(model, 'configs'):\n if isinstance(model.configs, list):\n models[i].configs = copy.deepcopy(self.configs[i])\n if self.database is not None:\n models[i].database = self.database + str(i)\n models[i].trace = Trace(**{k:[] for k in model.trace.varnames})\n if self.cycles == 0:\n models[i]._fuzz_starting_values()\n n_samples = [n_samples] * n_jobs\n _start = dt.now()\n seed = np.random.randint(0,10000, size=n_jobs).tolist()\n P = mp.Pool(n_jobs)\n results = P.map(_reflexive_sample, zip(models, n_samples, seed))\n P.close()\n _stop = dt.now()\n if self.cycles > 0:\n new_traces = []\n for i, model in enumerate(results):\n # model.trace.chains is always single-chain, since we've broken everything into single chains\n new_traces.append(Hashmap(**{k:param + model.trace.chains[0][k]\n for k, param in self.trace.chains[i].items()}))\n new_trace = Trace(*new_traces)\n else:\n new_trace = Trace(*[model.trace.chains[0] for model in results])\n self.trace = new_trace\n self.state = [model.state for model in results]\n self.cycles += n_samples[0]\n self.configs = [model.configs for model in results]\n if hasattr(self, 'total_sample_time'):\n self.total_sample_time += _stop - _start\n else:\n self.total_sample_time = _stop - _start\n\n def _fuzz_starting_values(self, state=None):\n \"\"\"\n Function to overdisperse starting values used in the package.\n \"\"\"\n st = self.state\n if hasattr(st, 'Betas'):\n st.Betas += np.random.normal(0,5, size=st.Betas.shape)\n if hasattr(st, 'Alphas'):\n st.Alphas += np.random.normal(0,5,size=st.Alphas.shape)\n if hasattr(st, 'Sigma2'):\n st.Sigma2 += np.random.uniform(0,5)\n if hasattr(st, 'Tau2'):\n st.Tau2 += np.random.uniform(0,5)\n if hasattr(st, 'Lambda'):\n st.Lambda += np.random.uniform(-.25,.25)\n if hasattr(st, 'Rho'):\n st.Rho += np.random.uniform(-.25,.25)\n\n def _finalize(self, **args):\n \"\"\"\n Abstract function to ensure inheritors define a finalze method. This method should compute all derived quantities used in the _iteration() function that would change if the user changed priors, starting values, or other information. This is to ensure that if the user initializes the sampler with n_samples=0 and then changes the state, the derived quantites used in sampling are correct.\n \"\"\"\n raise NotImplementedError\n\n def _setup_priors(self, **args):\n \"\"\"\n Abstract function to ensure inheritors define a _setup_priors method. This method should assign into the state all of the correct priors for all parameters in the model.\n \"\"\"\n raise NotImplementedError\n\n def _setup_truncation(self, **args):\n \"\"\"\n Abstract function to ensure inheritors define a _setup_truncation method. This method should truncate parameter space to a given arbitrary bounds.\n \"\"\"\n raise NotImplementedError\n\n def _setup_starting_values(self, **args):\n \"\"\"\n Abstract function to ensure that inheritors define a _setup_starting_values method. 
This method should assign the correct values for each of the parameters into model.state.\n        \"\"\"\n        raise NotImplementedError\n\n    @property\n    def database(self):\n        \"\"\"\n        the database used for the model.\n        \"\"\"\n        return getattr(self, '_db', None)\n\n    @database.setter\n    def database(self, filename):\n        self._cxn, self._cur = start_sql(self, tracename=filename)\n        self._db = filename\n        from .sqlite import trace_from_sql\n        def load_sqlite():\n            return trace_from_sql(filename)\n        self.trace.load_sqlite = load_sqlite\n\ndef _reflexive_sample(tup):\n    \"\"\"\n    a helper function to sample a bunch of models in parallel.\n\n    Tuple must be:\n\n    model : model object\n    n_samples : int number of samples\n    seed : seed to use for the sampler\n    \"\"\"\n    model, n_samples, seed = tup\n    np.random.seed(seed)\n    model.sample(n_samples=n_samples)\n    return model\n\ndef _noop(*args, **kwargs):\n    pass\n\n#######################\n# MAPS AND CONTAINERS #\n#######################\n\nclass Hashmap(dict):\n    \"\"\"\n    A dictionary with dot access on attributes\n    \"\"\"\n    def __init__(self, **kw):\n        super(Hashmap, self).__init__(**kw)\n        if kw != dict():\n            for k in kw:\n                self[k] = kw[k]\n\n    def __getattr__(self, attr):\n        try:\n            r = self[attr]\n        except KeyError:\n            try:\n                r = getattr(super(Hashmap, self), attr)\n            except AttributeError:\n                raise AttributeError(\"'{}' object has no attribute '{}'\"\n                                     .format(self.__class__, attr))\n        return r\n\n    def __setattr__(self, key, value):\n        self.__setitem__(key, value)\n\n    def __setitem__(self, key, value):\n        super(Hashmap, self).__setitem__(key,value)\n        self.__dict__.update({key:value})\n\n    def __delattr__(self, item):\n        self.__delitem__(item)\n\n    def __delitem__(self, key):\n        super(Hashmap, self).__delitem__(key)\n        del self.__dict__[key]\n\nclass Trace(object):\n    \"\"\"\n    Object to contain results from sampling.\n\n    Arguments\n    ---------\n    chains : a chain or comma-separated sequence of chains\n             a chain is a dict-like collection, where keys are the parameter name and the values are the values of the chain.\n    kwargs : a dictionary splatted into keyword arguments\n             the name of the argument is taken to be the parameter name, and the value is taken to be a chain of that parameter.\n\n    Examples\n    ---------\n    >>> Trace(a=[1,2,3], b=[4,2,5], c=[1,9,23]) #Trace with one chain\n    >>> Trace([{'a':[1,2,3], 'b':[4,2,5], 'c':[1,9,23]},\n               {'a':[2,5,1], 'b':[2,9,1], 'c':[9,21,1]}]) #Trace with two chains\n    \"\"\"\n    def __init__(self, *chains, **kwargs):\n        if chains == () and kwargs != dict():\n            self.chains = _maybe_hashmap(kwargs)\n        if chains != ():\n            self.chains = _maybe_hashmap(*chains)\n            if kwargs != dict():\n                self.chains.extend(_maybe_hashmap(kwargs))\n        self._validate_schema()\n\n    @property\n    def varnames(self, chain=None):\n        \"\"\"\n        Names of variables contained in the trace.\n        \"\"\"\n        try:\n            return self._varnames\n        except AttributeError:\n            try:\n                self._validate_schema()\n            except KeyError:\n                if chain is None:\n                    raise Exception('Variable names are heterogeneous in chains and no default index provided.')\n                else:\n                    warnings.warn('Variable names are heterogeneous in chains!', stacklevel=2)\n                    return list(self.chains[chain].keys())\n        self._varnames = list(self.chains[0].keys())\n        return self._varnames\n\n    def drop(self, varnames, inplace=True):\n        \"\"\"\n        Drop a variable from the trace.\n\n        Arguments\n        ---------\n        varnames : list of strings\n                   names of parameters to drop from the trace.\n        inplace : bool\n                  whether to return a copy of the trace with parameters removed, or remove them inplace.\n        
\"\"\"\n if isinstance(varnames, str):\n varnames = (varnames,)\n if not inplace:\n new = copy.deepcopy(self)\n new.drop(varnames, inplace=True)\n new._varnames = list(new.chains[0].keys())\n return new\n for i, chain in enumerate(self.chains):\n for varname in varnames:\n del self.chains[i][varname]\n self._varnames = list(self.chains[0].keys())\n\n def _validate_schema(self, chains=None):\n \"\"\"\n Validates the trace to ensure that the chain is self-consistent.\n \"\"\"\n if chains is None:\n chains = self.chains\n tracked_in_each = [set(chain.keys()) for chain in chains]\n same_schema = [names == tracked_in_each[0] for names in tracked_in_each]\n try:\n assert all(same_schema)\n except AssertionError:\n bad_chains = [i for i in range(len(chains)) if same_schema[i]]\n KeyError('The parameters tracked in each chain are not the same!'\n '\\nChains {} do not have the same parameters as chain 1!'.format(bad_chains))\n\n def add_chain(self, chains, validate=True):\n \"\"\"\n Add chains to a trace object\n\n Parameters\n ----------\n chains : Hashmap or list of hashmaps\n chains to merge into the trace\n validate: bool\n whether or not to validate the schema and reject the chain if it does not match the current trace.\n \"\"\"\n if not isinstance(chains, (list, tuple)):\n chains = (chains,)\n new_chains = [self.chains]\n for chain in chains:\n if isinstance(chain, Hashmap):\n new_chains.append(chain)\n elif isinstance(chain, Trace):\n new_chains.extend(chain.chains)\n else:\n new_chains.extend(_maybe_hashmap(chain))\n if validate:\n self._validate_schema(chains=new_chains)\n self.chains = new_chains\n\n def map(self, func, **func_args):\n \"\"\"\n Map a function over all parameters in a chain.\n Multivariate parameters are reduced to sequences of univariate parameters.\n\n Usage\n -------\n Intended when full-trace statistics are required. Most often,\n the trace should be sliced directly. For example, to get the mean value of a\n parameter over the last -1000 iterations with a thinning of 2:\n\n trace[0, 'Betas', -1000::2].mean(axis=0)\n\n but, to average of the parameter over all recorded chains:\n\n trace['Betas', -1000::2].mean(axis=0).mean(axis=0)\n\n since the first reduction provides an array where rows\n are iterations and columns are parameters.\n\n trace.map(np.mean) yields the mean of each parameter within each chain, and is\n provided to make within-chain reductions easier.\n\n Arguments\n ---------\n func : callable\n a function that returns a result when provided a flat vector.\n varnames : string or list of strings\n a keyword only argument governing which parameters to map over.\n func_args : dictionary/keyword arguments\n arguments needed to be passed to the reduction\n \"\"\"\n varnames = func_args.pop('varnames', self.varnames)\n if isinstance(varnames, str):\n varnames = (varnames, )\n all_stats = []\n for i, chain in enumerate(self.chains):\n these_stats=dict()\n for var in varnames:\n data = np.squeeze(self[i,var])\n if data.ndim > 1:\n n,p = data.shape[0:2]\n rest = data.shape[2:0]\n if len(rest) == 0:\n data = data.T\n elif len(rest) == 1:\n data = data.reshape(n,p*rest[0]).T\n else:\n raise Exception('Parameter \"{}\" shape not understood.' ' Please extract, shape it, and pass '\n ' as its own chain. 
'.format(var))\n else:\n data = data.reshape(1,-1)\n stats = [func(datum, **func_args) for datum in data]\n if len(stats) == 1:\n stats = stats[0]\n these_stats.update({var:stats})\n all_stats.append(these_stats)\n return all_stats\n\n @property\n def n_chains(self):\n return len(self.chains)\n\n @property\n def n_iters(self):\n \"\"\"\n Number of raw iterations stored in the trace.\n \"\"\"\n lengths = [len(chain[self.varnames[0]]) for chain in self.chains]\n if len(lengths) == 1:\n return lengths[0]\n else:\n return lengths\n\n def plot(self, burn=0, thin=None, varnames=None,\n kde_kwargs={}, trace_kwargs={}, figure_kwargs={}):\n \"\"\"\n Make a trace plot paired with a distributional plot.\n\n Arguments\n -----------\n trace : namespace\n a namespace whose variables are contained in varnames\n burn : int\n the number of iterations to discard from the front of the trace\n thin : int\n the number of iterations to discard between iterations\n varnames : str or list\n name or list of names to plot.\n kde_kwargs : dictionary\n dictionary of aesthetic arguments for the kde plot\n trace_kwargs : dictionary\n dictinoary of aesthetic arguments for the traceplot\n\n Returns\n -------\n figure, axis tuple, where axis is (len(varnames), 2)\n \"\"\"\n f, ax = plot_trace(model=None, trace=self, burn=burn,\n thin=thin, varnames=varnames,\n kde_kwargs=kde_kwargs, trace_kwargs=trace_kwargs,\n figure_kwargs=figure_kwargs)\n return f,ax\n\n def summarize(self, level=0):\n \"\"\"\n Compute a summary of the trace. See Also: diagnostics.summary\n\n Arguments\n ------------\n level : int\n 0 for a summary by chain or 1 if the summary should be computed by pooling over chains.\n \"\"\"\n from .diagnostics import summarize\n return summarize(trace=self, level=level)\n\n def __getitem__(self, key):\n \"\"\"\n Getting an item from a trace can be done using at most three indices, where:\n\n 1 index\n --------\n str/list of str: names of variates in all chains to grab. Returns list of Hashmaps\n slice/int: iterations to grab from all chains. 
Returns list of Hashmaps, sliced to the specification\n\n 2 index\n -------\n (str/list of str, slice/int): first term is name(s) of variates in all chains to grab,\n second term specifies the slice each chain.\n returns: list of hashmaps with keys of first term and entries sliced by the second term.\n (slice/int, str/list of str): first term specifies which chains to retrieve,\n second term is name(s) of variates in those chains\n returns: list of hashmaps containing all iterations\n (slice/int, slice/int): first term specifies which chains to retrieve,\n second term specifies the slice of each chain.\n returns: list of hashmaps with entries sliced by the second term\n 3 index\n --------\n (slice/int, str/list of str, slice/int) : first term specifies which chains to retrieve,\n second term is the name(s) of variates in those chains,\n third term is the iteration slicing.\n returns: list of hashmaps keyed on second term, with entries sliced by the third term\n \"\"\"\n if isinstance(key, str): #user wants only one name from the trace\n if self.n_chains > 1:\n result = ([chain[key] for chain in self.chains])\n else:\n result = (self.chains[0][key])\n elif isinstance(key, (slice, int)): #user wants all draws past a certain index\n if self.n_chains > 1:\n return [Hashmap(**{k:v[key] for k,v in chain.items()}) for chain in self.chains]\n else:\n return Hashmap(**{k:v[key] for k,v in self.chains[0].items()})\n elif isinstance(key, list) and all([isinstance(val, str) for val in key]): #list of atts over all iters and all chains\n if self.n_chains > 1:\n return [Hashmap(**{k:chain[k] for k in key}) for chain in self.chains]\n else:\n return Hashmap(**{k:self.chains[0][k] for k in key})\n elif isinstance(key, tuple): #complex slicing\n if len(key) == 1:\n return self[key[0]] #ignore empty blocks\n if len(key) == 2:\n head, tail = key\n if isinstance(head, str): #all chains, one var, some iters\n if self.n_chains > 1:\n result = ([_ifilter(tail, chain[head]) for chain in self.chains])\n else:\n result = (_ifilter(tail, self.chains[0][head]))\n elif isinstance(head, list) and all([isinstance(v, str) for v in head]): #all chains, some vars, some iters\n if self.n_chains > 1:\n return [Hashmap(**{name:_ifilter(tail, chain[name]) for name in head})\n for chain in self.chains]\n else:\n chain = self.chains[0]\n return Hashmap(**{name:_ifilter(tail, chain[name]) for name in head})\n elif isinstance(tail, str):\n target_chains = _ifilter(head, self.chains)\n if isinstance(target_chains, Hashmap):\n target_chains = [target_chains]\n if len(target_chains) > 1:\n result = ([chain[tail] for chain in target_chains])\n elif len(target_chains) == 1:\n result = (target_chains[0][tail])\n else:\n raise IndexError('The supplied chain index {} does not'\n ' match any chains in trace.chains'.format(head))\n elif isinstance(tail, list) and all([isinstance(v, str) for v in tail]):\n target_chains = _ifilter(head, self.chains)\n if isinstance(target_chains, Hashmap):\n target_chains = [target_chains]\n if len(target_chains) > 1:\n return [Hashmap(**{k:chain[k] for k in tail}) for chain in target_chains]\n elif len(target_chains) == 1:\n return Hashmap(**{k:target_chains[0][k] for k in tail})\n else:\n raise IndexError('The supplied chain index {} does not'\n ' match any chains in trace.chains'.format(head))\n else:\n target_chains = _ifilter(head, self.chains)\n if isinstance(target_chains, Hashmap):\n target_chains = [target_chains]\n out = [Hashmap(**{k:_ifilter(tail, val) for k,val in chain.items()})\n for chain in 
target_chains]\n if len(out) == 1:\n return out[0]\n else:\n return out\n elif len(key) == 3:\n chidx, varnames, iters = key\n if isinstance(chidx, int):\n if np.abs(chidx) > self.n_chains:\n raise IndexError('The supplied chain index {} does not'\n ' match any chains in trace.chains'.format(chidx))\n if varnames == slice(None, None, None):\n varnames = self.varnames\n chains = _ifilter(chidx, self.chains)\n if isinstance(chains, Hashmap):\n chains = [chains]\n nchains = len(chains)\n if isinstance(varnames, str):\n varnames = [varnames]\n if varnames is slice(None, None, None):\n varnames = self.varnames\n if len(varnames) == 1:\n if nchains > 1:\n result = ([_ifilter(iters, chain[varnames[0]]) for chain in chains])\n else:\n result = (_ifilter(iters, chains[0][varnames[0]]))\n else:\n if nchains > 1:\n return [Hashmap(**{varname:_ifilter(iters, chain[varname])\n for varname in varnames})\n for chain in chains]\n else:\n return Hashmap(**{varname:_ifilter(iters, chains[0][varname]) for varname in varnames})\n else:\n raise IndexError('index not understood')\n\n result = np.asarray(result)\n if result.shape == ():\n result = result.tolist()\n elif result.shape in [(1,1), (1,)]:\n result = result[0]\n return result\n\n ##############\n # Comparison #\n ##############\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return False\n else:\n a = [ch1==ch2 for ch1,ch2 in zip(other.chains, self.chains)]\n return all(a)\n\n def _allclose(self, other, **allclose_kw):\n try:\n self._assert_allclose(other, **allclose_kw)\n except AssertionError:\n return False\n return True\n\n def _assert_allclose(self, other, **allclose_kw):\n ignore_shape = allclose_kw.pop('ignore_shape', False)\n squeeze = allclose_kw.pop('squeeze', True)\n try:\n assert set(self.varnames) == set(other.varnames)\n except AssertionError:\n raise AssertionError('Variable names are different!\\n'\n 'self: {}\\nother:{}'.format(\n self.varnames, other.varnames))\n assert isinstance(other, type(self))\n for ch1, ch2 in zip(self.chains, other.chains):\n for k,v in ch1.items():\n allclose_kw['err_msg'] = 'Failed on {}'.format(k)\n if ignore_shape:\n A = [np.asarray(item).flatten() for item in v]\n B = [np.asarray(item).flatten() for item in ch2[k]]\n elif squeeze:\n A = [np.squeeze(item) for item in v]\n B = [np.squeeze(item) for item in ch2[k]]\n else:\n A = v\n B = ch2[k]\n np.testing.assert_allclose(A,B,**allclose_kw)\n\n\n ###################\n # IO and Exchange #\n ###################\n\n def to_df(self):\n \"\"\"\n Convert the trace object to a Pandas Dataframe.\n\n Returns\n -------\n a dataframe where each column is a parameter. 
Multivariate parameters are vectorized and stuffed into a column.\n        \"\"\"\n        dfs = []\n        outnames = self.varnames\n        to_split = [name for name in outnames if np.asarray(self[0,name,0]).size > 1]\n        for chain in self.chains:\n            out = OrderedDict(list(chain.items()))\n            for split in to_split:\n                records = np.asarray(copy.deepcopy(chain[split]))\n                if len(records.shape) == 1:\n                    records = records.reshape(-1,1)\n                n,k = records.shape[0:2]\n                rest = records.shape[2:]\n                if len(rest) == 0:\n                    pass\n                elif len(rest) == 1:\n                    records = records.reshape(n,int(k*rest[0]))\n                else:\n                    raise Exception(\"Parameter '{}' has too many dimensions\"\n                                    \" to be flattened.\".format(split))\n                records = OrderedDict([(split+'_'+str(i),record.T.tolist())\n                                       for i,record in enumerate(records.T)])\n                out.update(records)\n                del out[split]\n            df = pd.DataFrame().from_dict(out)\n            dfs.append(df)\n        if len(dfs) == 1:\n            return dfs[0]\n        else:\n            return dfs\n\n    def to_csv(self, filename, **pandas_kwargs):\n        \"\"\"\n        Write trace out to file, going through Trace.to_df()\n\n        If there are multiple chains in this trace, this will write\n        them each out to 'filename_number.csv', where `number` is the\n        number of the trace.\n\n        Arguments\n        ---------\n        filename : string\n                   name of file to write the trace to.\n        pandas_kwargs : keyword arguments\n                        arguments to pass to the pandas to_csv function.\n        \"\"\"\n        if 'index' not in pandas_kwargs:\n            pandas_kwargs['index'] = False\n        dfs = self.to_df()\n        if isinstance(dfs, list):\n            name, ext = os.path.splitext(filename)\n            for i, df in enumerate(dfs):\n                df.to_csv(name + '_' + str(i) + ext, **pandas_kwargs)\n        else:\n            dfs.to_csv(filename, **pandas_kwargs)\n\n    @classmethod\n    def from_df(cls, dfs, varnames=None, combine_suffix='_'):\n        \"\"\"\n        Convert a dataframe into a trace object.\n\n        Arguments\n        ----------\n        dfs : dataframe or list of dataframes\n              pandas dataframes to convert into a trace. Each dataframe is assumed to be a single chain.\n        varnames : string or list of strings\n                   names to use instead of the names in the dataframe. If None, the column\n                   names are split using `combine_suffix`, and the unique stems before the suffix are used as parameter names.\n        \"\"\"\n        if not isinstance(dfs, (tuple, list)):\n            dfs = (dfs,)\n        if len(dfs) > 1:\n            traces = ([cls.from_df(df, varnames=varnames,\n                                   combine_suffix=combine_suffix) for df in dfs])\n            return cls(*[trace.chains[0] for trace in traces])\n        else:\n            df = dfs[0]\n        if varnames is None:\n            varnames = df.columns\n        unique_stems = set()\n        for col in varnames:\n            suffix_split = col.split(combine_suffix)\n            if suffix_split[0] == col:\n                unique_stems.update([col])\n            else:\n                unique_stems.update(['_'.join(suffix_split[:-1])])\n        out = dict()\n        for stem in unique_stems:\n            cols = []\n            for var in df.columns:\n                if var == stem:\n                    cols.append(var)\n                elif '_'.join(var.split('_')[:-1]) == stem:\n                    cols.append(var)\n            if len(cols) == 1:\n                targets = df[cols].values.flatten().tolist()\n            else:\n                # ensure the tail ordinate sorts the columns, not string order\n                # '1','11','2' will corrupt the trace\n                order = [int(st.split(combine_suffix)[-1]) for st in cols]\n                cols = np.asarray(cols)[np.argsort(order)]\n                targets = [vec for vec in df[cols].values]\n            out.update({stem:targets})\n        return cls(**out)\n\n    @classmethod\n    def from_pymc3(cls, pymc3trace):\n        \"\"\"\n        Convert a PyMC3 trace to a pysal.model.spvcm trace\n        \"\"\"\n        try:\n            from pymc3 import trace_to_dataframe\n        except ImportError:\n            raise ImportError(\"The 'trace_to_dataframe' function in \"\n                              \"pymc3 is used for this feature. 
Pymc3 \"\n \"failed to import.\")\n return cls.from_df(mc.trace_to_dataframe(pymc3trace))\n\n @classmethod\n def from_csv(cls, filename=None, multi=False,\n varnames=None, combine_suffix='_', **pandas_kwargs):\n \"\"\"\n Read a CSV into a trace object, by way of `Trace.from_df()`\n\n Arguments\n ----------\n filename : string\n string containing the name of the file to read.\n multi : bool\n flag denoting whether the trace being read is a multitrace or not. If so, the filename is understood to be the prefix of many files that end in `filename_#.csv`\n varnames : string or list of strings\n custom names to use for the trace. If not provided, combine suffix is used to identify the unique prefixes in the csvs.\n pandas_kawrgs: keyword arguments\n keyword arguments to pass to the pandas functions.\n \"\"\"\n if multi:\n filepath = os.path.dirname(os.path.abspath(filename))\n filestem = os.path.basename(filename)\n targets = [f for f in os.listdir(filepath)\n if f.startswith(filestem)]\n ordinates = [int(os.path.splitext(fname)[0].split(combine_suffix)[-1])\n for fname in targets]\n # preserve the order of the trailing ordinates\n targets = np.asarray(targets)[np.argsort(ordinates)].tolist()\n traces = ([cls.from_csv(filename=os.path.join(filepath, f)\n ,multi=False) for f in targets])\n if traces == []:\n raise IOError(\"No such file or directory: \" +\n filepath + filestem)\n\n return cls(*[trace.chains[0] for trace in traces])\n else:\n df = pd.read_csv(filename, **pandas_kwargs)\n return cls.from_df(df, varnames=varnames,\n combine_suffix=combine_suffix)\n\n\n####################\n# HELPER FUNCTIONS #\n####################\n\ndef _ifilter(filt,iterable):\n \"\"\"\n Filter an iterable by whether or not each item is in the filt\n \"\"\"\n try:\n return iterable[filt]\n except:\n if isinstance(filt, (int, float)):\n filt = [filt]\n return [val for i,val in enumerate(iterable) if i in filt]\n\ndef _maybe_hashmap(*collections):\n \"\"\"\n Attempt to coerce a collection into a Hashmap. Otherwise, leave it alone.\n \"\"\"\n out = []\n for collection in collections:\n if isinstance(collection, Hashmap):\n out.append(collection)\n else:\n out.append(Hashmap(**collection))\n return out\n\ndef _copy_hashmaps(*hashmaps):\n \"\"\"\n Create deep copies of the hashmaps passed to the function.\n \"\"\"\n return [Hashmap(**{k:copy.deepcopy(v) for k,v in hashmap.items()})\n for hashmap in hashmaps]\n",
"import numpy\nfrom scipy import sparse\nfrom sklearn.base import BaseEstimator\nfrom sklearn import preprocessing\nfrom sklearn import utils\n\nclass Spatial_Pearson(BaseEstimator):\n \"\"\"Global Spatial Pearson Statistic\"\"\"\n\n def __init__(self, connectivity=None, permutations=999):\n \"\"\"\n Initialize a spatial pearson estimator\n\n Arguments\n ---------\n connectivity: scipy.sparse matrix object\n the connectivity structure describing the relationships\n between observed units. Will be row-standardized. \n permutations: int\n the number of permutations to conduct for inference.\n if < 1, no permutational inference will be conducted. \n\n Attributes\n ----------\n association_: numpy.ndarray (2,2)\n array containg the estimated Lee spatial pearson correlation\n coefficients, where element [0,1] is the spatial correlation\n coefficient, and elements [0,0] and [1,1] are the \"spatial\n smoothing factor\"\n reference_distribution_: numpy.ndarray (n_permutations, 2,2)\n distribution of correlation matrices for randomly-shuffled\n maps. \n significance_: numpy.ndarray (2,2)\n permutation-based p-values for the fraction of times the\n observed correlation was more extreme than the simulated \n correlations.\n \"\"\"\n self.connectivity = connectivity\n self.permutations = permutations\n\n def fit(self, x, y):\n \"\"\"\n bivariate spatial pearson's R based on Eq. 18 of Lee (2001).\n\n L = \\dfrac{Z^T (V^TV) Z}{1^T (V^TV) 1}\n\n Arguments\n ---------\n x : numpy.ndarray\n array containing continuous data\n y : numpy.ndarray\n array containing continuous data\n\n Returns\n -------\n the fitted estimator.\n\n Notes\n -----\n Technical details and derivations can be found in :cite:`Lee2001`.\n\n \"\"\"\n x = utils.check_array(x)\n y = utils.check_array(y)\n Z = numpy.column_stack((preprocessing.StandardScaler().fit_transform(x),\n preprocessing.StandardScaler().fit_transform(y)))\n if self.connectivity is None:\n self.connectivity = sparse.eye(Z.shape[0])\n self.association_ = self._statistic(Z, self.connectivity) \n \n standard_connectivity = sparse.csc_matrix(self.connectivity /\n self.connectivity.sum(axis=1))\n\n if (self.permutations is None):\n return self\n elif self.permutations < 1:\n return self\n\n if self.permutations:\n simulations = [self._statistic(numpy.random.permutation(Z), self.connectivity)\n for _ in range(self.permutations)]\n self.reference_distribution_ = simulations = numpy.array(simulations)\n above = simulations >= self.association_\n larger = above.sum(axis=0)\n extreme = numpy.minimum(self.permutations - larger, larger)\n self.significance_ = (extreme + 1.) / (self.permutations + 1.)\n return self\n\n @staticmethod\n def _statistic(Z,W):\n ctc = W.T @ W\n ones = numpy.ones(ctc.shape[0])\n return (Z.T @ ctc @ Z) / (ones.T @ ctc @ ones)\n\n\nclass Local_Spatial_Pearson(BaseEstimator):\n \"\"\"Local Spatial Pearson Statistic\"\"\"\n\n def __init__(self, connectivity=None, permutations=999):\n \"\"\"\n Initialize a spatial local pearson estimator\n\n Arguments\n ---------\n connectivity: scipy.sparse matrix object\n the connectivity structure describing the relationships\n between observed units. Will be row-standardized. \n permutations: int\n the number of permutations to conduct for inference.\n if < 1, no permutational inference will be conducted. 
\n significance_: numpy.ndarray (2,2)\n permutation-based p-values for the fraction of times the\n observed correlation was more extreme than the simulated \n correlations.\n Attributes\n ----------\n associations_: numpy.ndarray (n_samples,)\n array containg the estimated Lee spatial pearson correlation\n coefficients, where element [0,1] is the spatial correlation\n coefficient, and elements [0,0] and [1,1] are the \"spatial\n smoothing factor\"\n reference_distribution_: numpy.ndarray (n_permutations, n_samples)\n distribution of correlation matrices for randomly-shuffled\n maps. \n significance_: numpy.ndarray (n_samples,)\n permutation-based p-values for the fraction of times the\n observed correlation was more extreme than the simulated \n correlations.\n\n\n Notes\n -----\n Technical details and derivations can be found in :cite:`Lee2001`.\n \"\"\"\n self.connectivity = connectivity\n self.permutations = permutations\n\n def fit(self, x, y):\n \"\"\"\n bivariate local pearson's R based on Eq. 22 in Lee (2001), using \n site-wise conditional randomization from Moran_Local_BV.\n \n L_i = \\dfrac{\n n \\cdot\n \\Big[\\big(\\sum_i w_{ij}(x_j - \\bar{x})\\big)\n \\big(\\sum_i w_{ij}(y_j - \\bar{y})\\big) \\Big]\n } \n {\n \\sqrt{\\sum_i (x_i - \\bar{x})^2}\n \\sqrt{\\sum_i (y_i - \\bar{y})^2}}\n = \\dfrac{\n n \\cdot\n (\\tilde{x}_j - \\bar{x})\n (\\tilde{y}_j - \\bar{y})\n } \n {\n \\sqrt{\\sum_i (x_i - \\bar{x})^2}\n \\sqrt{\\sum_i (y_i - \\bar{y})^2}}\n\n Lee, Sang Il. (2001), \"Developing a bivariate spatial \n association measure: An integration of Pearson's r and \n Moran's I.\" Journal of Geographical Systems, 3(4):369-385.\n\n Arguments\n ---------\n x : numpy.ndarray\n array containing continuous data\n y : numpy.ndarray\n array containing continuous data\n\n Returns\n -------\n the fitted estimator.\n \"\"\"\n x = utils.check_array(x)\n x = preprocessing.StandardScaler().fit_transform(x)\n \n y = utils.check_array(y)\n y = preprocessing.StandardScaler().fit_transform(y)\n\n Z = numpy.column_stack((x, y))\n\n standard_connectivity = sparse.csc_matrix(self.connectivity /\n self.connectivity.sum(axis=1))\n \n n, _ = x.shape\n\n self.associations_ = self._statistic(Z, standard_connectivity)\n\n if self.permutations:\n self.reference_distribution_ = numpy.empty((n, self.permutations))\n max_neighbors = (standard_connectivity != 0).sum(axis=1).max()\n random_ids = numpy.array([numpy.random.permutation(n - 1)[0:max_neighbors + 1]\n for i in range(self.permutations)])\n ids = numpy.arange(n)\n\n for i in range(n):\n row = standard_connectivity[i]\n weight = numpy.asarray(row[row.nonzero()]).reshape(-1,1)\n cardinality = row.nonzero()[0].shape[0]\n\n ids_not_i = ids[ids != i]\n numpy.random.shuffle(ids_not_i)\n randomizer = random_ids[:, 0:cardinality]\n random_neighbors = ids_not_i[randomizer]\n \n random_neighbor_x = x[random_neighbors]\n random_neighbor_y = y[random_neighbors]\n\n self.reference_distribution_[i] = (weight * random_neighbor_y - y.mean())\\\n .sum(axis=1).squeeze()\n self.reference_distribution_[i] *= (weight * random_neighbor_x - x.mean())\\\n .sum(axis=1).squeeze()\n above = self.reference_distribution_ >= self.associations_.reshape(-1,1)\n larger = above.sum(axis=1)\n extreme = numpy.minimum(larger, self.permutations - larger)\n self.significance_ = (extreme + 1.) 
/ (self.permutations + 1.)\n self.reference_distribution_ = self.reference_distribution_.T\n else:\n self.reference_distribution_ = None\n return self\n\n @staticmethod\n def _statistic(Z,W):\n return (Z[:,1] @ W.T) * (W @ Z[:,0]) \n\nif __name__ == '__main__':\n import geopandas\n import pysal.lib\n df = geopandas.read_file(pysal.lib.examples.get_path('columbus.shp'))\n x = df[['HOVAL']].values\n y = df[['CRIME']].values\n zx = preprocessing.StandardScaler().fit_transform(x)\n zy = preprocessing.StandardScaler().fit_transform(y)\n w = pysal.lib.weights.Queen.from_dataframe(df)\n w.transform = 'r'\n numpy.random.seed(2478879)\n testglobal = Spatial_Pearson(connectivity=w.sparse).fit(x,y)\n numpy.random.seed(2478879)\n testlocal = Local_Spatial_Pearson(connectivity=w.sparse).fit(x,y)\n",
"\"\"\"\nSet-like manipulation of weights matrices.\n\"\"\"\n\n__author__ = \"Sergio J. Rey <[email protected]>, Charles Schmidt <[email protected]>, David Folch <[email protected]>, Dani Arribas-Bel <[email protected]>\"\n\nimport copy\nfrom .weights import W, WSP\nfrom scipy.sparse import isspmatrix_csr\nfrom numpy import ones\n\n__all__ = ['w_union', 'w_intersection', 'w_difference',\n 'w_symmetric_difference', 'w_subset', 'w_clip']\n\n\ndef w_union(w1, w2, **kwargs):\n \"\"\"\n Returns a binary weights object, w, that includes all neighbor pairs that\n exist in either w1 or w2.\n\n Parameters\n ----------\n\n w1 : W\n object\n w2 : W\n object\n **kwargs : keyword arguments\n optional arguments for :class:`pysal.weights.W`\n\n Returns\n -------\n\n w : W\n object\n\n Notes\n -----\n ID comparisons are performed using ==, therefore the integer ID 2 is\n equivalent to the float ID 2.0. Returns a matrix with all the unique IDs\n from w1 and w2.\n\n Examples\n --------\n\n Construct rook weights matrices for two regions, one is 4x4 (16 areas)\n and the other is 6x4 (24 areas). A union of these two weights matrices\n results in the new weights matrix matching the larger one.\n\n >>> from pysal.lib.weights import lat2W, w_union\n >>> w1 = lat2W(4,4)\n >>> w2 = lat2W(6,4)\n >>> w = w_union(w1, w2)\n >>> w1[0] == w[0]\n True\n >>> w1.neighbors[15]\n [11, 14]\n >>> w2.neighbors[15]\n [11, 14, 19]\n >>> w.neighbors[15]\n [19, 11, 14]\n\n \"\"\"\n neighbors = dict(list(w1.neighbors.items()))\n for i in w2.neighbors:\n if i in neighbors:\n add_neigh = set(neighbors[i]).union(set(w2.neighbors[i]))\n neighbors[i] = list(add_neigh)\n else:\n neighbors[i] = copy.copy(w2.neighbors[i])\n return W(neighbors, **kwargs)\n\n\ndef w_intersection(w1, w2, w_shape='w1', **kwargs):\n \"\"\"\n Returns a binary weights object, w, that includes only\n those neighbor pairs that exist in both w1 and w2.\n\n Parameters\n ----------\n\n w1 : W\n object\n w2 : W\n object\n w_shape : string\n Defines the shape of the returned weights matrix. 'w1' returns a\n matrix with the same IDs as w1; 'all' returns a matrix with all\n the unique IDs from w1 and w2; and 'min' returns a matrix with\n only the IDs occurring in both w1 and w2.\n **kwargs : keyword arguments\n optional arguments for :class:`pysal.weights.W`\n\n Returns\n -------\n\n w : W\n object\n\n Notes\n -----\n ID comparisons are performed using ==, therefore the integer ID 2 is\n equivalent to the float ID 2.0.\n\n Examples\n --------\n\n Construct rook weights matrices for two regions, one is 4x4 (16 areas)\n and the other is 6x4 (24 areas). 
An intersection of these two weights\n matrices results in the new weights matrix matching the smaller one.\n\n >>> from pysal.lib.weights import lat2W, w_intersection\n >>> w1 = lat2W(4,4)\n >>> w2 = lat2W(6,4)\n >>> w = w_intersection(w1, w2)\n >>> w1[0] == w[0]\n True\n >>> w1.neighbors[15]\n [11, 14]\n >>> w2.neighbors[15]\n [11, 14, 19]\n >>> w.neighbors[15]\n [11, 14]\n\n \"\"\"\n\n if w_shape == 'w1':\n neigh_keys = list(w1.neighbors.keys())\n elif w_shape == 'all':\n neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))\n elif w_shape == 'min':\n neigh_keys = set(w1.neighbors.keys(\n )).intersection(set(w2.neighbors.keys()))\n else:\n raise Exception(\"invalid string passed to w_shape\")\n\n neighbors = {}\n for i in neigh_keys:\n if i in w1.neighbors and i in w2.neighbors:\n add_neigh = set(w1.neighbors[i]).intersection(set(w2.neighbors[i]))\n neighbors[i] = list(add_neigh)\n else:\n neighbors[i] = []\n\n return W(neighbors, **kwargs)\n\n\ndef w_difference(w1, w2, w_shape='w1', constrained=True, **kwargs):\n \"\"\"\n Returns a binary weights object, w, that includes only neighbor pairs\n in w1 that are not in w2. The w_shape and constrained parameters\n determine which pairs in w1 that are not in w2 are returned.\n\n Parameters\n ----------\n\n w1 : W\n object\n w2 : W\n object\n w_shape : string\n Defines the shape of the returned weights matrix. 'w1' returns a\n matrix with the same IDs as w1; 'all' returns a matrix with all\n the unique IDs from w1 and w2; and 'min' returns a matrix with\n the IDs occurring in w1 and not in w2.\n constrained : boolean\n If False then the full set of neighbor pairs in w1 that are\n not in w2 are returned. If True then those pairs that would\n not be possible if w_shape='min' are dropped. Ignored if\n w_shape is set to 'min'.\n **kwargs : keyword arguments\n optional arguments for :class:`pysal.weights.W`\n\n Returns\n -------\n\n w : W\n object\n\n Notes\n -----\n ID comparisons are performed using ==, therefore the integer ID 2 is\n equivalent to the float ID 2.0.\n\n Examples\n --------\n\n Construct rook (w2) and queen (w1) weights matrices for two 4x4 regions\n (16 areas). A queen matrix has all the joins a rook matrix does plus joins\n between areas that share a corner. The new matrix formed by the difference\n of rook from queen contains only join at corners (typically called a\n bishop matrix). 
Note that the difference of queen from rook would result\n in a weights matrix with no joins.\n\n >>> from pysal.lib.weights import lat2W, w_difference\n >>> w1 = lat2W(4,4,rook=False)\n >>> w2 = lat2W(4,4,rook=True)\n >>> w = w_difference(w1, w2, constrained=False)\n >>> w1[0] == w[0]\n False\n >>> w1.neighbors[15]\n [10, 11, 14]\n >>> w2.neighbors[15]\n [11, 14]\n >>> w.neighbors[15]\n [10]\n\n \"\"\"\n\n if w_shape == 'w1':\n neigh_keys = list(w1.neighbors.keys())\n elif w_shape == 'all':\n neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))\n elif w_shape == 'min':\n neigh_keys = set(\n w1.neighbors.keys()).difference(set(w2.neighbors.keys()))\n if not neigh_keys:\n raise Exception(\"returned an empty weights matrix\")\n else:\n raise Exception(\"invalid string passed to w_shape\")\n\n neighbors = {}\n for i in neigh_keys:\n if i in w1.neighbors:\n if i in w2.neighbors:\n add_neigh = set(w1.neighbors[i]\n ).difference(set(w2.neighbors[i]))\n neighbors[i] = list(add_neigh)\n else:\n neighbors[i] = copy.copy(w1.neighbors[i])\n else:\n neighbors[i] = []\n\n if constrained or w_shape == 'min':\n constrained_keys = set(\n w1.neighbors.keys()).difference(set(w2.neighbors.keys()))\n island_keys = set(neighbors.keys()).difference(constrained_keys)\n for i in island_keys:\n neighbors[i] = []\n for i in constrained_keys:\n neighbors[i] = list(\n set(neighbors[i]).intersection(constrained_keys))\n\n return W(neighbors, **kwargs)\n\n\ndef w_symmetric_difference(w1, w2, w_shape='all', constrained=True, **kwargs):\n \"\"\"\n Returns a binary weights object, w, that includes only neighbor pairs\n that are not shared by w1 and w2. The w_shape and constrained parameters\n determine which pairs that are not shared by w1 and w2 are returned.\n\n Parameters\n ----------\n\n w1 : W\n object\n w2 : W\n object\n w_shape : string\n Defines the shape of the returned weights matrix. 'all' returns a\n matrix with all the unique IDs from w1 and w2; and 'min' returns\n a matrix with the IDs not shared by w1 and w2.\n constrained : boolean\n If False then the full set of neighbor pairs that are not\n shared by w1 and w2 are returned. If True then those pairs\n that would not be possible if w_shape='min' are dropped.\n Ignored if w_shape is set to 'min'.\n **kwargs : keyword arguments\n optional arguments for :class:`pysal.weights.W`\n\n Returns\n -------\n\n w : W\n object\n\n Notes\n -----\n ID comparisons are performed using ==, therefore the integer ID 2 is\n equivalent to the float ID 2.0.\n\n Examples\n --------\n\n Construct queen weights matrix for a 4x4 (16 areas) region (w1) and a rook\n matrix for a 6x4 (24 areas) region (w2). 
The symmetric difference of these\n    two matrices (with w_shape set to 'all' and constrained set to False)\n    contains the corner joins in the overlap area, and all the joins in the\n    non-overlap area.\n\n    >>> from pysal.lib.weights import lat2W, w_symmetric_difference\n    >>> w1 = lat2W(4,4,rook=False)\n    >>> w2 = lat2W(6,4,rook=True)\n    >>> w = w_symmetric_difference(w1, w2, constrained=False)\n    >>> w1[0] == w[0]\n    False\n    >>> w1.neighbors[15]\n    [10, 11, 14]\n    >>> w2.neighbors[15]\n    [11, 14, 19]\n    >>> set(w.neighbors[15]) == set([10, 19])\n    True\n\n    \"\"\"\n\n    if w_shape == 'all':\n        neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))\n    elif w_shape == 'min':\n        neigh_keys = set(w1.neighbors.keys(\n        )).symmetric_difference(set(w2.neighbors.keys()))\n    else:\n        raise Exception(\"invalid string passed to w_shape\")\n\n    neighbors = {}\n    for i in neigh_keys:\n        if i in w1.neighbors:\n            if i in w2.neighbors:\n                add_neigh = set(w1.neighbors[i]).symmetric_difference(\n                    set(w2.neighbors[i]))\n                neighbors[i] = list(add_neigh)\n            else:\n                neighbors[i] = copy.copy(w1.neighbors[i])\n        elif i in w2.neighbors:\n            neighbors[i] = copy.copy(w2.neighbors[i])\n        else:\n            neighbors[i] = []\n\n    if constrained or w_shape == 'min':\n        constrained_keys = set(\n            w1.neighbors.keys()).difference(set(w2.neighbors.keys()))\n        island_keys = set(neighbors.keys()).difference(constrained_keys)\n        for i in island_keys:\n            neighbors[i] = []\n        for i in constrained_keys:\n            neighbors[i] = list(\n                set(neighbors[i]).intersection(constrained_keys))\n\n    return W(neighbors, **kwargs)\n\n\ndef w_subset(w1, ids, **kwargs):\n    \"\"\"\n    Returns a binary weights object, w, that includes only those\n    observations in ids.\n\n    Parameters\n    ----------\n\n    w1                      : W\n                              object\n    ids                     : list\n                              A list containing the IDs to be included in the returned weights\n                              object.\n    **kwargs                : keyword arguments\n                              optional arguments for :class:`pysal.weights.W`\n\n    Returns\n    -------\n\n    w                       : W\n                              object\n\n    Examples\n    --------\n\n    Construct a rook weights matrix for a 6x4 region (24 areas). By default\n    PySAL assigns integer IDs to the areas in a region. By passing in a list\n    of integers from 0 to 15, the first 16 areas are extracted from the\n    previous weights matrix, and only those joins relevant to the new region\n    are retained.\n\n    >>> from pysal.lib.weights import lat2W, w_subset\n    >>> w1 = lat2W(6,4)\n    >>> ids = range(16)\n    >>> w = w_subset(w1, ids)\n    >>> w1[0] == w[0]\n    True\n    >>> w1.neighbors[15]\n    [11, 14, 19]\n    >>> w.neighbors[15]\n    [11, 14]\n\n    \"\"\"\n\n    neighbors = {}\n    ids_set = set(list(ids))\n    for i in ids:\n        if i in w1.neighbors:\n            neigh_add = ids_set.intersection(set(w1.neighbors[i]))\n            neighbors[i] = list(neigh_add)\n        else:\n            neighbors[i] = []\n\n    return W(neighbors, id_order=list(ids), **kwargs)\n\n\ndef w_clip(w1, w2, outSP=True, **kwargs):\n    '''\n    Clip a continuous W object (w1) with a different W object (w2) so only cells where\n    w2 has a non-zero value remain with non-zero values in w1.\n\n    Checks on w1 and w2 are performed to make sure they conform to the\n    appropriate format and, if not, they are converted.\n\n    Parameters\n    ----------\n    w1                      : W\n                              W, scipy.sparse.csr.csr_matrix\n                              Potentially continuous weights matrix to be clipped. The clipped\n                              matrix wc will have at most the same elements as w1.\n    w2                      : W\n                              W, scipy.sparse.csr.csr_matrix\n                              Weights matrix to use as shell to clip w1. Automatically\n                              converted to binary format. Only non-zero elements in w2 will be\n                              kept non-zero in wc. 
NOTE: assumed to be of the same shape as w1\n    outSP                   : boolean\n                              If True (default) return sparse version of the clipped W, if\n                              False, return W object of the clipped matrix\n    **kwargs                : keyword arguments\n                              optional arguments for :class:`pysal.weights.W`\n\n    Returns\n    -------\n    wc      : W\n              W, scipy.sparse.csr.csr_matrix\n              Clipped W object (sparse if outSP=True). It inherits\n              ``id_order`` from w1.\n\n    Examples\n    --------\n    >>> from pysal.lib.weights import lat2W\n\n    First create a W object from a lattice using queen contiguity and\n    row-standardize it (note that these weights will stay when we clip the\n    object, but they will not necessarily represent a row-standardization\n    anymore):\n\n    >>> w1 = lat2W(3, 2, rook=False)\n    >>> w1.transform = 'R'\n\n    We will clip that geography assuming observations 0, 2, 3 and 4 belong to\n    one group and 1, 5 belong to another group and we don't want both groups\n    to interact with each other in our weights (i.e. w_ij = 0 if i and j in\n    different groups). For that, we use the following method:\n\n    >>> import pysal.lib\n    >>> w2 = pysal.lib.weights.block_weights(['r1', 'r2', 'r1', 'r1', 'r1', 'r2'])\n\n    To illustrate that w2 will only be considered as binary even when the\n    object passed is not, we can row-standardize it\n\n    >>> w2.transform = 'R'\n\n    The clipped object ``wc`` will contain only the spatial queen\n    relationships that occur within one group ('r1' or 'r2') but will have\n    gotten rid of those that happen across groups\n\n    >>> wcs = pysal.lib.weights.w_clip(w1, w2, outSP=True)\n\n    This will create a sparse object (recommended when n is large).\n\n    >>> wcs.sparse.toarray()\n    array([[0.        , 0.        , 0.33333333, 0.33333333, 0.        ,\n            0.        ],\n           [0.        , 0.        , 0.        , 0.        , 0.        ,\n            0.        ],\n           [0.2       , 0.        , 0.        , 0.2       , 0.2       ,\n            0.        ],\n           [0.2       , 0.        , 0.2       , 0.        , 0.2       ,\n            0.        ],\n           [0.        , 0.        , 0.33333333, 0.33333333, 0.        ,\n            0.        ],\n           [0.        , 0.        , 0.        , 0.        , 0.        ,\n            0.        ]])\n\n\n    If we wanted an original W object, we can control that with the argument\n    ``outSP``:\n\n    >>> wc = pysal.lib.weights.w_clip(w1, w2, outSP=False)\n\n    WARNING: there are 2 disconnected observations\n    Island ids: [1, 5]\n    >>> wc.full()[0]\n    array([[0.        , 0.        , 0.33333333, 0.33333333, 0.        ,\n            0.        ],\n           [0.        , 0.        , 0.        , 0.        , 0.        ,\n            0.        ],\n           [0.2       , 0.        , 0.        , 0.2       , 0.2       ,\n            0.        ],\n           [0.2       , 0.        , 0.2       , 0.        , 0.2       ,\n            0.        ],\n           [0.        , 0.        , 0.33333333, 0.33333333, 0.        ,\n            0.        ],\n           [0.        , 0.        , 0.        , 0.        , 0.        ,\n            0.        ]])\n\n    You can check they are actually the same:\n\n    >>> wcs.sparse.toarray() == wc.full()[0]\n    array([[ True,  True,  True,  True,  True,  True],\n           [ True,  True,  True,  True,  True,  True],\n           [ True,  True,  True,  True,  True,  True],\n           [ True,  True,  True,  True,  True,  True],\n           [ True,  True,  True,  True,  True,  True],\n           [ True,  True,  True,  True,  True,  True]])\n\n    '''\n\n    from .util import WSP2W\n    if not w1.id_order:\n        w1.id_order = None\n    id_order = w1.id_order\n    if not isspmatrix_csr(w1):\n        w1 = w1.sparse\n    if not isspmatrix_csr(w2):\n        w2 = w2.sparse\n    w2.data = ones(w2.data.shape)\n    wc = w1.multiply(w2)\n    wc = WSP(wc, id_order=id_order)\n    if not outSP:\n        wc = WSP2W(wc, **kwargs)\n    return wc\n",
"from __future__ import division\nimport numpy as np\nimport numpy.linalg as la\nfrom ...utils import splogdet, spsolve\nfrom ...steps import metropolis\nfrom pysal.model.spreg.utils import spdot\n\n#############################\n# SPATIAL SAMPLE METHODS #\n#############################\n\ndef logp_rho_cov(state, val):\n \"\"\"\n The logp for lower-level spatial parameters in this case has the same\n form as a multivariate normal distribution, sampled over the variance matrix, rather than over y.\n \"\"\"\n st = state\n \n #must truncate in logp otherwise sampling gets unstable\n if (val < st.Rho_min) or (val > st.Rho_max):\n return np.array([-np.inf])\n \n PsiRho = st.Psi_1(val, st.W)\n logdet = splogdet(PsiRho)\n \n eta = st.Y - st.XBetas - st.DeltaAlphas\n kernel = spdot(eta.T, spsolve(PsiRho, eta)) / st.Sigma2\n\n return -.5*logdet -.5 * kernel + st.Log_Rho0(val)\n\ndef logp_lambda_cov(state, val):\n \"\"\"\n The logp for upper level spatial parameters in this case has the same form\n as a multivariate normal distribution, sampled over the variance matrix,\n rather than over Y.\n \"\"\"\n st = state\n\n #must truncate\n if (val < st.Lambda_min) or (val > st.Lambda_max):\n return np.array([-np.inf])\n\n PsiLambda = st.Psi_2(val, st.M)\n\n logdet = splogdet(PsiLambda)\n\n kernel = spdot(st.Alphas.T, spsolve(PsiLambda, st.Alphas)) / st.Tau2\n\n return -.5*logdet - .5*kernel + st.Log_Lambda0(val)\n\ndef logp_lambda_prec(state, val):\n \"\"\"\n Compute the log likelihood of the upper-level spatial correlation parameter using \n sparse operations and the precision matrix, rather than the covariance matrix. \n \"\"\"\n st = state\n\n if (val < st.Lambda_min) or (val > st.Lambda_max):\n return np.array([-np.inf])\n\n PsiLambdai = st.Psi_2i(val, st.M, sparse=True)\n logdet = -splogdet(PsiLambdai) #negative because precision\n\n kernel = spdot(spdot(st.Alphas.T, PsiLambdai), st.Alphas) / st.Tau2\n\n return -.5 * logdet - .5 * kernel + st.Log_Lambda0(val)\n\ndef logp_rho_prec(state, val):\n \"\"\"\n Compute the log likelihood of the lower-level spatial correlation parameter using\n sparse operations and the precision matrix, rather than the covariance matrix. \n \"\"\"\n st = state\n\n if (val < st.Rho_min) or (val > st.Rho_max):\n return np.array([-np.inf])\n\n PsiRhoi = st.Psi_1i(val, st.W, sparse=True)\n logdet = -splogdet(PsiRhoi)\n\n eta = st.Y - st.XBetas - st.DeltaAlphas\n\n kernel = spdot(spdot(eta.T, PsiRhoi), eta) / st.Sigma2\n\n return -.5 * logdet - .5 * kernel + st.Log_Rho0(val)\n\n\n"
] | [
[
"pandas.read_csv",
"numpy.abs",
"numpy.random.seed",
"numpy.asarray",
"numpy.squeeze",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"numpy.argsort",
"numpy.random.uniform",
"numpy.random.randint"
],
[
"numpy.minimum",
"numpy.random.seed",
"scipy.sparse.eye",
"sklearn.utils.check_array",
"numpy.arange",
"numpy.random.shuffle",
"numpy.ones",
"numpy.random.permutation",
"numpy.column_stack",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.empty"
],
[
"scipy.sparse.isspmatrix_csr",
"numpy.ones"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
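The set-theoretic weights helpers in the record above (w_union, w_intersection, and friends) all reduce to set algebra on neighbor dictionaries. A minimal self-contained sketch of the union semantics, using plain dicts rather than pysal's W class (the function name and data below are illustrative, not part of the record):

def union_neighbors(n1, n2):
    # every neighbor pair present in either input survives; weights become binary
    out = {i: set(ns) for i, ns in n1.items()}
    for i, ns in n2.items():
        out.setdefault(i, set()).update(ns)
    return {i: sorted(ns) for i, ns in out.items()}

n1 = {0: [1], 1: [0]}
n2 = {1: [2], 2: [1]}
assert union_neighbors(n1, n2) == {0: [1], 1: [0, 2], 2: [1]}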
fhaase2/sentence-use | [
"8acfde806d8bc866f7559e99a2cb6b684d96d796"
] | [
"train.py"
] | [
"import logging\n\nimport tensorflow as tf\n\nfrom sentence_use.data import read_data\nfrom sentence_use.models import SiameseUSE\nfrom sentence_use.parser import train_args\n\n\ndef train(args):\n \"\"\"Runs training script for given CLI arguments.\n\n :param args: Arguments\n :type args: argparse.Namespace\n \"\"\"\n tf.random.set_seed(args.seed)\n\n # x_train and x_val format is [(\"sentence1\", \"sentence2\")]\n # y_train and y_val format is [0.95]\n x_train, y_train = read_data(args.train_data)\n x_val, y_val = read_data(args.val_data)\n\n # Prepare train and val datasets as tf.data.Dataset\n train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n train_dataset = train_dataset.shuffle(len(x_train), seed=args.seed).batch(args.batch_size)\n val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))\n val_dataset = val_dataset.batch(args.batch_size)\n\n model = SiameseUSE(model_name_or_path=args.model_name_or_path,\n trainable=True)\n\n model.compile(\n optimizer=args.optimizer,\n loss=args.loss,\n metrics=args.metric,\n )\n model.optimizer.learning_rate.assign(float(args.lr))\n\n callbacks = [\n tf.keras.callbacks.TensorBoard(log_dir=\"./logs\"),\n ]\n\n model.fit(\n train_dataset,\n epochs=args.epochs,\n validation_data=val_dataset,\n callbacks=callbacks\n )\n\n model.model.save(args.output_path)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n args = train_args.parse_args()\n train(args)\n"
] | [
[
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.random.set_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
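The train.py in the record above builds its input pipeline with tf.data; a minimal sketch of the same shuffle-and-batch pattern on toy sentence pairs (assumes TensorFlow 2.x; the data values are invented):

import tensorflow as tf

x = [("a cat", "a feline"), ("up", "down")]  # sentence pairs
y = [0.9, 0.1]                               # similarity scores
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.shuffle(len(x), seed=0).batch(2)
for pairs, scores in ds:
    print(pairs.shape, scores.numpy())       # (2, 2) and a batch of floats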
xy-guo/mmdetection_kitti | [
"5cf3d2227531101dc45ea9f5b4f8c04ee124afcf",
"5cf3d2227531101dc45ea9f5b4f8c04ee124afcf",
"5cf3d2227531101dc45ea9f5b4f8c04ee124afcf",
"5cf3d2227531101dc45ea9f5b4f8c04ee124afcf"
] | [
"mmdet/utils/det3d/kitti_utils.py",
"mmdet/core/utils/debug_utils.py",
"mmdet/utils/det3d/pytorch_utils.py",
"mmdet/datasets/kitti_utils/target_assigner.py"
] | [
"\"\"\"utility functions for kitti dataset\"\"\"\nfrom scipy.spatial import Delaunay\nimport scipy\nimport numpy as np\nimport torch\n\n\ndef in_hull(p, hull):\n    \"\"\"\n    :param p: (N, K) test points\n    :param hull: (M, K) M corners of a box\n    :return (N) bool\n    \"\"\"\n    try:\n        if not isinstance(hull, Delaunay):\n            hull = Delaunay(hull)\n        flag = hull.find_simplex(p) >= 0\n    except scipy.spatial.qhull.QhullError:\n        print('Warning: not a hull %s' % str(hull))\n        flag = np.zeros(p.shape[0], dtype=bool)\n\n    return flag\n\n\ndef enlarge_box3d(boxes3d, extra_width):\n    \"\"\"\n    :param boxes3d: (N, 7) [x, y, z, w, l, h, ry] in LiDAR coords\n    \"\"\"\n    if isinstance(boxes3d, np.ndarray):\n        large_boxes3d = boxes3d.copy()\n    else:\n        large_boxes3d = boxes3d.clone()\n    large_boxes3d[:, 3:6] += extra_width * 2\n    large_boxes3d[:, 2] -= extra_width  # bugfixed: here should be minus, not add in LiDAR, 20190508\n    return large_boxes3d\n\n\ndef rotate_pc_along_z(pc, rot_angle):\n    \"\"\"\n    params pc: (N, 3+C), (N, 3) is in the LiDAR coordinate\n    params rot_angle: rad scalar\n    Output pc: updated pc with XYZ rotated\n    \"\"\"\n    cosval = np.cos(rot_angle)\n    sinval = np.sin(rot_angle)\n    rotmat = np.array([[cosval, -sinval], [sinval, cosval]])\n    pc[:, 0:2] = np.dot(pc[:, 0:2], rotmat)\n    return pc\n\n\ndef rotate_pc_along_z_torch(pc, rot_angle):\n    \"\"\"\n    :param pc: (N, 512, 3 + C) in the LiDAR coordinate\n    :param rot_angle: (N)\n    :return:\n    TODO: merge with rotate_pc_along_y_torch in bbox_transform.py\n    \"\"\"\n    cosa = torch.cos(rot_angle).view(-1, 1)  # (N, 1)\n    sina = torch.sin(rot_angle).view(-1, 1)  # (N, 1)\n\n    raw_1 = torch.cat([cosa, -sina], dim=1)  # (N, 2)\n    raw_2 = torch.cat([sina, cosa], dim=1)  # (N, 2)\n    R = torch.cat((raw_1.unsqueeze(dim=1), raw_2.unsqueeze(dim=1)), dim=1)  # (N, 2, 2)\n\n    pc_temp = pc[:, :, 0:2]  # (N, 512, 2)\n\n    pc[:, :, 0:2] = torch.matmul(pc_temp, R)  # (N, 512, 2)\n    return pc\n\n\ndef transform_lidar_to_cam(boxes_lidar):\n    \"\"\"\n    Only transform format, not exactly in camera coords\n    :param boxes_lidar: (N, 3 or 7) [x, y, z, w, l, h, ry] in LiDAR coords\n    :return: boxes_cam: (N, 3 or 7) [x, y, z, h, w, l, ry] in camera coords\n    \"\"\"\n    # boxes_cam = boxes_lidar.new_tensor(boxes_lidar.data)\n    boxes_cam = boxes_lidar.clone().detach()\n    boxes_cam[:, 0] = -boxes_lidar[:, 1]\n    boxes_cam[:, 1] = -boxes_lidar[:, 2]\n    boxes_cam[:, 2] = boxes_lidar[:, 0]\n    if boxes_cam.shape[1] > 3:\n        boxes_cam[:, [3, 4, 5]] = boxes_lidar[:, [5, 3, 4]]\n    return boxes_cam\n\n\ndef boxes3d_to_bev_torch(boxes3d):\n    \"\"\"\n    :param boxes3d: (N, 7) [x, y, z, h, w, l, ry] in camera coords\n    :return:\n        boxes_bev: (N, 5) [x1, y1, x2, y2, ry]\n    \"\"\"\n    boxes_bev = boxes3d.new(torch.Size((boxes3d.shape[0], 5)))\n\n    cu, cv = boxes3d[:, 0], boxes3d[:, 2]\n    half_l, half_w = boxes3d[:, 5] / 2, boxes3d[:, 4] / 2\n    boxes_bev[:, 0], boxes_bev[:, 1] = cu - half_l, cv - half_w\n    boxes_bev[:, 2], boxes_bev[:, 3] = cu + half_l, cv + half_w\n    boxes_bev[:, 4] = boxes3d[:, 6]\n    return boxes_bev\n\n\ndef boxes3d_to_bev_torch_lidar(boxes3d):\n    \"\"\"\n    :param boxes3d: (N, 7) [x, y, z, w, l, h, ry] in LiDAR coords\n    :return:\n        boxes_bev: (N, 5) [x1, y1, x2, y2, ry]\n    \"\"\"\n    boxes_bev = boxes3d.new(torch.Size((boxes3d.shape[0], 5)))\n\n    cu, cv = boxes3d[:, 0], boxes3d[:, 1]\n    half_l, half_w = boxes3d[:, 4] / 2, boxes3d[:, 3] / 2\n    boxes_bev[:, 0], boxes_bev[:, 1] = cu - half_w, cv - half_l\n    boxes_bev[:, 2], boxes_bev[:, 3] = cu + half_w, cv + half_l\n    boxes_bev[:, 4] = boxes3d[:, 6]\n    return boxes_bev\n",
"import os.path as osp\nimport torch\nimport mmcv\nimport cv2\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom mmcv.runner import master_only\nfrom mmdet.core.utils import tensor2imgs\nfrom mmdet.utils.det3d import box_np_ops\nimport numpy as np\n\n\ndef imshow_3d_det_bboxes(img,\n corners,\n labels,\n scores=None,\n class_names=None,\n score_thr=0,\n bbox_color='green',\n text_color='green',\n thickness=1,\n font_scale=0.5,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw 3d bboxes and class labels (with scores) on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n corners (ndarray): Bounding boxes (with scores), shaped (n, 8, 2).\n labels (ndarray): Labels of bboxes.\n class_names (list[str]): Names of each classes.\n score_thr (float): Minimum score of bboxes to be shown.\n bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str or None): The filename to write the image.\n \"\"\"\n assert corners.ndim == 3\n assert labels.ndim == 1\n assert corners.shape[0] == labels.shape[0]\n assert corners.shape[1] == 8\n img = mmcv.imread(img)\n\n if score_thr > 0:\n assert scores is not None\n assert scores.shape[0] == labels.shape[0]\n\n bbox_color = mmcv.color_val(bbox_color)\n text_color = mmcv.color_val(text_color)\n\n for corner, label in zip(corners, labels):\n corner = np.round(corner).astype(np.int32)\n bbox_color = (list(np.random.choice(range(256), size=3)))\n bbox_color = [int(bbox_color[0]), int(\n bbox_color[1]), int(bbox_color[2])]\n for i1, i2 in [(0, 1), (1, 2), (2, 3), (3, 0),\n (4, 5), (5, 6), (6, 7), (7, 4), (4, 6), (5, 7),\n (0, 4), (1, 5), (2, 6), (3, 7)]:\n cv2.line(\n img, tuple(corner[i1]), tuple(corner[i2]), bbox_color, thickness=thickness, lineType=cv2.LINE_AA)\n label_text = class_names[\n label] if class_names is not None else 'cls {}'.format(label)\n cv2.putText(img, label_text, (corner[0, 0], corner[0, 1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)\n\n if show:\n mmcv.imshow(img, win_name, wait_time)\n if out_file is not None:\n mmcv.imwrite(img, out_file)\n\n\[email protected]_module\nclass DebugLoggerHook(Hook):\n\n def __init__(self,\n log_dir=None,\n interval=10,\n enable=False):\n super(DebugLoggerHook, self).__init__()\n self.log_dir = log_dir\n self.enable = enable\n\n @master_only\n def after_train_iter(self, runner):\n if not self.enable:\n return\n # draw images\n data = runner.data_batch\n data = {k: v.data[0] for k, v in data.items()} # data of GPU:0\n # data = {k: v[0] for k, v in data.items()} # data of sample 0\n\n # available keys:\n # K, fL, gt_bboxes, gt_bboxes_ignore, gt_labels\n # img_meta, left_img, right_img, t2to3, velo2cam2, velo2cam3\n iter_idx = runner._iter\n img_metas = data['img_meta']\n left_img_tensor = data['left_img']\n right_img_tensor = data['right_img']\n gt_bboxes = data['gt_bboxes']\n gt_bboxes_3d = data['gt_bboxes_3d']\n intrinsics = data['K']\n gt_bboxes_ignore = data['gt_bboxes_ignore']\n\n left_imgs = tensor2imgs(\n left_img_tensor, **img_metas[0]['img_norm_cfg'])\n right_imgs = tensor2imgs(\n right_img_tensor, **img_metas[0]['img_norm_cfg'])\n mix_imgs = [(l * 0.65 + r * 0.35)\n for l, r in zip(left_imgs, right_imgs)]\n\n for idx in range(len(left_imgs)):\n img_show = 
mix_imgs[idx].copy()\n img_show_3d = mix_imgs[idx].copy()\n bboxes = gt_bboxes[idx].detach().cpu().numpy()\n bboxes_3d = gt_bboxes_3d[idx].detach().cpu().numpy()\n K = intrinsics[idx].detach().cpu().numpy()\n corners = box_np_ops.center_to_corner_box3d(\n bboxes_3d[:, :3], bboxes_3d[:, 3:6], bboxes_3d[:, 6], origin=[0.5, 1.0, 0.5], axis=1)\n bboxes_ignore = gt_bboxes_ignore[idx].detach().cpu().numpy()\n\n labels = data['gt_labels'][idx].detach().cpu().numpy()\n labels_ignore = np.array([0] * len(bboxes_ignore))\n swap = img_metas[idx]['swap']\n flip = img_metas[idx]['flip']\n filename = img_metas[idx]['left_filename']\n\n cv2.putText(img_show, \"swap \" + str(swap), (10, 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n cv2.putText(img_show, \"flip \" + str(flip), (10, 30),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n cv2.putText(img_show, filename, (10, 50),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n cv2.putText(img_show_3d, \"swap \" + str(swap), (10, 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n cv2.putText(img_show_3d, \"flip \" + str(flip), (10, 30),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n cv2.putText(img_show_3d, filename, (10, 50),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n mmcv.imshow_det_bboxes(\n img_show,\n bboxes,\n labels,\n class_names=['?', 'c', 'p', 'b'],\n bbox_color='green',\n score_thr=0.,\n show=False)\n\n corners_2d = corners @ K.T\n corners_2d = corners_2d[..., :2] / corners_2d[..., 2:3]\n imshow_3d_det_bboxes(\n img_show_3d,\n corners_2d,\n labels,\n class_names=['?', 'c', 'p', 'b'],\n bbox_color='green',\n score_thr=0.,\n show=False,\n out_file=osp.join(self.log_dir, f'debug_{iter_idx:06d}_{idx:02d}_3d.jpg'))\n mmcv.imshow_det_bboxes(\n img_show,\n bboxes_ignore,\n labels_ignore,\n class_names=['x'],\n bbox_color='red',\n score_thr=0.,\n show=False,\n out_file=osp.join(self.log_dir, f'debug_{iter_idx:06d}_{idx:02d}.jpg'))\n print(\"saving debug img to \", self.log_dir,\n iter_idx, idx, \"swap\", swap, filename)\n",
"\"\"\"Some pytorch layer wrappers, combining conv/fc with bn/in/nonlinear layers\"\"\"\nimport torch.nn as nn\nfrom typing import List, Tuple\nimport numpy as np\n\n\nclass SharedMLP(nn.Sequential):\n\n def __init__(\n self,\n args: List[int],\n *,\n bn: bool = False,\n activation=nn.ReLU(inplace=True),\n preact: bool = False,\n first: bool = False,\n name: str = \"\",\n instance_norm: bool = False\n ):\n super().__init__()\n\n for i in range(len(args) - 1):\n self.add_module(\n name + 'layer{}'.format(i),\n Conv2d(\n args[i],\n args[i + 1],\n bn=(not first or not preact or (i != 0)) and bn,\n activation=activation\n if (not first or not preact or (i != 0)) else None,\n preact=preact,\n instance_norm=instance_norm\n )\n )\n\n\nclass _ConvBase(nn.Sequential):\n\n def __init__(\n self,\n in_size,\n out_size,\n kernel_size,\n stride,\n padding,\n activation,\n bn,\n init,\n conv=None,\n batch_norm=None,\n bias=True,\n preact=False,\n name=\"\",\n instance_norm=False,\n instance_norm_func=None\n ):\n super().__init__()\n\n bias = bias and (not bn)\n conv_unit = conv(\n in_size,\n out_size,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n bias=bias\n )\n init(conv_unit.weight)\n if bias:\n nn.init.constant_(conv_unit.bias, 0)\n\n if bn:\n if not preact:\n bn_unit = batch_norm(out_size)\n else:\n bn_unit = batch_norm(in_size)\n if instance_norm:\n if not preact:\n in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)\n else:\n in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)\n\n if preact:\n if bn:\n self.add_module(name + 'bn', bn_unit)\n\n if activation is not None:\n self.add_module(name + 'activation', activation)\n\n if not bn and instance_norm:\n self.add_module(name + 'in', in_unit)\n\n self.add_module(name + 'conv', conv_unit)\n\n if not preact:\n if bn:\n self.add_module(name + 'bn', bn_unit)\n\n if activation is not None:\n self.add_module(name + 'activation', activation)\n\n if not bn and instance_norm:\n self.add_module(name + 'in', in_unit)\n\n\nclass _BNBase(nn.Sequential):\n\n def __init__(self, in_size, batch_norm=None, name=\"\"):\n super().__init__()\n self.add_module(name + \"bn\", batch_norm(in_size))\n\n nn.init.constant_(self[0].weight, 1.0)\n nn.init.constant_(self[0].bias, 0)\n\n\nclass BatchNorm1d(_BNBase):\n\n def __init__(self, in_size: int, *, name: str = \"\"):\n super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)\n\n\nclass BatchNorm2d(_BNBase):\n\n def __init__(self, in_size: int, name: str = \"\"):\n super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)\n\n\nclass Conv1d(_ConvBase):\n\n def __init__(\n self,\n in_size: int,\n out_size: int,\n *,\n kernel_size: int = 1,\n stride: int = 1,\n padding: int = 0,\n activation=nn.ReLU(inplace=True),\n bn: bool = False,\n init=nn.init.kaiming_normal_,\n bias: bool = True,\n preact: bool = False,\n name: str = \"\",\n instance_norm=False\n ):\n super().__init__(\n in_size,\n out_size,\n kernel_size,\n stride,\n padding,\n activation,\n bn,\n init,\n conv=nn.Conv1d,\n batch_norm=BatchNorm1d,\n bias=bias,\n preact=preact,\n name=name,\n instance_norm=instance_norm,\n instance_norm_func=nn.InstanceNorm1d\n )\n\n\nclass Conv2d(_ConvBase):\n\n def __init__(\n self,\n in_size: int,\n out_size: int,\n *,\n kernel_size: Tuple[int, int] = (1, 1),\n stride: Tuple[int, int] = (1, 1),\n padding: Tuple[int, int] = (0, 0),\n activation=nn.ReLU(inplace=True),\n bn: bool = False,\n init=nn.init.kaiming_normal_,\n bias: bool = True,\n preact: 
bool = False,\n            name: str = \"\",\n            instance_norm=False\n    ):\n        super().__init__(\n            in_size,\n            out_size,\n            kernel_size,\n            stride,\n            padding,\n            activation,\n            bn,\n            init,\n            conv=nn.Conv2d,\n            batch_norm=BatchNorm2d,\n            bias=bias,\n            preact=preact,\n            name=name,\n            instance_norm=instance_norm,\n            instance_norm_func=nn.InstanceNorm2d\n        )\n\n\nclass FC(nn.Sequential):\n\n    def __init__(\n            self,\n            in_size: int,\n            out_size: int,\n            *,\n            activation=nn.ReLU(inplace=True),\n            bn: bool = False,\n            init=None,\n            preact: bool = False,\n            name: str = \"\"\n    ):\n        super().__init__()\n\n        fc = nn.Linear(in_size, out_size, bias=not bn)\n        if init is not None:\n            init(fc.weight)\n        if not bn:\n            nn.init.constant_(fc.bias, 0)\n\n        if preact:\n            if bn:\n                self.add_module(name + 'bn', BatchNorm1d(in_size))\n\n            if activation is not None:\n                self.add_module(name + 'activation', activation)\n\n        self.add_module(name + 'fc', fc)\n\n        if not preact:\n            if bn:\n                self.add_module(name + 'bn', BatchNorm1d(out_size))\n\n            if activation is not None:\n                self.add_module(name + 'activation', activation)\n\n\ndef build_standard_annos_dict(bboxes_3d, bboxes_2d=None, labels=None, scores=None):\n    num_bbox = bboxes_3d.shape[0]\n    if num_bbox <= 0:\n        return {\n            'name': np.array([]), 'truncated': np.array([]), 'occluded': np.array([]),\n            'alpha': np.array([]), 'bbox': np.zeros([0, 4]), 'dimensions': np.zeros([0, 3]),\n            'location': np.zeros([0, 3]), 'rotation_y': np.array([]), 'score': np.array([]),\n        }\n    else:\n        anno = {'name': [], 'truncated': [], 'occluded': [], 'alpha': [], 'bbox': [], 'dimensions': [],\n                'location': [], 'rotation_y': [], 'score': []}\n\n        bboxes_2d = bboxes_2d if bboxes_2d is not None else np.full([num_bbox, 4], np.nan)\n        scores = scores if scores is not None else np.full([num_bbox], np.nan)\n        labels = labels if labels is not None else ['Car'] * num_bbox\n        for bbox_2d, bbox_3d, score, label in zip(\n                bboxes_2d, bboxes_3d, scores, labels):\n            # bbox[2:] = np.minimum(bbox[2:], ori_image_shape[::-1])\n            # bbox[:2] = np.maximum(bbox[:2], [0, 0])\n            anno[\"name\"].append(label)\n            anno[\"truncated\"].append(np.nan)\n            anno[\"occluded\"].append(np.nan)\n            anno[\"alpha\"].append(-np.arctan2(bbox_3d[0], bbox_3d[2]) + bbox_3d[6])\n            anno[\"bbox\"].append(bbox_2d)\n            anno[\"dimensions\"].append(bbox_3d[3:6])\n            anno[\"location\"].append(bbox_3d[:3])\n            anno[\"rotation_y\"].append(bbox_3d[6])\n            anno[\"score\"].append(score)\n        anno = {k: np.stack(v) for k, v in anno.items()}\n        return anno\n",
"import numpy as np\nimport numpy.random as npr\nimport utils.box_np_ops as box_np_ops\nimport logging\n\n\ndef unmap(data, count, inds, fill=0):\n    \"\"\"Unmap a subset of item (data) back to the original set of items (of\n    size count)\"\"\"\n    if count == len(inds):\n        return data\n\n    if len(data.shape) == 1:\n        ret = np.empty((count, ), dtype=data.dtype)\n        ret.fill(fill)\n        ret[inds] = data\n    else:\n        ret = np.empty((count, ) + data.shape[1:], dtype=data.dtype)\n        ret.fill(fill)\n        ret[inds, :] = data\n    return ret\n\n\nclass AnchorGeneratorRange(object):\n    def __init__(self, anchor_ranges, sizes=((1.6, 3.9, 1.56),), rotations=(0, np.pi / 2), class_name=None,\n                 match_threshold=-1, unmatch_threshold=-1, dtype=np.float32):\n        self._sizes = sizes\n        self._anchor_ranges = anchor_ranges\n        self._rotations = rotations\n        self._dtype = dtype\n        self._class_name = class_name\n        self._match_threshold = match_threshold\n        self._unmatch_threshold = unmatch_threshold\n\n    @property\n    def class_name(self):\n        return self._class_name\n\n    @property\n    def match_threshold(self):\n        return self._match_threshold\n\n    @property\n    def unmatch_threshold(self):\n        return self._unmatch_threshold\n\n    @property\n    def num_anchors_per_localization(self):\n        num_rot = len(self._rotations)\n        num_size = np.array(self._sizes).reshape([-1, 3]).shape[0]\n        return num_rot * num_size\n\n    def generate(self, feature_map_size):\n        return box_np_ops.create_anchors_3d_range(feature_map_size, self._anchor_ranges, self._sizes,\n                                                  self._rotations, self._dtype)\n\n\nclass TargetAssigner(object):\n    def __init__(self, anchor_generators, pos_fraction, sample_size, region_similarity_fn_name, box_coder, logger=None):\n        super().__init__()\n        self.anchor_generators = anchor_generators\n        self.pos_fraction = pos_fraction if pos_fraction >= 0 else None\n        self.sample_size = sample_size\n        self.region_similarity_calculator = getattr(self, region_similarity_fn_name)\n        self.box_coder = box_coder\n        self.logger = logger if logger is not None else logging.getLogger(__name__)\n\n    def generate_anchors(self, feature_map_size):\n        \"\"\"generate anchors according to the feature map size\n\n        :param feature_map_size: array [3]\n        :return: a dict with the following keys:\n            anchors: array [Nx, Ny, Nz, Na, 7],\n            matched_thresholds: array [Nx*Ny*Nz*Na];\n            unmatched_thresholds: array [Nx*Ny*Nz*Na]\n        \"\"\"\n        anchors_list = []\n        matched_thresholds = [a.match_threshold for a in self.anchor_generators]\n        unmatched_thresholds = [a.unmatch_threshold for a in self.anchor_generators]\n        match_list, unmatch_list = [], []\n        for anchor_generator, match_thresh, unmatch_thresh in zip(self.anchor_generators,\n                                                                  matched_thresholds, unmatched_thresholds):\n            anchors = anchor_generator.generate(feature_map_size)\n            anchors = anchors.reshape([*anchors.shape[:3], -1, 7])\n            anchors_list.append(anchors)\n            num_anchors = np.prod(anchors.shape[:-1])\n            match_list.append(np.full([num_anchors], match_thresh, anchors.dtype))\n            unmatch_list.append(np.full([num_anchors], unmatch_thresh, anchors.dtype))\n        anchors = np.concatenate(anchors_list, axis=-2)  # TODO: may be a bug? 
why -2\n        matched_thresholds = np.concatenate(match_list, axis=0)\n        unmatched_thresholds = np.concatenate(unmatch_list, axis=0)\n        return {\n            \"anchors\": anchors,\n            \"matched_thresholds\": matched_thresholds,\n            \"unmatched_thresholds\": unmatched_thresholds\n        }\n\n    def generate_anchors_dict(self, feature_map_size):\n        \"\"\"generate anchors according to the feature map size.\n\n        NOTE that the generated anchors are not concatenated, instead saved in a dictionary separately for each class.\n\n        :param feature_map_size: array [3]\n        :return: anchors_dict\n        \"\"\"\n        anchors_list = []\n        matched_thresholds = [a.match_threshold for a in self.anchor_generators]\n        unmatched_thresholds = [a.unmatch_threshold for a in self.anchor_generators]\n        match_list, unmatch_list = [], []\n        anchors_dict = {a.class_name: {} for a in self.anchor_generators}\n        for anchor_generator, match_thresh, unmatch_thresh in zip(self.anchor_generators,\n                                                                  matched_thresholds, unmatched_thresholds):\n            anchors = anchor_generator.generate(feature_map_size)\n            anchors = anchors.reshape([*anchors.shape[:3], -1, 7])\n            anchors_list.append(anchors)\n            num_anchors = np.prod(anchors.shape[:-1])\n            match_list.append(np.full([num_anchors], match_thresh, anchors.dtype))\n            unmatch_list.append(np.full([num_anchors], unmatch_thresh, anchors.dtype))\n            class_name = anchor_generator.class_name\n            anchors_dict[class_name][\"anchors\"] = anchors\n            anchors_dict[class_name][\"matched_thresholds\"] = match_list[-1]\n            anchors_dict[class_name][\"unmatched_thresholds\"] = unmatch_list[-1]\n        return anchors_dict\n\n    @staticmethod\n    def nearest_iou_similarity(boxes1, boxes2):\n        boxes1_bv = box_np_ops.rbbox2d_to_near_bbox(boxes1)\n        boxes2_bv = box_np_ops.rbbox2d_to_near_bbox(boxes2)\n        ret = box_np_ops.iou_jit(boxes1_bv, boxes2_bv, eps=0.0)\n        return ret\n\n    def assign_v2(self, anchors_dict, gt_boxes, anchors_mask=None, gt_classes=None, gt_names=None):\n        \"\"\"assign ground truth bounding boxes to the anchors\n\n        :param anchors_dict: {'Car':\n                                {'anchors': [Nx, Ny, Nz, N_a, 7],\n                                 'matched_thresholds': [],\n                                 'unmatched_thresholds': []},\n                              ...}\n        :param gt_boxes: [N_b, 7]\n        :param anchors_mask: anchor filter mask or None\n        :param gt_classes: gt class id (start from 1) or None\n        :param gt_names: string array [N_b], 'Car'/'Cyclist'/etc...\n        :return: target_dict\n        \"\"\"\n        # prune anchor function, to filter some anchors for acceleration.\n        prune_anchor_fn = None if anchors_mask is None else lambda _: np.where(anchors_mask)[0]\n\n        def similarity_fn(anchors, gt_boxes):\n            # return similarity matrix\n            anchors_rbv = anchors[:, [0, 1, 3, 4, 6]]\n            gt_boxes_rbv = gt_boxes[:, [0, 1, 3, 4, 6]]\n            return self.region_similarity_calculator(anchors_rbv, gt_boxes_rbv)\n\n        def box_encoding_fn(boxes, anchors):\n            return self.box_coder.encode_np(boxes, anchors)\n\n        targets_list = []\n        for class_name, anchor_dict in anchors_dict.items():\n            mask = np.array([c == class_name for c in gt_names], dtype=np.bool_)\n            targets = self.create_target_np(\n                # anchor_dict[\"anchors\"].reshape(-1, self.box_coder.code_size),\n                anchor_dict[\"anchors\"].reshape(-1, 7),\n                gt_boxes[mask],\n                similarity_fn,\n                box_encoding_fn,\n                prune_anchor_fn=prune_anchor_fn,\n                gt_classes=gt_classes[mask],\n                matched_threshold=anchor_dict[\"matched_thresholds\"],\n                unmatched_threshold=anchor_dict[\"unmatched_thresholds\"],\n                positive_fraction=self.pos_fraction,\n                rpn_batch_size=self.sample_size,\n                norm_by_num_examples=False,\n                box_code_size=self.box_coder.code_size\n            )\n            targets_list.append(targets)\n            feature_map_size = 
anchor_dict[\"anchors\"].shape[:3]\n        targets_dict = {\n            \"labels\": [t[\"labels\"] for t in targets_list],\n            \"bbox_targets\": [t[\"bbox_targets\"] for t in targets_list],\n            \"bbox_outside_weights\": [t[\"bbox_outside_weights\"] for t in targets_list],\n        }\n        targets_dict[\"bbox_targets\"] = np.concatenate([v.reshape(*feature_map_size, -1, self.box_coder.code_size)\n                                                       for v in targets_dict[\"bbox_targets\"]], axis=-2)\n        targets_dict[\"labels\"] = np.concatenate([v.reshape(*feature_map_size, -1)\n                                                 for v in targets_dict[\"labels\"]], axis=-1)\n        targets_dict[\"bbox_outside_weights\"] = np.concatenate([v.reshape(*feature_map_size, -1)\n                                                               for v in targets_dict[\"bbox_outside_weights\"]], axis=-1)\n\n        targets_dict[\"bbox_targets\"] = targets_dict[\"bbox_targets\"].reshape(-1, self.box_coder.code_size)\n        targets_dict[\"labels\"] = targets_dict[\"labels\"].reshape(-1)\n        targets_dict[\"bbox_outside_weights\"] = targets_dict[\"bbox_outside_weights\"].reshape(-1)\n\n        return targets_dict\n\n    def create_target_np(self, all_anchors,\n                         gt_boxes,\n                         similarity_fn,\n                         box_encoding_fn,\n                         prune_anchor_fn=None,\n                         gt_classes=None,\n                         matched_threshold=0.6,\n                         unmatched_threshold=0.45,\n                         bbox_inside_weight=None,\n                         positive_fraction=None,\n                         rpn_batch_size=300,\n                         norm_by_num_examples=False,\n                         box_code_size=7):\n        \"\"\"Modified from FAIR detectron.\n        Args:\n            all_anchors: [num_of_anchors, box_ndim(7)] float tensor.\n            gt_boxes: [num_gt_boxes, box_ndim(7)] float tensor.\n            similarity_fn: a function, accept anchors and gt_boxes, return\n                similarity matrix(such as IoU).\n            box_encoding_fn: a function, accept gt_boxes and anchors, return\n                box encodings(offsets).\n            prune_anchor_fn: a function, accept anchors, return indices that\n                indicate valid anchors. this function could filter part of anchors\n                for acceleration.\n            gt_classes: [num_gt_boxes] int tensor. indicate gt classes id, must\n                start with 1.\n            matched_threshold: float, iou greater than matched_threshold will\n                be treated as positives.\n            unmatched_threshold: float, iou smaller than unmatched_threshold will\n                be treated as negatives.\n            bbox_inside_weight: unused\n            positive_fraction: [0-1] float or None. if not None, we will try to\n                keep ratio of pos/neg equal to positive_fraction when sampling.\n                if there are not enough positives, it fills the rest with negatives\n            rpn_batch_size: int. sample size\n            norm_by_num_examples: bool. norm box_weight by number of examples, but\n                I recommend to do this outside.\n        Returns:\n            a dict with:\n            \"labels\": array [num_of_anchors], -1 denotes uncertain, 0 bg, 1,2,3... 
fg;\n \"bbox_targets\": array [num_of_anchors, 7], regression/cls target from boxencoder\n \"bbox_outside_weights\": array [num_of_anchors]\n \"assigned_anchors_overlap\": array [num_of_fg_anchors] iou value (overlap) with the corresponding gt boxes\n \"gt_pos_ids\": [num_of_fg_anchors] corresponding gt box ids\n NOTE that an anchor seems only to be assigned with a single gt target box (with the maximum overlap)\n \"\"\"\n total_anchors = all_anchors.shape[0]\n if prune_anchor_fn is not None:\n inds_inside = prune_anchor_fn(all_anchors)\n anchors = all_anchors[inds_inside, :]\n if not isinstance(matched_threshold, float):\n matched_threshold = matched_threshold[inds_inside]\n if not isinstance(unmatched_threshold, float):\n unmatched_threshold = unmatched_threshold[inds_inside]\n else:\n anchors = all_anchors\n inds_inside = None\n num_inside = len(inds_inside) if inds_inside is not None else total_anchors\n box_ndim = all_anchors.shape[1]\n self.logger.info('total_anchors: {}'.format(total_anchors))\n self.logger.info('inds_inside: {}'.format(num_inside))\n self.logger.info('anchors.shape: {}'.format(anchors.shape))\n if gt_classes is None:\n gt_classes = np.ones([gt_boxes.shape[0]], dtype=np.int32) # assume only one class?\n # Compute anchor labels:\n # label=1 is positive, 0 is negative, -1 is don't care (ignore)\n labels = np.empty((num_inside,), dtype=np.int32)\n gt_ids = np.empty((num_inside,), dtype=np.int32)\n labels.fill(-1)\n gt_ids.fill(-1)\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\n # Compute overlaps between the anchors and the gt boxes overlaps\n anchor_by_gt_overlap = similarity_fn(anchors, gt_boxes)\n # Map from anchor to gt box that has highest overlap\n anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)\n # For each anchor, amount of overlap with most overlapping gt box\n anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),\n anchor_to_gt_argmax] #\n # Map from gt box to an anchor that has highest overlap\n gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)\n # For each gt box, amount of overlap with most overlapping anchor\n gt_to_anchor_max = anchor_by_gt_overlap[\n gt_to_anchor_argmax,\n np.arange(anchor_by_gt_overlap.shape[1])]\n # must remove gt which doesn't match any anchor.\n empty_gt_mask = gt_to_anchor_max == 0\n gt_to_anchor_max[empty_gt_mask] = -1\n # Find all anchors that share the max overlap amount\n # (this includes many ties)\n anchors_with_max_overlap = np.where(\n anchor_by_gt_overlap == gt_to_anchor_max)[0]\n # Fg label: for each gt use anchors with highest overlap\n # (including ties)\n gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap] # find the nearest gt box indices\n labels[anchors_with_max_overlap] = gt_classes[gt_inds_force] # note here we use gt classes instead of 1\n gt_ids[anchors_with_max_overlap] = gt_inds_force\n # Fg label: above threshold IOU\n pos_inds = anchor_to_gt_max >= matched_threshold\n gt_inds = anchor_to_gt_argmax[pos_inds]\n labels[pos_inds] = gt_classes[gt_inds]\n gt_ids[pos_inds] = gt_inds\n bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]\n else:\n # labels[:] = 0\n bg_inds = np.arange(num_inside)\n fg_inds = np.where(labels > 0)[0]\n fg_max_overlap = None\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\n fg_max_overlap = anchor_to_gt_max[fg_inds]\n gt_pos_ids = gt_ids[fg_inds]\n # bg_inds = np.where(anchor_to_gt_max < unmatched_threshold)[0]\n # bg_inds = np.where(labels == 0)[0]\n # subsample positive labels if we have too many\n if positive_fraction is not None:\n 
num_fg = int(positive_fraction * rpn_batch_size)\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n fg_inds = np.where(labels > 0)[0]\n\n # subsample negative labels if we have too many\n # (samples with replacement, but since the set of bg inds is large most\n # samples will not have repeats)\n num_bg = rpn_batch_size - np.sum(labels > 0)\n # print(num_fg, num_bg, len(bg_inds) )\n if len(bg_inds) > num_bg:\n enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]\n labels[enable_inds] = 0\n bg_inds = np.where(labels == 0)[0]\n else:\n if len(gt_boxes) == 0 or anchors.shape[0] == 0:\n labels[:] = 0\n else:\n labels[bg_inds] = 0\n # re-enable anchors_with_max_overlap, some anchors with max-gt-overlap may be inside bg_inds\n labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]\n bbox_targets = np.zeros(\n (num_inside, box_code_size), dtype=all_anchors.dtype)\n if len(gt_boxes) > 0 and anchors.shape[0] > 0:\n # print(anchors[fg_inds, :].shape, gt_boxes[anchor_to_gt_argmax[fg_inds], :].shape)\n # bbox_targets[fg_inds, :] = box_encoding_fn(\n # anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :])\n bbox_targets[fg_inds, :] = box_encoding_fn(\n gt_boxes[anchor_to_gt_argmax[fg_inds], :], anchors[fg_inds, :])\n # Bbox regression loss has the form:\n # loss(x) = weight_outside * L(weight_inside * x)\n # Inside weights allow us to set zero loss on an element-wise basis\n # Bbox regression is only trained on positive examples so we set their\n # weights to 1.0 (or otherwise if config is different) and 0 otherwise\n # NOTE: we don't need bbox_inside_weights, remove it.\n # bbox_inside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)\n # bbox_inside_weights[labels == 1, :] = [1.0] * box_ndim\n\n # The bbox regression loss only averages by the number of images in the\n # mini-batch, whereas we need to average by the total number of example\n # anchors selected\n # Outside weights are used to scale each element-wise loss so the final\n # average over the mini-batch is correct\n # bbox_outside_weights = np.zeros((num_inside, box_ndim), dtype=np.float32)\n bbox_outside_weights = np.zeros((num_inside,), dtype=all_anchors.dtype)\n # uniform weighting of examples (given non-uniform sampling)\n if norm_by_num_examples:\n num_examples = np.sum(labels >= 0) # neg + pos\n num_examples = np.maximum(1.0, num_examples)\n bbox_outside_weights[labels > 0] = 1.0 / num_examples\n else:\n bbox_outside_weights[labels > 0] = 1.0\n # bbox_outside_weights[labels == 0, :] = 1.0 / num_examples\n\n # Map up to original set of anchors\n if inds_inside is not None:\n labels = unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n # bbox_inside_weights = unmap(\n # bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = unmap(\n bbox_outside_weights, total_anchors, inds_inside, fill=0)\n # return labels, bbox_targets, bbox_outside_weights\n ret = {\n \"labels\": labels,\n \"bbox_targets\": bbox_targets,\n \"bbox_outside_weights\": bbox_outside_weights,\n \"assigned_anchors_overlap\": fg_max_overlap,\n \"positive_gt_id\": gt_pos_ids,\n }\n if inds_inside is not None:\n ret[\"assigned_anchors_inds\"] = inds_inside[fg_inds]\n else:\n ret[\"assigned_anchors_inds\"] = fg_inds\n return ret\n\n @property\n def num_anchors_per_location(self):\n num = 0\n for a_generator in self.anchor_generators:\n num += 
a_generator.num_anchors_per_localization\n return num\n\n @property\n def classes(self):\n return [a.class_name for a in self.anchor_generators]\n"
] | [
[
"numpy.dot",
"torch.Size",
"torch.cat",
"torch.sin",
"scipy.spatial.Delaunay",
"numpy.cos",
"numpy.sin",
"torch.matmul",
"numpy.array",
"numpy.zeros",
"torch.cos"
],
[
"numpy.round"
],
[
"torch.nn.init.constant_",
"numpy.stack",
"numpy.full",
"numpy.arctan2",
"torch.nn.Linear",
"torch.nn.ReLU",
"numpy.array",
"numpy.zeros"
],
[
"numpy.sum",
"numpy.maximum",
"numpy.arange",
"numpy.ones",
"numpy.concatenate",
"numpy.full",
"numpy.prod",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
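The target assigner in the record above leans on its unmap helper to scatter labels computed over the pruned anchor set back onto the full anchor set. A condensed, behaviorally equivalent one-dimensional sketch with a toy check (the values are invented):

import numpy as np

def unmap(data, count, inds, fill=0):
    # allocate the full-size array, then scatter the subset back into place
    ret = np.full((count,) + data.shape[1:], fill, dtype=data.dtype)
    ret[inds] = data
    return ret

labels = np.array([1, 0])  # labels for the 2 anchors that survived pruning
full = unmap(labels, count=5, inds=np.array([1, 3]), fill=-1)
assert full.tolist() == [-1, 1, -1, 0, -1]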
yiwc/robotics-world | [
"48efda3a8ea6741b35828b02860f45753252e376",
"48efda3a8ea6741b35828b02860f45753252e376",
"48efda3a8ea6741b35828b02860f45753252e376"
] | [
"metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_bin_picking_v2.py",
"metaworld/policies/sawyer_push_back_v2_policy.py",
"metaworld/policies/sawyer_reach_wall_v2_policy.py"
] | [
"import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs import reward_utils\nfrom metaworld.envs.asset_path_utils import full_v2_path_for\nfrom metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set\n\n\nclass SawyerBinPickingEnvV2(SawyerXYZEnv):\n \"\"\"\n Motivation for V2:\n V1 was often unsolvable because the cube could be located outside of\n the starting bin. It could even be near the base of the Sawyer and out\n of reach of the gripper. V2 changes the `obj_low` and `obj_high` bounds\n to fix this.\n Changelog from V1 to V2:\n - (7/20/20) Changed object initialization space\n - (7/24/20) Added Byron's XML changes\n - (11/23/20) Updated reward function to new pick-place style\n \"\"\"\n\n def __init__(self):\n\n hand_low = (-0.5, 0.40, 0.07)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.21, 0.65, 0.02)\n obj_high = (-0.03, 0.75, 0.02)\n # Small bounds around the center of the target bin\n goal_low = np.array([0.1199, 0.699, -0.001])\n goal_high = np.array([0.1201, 0.701, +0.001])\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_angle': 0.3,\n 'obj_init_pos': np.array([-0.12, 0.7, 0.02]),\n 'hand_init_pos': np.array((0, 0.6, 0.2)),\n }\n self.goal = np.array([0.12, 0.7, 0.02])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self._target_to_obj_init = None\n\n self.hand_and_obj_space = Box(\n np.hstack((self.hand_low, obj_low)),\n np.hstack((self.hand_high, obj_high)),\n )\n\n self.goal_and_obj_space = Box(\n np.hstack((goal_low[:2], obj_low[:2])),\n np.hstack((goal_high[:2], obj_high[:2])),\n )\n\n self.goal_space = Box(goal_low, goal_high)\n self._random_reset_space = Box(\n np.hstack((obj_low, goal_low)),\n np.hstack((obj_high, goal_high)),\n )\n\n @property\n def model_name(self):\n return full_v2_path_for('sawyer_xyz/sawyer_bin_picking.xml')\n\n @_assert_task_is_set\n def evaluate_state(self, obs, action):\n (\n reward,\n near_object,\n grasp_success,\n obj_to_target,\n grasp_reward,\n in_place_reward\n ) = self.compute_reward(action, obs)\n\n info = {\n 'success': float(obj_to_target <= 0.05),\n 'near_object': float(near_object),\n 'grasp_success': float(grasp_success),\n 'grasp_reward': grasp_reward,\n 'in_place_reward': in_place_reward,\n 'obj_to_target': obj_to_target,\n 'unscaled_reward': reward,\n }\n\n return reward, info\n\n @property\n def _target_site_config(self):\n return []\n\n def _get_id_main_object(self):\n return self.unwrapped.model.geom_name2id('objGeom')\n\n def _get_pos_objects(self):\n return self.get_body_com('obj')\n\n def _get_quat_objects(self):\n return self.sim.data.get_body_xquat('obj')\n\n def reset_model(self):\n self._reset_hand()\n self._target_pos = self.goal.copy()\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.obj_init_angle = self.init_config['obj_init_angle']\n obj_height = self.get_body_com('obj')[2]\n\n if self.random_init:\n self.obj_init_pos = self._get_state_rand_vec()[:2]\n self.obj_init_pos = np.concatenate((self.obj_init_pos, [obj_height]))\n\n self._set_obj_xyz(self.obj_init_pos)\n self._target_pos = self.get_body_com('bin_goal')\n self._target_to_obj_init = None\n\n return self._get_obs()\n\n def compute_reward(self, action, obs):\n hand = obs[:3]\n obj = obs[4:7]\n\n target_to_obj = np.linalg.norm(obj - self._target_pos)\n if self._target_to_obj_init is None:\n 
self._target_to_obj_init = target_to_obj\n\n        in_place = reward_utils.tolerance(\n            target_to_obj,\n            bounds=(0, self.TARGET_RADIUS),\n            margin=self._target_to_obj_init,\n            sigmoid='long_tail',\n        )\n\n        threshold = 0.03\n        radii = [\n            np.linalg.norm(hand[:2] - self.obj_init_pos[:2]),\n            np.linalg.norm(hand[:2] - self._target_pos[:2])\n        ]\n        # floor is a *pair* of 3D funnels centered on (1) the object's initial\n        # position and (2) the desired final position\n        floor = min([\n            0.02 * np.log(radius - threshold) + 0.2\n            if radius > threshold else 0.0\n            for radius in radii\n        ])\n        # prevent the hand from running into the edge of the bins by keeping\n        # it above the \"floor\"\n        above_floor = 1.0 if hand[2] >= floor else reward_utils.tolerance(\n            max(floor - hand[2], 0.0),\n            bounds=(0.0, 0.01),\n            margin=0.05,\n            sigmoid='long_tail',\n        )\n\n        object_grasped = self._gripper_caging_reward(\n            action,\n            obj,\n            obj_radius=0.015,\n            pad_success_thresh=0.05,\n            object_reach_radius=0.01,\n            xz_thresh=0.01,\n            desired_gripper_effort=0.7,\n            high_density=True,\n        )\n        reward = reward_utils.hamacher_product(object_grasped, in_place)\n\n        near_object = np.linalg.norm(obj - hand) < 0.04\n        pinched_without_obj = obs[3] < 0.43\n        lifted = obj[2] - 0.02 > self.obj_init_pos[2]\n        # Increase reward when properly grabbed obj\n        grasp_success = near_object and lifted and not pinched_without_obj\n        if grasp_success:\n            reward += 1. + 5. * reward_utils.hamacher_product(\n                above_floor, in_place\n            )\n        # Maximize reward on success\n        if target_to_obj < self.TARGET_RADIUS:\n            reward = 10.\n\n        return (\n            reward,\n            near_object,\n            grasp_success,\n            target_to_obj,\n            object_grasped,\n            in_place\n        )\n",
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerPushBackV2Policy(Policy):\n\n    @staticmethod\n    @assert_fully_parsed\n    def _parse_obs(obs):\n        return {\n            'hand_pos': obs[:3],\n            'unused_1': obs[3],\n            'puck_pos': obs[4:7],\n            'unused_2': obs[7:-3],\n            'goal_pos': obs[-3:],\n        }\n\n    def get_action(self, obs):\n        o_d = self._parse_obs(obs)\n\n        action = Action({\n            'delta_pos': np.arange(3),\n            'grab_effort': 3\n        })\n\n        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)\n        action['grab_effort'] = self._grab_effort(o_d)\n\n        return action.array\n\n    @staticmethod\n    def _desired_pos(o_d):\n        pos_curr = o_d['hand_pos']\n        pos_puck = o_d['puck_pos']\n\n        # If error in the XY plane is greater than 0.04, place end effector above the puck\n        if np.linalg.norm(pos_curr[:2] - pos_puck[:2]) > 0.04:\n            return pos_puck + np.array([0., 0., 0.3])\n        # Once XY error is low enough, drop end effector down on top of puck\n        elif abs(pos_curr[2] - pos_puck[2]) > 0.055:\n            return pos_puck\n        # Move to the goal\n        else:\n            return o_d['goal_pos'] + np.array([0.0, 0.0, pos_curr[2]])\n\n    @staticmethod\n    def _grab_effort(o_d):\n        pos_curr = o_d['hand_pos']\n        pos_puck = o_d['puck_pos']\n\n        if np.linalg.norm(pos_curr[:2] - pos_puck[:2]) > 0.04 or abs(pos_curr[2] - pos_puck[2]) > 0.085:\n            return 0.\n        # While end effector is moving down toward the puck, begin closing the grabber\n        else:\n            return 0.9\n",
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, move\n\n\nclass SawyerReachWallV2Policy(Policy):\n\n @staticmethod\n def _parse_obs(obs):\n return {\n 'hand_pos': obs[:3],\n 'unused_1': obs[3],\n 'puck_pos': obs[4:7],\n 'unused_2': obs[7:-3],\n 'goal_pos': obs[-3:],\n }\n\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_effort': 3\n })\n\n action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=5.)\n action['grab_effort'] = 0.\n\n return action.array\n\n @staticmethod\n def _desired_pos(o_d):\n pos_hand = o_d['hand_pos']\n pos_goal = o_d['goal_pos']\n # if the hand is going to run into the wall, go up while still moving\n # towards the goal position.\n if(-0.1 <= pos_hand[0] <= 0.3 and\n 0.60 <= pos_hand[1] <= 0.80 and\n pos_hand[2] < 0.25):\n return pos_goal + np.array([0., 0., 1.])\n return pos_goal\n"
] | [
[
"numpy.hstack",
"numpy.log",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.array"
],
[
"numpy.arange",
"numpy.array",
"numpy.linalg.norm"
],
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
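The scripted metaworld policies in the record above share one pattern: a proportional step toward a desired position, gated by distance thresholds. A minimal sketch of such a step (an illustration under assumed clipping bounds, not metaworld's actual move helper):

import numpy as np

def move_toward(from_xyz, to_xyz, p=10.0, max_step=1.0):
    # proportional control: step toward the target, clipped to the action range
    delta = p * (np.asarray(to_xyz) - np.asarray(from_xyz))
    return np.clip(delta, -max_step, max_step)

print(move_toward([0.0, 0.6, 0.2], [0.1, 0.7, 0.2]))  # [1. 1. 0.]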
manipopopo/C5 | [
"154eb38c330e65476ddb77836948a28237f23c88"
] | [
"src/dataset.py"
] | [
"\"\"\"\n##### Copyright 2021 Google LLC. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n\nfrom os.path import join\nfrom os import listdir\nfrom os import path\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nimport logging\nimport json\nfrom src import ops\nimport random\n\n\nclass Data(Dataset):\n  def __init__(self, imgfiles, data_num=1, mode='training', input_size=64,\n               load_hist=True):\n    \"\"\" Data constructor\n\n    Args:\n      imgfiles: a list of full filenames to be used by the dataloader. If the\n        mode is set to 'training', each filename in the list should have a\n        metadata json file with a postfix '_metadata'. For example, if the\n        filename is 'data/image1_sensorname_canon.png', the metadata file\n        should be 'data/image1_sensorname_canon_metadata.json'. Each\n        metadata file should contain a key named 'illuminant_color_raw' or\n        'gt_ill' that contains the true rgb illuminant color.\n      data_num: number of input histograms to C5 network (m in the paper);\n        default is 1.\n      mode: 'training' or 'testing'. In the training mode, ground-truth\n        illuminant information should be loaded; while for the testing mode it\n        is optional. Default is 'training'.\n      input_size: histogram dimensions (number of bins).\n      load_hist: boolean flag to load histogram file if it exists; default is\n        true.\n\n    Returns:\n      Dataset loader object with the selected settings.\n    \"\"\"\n\n    assert (data_num >= 1)\n    assert (mode == 'training' or mode == 'testing')\n    assert (input_size % 2 == 0)\n    self.imgfiles = imgfiles\n    self.input_size = input_size\n    self.additional_data_num = data_num - 1\n    self.image_size = [384, 256]  # width, height\n    self.load_hist = load_hist  # load histogram if exists\n    self.mode = mode\n    self.from_rgb = ops.rgb_to_uv  # rgb to chroma conversion function\n    self.to_rgb = ops.uv_to_rgb  # chroma to rgb conversion function\n    self.hist_boundary = ops.get_hist_boundary()\n\n    logging.info(f'Creating dataset with {len(self.imgfiles)} examples')\n\n  def __len__(self):\n    \"\"\" Gets length of image files in the dataloader. \"\"\"\n\n    return len(self.imgfiles)\n\n  def __getitem__(self, i):\n    \"\"\" Gets next data in the dataloader.\n\n    Args:\n      i: index of file in the dataloader.\n\n    Returns:\n      A dictionary of the following keys:\n      - image_rgb:\n      - file_name: filename (without the full path).\n      - histogram: input histogram.\n      - model_input_histograms: input histogram and the additional histograms\n          to be fed to C5 network.\n      - gt_ill: ground-truth illuminant color. 
If the dataloader's 'mode'\n variable was set to 'testing' and the ground-truth illuminant\n information does not exist, it will contain an empty tensor.\n \"\"\"\n\n img_file = self.imgfiles[i]\n\n in_img = ops.read_image(img_file)\n in_img = ops.resize_image(in_img, self.image_size)\n\n rgb_img = ops.to_tensor(in_img) # for visualization\n\n # gets the ground-truth illuminant color\n with open(path.splitext(img_file)[\n 0] + '_metadata.json', 'r') as metadata_file:\n metadata = json.load(metadata_file)\n\n if self.mode == 'training':\n assert ['illuminant_color_raw' in metadata.keys() or 'gt_ill' in\n metadata.keys()]\n if 'illuminant_color_raw' in metadata.keys():\n gt_ill = np.array(metadata['illuminant_color_raw'])\n gt_ill = torch.from_numpy(gt_ill)\n elif 'gt_ill' in metadata.keys():\n gt_ill = np.array(metadata['gt_ill'])\n gt_ill = torch.from_numpy(gt_ill)\n else:\n gt_ill = torch.tensor([])\n\n # computes histogram feature of rgb and edge images\n if self.input_size is 64:\n post_fix = ''\n else:\n post_fix = f'_{self.input_size}'\n\n if path.exists(path.splitext(img_file)[0] +\n f'_histogram{post_fix}.npy') and self.load_hist:\n histogram = np.load(path.splitext(img_file)[0] +\n f'_histogram{post_fix}.npy', allow_pickle=False)\n else:\n histogram = np.zeros((self.input_size, self.input_size, 2))\n valid_chroma_rgb, valid_colors_rgb = ops.get_hist_colors(\n in_img, self.from_rgb)\n histogram[:, :, 0] = ops.compute_histogram(\n valid_chroma_rgb, self.hist_boundary, self.input_size,\n rgb_input=valid_colors_rgb)\n\n edge_img = ops.compute_edges(in_img)\n valid_chroma_edges, valid_colors_edges = ops.get_hist_colors(\n edge_img, self.from_rgb)\n\n histogram[:, :, 1] = ops.compute_histogram(\n valid_chroma_edges, self.hist_boundary, self.input_size,\n rgb_input=valid_colors_edges)\n\n np.save(path.splitext(img_file)[0] + f'_histogram{post_fix}.npy',\n histogram)\n\n in_histogram = ops.to_tensor(histogram)\n\n # gets additional input data\n if self.additional_data_num > 0:\n additiona_files = Data.get_rand_examples_from_sensor(\n current_file=img_file, files=self.imgfiles,\n target_number=self.additional_data_num)\n else:\n additiona_files = None\n\n additional_histogram = histogram\n\n u_coord, v_coord = ops.get_uv_coord(self.input_size,\n tensor=False, normalize=True)\n u_coord = np.expand_dims(u_coord, axis=-1)\n v_coord = np.expand_dims(v_coord, axis=-1)\n\n additional_histogram = np.concatenate([additional_histogram, u_coord],\n axis=-1)\n additional_histogram = np.concatenate([additional_histogram, v_coord],\n axis=-1)\n additional_histogram = np.expand_dims(additional_histogram, axis=-1)\n\n # if multiple input is used, load them\n if additiona_files is not None:\n for file, i in zip(additiona_files, range(len(additiona_files))):\n # computes histogram feature of rgb and edge images\n if path.exists(path.splitext(file)[0] +\n f'_histogram{post_fix}.npy') and self.load_hist:\n histogram = np.load(path.splitext(file)[0] +\n f'_histogram{post_fix}.npy', allow_pickle=False)\n\n else:\n img = ops.read_image(file)\n h, w, _ = img.shape\n if h != self.image_size[1] or w != self.image_size[0]:\n img = ops.resize_image(img, self.image_size)\n histogram = np.zeros((self.input_size, self.input_size, 2))\n valid_chroma_rgb, valid_colors_rgb = ops.get_hist_colors(\n img, self.from_rgb)\n histogram[:, :, 0] = ops.compute_histogram(\n valid_chroma_rgb, self.hist_boundary, self.input_size,\n rgb_input=valid_colors_rgb)\n edge_img = ops.compute_edges(img)\n valid_chroma_edges, valid_colors_edges 
= ops.get_hist_colors(\n edge_img, self.from_rgb)\n\n histogram[:, :, 1] = ops.compute_histogram(\n valid_chroma_edges, self.hist_boundary, self.input_size,\n rgb_input=valid_colors_edges)\n\n np.save(path.splitext(file)[0] + f'_histogram{post_fix}.npy',\n histogram)\n\n histogram = np.concatenate([histogram, u_coord], axis=-1)\n histogram = np.concatenate([histogram, v_coord], axis=-1)\n histogram = np.expand_dims(histogram, axis=-1)\n\n additional_histogram = np.concatenate([additional_histogram, histogram],\n axis=-1)\n\n additional_histogram = ops.to_tensor(additional_histogram, dims=4)\n\n return {'image_rgb': rgb_img,\n 'file_name': path.basename(img_file),\n 'histogram': in_histogram,\n 'model_input_histograms': additional_histogram,\n 'gt_ill': gt_ill}\n\n @staticmethod\n def load_files(img_dir):\n \"\"\" Loads filenames in a given image directory.\n\n Args:\n img_dir: image directory. Note that if the dataloader's 'mode' variable\n was set to 'training', each filename in the list should have a\n metadata json file with a postfix '_metadata'. For example, if the\n filename is 'data/image1_sensorname_canon.png', the metadata file\n should be 'data/image1_sensorname_canon_metadata.json'. Each\n metadata file should contain a key named 'illuminant_color_raw' or\n 'gt_ill' that contains the true rgb illuminant color.\n\n Returns:\n imgfiles: a list of full filenames.\n \"\"\"\n\n logging.info(f'Loading images information from {img_dir}...')\n imgfiles = [join(img_dir, file) for file in listdir(img_dir)\n if file.endswith('.png') or file.endswith('.PNG')]\n return imgfiles\n\n @staticmethod\n def get_rand_examples_from_sensor(current_file, files, target_number):\n \"\"\" Randomly selects additional filenames of images taken by the same\n sensor.\n\n Args:\n current_file: filename of the current image; this filename should be in\n the following format: 'a_sensorname_b.png', where a is image id (can\n contain any string) and b is camera model name. The function will\n randomly select additional images that have the same camera model\n name (i.e., b).\n files: filenames of images in the dataloader.\n target_number: number of the additional images.\n\n Returns:\n sensor_files: additional image filenames taken by the same camera model\n used to capture the image in current_file.\n \"\"\"\n assert ('sensorname' in current_file)\n sensor_name = path.splitext(current_file)[0].split('sensorname_')[-1]\n sensor_files = [file for file in files if sensor_name in file]\n sensor_files.remove(current_file)\n random.shuffle(sensor_files)\n if len(sensor_files) < target_number:\n raise Exception('Cannot find enough training data from sensor:'\n f'{sensor_name}')\n return sensor_files[:target_number]\n"
] | [
[
"numpy.expand_dims",
"torch.from_numpy",
"torch.tensor",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lin-tan/fairness-variance | [
"7f6aee23160707ffe78f429e5d960022ea1c9fe4",
"7f6aee23160707ffe78f429e5d960022ea1c9fe4"
] | [
"dlfairness/original_code/FairALM/Experiments-CelebA/label_ablation/fcn.py",
"dlfairness/original_code/Balanced-Datasets-Are-Not-Enough/verb_classification/adv/ae_adv_model.py"
] | [
"import os\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nclass MultilayerPerceptron(torch.nn.Module):\n\n def __init__(self, num_features):\n super(MultilayerPerceptron, self).__init__()\n \n ### 1st hidden layer\n self.linear_1 = torch.nn.Linear(num_features, 64)\n \n ### 2nd hidden layer\n self.linear_2 = torch.nn.Linear(64, 1)\n \n def forward(self, x):\n out = x.view(x.size(0), -1)\n out = self.linear_1(out)\n out = F.relu(out)\n out = self.linear_2(out)\n\n probas = torch.cat(((out < 0.), (out >= 0.)), 1)\n \n return out, probas\n\nclass MyHingeLoss(torch.nn.Module):\n\n def __init__(self):\n super(MyHingeLoss, self).__init__()\n \n def forward(self, output, target):\n target_new = target.clone()\n target_new[target < 1.] = -1.\n\n hinge_loss = 1 - torch.mul(torch.squeeze(output), target_new.float())\n hinge_loss[hinge_loss < 0] = 0\n return hinge_loss\n \ndef fcn(num_features):\n model = MultilayerPerceptron(num_features)\n return model\n",
"import torch\r\nimport functools\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torchvision.models as models\r\nimport torch.nn.utils\r\nfrom torch.autograd import Function\r\nimport copy\r\n\r\ndef get_norm_layer(norm_type='instance'):\r\n if norm_type == 'batch':\r\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\r\n elif norm_type == 'instance':\r\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\r\n elif norm_type == 'none':\r\n norm_layer = None\r\n else:\r\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\r\n return norm_layer\r\n\r\n# with skip connection and pixel connection and smoothed\r\nclass UnetGenerator(nn.Module):\r\n def __init__(self, input_nc, output_nc, num_downs, ngf=64,\r\n norm_layer=nn.BatchNorm2d, use_dropout=False):\r\n super(UnetGenerator, self).__init__()\r\n\r\n if type(norm_layer) == functools.partial:\r\n use_bias = norm_layer.func == nn.InstanceNorm2d\r\n else:\r\n use_bias = norm_layer == nn.InstanceNorm2d\r\n\r\n use_bias = True\r\n # construct unet structure\r\n self.downsample_0 = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1, bias=use_bias)\r\n\r\n self.downRelu_1 = nn.LeakyReLU(0.2, True)\r\n self.downSample_1 = nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1, bias=use_bias)\r\n self.downNorm_1 = norm_layer(ngf * 2)\r\n\r\n self.downRelu_2 = nn.LeakyReLU(0.2, True)\r\n self.downSample_2 = nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=1, bias=use_bias)\r\n self.downNorm_2 = norm_layer(ngf * 4)\r\n\r\n self.downRelu_3 = nn.LeakyReLU(0.2, True)\r\n self.downSample_3 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1, bias=use_bias)\r\n self.downNorm_3 = norm_layer(ngf * 8)\r\n\r\n self.innerLeakyRelu = nn.LeakyReLU(0.2, True)\r\n self.innerDownSample = nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1, bias=use_bias)\r\n self.innerRelu = nn.ReLU(True)\r\n innerUpSample = []\r\n innerUpSample.append(nn.Upsample(scale_factor = 2, mode='bilinear'))\r\n innerUpSample.append(nn.ReflectionPad2d((2, 1, 2, 1)))\r\n innerUpSample.append(nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=1, padding=0, bias=use_bias))\r\n self.innerUpSample = nn.Sequential(*innerUpSample)\r\n\r\n self.innerNorm = norm_layer(ngf * 8)\r\n\r\n self.upRelu_3 = nn.ReLU(True)\r\n upSample_3 = []\r\n upSample_3.append(nn.Upsample(scale_factor = 2, mode='bilinear'))\r\n upSample_3.append(nn.ReflectionPad2d((2, 1, 2, 1)))\r\n upSample_3.append(nn.Conv2d(ngf * 16, ngf * 4, kernel_size=4, stride=1, padding=0, bias=use_bias))\r\n self.upSample_3 = nn.Sequential(*upSample_3)\r\n self.upNorm_3 = norm_layer(ngf * 4)\r\n\r\n self.upRelu_2 = nn.ReLU(True)\r\n upSample_2 = []\r\n upSample_2.append(nn.Upsample(scale_factor = 2, mode='bilinear'))\r\n upSample_2.append(nn.ReflectionPad2d((2, 1, 2, 1)))\r\n upSample_2.append(nn.Conv2d(ngf * 8, ngf * 2, kernel_size=4, stride=1, padding=0, bias=use_bias))\r\n self.upSample_2 = nn.Sequential(*upSample_2)\r\n self.upNorm_2 = norm_layer(ngf * 2)\r\n\r\n self.upRelu_1 = nn.ReLU(True)\r\n upSample_1 = []\r\n upSample_1.append(nn.Upsample(scale_factor = 2, mode='bilinear'))\r\n upSample_1.append(nn.ReflectionPad2d((2, 1, 2, 1)))\r\n upSample_1.append(nn.Conv2d(ngf * 4, ngf, kernel_size=4, stride=1, padding=0, bias=use_bias))\r\n self.upSample_1 = nn.Sequential(*upSample_1)\r\n self.upNorm_1 = norm_layer(ngf)\r\n\r\n self.upRelu_0 = nn.ReLU(True)\r\n upSample_0 = []\r\n 
upSample_0.append(nn.Upsample(scale_factor = 2, mode='bilinear'))\r\n upSample_0.append(nn.ReflectionPad2d((2, 1, 2, 1)))\r\n upSample_0.append(nn.Conv2d(ngf * 2, 1, kernel_size=4, stride=1, padding=0, bias=use_bias))\r\n self.upSample_0 = nn.Sequential(*upSample_0)\r\n\r\n ## initialize bias\r\n nn.init.normal_(self.upSample_0[-1].bias, mean=3, std=1)\r\n\r\n self.activation = nn.Sigmoid()\r\n\r\n def forward(self, input):\r\n # assume input image size = 224\r\n x_down_0 = self.downsample_0(input) # (ngf, 112, 112)\r\n\r\n x_down_1 = self.downNorm_1(self.downSample_1(self.downRelu_1(x_down_0))) # (ngf*2, 56, 56)\r\n x_down_2 = self.downNorm_2(self.downSample_2(self.downRelu_2(x_down_1))) # (ngf*4, 28, 28)\r\n x_down_3 = self.downNorm_3(self.downSample_3(self.downRelu_3(x_down_2))) # (ngf*8, 14, 14)\r\n\r\n latent = self.innerDownSample(self.innerLeakyRelu(x_down_3)) # (ngf*8, 7, 7)\r\n\r\n x = self.innerNorm(self.innerUpSample(self.innerRelu(latent))) # (ngf*8, 14, 14)\r\n\r\n x_up_3 = self.upNorm_3(self.upSample_3(self.upRelu_3(torch.cat([x, x_down_3], 1)))) # (ngf*4, 28, 28)\r\n x_up_2 = self.upNorm_2(self.upSample_2(self.upRelu_2(torch.cat([x_up_3, x_down_2], 1)))) # (ngf*2, 56, 56)\r\n x_up_1 = self.upNorm_1(self.upSample_1(self.upRelu_1(torch.cat([x_up_2, x_down_1], 1)))) # (ngf, 112, 112)\r\n\r\n encoded_image = self.activation(self.upSample_0(self.upRelu_0(torch.cat([x_up_1, x_down_0], 1)))) # (1, 224, 224)\r\n\r\n return torch.mul(input, encoded_image), latent\r\n\r\nclass VerbClassificationAdv(nn.Module):\r\n\r\n def __init__(self, args, num_verb, hid_size, dropout, adv_lambda):\r\n\r\n super(VerbClassificationAdv, self).__init__()\r\n print(\"Build a VerbClassification Model[{}]\".format(args.layer))\r\n self.num_verb = num_verb\r\n self.args = args\r\n self.base_network = models.resnet50(pretrained = True)\r\n print('Load weights from Resnet18/50 done')\r\n\r\n norm_layer = 'batch'\r\n use_dropout = False\r\n norm_layer = get_norm_layer(norm_type=norm_layer)\r\n self.autoencoder = UnetGenerator(3, 3, 5, 64, \\\r\n norm_layer=norm_layer, use_dropout=use_dropout)\r\n\r\n output_size = self.num_verb\r\n self.finalLayer = nn.Linear(self.base_network.fc.in_features, output_size)\r\n\r\n if not args.autoencoder_finetune:\r\n for param in self.autoencoder.parameters():\r\n param.requires_grad = False\r\n\r\n if not args.finetune:\r\n for param in self.base_network.parameters():\r\n param.requires_grad = False\r\n\r\n for param in self.finalLayer.parameters():\r\n param.requires_grad = False\r\n\r\n\r\n self.adv_component = GenderClassification(args)\r\n pretrained_gender_classifier_path = './model_best_verb_balanced.pth.tar'\r\n gender_classifier_checkpoint = torch.load(pretrained_gender_classifier_path)\r\n self.adv_component.load_state_dict(gender_classifier_checkpoint['state_dict'])\r\n print(\"Loaded pretrained gender classifier from {}\".format(pretrained_gender_classifier_path))\r\n\r\n if not args.finetune:\r\n for param in self.adv_component.parameters():\r\n param.requires_grad = False\r\n\r\n self.adv_lambda = adv_lambda\r\n\r\n def forward(self, image):\r\n autoencoded_image, latent = self.autoencoder(image)\r\n\r\n x = self.base_network.conv1(autoencoded_image)\r\n x = self.base_network.bn1(x)\r\n x = self.base_network.relu(x)\r\n conv1_feature = self.base_network.maxpool(x)\r\n\r\n layer1_feature = self.base_network.layer1(conv1_feature)\r\n layer2_feature = self.base_network.layer2(layer1_feature)\r\n layer3_feature = self.base_network.layer3(layer2_feature)\r\n 
layer4_feature = self.base_network.layer4(layer3_feature)\r\n\r\n final_feature = self.base_network.avgpool(layer4_feature)\r\n final_feature = final_feature.view(final_feature.size(0), -1)\r\n\r\n preds = self.finalLayer(final_feature)\r\n\r\n # adv component forward pass\r\n adv_feature = ReverseLayerF.apply(autoencoded_image, self.adv_lambda)\r\n adv_preds = self.adv_component(adv_feature)\r\n return preds, adv_preds, autoencoded_image\r\n\r\nclass ReverseLayerF(Function):\r\n\r\n @staticmethod\r\n def forward(ctx, x, alpha):\r\n ctx.alpha = alpha\r\n\r\n return x.view_as(x)\r\n\r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n return grad_output.neg() * ctx.alpha, None\r\n\r\n\r\nclass GenderClassification(nn.Module):\r\n\r\n def __init__(self, args):\r\n\r\n super(GenderClassification, self).__init__()\r\n print(\"Build a GenderClassification Model\")\r\n\r\n self.base_network = models.resnet18(pretrained = True)\r\n print('Load weights from Resnet18 done')\r\n\r\n self.finalLayer = nn.Linear(self.base_network.fc.in_features, 2)\r\n\r\n def forward(self, image):\r\n x = self.base_network.conv1(image)\r\n x = self.base_network.bn1(x)\r\n x = self.base_network.relu(x)\r\n x = self.base_network.maxpool(x)\r\n\r\n x = self.base_network.layer1(x)\r\n x = self.base_network.layer2(x)\r\n x = self.base_network.layer3(x)\r\n x = self.base_network.layer4(x)\r\n\r\n x = self.base_network.avgpool(x)\r\n image_features = x.view(x.size(0), -1)\r\n\r\n preds = self.finalLayer(image_features)\r\n\r\n return preds\r\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.squeeze",
"torch.cat"
],
[
"torch.nn.Sequential",
"torch.nn.ReflectionPad2d",
"torch.load",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.mul",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.Upsample",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vocthor/rlcard | [
"eacf578379ed838cf1ceae6eba19dcbf774e3333"
] | [
"rlcard/utils/utils.py"
] | [
"import numpy as np\n\nfrom rlcard.games.base import Card\n\n\ndef set_seed(seed):\n if seed is not None:\n import subprocess\n import sys\n\n reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0] for r in reqs.split()]\n if 'torch' in installed_packages:\n import torch\n torch.backends.cudnn.deterministic = True\n torch.manual_seed(seed)\n np.random.seed(seed)\n import random\n random.seed(seed)\n\n\ndef get_device():\n import torch\n if torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n print(\"--> Running on the GPU\")\n else:\n device = torch.device(\"cpu\")\n print(\"--> Running on the CPU\")\n\n return device\n\n\ndef init_standard_deck():\n ''' Initialize a standard deck of 52 cards\n\n Returns:\n (list): A list of Card object\n '''\n suit_list = ['S', 'H', 'D', 'C']\n rank_list = ['A', '2', '3', '4', '5', '6',\n '7', '8', '9', 'T', 'J', 'Q', 'K']\n res = [Card(suit, rank) for suit in suit_list for rank in rank_list]\n return res\n\n\ndef init_54_deck():\n ''' Initialize a standard deck of 52 cards, BJ and RJ\n\n Returns:\n (list): Alist of Card object\n '''\n suit_list = ['S', 'H', 'D', 'C']\n rank_list = ['A', '2', '3', '4', '5', '6',\n '7', '8', '9', 'T', 'J', 'Q', 'K']\n res = [Card(suit, rank) for suit in suit_list for rank in rank_list]\n res.append(Card('BJ', ''))\n res.append(Card('RJ', ''))\n return res\n\n\ndef init_32_deck():\n ''' Initialize a normal 32 cards deck : from 7 to Ace\n\n Returns:\n (list): A list of Card objects\n '''\n suit_list = ['S', 'H', 'D', 'C']\n rank_list = ['7', '8', '9', 'T', 'J', 'Q', 'K', 'A']\n res = [Card(suit, rank) for suit in suit_list for rank in rank_list]\n return res\n\n\ndef init_8_deck():\n ''' Initialize a small 8 cards deck : from Ten to Ace\n\n Returns:\n (list): A list of Card objects\n '''\n suit_list = ['S', 'H']\n rank_list = ['T', 'J', 'Q', 'K', 'A']\n res = [Card(suit, rank) for suit in suit_list for rank in rank_list]\n return res\n\n\ndef rank2int(rank):\n ''' Get the coresponding number of a rank.\n\n Args:\n rank(str): rank stored in Card object\n\n Returns:\n (int): the number corresponding to the rank\n\n Note:\n 1. If the input rank is an empty string, the function will return -1.\n 2. 
If the input rank is not valid, the function will return None.\n '''\n if rank == '':\n return -1\n elif rank.isdigit():\n if int(rank) >= 2 and int(rank) <= 10:\n return int(rank)\n else:\n return None\n elif rank == 'A':\n return 14\n elif rank == 'T':\n return 10\n elif rank == 'J':\n return 11\n elif rank == 'Q':\n return 12\n elif rank == 'K':\n return 13\n return None\n\n\ndef elegent_form(card):\n ''' Get an elegant form of a card string\n\n Args:\n card (string): A card string\n\n Returns:\n elegent_card (string): A nicely formatted card string\n '''\n suits = {'S': '♠', 'H': '♥', 'D': '♦', 'C': '♣',\n 's': '♠', 'h': '♥', 'd': '♦', 'c': '♣'}\n rank = '10' if card[1] == 'T' else card[1]\n\n return suits[card[0]] + rank\n\n\ndef print_card(cards):\n ''' Nicely print a card or list of cards\n\n Args:\n cards (string or list): The card(s) to be printed\n '''\n if cards is None:\n cards = [None]\n if isinstance(cards, str):\n cards = [cards]\n\n lines = [[] for _ in range(9)]\n\n for card in cards:\n if card is None:\n lines[0].append('┌─────────┐')\n lines[1].append('│░░░░░░░░░│')\n lines[2].append('│░░░░░░░░░│')\n lines[3].append('│░░░░░░░░░│')\n lines[4].append('│░░░░░░░░░│')\n lines[5].append('│░░░░░░░░░│')\n lines[6].append('│░░░░░░░░░│')\n lines[7].append('│░░░░░░░░░│')\n lines[8].append('└─────────┘')\n else:\n if isinstance(card, Card):\n elegent_card = elegent_form(card.suit + card.rank)\n else:\n elegent_card = elegent_form(card)\n suit = elegent_card[0]\n rank = elegent_card[1]\n if len(elegent_card) == 3:\n space = elegent_card[2]\n else:\n space = ' '\n\n lines[0].append('┌─────────┐')\n lines[1].append('│{}{} │'.format(rank, space))\n lines[2].append('│ │')\n lines[3].append('│ │')\n lines[4].append('│ {} │'.format(suit))\n lines[5].append('│ │')\n lines[6].append('│ │')\n lines[7].append('│ {}{}│'.format(space, rank))\n lines[8].append('└─────────┘')\n\n for line in lines:\n print(' '.join(line))\n\n\ndef reorganize(trajectories, payoffs):\n ''' Reorganize the trajectory to make it RL friendly\n\n Args:\n trajectories (list): A list of trajectories\n payoffs (list): A list of payoffs for the players. 
Each entry corresponds to one player\n\n Returns:\n (list): New trajectories that can be fed into RL algorithms.\n\n '''\n num_players = len(trajectories)\n new_trajectories = [[] for _ in range(num_players)]\n\n for player in range(num_players):\n for i in range(0, len(trajectories[player])-2, 2):\n if i == len(trajectories[player])-3:\n reward = payoffs[player]\n done = True\n else:\n reward, done = 0, False\n transition = trajectories[player][i:i+3].copy()\n transition.insert(2, reward)\n transition.append(done)\n\n new_trajectories[player].append(transition)\n return new_trajectories\n\n\ndef remove_illegal(action_probs, legal_actions):\n ''' Remove illegal actions and normalize the\n probability vector\n\n Args:\n action_probs (numpy.array): A 1-dimensional numpy array.\n legal_actions (list): A list of indices of legal actions.\n\n Returns:\n probs (numpy.array): A normalized probability vector with illegal actions removed.\n '''\n probs = np.zeros(action_probs.shape[0])\n probs[legal_actions] = action_probs[legal_actions]\n if np.sum(probs) == 0:\n probs[legal_actions] = 1 / len(legal_actions)\n else:\n probs /= sum(probs)\n return probs\n\n\ndef tournament(env, num):\n ''' Evaluate the performance of the agents in the environment\n\n Args:\n env (Env class): The environment to be evaluated.\n num (int): The number of games to play.\n\n Returns:\n A list of average payoffs for each player\n '''\n payoffs = [0 for _ in range(env.num_players)]\n counter = 0\n while counter < num:\n _, _payoffs = env.run(is_training=False)\n if isinstance(_payoffs, list):\n for _p in _payoffs:\n for i, _ in enumerate(payoffs):\n payoffs[i] += _p[i]\n counter += 1\n else:\n for i, _ in enumerate(payoffs):\n payoffs[i] += _payoffs[i]\n counter += 1\n for i, _ in enumerate(payoffs):\n payoffs[i] /= counter\n return payoffs\n\n\ndef plot_curve(csv_path, save_path, algorithm):\n ''' Read data from csv file and plot the results\n '''\n import os\n import csv\n import matplotlib.pyplot as plt\n with open(csv_path) as csvfile:\n reader = csv.DictReader(csvfile)\n xs = []\n ys = []\n for row in reader:\n xs.append(int(row['timestep']))\n ys.append(float(row['reward']))\n fig, ax = plt.subplots()\n ax.plot(xs, ys, label=algorithm)\n ax.set(xlabel='timestep', ylabel='reward')\n ax.legend()\n ax.grid()\n\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n fig.savefig(save_path)\n"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"matplotlib.pyplot.subplots",
"torch.cuda.is_available",
"torch.device",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexglibby/RotationalDynamics | [
"e877ad0f7dd623106ec5336ff4a45018ae9fb525"
] | [
"network_helper_functions.py"
] | [
"#### Network Model of Rotation\n#### place in folder with network_model_rnn.py\n#### 7/22/2019 - AL\n### 3/2/2020 - Al\n\n### numpy version - '1.16.2'\nimport numpy as np\nimport functools\nimport operator\nimport sys\nimport pickle \n\n### torch version - '1.0.1'\nimport torch\nimport torch.nn as nn\nimport random\n\n# sklearn.__version__ - '0.21.1'\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# scipy.__version__ - '1.2.1'\nfrom scipy.special import ndtri\n#############################################################\n#############################################################\n\n#### returns the index of true values (added 8/29/2018 - AL)\ndef return_true_index(vector):\n index = [i for i, x in enumerate(vector) if x]\n return(index)\n \n\n### Dynamically set network parameter grid space - 12/30/2019\n#### go through and replace each variable range with a tiled range, based on other values\ndef network_parm_grid(change_p_dict):\n\n #### get the full parameter space (12/12/2019 - AL)\n num_p = functools.reduce(operator.mul,[len(v) for v in change_p_dict.values()])\n\n ### get variables to loop through (12/30/2019 - Al)\n loop_var = [i for i in change_p_dict.keys() if len(change_p_dict[i])>1]\n other_var = [i for i in change_p_dict.keys() if len(change_p_dict[i])==1]\n\n all_parm_val = change_p_dict.copy()\n\n if len(loop_var)>0:\n var = loop_var[0]\n current_len = len(change_p_dict[var])\n all_parm_val[var] = np.tile(all_parm_val[var],int(num_p/current_len))\n #print(all_parm_val[var])\n\n for var in loop_var[1:]:\n all_parm_val[var] = np.tile(np.expand_dims(np.array(all_parm_val[var]),1),current_len).flatten()\n current_len = len(all_parm_val[var])\n all_parm_val[var] = np.tile(all_parm_val[var],int(num_p/current_len))\n #print(all_parm_val[var])\n\n ### tile other variables\n for var in other_var:\n all_parm_val[var] = np.tile(all_parm_val[var],num_p)\n #print(all_parm_val[var])\n \n return(all_parm_val,num_p)\n\ndef logistic_sigmoid(x,gain,center=0,L=1):\n out = L/(1+np.exp(-gain*(x-center)))\n return(out)\n\ndef transfer_func(x,func_type='Relu',center=100,gain=.1,L=20):\n if func_type=='sigmoid':\n out = logistic_sigmoid(x,gain,center,L)\n out[x <= 0] = 0\n \n elif func_type=='Relu':\n center = 0 ### min firing rate \n out = center*np.ones(x.shape)\n out[x >= center] = x[x >= center]\n \n elif func_type=='log':\n out = np.zeros(len(x))\n out[x>0] = np.log(3*x[x>0]+1)\n \n elif func_type=='exp':\n out = np.exp(x)-1\n out[x <= 0]=0\n return(out)\n\n##########################################################################\n##########################################################################\n#### helper functions for running the model (8/2/2019 - AL)\n\n### assume even counts of all trial types - 8/2/2019 - AL\ndef return_cond_list(num_trials_total,num_trial_types,random_on=True):\n \n num_trials_condition = int(np.round(num_trials_total/num_trial_types))\n num_trial_total = num_trials_condition*num_trial_types\n\n for cond in np.arange(num_trial_types):\n\n if cond==0:\n condition_list = cond*np.ones(num_trials_condition)\n else:\n condition_list = np.append(condition_list,cond*np.ones(num_trials_condition))\n\n ### randomize order\n if random_on==True:\n condition_list = np.random.permutation(condition_list)\n return(condition_list,num_trial_total)\n\n### updated - 3/2/2020 \n### updated - 3/9/2020 - AL\ndef define_network_inputs(network_parm,input_dict):\n #### this is 
the most basic - 4 inputs - A,X, and C/C*\n if network_parm['model_type']=='rec':\n \n \n #### Define the inputs at each time point (7/30/2019 - AL) \n num_tp = network_parm['num_stim']*network_parm['num_tp_stim']\n num_trial_types = 2*network_parm['num_stim']\n input_matrix = np.zeros((num_trial_types,network_parm['input_size'],num_tp))\n trial_types = {}\n \n ############\n trial_index = 0\n for stim0 in input_dict['stim0']:\n\n ##################################### \n stim = 0\n current_tp_start = stim*network_parm['num_tp_stim']\n current_tp_end = current_tp_start + network_parm['num_tp_on']\n\n ### A or X\n input_matrix[trial_index:trial_index+len(input_dict['stim1']),:,current_tp_start:current_tp_end] = \\\n input_dict['stim0'][stim0]\n\n for stim1 in input_dict['stim1']:\n\n ##################################### \n stim = 1\n current_tp_start = stim*network_parm['num_tp_stim']\n current_tp_end = current_tp_start + network_parm['num_tp_on']\n ### C or C*\n input_matrix[trial_index,:,current_tp_start:current_tp_end] = input_dict['stim1'][stim1]\n \n trial_types[trial_index] = [stim0,stim1]\n\n trial_index += 1\n\n \n return(input_matrix,trial_types)\n\n\n### updated 9/13/2019 - AL\ndef weight_array_generate(cell_type_counts,rand_std=.5,link_std=.5):\n cell_types_make = list(cell_type_counts.keys())\n ba = 0\n #print(cell_types_make)\n cell_type_list = []\n for cell_type in cell_types_make:\n \n #print(cell_type)\n\n count = cell_type_counts[cell_type]\n if count > 0:\n ba += 1\n for ci in np.arange(count):\n\n #a = 0 #np.random.uniform(.5,1) ### always have a > b (changed 8/29/2019)\n cell_select = cell_connect(cell_type,rand_std,link_std) ### shape should be options x input cells\n num_connect_types = cell_select.shape[0]\n\n cell_select_index = np.random.choice(np.arange(num_connect_types))\n\n if np.all([ci==0,ba==1]):\n weight_array = np.expand_dims(cell_select[cell_select_index],1)\n else:\n weight_array = np.append(weight_array,np.expand_dims(cell_select[cell_select_index],1),1)\n \n cell_type_list.append(cell_type)\n \n \n ### mix up the random / none cells (9/18/2019 - AL)\n ### there is nothing that says the random connections \n ### at each time step need to happen between the same set of cells\n ### so we mix up nones and randoms between time steps \n cell_type_list = np.array(cell_type_list)\n mix_index = np.any(np.array([cell_type_list=='none',cell_type_list=='rand_sel',\\\n cell_type_list=='random_uni',cell_type_list=='random_norm']),0)\n\n ### mix up cells\n weight_array[0:2,mix_index] = weight_array[0:2,mix_index][:,np.random.permutation(sum(mix_index))]\n weight_array[2:4,mix_index] = weight_array[2:4,mix_index][:,np.random.permutation(sum(mix_index))]\n \n return(weight_array)\n\n#### manually generate weights between input and population (hidden layer)\n#### to create singles/ doubles - 8/2/2019 - AL\n#### edited 9/13/2019 - AL (to add random cells)\n#### cell types\ndef cell_connect(cell_type,rand_std,link_std):\n\n cell_special_on = True\n if cell_special_on:\n mean = (0,0)\n cov_ss = rand_std\n cov = [[link_std,cov_ss],[cov_ss,link_std]]\n x = abs(np.random.multivariate_normal(mean, cov, 1))[0]\n a= x[0]\n am = x[1]\n b = 0\n else:\n #b = 0 #an - a\n try1 = abs(np.random.normal(0,rand_std,2))\n a = np.max(try1)\n am = a\n b = np.min(try1)\n\n \n if cell_type=='single_0':\n cell_select = np.array([[a,b,b,b],[b,a,b,b],[a,b,a,a],[b,a,a,a]])\n \n elif cell_type=='single_1':\n cell_select = np.array([[b,b,a,b],[b,b,b,a],[a,a,b,a],[a,a,a,b]])\n #cell_select = 
np.array([[b,b,a,b],[b,b,b,a]])\n \n elif cell_type=='switch':\n cell_select = np.array([[a,b,b,am],[b,a,am,b]])\n elif cell_type=='stable':\n cell_select = np.array([[a,b,am,b],[b,a,b,am]])\n elif cell_type=='none':\n cell_select = np.array([[a,a,am,am],[b,b,b,b],[a,a,b,b],[b,b,am,am]])\n \n #### add in random options (9/13/2019 - AL)\n elif cell_type=='rand_sel':\n cell_select = np.array([[a,b,b,b],[b,a,b,b],[a,b,a,a],[b,a,a,a],\\\n [b,b,a,b],[b,b,b,a],[a,a,b,a],[a,a,a,b],\\\n [a,b,b,a],[b,a,a,b],[a,b,a,b],[b,a,b,a]])\n elif cell_type=='random_uni':\n cell_select = np.array([np.random.uniform(0,1,4)])\n elif cell_type=='random_norm':\n cell_select = np.array([abs(np.random.normal(0,rand_std,4))])\n \n return(cell_select)\n\n#### first put in A/X selectivity based on W matrix - 3/18/2020 \n#### updated 3/19/2020 - AL\n###############################################\n#### set up A/X selectivity ###################\n#### IH weight matrix - 3/10/2020 - AL\n\n#### focus on keeping total number of C/C* selectivity neurons CONSTANT! (note made - 3/16/2020 - Al)\n\n### set up A/X selectivity - \n### num_trial_types,input_feature,num_tp = input_matrix.shape\n\ndef associate_IH(AX_sen,network_parm):\n \n IH = np.zeros((network_parm['input_size'],network_parm['N']))\n\n ### find neurons that are selective or A or X: \n A_sel_index = AX_sen[0,:]>AX_sen[1,:] ### A sel\n X_sel_index = AX_sen[0,:]<AX_sen[1,:] ### X sel\n\n #print('number of A sel neurons: '+str(sum(A_sel_index)))\n #print('number of X sel neurons: '+str(sum(X_sel_index)))\n #print('total A/X sel: '+str(sum(np.any([A_sel_index,X_sel_index],0))))\n \n IH[0:2,:] = AX_sen\n ########################################################\n ### then add in C/C* selectivity based on link percent \n #### updated - 3/18/2020 - AL\n\n #### test out different link percents - check C/C* sel\n lp = network_parm['link_percents']# = 0.95\n #print('current link percent: '+str(lp))\n\n link_A = int(np.round(lp*sum(A_sel_index)))\n link_X = int(np.round(lp*sum(X_sel_index)))\n\n #print('link with A: '+str(link_A))\n #print('link with X: '+str(link_X))\n\n #print('link with A/X: '+str(link_A+link_X))\n\n other_sel = network_parm['CCp_sel'] - (link_A+link_X) #CCp_sel_check\n\n #print('left over C/C* sel: '+str(other_sel))\n\n ######################################################\n ### set up AC link: \n unlink_A = sum(A_sel_index) - link_A\n a_link = abs(np.random.normal(0,network_parm['link_std'],link_A))\n a_unlink = np.zeros(unlink_A)\n from_C_AC = np.append(a_link,a_unlink) ## these are the connections from C to neurons that prefer A (to create AC)\n IH[2,A_sel_index] = np.random.permutation(from_C_AC)\n\n ### set up XC* 8link\n unlink_X = sum(X_sel_index) - link_X\n x_link = abs(np.random.normal(0,network_parm['link_std'],link_X))\n x_unlink = np.zeros(unlink_X)\n from_Cp_XCp = np.append(x_link,x_unlink) ## these are the connections from C* to neurons that prefer X (to create XC*)\n IH[3,X_sel_index] = np.random.permutation(from_Cp_XCp)\n\n #############################################################\n\n ### find neurons that prefer neither A or X \n ### put in C/Cp selectivity \n\n ### \n CCP = abs(np.random.normal(0,network_parm['link_std'],other_sel))\n to_CCp = np.append(CCP.reshape(-1,1),np.zeros(other_sel).reshape(-1,1),1)\n for index in np.arange(other_sel):\n to_CCp[index,:] = np.random.permutation(to_CCp[index,:])\n\n\n non_sel_index = return_true_index(AX_sen[0,:]==AX_sen[1,:])\n non_sel_index = np.array(np.random.permutation(non_sel_index))\n\n 
IH[2:4,non_sel_index[np.arange(other_sel)]] = to_CCp.T\n\n #### checking selectivity - \n check_CCp_sel = sum(np.any([IH[2,:]>IH[3,:],IH[2,:]<IH[3,:]],0))\n #print('check CCp sel: '+str(check_CCp_sel))\n check_AX_sel =sum(np.any([IH[0,:]>IH[1,:],IH[0,:]<IH[1,:]],0))\n #print('check AX sel: '+str(check_AX_sel))\n \n return(IH)\n\n##################################################################################\n### Two layer model \n#### 7/30/2019 - AL\n### updated 3/2/2020 - AL\n\nclass TL(nn.Module):\n def __init__(self, network_parm,IH,W):\n super(TL, self).__init__()\n\n if network_parm['model_type']=='rec':\n \n ### define connections from sensory to hidden layere\n self.IH = IH #np.random.normal()\n\n ### define dynamics over time\n self.W = W #np.random.normal()\n\n ### initialize the bias on RNN layer\n self.bias = np.zeros(network_parm['N'])\n \n def forward(self,Vt,network_parm,current_input):\n \n ### noise\n self.noise_hidden = np.random.normal(0,network_parm['noise_level'],network_parm['N']) \n \n ### all inputs \n u = np.dot(self.W,Vt) + self.noise_hidden + self.bias + np.dot(current_input,self.IH)\n \n dV = (1/network_parm['tau'])*(transfer_func(u,func_type=network_parm['transfer_func'])-Vt)\n\n Vt_out = Vt + dV\n return(Vt_out)\n\n### this function randomly chooses test trials and returns both the index of test and train trials\n### input - cond trials should be the index of the condition in question\n### 7/31/2019 - AL\ndef return_test_train_cond(cond_trials,test_size):\n num_cond_trials = len(cond_trials)\n choose_trials_bool = np.append(np.ones(int(test_size)),np.zeros(int(num_cond_trials-test_size)))\n choose_trials_bool = np.random.permutation(choose_trials_bool)\n\n test_trials = cond_trials[choose_trials_bool==1]\n train_trials = cond_trials[choose_trials_bool==0]\n return(test_trials,train_trials)\n\ndef down_sample(condition_list,train_trial_all):\n ### downsample train set\n train_cond_list = condition_list[train_trial_all]\n train_cond,train_cond_counts = np.unique(train_cond_list,return_counts=True)\n ### check counts: \n if len(np.unique(train_cond_counts))>1:\n min_counts = np.mean(train_cond_counts)\n ### downsample\n #print('downsample training so you have equal number of trials per condition')\n for cond in train_cond:\n down_train_cond = random.sample(train_trial_all[train_cond_list==cond],min_counts)\n if cond==train_cond[0]:\n all_down_train_cond = down_train_cond\n else:\n all_down_train_cond = np.append(all_down_train_cond,down_train_cond)\n\n train_trial_all_use = all_down_train_cond\n else:\n train_trial_all_use = np.copy(train_trial_all)\n \n return(train_trial_all_use)\n\n### determine split of train test (8/2/2019 - AL)\ndef split_train_test(condition_list,test_percent_y,cond_train_labels):\n num_trials = len(condition_list)\n num_cond = len(cond_train_labels)\n \n test_size = num_trials*test_percent_y\n test_cond_size = np.round(test_size/num_cond)\n test_size = num_cond*test_cond_size #num_trial_types*test_cond_size\n\n all_trials = np.arange(num_trials)\n\n for cond in cond_train_labels:\n cond_trials = all_trials[condition_list==cond]\n test_trials,train_trials = return_test_train_cond(cond_trials,test_size)\n\n if cond==cond_train_labels[0]:\n test_trial_all = test_trials\n train_trial_all = train_trials\n else:\n test_trial_all = np.append(test_trial_all,test_trials)\n train_trial_all = np.append(train_trial_all,train_trials)\n\n #if run==0:\n # print('held out test set counts: 
'+str(np.unique(condition_list[test_trial_all],return_counts=True)))\n\n ### downsample train set\n train_trial_all_use = down_sample(condition_list,train_trial_all)\n\n #if run==0:\n # print('train set counts: '+str(np.unique(condition_list[train_trial_all_use],return_counts=True)))\n return(test_trial_all,train_trial_all_use)\n\n\n#### return a binary version of the condition list based on trials we want to group together\n#### see cond_comp_dict - 8/5/2019 - AL\n#### - updated 4/2/2020 - AL - jjust made paradigm_type = 'AC_seq'\n\ndef return_bin_condition_list(condition_list,current_clf_name,cond_comp_dict,paradigm_type='AC_seq'):\n condition_list_bin = np.zeros(condition_list.shape)\n\n for bin_name in np.arange(2):\n bin_conds = cond_comp_dict[paradigm_type][current_clf_name][bin_name]\n for bc in bin_conds:\n\n if bc==bin_conds[0]:\n bin_list = np.expand_dims(condition_list==bc,1)\n else:\n bin_list = np.append(bin_list,np.expand_dims(condition_list==bc,1),1)\n\n condition_list_bin[np.any(bin_list,1)] = bin_name\n return(condition_list_bin)\n\n#### train classifier and get accuracy with AUC on test set - 8/2/2019 - AL\n#### make sure that condition_list is now in binary format (all conditions either 0 or 1)\ndef clf_train_test(test_trial_all,train_trial_all_use,clf_class,\\\n opt_params,condition_list_bin,response_all,tp_win=1):\n \n num_trials,num_cells,num_tp = response_all.shape\n num_tp_clf = num_tp-tp_win+1\n ############################\n all_tp_clf = {}\n all_tp_auc = np.zeros(num_tp_clf)\n for tp_train in np.arange(num_tp_clf):\n\n y_train = condition_list_bin[train_trial_all_use]\n X_train = np.mean(response_all[train_trial_all_use,:,tp_train:tp_train+tp_win],2)\n\n y_test = condition_list_bin[test_trial_all]\n X_test = np.mean(response_all[test_trial_all,:,tp_train:tp_train+tp_win],2)\n\n clf_c = clf_class(**opt_params).fit(X_train, y_train)\n all_tp_clf[tp_train] = clf_c\n \n y_score = clf_c.decision_function(X_test)\n #print(y_score.shape)\n fpr, tpr, _ = roc_curve(y_test, y_score,pos_label=1)\n all_tp_auc[tp_train] = auc(fpr, tpr)\n\n return(all_tp_clf,all_tp_auc)\n\n\n#### zscore reponses (looking for categories of cells) - 7/31/2019 - AL\n### add condition list binary (9/16/2019 - AL)\n### updated - 4/3/2020 - AL\ndef zscore_activity(response_all,condition_list,condition_list_bin,network_parm,num_shuffles = 1000):\n \n num_trials,num_cells,num_tp = response_all.shape\n\n #### downsample conditions (based on full trial types) if necessay (7/31/2019)\n use_trials = down_sample(condition_list,np.arange(num_trials))\n response_use = response_all[use_trials]\n\n ### use binary condition labels \n cond_list_use = condition_list_bin[use_trials]\n\n ### zscored firing rate differences \n num_tp_win = network_parm['num_tp']-network_parm['tp_win_zscore']+1\n zscore_diff = np.zeros((num_tp_win,num_cells))\n \n for tp in np.arange(num_tp_win):\n \n cond0 = np.nanmean(response_use[cond_list_use==0,:,tp:tp+network_parm['tp_win_zscore']],2)\n cond1 = np.nanmean(response_use[cond_list_use==1,:,tp:tp+network_parm['tp_win_zscore']],2)\n cell_diff = np.mean(cond0,0)-np.mean(cond1,0)\n\n cell_diff_shuffle = np.zeros((num_shuffles,num_cells))\n \n #print('shuffle_test')\n for shuffle in np.arange(num_shuffles):\n shuffle_cond_list = np.random.permutation(np.copy(cond_list_use))\n cond0 = np.nanmean(response_use[shuffle_cond_list==0,:,tp:tp+network_parm['tp_win_zscore']],2)\n cond1 = np.nanmean(response_use[shuffle_cond_list==1,:,tp:tp+network_parm['tp_win_zscore']],2)\n 
cell_diff_shuffle[shuffle,:] = np.mean(cond0,0)-np.mean(cond1,0)\n\n #print(np.std(cell_diff_shuffle,0))\n std_hold = np.std(cell_diff_shuffle,0)\n mean_hold = np.mean(cell_diff_shuffle,0)\n \n if np.any(std_hold==0):\n zscore_diff[tp,std_hold==0] = (cell_diff[std_hold==0] - mean_hold[std_hold==0])\n zscore_diff[tp,std_hold!=0] = (cell_diff[std_hold!=0] - mean_hold[std_hold!=0])\\\n /std_hold[std_hold!=0]\n print('zscore - std = zero')\n sys.stdout.flush()\n else:\n zscore_diff[tp,:] = (cell_diff - mean_hold)/std_hold\n \n return(zscore_diff)\n\n ##############################################################################\n \n#### return the periods of zscore for the counts calculation \n### 4/3/2020 - Al\ndef return_zscore_blocks(zscore_diff,network_parm):\n stim_period_starts = np.arange(network_parm['num_stim'])*network_parm['num_tp_stim']\n stim_period_ends = stim_period_starts + network_parm['num_tp_on']\n\n zscore_diff_periods = np.zeros((network_parm['num_stim'],network_parm['N']))\n for stim in np.arange(network_parm['num_stim']):\n zscore_diff_periods[stim,:] = np.mean(zscore_diff[stim_period_starts[stim]:\\\n stim_period_ends[stim]],0)\n \n return(zscore_diff_periods)\n\n\n#### create cell counts table - 7/31/2019 - AL\ndef create_cell_counts_table(zscore_diff):\n \n z_threshold = -ndtri(.05/2)\n cell_type_counts = {}\n cell_type_counts['AA'] = sum(np.all([zscore_diff[0,:]>=z_threshold,zscore_diff[1,:]>=z_threshold],0))\n cell_type_counts['0A'] = sum(np.all([abs(zscore_diff[0,:])<=z_threshold,zscore_diff[1,:]>=z_threshold],0))\n cell_type_counts['XA'] = sum(np.all([zscore_diff[0,:]<=-z_threshold,zscore_diff[1,:]>=z_threshold],0))\n\n cell_type_counts['A0'] = sum(np.all([zscore_diff[0,:]>=z_threshold,abs(zscore_diff[1,:])<=z_threshold],0))\n cell_type_counts['00'] = sum(np.all([abs(zscore_diff[0,:])<=z_threshold,abs(zscore_diff[1,:])<=z_threshold],0))\n cell_type_counts['X0'] = sum(np.all([zscore_diff[0,:]<=-z_threshold,abs(zscore_diff[1,:])<=z_threshold],0))\n\n cell_type_counts['AX'] = sum(np.all([zscore_diff[0,:]>=z_threshold,zscore_diff[1,:]<=-z_threshold],0))\n cell_type_counts['0X'] = sum(np.all([abs(zscore_diff[0,:])<=z_threshold,zscore_diff[1,:]<=-z_threshold],0))\n cell_type_counts['XX'] = sum(np.all([zscore_diff[0,:]<=-z_threshold,zscore_diff[1,:]<=-z_threshold],0))\n\n counts = [cell_type_counts['AA'],cell_type_counts['0A'],cell_type_counts['XA'],cell_type_counts['A0'],\\\n cell_type_counts['00'],cell_type_counts['X0'],cell_type_counts['AX'],cell_type_counts['0X'],cell_type_counts['XX']]\n return(counts,cell_type_counts)\n\n\n#### calculate angle\n##### NOTE - SAMPLES X FEATURES (so it should be 2 x neurons) 8/29/2019 - AL\ndef cal_angle(data):\n cos_sim = cosine_similarity(data)\n angle = np.degrees(np.arccos(cos_sim[0][1]))\n return(angle,cos_sim[0][1])\n\n### calculate the angle between axes/classifiers\n### (8/30/2019 - AL)\n### updated 4/6/2020 - AL\ndef cal_angle_axes(angle,all_clf,comp_compare,dict_axes,p_index,run): \n for comp in comp_compare:\n\n clf_0 = all_clf[dict_axes[comp[0]]['name']][dict_axes[comp[0]]['tp']].coef_[0]\n clf_1 = all_clf[dict_axes[comp[1]]['name']][dict_axes[comp[1]]['tp']].coef_[0]\n\n data = np.append(clf_0.reshape(-1,1),clf_1.reshape(-1,1),1).T\n\n ### store the angle in the run comparison dictionary \n angle['_'.join(comp)][p_index,run],cos_sim = cal_angle(data)\n return(angle)\n\n\n#### To understand how interference and associations play together (8/29/2019)\n#### calculate the cross auc (8/30/2019 - AL)\n#### updated - 4/6/2020 - 
Al \n\n#### cross_auc[clf_train_name][test_name] --> update each time\n#### clf_comp - clf/responses to compare\n#### num_tp \n#### all_clf - current runs trained clfs\n#### condition_list - conditions on this run\n#### paradigm_type \n#### cond_comp_dict[paradigm_type][cond] - explains how for each paradigm type, trial types are broken down\n#### test_trial_all - trial index to use\ndef cross_auc_clf(cross_auc,clf_comp,num_tp,all_clf,condition_list,\\\n cond_comp_dict,test_trial_all,p_index,run,network_parm,response_all):\n #### clf train\n for clf_type in clf_comp:\n\n #### clf train period\n for tp_train in np.arange(network_parm['num_tp']-network_parm['tp_win']+1):\n\n clf_train_name = clf_type+str(tp_train)\n clf_c = all_clf[clf_type][tp_train]\n\n ##################################\n #### test response comparison type\n for current_cond_type in clf_comp:\n\n condition_list_bin = return_bin_condition_list(condition_list,current_cond_type,\\\n cond_comp_dict,paradigm_type='AC_seq')\n #### test response time period (tp) \n for tp_test in np.arange(network_parm['num_tp']-network_parm['tp_win']+1):\n\n test_name = current_cond_type+str(tp_test)\n\n y_test = condition_list_bin[test_trial_all]\n X_test = np.mean(response_all[test_trial_all,:,tp_test:tp_test+network_parm['tp_win']],2)\n y_score = clf_c.decision_function(X_test)\n fpr, tpr, _ = roc_curve(y_test, y_score,pos_label=1)\n cross_auc[clf_train_name][test_name][p_index,run] = auc(fpr, tpr) \n \n return(cross_auc)\n\n\n#### POSTDICTION calculate how AX sen /AX mem responds during unexpected verses expected trials\n#### (8/30/2019 - AL)\n#### updated 4/6/2020 - AL\n### TRIALS [0:AC,1:AC*,2:XC,3:XC*]\n\ndef postdiction_test(eu_auc,clf_comp,all_clf,exp_comp,exp_dict,condition_list,\\\n test_trial_all,response_all,tp_test,network_parm,cond_comp_dict,p_index,run):\n #tp_test = 1 ### only look at the test\n\n #### clf train\n for clf_type in clf_comp:\n\n #### clf train period\n for tp_train in np.arange(network_parm['num_tp']-network_parm['tp_win']+1):\n\n clf_train_name = clf_type+str(tp_train)\n clf_c = all_clf[clf_type][tp_train]\n\n ##################################\n #### test response comparison type\n for current_cond_type in clf_comp:\n\n for exp_comp in ['exp','unexp']:\n\n test_name = current_cond_type+str(tp_test)+exp_comp\n\n condition_list_bin = return_bin_condition_list(condition_list,current_cond_type,\\\n cond_comp_dict,paradigm_type='AC_seq')\n\n ### only use conditions that are expected or unexpected \n current_cond_use = np.append(exp_dict[exp_comp][current_cond_type][0],\\\n exp_dict[exp_comp][current_cond_type][1])\n for cc in current_cond_use:\n tf_c = np.array(condition_list==cc).reshape(-1,1)\n if cc == current_cond_use[0]:\n tf = tf_c\n else:\n tf = np.append(tf,tf_c,1)\n use_trials = np.any(tf,1)\n\n #### trials from test_trial_all to use \n sub_test_trial_all = test_trial_all[use_trials[test_trial_all]]\n\n y_test = condition_list_bin[sub_test_trial_all]\n X_test = np.mean(response_all[sub_test_trial_all,:,tp_test:tp_test+network_parm['tp_win']],2)\n y_score = clf_c.decision_function(X_test)\n fpr, tpr, _ = roc_curve(y_test, y_score,pos_label=1)\n\n eu_auc[clf_train_name][test_name][p_index,run] = auc(fpr, tpr) \n \n return(eu_auc)\n"
] | [
[
"numpy.dot",
"numpy.expand_dims",
"numpy.random.multivariate_normal",
"numpy.round",
"numpy.max",
"numpy.all",
"numpy.mean",
"numpy.any",
"numpy.nanmean",
"numpy.exp",
"numpy.unique",
"numpy.arange",
"numpy.copy",
"numpy.std",
"numpy.zeros",
"numpy.log",
"numpy.min",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.arccos",
"sklearn.metrics.roc_curve",
"scipy.special.ndtri",
"numpy.append",
"sklearn.metrics.auc",
"numpy.array",
"numpy.tile",
"numpy.ones",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marksibrahim/CrypTen | [
"4e5b13487d7f6ceaa4f06e86f0b260e0761960fd",
"4e5b13487d7f6ceaa4f06e86f0b260e0761960fd"
] | [
"crypten/__init__.py",
"test/test_common.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n__version__ = \"0.1.0\"\n\nimport copy\nimport warnings\n\nimport crypten.common\nimport crypten.communicator as comm\nimport crypten.mpc # noqa: F401\nimport crypten.nn # noqa: F401\nimport torch\n\n# other imports:\nfrom . import debug\nfrom .cryptensor import CrypTensor\n\n\n# functions controlling autograd:\nno_grad = CrypTensor.no_grad\nenable_grad = CrypTensor.enable_grad\nset_grad_enabled = CrypTensor.set_grad_enabled\n\n\ndef init(party_name=None, device=None):\n \"\"\"\n Initialize CrypTen. It will initialize communicator, setup party\n name for file save / load, and setup seeds for Random Number Generatiion.\n By default the function will initialize a set of RNG generators on CPU.\n If torch.cuda.is_available() returns True, it will initialize an additional\n set of RNG generators on GPU. Users can specify the GPU device the generators are\n initialized with device.\n\n Args:\n party_name (str): party_name for file save and load, default is None\n device (int, str, torch.device): Specify device for RNG generators on\n GPU. Must be a GPU device.\n \"\"\"\n # Return and raise warning if initialized\n if comm.is_initialized():\n warnings.warn(\"CrypTen is already initialized.\", RuntimeWarning)\n return\n\n # Initialize communicator\n comm._init(use_threads=False, init_ttp=crypten.mpc.ttp_required())\n\n # Setup party name for file save / load\n if party_name is not None:\n comm.get().set_name(party_name)\n\n # Setup seeds for Random Number Generation\n if comm.get().get_rank() < comm.get().get_world_size():\n _setup_przs(device=device)\n if crypten.mpc.ttp_required():\n crypten.mpc.provider.ttp_provider.TTPClient._init()\n\n\ndef init_thread(rank, world_size):\n comm._init(use_threads=True, rank=rank, world_size=world_size)\n _setup_przs()\n\n\ndef uninit():\n return comm.uninit()\n\n\ndef is_initialized():\n return comm.is_initialized()\n\n\ndef print_communication_stats():\n comm.get().print_communication_stats()\n\n\ndef reset_communication_stats():\n comm.get().reset_communication_stats()\n\n\n# set tensor type to be used for CrypTensors:\n__CRYPTENSOR_TYPES__ = {\"mpc\": crypten.mpc.MPCTensor}\n__DEFAULT_CRYPTENSOR_TYPE__ = \"mpc\"\n\n\ndef register_cryptensor(name):\n \"\"\"Registers a custom :class:`CrypTensor` subclass.\n\n This decorator allows the user to instantiate a subclass of `CrypTensor`\n from Python cpde, even if the class itself is not part of CrypTen. To use\n it, apply this decorator to a `CrypTensor` subclass, like this:\n\n .. 
code-block:: python\n\n @crypten.register_cryptensor('my_cryptensor')\n class MyCrypTensor(crypten.CrypTensor):\n ...\n \"\"\"\n\n def register_cryptensor_cls(cls):\n if name in __CRYPTENSOR_TYPES__:\n raise ValueError(\n \"Cannot register duplicate CrypTensor type: \\\n tensor type {} already exists.\".format(\n name\n )\n )\n if not issubclass(cls, CrypTensor):\n raise ValueError(\n \"Registered tensor ({}: {}) must extend \\\n CrypTensor\".format(\n name, cls.__name__\n )\n )\n __CRYPTENSOR_TYPES__[name] = cls\n return cls\n\n return register_cryptensor_cls\n\n\ndef set_default_cryptensor_type(cryptensor_type):\n \"\"\"Sets the default type used to create `CrypTensor`s.\"\"\"\n global __DEFAULT_CRYPTENSOR_TYPE__\n if cryptensor_type not in __CRYPTENSOR_TYPES__:\n raise ValueError(\"CrypTensor type %s does not exist.\" % cryptensor_type)\n __DEFAULT_CRYPTENSOR_TYPE__ = cryptensor_type\n\n\ndef get_default_cryptensor_type():\n \"\"\"Gets the default type used to create `CrypTensor`s.\"\"\"\n return __DEFAULT_CRYPTENSOR_TYPE__\n\n\ndef get_cryptensor_type(tensor):\n \"\"\"Gets the type name of the specified `tensor` `CrypTensor`.\"\"\"\n if not isinstance(tensor, CrypTensor):\n raise ValueError(\n \"Specified tensor is not a CrypTensor: {}\".format(type(tensor))\n )\n for name, cls in __CRYPTENSOR_TYPES__.items():\n if isinstance(tensor, cls):\n return name\n raise ValueError(\"Unregistered CrypTensor type: {}\".format(type(tensor)))\n\n\ndef cryptensor(*args, cryptensor_type=None, **kwargs):\n \"\"\"\n Factory function to return encrypted tensor of given `cryptensor_type`. If no\n `cryptensor_type` is specified, the default type is used.\n \"\"\"\n\n # determine CrypTensor type to use:\n if cryptensor_type is None:\n cryptensor_type = get_default_cryptensor_type()\n if cryptensor_type not in __CRYPTENSOR_TYPES__:\n raise ValueError(\"CrypTensor type %s does not exist.\" % cryptensor_type)\n\n # create CrypTensor:\n return __CRYPTENSOR_TYPES__[cryptensor_type](*args, **kwargs)\n\n\ndef is_encrypted_tensor(obj):\n \"\"\"\n Returns True if obj is an encrypted tensor.\n \"\"\"\n return isinstance(obj, CrypTensor)\n\n\ndef _setup_przs(device=None):\n \"\"\"\n Generate shared random seeds to generate pseudo-random sharings of\n zero. The random seeds are shared such that each process shares\n one seed with the previous rank process and one with the next rank.\n This allows for the generation of `n` random values, each known to\n exactly two of the `n` parties.\n\n For arithmetic sharing, one of these parties will add the number\n while the other subtracts it, allowing for the generation of a\n pseudo-random sharing of zero. (This can be done for binary\n sharing using bitwise-xor rather than addition / subtraction)\n \"\"\"\n # Initialize RNG Generators\n comm.get().g0 = torch.Generator()\n comm.get().g1 = torch.Generator()\n\n device = \"cuda\" if device is None else device\n device = torch.device(device)\n assert device.type == \"cuda\", \"Must be a GPU device\"\n\n if torch.cuda.is_available():\n comm.get().g0_cuda = torch.Generator(device=device)\n comm.get().g1_cuda = torch.Generator(device=device)\n\n # Generate random seeds for Generators\n # NOTE: Chosen seed can be any number, but we choose as a random 64-bit\n # integer here so other parties cannot guess its value.\n\n # We sometimes get here from a forked process, which causes all parties\n # to have the same RNG state. Reset the seed to make sure RNG streams\n # are different in all the parties. 
We use numpy's random here since\n # setting its seed to None will produce different seeds even from\n # forked processes.\n import numpy\n\n numpy.random.seed(seed=None)\n next_seed = torch.tensor(numpy.random.randint(-(2 ** 63), 2 ** 63 - 1, (1,)))\n prev_seed = torch.LongTensor([0]) # placeholder\n\n # Send random seed to next party, receive random seed from prev party\n world_size = comm.get().get_world_size()\n rank = comm.get().get_rank()\n if world_size >= 2: # Otherwise sending seeds will segfault.\n next_rank = (rank + 1) % world_size\n prev_rank = (next_rank - 2) % world_size\n\n req0 = comm.get().isend(tensor=next_seed, dst=next_rank)\n req1 = comm.get().irecv(tensor=prev_seed, src=prev_rank)\n\n req0.wait()\n req1.wait()\n else:\n prev_seed = next_seed\n\n # Seed Generators\n comm.get().g0.manual_seed(next_seed.item())\n comm.get().g1.manual_seed(prev_seed.item())\n\n # Create global generator\n global_seed = torch.tensor(numpy.random.randint(-(2 ** 63), 2 ** 63 - 1, (1,)))\n global_seed = comm.get().broadcast(global_seed, 0)\n comm.get().global_generator = torch.Generator()\n comm.get().global_generator.manual_seed(global_seed.item())\n\n\ndef load_from_party(\n f=None,\n preloaded=None,\n encrypted=False,\n dummy_model=None,\n src=0,\n load_closure=torch.load,\n **kwargs\n):\n \"\"\"\n Loads an object saved with `torch.save()` or `crypten.save_from_party()`.\n\n Args:\n f: a file-like object (has to implement `read()`, `readline()`,\n `tell()`, and `seek()`), or a string containing a file name\n preloaded: Use the preloaded value instead of loading a tensor/model from f.\n encrypted: Determines whether crypten should load an encrypted tensor\n or a plaintext torch tensor.\n dummy_model: Takes a model architecture to fill with the loaded model\n (on the `src` party only). Non-source parties will return the\n `dummy_model` input (with data unchanged). Loading a model will\n assert the correctness of the model architecture provided against\n the model loaded. This argument is ignored if the file loaded is\n a tensor. (deprecated)\n src: Determines the source of the tensor. If `src` is None, each\n party will attempt to read in the specified file. If `src` is\n specified, the source party will read the tensor from `f` and it\n will broadcast it to the other parties\n load_closure: Custom load function that matches the interface of `torch.load`,\n to be used when the tensor is saved with a custom save function in\n `crypten.save_from_party`. 
def load_from_party(
    f=None,
    preloaded=None,
    encrypted=False,
    dummy_model=None,
    src=0,
    load_closure=torch.load,
    **kwargs
):
    """
    Loads an object saved with `torch.save()` or `crypten.save_from_party()`.

    Args:
        f: a file-like object (has to implement `read()`, `readline()`,
            `tell()`, and `seek()`), or a string containing a file name
        preloaded: use the preloaded value instead of loading a tensor/model
            from f
        encrypted: determines whether crypten should load an encrypted tensor
            or a plaintext torch tensor
        dummy_model: takes a model architecture to fill with the loaded model
            (on the `src` party only). Non-source parties will return the
            `dummy_model` input (with data unchanged). Loading a model will
            assert the correctness of the model architecture provided against
            the model loaded. This argument is ignored if the file loaded is
            a tensor. (deprecated)
        src: determines the source of the tensor. If `src` is None, each
            party will attempt to read in the specified file. If `src` is
            specified, the source party will read the tensor from `f` and
            broadcast it to the other parties.
        load_closure: custom load function that matches the interface of
            `torch.load`, to be used when the tensor is saved with a custom
            save function in `crypten.save_from_party`. Additional kwargs are
            passed on to the closure.
    """
    if dummy_model is not None:
        warnings.warn(
            "dummy_model is deprecated and no longer required", DeprecationWarning
        )
    if encrypted:
        raise NotImplementedError("Loading encrypted tensors is not yet supported")
    else:
        assert isinstance(src, int), "Load failed: src argument must be an integer"
        assert (
            src >= 0 and src < comm.get().get_world_size()
        ), "Load failed: src must be in [0, world_size)"

        # source party
        if comm.get().get_rank() == src:
            assert (f is None and (preloaded is not None)) or (
                (f is not None) and preloaded is None
            ), "Exactly one of f and preloaded must not be None"

            if f is None:
                result = preloaded
            if preloaded is None:
                result = load_closure(f, **kwargs)

            # Zero out the tensors / modules to hide loaded data from broadcast
            if torch.is_tensor(result):
                result_zeros = result.new_zeros(result.size())
            elif isinstance(result, torch.nn.Module):
                result_zeros = copy.deepcopy(result)
                result_zeros.set_all_parameters(0)
            else:
                # signal failure to the non-source parties before raising
                result = comm.get().broadcast_obj(-1, src)
                raise TypeError("Unrecognized load type %s" % type(result))

            comm.get().broadcast_obj(result_zeros, src)

        # non-source party
        else:
            result = comm.get().broadcast_obj(None, src)
            if isinstance(result, int) and result == -1:
                raise TypeError("Unrecognized load type from src party")

        if torch.is_tensor(result):
            result = crypten.cryptensor(result, src=src)
        # TODO: Encrypt modules before returning them
        # elif isinstance(result, torch.nn.Module):
        #     result = crypten.nn.from_pytorch(result, src=src)
        result.src = src
        return result


def load(
    f,
    preloaded=None,
    encrypted=False,
    dummy_model=None,
    src=0,
    load_closure=torch.load,
    **kwargs
):
    """
    Loads an object saved with `torch.save()` or `crypten.save_from_party()`.
    Note: this function is deprecated; please use `load_from_party` instead.
    """
    warnings.warn(
        "The current 'load' function is deprecated, and will be removed soon. "
        "To continue using current 'load' functionality, please use the "
        "'load_from_party' function instead.",
        DeprecationWarning,
    )
    return load_from_party(
        f, preloaded, encrypted, dummy_model, src, load_closure, **kwargs
    )
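
# Editor's sketch (not part of the original file): a typical single-tensor
# round trip. The file name is hypothetical; this assumes crypten.init() has
# been called in a multi-party launch and that party 0 previously ran
# torch.save(torch.randn(5), "tensor_party0.pt").
#
#     x_enc = load_from_party("tensor_party0.pt", src=0)
#     # all parties now hold shares of the same tensor; only party 0 read the file
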
def save_from_party(obj, f, src=0, save_closure=torch.save, **kwargs):
    """
    Saves a CrypTensor or PyTorch tensor to a file.

    Args:
        obj: the CrypTensor or PyTorch tensor to be saved
        f: a file-like object (has to implement `read()`, `readline()`,
            `tell()`, and `seek()`), or a string containing a file name
        src: the source party that writes data to the specified file
        save_closure: custom save function that matches the interface of
            `torch.save`, to be used when the tensor will be loaded with a
            custom load function in `crypten.load_from_party`. Additional
            kwargs are passed on to the closure.
    """
    if is_encrypted_tensor(obj):
        raise NotImplementedError("Saving encrypted tensors is not yet supported")
    else:
        assert isinstance(src, int), "Save failed: src must be an integer"
        assert (
            src >= 0 and src < comm.get().get_world_size()
        ), "Save failed: src must be an integer in [0, world_size)"

        if comm.get().get_rank() == src:
            save_closure(obj, f, **kwargs)

    # Implement barrier to avoid race conditions that require the file to exist
    comm.get().barrier()


def save(obj, f, src=0, save_closure=torch.save, **kwargs):
    """
    Saves a CrypTensor or PyTorch tensor to a file.
    Note: this function is deprecated; please use `save_from_party` instead.
    """
    warnings.warn(
        "The current 'save' function is deprecated, and will be removed soon. "
        "To continue using current 'save' functionality, please use the "
        "'save_from_party' function instead.",
        DeprecationWarning,
    )
    save_from_party(obj, f, src, save_closure, **kwargs)


def where(condition, input, other):
    """
    Return a tensor of elements selected from either `input` or `other`,
    depending on `condition`.
    """
    if is_encrypted_tensor(condition):
        return condition * input + (1 - condition) * other
    elif torch.is_tensor(condition):
        condition = condition.float()
    return input * condition + other * (1 - condition)


def cat(tensors, dim=0):
    """
    Concatenates the specified CrypTen `tensors` along dimension `dim`.
    """
    assert isinstance(tensors, list), "input to cat must be a list"
    assert all(isinstance(t, CrypTensor) for t in tensors), "inputs must be CrypTensors"
    tensor_types = [get_cryptensor_type(t) for t in tensors]
    assert all(
        ttype == tensor_types[0] for ttype in tensor_types
    ), "cannot concatenate CrypTensors with different underlying types"
    if len(tensors) == 1:
        return tensors[0]
    return type(tensors[0]).cat(tensors, dim=dim)


def stack(tensors, dim=0):
    """
    Stacks the specified CrypTen `tensors` along dimension `dim`. In contrast
    to `crypten.cat`, this adds a dimension to the result tensor.
    """
    assert isinstance(tensors, list), "input to stack must be a list"
    assert all(isinstance(t, CrypTensor) for t in tensors), "inputs must be CrypTensors"
    tensor_types = [get_cryptensor_type(t) for t in tensors]
    assert all(
        ttype == tensor_types[0] for ttype in tensor_types
    ), "cannot stack CrypTensors with different underlying types"
    if len(tensors) == 1:
        return tensors[0].unsqueeze(dim)
    return type(tensors[0]).stack(tensors, dim=dim)


def rand(*sizes, cryptensor_type=None):
    """
    Returns a tensor with elements uniformly sampled in [0, 1).
    """
    if cryptensor_type is None:
        cryptensor_type = get_default_cryptensor_type()
    return __CRYPTENSOR_TYPES__[cryptensor_type].rand(*sizes)


def bernoulli(tensor, cryptensor_type=None):
    """
    Returns a tensor with elements in {0, 1}. The i-th element of the
    output will be 1 with probability according to the i-th value of the
    input tensor.
    """
    return rand(tensor.size(), cryptensor_type=cryptensor_type) < tensor


# expose classes and functions in package:
__all__ = [
    "CrypTensor",
    "no_grad",
    "enable_grad",
    "set_grad_enabled",
    "debug",
    "init",
    "init_thread",
    "mpc",
    "nn",
    "uninit",
]
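
# Editor's illustration (not in the original file): the tensor helpers above
# mirror their torch counterparts on encrypted data. A minimal sketch,
# assuming an initialized runtime (crypten.init()):
#
#     a = crypten.cryptensor(torch.tensor([1.0, 2.0]))
#     b = crypten.cryptensor(torch.tensor([3.0, 4.0]))
#     crypten.cat([a, b]).size()                   # torch.Size([4])
#     crypten.stack([a, b]).size()                 # torch.Size([2, 2])
#     mask = crypten.cryptensor(torch.tensor([1.0, 0.0]))
#     crypten.where(mask, a, b).get_plain_text()   # tensor([1., 4.])
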
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport unittest\n\nimport crypten\nimport torch\nfrom crypten.common.util import chebyshev_series\nfrom crypten.encoder import FixedPointEncoder, nearest_integer_division\n\n\ndef get_test_tensor(max_value=10, float=False):\n \"\"\"Create simple test tensor.\"\"\"\n tensor = torch.LongTensor(list(range(max_value)))\n if float:\n tensor = tensor.float()\n return tensor\n\n\nclass TestCommon(unittest.TestCase):\n \"\"\"\n Test cases for common functionality.\n \"\"\"\n\n def _check(self, tensor, reference, msg):\n test_passed = (tensor == reference).all().item() == 1\n self.assertTrue(test_passed, msg=msg)\n\n def test_encode_decode(self):\n \"\"\"Tests tensor encoding and decoding.\"\"\"\n for float in [False, True]:\n if float:\n fpe = FixedPointEncoder(precision_bits=16)\n else:\n fpe = FixedPointEncoder(precision_bits=0)\n tensor = get_test_tensor(float=float)\n decoded = fpe.decode(fpe.encode(tensor))\n self._check(\n decoded,\n tensor,\n \"Encoding/decoding a %s failed.\" % \"float\" if float else \"long\",\n )\n\n # Make sure encoding a subclass of CrypTensor is a no-op\n crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedFirstParty)\n crypten.init()\n\n tensor = get_test_tensor(float=True)\n encrypted_tensor = crypten.cryptensor(tensor)\n encrypted_tensor = fpe.encode(encrypted_tensor)\n self._check(\n encrypted_tensor.get_plain_text(),\n tensor,\n \"Encoding an EncryptedTensor failed.\",\n )\n\n # Try a few other types.\n fpe = FixedPointEncoder(precision_bits=0)\n for dtype in [torch.uint8, torch.int8, torch.int16]:\n tensor = torch.zeros(5, dtype=dtype).random_()\n decoded = fpe.decode(fpe.encode(tensor)).type(dtype)\n self._check(decoded, tensor, \"Encoding/decoding a %s failed.\" % dtype)\n\n def test_nearest_integer_division(self):\n # test without scaling:\n scale = 1\n reference = [[-26, -25, -7, -5, -4, -1, 0, 1, 3, 4, 5, 7, 25, 26]]\n tensor = torch.LongTensor(reference)\n result = nearest_integer_division(tensor, scale)\n self._check(\n torch.LongTensor(result.tolist()),\n torch.LongTensor(reference),\n \"Nearest integer division failed.\",\n )\n\n # test with scaling:\n scale = 4\n reference = [[-6, -6, -2, -1, -1, 0, 0, 0, 1, 1, 1, 2, 6, 6]]\n result = nearest_integer_division(tensor, scale)\n self._check(\n torch.LongTensor(result.tolist()),\n torch.LongTensor(reference),\n \"Nearest integer division failed.\",\n )\n\n def test_chebyshev_series(self):\n \"\"\"Checks coefficients returned by chebyshev_series are correct\"\"\"\n for width, terms in [(6, 10), (6, 20)]:\n result = chebyshev_series(torch.tanh, width, terms)\n # check shape\n self.assertTrue(result.shape == torch.Size([terms]))\n # check terms\n self.assertTrue(result[0] < 1e-4)\n self.assertTrue(torch.isclose(result[-1], torch.tensor(3.5e-2), atol=1e-1))\n\n def test_config_managers(self):\n \"\"\"Checks setting configuartion with config manager works\"\"\"\n # Set the config directly\n crypten.mpc.config.exp_iterations = 8\n self.assertTrue(crypten.mpc.config.exp_iterations == 8)\n\n # Set with a context manager\n with crypten.mpc.ConfigManager(\"exp_iterations\", 3):\n self.assertTrue(crypten.mpc.config.exp_iterations == 3)\n self.assertTrue(crypten.mpc.config.exp_iterations == 8)\n\n crypten.mpc.set_config(crypten.mpc.MPCConfig(exp_iterations=5))\n 
    def test_nearest_integer_division(self):
        # test without scaling:
        scale = 1
        reference = [[-26, -25, -7, -5, -4, -1, 0, 1, 3, 4, 5, 7, 25, 26]]
        tensor = torch.LongTensor(reference)
        result = nearest_integer_division(tensor, scale)
        self._check(
            torch.LongTensor(result.tolist()),
            torch.LongTensor(reference),
            "Nearest integer division failed.",
        )

        # test with scaling:
        scale = 4
        reference = [[-6, -6, -2, -1, -1, 0, 0, 0, 1, 1, 1, 2, 6, 6]]
        result = nearest_integer_division(tensor, scale)
        self._check(
            torch.LongTensor(result.tolist()),
            torch.LongTensor(reference),
            "Nearest integer division failed.",
        )

    def test_chebyshev_series(self):
        """Checks that coefficients returned by chebyshev_series are correct."""
        for width, terms in [(6, 10), (6, 20)]:
            result = chebyshev_series(torch.tanh, width, terms)
            # check shape
            self.assertTrue(result.shape == torch.Size([terms]))
            # check terms
            self.assertTrue(result[0] < 1e-4)
            self.assertTrue(torch.isclose(result[-1], torch.tensor(3.5e-2), atol=1e-1))

    def test_config_managers(self):
        """Checks that setting configuration with the config manager works."""
        # set the config directly
        crypten.mpc.config.exp_iterations = 8
        self.assertTrue(crypten.mpc.config.exp_iterations == 8)

        # set with a context manager; the previous value is restored on exit
        with crypten.mpc.ConfigManager("exp_iterations", 3):
            self.assertTrue(crypten.mpc.config.exp_iterations == 3)
        self.assertTrue(crypten.mpc.config.exp_iterations == 8)

        # set via set_config; both module-level config references are updated
        crypten.mpc.set_config(crypten.mpc.MPCConfig(exp_iterations=5))
        self.assertTrue(crypten.mpc.config.exp_iterations == 5)
        self.assertTrue(crypten.mpc.mpc.config.exp_iterations == 5)


if __name__ == "__main__":
    unittest.main()
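
# Editor's sketch (names taken from the tests above; not in the original
# file): ConfigManager is meant for scoped overrides of an approximation
# setting, e.g. raising the iteration count of the iterative exp
# approximation for one computation, with the old value restored on exit.
# The assumption that ``exp`` consults ``exp_iterations`` is mine:
#
#     with crypten.mpc.ConfigManager("exp_iterations", 16):
#         y = crypten.cryptensor(torch.ones(3)).exp()
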
] | [
[
"torch.Generator",
"torch.LongTensor",
"numpy.random.seed",
"torch.is_tensor",
"torch.cuda.is_available",
"torch.device",
"numpy.random.randint"
],
[
"torch.Size",
"torch.tensor",
"torch.LongTensor",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |