repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
Petelr/ml2019 | [
"bd02f48ab35ed531954609a68da7d4aa5cae6e75"
] | [
"Submittions/proj1_4.py"
] | [
"import numpy as np \nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal \n\nmean = [2,5]\ncov = [[1,-0.1],[-0.1,1]]\nX = np.random.multivariate_normal(mean,cov,size=10)\nX_hat = np.empty(X.shape)\nX_hat[:,0] = (X[:,0]-np.mean(X[:,0]))/np.std(X[:,0])\nX_hat[:,1] = (X[:,1]-np.mean(X[:,1]))/np.std(X[:,1])\n\nfig1 = plt.figure()\nplot1 = fig1.add_subplot(111)\nplot1.scatter(X[:,0],X[:,1], c='blue')\nfig1.show()\n\nfig2 = plt.figure()\nplot2 = fig2.add_subplot(111)\nplot2.scatter(X_hat[:,0],X_hat[:,1], c='red')\nfig2.show()\n"
] | [
[
"numpy.random.multivariate_normal",
"numpy.std",
"numpy.mean",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yandexdataschool/inverse-problem-intensive | [
"490b87d4309a641e4db7230ba6dff60f1972a365"
] | [
"lgso/experience_replay.py"
] | [
"import torch\nimport numpy as np\n\nclass ExperienceReplay:\n def __init__(self, psi_dim, x_dim, y_dim, device, sphere_cut=False):\n self._psi_dim = psi_dim\n self._x_dim = x_dim\n self._y_dim = y_dim\n self._device = device\n self._sphere_cut = sphere_cut\n self._y = torch.zeros(0, self._y_dim).float().to('cpu')\n self._condition = torch.zeros(0, self._x_dim + self._psi_dim).float().to('cpu')\n\n def add(self, y, condition):\n if y is None and condition is None:\n y = torch.zeros(0, self._y_dim).float()\n condition = torch.zeros(0, self._x_dim + self._psi_dim).float()\n self._y = torch.cat([self._y, y.to('cpu').detach().clone()], dim=0)\n self._condition = torch.cat([self._condition, condition.to('cpu').detach().clone()], dim=0)\n return self\n\n def extract(self, psi, step):\n psi = psi.float().to('cpu').detach().clone()\n\n if self._sphere_cut:\n mask = ((self._condition[:, :self._psi_dim] - psi).pow(2).sum(dim=1).sqrt() < step) # sphere\n else:\n mask = ((self._condition[:, :self._psi_dim] - psi).abs() < step).all(dim=1)\n\n y = (self._y[mask]).to(self._device)\n condition = (self._condition[mask]).to(self._device)\n return y.float(), condition.float()"
] | [
[
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Daniil-Osokin/openvino_training_extensions | [
"ba60480d546ee95e73472d1fde3cd1e49925f2af",
"ba60480d546ee95e73472d1fde3cd1e49925f2af"
] | [
"tensorflow_toolkit/lpr/export.py",
"tensorflow_toolkit/ssd_detector/export.py"
] | [
"from __future__ import print_function\nimport argparse\nimport os\nimport subprocess\n\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.python.framework import graph_io\n\nfrom lpr.trainer import inference\nfrom utils.helpers import load_module\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Export model in IE format')\n parser.add_argument('path_to_config', help='Path to a config.py')\n parser.add_argument('mo', help=\"Path to model optimizer 'mo.py' script\")\n return parser.parse_args()\n\n\ndef execute_tfmo(mo_py_path, frozen, input_shape, batch_size, precision):\n\n assert frozen.endswith('.pb.frozen')\n assert batch_size > 0\n assert precision in ('FP32', 'FP16')\n folder = os.path.dirname(frozen)\n name = os.path.splitext(frozen)[0].replace('.pb', '')\n\n input_shape = [batch_size] + list(input_shape[1:])\n\n\n params = (\n 'python3',\n '-u',\n mo_py_path or 'mo.py',\n '--framework={}'.format('tf'),\n '--reverse_input_channels',\n '--scale={}'.format(255),\n '--input_shape=[{}]'.format(','.join([str(shape) for shape in input_shape])),\n '--input={}'.format('input'),\n '--output={}'.format('d_predictions'),\n '--input_model={}'.format(frozen),\n '--output_dir={}'.format(folder),\n '--model_name={}'.format(name),\n '--data_type={}'.format(precision)\n )\n\n if mo_py_path:\n subprocess.call([p for p in params if p])\n else:\n print('\\nPath to `mo.py` is not specified. Please provide correct path to Model Optimizer `mo.py` script')\n\n# pylint: disable=too-many-locals\ndef export(config, tfmo, batch_size=1, precision='FP32'):\n shape = (None,) + tuple(config.input_shape) # NHWC, dynamic batch\n graph = tf.Graph()\n with graph.as_default():\n with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=False):\n input_tensor = tf.placeholder(dtype=tf.float32, shape=shape, name='input')\n prob = inference(config.rnn_cells_num, input_tensor, config.num_classes)\n prob = tf.transpose(prob, (1, 0, 2))\n data_length = tf.fill([tf.shape(prob)[1]], tf.shape(prob)[0])\n result = tf.nn.ctc_greedy_decoder(prob, data_length, merge_repeated=True)\n predictions = tf.to_int32(result[0][0])\n tf.sparse_to_dense(predictions.indices, [tf.shape(input_tensor, out_type=tf.int64)[0], config.max_lp_length],\n predictions.values, default_value=-1, name='d_predictions')\n init = tf.initialize_all_variables()\n saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)\n\n sess = tf.Session(graph=graph)\n sess.run(init)\n checkpoints_dir = config.model_dir\n latest_checkpoint = tf.train.latest_checkpoint(checkpoints_dir)\n saver.restore(sess, latest_checkpoint)\n frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, [\"d_predictions\"])\n tf.train.write_graph(sess.graph, os.path.join(config.model_dir, 'ie_model/'), 'graph.pbtxt', as_text=True)\n path_to_frozen_model = graph_io.write_graph(frozen, os.path.join(config.model_dir, 'ie_model/'),\n 'graph.pb.frozen', as_text=False)\n execute_tfmo(tfmo, path_to_frozen_model, shape, batch_size, precision)\n\n\ndef main(_):\n args = parse_args()\n cfg = load_module(args.path_to_config)\n export(cfg, args.mo, 1, 'FP32') #set batch_size and precision\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main)\n",
"from __future__ import print_function\nimport argparse\nimport os\nimport subprocess\n\nimport tensorflow as tf\nfrom utils.helpers import dump_frozen_graph, load_module\nfrom ssd_detector.networks.mobilenet_ssd import MobileNetSSD\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Export model in IE format')\n parser.add_argument('path_to_config', help='Path to a config.py')\n parser.add_argument('mo', help=\"Path to model optimizer 'mo.py' script\")\n return parser.parse_args()\n\n\ndef execute_tfmo(mo_py_path, frozen, config, input_shape=None, **kvwags):\n \"\"\"\n This function dumps json configuration file and executes Model Optimizer for TensorFlow.\n As results it has converted to Inference Engine IR model in the same folder.\n\n :param mo_py_path: path to Model Optimizer mo.py\n :param frozen: path to frozen pb-file to convert to IE\n :param config: dictionary generated by SSD_Base.get_config_for_tfmo()\n :param input_shape: input shape to pass to Model Optimizer. This mostly need to overwrite batch size dimension\n \"\"\"\n assert frozen.endswith('.pb.frozen')\n folder = os.path.dirname(frozen)\n name = os.path.splitext(frozen)[0].replace('.pb', '')\n\n json = frozen.replace('.pb.frozen', '.tfmo.json')\n with open(json, mode='w') as file:\n file.write(config['json'])\n\n scale = kvwags.get('scale', None)\n mean_values = kvwags.get('mean_values', None)\n\n params = (\n 'python3', '-u', mo_py_path or 'mo.py',\n '--framework={}'.format('tf'),\n '--input_model={}'.format(frozen),\n '--output_dir={}'.format(folder),\n '--output={}'.format(','.join(config['cut_points'])),\n '--input_shape=[{}]'.format(','.join([str(shape) for shape in input_shape])) if input_shape else '',\n '--scale={}'.format(scale) if scale else '',\n '--mean_values=[{}]'.format(','.join([str(value) for value in mean_values])) if mean_values else '',\n '--model_name={}'.format(name),\n '--tensorflow_use_custom_operations_config={}'.format(json),\n )\n\n if mo_py_path:\n subprocess.call([p for p in params if p])\n else:\n print('\\nPath to `mo.py` is not specified. 
Please provide correct path to Model Optimizer `mo.py` script')\n\n\ndef convert_to_ie(ssd, session, output_folder, mo_py_path, batch_size=None, **kvargs):\n \"\"\"\n Single high-level function that converts current graph to IE model format\n\n :param ssd: Instance derived from SSDBase\n :param session: session with graph and initialized variables\n :param output_folder: absolute path to folder where to dump intermediate and final results of conversion\n :param mo_py_path: path to model optimizer for TensorFlow\n :param batch_size: batch_size to set for target model\n :param mean_values:\n :param scale:\n \"\"\"\n if not os.path.isabs(output_folder):\n output_folder = os.path.join(os.getcwd(), output_folder)\n\n config = ssd.get_config_for_tfmo()\n graph_file = os.path.join(output_folder, 'graph.pb')\n frozen = dump_frozen_graph(session, graph_file, config['output_nodes'])\n\n input_shape = [batch_size] + list(ssd.input_shape[1:])\n execute_tfmo(mo_py_path, frozen, config, input_shape, **kvargs)\n\n\ndef export(cfg, tfmo):\n\n checkpoint_path = tf.train.latest_checkpoint(cfg.MODEL_DIR)\n\n detector_params = cfg.detector_params.copy()\n with tf.Session() as sess:\n input_tensor = tf.placeholder(dtype=tf.float32, shape=(None,) + tuple(cfg.input_shape))\n\n for unnecessary_param in ['initial_weights_path',\n 'learning_rate',\n 'optimizer',\n 'weights_decay_factor',\n 'collect_priors_summary']:\n if unnecessary_param in detector_params:\n del detector_params[unnecessary_param]\n\n ssd = MobileNetSSD(input_tensor=input_tensor, is_training=False, **detector_params)\n ssd.detection_output()\n # For eval.py\n tf.get_variable('eval_iteration', initializer=0, dtype=tf.int32, trainable=False)\n tf.get_variable('global_step', initializer=tf.constant_initializer(0, dtype=tf.int64), shape=(), dtype=tf.int64,\n trainable=False)\n\n train_param, _ = ssd.create_transform_parameters(width=cfg.input_shape[0], height=cfg.input_shape[1])\n\n saver = tf.train.Saver()\n saver.restore(sess, checkpoint_path)\n\n mean_values = [train_param.mean_value for _ in range(3)]\n convert_to_ie(ssd, sess, os.path.join(cfg.MODEL_DIR, 'ie_model/'), tfmo, batch_size=1,\n scale=1./train_param.scale, mean_values=mean_values)\n\n\ndef main(_):\n args = parse_args()\n cfg = load_module(args.path_to_config)\n export(cfg, args.mo)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main)\n"
] | [
[
"tensorflow.nn.ctc_greedy_decoder",
"tensorflow.Graph",
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.train.latest_checkpoint",
"tensorflow.transpose",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.to_int32",
"tensorflow.app.run"
],
[
"tensorflow.get_variable",
"tensorflow.train.latest_checkpoint",
"tensorflow.constant_initializer",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
AndreiRoibu/AIwithPyTorch | [
"82e93186b4aa76d73a6303755dfc9960ec896919"
] | [
"Machine Learning and Neurons/linear_regression.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Linear_Regression.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/154CXhkgz--XVuKrGLFkiNi-S1Iv1GdCU\n\"\"\"\n\n# We start by doing the required imports\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nN = 100 # Number of data points\nX = np.random.random(N) * 10 - 5 # Random data in range [-5, 5]\ny = 0.5 * X - 1 + np.random.randn(N) # A line plus noise\n\nprint(X.shape)\n# True slope = 0.5\n# True line intercept = -1\n# Gaussia noise with mean=0 and variance=1 is added as this is directly linked with MSE as a loss function\n\nplt.scatter(X, y) # Visualise the data\n\n# Create the linear regression model \n# The model has 1 input and 1 output\nmodel = nn.Linear(1,1)\n\n# Define the loss and optimizer\ncriterion = nn.MSELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr= 0.05)\n\n# Reshape the data and define the inputs\nX = X.reshape(N,1) # Number of samples x Number of Dimensions\ny = y.reshape(N,1)\n\nprint(X.shape)\n\ninputs = torch.from_numpy(X.astype(np.float32))\ntargets = torch.from_numpy(y.astype(np.float32))\n\n# Now we train the model\nnumber_epochs = 30\nlosses = []\nfor iteration in range(number_epochs):\n optimizer.zero_grad() # zero the parameter gradients. PyTorch accumulates the gradients for every .backward() call\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n losses.append(loss.item())\n loss.backward()\n optimizer.step()\n print('Epoch {}/{}, Loss: {:.6f}'.format(iteration+1, number_epochs, loss.item()))\n\n# Plot the losses\nplt.plot(losses)\n\n# Plot the graph of predictions\npredicted = model(inputs).detach().numpy()\nplt.scatter(X, y, label='Original Data')\nplt.plot(X, predicted, label='Fitted Line', color='r', linestyle='--')\nplt.legend()\nplt.show()\n\n# Test the model values (true w = 0.5, b = -1)\nw = model.weight.data.numpy()\nb = model.bias.data.numpy()\nprint(w,b)\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.random.random",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.plot",
"torch.nn.Linear",
"numpy.random.randn",
"matplotlib.pyplot.show",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhoub/tensorflow | [
"1752d9c8fac5f6cf85a41e77d92e2743adbfc446"
] | [
"tensorflow/contrib/bayesflow/python/ops/hmc_impl.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Hamiltonian Monte Carlo, a gradient-based MCMC algorithm.\n\n@@sample_chain\n@@sample_annealed_importance_chain\n@@kernel\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gradients_impl as gradients_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import util as distributions_util\n\n__all__ = [\n \"sample_chain\",\n \"sample_annealed_importance_chain\",\n \"kernel\",\n]\n\n\nKernelResults = collections.namedtuple(\n \"KernelResults\",\n [\n \"acceptance_probs\",\n \"current_grads_target_log_prob\", # \"Current result\" means \"accepted\".\n \"current_target_log_prob\", # \"Current result\" means \"accepted\".\n \"energy_change\",\n \"is_accepted\",\n \"proposed_grads_target_log_prob\",\n \"proposed_state\",\n \"proposed_target_log_prob\",\n \"random_positive\",\n ])\n\n\ndef _make_dummy_kernel_results(\n dummy_state,\n dummy_target_log_prob,\n dummy_grads_target_log_prob):\n return KernelResults(\n acceptance_probs=dummy_target_log_prob,\n current_grads_target_log_prob=dummy_grads_target_log_prob,\n current_target_log_prob=dummy_target_log_prob,\n energy_change=dummy_target_log_prob,\n is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool),\n proposed_grads_target_log_prob=dummy_grads_target_log_prob,\n proposed_state=dummy_state,\n proposed_target_log_prob=dummy_target_log_prob,\n random_positive=dummy_target_log_prob,\n )\n\n\ndef sample_chain(\n num_results,\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n num_burnin_steps=0,\n num_steps_between_results=0,\n seed=None,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.\n\n Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm\n that takes a series of gradient-informed steps to produce a Metropolis\n proposal. This function samples from an HMC Markov chain at `current_state`\n and whose stationary distribution has log-unnormalized-density\n `target_log_prob_fn()`.\n\n This function samples from multiple chains in parallel. It assumes that the\n the leftmost dimensions of (each) `current_state` (part) index an independent\n chain. The function `target_log_prob_fn()` sums log-probabilities across\n event dimensions (i.e., current state (part) rightmost dimensions). 
Each\n element of the output of `target_log_prob_fn()` represents the (possibly\n unnormalized) log-probability of the joint distribution over (all) the current\n state (parts).\n\n The `current_state` can be represented as a single `Tensor` or a `list` of\n `Tensors` which collectively represent the current state. When specifying a\n `list`, one must also specify a list of `step_size`s.\n\n Only one out of every `num_steps_between_samples + 1` steps is included in the\n returned results. This \"thinning\" comes at a cost of reduced statistical\n power, while reducing memory requirements and autocorrelation. For more\n discussion see [1].\n\n [1]: \"Statistically efficient thinning of a Markov chain sampler.\"\n Art B. Owen. April 2017.\n http://statweb.stanford.edu/~owen/reports/bestthinning.pdf\n\n #### Examples:\n\n ##### Sample from a diagonal-variance Gaussian.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_likelihood(true_variances):\n return tfd.MultivariateNormalDiag(\n scale_diag=tf.sqrt(true_variances))\n\n dims = 10\n dtype = np.float32\n true_variances = tf.linspace(dtype(1), dtype(3), dims)\n likelihood = make_likelihood(true_variances)\n\n states, kernel_results = hmc.sample_chain(\n num_results=1000,\n target_log_prob_fn=likelihood.log_prob,\n current_state=tf.zeros(dims),\n step_size=0.5,\n num_leapfrog_steps=2,\n num_burnin_steps=500)\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(states, axis=0)\n sample_var = tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0)\n ```\n\n ##### Sampling from factor-analysis posteriors with known factors.\n\n I.e.,\n\n ```none\n for i=1..n:\n w[i] ~ Normal(0, eye(d)) # prior\n x[i] ~ Normal(loc=matmul(w[i], F)) # likelihood\n ```\n\n where `F` denotes factors.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, factors):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, factors, axes=[[0], [-1]]))\n\n # Setup data.\n num_weights = 10\n num_factors = 4\n num_chains = 100\n dtype = np.float32\n\n prior = make_prior(num_weights, dtype)\n weights = prior.sample(num_chains)\n factors = np.random.randn(num_factors, num_weights).astype(dtype)\n x = make_likelihood(weights, factors).sample(num_chains)\n\n def target_log_prob(w):\n # Target joint is: `f(w) = p(w, x | factors)`.\n return prior.log_prob(w) + make_likelihood(w, factors).log_prob(x)\n\n # Get `num_results` samples from `num_chains` independent chains.\n chains_states, kernels_results = hmc.sample_chain(\n num_results=1000,\n target_log_prob_fn=target_log_prob,\n current_state=tf.zeros([num_chains, dims], dtype),\n step_size=0.1,\n num_leapfrog_steps=2,\n num_burnin_steps=500)\n\n # Compute sample stats.\n sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])\n sample_var = tf.reduce_mean(\n tf.squared_difference(chains_states, sample_mean),\n axis=[0, 1])\n ```\n\n Args:\n num_results: Integer number of Markov chain draws.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). 
The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n num_burnin_steps: Integer number of chain steps to take before starting to\n collect results.\n Default value: 0 (i.e., no burn-in).\n num_steps_between_results: Integer number of chain steps between collecting\n a result. Only one out of every `num_steps_between_samples + 1` steps is\n included in the returned results. This \"thinning\" comes at a cost of\n reduced statistical power, while reducing memory requirements and\n autocorrelation. For more discussion see [1].\n Default value: 0 (i.e., no subsampling).\n seed: Python integer to seed the random number generator.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`. The only reason to specify\n this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `target_log_prob` at the `current_state` and wrt\n the `current_state`. Must have same shape as `current_state`. The only\n reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_sample_chain\").\n\n Returns:\n accepted_states: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. 
Has same shape as\n input `current_state` but with a prepended `num_results`-size dimension.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n \"\"\"\n with ops.name_scope(\n name, \"hmc_sample_chain\",\n [num_results, current_state, step_size, num_leapfrog_steps,\n num_burnin_steps, num_steps_between_results, seed,\n current_target_log_prob, current_grads_target_log_prob]):\n with ops.name_scope(\"initialize\"):\n [\n current_state,\n step_size,\n current_target_log_prob,\n current_grads_target_log_prob,\n ] = _prepare_args(\n target_log_prob_fn, current_state, step_size,\n current_target_log_prob, current_grads_target_log_prob)\n def _run_chain(num_steps, current_state, seed, kernel_results):\n \"\"\"Runs the chain(s) for `num_steps`.\"\"\"\n def _loop_body(iter_, current_state, kernel_results):\n return [iter_ + 1] + list(kernel(\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed,\n kernel_results.current_target_log_prob,\n kernel_results.current_grads_target_log_prob))\n return control_flow_ops.while_loop(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[0, current_state, kernel_results])[1:] # Lop-off \"iter_\".\n\n def _scan_body(args_list, _):\n \"\"\"Closure which implements `tf.scan` body.\"\"\"\n current_state, kernel_results = args_list\n return _run_chain(num_steps_between_results + 1, current_state, seed,\n kernel_results)\n\n current_state, kernel_results = _run_chain(\n num_burnin_steps,\n current_state,\n distributions_util.gen_new_seed(\n seed, salt=\"hmc_sample_chain_burnin\"),\n _make_dummy_kernel_results(\n current_state,\n current_target_log_prob,\n current_grads_target_log_prob))\n\n return functional_ops.scan(\n fn=_scan_body,\n elems=array_ops.zeros(num_results, dtype=dtypes.bool), # Dummy arg.\n initializer=[current_state, kernel_results])\n\n\ndef sample_annealed_importance_chain(\n proposal_log_prob_fn,\n num_steps,\n target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed=None,\n name=None):\n \"\"\"Runs annealed importance sampling (AIS) to estimate normalizing constants.\n\n This function uses Hamiltonian Monte Carlo to sample from a series of\n distributions that slowly interpolates between an initial \"proposal\"\n distribution:\n\n `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`\n\n and the target distribution:\n\n `exp(target_log_prob_fn(x) - target_log_normalizer)`,\n\n accumulating importance weights along the way. 
The product of these\n importance weights gives an unbiased estimate of the ratio of the\n normalizing constants of the initial distribution and the target\n distribution:\n\n `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.\n\n #### Examples:\n\n ##### Estimate the normalizing constant of a log-gamma distribution.\n\n ```python\n tfd = tf.contrib.distributions\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 20\n dtype = np.float32\n\n proposal = tfd.MultivatiateNormalDiag(\n loc=tf.zeros([dims], dtype=dtype))\n\n target = tfd.TransformedDistribution(\n distribution=tfd.Gamma(concentration=dtype(2),\n rate=dtype(3)),\n bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()),\n event_shape=[dims])\n\n chains_state, ais_weights, kernels_results = (\n hmc.sample_annealed_importance_chain(\n proposal_log_prob_fn=proposal.log_prob,\n num_steps=1000,\n target_log_prob_fn=target.log_prob,\n step_size=0.2,\n current_state=proposal.sample(num_chains),\n num_leapfrog_steps=2))\n\n log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)\n ```\n\n ##### Estimate marginal likelihood of a Bayesian regression model.\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, x):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, x, axes=[[0], [-1]]))\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 10\n dtype = np.float32\n\n # Make training data.\n x = np.random.randn(num_chains, dims).astype(dtype)\n true_weights = np.random.randn(dims).astype(dtype)\n y = np.dot(x, true_weights) + np.random.randn(num_chains)\n\n # Setup model.\n prior = make_prior(dims, dtype)\n def target_log_prob_fn(weights):\n return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)\n\n proposal = tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n weight_samples, ais_weights, kernel_results = (\n hmc.sample_annealed_importance_chain(\n num_steps=1000,\n proposal_log_prob_fn=proposal.log_prob,\n target_log_prob_fn=target_log_prob_fn\n current_state=tf.zeros([num_chains, dims], dtype),\n step_size=0.1,\n num_leapfrog_steps=2))\n log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n ```\n\n Args:\n proposal_log_prob_fn: Python callable that returns the log density of the\n initial distribution.\n num_steps: Integer number of Markov chain updates to run. More\n iterations means more expense, but smoother annealing between q\n and p, which in turn means exponentially lower variance for the\n normalizing constant estimator.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. 
When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n seed: Python integer to seed the random number generator.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_sample_annealed_importance_chain\").\n\n Returns:\n accepted_state: `Tensor` or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at the final iteration. Has same shape as\n input `current_state`.\n ais_weights: Tensor with the estimated weight(s). Has shape matching\n `target_log_prob_fn(current_state)`.\n \"\"\"\n def make_convex_combined_log_prob_fn(iter_):\n def _fn(*args):\n p = proposal_log_prob_fn(*args)\n t = target_log_prob_fn(*args)\n dtype = p.dtype.base_dtype\n beta = (math_ops.cast(iter_ + 1, dtype)\n / math_ops.cast(num_steps, dtype))\n return (1. - beta) * p + beta * t\n return _fn\n\n with ops.name_scope(\n name, \"hmc_sample_annealed_importance_chain\",\n [num_steps, current_state, step_size, num_leapfrog_steps, seed]):\n with ops.name_scope(\"initialize\"):\n [\n current_state,\n step_size,\n current_log_prob,\n current_grads_log_prob,\n ] = _prepare_args(\n make_convex_combined_log_prob_fn(iter_=0),\n current_state,\n step_size,\n description=\"convex_combined_log_prob\")\n def _loop_body(iter_, ais_weights, current_state, kernel_results):\n \"\"\"Closure which implements `tf.while_loop` body.\"\"\"\n current_state_parts = (list(current_state)\n if _is_list_like(current_state)\n else [current_state])\n ais_weights += ((target_log_prob_fn(*current_state_parts)\n - proposal_log_prob_fn(*current_state_parts))\n / math_ops.cast(num_steps, ais_weights.dtype))\n return [iter_ + 1, ais_weights] + list(kernel(\n make_convex_combined_log_prob_fn(iter_),\n current_state,\n step_size,\n num_leapfrog_steps,\n seed,\n kernel_results.current_target_log_prob,\n kernel_results.current_grads_target_log_prob))\n\n [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[\n 0, # iter_\n array_ops.zeros_like(current_log_prob), # ais_weights\n current_state,\n _make_dummy_kernel_results(current_state,\n current_log_prob,\n current_grads_log_prob),\n ])[1:] # Lop-off \"iter_\".\n\n return [current_state, ais_weights, kernel_results]\n\n\ndef kernel(target_log_prob_fn,\n current_state,\n step_size,\n num_leapfrog_steps,\n seed=None,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n name=None):\n \"\"\"Runs one iteration of Hamiltonian Monte Carlo.\n\n Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)\n algorithm that takes a series of gradient-informed steps to produce\n a Metropolis proposal. This function applies one step of HMC to\n randomly update the variable `x`.\n\n This function can update multiple chains in parallel. It assumes that all\n leftmost dimensions of `current_state` index independent chain states (and are\n therefore updated independently). The output of `target_log_prob_fn()` should\n sum log-probabilities across all event dimensions. Slices along the rightmost\n dimensions may have different target distributions; for example,\n `current_state[0, :]` could have a different target distribution from\n `current_state[1, :]`. 
This is up to `target_log_prob_fn()`. (The number of\n independent chains is `tf.size(target_log_prob_fn(*current_state))`.)\n\n #### Examples:\n\n ##### Simple chain with warm-up.\n\n ```python\n tfd = tf.contrib.distributions\n\n # Tuning acceptance rates:\n dtype = np.float32\n target_accept_rate = 0.631\n num_warmup_iter = 500\n num_chain_iter = 500\n\n x = tf.get_variable(name=\"x\", initializer=dtype(1))\n step_size = tf.get_variable(name=\"step_size\", initializer=dtype(1))\n\n target = tfd.Normal(loc=dtype(0), scale=dtype(1))\n\n new_x, other_results = hmc.kernel(\n target_log_prob_fn=target.log_prob,\n current_state=x,\n step_size=step_size,\n num_leapfrog_steps=3)[:4]\n\n x_update = x.assign(new_x)\n\n step_size_update = step_size.assign_add(\n step_size * tf.where(\n other_results.acceptance_probs > target_accept_rate,\n 0.01, -0.01))\n\n warmup = tf.group([x_update, step_size_update])\n\n tf.global_variables_initializer().run()\n\n sess.graph.finalize() # No more graph building.\n\n # Warm up the sampler and adapt the step size\n for _ in xrange(num_warmup_iter):\n sess.run(warmup)\n\n # Collect samples without adapting step size\n samples = np.zeros([num_chain_iter])\n for i in xrange(num_chain_iter):\n _, x_, target_log_prob_, grad_ = sess.run([\n x_update,\n x,\n other_results.target_log_prob,\n other_results.grads_target_log_prob])\n samples[i] = x_\n\n print(samples.mean(), samples.std())\n ```\n\n ##### Sample from more complicated posterior.\n\n I.e.,\n\n ```none\n W ~ MVN(loc=0, scale=sigma * eye(dims))\n for i=1...num_samples:\n X[i] ~ MVN(loc=0, scale=eye(dims))\n eps[i] ~ Normal(loc=0, scale=1)\n Y[i] = X[i].T * W + eps[i]\n ```\n\n ```python\n tfd = tf.contrib.distributions\n\n def make_training_data(num_samples, dims, sigma):\n dt = np.asarray(sigma).dtype\n zeros = tf.zeros(dims, dtype=dt)\n x = tfd.MultivariateNormalDiag(\n loc=zeros).sample(num_samples, seed=1)\n w = tfd.MultivariateNormalDiag(\n loc=zeros,\n scale_identity_multiplier=sigma).sample(seed=2)\n noise = tfd.Normal(\n loc=dt(0),\n scale=dt(1)).sample(num_samples, seed=3)\n y = tf.tensordot(x, w, axes=[[1], [0]]) + noise\n return y, x, w\n\n def make_prior(sigma, dims):\n # p(w | sigma)\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros([dims], dtype=sigma.dtype),\n scale_identity_multiplier=sigma)\n\n def make_likelihood(x, w):\n # p(y | x, w)\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(x, w, axes=[[1], [0]]))\n\n # Setup assumptions.\n dtype = np.float32\n num_samples = 150\n dims = 10\n num_iters = int(5e3)\n\n true_sigma = dtype(0.5)\n y, x, true_weights = make_training_data(num_samples, dims, true_sigma)\n\n # Estimate of `log(true_sigma)`.\n log_sigma = tf.get_variable(name=\"log_sigma\", initializer=dtype(0))\n sigma = tf.exp(log_sigma)\n\n # State of the Markov chain.\n weights = tf.get_variable(\n name=\"weights\",\n initializer=np.random.randn(dims).astype(dtype))\n\n prior = make_prior(sigma, dims)\n\n def joint_log_prob_fn(w):\n # f(w) = log p(w, y | x)\n return prior.log_prob(w) + make_likelihood(x, w).log_prob(y)\n\n weights_update = weights.assign(\n hmc.kernel(target_log_prob_fn=joint_log_prob,\n current_state=weights,\n step_size=0.1,\n num_leapfrog_steps=5)[0])\n\n with tf.control_dependencies([weights_update]):\n loss = -prior.log_prob(weights)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n log_sigma_update = optimizer.minimize(loss, var_list=[log_sigma])\n\n sess.graph.finalize() # No more graph building.\n\n 
tf.global_variables_initializer().run()\n\n sigma_history = np.zeros(num_iters, dtype)\n weights_history = np.zeros([num_iters, dims], dtype)\n\n for i in xrange(num_iters):\n _, sigma_, weights_, _ = sess.run([log_sigma_update, sigma, weights])\n weights_history[i, :] = weights_\n sigma_history[i] = sigma_\n\n true_weights_ = sess.run(true_weights)\n\n # Should converge to something close to true_sigma.\n plt.plot(sigma_history);\n plt.ylabel(\"sigma\");\n plt.xlabel(\"iteration\");\n ```\n\n Args:\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n step_size: `Tensor` or Python `list` of `Tensor`s representing the step size\n for the leapfrog integrator. Must broadcast with the shape of\n `current_state`. Larger step sizes lead to faster progress, but too-large\n step sizes make rejection exponentially more likely. When possible, it's\n often helpful to match per-variable step sizes to the standard deviations\n of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n seed: Python integer to seed the random number generator.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn` at the `current_state`. The only reason to\n specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `current_target_log_prob` at the `current_state`\n and wrt the `current_state`. Must have same shape as `current_state`. The\n only reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_kernel\").\n\n Returns:\n accepted_state: Tensor or Python list of `Tensor`s representing the state(s)\n of the Markov chain(s) at each result step. Has same shape as\n `current_state`.\n acceptance_probs: Tensor with the acceptance probabilities for each\n iteration. 
Has shape matching `target_log_prob_fn(current_state)`.\n accepted_target_log_prob: `Tensor` representing the value of\n `target_log_prob_fn` at `accepted_state`.\n accepted_grads_target_log_prob: Python `list` of `Tensor`s representing the\n gradient of `accepted_target_log_prob` wrt each `accepted_state`.\n\n Raises:\n ValueError: if there isn't one `step_size` or a list with same length as\n `current_state`.\n \"\"\"\n with ops.name_scope(\n name, \"hmc_kernel\",\n [current_state, step_size, num_leapfrog_steps, seed,\n current_target_log_prob, current_grads_target_log_prob]):\n with ops.name_scope(\"initialize\"):\n [current_state_parts, step_sizes, current_target_log_prob,\n current_grads_target_log_prob] = _prepare_args(\n target_log_prob_fn, current_state, step_size,\n current_target_log_prob, current_grads_target_log_prob,\n maybe_expand=True)\n independent_chain_ndims = distributions_util.prefer_static_rank(\n current_target_log_prob)\n def init_momentum(s):\n return random_ops.random_normal(\n shape=array_ops.shape(s),\n dtype=s.dtype.base_dtype,\n seed=distributions_util.gen_new_seed(\n seed, salt=\"hmc_kernel_momentums\"))\n current_momentums = [init_momentum(s) for s in current_state_parts]\n\n [\n proposed_momentums,\n proposed_state_parts,\n proposed_target_log_prob,\n proposed_grads_target_log_prob,\n ] = _leapfrog_integrator(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n num_leapfrog_steps,\n current_target_log_prob,\n current_grads_target_log_prob)\n\n energy_change = _compute_energy_change(current_target_log_prob,\n current_momentums,\n proposed_target_log_prob,\n proposed_momentums,\n independent_chain_ndims)\n\n # u < exp(min(-energy, 0)), where u~Uniform[0,1)\n # ==> -log(u) >= max(e, 0)\n # ==> -log(u) >= e\n # (Perhaps surprisingly, we don't have a better way to obtain a random\n # uniform from positive reals, i.e., `tf.random_uniform(minval=0,\n # maxval=np.inf)` won't work.)\n random_uniform = random_ops.random_uniform(\n shape=array_ops.shape(energy_change),\n dtype=energy_change.dtype,\n seed=seed)\n random_positive = -math_ops.log(random_uniform)\n is_accepted = random_positive >= energy_change\n\n accepted_target_log_prob = array_ops.where(is_accepted,\n proposed_target_log_prob,\n current_target_log_prob)\n\n accepted_state_parts = [_choose(is_accepted,\n proposed_state_part,\n current_state_part,\n independent_chain_ndims)\n for current_state_part, proposed_state_part\n in zip(current_state_parts, proposed_state_parts)]\n\n accepted_grads_target_log_prob = [\n _choose(is_accepted,\n proposed_grad,\n grad,\n independent_chain_ndims)\n for proposed_grad, grad\n in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)]\n\n maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0]\n return [\n maybe_flatten(accepted_state_parts),\n KernelResults(\n acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)),\n current_grads_target_log_prob=accepted_grads_target_log_prob,\n current_target_log_prob=accepted_target_log_prob,\n energy_change=energy_change,\n is_accepted=is_accepted,\n proposed_grads_target_log_prob=proposed_grads_target_log_prob,\n proposed_state=maybe_flatten(proposed_state_parts),\n proposed_target_log_prob=proposed_target_log_prob,\n random_positive=random_positive,\n ),\n ]\n\n\ndef _leapfrog_integrator(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n num_leapfrog_steps,\n current_target_log_prob=None,\n current_grads_target_log_prob=None,\n 
name=None):\n \"\"\"Applies `num_leapfrog_steps` of the leapfrog integrator.\n\n Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.\n\n #### Examples:\n\n ##### Simple quadratic potential.\n\n ```python\n tfd = tf.contrib.distributions\n\n dims = 10\n num_iter = int(1e3)\n dtype = np.float32\n\n position = tf.placeholder(np.float32)\n momentum = tf.placeholder(np.float32)\n\n [\n new_momentums,\n new_positions,\n ] = hmc._leapfrog_integrator(\n current_momentums=[momentum],\n target_log_prob_fn=tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype)).log_prob,\n current_state_parts=[position],\n step_sizes=0.1,\n num_leapfrog_steps=3)[:2]\n\n sess.graph.finalize() # No more graph building.\n\n momentum_ = np.random.randn(dims).astype(dtype)\n position_ = np.random.randn(dims).astype(dtype)\n\n positions = np.zeros([num_iter, dims], dtype)\n for i in xrange(num_iter):\n position_, momentum_ = sess.run(\n [new_momentums[0], new_position[0]],\n feed_dict={position: position_, momentum: momentum_})\n positions[i] = position_\n\n plt.plot(positions[:, 0]); # Sinusoidal.\n ```\n\n Args:\n current_momentums: Tensor containing the value(s) of the momentum\n variable(s) to update.\n target_log_prob_fn: Python callable which takes an argument like\n `*current_state_parts` and returns its (possibly unnormalized) log-density\n under the target distribution.\n current_state_parts: Python `list` of `Tensor`s representing the current\n state(s) of the Markov chain(s). The first `independent_chain_ndims` of\n the `Tensor`(s) index different chains.\n step_sizes: Python `list` of `Tensor`s representing the step size for the\n leapfrog integrator. Must broadcast with the shape of\n `current_state_parts`. Larger step sizes lead to faster progress, but\n too-large step sizes make rejection exponentially more likely. When\n possible, it's often helpful to match per-variable step sizes to the\n standard deviations of the target distribution in each variable.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to `step_size *\n num_leapfrog_steps`.\n current_target_log_prob: (Optional) `Tensor` representing the value of\n `target_log_prob_fn(*current_state_parts)`. The only reason to specify\n this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n current_grads_target_log_prob: (Optional) Python list of `Tensor`s\n representing gradient of `target_log_prob_fn(*current_state_parts`) wrt\n `current_state_parts`. Must have same shape as `current_state_parts`. The\n only reason to specify this argument is to reduce TF graph size.\n Default value: `None` (i.e., compute as needed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"hmc_leapfrog_integrator\").\n\n Returns:\n proposed_momentums: Updated value of the momentum.\n proposed_state_parts: Tensor or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at each result step. 
Has same shape as\n input `current_state_parts`.\n proposed_target_log_prob: `Tensor` representing the value of\n `target_log_prob_fn` at `accepted_state`.\n proposed_grads_target_log_prob: Gradient of `proposed_target_log_prob` wrt\n `accepted_state`.\n\n Raises:\n ValueError: if `len(momentums) != len(state_parts)`.\n ValueError: if `len(state_parts) != len(step_sizes)`.\n ValueError: if `len(state_parts) != len(grads_target_log_prob)`.\n TypeError: if `not target_log_prob.dtype.is_floating`.\n \"\"\"\n def _loop_body(step,\n current_momentums,\n current_state_parts,\n ignore_current_target_log_prob, # pylint: disable=unused-argument\n current_grads_target_log_prob):\n return [step + 1] + list(_leapfrog_step(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n current_grads_target_log_prob))\n\n with ops.name_scope(\n name, \"hmc_leapfrog_integrator\",\n [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,\n current_target_log_prob, current_grads_target_log_prob]):\n if len(current_momentums) != len(current_state_parts):\n raise ValueError(\"`momentums` must be in one-to-one correspondence \"\n \"with `state_parts`\")\n num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps,\n name=\"num_leapfrog_steps\")\n current_target_log_prob, current_grads_target_log_prob = (\n _maybe_call_fn_and_grads(\n target_log_prob_fn,\n current_state_parts,\n current_target_log_prob,\n current_grads_target_log_prob))\n return control_flow_ops.while_loop(\n cond=lambda iter_, *args: iter_ < num_leapfrog_steps,\n body=_loop_body,\n loop_vars=[\n 0, # iter_\n current_momentums,\n current_state_parts,\n current_target_log_prob,\n current_grads_target_log_prob,\n ],\n back_prop=False)[1:] # Lop-off \"iter_\".\n\n\ndef _leapfrog_step(current_momentums,\n target_log_prob_fn,\n current_state_parts,\n step_sizes,\n current_grads_target_log_prob,\n name=None):\n \"\"\"Applies one step of the leapfrog integrator.\"\"\"\n with ops.name_scope(\n name, \"_leapfrog_step\",\n [current_momentums, current_state_parts, step_sizes,\n current_grads_target_log_prob]):\n proposed_momentums = [m + 0.5 * ss * g for m, ss, g\n in zip(current_momentums,\n step_sizes,\n current_grads_target_log_prob)]\n proposed_state_parts = [x + ss * m for x, ss, m\n in zip(current_state_parts,\n step_sizes,\n proposed_momentums)]\n proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts)\n if not proposed_target_log_prob.dtype.is_floating:\n raise TypeError(\"`target_log_prob_fn` must produce a `Tensor` \"\n \"with `float` `dtype`.\")\n proposed_grads_target_log_prob = gradients_ops.gradients(\n proposed_target_log_prob, proposed_state_parts)\n if any(g is None for g in proposed_grads_target_log_prob):\n raise ValueError(\n \"Encountered `None` gradient. 
Does your target `target_log_prob_fn` \"\n \"access all `tf.Variable`s via `tf.get_variable`?\\n\"\n \" current_state_parts: {}\\n\"\n \" proposed_state_parts: {}\\n\"\n \" proposed_grads_target_log_prob: {}\".format(\n current_state_parts,\n proposed_state_parts,\n proposed_grads_target_log_prob))\n proposed_momentums = [m + 0.5 * ss * g for m, ss, g\n in zip(proposed_momentums,\n step_sizes,\n proposed_grads_target_log_prob)]\n return [\n proposed_momentums,\n proposed_state_parts,\n proposed_target_log_prob,\n proposed_grads_target_log_prob,\n ]\n\n\ndef _compute_energy_change(current_target_log_prob,\n current_momentums,\n proposed_target_log_prob,\n proposed_momentums,\n independent_chain_ndims,\n name=None):\n \"\"\"Helper to `kernel` which computes the energy change.\"\"\"\n with ops.name_scope(\n name, \"compute_energy_change\",\n ([current_target_log_prob, proposed_target_log_prob,\n independent_chain_ndims] +\n current_momentums + proposed_momentums)):\n # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy\n # since they're a mouthful and lets us inline more.\n lk0, lk1 = [], []\n for current_momentum, proposed_momentum in zip(current_momentums,\n proposed_momentums):\n axis = math_ops.range(independent_chain_ndims,\n array_ops.rank(current_momentum))\n lk0.append(_log_sum_sq(current_momentum, axis))\n lk1.append(_log_sum_sq(proposed_momentum, axis))\n\n lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),\n axis=-1)\n lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),\n axis=-1)\n lp0 = -current_target_log_prob # log_potential\n lp1 = -proposed_target_log_prob # proposed_log_potential\n x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],\n axis=-1)\n\n # The sum is NaN if any element is NaN or we see both +Inf and -Inf.\n # Thus we will replace such rows with infinite energy change which implies\n # rejection. 
Recall that float-comparisons with NaN are always False.\n is_sum_determinate = (\n math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &\n math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))\n is_sum_determinate = array_ops.tile(\n is_sum_determinate[..., array_ops.newaxis],\n multiples=array_ops.concat([\n array_ops.ones(array_ops.rank(is_sum_determinate),\n dtype=dtypes.int32),\n [4],\n ], axis=0))\n x = array_ops.where(is_sum_determinate,\n x,\n array_ops.fill(array_ops.shape(x),\n value=x.dtype.as_numpy_dtype(np.inf)))\n\n return math_ops.reduce_sum(x, axis=-1)\n\n\ndef _choose(is_accepted,\n accepted,\n rejected,\n independent_chain_ndims,\n name=None):\n \"\"\"Helper to `kernel` which expand_dims `is_accepted` to apply tf.where.\"\"\"\n def _expand_is_accepted_like(x):\n with ops.name_scope(\"_choose\"):\n expand_shape = array_ops.concat([\n array_ops.shape(is_accepted),\n array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)],\n dtype=dtypes.int32),\n ], axis=0)\n multiples = array_ops.concat([\n array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32),\n array_ops.shape(x)[independent_chain_ndims:],\n ], axis=0)\n m = array_ops.tile(array_ops.reshape(is_accepted, expand_shape),\n multiples)\n m.set_shape(x.shape)\n return m\n with ops.name_scope(name, \"_choose\", values=[\n is_accepted, accepted, rejected, independent_chain_ndims]):\n return array_ops.where(_expand_is_accepted_like(accepted),\n accepted,\n rejected)\n\n\ndef _maybe_call_fn_and_grads(fn,\n fn_arg_list,\n fn_result=None,\n grads_fn_result=None,\n description=\"target_log_prob\"):\n \"\"\"Helper which computes `fn_result` and `grads` if needed.\"\"\"\n fn_arg_list = (list(fn_arg_list) if _is_list_like(fn_arg_list)\n else [fn_arg_list])\n if fn_result is None:\n fn_result = fn(*fn_arg_list)\n if not fn_result.dtype.is_floating:\n raise TypeError(\"`{}` must be a `Tensor` with `float` `dtype`.\".format(\n description))\n if grads_fn_result is None:\n grads_fn_result = gradients_ops.gradients(\n fn_result, fn_arg_list)\n if len(fn_arg_list) != len(grads_fn_result):\n raise ValueError(\"`{}` must be in one-to-one correspondence with \"\n \"`grads_{}`\".format(*[description]*2))\n if any(g is None for g in grads_fn_result):\n raise ValueError(\"Encountered `None` gradient.\")\n return fn_result, grads_fn_result\n\n\ndef _prepare_args(target_log_prob_fn, state, step_size,\n target_log_prob=None, grads_target_log_prob=None,\n maybe_expand=False, description=\"target_log_prob\"):\n \"\"\"Helper which processes input args to meet list-like assumptions.\"\"\"\n state_parts = list(state) if _is_list_like(state) else [state]\n state_parts = [ops.convert_to_tensor(s, name=\"state\")\n for s in state_parts]\n target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads(\n target_log_prob_fn,\n state_parts,\n target_log_prob,\n grads_target_log_prob,\n description)\n step_sizes = list(step_size) if _is_list_like(step_size) else [step_size]\n step_sizes = [\n ops.convert_to_tensor(\n s, name=\"step_size\", dtype=target_log_prob.dtype)\n for s in step_sizes]\n if len(step_sizes) == 1:\n step_sizes *= len(state_parts)\n if len(state_parts) != len(step_sizes):\n raise ValueError(\"There should be exactly one `step_size` or it should \"\n \"have same length as `current_state`.\")\n if maybe_expand:\n maybe_flatten = lambda x: x\n else:\n maybe_flatten = lambda x: x if _is_list_like(state) else x[0]\n return [\n maybe_flatten(state_parts),\n maybe_flatten(step_sizes),\n target_log_prob,\n 
grads_target_log_prob,\n ]\n\n\ndef _is_list_like(x):\n \"\"\"Helper which returns `True` if input is `list`-like.\"\"\"\n return isinstance(x, (tuple, list))\n\n\ndef _log_sum_sq(x, axis=None):\n \"\"\"Computes log(sum(x**2)).\"\"\"\n return math_ops.reduce_logsumexp(2. * math_ops.log(math_ops.abs(x)), axis)\n"
] | [
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.distributions.util.prefer_static_rank",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.distributions.util.gen_new_seed",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.math_ops.cast",
"numpy.log",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.is_finite",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.math_ops.reduce_sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
}
] |
QianWanghhu/SALib | [
"95a3371e503f9253cb917b8f0101c0202b969c2b"
] | [
"src/SALib/analyze/sobol.py"
] | [
"from scipy.stats import norm\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom . import common_args\r\nfrom ..util import (read_param_file, compute_groups_matrix, ResultDict, \r\n extract_group_names, _check_groups)\r\nfrom types import MethodType\r\n\r\nfrom multiprocessing import Pool, cpu_count\r\nfrom functools import partial\r\nfrom itertools import combinations, zip_longest\r\n\r\n\r\ndef analyze(problem, Y, calc_second_order=True, num_resamples=100,\r\n conf_level=0.95, print_to_console=False, parallel=False,\r\n n_processors=None, seed=None):\r\n \"\"\"Perform Sobol Analysis on model outputs.\r\n\r\n Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf', where\r\n each entry is a list of size D (the number of parameters) containing the\r\n indices in the same order as the parameter file. If calc_second_order is\r\n True, the dictionary also contains keys 'S2' and 'S2_conf'.\r\n\r\n Compatible with\r\n ---------------\r\n * `saltelli`\r\n\r\n Parameters\r\n ----------\r\n problem : dict\r\n The problem definition\r\n Y : numpy.array\r\n A NumPy array containing the model outputs\r\n calc_second_order : bool\r\n Calculate second-order sensitivities (default True)\r\n num_resamples : int\r\n The number of resamples (default 100)\r\n conf_level : float\r\n The confidence interval level (default 0.95)\r\n print_to_console : bool\r\n Print results directly to console (default False)\r\n\r\n References\r\n ----------\r\n .. [1] Sobol, I. M. (2001). \"Global sensitivity indices for nonlinear\r\n mathematical models and their Monte Carlo estimates.\" Mathematics\r\n and Computers in Simulation, 55(1-3):271-280,\r\n doi:10.1016/S0378-4754(00)00270-6.\r\n .. [2] Saltelli, A. (2002). \"Making best use of model evaluations to\r\n compute sensitivity indices.\" Computer Physics Communications,\r\n 145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.\r\n .. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and\r\n S. Tarantola (2010). \"Variance based sensitivity analysis of model\r\n output. 
Design and estimator for the total sensitivity index.\"\r\n Computer Physics Communications, 181(2):259-270,\r\n doi:10.1016/j.cpc.2009.09.018.\r\n\r\n Examples\r\n --------\r\n >>> X = saltelli.sample(problem, 1000)\r\n >>> Y = Ishigami.evaluate(X)\r\n >>> Si = sobol.analyze(problem, Y, print_to_console=True)\r\n\r\n \"\"\"\r\n if seed:\r\n np.random.seed(seed)\r\n # determining if groups are defined and adjusting the number\r\n # of rows in the cross-sampled matrix accordingly\r\n groups = _check_groups(problem)\r\n if not groups:\r\n D = problem['num_vars']\r\n else:\r\n _, D = extract_group_names(groups)\r\n\r\n if calc_second_order and Y.size % (2 * D + 2) == 0:\r\n N = int(Y.size / (2 * D + 2))\r\n elif not calc_second_order and Y.size % (D + 2) == 0:\r\n N = int(Y.size / (D + 2))\r\n else:\r\n raise RuntimeError(\"\"\"\r\n Incorrect number of samples in model output file.\r\n Confirm that calc_second_order matches option used during sampling.\"\"\")\r\n\r\n if conf_level < 0 or conf_level > 1:\r\n raise RuntimeError(\"Confidence level must be between 0-1.\")\r\n\r\n # normalize the model output\r\n Y = (Y - Y.mean()) / Y.std()\r\n\r\n A, B, AB, BA = separate_output_values(Y, D, N, calc_second_order)\r\n r = np.random.randint(N, size=(N, num_resamples))\r\n Z = norm.ppf(0.5 + conf_level / 2)\r\n\r\n if not parallel:\r\n S = create_Si_dict(D, calc_second_order)\r\n\r\n for j in range(D):\r\n S['S1'][j] = first_order(A, AB[:, j], B)\r\n S['S1_conf'][j] = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)\r\n S['ST'][j] = total_order(A, AB[:, j], B)\r\n S['ST_conf'][j] = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)\r\n\r\n # Second order (+conf.)\r\n if calc_second_order:\r\n for j in range(D):\r\n for k in range(j + 1, D):\r\n S['S2'][j, k] = second_order(\r\n A, AB[:, j], AB[:, k], BA[:, j], B)\r\n S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j],\r\n AB[r, k], BA[r, j],\r\n B[r]).std(ddof=1)\r\n else:\r\n tasks, n_processors = create_task_list(\r\n D, calc_second_order, n_processors)\r\n\r\n func = partial(sobol_parallel, Z, A, AB, BA, B, r)\r\n pool = Pool(n_processors)\r\n S_list = pool.map_async(func, tasks)\r\n pool.close()\r\n pool.join()\r\n\r\n S = Si_list_to_dict(S_list.get(), D, calc_second_order)\r\n\r\n \r\n # Add problem context and override conversion method for special case\r\n S.problem = problem\r\n S.to_df = MethodType(to_df, S)\r\n\r\n # Print results to console\r\n if print_to_console:\r\n res = S.to_df()\r\n for df in res:\r\n print(df)\r\n \r\n return S\r\n\r\n\r\ndef first_order(A, AB, B):\r\n # First order estimator following Saltelli et al. 2010 CPC, normalized by\r\n # sample variance\r\n return np.mean(B * (AB - A), axis=0) / np.var(np.r_[A, B], axis=0)\r\n\r\n\r\ndef total_order(A, AB, B):\r\n # Total order estimator following Saltelli et al. 
2010 CPC, normalized by\r\n # sample variance\r\n return 0.5 * np.mean((A - AB) ** 2, axis=0) / np.var(np.r_[A, B], axis=0)\r\n\r\n\r\ndef second_order(A, ABj, ABk, BAj, B):\r\n # Second order estimator following Saltelli 2002\r\n Vjk = np.mean(BAj * ABk - A * B, axis=0) / np.var(np.r_[A, B], axis=0)\r\n Sj = first_order(A, ABj, B)\r\n Sk = first_order(A, ABk, B)\r\n\r\n return Vjk - Sj - Sk\r\n\r\n\r\ndef create_Si_dict(D, calc_second_order):\r\n # initialize empty dict to store sensitivity indices\r\n S = ResultDict((k, np.zeros(D))\r\n for k in ('S1', 'S1_conf', 'ST', 'ST_conf'))\r\n\r\n if calc_second_order:\r\n S['S2'] = np.full((D, D), np.nan)\r\n S['S2_conf'] = np.full((D, D), np.nan)\r\n\r\n return S\r\n\r\n\r\ndef separate_output_values(Y, D, N, calc_second_order):\r\n AB = np.zeros((N, D))\r\n BA = np.zeros((N, D)) if calc_second_order else None\r\n step = 2 * D + 2 if calc_second_order else D + 2\r\n\r\n A = Y[0:Y.size:step]\r\n B = Y[(step - 1):Y.size:step]\r\n for j in range(D):\r\n AB[:, j] = Y[(j + 1):Y.size:step]\r\n if calc_second_order:\r\n BA[:, j] = Y[(j + 1 + D):Y.size:step]\r\n\r\n return A, B, AB, BA\r\n\r\n\r\ndef sobol_parallel(Z, A, AB, BA, B, r, tasks):\r\n sobol_indices = []\r\n for d, j, k in tasks:\r\n if d == 'S1':\r\n s = first_order(A, AB[:, j], B)\r\n elif d == 'S1_conf':\r\n s = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)\r\n elif d == 'ST':\r\n s = total_order(A, AB[:, j], B)\r\n elif d == 'ST_conf':\r\n s = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)\r\n elif d == 'S2':\r\n s = second_order(A, AB[:, j], AB[:, k], BA[:, j], B)\r\n elif d == 'S2_conf':\r\n s = Z * second_order(A[r], AB[r, j], AB[r, k],\r\n BA[r, j], B[r]).std(ddof=1)\r\n sobol_indices.append([d, j, k, s])\r\n\r\n return sobol_indices\r\n\r\n\r\ndef create_task_list(D, calc_second_order, n_processors):\r\n # Create list with one entry (key, parameter 1, parameter 2) per sobol\r\n # index (+conf.). This is used to supply parallel tasks to\r\n # multiprocessing.Pool\r\n tasks_first_order = [[d, j, None] for j in range(\r\n D) for d in ('S1', 'S1_conf', 'ST', 'ST_conf')]\r\n\r\n # Add second order (+conf.) 
to tasks\r\n tasks_second_order = []\r\n if calc_second_order:\r\n tasks_second_order = [[d, j, k] for j in range(D) for k in\r\n range(j + 1, D) for d in ('S2', 'S2_conf')]\r\n\r\n if n_processors is None:\r\n n_processors = min(cpu_count(), len(\r\n tasks_first_order) + len(tasks_second_order))\r\n\r\n if not calc_second_order:\r\n tasks = np.array_split(tasks_first_order, n_processors)\r\n else:\r\n # merges both lists alternating its elements and splits the\r\n # resulting lists into n_processors sublists\r\n tasks = np.array_split([v for v in sum(\r\n zip_longest(tasks_first_order[::-1], tasks_second_order), ())\r\n if v is not None], n_processors)\r\n\r\n return tasks, n_processors\r\n\r\n\r\ndef Si_list_to_dict(S_list, D, calc_second_order):\r\n # Convert the parallel output into the regular dict format for\r\n # printing/returning\r\n S = create_Si_dict(D, calc_second_order)\r\n L = []\r\n for l in S_list: # first reformat to flatten\r\n L += l\r\n\r\n for s in L: # First order (+conf.)\r\n if s[2] is None:\r\n S[s[0]][s[1]] = s[3]\r\n else:\r\n S[s[0]][s[1], s[2]] = s[3]\r\n\r\n return S\r\n\r\n\r\ndef Si_to_pandas_dict(S_dict):\r\n \"\"\"Convert Si information into Pandas DataFrame compatible dict.\r\n\r\n Parameters\r\n ----------\r\n S_dict : ResultDict\r\n Sobol sensitivity indices\r\n\r\n See Also\r\n ----------\r\n Si_list_to_dict\r\n\r\n Returns\r\n ----------\r\n tuple : of total, first, and second order sensitivities.\r\n Total and first order are dicts.\r\n Second order sensitivities contain a tuple of parameter name\r\n combinations for use as the DataFrame index and second order\r\n sensitivities.\r\n If no second order indices found, then returns tuple of\r\n (None, None)\r\n\r\n Examples\r\n --------\r\n >>> X = saltelli.sample(problem, 1000)\r\n >>> Y = Ishigami.evaluate(X)\r\n >>> Si = sobol.analyze(problem, Y, print_to_console=True)\r\n >>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)\r\n \"\"\"\r\n problem = S_dict.problem\r\n total_order = {\r\n 'ST': S_dict['ST'],\r\n 'ST_conf': S_dict['ST_conf']\r\n }\r\n first_order = {\r\n 'S1': S_dict['S1'],\r\n 'S1_conf': S_dict['S1_conf']\r\n }\r\n\r\n idx = None\r\n second_order = None\r\n if 'S2' in S_dict:\r\n groups = _check_groups(problem)\r\n if groups:\r\n names, _ = extract_group_names(groups)\r\n else:\r\n names = problem.get('names')\r\n\r\n if len(names) > 2:\r\n idx = list(combinations(names, 2))\r\n else:\r\n idx = (names, )\r\n \r\n second_order = {\r\n 'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]\r\n for i in idx],\r\n 'S2_conf': [S_dict['S2_conf'][names.index(i[0]), names.index(i[1])]\r\n for i in idx]\r\n }\r\n return total_order, first_order, (idx, second_order)\r\n\r\n\r\ndef to_df(self):\r\n '''Conversion method to Pandas DataFrame. 
To be attached to ResultDict.\r\n\r\n Returns\r\n --------\r\n List : of Pandas DataFrames in order of Total, First, Second\r\n\r\n Example\r\n -------\r\n >>> Si = sobol.analyze(problem, Y, print_to_console=True)\r\n >>> total_Si, first_Si, second_Si = Si.to_df()\r\n '''\r\n total, first, (idx, second) = Si_to_pandas_dict(self)\r\n\r\n problem = self.problem\r\n groups = _check_groups(problem)\r\n if not groups:\r\n names = problem.get('names')\r\n else:\r\n names, _ = extract_group_names(groups)\r\n\r\n ret = [pd.DataFrame(total, index=names),\r\n pd.DataFrame(first, index=names)]\r\n\r\n if second:\r\n ret += [pd.DataFrame(second, index=idx)]\r\n\r\n return ret\r\n\r\n\r\ndef cli_parse(parser):\r\n parser.add_argument('--max-order', type=int, required=False, default=2,\r\n choices=[1, 2],\r\n help='Maximum order of sensitivity indices to '\r\n 'calculate')\r\n parser.add_argument('-r', '--resamples', type=int, required=False,\r\n default=100,\r\n help='Number of bootstrap resamples for Sobol '\r\n 'confidence intervals')\r\n parser.add_argument('--parallel', action='store_true', help='Makes '\r\n 'use of parallelization.',\r\n dest='parallel')\r\n parser.add_argument('--processors', type=int, required=False,\r\n default=None,\r\n help='Number of processors to be used with the ' +\r\n 'parallel option.', dest='n_processors')\r\n return parser\r\n\r\n\r\ndef cli_action(args):\r\n problem = read_param_file(args.paramfile)\r\n Y = np.loadtxt(args.model_output_file, delimiter=args.delimiter,\r\n usecols=(args.column,))\r\n\r\n analyze(problem, Y, (args.max_order == 2),\r\n num_resamples=args.resamples, print_to_console=True,\r\n parallel=args.parallel, n_processors=args.n_processors,\r\n seed=args.seed)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n common_args.run_cli(cli_parse, cli_action)\r\n"
] | [
[
"scipy.stats.norm.ppf",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.full",
"numpy.mean",
"numpy.var",
"numpy.array_split",
"numpy.zeros",
"numpy.loadtxt",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ADernild/DS809 | [
"6380c2354cea517ea684d99cb1fb1d5f35b5dc22"
] | [
"filtersvis.py"
] | [
"import keras\nfrom keras import models, Model\nfrom matplotlib import pyplot\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import Model\nfrom matplotlib import pyplot as plt\nfrom numpy import expand_dims\nimport sys\n\n\n\n\nmodel = keras.models.load_model('aid/final_model_2.h5')\nmodel.summary()\n\nfor layer in model.layers:\n\t# check for convolutional layer\n\tif 'conv' not in layer.name:\n\t\tcontinue\n\t# get filter weights\n\tfilters, biases = layer.get_weights()\n\tprint(layer.name, filters.shape)\n\n\nfilters, biases = model.layers[0].get_weights()\n# normalize filter values to 0-1 so we can visualize them\nf_min, f_max = filters.min(), filters.max()\nfilters = (filters - f_min) / (f_max - f_min)\n# plot first few filters\nn_filters, ix = 6, 1\nfor i in range(n_filters):\n # get the filter\n f = filters[:, :, :, i]\n # plot each channel separately\n for j in range(3):\n # specify subplot and turn of axis\n ax = pyplot.subplot(n_filters, 3, ix)\n ax.set_xticks([])\n ax.set_yticks([])\n # plot filter channel in grayscale\n pyplot.imshow(f[:, :, j], cmap='brg')\n ix += 1\n\t# show the figure\nfilename = sys.argv[0].split('/')[-1]\nplt.savefig(filename + 'filter_plt_brg.png')\nplt.close()\npyplot.show()\n\nfor i in range(len(model.layers)):\n\tlayer = model.layers[i]\n\t# check for convolutional layer\n\tif 'conv' not in layer.name:\n\t\tcontinue\n\t# summarize output shape\n\tprint(i, layer.name, layer.output.shape)\n\n\nmodel.summary()\nmodel = Model(inputs=model.inputs, outputs=model.layers[0].output)\n\nimg = load_img('train/dog/dog.1.jpg', target_size=(200, 200))\n# convert the image to an array\nimg = img_to_array(img)\n# expand dimensions so that it represents a single 'sample'\nimg = expand_dims(img, axis=0)\n# prepare the image (e.g. scale pixel values for the vgg)\n# get feature map for first hidden layer\nfeaturemaps = model.predict(img)\n# plot all 64 maps in an 8x8 squares\n\nsquare = 4\nix = 1\n\nfor _ in range(square):\n for _ in range(square):\n # specify subplot and turn of axis\n ax = plt.subplot(square, square, ix)\n ax.set_xticks([])\n ax.set_yticks([])\n # plot filter channel in grayscale\n plt.imshow(featuremaps[0, :, :, ix-1], cmap='brg')\n ix += 1\n\t# show the figure\nfilename = sys.argv[0].split('/')[-1]\nplt.savefig(filename + 'map_plt_brg.png')\nplt.close()\nplt.show()\n\n\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
minilek/private_frequency_oracles | [
"7b4f9723b59b234fda504869e4ed8a2cbc2cf19b"
] | [
"generate_figures.py"
] | [
"#!/usr/bin/env python3\n\nimport numpy as np\nfrom pylab import *\nimport matplotlib.pyplot as plt\nfrom sys import stdin\n\nimport argparse\nimport os\nfrom pathlib import Path\nimport shutil\nimport sys\nfrom absl import app,flags\n\nfrom pfoparams import Params\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"results_dir\", \"pfotemp\", \"directory where results live\")\n\ncolors = [\"b\", \"g\", \"r\", \"c\", \"mediumslateblue\", \"y\", \"dimgrey\", \"magenta\", \"mediumspringgreen\", \"gainsboro\", \"rosybrown\", \"chocolate\", \"aqua\", \"mediumslateblue\", \"saddlebrown\", \"darkorchid\", \"khaki\"]\nalg_to_color = {}\ncolor_idx = 0\n \n\ndef extract_base_filename(s):\n s = os.path.basename(s)\n return s[:s.rfind('.')]\n\ndef get_params_from_file(filepath, ext):\n global cmds_path\n cmdpath = cmds_path / (extract_base_filename(str(filepath.resolve())) + '.' + ext + '.cmd')\n with cmdpath.open('r', encoding='utf-8') as f:\n return Params.string_to_param(f.readline().rstrip())\n\ndef gen_var_fig(filepath):\n global colors, alg_to_color, color_idx\n print(\"opening \" + str(filepath.resolve()), file=sys.stderr)\n algos = []\n variances = []\n my_colors = []\n algo_name_to_idx = {}\n\n params = get_params_from_file(filepath, 'var')\n test_type = extract_base_filename(str(filepath.resolve()))\n if test_type.startswith('spike'):\n test_type = 'spike'\n\n max_variance = -1\n\n with filepath.open('r', encoding='utf-8') as f:\n for line in f:\n a = line.split(\",\")\n alg, eps, var = a[0], float(a[1]), float(a[2])\n max_variance = max(max_variance, var)\n if alg not in algo_name_to_idx:\n algo_name_to_idx[alg] = len(algos)\n if alg not in alg_to_color:\n alg_to_color[alg] = colors[color_idx]\n color_idx += 1\n algos.append(alg)\n my_colors.append(alg_to_color[alg])\n variances.append({})\n idx = algo_name_to_idx[alg]\n variances[idx][eps] = var\n\n for a in algos:\n if set(variances[algo_name_to_idx[a]].keys()) != set(variances[0].keys()):\n print('Will not create figure for ' + filepath.resolve() + ' since not all algos were ran with same values of epsilon.')\n return\n\n all_epsilons = list(variances[0].keys())\n all_epsilons.sort()\n\n fig=plt.figure()\n plt.xlabel('epsilon')\n plt.ylabel('MSE')\n\n fig.text(0.5, .95, test_type+',k='+str(params.universe_size)+',n='+str(params.num_users), horizontalalignment='center', verticalalignment='top')\n\n plots = []\n for i in range(len(algos)):\n plots.append([])\n for j in range(len(all_epsilons)):\n plots[i].append(variances[i][all_epsilons[j]])\n plt.plot(all_epsilons, plots[i], '-', label=algos[i], color=my_colors[i])\n\n lgnd = plt.legend(loc='upper right')\n\n for color,text in zip(my_colors, lgnd.get_texts()):\n text.set_color(color)\n \n plt.savefig(str((figs_path / (extract_base_filename(str(filepath.resolve())) + '.var.png')).resolve()), facecolor='floralwhite', edgecolor='none')\n\ndef gen_mse_or_max_fig(filepath, mse_type=True):\n global colors, alg_to_color, color_idx\n print(\"opening \" + str(filepath.resolve()), file=sys.stderr)\n algos = []\n errors = []\n my_colors = []\n algo_name_to_idx = {}\n\n params = get_params_from_file(filepath, 'mse')\n test_type = extract_base_filename(str(filepath.resolve()))\n if test_type.startswith('spike'):\n test_type = 'spike'\n\n max_error = -1\n\n with filepath.open('r', encoding='utf-8') as f:\n for line in f:\n a = line.split(\",\")\n alg, err = a[0], float(a[1])\n max_error = max(max_error, err)\n if alg not in algo_name_to_idx:\n algo_name_to_idx[alg] = len(algos)\n if alg not in 
alg_to_color:\n alg_to_color[alg] = colors[color_idx]\n color_idx += 1\n algos.append(alg)\n my_colors.append(alg_to_color[alg])\n errors.append([])\n idx = algo_name_to_idx[alg]\n errors[idx].append(err)\n\n for i in range(len(errors)):\n errors[i].sort()\n\n # plot CDFs to within .01 granularity on x-axis\n N = len(errors[0])\n dx = 1.0/N\n X = np.arange(0, 100.0, dx*100)\n\n fig=plt.figure()\n plt.xlabel('percentile')\n if mse_type:\n plt.ylabel('MSE')\n else:\n plt.ylabel('max error')\n\n fig.text(0.5, .95, test_type+',k='+str(params.universe_size)+',n='+str(params.num_users)+',eps='+str(params.epsilon), horizontalalignment='center', verticalalignment='top')\n\n plots = []\n for i in range(len(algos)):\n plots.append([])\n for j in range(N):\n plots[i].append(errors[i][int(1.0*j/N * len(errors[i]))])\n plt.plot(X, plots[i], 'o', label=algos[i], markersize=2, color=my_colors[i])\n\n lgnd = plt.legend(loc='upper left')\n\n for color,text in zip(my_colors, lgnd.get_texts()):\n text.set_color(color)\n\n suffix = '.mse.png'\n if not mse_type:\n suffix = '.max.png'\n plt.savefig(str((figs_path / (extract_base_filename(str(filepath.resolve())) + suffix)).resolve()), facecolor='floralwhite', edgecolor='none')\n\ndef gen_time_fig(filepath):\n global colors, alg_to_color, color_idx\n print(\"opening \" + str(filepath.resolve()), file=sys.stderr)\n algos = []\n runtimes = []\n my_colors = []\n algo_name_to_idx = {}\n\n params = get_params_from_file(filepath, 'time')\n test_type = extract_base_filename(str(filepath.resolve()))\n if test_type.startswith('spike'):\n test_type = 'spike'\n\n max_time = -1\n\n with filepath.open('r', encoding='utf-8') as f:\n for line in f:\n a = line.split(\",\")\n alg, k, time = a[0], int(a[1]), int(float(a[2])) # float() first in case # is so big it uses scientific notation\n max_time = max(max_time, time)\n if alg not in algo_name_to_idx:\n algo_name_to_idx[alg] = len(algos)\n if alg not in alg_to_color:\n alg_to_color[alg] = colors[color_idx]\n color_idx += 1\n algos.append(alg)\n my_colors.append(alg_to_color[alg])\n runtimes.append({})\n idx = algo_name_to_idx[alg]\n runtimes[idx][k] = time\n\n for a in algos:\n if set(runtimes[algo_name_to_idx[a]].keys()) != set(runtimes[0].keys()):\n print('Will not create figure for ' + filepath.resolve() + ' since not all algos were ran with same values of k.')\n return\n\n all_universe_sizes = list(runtimes[0].keys())\n all_universe_sizes.sort()\n\n fig=plt.figure()\n plt.xlabel('universe size')\n plt.ylabel('runtime')\n\n fig.text(0.5, .95, test_type+',n='+str(params.num_users)+',eps='+str(params.epsilon), horizontalalignment='center', verticalalignment='top')\n\n plots = []\n for i in range(len(algos)):\n plots.append([])\n for j in range(len(all_universe_sizes)):\n plots[i].append(runtimes[i][all_universe_sizes[j]])\n plt.plot(all_universe_sizes, plots[i], '-', label=algos[i], color=my_colors[i])\n\n lgnd = plt.legend(loc='upper left')\n\n for color,text in zip(my_colors, lgnd.get_texts()):\n text.set_color(color)\n \n plt.savefig(str((figs_path / (extract_base_filename(str(filepath.resolve())) + '.time.png')).resolve()), facecolor='floralwhite', edgecolor='none')\n\ndef generate_figures():\n global results_path, cmds_path, figs_path\n\n SMALL_SIZE = 8\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 24\n\n plt.rcParams[\"figure.figsize\"] = (11.5,7)\n plt.rc('font', size=BIGGER_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of 
the x and y labels\n plt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n \n results_path = Path(FLAGS.results_dir)\n cmds_path = results_path / 'cmds'\n figs_path = results_path.parents[0] / 'figures'\n if figs_path.exists():\n shutil.rmtree(figs_path)\n os.mkdir(figs_path)\n for f in results_path.iterdir():\n if not f.is_dir():\n pathstr = str(f.resolve())\n if pathstr.endswith('.time'):\n gen_time_fig(f)\n elif pathstr.endswith('.mse') or pathstr.endswith('.max'):\n gen_mse_or_max_fig(f, pathstr.endswith('.mse'))\n elif pathstr.endswith('.var'):\n gen_var_fig(f)\n else:\n print('unknown result filetype ' + str(f.resolve()))\n\nif __name__ == \"__main__\":\n app.run(lambda argv: generate_figures())\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dsr0018/olivia | [
"8b7de3a512848c5d313bbc848ac9c7b667c2f6ce"
] | [
"olivia/packagemetrics.py"
] | [
"\"\"\"Olivia package metrics for network vulnerability analysis.\"\"\"\n\nimport numbers\n\nfrom olivia.lib.aggregators import AscendentAggregator, DescendentAggregator\nimport numpy as np\n\n\nclass Reach(AscendentAggregator):\n \"\"\"\n Olivia Reach Metric.\n\n REACH(n) is the number of transitive descendants for a package 'n', i.e. the number of\n potentially affected packages by a defect in 'n'.\n \"\"\"\n\n def __init__(self, olivia_model, **kwargs):\n \"\"\"\n Create a Reach metric object.\n\n Parameters\n ----------\n olivia_model: OliviaModel\n Input network.\n kwargs: **kwargs\n Parameters for ~olivia.lib.aggregators.AscendentAggregator.\n Use 'compression_threshold' and 'save_memory' to adjust computation to available RAM.\n\n \"\"\"\n super().__init__(olivia_model.dag,\n mapping=olivia_model.dag.graph['mapping'],\n **kwargs)\n self._scc_sizes = np.array([len(olivia_model.dag.nodes[x]['members']) for x in olivia_model.dag])\n\n def _aggregation(self, n, descendants):\n return self._scc_sizes[descendants].sum() + self._scc_sizes[n]\n\n def compute(self):\n \"\"\"\n Compute the Reach metric for each package in the network.\n\n Returns\n -------\n ms: A ~MetricStats object with the results of the computation.\n\n \"\"\"\n print(\"Computing Reach\")\n return MetricStats(super().compute(), normalize_factor=self._scc_sizes.sum())\n\n\nclass Impact(AscendentAggregator):\n \"\"\"\n Olivia Impact Metric.\n\n IMPACT(n) is the number of dependencies induced by a package 'n', or the size of the edge set\n of the subgraph induced by transitive descendants of n. It is the amount of dependencies that would be potentially\n compromised in the network by a defect in 'n'.\n \"\"\"\n\n def __init__(self, olivia_model, **kwargs):\n \"\"\"\n Create an Impact metric object.\n\n Parameters\n ----------\n olivia_model: OliviaModel\n Input network.\n kwargs: **kwargs\n Parameters for ~olivia.lib.aggregators.AscendentAggregator.\n Use 'compression_threshold' and 'save_memory' to adjust computation to available RAM.\n\n \"\"\"\n super().__init__(olivia_model.dag,\n mapping=olivia_model.dag.graph['mapping'],\n **kwargs)\n odegree = olivia_model.dag.out_degree(weight='weight')\n intra_edges = np.array([olivia_model.dag.nodes[n]['intra_edges'] for n in olivia_model.dag])\n self._out = np.array([odegree[n] for n in olivia_model.dag]) + intra_edges\n\n def _aggregation(self, n, descendants):\n return self._out[descendants].sum() + self._out[n]\n\n def compute(self):\n \"\"\"\n Compute the Impact metric for each package in the network.\n\n Returns\n -------\n ms: A ~MetricStats object with the results of the computation.\n\n \"\"\"\n print(\"Computing Impact\")\n return MetricStats(super().compute(), normalize_factor=self._out.sum())\n\n\nclass Surface(DescendentAggregator):\n \"\"\"\n Olivia Surface Metric.\n\n SURFACE(n) is the number of transitive ascendants of a package 'n', i.e the number\n of packets in which a defect would potentially cause the compromise of 'n'.\n \"\"\"\n\n def __init__(self, olivia_model, **kwargs):\n \"\"\"\n Create an Impact metric object.\n\n Parameters\n ----------\n olivia_model: OliviaModel\n Input network.\n kwargs: **kwargs\n Parameters for ~olivia.lib.aggregators.AscendentAggregator.\n Use 'compression_threshold' and 'save_memory' to adjust computation to available RAM.\n\n \"\"\"\n super().__init__(olivia_model.dag,\n mapping=olivia_model.dag.graph['mapping'],\n **kwargs)\n self._scc_sizes = np.array([len(olivia_model.dag.nodes[x]['members']) for x in olivia_model.dag])\n\n 
def _aggregation(self, n, descendants):\n return self._scc_sizes[descendants].sum() + self._scc_sizes[n]\n\n def compute(self):\n \"\"\"\n Compute the Surface metric for each package in the network.\n\n Returns\n -------\n ms: A ~MetricStats object with the results of the computation.\n\n \"\"\"\n print(\"Computing Surface\")\n return MetricStats(super().compute(), normalize_factor=self._scc_sizes.sum())\n\n\nclass DependenciesCount:\n \"\"\"\n Dependencies Count Metric.\n\n Number of direct dependencies of a package.\n \"\"\"\n\n def __init__(self, olivia_model):\n \"\"\"\n Create a DependenciesCount metric object.\n\n Parameters\n ----------\n olivia_model: OliviaModel\n Input network.\n\n \"\"\"\n self.net = olivia_model\n\n def compute(self):\n \"\"\"\n Compute the Dependencies Count metric for each package in the network.\n\n Returns\n -------\n ms: A ~MetricStats object with the results of the computation.\n\n \"\"\"\n print(\"Computing Dependencies Count\")\n return MetricStats({package: self.net.network.in_degree(package) for package in self.net.network})\n\n\nclass DependentsCount:\n \"\"\"\n Dependents Count Metric.\n\n Number of direct dependents of a package.\n \"\"\"\n\n def __init__(self, olivia_model):\n \"\"\"\n Create a DependentsCount metric object.\n\n Parameters\n ----------\n olivia_model: OliviaModel\n Input network.\n\n \"\"\"\n self.net = olivia_model\n\n def compute(self):\n \"\"\"\n Compute the Dependents Count metric for each package in the network.\n\n Returns\n -------\n ms: A ~MetricStats object with the results of the computation.\n\n \"\"\"\n print(\"Computing Dependents Count\")\n return MetricStats({package: self.net.network.out_degree(package) for package in self.net.network})\n\n\nclass MetricStats:\n \"\"\"A helper class to store and manipulate Olivia metrics.\"\"\"\n\n def __init__(self, results_dict, normalize_factor=1):\n \"\"\"\n Create and initializes a MetricStats object.\n\n Parameters\n ----------\n results_dict: dict\n {node:value} dict with metric values.\n normalize_factor: float\n Factor to perform normalization\n\n \"\"\"\n self._results = results_dict\n self._normalize_factor = normalize_factor\n self._normalized = False\n self._build_index()\n\n def __getitem__(self, index):\n \"\"\"\n Metric value for package 'index'.\n\n Parameters\n ----------\n index: identifier\n Identifier of package.\n\n Returns\n -------\n value: int\n Metric value for package 'index'.\n\n \"\"\"\n return self._results[index]\n\n def _build_index(self):\n self._values = np.array([self.results_dict[k] for k in self.results_dict], dtype=np.float64)\n self._keys = np.array([k for k in self.results_dict.keys()])\n sorted_indexes = np.flip(np.argsort(self._values))\n self._sorted_keys = self._keys[sorted_indexes]\n\n def top(self, n=1, subset=None):\n \"\"\"\n Return the top 'n' elements according to its metric value.\n\n Parameters\n ----------\n n: int\n number of top packages to retrieve.\n subset: container of nodes\n subset of packages to limit the ranking to\n\n Returns\n -------\n result: list of duples\n List of top n (package, metric value) tuples.\n\n \"\"\"\n if subset:\n result = []\n for k in self._sorted_keys:\n if k in subset:\n result.append((k, self._results[k]))\n if len(result) == n:\n break\n return result\n else:\n return [(k, self._results[k]) for k in self._sorted_keys[:n]]\n\n def bottom(self, n=1, subset=None):\n \"\"\"\n Return the bottom 'n' elements according to its metric value.\n\n Parameters\n ----------\n n: int\n number of bottom 
packages to retrieve.\n subset: container of nodes\n subset of packages to limit the ranking to\n\n Returns\n -------\n result: list of duples\n List of bottom n (package, metric value) tuples.\n\n \"\"\"\n if subset:\n result = []\n for k in reversed(self._sorted_keys):\n if k in subset:\n result.append((k, self._results[k]))\n if len(result) == n:\n break\n return result\n else:\n return [(k, self._results[k]) for k in self._sorted_keys[-n:]]\n\n\n @property\n def values(self):\n \"\"\"Return array with metric values.\"\"\"\n return self._values\n\n @property\n def keys(self):\n \"\"\"Return package names.\"\"\"\n return self._keys\n\n @property\n def results_dict(self):\n \"\"\"Return metric values in a package:value dictionary.\"\"\"\n return self._results\n\n @property\n def normalize_factor(self):\n \"\"\"Return factor used for performing metric normalization.\"\"\"\n return self._normalize_factor\n\n def normalize(self):\n \"\"\"Perform metric normalization.\"\"\"\n if self._normalized or self._normalize_factor == 1:\n return\n for k in self._results:\n self._results[k] = self._results[k] / self._normalize_factor\n self._normalized = True\n self._build_index()\n\n def __add__(self, other):\n \"\"\"Add metric values element-wise or to a numeric constant.\"\"\"\n if isinstance(other, numbers.Number):\n return MetricStats({e: self[e] + other for e in self.keys})\n else:\n return MetricStats({e: self[e] + other[e] for e in self.keys})\n\n def __sub__(self, other):\n \"\"\"Subtract metric values element-wise or a numeric constant.\"\"\"\n if isinstance(other, numbers.Number):\n return MetricStats({e: self[e] - other for e in self.keys})\n else:\n return MetricStats({e: self[e] - other[e] for e in self.keys})\n\n def __mul__(self, other):\n \"\"\"Multiply metric values element-wise or to a numeric constant.\"\"\"\n if isinstance(other, numbers.Number):\n return MetricStats({e: self[e] * other for e in self.keys})\n else:\n return MetricStats({e: np.multiply(self[e], other[e], dtype=np.int64) for e in self.keys})\n\n def __truediv__(self, other):\n \"\"\"Divide metric values element-wise or with a numeric constant.\"\"\"\n if isinstance(other, numbers.Number):\n return MetricStats({e: np.true_divide(self[e], other, dtype=np.float64) for e in self.keys})\n else:\n return MetricStats({e: np.true_divide(self[e], other[e], dtype=np.float64) for e in self.keys})\n\n def __pow__(self, other):\n \"\"\"Power metric values element-wise or to a numeric constant.\"\"\"\n if isinstance(other, numbers.Number):\n return MetricStats({e: self[e] ** other for e in self.keys})\n else:\n return MetricStats({e: self[e] ** other[e] for e in self.keys})\n"
] | [
[
"numpy.argsort",
"numpy.true_divide",
"numpy.array",
"numpy.multiply"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jnettels/DHNx | [
"11b2b431fcc9d5923c2a6ac12cc37916ff1fdd2b"
] | [
"examples/import_export_plot/import_export_plot.py"
] | [
"import dhnx\nimport matplotlib.pyplot as plt\n\n# initialize a thermal network\nthermal_network = dhnx.network.ThermalNetwork()\n\n# load data from csv\nthermal_network.from_csv_folder('data_csv_input')\n\n# save thermal network to csv\nthermal_network.to_csv_folder('data_csv_output')\n\n# get graph of thermal network\ngraph = thermal_network.to_nx_graph()\n\n# plot static map\nstatic_map = dhnx.plotting.StaticMap(thermal_network)\n\nstatic_map.draw(background_map=False)\nplt.savefig('static_map_wo_background.png')\n\nstatic_map.draw(background_map=True)\nplt.savefig('static_map_w_background.png')\n\n# plot interactive map\ninteractive_map = dhnx.plotting.InteractiveMap(thermal_network)\nmap = interactive_map.draw()\nmap.save('interactive_map.html')\n"
] | [
[
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Practical-Formal-Methods/mod_stable_baselines3 | [
"08bdb0a529c8ab446ac7973f2a02f832c0c3f454"
] | [
"stable_baselines3/ppo/ppo.py"
] | [
"import warnings\nfrom typing import Any, Dict, Optional, Type, Union\n\nimport numpy as np\nimport torch as th\nfrom mod_gym.gym import spaces\nfrom torch.nn import functional as F\n\nfrom mod_stable_baselines3.stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm\nfrom mod_stable_baselines3.stable_baselines3.common.policies import ActorCriticPolicy\nfrom mod_stable_baselines3.stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule\nfrom mod_stable_baselines3.stable_baselines3.common.utils import explained_variance, get_schedule_fn\n\n\nclass PPO(OnPolicyAlgorithm):\n \"\"\"\n Proximal Policy Optimization algorithm (PPO) (clip version)\n\n Paper: https://arxiv.org/abs/1707.06347\n Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)\n https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and\n and Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)\n\n Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress remaining (from 1 to 0)\n :param n_steps: The number of steps to run for each environment per update\n (i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)\n NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)\n See https://github.com/pytorch/pytorch/issues/29372\n :param batch_size: Minibatch size\n :param n_epochs: Number of epoch when optimizing the surrogate loss\n :param gamma: Discount factor\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n :param clip_range: Clipping parameter, it can be a function of the current progress\n remaining (from 1 to 0).\n :param clip_range_vf: Clipping parameter for the value function,\n it can be a function of the current progress remaining (from 1 to 0).\n This is a parameter specific to the OpenAI implementation. If None is passed (default),\n no clipping will be done on the value function.\n IMPORTANT: this clipping depends on the reward scaling.\n :param ent_coef: Entropy coefficient for the loss calculation\n :param vf_coef: Value function coefficient for the loss calculation\n :param max_grad_norm: The maximum value for the gradient clipping\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param target_kl: Limit the KL divergence between updates,\n because the clipping is not enough to prevent large update\n see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)\n By default, there is no limit on the kl div.\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. 
(Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[ActorCriticPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 3e-4,\n n_steps: int = 2048,\n batch_size: Optional[int] = 64,\n n_epochs: int = 10,\n gamma: float = 0.99,\n gae_lambda: float = 0.95,\n clip_range: Union[float, Schedule] = 0.2,\n clip_range_vf: Union[None, float, Schedule] = None,\n ent_coef: float = 0.0,\n vf_coef: float = 0.5,\n max_grad_norm: float = 0.5,\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n target_kl: Optional[float] = None,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(PPO, self).__init__(\n policy,\n env,\n learning_rate=learning_rate,\n n_steps=n_steps,\n gamma=gamma,\n gae_lambda=gae_lambda,\n ent_coef=ent_coef,\n vf_coef=vf_coef,\n max_grad_norm=max_grad_norm,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n tensorboard_log=tensorboard_log,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n device=device,\n create_eval_env=create_eval_env,\n seed=seed,\n _init_setup_model=False,\n supported_action_spaces=(\n spaces.Box,\n spaces.Discrete,\n spaces.MultiDiscrete,\n spaces.MultiBinary,\n ),\n )\n\n # Sanity check, otherwise it will lead to noisy gradient and NaN\n # because of the advantage normalization\n assert (\n batch_size > 1\n ), \"`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440\"\n\n if self.env is not None:\n # Check that `n_steps * n_envs > 1` to avoid NaN\n # when doing advantage normalization\n buffer_size = self.env.num_envs * self.n_steps\n assert (\n buffer_size > 1\n ), f\"`n_steps * n_envs` must be greater than 1. 
Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}\"\n # Check that the rollout buffer size is a multiple of the mini-batch size\n untruncated_batches = buffer_size // batch_size\n if buffer_size % batch_size > 0:\n warnings.warn(\n f\"You have specified a mini-batch size of {batch_size},\"\n f\" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,\"\n f\" after every {untruncated_batches} untruncated mini-batches,\"\n f\" there will be a truncated mini-batch of size {buffer_size % batch_size}\\n\"\n f\"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\\n\"\n f\"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})\"\n )\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n self.clip_range = clip_range\n self.clip_range_vf = clip_range_vf\n self.target_kl = target_kl\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super(PPO, self)._setup_model()\n\n # Initialize schedules for policy/value clipping\n self.clip_range = get_schedule_fn(self.clip_range)\n if self.clip_range_vf is not None:\n if isinstance(self.clip_range_vf, (float, int)):\n assert self.clip_range_vf > 0, \"`clip_range_vf` must be positive, \" \"pass `None` to deactivate vf clipping\"\n\n self.clip_range_vf = get_schedule_fn(self.clip_range_vf)\n\n def train(self) -> None:\n \"\"\"\n Update policy using the currently gathered rollout buffer.\n \"\"\"\n # Update optimizer learning rate\n self._update_learning_rate(self.policy.optimizer)\n # Compute current clip range\n clip_range = self.clip_range(self._current_progress_remaining)\n # Optional: clip range for the value function\n if self.clip_range_vf is not None:\n clip_range_vf = self.clip_range_vf(self._current_progress_remaining)\n\n entropy_losses = []\n pg_losses, value_losses = [], []\n clip_fractions = []\n\n continue_training = True\n\n # train for n_epochs epochs\n for epoch in range(self.n_epochs):\n approx_kl_divs = []\n # Do a complete pass on the rollout buffer\n for rollout_data in self.rollout_buffer.get(self.batch_size):\n actions = rollout_data.actions\n if isinstance(self.action_space, spaces.Discrete):\n # Convert discrete action from float to long\n actions = rollout_data.actions.long().flatten()\n\n # Re-sample the noise matrix because the log_std has changed\n # TODO: investigate why there is no issue with the gradient\n # if that line is commented (as in SAC)\n if self.use_sde:\n self.policy.reset_noise(self.batch_size)\n\n values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)\n values = values.flatten()\n # Normalize advantage\n advantages = rollout_data.advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)\n\n # ratio between old and new policy, should be one at the first iteration\n ratio = th.exp(log_prob - rollout_data.old_log_prob)\n\n # clipped surrogate loss\n policy_loss_1 = advantages * ratio\n policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)\n policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()\n\n # Logging\n pg_losses.append(policy_loss.item())\n clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()\n clip_fractions.append(clip_fraction)\n\n if self.clip_range_vf is None:\n # No clipping\n values_pred = values\n else:\n # Clip the different between old and new value\n # NOTE: this depends on the reward scaling\n values_pred = rollout_data.old_values + th.clamp(\n values - rollout_data.old_values, 
-clip_range_vf, clip_range_vf\n )\n # Value loss using the TD(gae_lambda) target\n value_loss = F.mse_loss(rollout_data.returns, values_pred)\n value_losses.append(value_loss.item())\n\n # Entropy loss favor exploration\n if entropy is None:\n # Approximate entropy when no analytical form\n entropy_loss = -th.mean(-log_prob)\n else:\n entropy_loss = -th.mean(entropy)\n\n entropy_losses.append(entropy_loss.item())\n\n loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss\n\n # Calculate approximate form of reverse KL Divergence for early stopping\n # see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417\n # and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419\n # and Schulman blog: http://joschu.net/blog/kl-approx.html\n with th.no_grad():\n log_ratio = log_prob - rollout_data.old_log_prob\n approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()\n approx_kl_divs.append(approx_kl_div)\n\n if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:\n continue_training = False\n if self.verbose >= 1:\n print(f\"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}\")\n break\n\n # Optimization step\n self.policy.optimizer.zero_grad()\n loss.backward()\n # Clip grad norm\n th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.policy.optimizer.step()\n\n if not continue_training:\n break\n\n self._n_updates += self.n_epochs\n explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())\n\n # Logs\n self.logger.record(\"train/entropy_loss\", np.mean(entropy_losses))\n self.logger.record(\"train/policy_gradient_loss\", np.mean(pg_losses))\n self.logger.record(\"train/value_loss\", np.mean(value_losses))\n self.logger.record(\"train/approx_kl\", np.mean(approx_kl_divs))\n self.logger.record(\"train/clip_fraction\", np.mean(clip_fractions))\n self.logger.record(\"train/loss\", loss.item())\n self.logger.record(\"train/explained_variance\", explained_var)\n if hasattr(self.policy, \"log_std\"):\n self.logger.record(\"train/std\", th.exp(self.policy.log_std).mean().item())\n\n self.logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n self.logger.record(\"train/clip_range\", clip_range)\n if self.clip_range_vf is not None:\n self.logger.record(\"train/clip_range_vf\", clip_range_vf)\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 1,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"PPO\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n ) -> \"PPO\":\n\n return super(PPO, self).learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n eval_env=eval_env,\n eval_freq=eval_freq,\n n_eval_episodes=n_eval_episodes,\n tb_log_name=tb_log_name,\n eval_log_path=eval_log_path,\n reset_num_timesteps=reset_num_timesteps,\n )\n"
] | [
[
"torch.mean",
"torch.abs",
"torch.min",
"torch.exp",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"numpy.mean",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ilopezgp/human_impacts | [
"b2758245edac0946080a647f1dbfd1098c0f0b27",
"b2758245edac0946080a647f1dbfd1098c0f0b27",
"b2758245edac0946080a647f1dbfd1098c0f0b27"
] | [
"data/atmosphere_biogeochemistry/global_carbon_project_CO2/code/get_global_CO2.py",
"data/water/Qin2019_water_use/viz/generate.py",
"data/atmosphere_biogeochemistry/IFASTAT_nitrogen_production/viz/generate.py"
] | [
"# -*- coding: utf-8 -*-\n#\n#################\n# This script takes as an input the supplementary global data \n# from Friedlingstein et al. (2019), imported in csv format from\n# the original .xlsx file, and returns a csv file with\n# historical global emissions and sinks since 1959.\n# Data is provided in Tg C /year and Tg CO2 /year.\n#\n# Last updated: Apr 2021\n# Author: Ignacio Lopez-Gomez\n# \n#################\nimport numpy as np\nimport pandas as pd\n\ndef add_agg_col(df, col_inds, type_ = float):\n df_agg_col = df[col_inds[0]].astype(float)\n for i in range(1,len(col_inds)):\n df_agg_col = df_agg_col + df[col_inds[i]].astype(float)\n return df_agg_col\n\n######### Get global CO2 data #########\ndata_ = pd.DataFrame(pd.read_csv('../processed/historical_global_budget_clean.csv', header=0))\n\n# # Aggregate data\ndata_['anthropogenic emissions'] = add_agg_col(data_, ['fossil fuel and industry', 'land-use change emissions'])\ndata_['natural sink'] = add_agg_col(data_, ['ocean sink','land sink'])\ndata_['Flux into atmosphere'] = 'source'\n\n# # Tidy data\ndata_tidy = data_.melt(id_vars=data_[['Year', 'Flux into atmosphere']], \n var_name=\"Sink/source type\", \n value_name=\"Pg C yr-1\")\ndata_tidy.index = data_tidy['Sink/source type']\ndata_tidy.loc[['ocean sink', 'land sink', 'natural sink'],['Flux into atmosphere']] = 'sink'\ndata_tidy['Reported value'] = 'mean'\ndata_tidy = data_tidy[['Year', 'Flux into atmosphere', 'Sink/source type', 'Reported value', 'Pg C yr-1']]\n\n# Add uncertainties from Global_Carbon_Budget_2019v1.0\ndata_unc = data_tidy.copy()\ndata_unc['Reported value'] = 'standard deviation'\ndata_unc.loc[['land-use change emissions'],['Pg C yr-1']] = 0.7\ndata_unc.loc[['fossil fuel and industry'],['Pg C yr-1']] = data_unc.loc[['fossil fuel and industry'],['Pg C yr-1']]*0.05\ndata_unc.loc[['ocean sink'],['Pg C yr-1']] = 0.5\ndata_unc.loc[['land sink'],['Pg C yr-1']] = 0.9\n# Uncorrelated errors following Friedlingstein, P. et al. 
(2019)., Table 6\ndata_unc.loc[['anthropogenic emissions'],['Pg C yr-1']] = np.sqrt(np.add(\n np.square(0.7), np.square(data_unc.loc[['fossil fuel and industry'],['Pg C yr-1']].to_numpy())))\ndata_unc.loc[['budget imbalance'],['Pg C yr-1']] = np.nan\ndata_unc.loc[['atmospheric growth'],['Pg C yr-1']] = 0.2\ndata_unc.loc[['natural sink'],['Pg C yr-1']] = np.sqrt(0.5*0.5 + 0.9*0.9)\ndata_tidy=data_tidy.append(data_unc)\n\n# Add CO2 emissions as well\ndata_tidy[\"Pg CO2 yr-1\"] = data_tidy[\"Pg C yr-1\"]*44.0/12.0\ndata_tidy = data_tidy.melt(id_vars=data_tidy.columns[:4], \n var_name=\"Units\", \n value_name=\"Value\")\ndata_tidy[\"Value\"] = round(data_tidy[\"Value\"], 2)\n# Write to file\ndata_tidy.to_csv(r'../processed/global_carbon_budget_processed.csv', index = False)\n\n### CO2 flux number : Ratio between anthropogenic sources and net natural sinks\n# Mean value\ndf_anthro = data_tidy[(data_tidy[\"Sink/source type\"] == 'anthropogenic emissions') &\n (data_tidy[\"Reported value\"] == 'mean') &\n (data_tidy[\"Units\"] == 'Pg C yr-1')].copy(deep=True)\n\ndf_nat = data_tidy[(data_tidy[\"Sink/source type\"] == 'natural sink') &\n (data_tidy[\"Reported value\"] == 'mean') &\n (data_tidy[\"Units\"] == 'Pg C yr-1')].copy(deep=True)\n\n# Standard deviation\ndf_anthro_unc = data_tidy[(data_tidy[\"Sink/source type\"] == 'anthropogenic emissions') &\n (data_tidy[\"Reported value\"] == 'standard deviation') &\n (data_tidy[\"Units\"] == 'Pg C yr-1')].copy(deep=True)\n\ndf_nat_unc = data_tidy[(data_tidy[\"Sink/source type\"] == 'natural sink') &\n (data_tidy[\"Reported value\"] == 'standard deviation') &\n (data_tidy[\"Units\"] == 'Pg C yr-1')].copy(deep=True)\n# Drop unnecessary columns\ndf_anthro = df_anthro.drop(columns=[\"Flux into atmosphere\", \"Sink/source type\", \"Units\"])\ndf_anthro_unc = df_anthro_unc.drop(columns=[\"Flux into atmosphere\", \"Sink/source type\", \"Units\"])\ndf_nat = df_nat.drop(columns=[\"Flux into atmosphere\", \"Sink/source type\", \"Units\"])\ndf_nat_unc = df_nat_unc.drop(columns=[\"Flux into atmosphere\", \"Sink/source type\", \"Units\"])\n\n# Error propagation, assuming possibly correlated small errors. Standard approach following\n# An Introduction to Error Analysis, John R. Taylor, Ch. 3.\ndf_anthro_unc['Value'] = (df_anthro_unc.loc[:, 'Value'].to_numpy()/df_anthro.loc[:, 'Value'].to_numpy() +\n df_nat_unc.loc[:, 'Value'].to_numpy()/df_nat.loc[:, 'Value'].to_numpy() \n ) * (\n df_anthro.loc[:, 'Value'].to_numpy()/df_nat.loc[:, 'Value'].to_numpy() )\n\ndf_anthro['Value'] = df_anthro.loc[:, 'Value']/df_nat.loc[:, 'Value'].to_numpy()\n\ndata_ratio=df_anthro.append(df_anthro_unc)\ndata_ratio[\"Value\"] = round(data_ratio[\"Value\"], 2)\ndata_ratio.to_csv(r'../processed/co2_flux_number_dimensioness.csv', index = False)\n",
"#%%\nimport pandas as pd \nimport altair as alt \n\n# Load the timeseries data\ndata = pd.read_csv('../processed/Qin2019_category_volume_1980-2016.csv')\ndata['year'] = pd.to_datetime(data['year'], format='%Y')\n\n# %%\n# Generate a vizualization for each category\nfor g, d in data.groupby(['category']): \n domain_min = d['volume_km3'].values.min() - 10\n domain_max = d['volume_km3'].values.max() + 10\n chart = alt.Chart(d).encode(\n x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),\n y=alt.Y(field='volume_km3', type='quantitative', title='volume [km³]',\n scale=alt.Scale(domain=[domain_min, domain_max])),\n tooltip=[alt.Tooltip(field='year', type='temporal', format='%Y', title='year'),\n alt.Tooltip(field='volume_km3', type='quantitative', format='0.1f', title='volume [km³]')]\n ).properties(width='container', height=300)\n l = chart.mark_line(color='dodgerblue')\n p = chart.mark_point(color='dodgerblue', filled=True)\n layer = alt.layer(l, p)\n layer.save(f'./{g}.json')\n# %%\n# Generate a viz for the total water use\ndata = data.groupby(['year']).sum().reset_index()\n\nchart = alt.Chart(data).encode(\n x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),\n y=alt.Y(field='volume_km3', type='quantitative', title='volume [km³]',\n scale=alt.Scale(domain=[1000, 1800])),\n tooltip=[alt.Tooltip(field='year', type='temporal', format='%Y', title='year'),\n alt.Tooltip(field='volume_km3', type='quantitative', format='0.1f', title='volume [km³]')]\n ).properties(width='container', height=300)\nl = chart.mark_line(color='dodgerblue')\np = chart.mark_point(color='dodgerblue', filled=True)\nlayer = alt.layer(l, p)\nlayer.save('./total_volume.json')\nlayer\n\n# %%\n",
"#%%\nimport numpy as np\nimport pandas as pd\nimport altair as alt\nimport anthro.io\n\n# Load the IFASTAT nitrogen data.\ndata = pd.read_csv('../processed/IFA_ammonia_production_2008-2019.csv')\nproc_data = pd.DataFrame()\nproc_data['year'] = pd.to_datetime(data['year'], format='%Y')\nproc_data['production (kg)'] = data['value']\nproc_data['label'] = anthro.io.numeric_formatter(data['value'].values * 1E3, unit='kg')\n\n# Generate a plot for global nitrogen production.\nchart = alt.Chart(proc_data).encode(\n x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),\n y=alt.Y(field='production (kg)', type='quantitative', title='Mass of nitrogen in synthesized ammonia [kg]', scale=alt.Scale(domain=[120000000000, 160000000000])),\n tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),\n alt.Tooltip(field='label', type='nominal', title='production mass')]\n ).properties(width='container', height=300)\n\nl = chart.mark_line(color='dodgerblue')\np = chart.mark_point(color='dodgerblue', filled=True)\nlayer = alt.layer(l, p)\nlayer.save('./nitrogen_production.json')\n# %%\n"
] | [
[
"numpy.square",
"pandas.read_csv",
"numpy.sqrt"
],
[
"pandas.read_csv",
"pandas.to_datetime"
],
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Anonymous-ARR/code | [
"ede7a3b73cb97905454c21d2f9260a788751f8d8"
] | [
"probe_custom_word2vec/probe_loader.py"
] | [
"import torch\nimport os, json, random, sys\nfrom params import params\nimport numpy as np\nfrom gensim.models import Word2Vec\nfrom torch.utils import data\nimport pandas as pd\nimport re\nimport string\n\nnp.random.seed(params.seed)\nrandom.seed(params.seed)\ntorch.manual_seed(params.seed)\n\ncustom_embeds_limit = 40000\n\ndo_lowercase = lambda x: x.lower() if not params.case_sensitive else x\nMAX_LEN = 0\n\nbasepath = \"/\".join(os.path.realpath(__file__).split('/')[:-1])\n\nregex_pattern = '^[a-zA-Z]+$'\n\ncustom_embeds = Word2Vec.load(params.model_card)\n\nfull_dataset = sorted([(str(x), i) for i, x in enumerate(custom_embeds.wv.index_to_key)], key = lambda x: x[1])\ntoken_to_index = {k:v for k,v in full_dataset}\n\nfull_dataset = full_dataset[:custom_embeds_limit]\n\nfull_dataset = [x for x in full_dataset if re.match(regex_pattern, x[0]) and\n len(x[0]) > 1 and set(x[0]) != {'Ġ'} and x[0].lower() not in ['nan', 'null', 'n/a']]\n\nchar_vocab = list(set([x.lower() for d in full_dataset for x in d[0]]))\nprint(char_vocab)\nprint(\"Len Char Vocab:\", len(char_vocab))\nchar_to_id = {c:i for i,c in enumerate(char_vocab)}\nid_to_char = {i:c for i,c in enumerate(char_vocab)}\n\nfrom nltk.stem import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\nclean_up = lambda x: x[1:] if x[0] == 'Ġ' else x\n\nlemma_wise = {x:[] for x in set(lemmatizer.lemmatize(clean_up(x[0].lower())) for x in full_dataset)}\nfor x in full_dataset:\n lemma_wise[lemmatizer.lemmatize(clean_up(x[0].lower()))].append(x)\n\nflatten = lambda x: [y for xx in x for y in xx]\nfull_dataset = list(lemma_wise.values())\n\n# Split the dataset\nrandom.shuffle(full_dataset)\n\ndef shuffle_and_return(x):\n random.shuffle(x)\n return x\n\nclass SpellingDataset:\n def __init__(self):\n self.embed_tokenizer = token_to_index\n\n self.char_to_id = char_to_id\n self.id_to_char = id_to_char\n self.alphabets = string.ascii_lowercase\n \n self.split = int(0.8 * len(full_dataset))\n random.shuffle(full_dataset)\n train_set = flatten(full_dataset[:self.split])\n test_set = flatten(full_dataset[self.split:])\n\n self.alphabet_wise_datasets = {c: self.split_and_process(c, train_set, test_set)\n for c in self.alphabets\n }\n\n def split_and_process(self, c, train_data, test_data):\n train_data = self.balance_dataset(c, train_data)\n test_data = self.balance_dataset(c, test_data)\n if params.dummy_run:\n test_data = train_data\n return (self.process(c, train_data), self.process(c, test_data))\n\n def balance_dataset(self, c, train_set):\n splitted_set = ([x for x in train_set if c in x[0]],\n [x for x in train_set if c not in x[0]])\n assert len(splitted_set[0]) + len(splitted_set[1]) == len(train_set)\n\n train_set = splitted_set[0] + splitted_set[1][:len(splitted_set[0])]\n random.shuffle(train_set)\n return train_set\n\n def process(self, c, all_data):\n if params.dummy_run:\n all_data = all_data[:5]\n return [(x[0], self.embed_tokenizer[x[0]],\n int(c in do_lowercase(x[0])))\n for x in all_data]\n\ndef pad(batch):\n get_f = lambda x: [single_data[x] for single_data in batch]\n batch_tokens = get_f(0)\n token_ids_tensor = torch.LongTensor(get_f(1)).to(params.device)\n char_ids_tensor = torch.FloatTensor(get_f(2)).to(params.device)\n\n return batch_tokens, token_ids_tensor, char_ids_tensor\n\nif __name__ == \"__main__\":\n dataset = SpellingDataset()\n print(\"Num chars:\", len(dataset.alphabet_wise_datasets))\n\n print({x[0]: len(x[1][0]) for x in dataset.alphabet_wise_datasets.items()})\n print('\\n')\n print({x[0]: len(x[1][1]) for x in 
dataset.alphabet_wise_datasets.items()})\n\n print(sorted([x[1] for x in dataset.alphabet_wise_datasets['a'][0]])[-10:])\n print(dataset.alphabet_wise_datasets['a'][0][:5])\n print(dataset.alphabet_wise_datasets['a'][1][:5])\n\n print(pad(dataset.alphabet_wise_datasets['a'][0][:5]))\n"
] | [
[
"torch.manual_seed",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
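The SpellingDataset code in the row above splits its vocabulary at the lemma level, so that inflections of the same word never end up on both sides of the train/test boundary. A minimal self-contained sketch of that idea, assuming a placeholder lemma lookup instead of NLTK's WordNetLemmatizer (all names below are illustrative, not the module's own):

import random
from collections import defaultdict

def lemma_split(tokens, lemmatize, train_frac=0.8, seed=0):
    # Group surface forms by lemma, shuffle the groups, then flatten each side.
    groups = defaultdict(list)
    for tok in tokens:
        groups[lemmatize(tok)].append(tok)
    keys = list(groups)
    random.Random(seed).shuffle(keys)
    cut = int(train_frac * len(keys))
    train = [t for k in keys[:cut] for t in groups[k]]
    test = [t for k in keys[cut:] for t in groups[k]]
    return train, test

# Toy stand-in lemmatizer; the code in the row above uses nltk's WordNetLemmatizer.
lemma = {"running": "run", "runs": "run", "ran": "run",
         "walked": "walk", "walking": "walk", "talks": "talk"}
train, test = lemma_split(list(lemma), lemma.get)
print(train, test)
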
sio13/helmnet | [
"514a2035b4e043825d444496eda375b3d954f853",
"514a2035b4e043825d444496eda375b3d954f853"
] | [
"helmnet/hybridnet.py",
"helmnet/architectures.py"
] | [
"from helmnet.source_module import SourceModule\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.nn.functional import hardtanh\nfrom random import choice\nimport pytorch_lightning as pl\nfrom torchmetrics.regression import MeanAbsoluteError\nimport numpy as np\nfrom helmnet.architectures import HybridNet\nfrom helmnet.dataloaders import get_dataset\nfrom helmnet.spectral import LaplacianWithPML, FastLaplacianWithPML\nfrom helmnet.replaybuffer import ReplayBuffer, Experience\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\n\n\nclass IterativeSolver(pl.LightningModule):\n def __init__(\n self,\n domain_size : int,\n k : float,\n omega : float,\n PMLsize : int,\n sigma_max : float,\n source_location : list,\n train_data_path : str,\n validation_data_path : str,\n test_data_path : str,\n activation_function : str = \"relu\",\n architecture : str = \"custom_unet\",\n gradient_clip_val : int = 0,\n batch_size : int = 24,\n buffer_size : int = 100,\n depth : int = 4,\n features : int = 8,\n learning_rate : float = 1e-4,\n loss : str = \"mse\",\n minimum_learning_rate : float = 1e-4,\n optimizer : str = \"adam\",\n weight_decay : float = 0.0,\n max_iterations : int = 100,\n source_amplitude : int = 10,\n source_phase : int = 0,\n source_smoothing : bool = False,\n state_channels : int = 2,\n state_depth : int = 4,\n unrolling_steps : int = 10\n ):\n super().__init__()\n\n # Saving hyperparameters\n self.save_hyperparameters()\n\n # Derived modules\n self.replaybuffer = ReplayBuffer(self.hparams.buffer_size)\n self.metric = MeanAbsoluteError()\n\n self.register_buffer('sigmas', None) # buffer to transition tensor device with module\n \n self.set_laplacian()\n self.setup_source() # source is now a module\n\n # Non linear function approximator\n self.init_f()\n\n # Custom weight initialization\n # TODO: Add this to the settings file\n def weights_init(m):\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_normal_(m.weight, gain=0.02)\n # torch.nn.init.zeros_(m.bias)\n\n self.f.apply(weights_init)\n\n def init_f(self):\n nn_name = self.hparams.architecture\n if nn_name == \"custom_unet\":\n self.f = HybridNet(\n activation_function = self.hparams.activation_function,\n depth = self.hparams.depth,\n domain_size = self.hparams.domain_size,\n features = self.hparams.features,\n inchannels = 6,\n state_channels = self.hparams.state_channels,\n state_depth = self.hparams.state_depth,\n )\n else:\n raise NotImplementedError(\"Unknown architecture {}\".format(nn_name))\n\n def set_domain_size(self, domain_size, source_location = None, source_map = None):\n self.hparams.domain_size = domain_size\n self.f.domain_size = self.hparams.domain_size\n self.set_laplacian()\n self.setup_source()\n\n #pytorch lightning really does not like this\n self.Lap.to(self.device)\n self.source_module.to(self.device)\n\n if source_location is not None:\n self.set_multiple_sources([source_location])\n else:\n self.set_source_maps(source_map)\n self.f.init_by_size()\n for enc, size in zip(self.f.enc, self.f.states_dimension):\n enc.domain_size = size\n\n def set_laplacian(self):\n \"\"\"\n self.Lap = LaplacianWithPML(\n domain_size=self.hparams.domain_size,\n PMLsize=self.hparams.PMLsize,\n k=self.hparams.k,\n sigma_max=self.hparams.sigma_max,\n ).to(self.device)\n \"\"\"\n self.Lap = FastLaplacianWithPML(\n domain_size = self.hparams.domain_size,\n PMLsize = self.hparams.PMLsize,\n k = self.hparams.k,\n sigma_max = self.hparams.sigma_max,\n )#.to(self.device) #by the time of 
initialization the self.device variable is still cpu\n\n sigmax, sigmay = self.Lap.sigmas()\n sigmax = sigmax.clone().detach()#.to(self.device) #torch.tensor(sigmax, device=self.device) \n sigmay = sigmay.clone().detach()#.to(self.device) #torch.tensor(sigmay, device=self.device)\n sigmax = sigmax.unsqueeze(0)\n sigmay = sigmay.unsqueeze(0)\n self.sigmas = torch.cat([sigmax, sigmay]).float() # buffer will be moved together with the module\n\n def setup_source(self):\n self.source_module = SourceModule(\n image_size = self.hparams.domain_size,\n omega = self.hparams.omega,\n location = self.hparams.source_location,\n amplitude = self.hparams.source_amplitude,\n phase = self.hparams.source_phase,\n smooth = self.hparams.source_smoothing\n )\n with torch.no_grad():\n self.set_source()\n\n def set_source_maps(self, sourceval):\n self.source = nn.Parameter(\n sourceval,\n requires_grad = False,\n )\n\n def set_source(self):\n sourceval = self.source_module.spatial_map(0).permute(0, 3, 1, 2)\n self.set_source_maps(sourceval)\n\n def reset_source(self):\n with torch.no_grad():\n if not self.source_module.get_location() == self.hparams.source_location:\n self.source_module.set_new_location(self.hparams.source_location)\n self.set_source()\n\n def set_multiple_sources(self, source_locations):\n sourceval_array = []\n with torch.no_grad():\n for loc in source_locations:\n self.source_module.set_new_location(loc)\n sourceval_array.append(\n self.source_module.spatial_map(0).permute(0, 3, 1, 2)\n )\n sourceval = torch.cat(sourceval_array, 0)\n self.set_source_maps(sourceval)\n\n def on_after_backward(self):\n if self.hparams.gradient_clip_val > 0:\n torch.nn.utils.clip_grad.clip_grad_value_(\n self.parameters(), self.hparams.gradient_clip_val\n )\n\n def get_random_source_loc(self):\n \"\"\"Random source location on a circle\"\"\"\n # TODO: Make it more flexible, this is basically hard coded...\n theta = torch.tensor(2 * np.pi * np.random.rand(1), device=self.device)\n L = self.hparams.domain_size // 2\n dL = L - self.hparams.PMLsize - 2\n # source_location = np.array(\n # [int(L + dL * np.cos(theta)), int(L + dL * np.sin(theta))]\n # )\n source_location = torch.tensor(\n [int(L + dL * torch.cos(theta)), int(L + dL * torch.sin(theta))], device=self.device\n )\n return source_location\n\n def train_dataloader(self):\n # Making dataset of SoS\n sos_train = get_dataset(self.hparams.train_data_path)\n\n # Filling up experience replay\n print(\"Filling up Replay buffer...\")\n\n with torch.no_grad():\n for counter in range(len(self.replaybuffer)):\n self.reset_source() # self.set_multiple_sources([self.get_random_source_loc()])\n #sos_map = sos_train[counter].unsqueeze(0).to(self.device) # [1, 1, 96, 96]\n sos_map = sos_train[counter].unsqueeze(0)\n sos_map = sos_map.type_as(self.source)\n k_sq, wavefield = self.get_initials(sos_map) # ( [1, 1, 96, 96], [1, 2, 96, 96] )\n self.f.clear_states(wavefield) \n h_states = self.f.get_states(flatten=True) # [1, 2, 96x96xN]\n residual = self.get_residual(wavefield, k_sq) # [1, 2, 96x96]\n exp = Experience(\n wavefield[0],\n h_states[0],\n k_sq[0],\n residual[0],\n self.source[0],\n counter * 10,\n )\n\n self.replaybuffer.append(exp, counter)\n\n # Return the dataloader of sos maps\n return DataLoader(\n sos_train,\n batch_size=self.hparams.batch_size,\n num_workers=min([self.hparams.batch_size, 32]),\n drop_last=True,\n )\n\n def val_dataloader(self):\n # Making dataset of SoS\n self.reset_source()\n sos_train = get_dataset(self.hparams.validation_data_path)\n # 
Return the dataloader of sos maps\n return DataLoader(\n sos_train,\n batch_size=self.hparams.batch_size,\n num_workers=min([self.hparams.batch_size, 32]),\n )\n\n def test_dataloader(self):\n self.reset_source()\n testset = get_dataset(self.hparams.test_data_path)\n # Return the dataloader of sos maps\n return DataLoader(\n testset,\n batch_size=self.hparams.batch_size,\n num_workers=min([self.hparams.batch_size, 32]),\n shuffle = True\n )\n\n def configure_optimizers(self):\n # TODO: Add adam betast to settings file\n if self.hparams.optimizer.lower() == \"adam\":\n optimizer = torch.optim.Adam(\n self.parameters(),\n lr=self.hparams.learning_rate,\n betas=(0.9, 0.95),\n weight_decay=self.hparams.weight_decay,\n )\n else:\n raise NotImplementedError(\n \"The optimizer {} is not implemented\".format(self.hparams.optimizer)\n )\n\n if self.hparams.minimum_learning_rate > self.hparams.learning_rate:\n raise ValueError(\n \"Minimum learning rate ({}) must be smaller than the starting learning rate ({})\".format(\n self.hparams.minimum_learning_rate, self.hparams.learning_rate\n )\n )\n scheduler = {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n mode=\"min\",\n factor=0.5,\n patience=10,\n min_lr=self.hparams.minimum_learning_rate,\n verbose=True,\n ),\n \"monitor\": \"train_loss_mean\", # Default: val_loss\n \"interval\": \"epoch\",\n \"frequency\": 1,\n }\n return [optimizer], [scheduler]\n\n def loss_function(self, x):\n if self.hparams.loss == \"mse\":\n #without the dimension specifier we now average across all samples in a batch instead of per sample, should be ok as long as mean(sqrt(mean(...))) ~= sqrt(mean(mean()))\n #advantage of this is that we can avoid dimension mismatching with nan results later\n return x.pow(2).mean() \n else:\n raise NotImplementedError(\n \"The loss function {} is not implemented\".format(self.hparams.loss)\n )\n\n @staticmethod\n def test_loss_function(x):\n return x.pow(2).mean((1, 2, 3)).sqrt()\n\n def test_step(self, batch, batch_idx):\n self.reset_source()\n output = self.forward(\n batch,\n num_iterations=self.hparams.max_iterations,\n return_wavefields=True,\n return_states=False,\n )\n # Get loss\n losses = [self.test_loss_function(x) for x in output[\"residuals\"]]\n losses = torch.stack(losses, 1)\n return {\n \"losses\": losses,\n #\"wavefields\": [x.cpu() for x in output[\"wavefields\"]], #calls to cpu seems to be unnecessary here\n \"wavefields\": [x for x in output[\"wavefields\"]], \n }\n\n def test_epoch_end(self, outputs):\n # Saving average losses\n print(\"Saving residual RMSE\")\n x = []\n for o in outputs:\n x.append(o[\"losses\"])\n all_losses = torch.cat(x, dim = 0).cpu().numpy()\n np.save(\"results/evolution_of_model_RMSE_on_test_set\", all_losses)\n\n # Save wavefield\n print(\"Saving wavefields\")\n wavefields = torch.cat(\n [torch.stack(x[\"wavefields\"], 0) for x in outputs], 1\n ).permute(1, 0, 2, 3, 4)\n np.save(\"results/evolution_of_wavefields_on_test_set\", wavefields.cpu().numpy())\n \n\n def validation_step(self, batch, batch_idx):\n self.set_multiple_sources(\n [self.get_random_source_loc() for _ in range(batch.shape[0])]\n )\n output = self.forward(\n batch,\n num_iterations=self.hparams.max_iterations,\n return_wavefields=False,\n return_states=False,\n )\n # Get loss\n loss = self.loss_function(output[\"residuals\"][-1]).sqrt()\n # NaNs to Infs, due to Lightning bug: https://github.com/PyTorchLightning/pytorch-lightning/issues/2636\n loss[torch.isnan(loss)] = float(\"inf\")\n sample_wavefield = 
(hardtanh(output[\"wavefields\"][0][0]) + 1) / 2\n return {\n \"loss\": loss,\n \"sample_wavefield\": sample_wavefield,\n \"batch_idx\": batch_idx,\n }\n\n def validation_epoch_end(self, outputs):\n all_losses = torch.stack([x[\"loss\"] for x in outputs]).mean()\n val_loss_mean = self.metric(all_losses, torch.zeros_like(all_losses))\n\n self.reset_source()\n self.logger.experiment.add_images(\n \"wavefield/val_real\",\n outputs[0][\"sample_wavefield\"][0],\n self.trainer.global_step,\n dataformats=\"HW\",\n )\n self.logger.experiment.add_image(\n \"wavefield/val_imag\",\n outputs[0][\"sample_wavefield\"][1],\n self.trainer.global_step,\n dataformats=\"HW\",\n )\n\n self.log('val_loss', val_loss_mean)\n self.log('val_terminal_loss', val_loss_mean)\n return {\n \"val_loss\": val_loss_mean\n }\n\n\n def training_epoch_end(self, outputs):\n train_loss_mean = torch.stack([x[\"loss\"] for x in outputs]).mean()\n #training_epoch_end cannot return values anymore\n self.log('train_loss_mean', train_loss_mean)\n #return {\"train_loss\": train_loss_mean}\n\n def training_step(self, sos_batch, batch_idx):\n # Training phase\n maxiter = min([self.current_epoch * 20 + 1, self.hparams.max_iterations])\n # Sample from the buffer\n (\n wavefields,\n h_states,\n k_sqs,\n residual,\n sources,\n timesteps,\n indices,\n ) = self.replaybuffer.sample(self.hparams.batch_size)\n\n # Set the states and sources\n self.set_source_maps(sources)\n self.f.set_states(h_states, flatten=True)\n\n # Make N steps\n num_iterations = self.hparams.unrolling_steps #10\n\n\n output = self.n_steps(wavefields, k_sqs, residual, num_iterations, True, True)\n\n # Evaluate the loss function (will backward later)\n cat_res = torch.cat(output[\"residuals\"])\n\n # stack_res = torch.stack(output[\"residuals\"])\n loss_f = cat_res.pow(2)\n loss = 1e4 * loss_f.mean() # TODO: Use settings loss and amplify\n rel_loss_f = loss_f.mean((1, 2, 3)).sqrt().mean()\n self.logger.experiment.add_scalar(\n \"loss/train\", rel_loss_f, self.trainer.global_step\n )\n\n #Add histogram of iteration lengths\n if self.trainer.current_epoch // 50 == 0:\n self.logger.experiment.add_histogram(\n \"hyper/iterations\", np.array(list(timesteps)), self.trainer.global_step\n )\n\n # Making detached clones\n wavefields = [x.detach() for x in output[\"wavefields\"]]\n h_states = [x.detach() for x in output[\"states\"]]\n k_sqs = [k_sqs for x in output[\"wavefields\"]]\n residuals = [x.detach() for x in output[\"residuals\"]]\n sources = [x.detach() for x in self.source]\n\n # Adding to RB if iterations are not more than allowed\n counter = 0\n terminal_logged = False\n middle_logged = False\n iteration = np.random.choice(len(residuals))\n for sample_idx in range(self.hparams.batch_size):\n new_timesteps = timesteps[sample_idx] + iteration + 1\n res = residuals[iteration][sample_idx]\n if res.pow(2).mean() < 1 and new_timesteps < maxiter:\n self.replaybuffer.append(\n Experience(\n wavefields[iteration][sample_idx],\n h_states[iteration][sample_idx],\n k_sqs[iteration][sample_idx],\n residuals[iteration][sample_idx],\n sources[sample_idx],\n new_timesteps,\n ),\n indices[sample_idx],\n )\n else:\n with torch.no_grad():\n self.reset_source()\n ksq, wf = self.get_initials(choice(sos_batch).unsqueeze(0))\n self.f.clear_states(wf)\n h = self.f.get_states(flatten=True)\n res = self.get_residual(wf, ksq)\n self.replaybuffer.append(\n Experience(wf[0], h[0], ksq[0], res[0], self.source[0], 0),\n indices[sample_idx],\n )\n counter += 1\n\n # Log it as wavefield at 20 steps\n if 
not middle_logged and new_timesteps == 20:\n self.log_wavefield(wavefields[iteration][sample_idx], \"20\")\n with torch.no_grad():\n middle_loss = self.loss_function(residuals[iteration][sample_idx])\n self.logger.experiment.add_scalar(\n \"loss/step_20\",\n middle_loss.sqrt().item(),\n self.trainer.global_step,\n )\n middle_logged = True\n\n # Log terminal wavefield\n elif new_timesteps >= maxiter and not terminal_logged:\n self.log_wavefield(wavefields[iteration][sample_idx], \"terminal\")\n with torch.no_grad():\n terminal_loss = self.loss_function(residuals[iteration][sample_idx])\n self.logger.experiment.add_scalar(\n \"loss/terminal\",\n terminal_loss.sqrt().item(),\n self.trainer.global_step,\n )\n terminal_logged = True\n\n self.logger.experiment.add_scalar(\n \"train_loss\",\n loss,\n self.trainer.global_step\n )\n\n self.log('train_loss', loss) # force the variable to be saved in state dict of module. tensorboard logger does not do this for some reason\n\n #need to log with prog_bar = True to show on progress bar now\n self.log('maxiter', maxiter, on_epoch=True, prog_bar = True)\n self.log('unrolling', num_iterations, on_epoch=True, prog_bar = True)\n self.log('new_sos', counter, on_epoch=True, prog_bar = True)\n\n return {\n \"loss\": loss\n }\n\n def log_wavefield(self, wavefield, name):\n wavefield = (hardtanh(wavefield) + 1) / 2\n self.logger.experiment.add_images(\n \"wavefield/\" + name + \"_real\",\n wavefield[0],\n self.trainer.global_step,\n dataformats=\"HW\",\n )\n self.logger.experiment.add_image(\n \"wavefield/\" + name + \"_imag\",\n wavefield[1],\n self.trainer.global_step,\n dataformats=\"HW\",\n )\n\n def get_initials(self, sos_maps: torch.tensor):\n \"\"\"Gets the initial estimates for state, wavefield and residual. It\n also calculate k_sq = (omega/c)**2\n\n Args:\n sos_maps (tensor): Speed of sound map\n\n Returns:\n (tensor, tensor): k_sq, wavefield\n \"\"\"\n # TODO: Make it trainable?\n\n k_sq = (self.hparams.omega / sos_maps) ** 2\n wavefield = torch.zeros(\n k_sq.shape[0], 2, k_sq.shape[2], k_sq.shape[3], device=k_sq.device\n )\n return k_sq, wavefield\n\n def apply_laplacian(self, x: torch.tensor):\n #laplacian now needs a contiguous memory to work\n return self.Lap(x.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2)\n\n def get_residual(self, x: torch.tensor, k_sq: torch.tensor):\n # TODO: This should be outside of the networ, as represents the\n # environment\n \"\"\"Returns the residual wavefield\n\n Args:\n x (tensor): Current solution estimate for the Helmholtz equation\n k_sq (tensor): (omega/c)**2\n\n Returns:\n torch.tensor: the residual\n \"\"\"\n return self.apply_laplacian(x) + k_sq * x - self.source\n\n def single_step(\n self, wavefield: torch.tensor, k_sq: torch.tensor, residual: torch.tensor, get_residual : bool = True\n ):\n # Getting residual signal\n # residual = self.get_residual(wavefield, k_sq)\n\n sigmas = self.sigmas.unsqueeze(0).repeat(wavefield.shape[0], 1, 1, 1) #self.sigmas on device, no need to move\n\n input = torch.cat([wavefield, 1e3 * residual, sigmas], dim = 1)\n\n # Predicting wavefield update\n d_wavefield = self.f(input) # *100/self.current_iterations\n up_wavefield = d_wavefield / 1e3 + wavefield\n new_residual = self.get_residual(up_wavefield, k_sq)\n\n # Impose Dirichlet BC on updated wavefield\n \"\"\"\n dirichlet_mask = torch.zeros_like(up_wavefield)\n dirichlet_mask.requires_grad = False\n dirichlet_mask[:,:,1:-1,1:-1] = 1.\n up_wavefield = up_wavefield*dirichlet_mask\n \"\"\"\n #get_residual = True\n if 
get_residual:\n return up_wavefield, new_residual\n else:\n return up_wavefield\n\n def n_steps(\n self,\n wavefield,\n k_sq,\n residual,\n num_iterations,\n return_wavefields=False,\n return_states=False,\n ):\n # Initialize containers\n wavefields = []\n residuals = []\n states = []\n\n # Unroll N steps\n for current_iteration in range(num_iterations):\n # Update wavefield and get residual AFTER update\n wavefield, residual = self.single_step(\n wavefield, k_sq, residual, get_residual=True\n )\n\n # Store\n residuals.append(residual) # Last residual\n if return_wavefields:\n wavefields.append(wavefield)\n if return_states:\n states.append(self.f.get_states(flatten=True))\n\n # Add only last wavefield if none logged\n if not return_wavefields:\n wavefields.append(wavefield)\n\n return {\n \"wavefields\": wavefields,\n \"residuals\": residuals,\n \"states\": states,\n \"last_iteration\": current_iteration,\n }\n\n def fast_forward(self, sos_maps):\n # Finite horizon value\n num_iterations = self.hparams.max_iterations\n\n # Initialize inputs and network states\n k_sq, wavefield = self.get_initials(sos_maps)\n self.f.clear_states(wavefield)\n residual = self.get_residual(wavefield, k_sq)\n sigmas = (\n self.sigmas.unsqueeze(0).repeat(wavefield.shape[0], 1, 1, 1).to(self.device)\n )\n\n # Initialize containers\n wavefields = torch.empty(\n [num_iterations] + list(wavefield.shape[1:]),\n device=\"cuda:1\",\n dtype=torch.float32,\n )\n\n # Unroll N steps\n for current_iteration in range(num_iterations):\n # Loop\n wavefield, residual = self.single_step(wavefield, k_sq, residual)\n\n # Store\n wavefields[current_iteration] = wavefield[0]\n\n return wavefields\n\n def forward(\n self,\n sos_maps,\n return_wavefields=False,\n return_states=False,\n num_iterations=None,\n stop_if_diverge=False,\n ):\n # Finite horizon value\n if num_iterations is None:\n num_iterations = self.hparams.max_iterations\n\n # Initialize inputs and network states\n k_sq, wavefield = self.get_initials(sos_maps)\n self.f.clear_states(wavefield)\n residual = self.get_residual(wavefield, k_sq)\n\n # Initialize containers\n wavefields = []\n residuals = []\n states = []\n\n # Unroll N steps\n for current_iteration in range(num_iterations):\n # Update wavefield and get residual AFTER update\n wavefield, residual = self.single_step(wavefield, k_sq, residual)\n\n # Store\n residuals.append(residual) # Last residual\n if return_wavefields:\n wavefields.append(wavefield)\n if return_states:\n states.append(self.f.get_states(flatten=True))\n\n # Add only last wavefield if none logged\n if not return_wavefields:\n wavefields.append(wavefield)\n\n return {\n \"wavefields\": wavefields,\n \"residuals\": residuals,\n \"states\": states,\n \"last_iteration\": current_iteration,\n }\n\n def forward_variable_src(\n self,\n sos_maps,\n src_time_pairs,\n return_wavefields=False,\n return_states=False,\n num_iterations=None,\n stop_if_diverge=False,\n ):\n # Finite horizon value\n if num_iterations is None:\n num_iterations = self.hparams.max_iterations\n\n # Extract source insertion times\n new_src_times = src_time_pairs[\"iteration\"]\n src_maps = iter(src_time_pairs[\"src_maps\"])\n\n # Initialize inputs and network states\n k_sq, wavefield = self.get_initials(sos_maps)\n self.f.clear_states(wavefield)\n residual = self.get_residual(wavefield, k_sq)\n\n # Initialize containers\n wavefields = []\n residuals = []\n states = []\n\n # Unroll N steps\n for current_iteration in range(num_iterations):\n # Update source map if needed\n if 
current_iteration in new_src_times:\n self.set_source_maps(next(src_maps))\n # _, wavefield = self.get_initials(sos_maps)\n # self.f.clear_states(wavefield)\n residual = self.get_residual(wavefield, k_sq)\n\n # Update wavefield and get residual AFTER update\n wavefield, residual = self.single_step(wavefield, k_sq, residual)\n\n # Store\n residuals.append(residual) # Last residual\n if return_wavefields:\n wavefields.append(wavefield)\n if return_states:\n states.append(self.f.get_states(flatten=True))\n\n # Add only last wavefield if none logged\n if not return_wavefields:\n wavefields.append(wavefield)\n\n return {\n \"wavefields\": wavefields,\n \"residuals\": residuals,\n \"states\": states,\n \"last_iteration\": current_iteration,\n }\n",
"import torch\nimport torch.nn as nn\n\n\ndef getActivationFunction(\n act_function_name: str, features=None, end=False\n) -> nn.Module:\n \"\"\"Returns the activation function module given\n the name\n\n Args:\n act_function_name (str): Name of the activation function, case unsensitive\n\n Raises:\n NotImplementedError: Raised if the activation function is unknown\n\n Returns:\n nn.Module\n \"\"\"\n if act_function_name.lower() == \"relu\":\n return nn.ReLU(inplace=True)\n elif act_function_name.lower() == \"celu\":\n return nn.CELU(inplace=True)\n elif act_function_name.lower() == \"relu_batchnorm\":\n if end:\n return nn.ReLU(inplace=True)\n else:\n return nn.Sequential(nn.ReLU(inplace=True), nn.BatchNorm2d(features))\n return nn.CELU(inplace=True)\n elif act_function_name.lower() == \"tanh\":\n return nn.Tanh()\n elif act_function_name.lower() == \"prelu\":\n return nn.PReLU()\n elif act_function_name.lower() == \"gelu\":\n return nn.GELU()\n elif act_function_name.lower() == \"tanhshrink\":\n return nn.Tanhshrink()\n elif act_function_name.lower() == \"softplus\":\n return nn.Softplus()\n elif act_function_name.lower() == \"leakyrelu\":\n return nn.LeakyReLU(inplace=True)\n else:\n err = \"Unknown activation function {}\".format(act_function_name)\n raise NotImplementedError(err)\n\n\nclass OutConv(nn.Module):\n \"\"\"Outconvolution, consisting of a simple 2D convolution layer with kernel size 1\"\"\"\n\n def __init__(self, in_channels: int, out_channels: int):\n \"\"\"\n Args:\n in_channels (int): Number of input channels\n out_channels (int): Number of output channels\n \"\"\"\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => actFunction) * 2\"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n mid_channels=None,\n activation_fun=\"relu\",\n ):\n super().__init__()\n if mid_channels is None:\n mid_channels = out_channels\n\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),\n getActivationFunction(activation_fun, mid_channels),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),\n )\n\n def forward(self, x):\n return self.double_conv(x)\n\n\nclass CleanDoubleConv(nn.Module):\n \"\"\"(convolution => actFunction) * 2\"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n mid_channels=None,\n activation_fun=\"relu\",\n ):\n super().__init__()\n if mid_channels is None:\n mid_channels = out_channels\n\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),\n getActivationFunction(activation_fun, mid_channels),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),\n )\n\n def forward(self, x):\n return self.double_conv(x)\n\n\nclass ResDoubleConv(nn.Module):\n \"\"\"(convolution => actFunction) * 2\"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n mid_channels=None,\n activation_fun=\"relu\",\n ):\n super().__init__()\n if mid_channels is None:\n mid_channels = out_channels\n\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),\n getActivationFunction(activation_fun, mid_channels),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),\n )\n\n def forward(self, x):\n return self.double_conv(x) + x\n\n\nclass ConvGRUCell(nn.Module):\n \"\"\"\n Basic CGRU cell.\n \"\"\"\n\n def 
__init__(self, in_channels, hidden_channels, kernel_size, bias):\n\n super(ConvGRUCell, self).__init__()\n\n self.input_dim = in_channels\n self.hidden_dim = hidden_channels\n\n self.kernel_size = kernel_size\n self.padding = kernel_size[0] // 2, kernel_size[1] // 2\n self.bias = bias\n self.update_gate = nn.Conv2d(\n in_channels=self.input_dim + self.hidden_dim,\n out_channels=self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias,\n )\n self.reset_gate = nn.Conv2d(\n in_channels=self.input_dim + self.hidden_dim,\n out_channels=self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias,\n )\n\n self.out_gate = nn.Conv2d(\n in_channels=self.input_dim + self.hidden_dim,\n out_channels=self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias,\n )\n\n def forward(self, input_tensor, cur_state):\n h_cur = cur_state\n # data size is [batch, channel, height, width]\n x_in = torch.cat([input_tensor, h_cur], dim=1)\n update = torch.sigmoid(self.update_gate(x_in))\n reset = torch.sigmoid(self.reset_gate(x_in))\n x_out = torch.tanh(\n self.out_gate(torch.cat([input_tensor, h_cur * reset], dim=1))\n )\n h_new = h_cur * (1 - update) + x_out * update\n return h_new\n\n\nclass EncoderBlock(nn.Module):\n def __init__(\n self,\n num_features: int,\n state_size=2,\n activation_function=\"prelu\",\n use_state=True,\n domain_size=0,\n ):\n super().__init__()\n self.state_size = state_size\n self.use_state = use_state\n self.domain_size = domain_size\n self.num_features = num_features\n\n # Define the two double_conv layers\n self.conv_signal = DoubleConv(\n self.num_features + self.state_size * self.use_state,\n self.num_features,\n activation_fun=activation_function,\n )\n\n # Downward path\n self.down = nn.Conv2d(\n self.num_features, self.num_features, kernel_size=8, padding=3, stride=2\n )\n if self.use_state:\n self.conv_state = DoubleConv(\n self.num_features + self.state_size,\n self.state_size,\n activation_fun=activation_function,\n )\n \"\"\"\n self.conv_state = ConvGRUCell(\n in_channels=self.num_features,\n hidden_channels=self.state_size,\n kernel_size=[3, 3],\n bias=True\n )\n \"\"\"\n\n self.state = None\n\n def set_state(self, state):\n self.state = state\n\n def get_state(self):\n return self.state\n\n def clear_state(self, x):\n self.state = torch.zeros(\n [x.shape[0], 2, self.domain_size, self.domain_size], device=x.device\n )\n\n def forward(self, x):\n if self.use_state:\n if self.state is None:\n raise ValueError(\n \"You must set or clear the state before using this module\"\n )\n x_and_state = torch.cat([x, self.state], 1)\n output = self.conv_signal(x_and_state)\n self.state = self.conv_state(torch.cat([output, self.state], 1))\n # self.state = self.conv_state(output, self.state)\n else:\n output = self.conv_signal(x)\n return output, self.down(output)\n\n\nclass ResNet(nn.Module):\n def __init__(\n self,\n activation_function: str,\n depth: int,\n domain_size: int,\n features: int,\n inchannels: int,\n state_channels: int,\n state_depth: int,\n ):\n super().__init__()\n\n # Hyperparameters\n self.activation_function = activation_function\n self.depth = depth\n self.domain_size = domain_size\n self.features = features\n self.inchannels = inchannels\n self.state_channels = state_channels\n self.state_depth = state_depth\n self.state = None\n\n # Define resnet\n inc = [nn.Conv2d(inchannels + 2, features, 7, padding=3)]\n res_blocks = [\n ResDoubleConv(features, features, features * 2) for 
_ in range(self.depth)\n ]\n outc = [nn.Conv2d(features, 4, 7, padding=3)]\n layers = inc + res_blocks + outc\n self.network = nn.Sequential(*layers)\n\n def init_by_size(self):\n return\n\n def get_states(self, flatten=False):\n return\n\n def clear_states(self, x):\n self.state = None\n return\n\n def set_states(self, states, flatten=False):\n return\n\n def flatten_state(self, h_list):\n return\n\n def unflatten_state(self, h_flatten):\n return\n\n def forward(self, x):\n if self.state is None:\n self.state = torch.zeros(\n (x.shape[0], 2, x.shape[2], x.shape[3]), device=x.device\n )\n x = torch.cat([x, self.state], 1)\n y = self.network(x)\n self.state = y[:, :2]\n return y[:, 2:]\n\n\nclass HybridNet(nn.Module):\n def __init__(\n self,\n activation_function: str,\n depth: int,\n domain_size: int,\n features: int,\n inchannels: int,\n state_channels: int,\n state_depth: int,\n ):\n super().__init__()\n # Hyperparameters\n self.activation_function = activation_function\n self.depth = depth\n self.domain_size = domain_size\n self.features = features\n self.inchannels = inchannels\n self.state_channels = state_channels\n self.state_depth = state_depth\n\n # Define states boundaries for packing and unpacking\n self.init_by_size()\n\n # Input layer\n self.inc = DoubleConv(\n self.inchannels, self.features, activation_fun=self.activation_function\n )\n\n # Encoding layer\n self.enc = nn.ModuleList(\n [\n EncoderBlock(\n self.features,\n state_size=self.state_channels,\n activation_function=self.activation_function,\n use_state=d < self.state_depth,\n domain_size=self.states_dimension[d],\n )\n for d in range(self.depth)\n ]\n )\n\n # Decode path\n self.decode = nn.ModuleList(\n [\n DoubleConv(\n self.features + self.features * (i < self.depth),\n self.features,\n activation_fun=self.activation_function,\n )\n for i in range(self.depth + 1)\n ]\n )\n\n # Upsampling\n self.up = nn.ModuleList(\n [\n nn.ConvTranspose2d(\n self.features,\n self.features,\n kernel_size=8,\n padding=3,\n output_padding=0,\n stride=2,\n )\n for i in range(self.depth)\n ]\n )\n\n # Output layer\n self.outc = OutConv(self.features, 2)\n\n def init_by_size(self):\n # This helps to reshape the state to the correct dimensions\n self.states_dimension = [self.domain_size // 2 ** x for x in range(self.depth)]\n self.total_state_length = sum(map(lambda x: x ** 2, self.states_dimension))\n self.state_boundaries = []\n for d in range(self.depth):\n if d == 0:\n self.state_boundaries.append([0, self.states_dimension[d] ** 2])\n else:\n self.state_boundaries.append(\n [\n self.state_boundaries[-1][-1],\n self.state_boundaries[-1][-1] + self.states_dimension[d] ** 2,\n ]\n )\n\n def get_states(self, flatten=False):\n h = []\n for enc in self.enc:\n h.append(enc.get_state())\n if flatten:\n return self.flatten_state(h)\n else:\n return h\n\n def clear_states(self, x):\n for enc in self.enc:\n enc.clear_state(x)\n\n def set_states(self, states, flatten=False):\n if flatten:\n h = self.unflatten_state(states)\n for enc, state in zip(self.enc[: len(h)], h):\n enc.set_state(state)\n\n def flatten_state(self, h_list):\n h = []\n for x in h_list:\n h.append(x.view(x.shape[0], x.shape[1], -1))\n return torch.cat(h, 2)\n\n def unflatten_state(self, h_flatten):\n h = []\n h_shape = h_flatten.shape\n for boundaries, size in zip(self.state_boundaries, self.states_dimension):\n h_d_flat = h_flatten[:, :, boundaries[0] : boundaries[1]]\n h.append(h_d_flat.view(h_shape[0], h_shape[1], size, size))\n return h\n\n def forward(self, x):\n\n # 
First feature transformation\n x = self.inc(x)\n\n # Downsampling tree and extracting new states\n inner_signals = []\n for d in range(self.depth):\n # Encode signal\n inner, x = self.enc[d](x)\n # Store signal\n inner_signals.append(inner)\n\n # Upscaling\n x = self.decode[-1](x)\n for d in range(self.depth - 1, -1, -1):\n # Upscale\n x = self.up[d](x)\n # Concatenate inner path\n x = torch.cat([x, inner_signals[d]], 1)\n # Decode\n x = self.decode[d](x)\n\n # Output layer\n out = self.outc(x)\n\n return out\n"
] | [
[
"torch.nn.Parameter",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.isnan",
"torch.zeros",
"torch.cat",
"torch.sin",
"torch.zeros_like",
"torch.nn.init.xavier_normal_",
"numpy.save",
"torch.no_grad",
"numpy.random.rand",
"torch.nn.functional.hardtanh",
"torch.stack",
"torch.cos"
],
[
"torch.nn.Sequential",
"torch.nn.GELU",
"torch.nn.Tanhshrink",
"torch.nn.Softplus",
"torch.nn.ConvTranspose2d",
"torch.zeros",
"torch.cat",
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.CELU",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
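The IterativeSolver in the row above repeatedly applies a learned correction driven by the Helmholtz residual L(u) + k²u − s. A rough, runnable sketch of that unrolled loop with deliberately simple stand-ins (a fixed 5-point Laplacian and a single conv layer in place of the helmnet U-Net and PML operator; none of these names come from the repository):

import torch
from torch import nn
from torch.nn import functional as F

# Stand-ins only: 5-point Laplacian stencil applied per channel, and a plain conv
# in place of the learned corrector network.
lap_kernel = torch.tensor([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]]).view(1, 1, 3, 3)

def apply_operator(u):                                  # u: [B, 2, H, W] (real/imag)
    return F.conv2d(u, lap_kernel.repeat(2, 1, 1, 1), padding=1, groups=2)

net = nn.Conv2d(4, 2, kernel_size=3, padding=1)         # input: wavefield + residual

def unroll(u, k_sq, source, steps=10):
    residuals = []
    for _ in range(steps):
        r = apply_operator(u) + k_sq * u - source       # Helmholtz residual
        u = u + 1e-3 * net(torch.cat([u, r], dim=1))    # small learned correction
        residuals.append(r)
    return u, residuals

u0 = torch.zeros(1, 2, 32, 32)
k_sq = torch.full((1, 1, 32, 32), 0.5)
source = torch.zeros(1, 2, 32, 32)
source[0, 0, 16, 16] = 1.0
u, res = unroll(u0, k_sq, source)
print(u.shape, res[-1].pow(2).mean().item())
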
xmu-xiaoma666/RepMLP-pytorch | [
"598139b8e6da34dd01154311eaae14628d8de4e6"
] | [
"repvgg-pytorch.py"
] | [
"import torch\nfrom torch import mean, nn\nfrom collections import OrderedDict\nfrom torch.nn import functional as F\nimport numpy as np\nfrom numpy import random\nfrom se_block import SEBlock\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\ndef _conv_bn(input_channel,output_channel,kernel_size=3,padding=1,stride=1,groups=1):\n res=nn.Sequential()\n res.add_module('conv',nn.Conv2d(in_channels=input_channel,out_channels=output_channel,kernel_size=kernel_size,padding=padding,padding_mode='zeros',stride=stride,groups=groups,bias=False))\n res.add_module('bn',nn.BatchNorm2d(output_channel))\n return res\n\nclass RepBlock(nn.Module):\n def __init__(self,input_channel,output_channel,kernel_size=3,groups=1,stride=1,deploy=False,use_se=False):\n super().__init__()\n self.use_se=use_se\n self.input_channel=input_channel\n self.output_channel=output_channel\n self.deploy=deploy\n self.kernel_size=kernel_size\n self.padding=kernel_size//2\n self.groups=groups\n self.activation=nn.ReLU()\n\n\n\n #make sure kernel_size=3 padding=1\n assert self.kernel_size==3\n assert self.padding==1\n if(not self.deploy):\n self.brb_3x3=_conv_bn(input_channel,output_channel,kernel_size=self.kernel_size,padding=self.padding,groups=groups)\n self.brb_1x1=_conv_bn(input_channel,output_channel,kernel_size=1,padding=0,groups=groups)\n self.brb_identity=nn.BatchNorm2d(self.input_channel) if self.input_channel == self.output_channel else None\n else:\n self.brb_rep=nn.Conv2d(in_channels=input_channel,out_channels=output_channel,kernel_size=self.kernel_size,padding=self.padding,padding_mode='zeros',stride=stride,bias=True)\n\n if(self.use_se):\n self.se=SEBlock(input_channel,input_channel//16)\n else:\n self.se=nn.Identity()\n\n \n def forward(self, inputs):\n if(self.deploy):\n return self.activation(self.se(self.brb_rep(inputs)))\n \n if(self.brb_identity==None):\n identity_out=0\n else:\n identity_out=self.brb_identity(inputs)\n \n return self.activation(self.se(self.brb_1x1(inputs)+self.brb_3x3(inputs)+identity_out))\n\n \n \n\n def _switch_to_deploy(self):\n self.deploy=True\n kernel,bias=self._get_equivalent_kernel_bias()\n self.brb_rep=nn.Conv2d(in_channels=self.brb_3x3.conv.in_channels,out_channels=self.brb_3x3.conv.out_channels,\n kernel_size=self.brb_3x3.conv.kernel_size,padding=self.brb_3x3.conv.padding,\n padding_mode=self.brb_3x3.conv.padding_mode,stride=self.brb_3x3.conv.stride,\n groups=self.brb_3x3.conv.groups,bias=True)\n self.brb_rep.weight.data=kernel\n self.brb_rep.bias.data=bias\n #消除梯度更新\n for para in self.parameters():\n para.detach_()\n #删除没用的分支\n self.__delattr__('brb_3x3')\n self.__delattr__('brb_1x1')\n self.__delattr__('brb_identity')\n\n\n #将1x1的卷积变成3x3的卷积参数\n def _pad_1x1_kernel(self,kernel):\n if(kernel is None):\n return 0\n else:\n return F.pad(kernel,[1]*4)\n\n\n #将identity,1x1,3x3的卷积融合到一起,变成一个3x3卷积的参数\n def _get_equivalent_kernel_bias(self):\n brb_3x3_weight,brb_3x3_bias=self._fuse_conv_bn(self.brb_3x3)\n brb_1x1_weight,brb_1x1_bias=self._fuse_conv_bn(self.brb_1x1)\n brb_id_weight,brb_id_bias=self._fuse_conv_bn(self.brb_identity)\n return brb_3x3_weight+self._pad_1x1_kernel(brb_1x1_weight)+brb_id_weight,brb_3x3_bias+brb_1x1_bias+brb_id_bias\n \n \n ### 将卷积和BN的参数融合到一起\n def _fuse_conv_bn(self,branch):\n if(branch is None):\n return 0,0\n elif(isinstance(branch,nn.Sequential)):\n kernel=branch.conv.weight\n running_mean=branch.bn.running_mean\n 
running_var=branch.bn.running_var\n gamma=branch.bn.weight\n beta=branch.bn.bias\n eps=branch.bn.eps\n else:\n assert isinstance(branch, nn.BatchNorm2d)\n if not hasattr(self, 'id_tensor'):\n input_dim = self.input_channel // self.groups\n kernel_value = np.zeros((self.input_channel, input_dim, 3, 3), dtype=np.float32)\n for i in range(self.input_channel):\n kernel_value[i, i % input_dim, 1, 1] = 1\n self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)\n kernel = self.id_tensor\n running_mean = branch.running_mean\n running_var = branch.running_var\n gamma = branch.weight\n beta = branch.bias\n eps = branch.eps\n \n std=(running_var+eps).sqrt()\n t=gamma/std\n t=t.view(-1,1,1,1)\n return kernel*t,beta-running_mean*gamma/std\n \n\n\nif __name__ == '__main__':\n input=torch.randn(50,512,49,49)\n repblock=RepBlock(512,512)\n repblock.eval()\n out=repblock(input)\n repblock._switch_to_deploy()\n out2=repblock(input)\n print('difference between vgg and repvgg')\n print(((out2-out)**2).sum())\n "
] | [
[
"torch.nn.Sequential",
"numpy.random.seed",
"torch.manual_seed",
"torch.randn",
"torch.nn.Conv2d",
"torch.from_numpy",
"torch.nn.Identity",
"torch.cuda.manual_seed_all",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"numpy.zeros",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
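The _fuse_conv_bn / _switch_to_deploy logic in the row above folds batch-norm statistics into the preceding convolution so the branches collapse into a single 3x3 conv at deploy time. A minimal stand-alone sketch of that conv-BN folding, assuming an nn.Conv2d followed by an nn.BatchNorm2d evaluated with its running statistics (a sketch of the general technique, not the repository's exact method):

import torch
from torch import nn

def fuse_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d):
    # y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta
    std = (bn.running_var + bn.eps).sqrt()
    scale = (bn.weight / std).reshape(-1, 1, 1, 1)
    fused_w = conv.weight * scale
    conv_bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused_b = bn.bias + (conv_bias - bn.running_mean) * bn.weight / std
    return fused_w, fused_b

conv = nn.Conv2d(8, 16, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(16).eval()
w, b = fuse_conv_bn(conv, bn)
fused = nn.Conv2d(8, 16, 3, padding=1, bias=True)
fused.weight.data, fused.bias.data = w, b
x = torch.randn(1, 8, 5, 5)
print(torch.allclose(fused(x), bn(conv(x)), atol=1e-5))
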
scbysc/my_wavy_project | [
"c167ac9a3cd20b21fa7362dc5eeac68091b72942"
] | [
"src/cos.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\ntime = np.arange(0, 10, 0.1)\namplitude = np.cos(time)\n\nplt.plot (time, amplitude)\n\nplt.title('Cosine curve')\n\nplt.xlabel('Time')\n\nplt.ylabel('Amplitude')\n\nplt.show()"
] | [
[
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bionicdl-sustech/DeepClaw | [
"5cc61289e6088adc03d502f07ec970d221e05857",
"b5bc1866a1847e7b0c11616fd6cbe949c64a355b",
"5cc61289e6088adc03d502f07ec970d221e05857",
"b5bc1866a1847e7b0c11616fd6cbe949c64a355b"
] | [
"deepclaw/sim2real/simulation/src/franka.py",
"deepclaw/modules/recognition/efficientnet/demo_realsense.py",
"deepclaw/sim2real/simulation/tasks/BlueArm_simulation/ktest.py",
"deepclaw/modules/recognition/efficientnet/demo.py"
] | [
"from pyrep.robots.arms.panda import Panda\nfrom pyrep.robots.end_effectors.panda_gripper import PandaGripper\nfrom pyrep.robots.configuration_paths.arm_configuration_path import ArmConfigurationPath\nfrom pyrep.errors import ConfigurationError, ConfigurationPathError, IKError\nfrom pyrep.const import ConfigurationPathAlgorithms as Algos\nfrom typing import List, Union\nimport copy\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.spatial.transform import Rotation as R\nfrom scipy.spatial.transform import Slerp\nfrom .franka_kinematics import (FrankaKinematics,get_rotation_part,\nget_transition_part,set_rotation_part,set_position_part)\n\nclass Franka(Panda):\n\n def __init__(self):\n super().__init__()\n self.path_point_nums = 50\n self.pose = None\n self.home_joints = [0, -np.pi/4, 0, -3 * np.pi/4, 0, np.pi/2, 0]\n self.position = self.get_position()\n self.kine = FrankaKinematics()\n self.position_min = [0.8, -0.3, 0.83]\n self.position_max = [1.0, 0.3, 1.2]\n self.gripper = PandaGripper()\n self.clear_path = False\n \n def grasp(self,env,obj:None,force_mode=False):\n '''\n gripper grasp\n '''\n while not self.gripper.actuate(0.0,0.1):\n env.step()\n self.grasped_obj = obj\n if force_mode:\n self.gripper._grasped_objects.append(self.grasped_obj)\n self.gripper._old_parents.append(self.grasped_obj.get_parent()) # type: ignore\n self.obj.set_parent(self.gripper._attach_point, keep_in_place=True)\n else:\n self.gripper.grasp(self.grasped_obj)\n\n def release(self,env):\n '''\n gripper open\n '''\n while not self.gripper.actuate(1.0,0.1):\n env.step()\n if self.grasped_obj is not None:\n self.gripper.release()\n self.grasped_obj = None\n\n def _rot_value(self,euler: Union[List[float], np.ndarray] = None,\n quaternion: Union[List[float], np.ndarray] = None):\n\n if euler is not None:\n return R.from_euler('xyz',euler)\n elif quaternion is not None:\n return R.from_quat(quaternion)\n else:\n raise ValueError('input eluer or quternion')\n\n def _get_linear_path(self, position: Union[List[float], np.ndarray],\n euler: Union[List[float], np.ndarray] = None,\n quaternion: Union[List[float], np.ndarray] = None\n ) -> ArmConfigurationPath:\n # start\n joints = self.get_joint_positions()\n H_start = self.kine.fk(joints)\n # rot ~\n rots = [get_rotation_part(H_start),self._rot_value(euler,quaternion)]\n slerp = Slerp([0,1], rots)\n times = [x/self.path_point_nums for x in range(self.path_point_nums+1)]\n interp_rots = slerp(times)\n # trans ~\n d_position = (position - self.pose)/self.path_point_nums\n # ik\n ret_floats = []\n q_guess = self.home_joints\n start_position = get_transition_part(H_start)\n for i in range(self.path_point_nums+1):\n H_target = set_rotation_part(np.eye(4),interp_rots[i])\n H_target = set_position_part(H_target,start_position)\n q = self.kine.ik(H_target, q_guess) # solve_ik\n ret_floats.append(q)\n q_guess = q\n return ArmConfigurationPath(self, ret_floats)\n\n\n def _get_nonlinear_path(self, position: Union[List[float], np.ndarray],\n euler: Union[List[float], np.ndarray] = None,\n quaternion: Union[List[float], np.ndarray] = None) -> ArmConfigurationPath:\n r = self._rot_value(euler,quaternion)\n H_target = set_position_part(set_rotation_part(np.eye(4),r),position)\n q_target = self.kine.ik(H_target,self.home_joints)\n #self.move_j(q_target)\n\n def move_j(self,q_target,env):\n _q_target = copy.copy(q_target)\n _q_target[6] += np.pi/4\n q_start = np.array(self.get_joint_positions())\n dq = (_q_target - q_start)/self.path_point_nums\n res = []\n for i in 
range(self.path_point_nums):\n res.append(q_start + dq * i)\n res = np.array(res)\n res = res.reshape((1,-1))\n path = ArmConfigurationPath(self, res.tolist()[0])\n done = False\n while not done:\n done = path.step()\n env.step()\n\n def home(self,env):\n self.move_j(self.home_joints,env)\n \n def move(self,env,\n position: Union[List[float], np.ndarray],\n euler: Union[List[float], np.ndarray] = None,\n quaternion: Union[List[float], np.ndarray] = None):\n path = self.get_path(\n position=position, euler=euler, quaternion = quaternion)\n if path is None:\n raise RuntimeError('no path found')\n path.visualize()\n env.step()\n \n # Step the simulation and advance the agent along the path\n done = False\n while not done:\n done = path.step()\n env.step()\n if self.clear_path:\n path.clear_visualization()\n\n def go_to_position(self,position: Union[List[float], np.ndarray],\n euler: Union[List[float], np.ndarray] = None,\n quaternion: Union[List[float], np.ndarray] = None) -> ArmConfigurationPath:\n r = self._rot_value(euler,quaternion)\n H_target = set_position_part(set_rotation_part(np.eye(4),r),np.array(position))\n q = self.kine.ik(H_target,self.home_joints)\n self.set_joint_positions(q)\n\n def get_path(self, position: Union[List[float], np.ndarray],\n euler: Union[List[float], np.ndarray] = None,\n quaternion: Union[List[float], np.ndarray] = None,\n ignore_collisions=False,\n trials=100, max_configs=60, trials_per_goal=6,\n algorithm=Algos.SBL\n ) -> ArmConfigurationPath:\n '''\n para\n ---\n position(franka frame)\n euler or quaternion\n '''\n #position = np.array(position) + np.array(self.position)\n position = np.array(position)\n try:\n p = self.get_linear_path(position, euler, quaternion,\n ignore_collisions=ignore_collisions)\n return p\n except ConfigurationPathError:\n print('get linear path fail\\n')\n pass # Allowed. Try again, but with non-linear.\n \n try: \n # TODO: _get_linear_path\n #p = self._get_linear_path(position,euler,quaternion)\n #return p\n pass\n except ConfigurationError:\n pass\n\n try:\n p = self.get_nonlinear_path(\n position, euler, quaternion, ignore_collisions, trials, max_configs,\n trials_per_goal, algorithm)\n return p\n except ConfigurationPathError:\n print('get nonlinear path fail\\n')\n #p = self._get_nonlinear_path(position,euler,quaternion)\n #return p\n pass\n",
"# Copyright (c) BioniDL@SUSTECH. All Rights Reserved\n\"\"\"\nThis is a demo to run effcientnet trained on waste sorting dataset on a test image paper.png\nPlease download the pretrained weights and put it under ./weight folder before run the code\n\"\"\"\n\nfrom efficientnet_predictor import efficientnet\nimport cv2\nimport numpy as np\nfrom deepclaw.driver.sensors.camera.Realsense import Realsense\n\ncamera = Realsense('../../../../configs/robcell-ur10e-hande-d435/d435.yaml')\n\nimg_size = 300\n\nmodel = efficientnet(0, 'weights/Recyclable-bs32-weights.08-1.000-DenseNet169.hdf5')\nprint(\"Press q to quite the real-time detection\")\n\nwhile True:\n frame = camera.get_frame()\n image = frame.color_image[0]\n crop = image[350:650,850:1150,:]\n \n preds = model.run(crop[:,:,::-1])\n idx = np.argmax(preds[0])\n name = ['glass', 'metal', 'paper','plastic'][idx]\n ret = cv2.putText(crop, '{}, {:.3f}'.format(name, preds[0][idx]),\n (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6,\n (255, 0, 255), 1)\n cv2.imshow('Prediction', crop)\n \n \n if cv2.waitKey(1) & 0xFF == ord('q'): break\n\ncv2.destroyAllWindows()\n\n\n\n\n\n",
"from os.path import dirname, abspath\nfrom os import system, environ\nsim_path = dirname(dirname(dirname(dirname(abspath(__file__)))))\nscene_path = sim_path + '/simulation/scene/'\nimport sys\nsys.path.append(sim_path)\nfrom simulation.src.camera import Camera\nfrom simulation.src.env import Env\nfrom pyrep.robots.arms.arm import Arm\nfrom pyrep.robots.end_effectors.gripper import Gripper\nfrom pyrep.objects.vision_sensor import VisionSensor\nfrom pyrep.objects.shape import Shape\nimport numpy as np\nimport cv2\nimport copy\n\ndef scene(scene_file_name):\n # return abs dir of scene file \n return scene_path + scene_file_name\n\nclass BlueArm(Arm):\n def __init__(self, Armname):\n super().__init__(0, Armname, 5)\n\n\nclass BlueArmGripper(Gripper):\n def __init__(self, Armname):\n super().__init__(0, Armname,\n [Armname+'_gripper_joint'])\n\nclass Kinect(VisionSensor):\n def __init__(self):\n super().__init__('kinect')\n # enable camera sensor\n #self.set_explicit_handling(1)\n #self.handle_explicitly()\n # compute vision sensor intrinsic matrix\n # [ax 0 u0\n # 0 ay v0\n # 0 0 1]\n self.ax = 2*np.tan(np.radians(self.get_perspective_angle()/2))/self.get_resolution()[0]\n self.ay = 2*np.tan(np.radians(self.get_perspective_angle()*self.get_resolution()[1]/self.get_resolution()[0]*2))/self.get_resolution()[1]\n self.u0 = self.get_resolution()[0]/2 # u0\n self.v0 = self.get_resolution()[1]/2 # v0\n self.H = np.array([[0,1,0,1.1],\n [1,0,0,0],\n [0,0,-1,1.8],\n [0,0,0,1]])\n def capture_bgr(self):\n img = cv2.cvtColor(self.capture_rgb(),cv2.COLOR_RGB2BGR)*255\n return np.array(img,dtype=np.uint8)\n\n def uv2XYZ(self,depth_img,u,v):\n Z = depth_img[v,u]\n return np.array([Z*(u-self.u0)*self.ax, Z*(v-self.v0)*self.ay, Z, 1])\n\nif __name__ == \"__main__\":\n env = Env(scene('BlueArm.ttt'))\n env.start()\n\n left_arm = BlueArm('LeftBlueArm')\n right_arm = BlueArm('RightBlueArm')\n left_arm_gripper = BlueArmGripper('LeftBlueArm')\n right_arm_gripper = BlueArmGripper('RightBlueArm')\n\n env.stop()\n env.shutdown()",
"# Copyright (c) BioniDL@SUSTECH. All Rights Reserved\n\"\"\"\nThis is a demo to run effcientnet trained on waste sorting dataset on a test image paper.png\nPlease download the pretrained weights and put it under ./weight folder before run the code\n\"\"\"\n\nfrom efficientnet_predictor import efficientnet\nimport cv2\nimport numpy as np\n\nimg_size = 300\n\nmodel = efficientnet(0, 'weights/Recyclable-bs32-weights.08-1.000-DenseNet169.hdf5')\n\nimage = cv2.resize(cv2.imread('paper.png'),(img_size,img_size))\n\n# Feed the image in RGB order to the model.\n# The input can be of shape [height, width, channels] or [number of images, height, width, channels]\npreds = model.run(image[:,:,::-1])[0]\n\n# The model pretrained is to classify four recycable waste type ['glass', 'metal', 'paper', 'plastic']\nobj = ['glass', 'metal', 'paper', 'plastic'][np.argmax(preds)]\n\nprint(\"Recognize %s\"%obj)\n\n"
] | [
[
"scipy.spatial.transform.Slerp",
"scipy.spatial.transform.Rotation.from_quat",
"numpy.eye",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.array"
],
[
"numpy.argmax"
],
[
"numpy.array"
],
[
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.5",
"1.3",
"1.2",
"1.4"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
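The Franka _get_linear_path in the row above interpolates end-effector orientation along the path with scipy's Slerp. A small isolated usage sketch of that interpolation (the two key orientations below are arbitrary examples, not values taken from the simulation):

import numpy as np
from scipy.spatial.transform import Rotation as R, Slerp

key_rots = R.from_euler('xyz', [[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]])
slerp = Slerp([0.0, 1.0], key_rots)             # key times and key orientations
waypoints = slerp(np.linspace(0.0, 1.0, 5))     # 5 evenly spaced orientations
print(waypoints.as_euler('xyz'))
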
lizeyan/tensorkit | [
"2997a5914ec3c3ec72f91eb5906b5ee878fdc020",
"2997a5914ec3c3ec72f91eb5906b5ee878fdc020"
] | [
"tensorkit/backend/pytorch_/init.py",
"tests/helper.py"
] | [
"import math\nfrom typing import *\n\nimport numpy as np\nfrom torch.jit import ScriptModule\nfrom torch.utils.hooks import RemovableHandle\n\nfrom . import core, nn, random\nfrom .core import Tensor, Module, no_grad, rank, jit, assign_data\n\n__all__ = [\n # utilities\n 'calculate_fan_in_and_fan_out', 'get_activation_gain', 'apply_initializer',\n\n # data-independent tensor initializers\n 'zeros', 'ones', 'fill', 'uniform', 'normal',\n 'xavier_uniform', 'xavier_normal',\n 'kaming_uniform', 'kaming_normal',\n\n # data-dependent layer initializers\n 'DataDependentInitializer', 'set_initialized',\n 'remove_data_dependent_initializers',\n]\n\n\n# ---- utilities ----\n@jit\ndef calculate_fan_in_and_fan_out(tensor: Tensor) -> Tuple[int, int]:\n r = rank(tensor)\n if r < 2:\n raise ValueError('`fan_in` and `fan_out` cannot be calculated '\n 'when `rank(tensor)` < 2.')\n\n n_output_feature_maps = tensor.shape[0]\n n_input_feature_maps = tensor.shape[1]\n if r > 2:\n receptive_field = tensor[0][0].numel()\n n_input_feature_maps *= receptive_field\n n_output_feature_maps *= receptive_field\n\n return n_input_feature_maps, n_output_feature_maps\n\n\ndef _leaky_relu_activation_gain(negative_slope=nn.LEAKY_RELU_DEFAULT_SLOPE):\n return math.sqrt(2. / (1. + negative_slope ** 2))\n\n\n_relu_activation_gain = math.sqrt(2)\n_tanh_activation_gain = 5. / 3\n\n\ndef get_activation_gain(activation: Optional[Union[Type[Module], Module]],\n *args, **kwargs) -> float:\n \"\"\"\n Get the preferred initialization gain for specified activation.\n\n Args:\n activation: The class or instance of the activation module.\n *args, \\\\**kwargs: Arguments to construct the activation module,\n if the specified `activation` is given as its class.\n Ignored if `activation` is already a module.\n\n Returns:\n The initialization gain. If `activation` is not recognized, returns 1.\n \"\"\"\n from tensorkit.layers import ReLU, LeakyReLU, Tanh\n\n if activation is not None:\n if isinstance(activation, type):\n if issubclass(activation, ReLU):\n return _relu_activation_gain\n if issubclass(activation, LeakyReLU):\n return _leaky_relu_activation_gain(*args, **kwargs)\n if issubclass(activation, Tanh):\n return _tanh_activation_gain\n else:\n if isinstance(activation, ReLU):\n return _relu_activation_gain\n if isinstance(activation, LeakyReLU):\n return _leaky_relu_activation_gain(activation.negative_slope)\n if isinstance(activation, Tanh):\n return _tanh_activation_gain\n return 1.0\n\n\ndef apply_initializer(tensor: Tensor,\n initializer: Optional[\n Union[\n int, float, np.ndarray, Tensor,\n Callable[..., None]\n ]\n ],\n gain: Optional[float] = None,\n activation: Optional[\n Union[str, Type[Module], Module, Any]\n ] = None,\n fan_in_and_fan_out: Optional[Tuple[int, int]] = None,\n mode: str = 'fan_in'\n ) -> None:\n \"\"\"\n Apply an `initializer` on the specified `tensor`.\n\n Args:\n tensor: The tensor to be initialized.\n initializer: The initializer, may be one of:\n * A scalar, which will be filled into `tensor`.\n * A NumPy array or another `Tensor`, whose value will be copied\n to the `tensor`.\n * A callable function ``(t: Tensor, \\\\**kwargs) -> None``.\n The `\\\\**kwargs` must present in order to consume all\n named arguments passed to the initializer. Currently\n possible named arguments are: `gain`, `fan_in_and_fan_out`,\n and `mode`.\n gain: The gain of the activation. 
If not specified, will calculate\n according to `activation` via :func:`get_activation_gain()`.\n activation: The activation of the layer.\n fan_in_and_fan_out: A tuple of ``(fan_in, fan_out)`` of the layer.\n If not specified, and if `rank(tensor)` >= 2, it will be computed\n via :func:`calculate_fan_in_and_fan_out()`.\n mode: Either \"fan_in\" or \"fan_out\". If it is \"fan_out\", then the\n specified or calculated `fan_in` will be regarded as `fan_out`,\n and `fan_out` regarded as `fan_in`.\n \"\"\"\n if gain is None:\n gain = get_activation_gain(activation)\n\n r = rank(tensor)\n if fan_in_and_fan_out is None and r > 1:\n fan_in_and_fan_out = calculate_fan_in_and_fan_out(tensor)\n kwargs = ({} if fan_in_and_fan_out is None\n else {'fan_in_and_fan_out': fan_in_and_fan_out})\n\n is_scalar = (\n (\n not isinstance(initializer, bool) and\n isinstance(initializer, (int, float))\n ) or (\n isinstance(initializer, np.ndarray) and\n np.shape(initializer) == ()\n )\n )\n if is_scalar:\n fill(tensor, initializer)\n elif isinstance(initializer, (np.ndarray, Tensor)):\n assign_data(tensor, initializer)\n elif callable(initializer):\n with no_grad():\n initializer(tensor, gain=gain, mode=mode, **kwargs)\n else:\n raise TypeError(f'Unsupported initializer: {initializer!r}')\n\n\n# ---- data-independent tensor initializers ----\n# NOTE: all initializer functions must have `**kwargs` in its arguments, to\n# consume all arguments passed from :class:`LayerInit`. The arguments\n# are listed as follows:\n#\n# 1. gain: float\n# 2. fan_in_and_fan_out: Tuple[int, int]\n# 3. mode: str # either one of {\"fan_in\", \"fan_out\"}\n_no_grad_uniform_init = random.uniform_init\n_no_grad_normal_init = random.normal_init\n\n\ndef _validate_fan_in_and_fan_out(tensor: Tensor,\n fan_in_and_fan_out: Optional[Tuple[int, int]]\n ) -> Tuple[int, int]:\n if fan_in_and_fan_out is None:\n fan_in_and_fan_out = calculate_fan_in_and_fan_out(tensor)\n return fan_in_and_fan_out\n\n\ndef _calculate_fan(tensor: Tensor,\n fan_in_and_fan_out: Optional[Tuple[int, int]],\n mode: str) -> int:\n if mode not in ('fan_in', 'fan_out'):\n raise ValueError(f'`mode` must be either \"fan_in\" or \"fan_out\": '\n f'got {mode!r}')\n fan_in, fan_out = _validate_fan_in_and_fan_out(tensor, fan_in_and_fan_out)\n return fan_in if mode == 'fan_in' else fan_out\n\n\ndef zeros(tensor: Tensor, **kwargs):\n with no_grad():\n core.fill_zeros(tensor)\n\n\ndef ones(tensor: Tensor, **kwargs):\n with no_grad():\n core.fill(tensor, fill_value=1.)\n\n\ndef fill(tensor: Tensor, fill_value: Union[int, float, np.ndarray], **kwargs):\n with no_grad():\n core.fill(tensor, fill_value=float(fill_value))\n\n\ndef uniform(tensor: Tensor, low: float = 0., high: float = 1.,\n **kwargs):\n _no_grad_uniform_init(tensor, low=low, high=high)\n\n\ndef normal(tensor: Tensor, mean: float = 0., std: float = 1.,\n **kwargs):\n _no_grad_normal_init(tensor, mean=mean, std=std)\n\n\ndef xavier_uniform(tensor: Tensor,\n gain: float = 1.,\n fan_in_and_fan_out: Optional[Tuple[int, int]] = None,\n **kwargs):\n fan_in, fan_out = _validate_fan_in_and_fan_out(tensor, fan_in_and_fan_out)\n std = gain * math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std # such that U(-a, a) will have standard deviation `std`\n\n _no_grad_uniform_init(tensor, -a, a)\n\n\ndef xavier_normal(tensor: Tensor,\n gain: float = 1.,\n fan_in_and_fan_out: Optional[Tuple[int, int]] = None,\n **kwargs):\n fan_in, fan_out = _validate_fan_in_and_fan_out(tensor, fan_in_and_fan_out)\n std = gain * math.sqrt(2.0 / 
float(fan_in + fan_out))\n\n _no_grad_normal_init(tensor, 0., std)\n\n\ndef kaming_uniform(tensor: Tensor,\n gain: float = 1.,\n fan_in_and_fan_out: Optional[Tuple[int, int]] = None,\n mode: str = 'fan_in',\n **kwargs):\n fan = _calculate_fan(tensor, fan_in_and_fan_out, mode)\n std = gain / math.sqrt(fan)\n a = math.sqrt(3.0) * std # such that U(-a, a) will have standard deviation `std`\n\n _no_grad_uniform_init(tensor, -a, a)\n\n\ndef kaming_normal(tensor: Tensor,\n gain: float = 1.,\n fan_in_and_fan_out: Optional[Tuple[int, int]] = None,\n mode: str = 'fan_in',\n **kwargs):\n fan = _calculate_fan(tensor, fan_in_and_fan_out, mode)\n std = gain / math.sqrt(fan)\n\n _no_grad_normal_init(tensor, mean=0., std=std)\n\n\n# ---- data-dependent layer initializers ----\nclass DataDependentInitializer(object):\n \"\"\"\n Base class for data-dependent initializers.\n\n A :class:`DataDependentInitializer` initializes the `weight` and `bias` of\n layers according to their inputs. :class:`DataDependentInitializer` are\n generally stateless, and can be shared among layers.\n \"\"\"\n\n def register(self, layer: Module, initialized: bool = False) -> None:\n \"\"\"\n Register this data-dependent initializer to the specified `layer`.\n\n Args:\n layer: The layer to be initialized by this initializer.\n initialized: The initial `initialized` flag of the hook.\n Defaults to :obj:`False`.\n \"\"\"\n _ = DataDependentInitializerForwardPreHook(\n self, layer, initialized=initialized)\n\n def _init(self, layer: Module, inputs: List[Tensor]) -> None:\n raise NotImplementedError()\n\n def __call__(self, layer: Module, inputs: List[Tensor]) -> None:\n self._init(layer, list(inputs))\n\n def __repr__(self) -> str:\n buf = []\n for attr in getattr(self, '__annotations__', ()):\n attr_val = getattr(self, attr, None)\n buf.append(f'{attr}={attr_val!r}')\n return f'{self.__class__.__qualname__}({\", \".join(buf)})'\n\n\nclass DataDependentInitializerForwardPreHook(object):\n\n initializer: DataDependentInitializer\n hook_handle: RemovableHandle\n initialized: bool\n is_calling: bool # whether or not the initializer is being called\n\n def __init__(self,\n layer_init: DataDependentInitializer,\n layer: Module,\n initialized: bool = False):\n super().__init__()\n self.initializer = layer_init\n self.hook_handle = layer.register_forward_pre_hook(self)\n self.initialized = initialized\n self.is_calling = False\n\n def __call__(self, layer: Module, inputs: List[Tensor]):\n if not self.initialized:\n if not self.is_calling:\n self.is_calling = True\n try:\n self.initializer(layer, inputs)\n self.initialized = True\n finally:\n self.is_calling = False\n\n def set_initialized(self, initialized: bool = True):\n self.initialized = initialized\n\n\ndef set_initialized(root: Module, initialized: bool = True) -> None:\n \"\"\"\n Call `set_initialized` on `root` and all its children layers (recursively),\n as well as their every data-dependent initializer hook.\n\n Args:\n root: The root layer.\n initialized: The value of the `initialized` flag.\n If :obj:`True`, the data-dependent initializers will be disabled.\n If :obj:`False`, the data-dependent initializes will be enabled\n for the next forward call.\n \"\"\"\n def set_initialized(layer: Module):\n if hasattr(layer, 'set_initialized'):\n getattr(layer, 'set_initialized')(initialized)\n if not isinstance(layer, ScriptModule):\n for key, hook in layer._forward_pre_hooks.items():\n if isinstance(hook, DataDependentInitializerForwardPreHook):\n hook.set_initialized(initialized)\n 
root.apply(set_initialized)\n\n\ndef remove_data_dependent_initializers(root: Module) -> None:\n \"\"\"\n Remove all data-dependent initializer hooks from the `root` module and all\n its children (recursively).\n\n Args:\n root: The root module.\n \"\"\"\n def remove(layer: Module):\n handles_to_remove = []\n for itm, hook in layer._forward_pre_hooks.items():\n if isinstance(hook, DataDependentInitializerForwardPreHook):\n handles_to_remove.append(hook.hook_handle)\n for handle in handles_to_remove:\n handle.remove()\n root.apply(remove)\n",
"import os\nimport random\nimport unittest\nfrom functools import wraps\n\nimport numpy as np\nimport pytest\nfrom scipy import sparse as sp\n\nfrom tensorkit import tensor as T\nfrom tensorkit import *\n\n__all__ = [\n 'int_dtypes', 'float_dtypes', 'number_dtypes',\n 'n_samples',\n\n 'assert_allclose', 'assert_not_allclose', 'assert_equal', 'assert_not_equal',\n\n 'slow_test',\n\n 'check_distribution_instance', 'flow_standard_check',\n\n 'make_ndarray_by_coo', 'make_random_adj_matrix',\n\n 'TestCase',\n]\n\n\n# Not all integer or float dtypes are listed as follows. Just some commonly\n# used dtypes, enough for test.\nint_dtypes = (T.int32, T.int64)\nfloat_dtypes = (T.float32, T.float64)\nnumber_dtypes = int_dtypes + float_dtypes\n\n# The number of samples to take for tests which requires random samples.\nn_samples = 10000\n\n\ndef wrap_numpy_testing_assertion_fn(fn):\n def f(t):\n if T.sparse.is_sparse_tensor(t):\n t = T.sparse.to_numpy(t)\n if isinstance(t, (T.Tensor, StochasticTensor)):\n t = T.to_numpy(T.as_tensor(t))\n if isinstance(t, sp.spmatrix):\n t = t.toarray()\n return t\n\n def wrapper(x, y, **kwargs):\n return fn(f(x), f(y), **kwargs)\n return wrapper\n\n\nassert_allclose = wrap_numpy_testing_assertion_fn(np.testing.assert_allclose)\n\n\n@wrap_numpy_testing_assertion_fn\ndef assert_not_allclose(x, y, err_msg='', **kwargs):\n if np.all(np.allclose(x, y, **kwargs)):\n msg = f'`not allclose(x, y)` not hold'\n if err_msg:\n msg += f': {err_msg}'\n msg += f'\\nx = {x}\\ny = {y}'\n raise AssertionError(msg)\n\n\nassert_equal = wrap_numpy_testing_assertion_fn(np.testing.assert_equal)\n\n\n@wrap_numpy_testing_assertion_fn\ndef assert_not_equal(x, y, err_msg=''):\n if np.all(np.equal(x, y)):\n msg = f'`x != y` not hold'\n if err_msg:\n msg += f': {err_msg}'\n msg += f'\\nx = {x}\\ny = {y}'\n raise AssertionError(msg)\n\n\n# decorate a test that is slow\ndef slow_test(fn):\n fn = pytest.mark.skipif(\n os.environ.get('FAST_TEST', '0').lower() in ('1', 'on', 'yes', 'true'),\n reason=f'slow test: {fn}'\n )(fn)\n return fn\n\n\ndef check_distribution_instance(ctx,\n d,\n event_ndims,\n batch_shape,\n min_event_ndims,\n max_event_ndims,\n log_prob_fn,\n transform_origin_distribution=None,\n transform_origin_group_ndims=None,\n **expected_attrs):\n ctx.assertLessEqual(max_event_ndims - event_ndims, d.batch_ndims)\n\n event_shape = expected_attrs.get('event_shape', None)\n ctx.assertEqual(d.min_event_ndims, min_event_ndims)\n ctx.assertEqual(d.value_ndims, len(batch_shape) + event_ndims)\n if event_shape is not None:\n ctx.assertEqual(d.value_shape, batch_shape + event_shape)\n ctx.assertEqual(d.batch_shape, batch_shape)\n ctx.assertEqual(d.batch_ndims, len(batch_shape))\n ctx.assertEqual(d.event_ndims, event_ndims)\n ctx.assertEqual(d.event_shape, event_shape)\n\n for attr, val in expected_attrs.items():\n ctx.assertEqual(getattr(d, attr), val)\n ctx.assertEqual(\n d.validate_tensors,\n expected_attrs.get('validate_tensors', settings.validate_tensors)\n )\n\n # check sample\n for n_samples in (None, 5):\n for group_ndims in (None, 0,\n -(event_ndims - min_event_ndims),\n max_event_ndims - event_ndims):\n for reparameterized2 in (None, True, False):\n if reparameterized2 and not d.reparameterized:\n continue\n\n # sample()\n sample_kwargs = {}\n if n_samples is not None:\n sample_kwargs['n_samples'] = n_samples\n sample_shape = [n_samples]\n else:\n sample_shape = []\n\n if group_ndims is not None:\n sample_kwargs['group_ndims'] = group_ndims\n else:\n group_ndims = 0\n\n if reparameterized2 
is not None:\n sample_kwargs['reparameterized'] = reparameterized2\n else:\n reparameterized2 = d.reparameterized\n\n t = d.sample(**sample_kwargs)\n ctx.assertEqual(t.group_ndims, group_ndims)\n ctx.assertEqual(t.reparameterized, reparameterized2)\n ctx.assertEqual(\n T.rank(t.tensor),\n d.value_ndims + len(sample_shape))\n ctx.assertEqual(\n T.shape(t.tensor)[:(d.batch_ndims +\n len(sample_shape))],\n sample_shape + d.batch_shape\n )\n\n if transform_origin_distribution is not None:\n ctx.assertIsInstance(t.transform_origin, StochasticTensor)\n ctx.assertIs(\n t.transform_origin.distribution,\n transform_origin_distribution\n )\n ctx.assertIs(\n t.transform_origin.group_ndims,\n transform_origin_group_ndims\n )\n\n # log_prob()\n expected_log_prob = log_prob_fn(t)\n for group_ndims2 in (None, 0,\n -(event_ndims - min_event_ndims),\n max_event_ndims - event_ndims):\n if group_ndims2 is not None:\n log_prob_kwargs = {'group_ndims': group_ndims2}\n else:\n log_prob_kwargs = {}\n group_ndims2 = group_ndims\n\n log_prob = t.log_prob(**log_prob_kwargs)\n ctx.assertEqual(\n T.shape(log_prob),\n T.shape(t.tensor)[: T.rank(t.tensor) - (group_ndims2 + event_ndims)]\n )\n\n assert_allclose(\n log_prob,\n T.reduce_sum(\n expected_log_prob,\n T.int_range(-(group_ndims2 + (event_ndims - min_event_ndims)), 0)\n ),\n rtol=1e-4, atol=1e-6,\n )\n\n prob = t.prob(**log_prob_kwargs)\n assert_allclose(prob, T.exp(log_prob), rtol=1e-4, atol=1e-6)\n\n if transform_origin_distribution is not None:\n for p in (log_prob, prob):\n ctx.assertIsInstance(p.transform_origin,\n StochasticTensor)\n ctx.assertIs(\n p.transform_origin.distribution,\n transform_origin_distribution\n )\n ctx.assertIs(\n p.transform_origin.group_ndims,\n transform_origin_group_ndims\n )\n\n\ndef flow_standard_check(ctx, flow, x, expected_y, expected_log_det,\n input_log_det):\n # test call\n y, log_det = flow(x)\n assert_allclose(y, expected_y, rtol=1e-4, atol=1e-6)\n assert_allclose(log_det, expected_log_det, rtol=1e-4, atol=1e-6)\n\n y, log_det = flow(x, input_log_det)\n assert_allclose(y, expected_y, rtol=1e-4, atol=1e-6)\n assert_allclose(log_det, input_log_det + expected_log_det, rtol=1e-4, atol=1e-6)\n\n y, log_det = flow(x, compute_log_det=False)\n assert_allclose(y, expected_y, rtol=1e-4, atol=1e-6)\n ctx.assertIsNone(log_det)\n\n # test call inverse\n y = expected_y\n expected_x = x\n expected_log_det = -expected_log_det\n\n x, log_det = flow(y, inverse=True)\n assert_allclose(x, expected_x, rtol=1e-4, atol=1e-6)\n assert_allclose(log_det, expected_log_det, rtol=1e-4, atol=1e-6)\n\n x, log_det = flow(y, input_log_det, inverse=True)\n assert_allclose(x, expected_x, rtol=1e-4, atol=1e-6)\n assert_allclose(log_det, input_log_det + expected_log_det, rtol=1e-4, atol=1e-6)\n\n x, log_det = flow(y, inverse=True, compute_log_det=False)\n assert_allclose(x, expected_x, rtol=1e-4, atol=1e-6)\n ctx.assertIsNone(log_det)\n\n\ndef make_ndarray_by_coo(row, col, values, shape) -> np.ndarray:\n ret = np.zeros(shape, dtype=values.dtype)\n ret[row, col] = values\n return ret\n\n\ndef make_random_adj_matrix(node_count: int, p=0.1, dtype=T.float_x(),\n directed=True) -> T.SparseTensor:\n edge_count = int(node_count * node_count * p)\n indices = np.random.randint(0, node_count, size=[2, edge_count])\n if not directed:\n indices = np.concatenate(\n [indices, np.stack([indices[1], indices[0]], axis=0)],\n axis=1\n )\n indices = T.as_tensor(indices, dtype=T.int64)\n values = T.abs(T.random.randn([T.shape(indices)[1]], dtype=dtype)) + 1e-6\n return 
T.sparse.make_sparse(\n indices, values, shape=[node_count, node_count], coord_first=True)\n\n\nclass TestCaseMeta(type):\n\n def __new__(cls, name, parents, dct):\n def make_wrapper(method):\n @wraps(method)\n def wrapper(*args, **kwargs):\n T.random.set_deterministic(True)\n T.random.seed(1234)\n np.random.seed(1234)\n random.seed(1234)\n\n try:\n with T.use_device(T.first_gpu_device()):\n return method(*args, **kwargs)\n finally:\n T.random.set_deterministic(False)\n return wrapper\n\n keys = list(dct)\n for key in keys:\n val = dct[key]\n if key.startswith('test_'):\n val = make_wrapper(val)\n dct[key] = val\n\n return super().__new__(cls, name, parents, dct)\n\n\nclass TestCase(unittest.TestCase, metaclass=TestCaseMeta):\n pass\n"
] | [
[
"numpy.shape"
],
[
"numpy.allclose",
"numpy.random.seed",
"numpy.stack",
"numpy.equal",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ka2hyeon/autoins_public | [
"234f3765f173280806804bf4f4794773e4b0ca03",
"234f3765f173280806804bf4f4794773e4b0ca03"
] | [
"autoins/rl/tester.py",
"autoins/svdd/tester.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom autoins.common import common, io\n\nclass RewardShapingTester():\n def __init__(self,\n env,\n exp_dir,\n ccl_id,\n data_name):\n self.env = env\n self.exp_dir = exp_dir\n self.ccl_id = ccl_id\n self.data_name = data_name\n\n self.io_manager = io.IoManager(exp_dir = exp_dir,\n data_name = data_name,\n ccl_id = ccl_id)\n\n self.fig_dir = f'{exp_dir}/figure/{data_name}/{ccl_id}/reward'\n common.create_dir(self.fig_dir, clear_dir = True)\n\n def test_reward_shaping(self):\n ag_demo_list = self.io_manager.ag_demo\n \n for i, ag_demo in enumerate(ag_demo_list):\n goal = np.expand_dims(ag_demo[-1],0)\n goal = np.tile(goal, [ag_demo.shape[0], 1])\n\n phi_c = self.env.compute_phi_c(ag_demo, goal)\n phi_g = self.env.compute_phi_g(ag_demo, goal)\n node = self.env.get_node(ag_demo)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(phi_c, label = 'phi_c')\n ax.plot(phi_g, label = 'phi_g')\n ax.legend()\n fig.savefig(f'{self.fig_dir}/reward_shaping_{i:04d}.png')\n plt.close(fig)\n \n #'''\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(node)\n ax.legend()\n fig.savefig(f'{self.fig_dir}/node_{i:04d}.png')\n plt.close(fig)\n #'''\n\n\n\n ",
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom autoins.common import common, io\n\nclass SvddTester():\n def __init__(self,\n svdd,\n exp_dir,\n ccl_id,\n data_name):\n \n self.svdd = svdd\n self.exp_dir = exp_dir\n self.ccl_id = ccl_id\n self.data_name = data_name\n\n self.io_manager = io.IoManager(self.exp_dir, \n self.data_name, \n self.ccl_id)\n \n self.fig_dir = f'{exp_dir}/figure/{data_name}/{ccl_id}/test_svdd'\n common.create_dir(self.fig_dir, clear_dir = True)\n\n def test_ood_ant(self):\n assert self.data_name == 'ant'\n from toygrid import ToyGridEnv\n SCALE = 2.5\n\n world = ToyGridEnv().world\n\n ag_demo = self.io_manager.ag_demo\n label = self.io_manager.label\n state_demo = [] \n for ag_traj in ag_demo:\n state_traj = np.copy(ag_traj)\n state_traj[:,0] = (state_traj[:,0]/SCALE)+1\n state_traj[:,1] = (state_traj[:,1]/-SCALE)+1\n state_demo.append(state_traj)\n state_data = np.concatenate(state_demo, 0)\n ag_data = np.concatenate(ag_demo, 0)\n\n n = 100\n x = np.linspace(-5, 20, n)\n y = np.linspace(-20, 5, n)\n xv, yv = np.meshgrid(x, y)\n xv_state = (xv/SCALE)\n yv_state = (yv/-SCALE)\n\n xv_reshaped = np.reshape(xv,[-1,1])\n yv_reshaped = np.reshape(yv,[-1,1])\n ag = np.concatenate([xv_reshaped, yv_reshaped], axis = 1)\n \n predicted_c = self.svdd.predict(ag)\n predicted_c = np.reshape(predicted_c, [n,n])\n\n predicted_c_binary = self.svdd.predict_binary(ag)\n predicted_c_binary = np.reshape(predicted_c_binary, [n,n])\n\n fig = plt.figure()\n ax = fig.add_subplot(111) \n plt.pcolormesh(xv, yv, predicted_c)\n fig.savefig(f'{self.fig_dir}/ood.png')\n plt.close(fig)\n\n fig = plt.figure()\n ax = fig.add_subplot(111) \n ax.imshow(world)\n ax.scatter(state_data[:,0], state_data[:,1], s = 5, label ='data')\n ax.contourf(xv_state, yv_state, predicted_c_binary, alpha = 0.5)\n ax.legend()\n\n fig.savefig(f'{self.fig_dir}/ood_binary.png')\n plt.close(fig)\n"
] | [
[
"numpy.expand_dims",
"numpy.tile",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
],
[
"numpy.linspace",
"numpy.reshape",
"numpy.concatenate",
"numpy.copy",
"matplotlib.pyplot.close",
"matplotlib.pyplot.pcolormesh",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pinghu6/FANet | [
"8aad47eba0cf50387148e717c3e0b923a50d1264"
] | [
"Testing/models/swiftnet/resnet/resnet_single_scale.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nfrom itertools import chain\nimport torch.utils.checkpoint as cp\n\nfrom ..util import _Upsample, SpatialPyramidPooling\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef _bn_function_factory(conv, norm, relu=None):\n def bn_function(x):\n x = conv(x)\n if norm is not None:\n x = norm(x)\n if relu is not None:\n x = relu(x)\n return x\n\n return bn_function\n\n\ndef do_efficient_fwd(block, x, efficient):\n if efficient and x.requires_grad:\n return cp.checkpoint(block, x)\n else:\n return block(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, efficient=True, use_bn=True):\n super(BasicBlock, self).__init__()\n self.use_bn = use_bn\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes) if self.use_bn else None\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes) if self.use_bn else None\n self.downsample = downsample\n self.stride = stride\n self.efficient = efficient\n\n def forward(self, x):\n residual = x\n\n bn_1 = _bn_function_factory(self.conv1, self.bn1, self.relu)\n bn_2 = _bn_function_factory(self.conv2, self.bn2)\n\n out = do_efficient_fwd(bn_1, x, self.efficient)\n out = do_efficient_fwd(bn_2, out, self.efficient)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n relu = self.relu(out)\n # print(f'Basic Block memory: {torch.cuda.memory_allocated() // 2**20}')\n\n return relu, out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, efficient=True, use_bn=True):\n super(Bottleneck, self).__init__()\n self.use_bn = use_bn\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes) if self.use_bn else None\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes) if self.use_bn else None\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion) if self.use_bn else None\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.efficient = efficient\n\n def forward(self, x):\n residual = x\n\n bn_1 = _bn_function_factory(self.conv1, self.bn1, self.relu)\n bn_2 = _bn_function_factory(self.conv2, self.bn2, self.relu)\n bn_3 = _bn_function_factory(self.conv3, self.bn3, self.relu)\n\n out = do_efficient_fwd(bn_1, x, self.efficient)\n out = do_efficient_fwd(bn_2, out, self.efficient)\n out = do_efficient_fwd(bn_3, out, self.efficient)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n relu = self.relu(out)\n\n return relu, out\n\n\nclass 
ResNet(nn.Module):\n def __init__(self, block, layers, *, num_features=128, k_up=3, efficient=True, use_bn=True,\n spp_grids=(8, 4, 2, 1), spp_square_grid=False, **kwargs):\n super(ResNet, self).__init__()\n self.inplanes = 64\n self.efficient = efficient\n self.use_bn = use_bn\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64) if self.use_bn else lambda x: x\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n upsamples = []\n self.layer1 = self._make_layer(block, 64, layers[0])\n upsamples += [_Upsample(num_features, self.inplanes, num_features, use_bn=self.use_bn, k=k_up)]\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n upsamples += [_Upsample(num_features, self.inplanes, num_features, use_bn=self.use_bn, k=k_up)]\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n upsamples += [_Upsample(num_features, self.inplanes, num_features, use_bn=self.use_bn, k=k_up)]\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n self.fine_tune = [self.conv1, self.maxpool, self.layer1, self.layer2, self.layer3, self.layer4]\n if self.use_bn:\n self.fine_tune += [self.bn1]\n\n num_levels = 3\n self.spp_size = num_features\n bt_size = self.spp_size\n\n level_size = self.spp_size // num_levels\n\n self.spp = SpatialPyramidPooling(self.inplanes, num_levels, bt_size=bt_size, level_size=level_size,\n out_size=self.spp_size, grids=spp_grids, square_grid=spp_square_grid,\n bn_momentum=0.01 / 2, use_bn=self.use_bn)\n self.upsample = nn.ModuleList(list(reversed(upsamples)))\n\n self.random_init = [self.spp, self.upsample]\n\n self.num_features = num_features\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n layers = [nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False)]\n if self.use_bn:\n layers += [nn.BatchNorm2d(planes * block.expansion)]\n downsample = nn.Sequential(*layers)\n layers = [block(self.inplanes, planes, stride, downsample, efficient=self.efficient, use_bn=self.use_bn)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers += [block(self.inplanes, planes, efficient=self.efficient, use_bn=self.use_bn)]\n\n return nn.Sequential(*layers)\n\n def random_init_params(self):\n return chain(*[f.parameters() for f in self.random_init])\n\n def fine_tune_params(self):\n return chain(*[f.parameters() for f in self.fine_tune])\n\n def forward_resblock(self, x, layers):\n skip = None\n for l in layers:\n x = l(x)\n if isinstance(x, tuple):\n x, skip = x\n return x, skip\n\n def forward_down(self, image):\n x = self.conv1(image)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n features = []\n x, skip = self.forward_resblock(x, self.layer1)\n features += [skip]\n x, skip = self.forward_resblock(x, self.layer2)\n features += [skip]\n x, skip = self.forward_resblock(x, self.layer3)\n features += [skip]\n x, skip = self.forward_resblock(x, self.layer4)\n features += [self.spp.forward(skip)]\n return features\n\n def forward_up(self, features):\n features = features[::-1]\n\n x = features[0]\n\n upsamples = []\n for skip, up in zip(features[1:], 
self.upsample):\n x = up(x, skip)\n upsamples += [x]\n return x, {'features': features, 'upsamples': upsamples}\n\n def forward(self, image):\n return self.forward_up(self.forward_down(image))\n\n\ndef resnet18(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model\n\n\ndef resnet34(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)\n return model\n\n\ndef resnet50(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)\n return model\n\n\ndef resnet101(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)\n return model\n\n\ndef resnet152(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)\n return model\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.utils.checkpoint.checkpoint",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Davidxswang/nni | [
"270a36264515f1576071f07e741fccd5333434ff"
] | [
"nni/algorithms/compression/v2/pytorch/base/pruner.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\n\nfrom .compressor import Compressor, LayerInfo\n\n_logger = logging.getLogger(__name__)\n\n__all__ = ['Pruner']\n\n\nclass PrunerModuleWrapper(Module):\n def __init__(self, module: Module, module_name: str, config: Dict, pruner: Compressor):\n \"\"\"\n Wrap an module to enable data parallel, forward method customization and buffer registeration.\n\n Parameters\n ----------\n module\n The module user wants to compress.\n config\n The configurations that users specify for compression.\n module_name\n The name of the module to compress, wrapper module shares same name.\n pruner\n The pruner used to calculate mask.\n \"\"\"\n super().__init__()\n # origin layer information\n self.module = module\n self.name = module_name\n # config and pruner\n self.config = config\n self.pruner = pruner\n\n # register buffer for mask\n self.register_buffer(\"weight_mask\", torch.ones(self.module.weight.shape))\n if hasattr(self.module, 'bias') and self.module.bias is not None:\n self.register_buffer(\"bias_mask\", torch.ones(self.module.bias.shape))\n else:\n self.register_buffer(\"bias_mask\", None)\n\n def forward(self, *inputs):\n # apply mask to weight, bias\n self.module.weight.data = self.module.weight.data.mul_(self.weight_mask)\n if hasattr(self.module, 'bias') and self.module.bias is not None:\n self.module.bias.data = self.module.bias.data.mul_(self.bias_mask)\n return self.module(*inputs)\n\n\nclass Pruner(Compressor):\n \"\"\"\n The abstract class for pruning algorithm. Inherit this class and implement the `_reset_tools` to customize a pruner.\n \"\"\"\n\n def reset(self, model: Optional[Module] = None, config_list: Optional[List[Dict]] = None):\n super().reset(model=model, config_list=config_list)\n\n def _wrap_modules(self, layer: LayerInfo, config: Dict):\n \"\"\"\n Create a wrapper module to replace the original one.\n\n Parameters\n ----------\n layer\n The layer to instrument the mask.\n config\n The configuration for generating the mask.\n \"\"\"\n _logger.debug(\"Module detected to compress : %s.\", layer.name)\n wrapper = PrunerModuleWrapper(layer.module, layer.name, config, self)\n assert hasattr(layer.module, 'weight'), \"module %s does not have 'weight' attribute\" % layer.name\n # move newly registered buffers to the same device of weight\n wrapper.to(layer.module.weight.device)\n return wrapper\n\n def load_masks(self, masks: Dict[str, Dict[str, Tensor]]):\n \"\"\"\n Load an exist masks on the wrapper. 
You can train the model with an exist masks after load the masks.\n\n Parameters\n ----------\n masks\n The masks dict with format {'op_name': {'weight_mask': mask, 'bias_mask': mask}}.\n \"\"\"\n wrappers = self.get_modules_wrapper()\n for name, layer_mask in masks.items():\n assert name in wrappers, '{} is not in wrappers of this pruner, can not apply the mask.'.format(name)\n for mask_type, mask in layer_mask.items():\n assert hasattr(wrappers[name], mask_type), 'there is no attribute {} in wrapper'.format(mask_type)\n setattr(wrappers[name], mask_type, mask)\n\n def compress(self) -> Tuple[Module, Dict[str, Dict[str, Tensor]]]:\n \"\"\"\n Returns\n -------\n Tuple[Module, Dict]\n Return the wrapped model and mask.\n \"\"\"\n return self.bound_model, {}\n\n # NOTE: need refactor dim with supporting list\n def show_pruned_weights(self, dim: int = 0):\n \"\"\"\n Log the simulated prune sparsity.\n\n Parameters\n ----------\n dim\n The pruned dim.\n \"\"\"\n for _, wrapper in self.get_modules_wrapper().items():\n weight_mask = wrapper.weight_mask\n mask_size = weight_mask.size()\n if len(mask_size) == 1:\n index = torch.nonzero(weight_mask.abs() != 0, as_tuple=False).tolist()\n else:\n sum_idx = list(range(len(mask_size)))\n sum_idx.remove(dim)\n index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0, as_tuple=False).tolist()\n _logger.info(f'simulated prune {wrapper.name} remain/total: {len(index)}/{weight_mask.size(dim)}')\n\n def export_model(self, model_path, mask_path=None, onnx_path=None, input_shape=None, device=None):\n \"\"\"\n Export pruned model weights, masks and onnx model(optional)\n\n Parameters\n ----------\n model_path\n Path to save pruned model state_dict.\n mask_path\n (optional) path to save mask dict.\n onnx_path\n (optional) path to save onnx model.\n input_shape\n Input shape to onnx model.\n device\n Device of the model, used to place the dummy input tensor for exporting onnx file.\n The tensor is placed on cpu if ```device``` is None.\n \"\"\"\n assert model_path is not None, 'model_path must be specified'\n mask_dict = {}\n self._unwrap_model() # used for generating correct state_dict name without wrapper state\n\n for name, wrapper in self.get_modules_wrapper().items():\n weight_mask = wrapper.weight_mask\n bias_mask = wrapper.bias_mask\n if weight_mask is not None:\n mask_sum = weight_mask.sum().item()\n mask_num = weight_mask.numel()\n _logger.debug('Layer: %s Sparsity: %.4f', name, 1 - mask_sum / mask_num)\n wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask)\n if bias_mask is not None:\n wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask)\n # save mask to dict\n mask_dict[name] = {\"weight_mask\": weight_mask, \"bias_mask\": bias_mask}\n\n torch.save(self.bound_model.state_dict(), model_path)\n _logger.info('Model state_dict saved to %s', model_path)\n if mask_path is not None:\n torch.save(mask_dict, mask_path)\n _logger.info('Mask dict saved to %s', mask_path)\n if onnx_path is not None:\n assert input_shape is not None, 'input_shape must be specified to export onnx model'\n # input info needed\n if device is None:\n device = torch.device('cpu')\n input_data = torch.Tensor(*input_shape)\n torch.onnx.export(self.bound_model, input_data.to(device), onnx_path)\n _logger.info('Model in onnx with input shape %s saved to %s', input_data.shape, onnx_path)\n\n self._wrap_model()\n"
] | [
[
"torch.device",
"torch.ones",
"torch.Tensor",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FischbachLab/invaderCheck | [
"d4d146cc5db552e95a6cf241b382ed1c44790fe6",
"d4d146cc5db552e95a6cf241b382ed1c44790fe6"
] | [
"InvaderCheckLite.py",
"collapse_to_midas.py"
] | [
"#!/usr/bin/env python3\n\n##########################################################################################\n#### THIS SCRIPT WAS USED TO GENERATE DEPTH vs COVERAGE FIGURES in the IN VIVO PAPER ####\n##########################################################################################\n\n# Accept S3 dir for reference fasta\n# Accept Parent S3 dir for BAMs\n# Accept uniquely identifiable sample group\n# Accept s3 output location.\n# For each genome:\n# For each sample in this group:\n# get coverage distribution\n# write it to an appropriate s3 location\n\nimport concurrent.futures\nimport itertools\nimport logging\nimport boto3\nimport json\n\n# from botocore.session import SubsetChainConfigFactory\nimport pandas as pd\nimport os\n\nimport sys\n\n# from pandas.io.parsers import read_csv\nfrom tqdm import tqdm\nimport botocore.exceptions\nfrom invaderCheck import genome_coverage_distribution_with_subsampling\n\n# from invaderCheck import compute_strain_difference\n# from invaderCheck import compare_distributions_wViz\n\n\ndef get_file_names(bucket_name, prefix, suffix=\"txt\"):\n \"\"\"\n Return a list for the file names in an S3 bucket folder.\n\n :param bucket: Name of the S3 bucket.\n :param prefix: Only fetch keys that start with this prefix (folder name).\n :param suffix: Only fetch keys that end with this suffix (extension).\n \"\"\"\n s3_client = boto3.client(\"s3\")\n try:\n response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=prefix)\n except botocore.exceptions.ClientError:\n logging.error(f\"Bucket={bucket_name}, Prefix={prefix}\")\n # raise ce\n return None\n\n try:\n objs = response[\"Contents\"]\n except KeyError as ke:\n logging.error(\n f\"Path with bucket '{bucket_name}' and prefix '{prefix}' does not exist!\"\n )\n raise ke\n\n while response[\"IsTruncated\"]:\n response = s3_client.list_objects_v2(\n Bucket=bucket_name,\n Prefix=prefix,\n ContinuationToken=response[\"NextContinuationToken\"],\n )\n objs.extend(response[\"Contents\"])\n\n logging.info(f\"Sifting through {len(objs)} files ...\")\n\n shortlisted_files = list()\n if suffix == \"\":\n shortlisted_files = [obj[\"Key\"] for obj in objs]\n total_size_bytes = sum([obj[\"Size\"] for obj in objs])\n else:\n shortlisted_files = [obj[\"Key\"] for obj in objs if obj[\"Key\"].endswith(suffix)]\n total_size_bytes = sum(\n [obj[\"Size\"] for obj in objs if obj[\"Key\"].endswith(suffix)]\n )\n\n logging.info(\n f\"Found {len(shortlisted_files)} files, totalling about {total_size_bytes/1e9:,.3f} Gb.\"\n )\n\n # return shortlisted_files\n return [f\"s3://{bucket_name}/{file_path}\" for file_path in shortlisted_files]\n\n\ndef get_bam_file(s3uri, sample_name, subfolder=\"bowtie2\", suffix=\"bam\"):\n s3uri = s3uri.rstrip(\"/\")\n sample_name = sample_name.rstrip(\"/\")\n\n bucket, file_prefix = declutter_s3paths(f\"{s3uri}/{sample_name}/{subfolder}\")\n return get_file_names(bucket, file_prefix, suffix=\"bam\")[0]\n\n\ndef setup_experiment(\n metadata,\n keep_locations=[\"ex_vivo\", \"gut\"],\n challenged_on=\"Week4\",\n):\n # metadata = sample_metadata\n all_bam_paths_list = list()\n # setup_experiment(sample_metadata, s3_bam_dir, challenged_on=\"Week4\")\n df = metadata.query(\"Location in @keep_locations\").dropna(subset=[\"bam_location\"])\n df[\"bam_file\"] = df.apply(\n lambda x: get_bam_file(x.bam_location, x.sample_id), axis=1\n )\n all_bam_paths_list = sorted(df[\"bam_file\"].unique())\n\n comparison_df, challenge_weeks, inoculum_weeks = create_comparison_df(\n df, challenged_on\n )\n\n 
comparisons = list()\n for week in challenge_weeks:\n if week in inoculum_weeks:\n continue\n # pylint: disable=unused-variable\n comparison_target_weeks = {week} | inoculum_weeks\n comparisons.append(\n comparison_df.query(\"Week_query in @comparison_target_weeks\")\n .sort_values([\"Week_query\", \"MouseOrder\"], ascending=[False, True])\n .reset_index(drop=True)[[\"base_sample_id\", \"query_sample_id\", \"Week_query\"]]\n )\n\n return all_bam_paths_list, comparisons, challenge_weeks\n\n\ndef declutter_s3paths(s3uri):\n s3path_as_list = s3uri.replace(\"s3://\", \"\").rstrip(\"/\").split(\"/\")\n bucket = s3path_as_list.pop(0)\n prefix = \"/\".join(s3path_as_list)\n\n return bucket, prefix\n\n\ndef download_from_s3(s3_uri, local_dir):\n s3 = boto3.client(\"s3\")\n bucket, file_obj = declutter_s3paths(s3_uri)\n local_file = f\"{local_dir}/{os.path.basename(file_obj)}\"\n if not os.path.exists(local_file):\n with open(local_file, \"wb\") as f:\n s3.download_fileobj(bucket, file_obj, f)\n\n return local_file\n\n\ndef upload_to_s3(s3_uri_dir, local_obj):\n s3 = boto3.client(\"s3\")\n bucket, obj_dir = declutter_s3paths(s3_uri_dir)\n file_name = os.path.basename(local_obj)\n # with open(local_obj, \"rb\") as f:\n # s3.upload_fileobj(f, bucket, f\"{obj_dir}/{file_name}\")\n s3.meta.client.upload_file(local_obj, bucket, f\"{obj_dir}/{file_name}\")\n\n return\n\n\ndef depth_vector_exists(genome, bam_file, min_qual, min_pid, min_paln):\n genome_name = os.path.splitext(os.path.basename(genome))[0]\n output_dir = f\"{genome_name}_q{min_qual}_id{min_pid}_aln{min_paln}_vectors\"\n file_name = os.path.basename(bam_file).split(\"_vs_\")[0].split(\".\")[0]\n exp_vector_path = f\"{output_dir}/{file_name}.q{min_qual}_id{min_pid}_aln{min_paln}.ref_depth.csv.gz\"\n\n return os.path.exists(exp_vector_path)\n\n\ndef get_coverage_distribution(\n bam_s3_uri,\n fasta_list,\n local_tmp_dir,\n min_qual=20,\n min_pid=99,\n min_paln=100,\n subset_list=None,\n):\n if not bam_s3_uri.endswith(\"bam\"):\n return\n\n bam_file = f\"{local_tmp_dir}/{os.path.basename(bam_s3_uri)}\"\n bai_file = f\"{bam_file}.bai\"\n logging.info(f\"Calculating coverage distribution {bam_file} ...\")\n\n unprocessed_vectors = [\n depth_vector_exists(genome, bam_file, min_qual, min_pid, min_paln)\n for genome in fasta_list\n ]\n\n # download bam file and index, if needed.\n if not all(unprocessed_vectors):\n logging.info(f\"Downloading {bam_file} ...\")\n bam_file = download_from_s3(bam_s3_uri, local_tmp_dir)\n bai_file = download_from_s3(f\"{bam_s3_uri}.bai\", local_tmp_dir)\n\n # Get genome coverage for each genome in the bam file\n depth_files_list = [\n genome_coverage_distribution_with_subsampling.get_coverage_distribution(\n bam_file,\n fasta_file,\n min_qual=min_qual,\n min_pid=min_pid,\n min_paln=min_paln,\n subset_list=subset_list,\n )\n for fasta_file in fasta_list\n ]\n\n # delete bam file and index\n logging.info(f\"Done processing {bam_file}.\")\n if os.path.exists(bam_file):\n logging.info(\"Removing BAM files to save space.\")\n os.remove(bam_file)\n os.remove(bai_file)\n\n return pd.DataFrame(depth_files_list)\n\n\ndef compute_depth_profiles(\n sample_metadata,\n s3_fasta_dir,\n vector_paths_file,\n min_qual,\n min_pid,\n min_paln,\n local_tmp_dir=\"TEMP\",\n max_cores=1,\n):\n # From the metadata file, get\n # 1. a list of bam files\n # 2. 
a list of df for each base week vs query week samples\n all_bam_paths_list, comparisons, challenge_weeks = setup_experiment(sample_metadata)\n\n # Download all genomes\n os.makedirs(local_tmp_dir, exist_ok=True)\n s3_fasta_bucket, s3_fasta_prefix = declutter_s3paths(s3_fasta_dir)\n s3_fasta_suffix = \"fna\"\n all_fasta_paths_list = get_file_names(\n s3_fasta_bucket, s3_fasta_prefix, s3_fasta_suffix\n )\n logging.info(f\"Downloading {len(all_fasta_paths_list)} Genomes\")\n local_fasta_files = [\n download_from_s3(fasta_s3_uri, local_tmp_dir)\n for fasta_s3_uri in tqdm(all_fasta_paths_list, ascii=True, desc=\"Genomes\")\n ]\n genome_names = [\n os.path.splitext(os.path.basename(genome))[0] for genome in local_fasta_files\n ]\n\n _ = [\n os.makedirs(\n f\"{genome_name}_q{min_qual}_id{min_pid}_aln{min_paln}_vectors\",\n exist_ok=True,\n )\n for genome_name in genome_names\n ]\n\n vector_paths = list()\n with concurrent.futures.ProcessPoolExecutor(max_workers=max_cores) as executor:\n future = [\n executor.submit(\n get_coverage_distribution,\n bam_file,\n local_fasta_files,\n local_tmp_dir,\n min_qual=min_qual,\n min_pid=min_pid,\n min_paln=min_paln,\n subset_list=None,\n )\n for bam_file in all_bam_paths_list\n ]\n for f in tqdm(\n concurrent.futures.as_completed(future),\n ascii=True,\n desc=\"Genome Coverage Distribution\",\n ):\n vector_paths.append(f.result())\n\n vector_paths_df = pd.concat(vector_paths)\n # logging.info(vector_paths_df.shape)\n # logging.info(f\"\\n{vector_paths_df.head()}\")\n vector_paths_df.to_csv(vector_paths_file, index=False)\n\n ## Upload vectors to S3\n # with concurrent.futures.ProcessPoolExecutor(max_workers=max_cores) as executor:\n # future = [\n # executor.submit(\n # upload_to_s3,\n # f\"{s3_vector_output_dir}/{row.Genome_Name}\",\n # row.Depth_Vector,\n # )\n # for row in vector_paths_df.itertuples()\n # ]\n # for f in tqdm(concurrent.futures.as_completed(future), ascii=True, desc=\"Uploading depth profiles to S3\"):\n # _ = f.result()\n\n return genome_names, comparisons, challenge_weeks, vector_paths_df\n\n\ndef create_comparison_df(df, challenged_on):\n inoculum_query_samples = (\n df.query(\"Location == 'ex_vivo'\")\n .reset_index(drop=True)\n .rename(columns={\"sample_id\": \"query_sample_id\"})\n )[[\"query_sample_id\", \"Week\", \"MouseNum\", \"MouseOrder\"]]\n\n inoculum_weeks = set(inoculum_query_samples[\"Week\"])\n\n base_samples = (\n df.query(\"Week == @challenged_on\")\n .reset_index(drop=True)\n .rename(columns={\"sample_id\": \"base_sample_id\"})\n )[[\"base_sample_id\", \"Week\", \"MouseNum\", \"MouseOrder\"]]\n\n # I'll only be able to use the mice who have a base to compare\n # pylint: disable=unused-variable\n selected_mice = sorted(base_samples[\"MouseNum\"].unique())\n\n challenge_samples = (\n df.dropna(subset=[\"Challenge\"])\n .query(\"MouseNum in @selected_mice\")\n .reset_index(drop=True)\n .rename(columns={\"sample_id\": \"query_sample_id\"})\n )[[\"query_sample_id\", \"Week\", \"MouseNum\", \"MouseOrder\"]]\n\n challenge_weeks = set(challenge_samples[\"Week\"])\n\n all_query_samples = pd.concat([challenge_samples, inoculum_query_samples])\n compare_df = all_query_samples.merge(\n right=base_samples, on=[\"MouseNum\", \"MouseOrder\"], suffixes=(\"_query\", \"_base\")\n )\n\n return compare_df, challenge_weeks, inoculum_weeks\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s\\t[%(levelname)s]:\\t%(message)s\",\n )\n cores = os.cpu_count()\n max_cores = int(cores * 0.90) # use 
at most 90% of all CPU\n\n args_json = sys.argv[1]\n with open(args_json, \"r\") as j:\n args = json.load(j)\n\n sample_metadata = f\"{args['s3_base_output']}/{args['sample_metadata_suffix']}\"\n s3_vector_output_dir = (\n f\"{args['s3_base_output']}/{args['s3_vector_output_dir_suffix']}\"\n )\n s3_diff_output_dir = f\"{args['s3_base_output']}/{args['s3_diff_output_dir_suffix']}\"\n s3_compare_output_dir = (\n f\"{args['s3_base_output']}/{args['s3_compare_output_dir_suffix']}\"\n )\n\n # Final Output\n weekly_agg_pred_file = args[\"weekly_agg_pred_file\"]\n filter_week = args[\"filter_week\"]\n\n metadata_df = pd.DataFrame()\n if filter_week is not None:\n metadata_df = (\n pd.read_csv(sample_metadata, header=0)\n .query(\"Week in @filter_week\")\n .dropna(subset=[\"bam_location\"])\n )\n else:\n metadata_df = pd.read_csv(sample_metadata, header=0).dropna(\n subset=[\"bam_location\"]\n )\n\n # For each genome in each sample:\n # Compute per nucleotide coverage and probability distributions\n (\n genome_names,\n comparisons,\n challenge_weeks,\n vector_paths_df,\n ) = compute_depth_profiles(\n metadata_df,\n args[\"s3_fasta_dir\"],\n args[\"vector_paths_file\"],\n args[\"min_qual\"],\n args[\"min_pid\"],\n args[\"min_paln\"],\n args[\"local_tmp_dir\"],\n max_cores,\n )\n\n",
"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\nimport pandas as pd\nimport logging\nimport math\n\n\ndef log_ratio(a, b):\n if np.isnan(a) or np.isnan(b):\n return np.nan\n\n return math.log10(a / b)\n\n\ndef read_midas_output(\n file_path,\n selected_weeks,\n selected_samples,\n min_abundance=None,\n min_reads=None,\n infer_sample_name=True,\n):\n # file_path = midas_output\n # selected_weeks = sample_weeks\n logging.info(\"Reading Midas output ...\")\n df = pd.read_csv(\n file_path,\n header=0,\n usecols=[\n \"name\",\n \"sample_id\",\n \"count_reads\",\n \"relative_abundance\",\n \"Week\",\n \"Mouse\",\n \"Challenge\",\n ],\n ).dropna(subset=[\"Mouse\"])\n\n if min_abundance is not None:\n df = (\n df.query(\n f\"(Week in @selected_weeks) and (relative_abundance >= {min_abundance})\"\n )\n .reset_index(drop=True)\n .copy()\n )\n\n if min_reads is not None:\n df = (\n df.query(f\"(Week in @selected_weeks) and (count_reads >= {min_reads})\")\n .reset_index(drop=True)\n .copy()\n )\n\n if infer_sample_name:\n df[\"sample_name\"] = df.apply(lambda x: f\"{x.Week}{x.Mouse}\", axis=1)\n else:\n df[\"sample_name\"] = df[\"sample_id\"]\n\n df = df.query(\"sample_name in @selected_samples\").copy()\n\n df[\"unique_name\"] = df.apply(lambda x: f\"{x.sample_name}---{x['name']}\", axis=1)\n # By default Midas relative abundances are between 0-1, make them between 0-100\n # to match NM\n df[\"midas_relative_abundance\"] = df[\"relative_abundance\"] * 100\n df = df[\n [\n \"unique_name\",\n \"name\",\n \"sample_name\",\n \"count_reads\",\n \"midas_relative_abundance\",\n ]\n ]\n df.columns = [\n \"unique_name\",\n \"midas_bucket\",\n \"sample_name\",\n \"count_reads\",\n \"midas_relative_abundance\",\n ]\n logging.info(\n f\"{df['midas_bucket'].nunique()} buckets across {df['sample_name'].nunique()} samples found.\"\n )\n logging.info(f\"{df['unique_name'].nunique()} bucket-sample pairs found.\")\n return df\n\n\ndef read_strain_midas_contributions(\n contributions_file, min_contrib=0, trigger_contrib_check=20\n):\n # contributions_file = strain_midas_contribution_file\n logging.info(\"Reading Strain --> Midas contributions output ...\")\n select_cols = {\n \"sample_id\": \"Organism\",\n \"name\": \"midas_bucket\",\n \"abundance\": \"strain_contribution\",\n }\n # new_col_names = [\"Organism\", \"midas_bucket\", \"strain_contribution\"]\n init_df = pd.read_csv(\n contributions_file, header=0, usecols=select_cols.keys()\n ).rename(columns=select_cols)\n\n upper = 100 - trigger_contrib_check\n lower = trigger_contrib_check\n offmapping_df = init_df.query(\"@lower <= strain_contribution <= @upper\")\n num_offmapped_strains, _ = offmapping_df.shape\n if num_offmapped_strains > 0:\n logging.warning(\n f\"\\nFound {num_offmapped_strains} instances of significant (> {trigger_contrib_check}%) strain offmapping to midas buckets in contributions data.\"\n f\"This is usually caused due to inaccurate/erroneous genome assemblies. 
Please make sure that this is not the case, before relying on the output of this analysis.\"\n )\n logging.warning(f\"\\n{offmapping_df}\")\n\n df = init_df.query(f\"strain_contribution >= {min_contrib}\")\n logging.info(\n f\"{df['Organism'].nunique()} actual strains map to {df['midas_bucket'].nunique()} Midas buckets\"\n )\n # acceptable_buckets = set(df[\"midas_bucket\"])\n primary_buckets, secondary_buckets = find_primary_bucket(df)\n shared_buckets = find_buckets_linked_by_strains(init_df, primary_buckets)\n\n return (df, shared_buckets)\n\n\ndef find_primary_bucket(df):\n # Primary buckets:\n # - group by strain,\n # - descending order,\n # - pick top bucket\n # Secondary buckets: Midas buckets to which more than one of our strains map,\n # but they are not the primary catchment for any of our strains\n df = pd.read_csv(strain_midas_contribution_file)\n\n # Group by strains (sample_id) and keep the top contributor bucket (name) by abundance\n primary_df = (\n df.groupby([\"sample_id\"])\n .apply(lambda x: x.sort_values([\"abundance\"], ascending=False))\n .reset_index(drop=True)\n .groupby([\"sample_id\"])\n .head(1)\n )\n\n primary_buckets = set(primary_df[\"name\"])\n secondary_df = (\n df.query(\"name not in @primary_buckets\")\n .sort_values([\"name\"])\n .reset_index(drop=True)\n )\n secondary_buckets = sorted(\n secondary_df[\"name\"].unique()\n ) # as list for alphabetical printing\n logging.info(\n f\"Found {len(secondary_buckets)} midas buckets that are not primary catchments for any strain:\"\n )\n _ = [logging.info(f\"\\t{b}\") for b in secondary_buckets]\n\n return primary_buckets, set(secondary_buckets)\n # return primary_buckets\n\n\ndef find_buckets_linked_by_strains(df, acceptable_buckets):\n linked_buckets = dict()\n for row in df.itertuples(index=False):\n if row.Organism in linked_buckets:\n linked_buckets[row.Organism].add(row.midas_bucket)\n else:\n linked_buckets[row.Organism] = {row.midas_bucket}\n\n shared_buckets = dict()\n for group in linked_buckets.values():\n if len(group) == 1:\n continue\n for bucket in group:\n if bucket in acceptable_buckets:\n continue\n if bucket in shared_buckets:\n shared_buckets[bucket] |= group\n else:\n shared_buckets[bucket] = set(group)\n\n num_shared_strains = len(shared_buckets.keys())\n logging.info(\n f\"{num_shared_strains} instances found where a single strain contributed to multiple midas buckets.\"\n )\n return shared_buckets\n\n\ndef read_nm_output(file_path, read_stats, selected_samples, min_abundance=1e-6):\n logging.info(\"Reading NinjaMap output ...\")\n read_stats_df = pd.read_csv(read_stats, header=0)\n\n read_stats_df[\"adjustment\"] = (\n read_stats_df[\"Fragments_Aligned\"] / read_stats_df[\"Fragments_After_Trim\"]\n )\n keep_cols = [\"sample_id\", \"adjustment\"]\n read_stats_df = read_stats_df[keep_cols]\n\n df = (\n pd.read_csv(file_path, header=0)\n .query(\"(sample_id in @selected_samples)\")\n .reset_index(drop=True)\n .merge(read_stats_df, how=\"left\", on=\"sample_id\")\n .assign(Norm_Read_Fraction=lambda x: x.Read_Fraction * x.adjustment)\n .query(f\"Norm_Read_Fraction >= {min_abundance}\")\n )[\n [\n \"sample_id\",\n \"Strain_Name\",\n \"Norm_Read_Fraction\",\n \"Percent_Coverage\",\n \"Coverage_Depth\",\n ]\n ]\n\n # Update column names to match outputs from other tools.\n df.columns = [\n \"sample_name\",\n \"Organism\",\n \"NM_Norm_Read_Fraction\",\n \"Percent_Coverage\",\n \"Coverage_Depth\",\n ]\n logging.info(\n f\"{df['Organism'].nunique()} strains across {len(selected_samples)} samples 
found.\"\n )\n return df\n\n\ndef nm2midas(nm_df, strain_midas_df):\n logging.info(\n \"Transforming strain level abundances into midas bucket level abundances ...\"\n )\n # join_type = \"outer\" # default\n join_type = \"left\" # testing\n nm2midas = nm_df.merge(right=strain_midas_df, how=join_type, on=\"Organism\").dropna(\n subset=[\"sample_name\"]\n )\n\n bucket_strain_weights = (\n strain_midas_df[[\"midas_bucket\", \"strain_contribution\"]]\n .groupby([\"midas_bucket\"])\n .agg({\"strain_contribution\": np.nansum,})\n .reset_index()\n .rename(columns={\"strain_contribution\": \"total_strain_contribution\"})\n .dropna(subset=[\"total_strain_contribution\"])\n )\n\n strain_weighted_abundance = nm2midas.merge(\n right=bucket_strain_weights, how=\"left\", on=[\"midas_bucket\"]\n ).assign(\n proportional_contribution=lambda x: x.strain_contribution\n / x.total_strain_contribution,\n proportional_rel_abund=lambda x: x.NM_Norm_Read_Fraction\n * x.proportional_contribution,\n )[\n [\"sample_name\", \"midas_bucket\", \"Organism\", \"proportional_rel_abund\"]\n ]\n\n nm2midas_imputed = (\n strain_weighted_abundance.groupby([\"sample_name\", \"midas_bucket\"])\n .agg({\"proportional_rel_abund\": np.nansum})\n .reset_index()\n .rename(columns={\"proportional_rel_abund\": \"imputed_NM_rel_abund\"})\n )\n\n nm2midas_imputed[\"unique_name\"] = nm2midas_imputed.apply(\n lambda x: f\"{x.sample_name}---{x.midas_bucket}\", axis=1\n )\n\n nm2midas_imputed = nm2midas_imputed[[\"unique_name\", \"imputed_NM_rel_abund\"]]\n # nm2midas_imputed.sort_values(\n # \"imputed_NM_rel_abund\", ascending=False\n # ).reset_index(drop=True)\n logging.info(\n f\"Imputed NinjaMap strains --> Midas buckets: {nm2midas_imputed['unique_name'].nunique()} bucket-sample pairs\"\n )\n return nm2midas_imputed, strain_weighted_abundance\n\n\ndef fix_extreme_pos_invaders(\n current_pred, extreme_pos, min_extreme_pos=2, alt_prediction=\"Unclear\"\n):\n if current_pred.lower() == \"invader\":\n if extreme_pos < min_extreme_pos:\n return alt_prediction\n\n return current_pred\n\n\ndef read_ic_output(file_path, sample_names, min_extreme_pos=2):\n logging.info(\"Reading invaderCheck output ...\")\n df = pd.read_csv(file_path, header=0).dropna(subset=[\"Prediction\"])\n df[\"sample_name\"] = df[\"Sample\"].apply(lambda x: x.split(\"_\")[2])\n df_selected = df.query(\"sample_name in @sample_names\")\n\n # TODO @sunit: This is a temporary fix; the data needs to be run through the comparison\n # step again for a more final version\n # df_selected[\"Prediction\"] = df_selected.apply(\n # lambda row: fix_extreme_pos_invaders(row.Prediction, row.Extreme_Positions),\n # axis=1,\n # )\n\n logging.info(\n f\"Category counts (strains):\\n{df_selected['Prediction'].value_counts()}\"\n )\n logging.info(\n f\"{df_selected['Organism'].nunique()} strains across {df_selected['sample_name'].nunique()} samples found.\"\n )\n return df_selected\n\n\ndef ic2midas(ic_df, strain_midas_df):\n logging.info(\n \"Aggregating strain level invader predictions into midas bucket level predictions ...\"\n )\n # join_type = \"outer\" # default\n join_type = \"left\" # testing\n ic_pred = (\n ic_df.merge(right=strain_midas_df, how=join_type, on=\"Organism\",)\n .reset_index(drop=True)[[\"sample_name\", \"midas_bucket\", \"Prediction\"]]\n .drop_duplicates()\n .groupby([\"sample_name\", \"midas_bucket\"])\n .agg(unique_IC_pred=(\"Prediction\", \"unique\"))\n .reset_index()\n )\n\n # ic_pred.sort_values(\"IC_Final_Prediction\")\n ic_pred[\"unique_name\"] = 
ic_pred.apply(\n lambda x: f\"{x.sample_name}---{x.midas_bucket}\", axis=1\n )\n ic_pred[\"unique_IC_pred\"] = ic_pred[\"unique_IC_pred\"].apply(lambda x: frozenset(x))\n ic_pred[\"unique_IC_pred_len\"] = ic_pred[\"unique_IC_pred\"].apply(lambda x: len(x))\n # ic_pred.sort_values(\"unique_IC_pred_len\", ascending=False)\n ic_pred.drop([\"sample_name\", \"midas_bucket\"], axis=1, inplace=True)\n logging.info(\n f\"Category counts (buckets):\\n{ic_pred['unique_IC_pred'].value_counts()}\"\n )\n logging.info(\n f\"Imputed IC strains --> Midas buckets: {ic_pred['unique_name'].nunique()} bucket-sample pairs\"\n )\n return ic_pred[[\"unique_name\", \"unique_IC_pred\", \"unique_IC_pred_len\"]]\n\n\ndef aggregate_across_tools(midas_df, nm_df, ic_df):\n logging.info(\"Aggregating data from all tools in midas buckets terms ...\")\n df = midas_df.merge(\n right=nm_df,\n how=\"outer\",\n on=[\"unique_name\"],\n suffixes=[\"_midas\", \"_NM\"],\n validate=\"one_to_one\",\n ).merge(right=ic_df, how=\"outer\", on=[\"unique_name\"], suffixes=[\"_midas\", \"_IC\"],)\n\n df[\"midas_NM_ratio_log10\"] = df.apply(\n lambda x: log_ratio(x.midas_relative_abundance, x.imputed_NM_rel_abund), axis=1\n )\n\n df = df[\n [\n \"unique_name\",\n \"sample_name\",\n \"midas_bucket\",\n \"count_reads\",\n \"midas_relative_abundance\",\n \"imputed_NM_rel_abund\",\n \"midas_NM_ratio_log10\",\n \"unique_IC_pred\",\n \"unique_IC_pred_len\",\n ]\n ]\n # remove any row that does not have midas relative abundance, since it cannot be used.\n before_midas_trimming = df[\"unique_name\"].nunique()\n logging.info(\n f\"Aggregated data across all tools contains {df['unique_name'].nunique()} bucket-sample pairs.\"\n )\n df.dropna(subset=[\"midas_relative_abundance\"], inplace=True)\n after_midas_trimming = df[\"unique_name\"].nunique()\n logging.info(\n f\"Removed {before_midas_trimming - after_midas_trimming} midas-sample combinations that did not have a midas relative abundance\"\n )\n logging.info(\n f\"Aggregated data across all tools now contains {df['unique_name'].nunique()} bucket-sample pairs.\"\n )\n\n lower, upper = get_thresholds(df)\n df[\"unique_IC_pred\"] = df.apply(lambda row: other_ic_pred_categories(row), axis=1)\n df[\"tool_agreement\"] = df.apply(\n lambda row: check_tool_agreement(row, lower, upper), axis=1\n )\n logging.info(\n \"Removing cases where a Midas bucket was not found. 
Since we can't use that data.\"\n )\n df.dropna(subset=[\"tool_agreement\"], inplace=True)\n\n df[\"midas_is_greater\"] = df[\"midas_NM_ratio_log10\"].apply(lambda x: x > upper)\n logging.info(f\"Category counts (buckets):\\n{df['unique_IC_pred'].value_counts()}\")\n logging.info(\n f\"Aggregated data finally contains {df['unique_name'].nunique()} bucket-sample pairs from {df['midas_bucket'].nunique()} buckets and {df['sample_name'].nunique()} samples.\"\n )\n\n return df\n\n\ndef check_tool_agreement(row, lower, upper):\n value = row.midas_NM_ratio_log10\n nm_value = row.imputed_NM_rel_abund\n midas_value = row.midas_relative_abundance\n\n if np.isnan(nm_value) and np.isnan(midas_value):\n # If neither tool detected a bucket/strain (but IC did)\n return None\n elif (not np.isnan(nm_value)) and np.isnan(midas_value):\n # If Midas did not detect a bucket, but NM did\n return None\n elif np.isnan(nm_value) and (not np.isnan(midas_value)):\n # If NM did not detect a bucket, but Midas did\n return \"In_Midas_Only\"\n elif lower < value < upper:\n # If Midas and NM found a bucket and both reported relative abundances within tolerable range (2 * SD)\n return \"Agree\"\n elif (value <= lower) or (upper <= value):\n # If Midas and NM found a bucket and both reported relative abundances outside of tolerable range (2 * SD)\n return \"Disagree\"\n else:\n # Any other case that hasn't been accounted for.\n return \"Error\"\n\n\ndef other_ic_pred_categories(row):\n if type(row.unique_IC_pred) == frozenset:\n return row.unique_IC_pred\n\n if (\n (not np.isnan(row.midas_relative_abundance))\n and (np.isnan(row.imputed_NM_rel_abund))\n and (np.isnan(row.unique_IC_pred))\n ):\n return frozenset({\"In_Midas_Only\"})\n elif (\n (np.isnan(row.midas_relative_abundance))\n and (not np.isnan(row.imputed_NM_rel_abund))\n and (np.isnan(row.unique_IC_pred))\n ):\n return frozenset({\"In_NM_Only\"})\n elif np.isnan(row.unique_IC_pred):\n return frozenset({\"No_IC_Pred\"})\n else:\n return frozenset({\"ERROR\"})\n\n\ndef get_NM_Midas_agree_df(df, lower, upper):\n return df.query(f\"{lower} < midas_NM_ratio_log10 < {upper}\").copy()\n\n\ndef get_NM_Midas_disagree_df(df, lower, upper):\n return df.query(\n f\"(midas_NM_ratio_log10 <= {lower}) or ({upper} <= midas_NM_ratio_log10)\"\n ).copy()\n\n\ndef get_thresholds(df):\n assert (\n \"midas_NM_ratio_log10\" in df.columns\n ), \"This is not the dataframe you're looking for ...\"\n dist_mean = df[\"midas_NM_ratio_log10\"].mean(skipna=True)\n dist_sd = df[\"midas_NM_ratio_log10\"].std(skipna=True)\n kc_threshold = dist_sd * 2\n kc_lower = dist_mean - kc_threshold\n kc_upper = dist_mean + kc_threshold\n logging.info(\"Midas/NM Log10 ratio:\")\n logging.info(f\"\\tMean:{dist_mean}\")\n logging.info(f\"\\tSD:{dist_sd}\")\n logging.info(f\"\\tAcceptable range:{kc_lower} to {kc_upper}\")\n return (kc_lower, kc_upper)\n\n\ndef identify_inputs(all_cases_df):\n # NM and Midas agree and IC says Input\n logging.info(\"Searching for Inputs ...\")\n df = all_cases_df.copy()\n df[\"Final_Prediction\"] = df.apply(\n lambda x: \"Input\"\n if (\n (x.unique_IC_pred_len == 1)\n and (\"Input\" in x.unique_IC_pred)\n and (x.tool_agreement == \"Agree\")\n )\n else None,\n axis=1,\n )\n df_selected = df.dropna(subset=[\"Final_Prediction\"]).reset_index(drop=True)\n logging.info(f\"Category counts:\\n{df_selected['unique_IC_pred'].value_counts()}\")\n logging.info(\n f\"Final prediction counts:\\n{df_selected['Final_Prediction'].value_counts()}\"\n )\n logging.info(f\"Final prediction 
buckets:\\n{df_selected['midas_bucket'].nunique()}\")\n return df_selected\n\n\ndef identify_invaders(all_cases_df):\n logging.info(\"Searching for Invaders ...\")\n df = all_cases_df.copy()\n df[\"Final_Prediction\"] = df.apply(\n lambda x: \"Invader\"\n # NM and Midas agree and IC says Invader\n if ((x.unique_IC_pred_len == 1) and (\"Invader\" in x.unique_IC_pred))\n # Only Midas can see it\n or (x.tool_agreement == \"In_Midas_Only\")\n # NM and Midas disagree and Midas is greater\n or (\n (x.unique_IC_pred_len == 1)\n and (x.tool_agreement == \"Disagree\")\n and x.midas_is_greater\n )\n else None,\n axis=1,\n )\n df_selected = df.dropna(subset=[\"Final_Prediction\"]).reset_index(drop=True)\n logging.info(f\"Category counts:\\n{df_selected['unique_IC_pred'].value_counts()}\")\n logging.info(\n f\"Final prediction counts:\\n{df_selected['Final_Prediction'].value_counts()}\"\n )\n logging.info(f\"Final prediction buckets:\\n{df_selected['midas_bucket'].nunique()}\")\n return df_selected\n\n\ndef identify_mixed(all_cases_df, already_identified):\n logging.info(\"Searching for Mixed buckets ...\")\n df = all_cases_df.copy()\n logging.info(\n f\"Remove {len(already_identified)} unique names that have already been categorized\"\n )\n df[\"Final_Prediction\"] = df.query(\"unique_name not in @already_identified\").apply(\n lambda x: \"Mixed\"\n # Midas bucket has contributions from multiple type of strains\n # if (x.unique_IC_pred_len == 2 and (\"Unclear\" not in x.unique_IC_pred) )\n if (x.unique_IC_pred == {\"Input\", \"Invader\"}) else None,\n axis=1,\n )\n df_selected = df.dropna(subset=[\"Final_Prediction\"]).reset_index(drop=True)\n logging.info(f\"Category counts:\\n{df_selected['unique_IC_pred'].value_counts()}\")\n logging.info(\n f\"Final prediction counts:\\n{df_selected['Final_Prediction'].value_counts()}\"\n )\n logging.info(f\"Final prediction buckets:\\n{df_selected['midas_bucket'].nunique()}\")\n return df_selected\n\n\ndef identify_unclear(all_cases_df, already_identified):\n logging.info(\"Searching for Unclear predictions ...\")\n df = all_cases_df.copy()\n logging.info(\n f\"Remove {len(already_identified)} unique names that have already been categorized\"\n )\n df[\"Final_Prediction\"] = df.query(\"unique_name not in @already_identified\").apply(\n lambda x: \"Unclear\"\n if (not np.isnan(x.unique_IC_pred_len)) or (\"No_IC_Pred\" in x.unique_IC_pred)\n else None,\n axis=1,\n )\n # df[\"Final_Prediction\"] = \"Unclear\"\n df_selected = df.dropna(subset=[\"Final_Prediction\"]).reset_index(drop=True)\n logging.info(f\"Category counts:\\n{df_selected['unique_IC_pred'].value_counts()}\")\n logging.info(\n f\"Final prediction counts:\\n{df_selected['Final_Prediction'].value_counts()}\"\n )\n logging.info(f\"Final prediction buckets:\\n{df_selected['midas_bucket'].nunique()}\")\n return df_selected\n\n\ndef plot_relative_abundance_comparison(df, file_prefix, color=\"Final_Prediction\"):\n import seaborn as sns\n\n sns.set()\n sns.set_style(\"whitegrid\")\n import matplotlib\n\n matplotlib.use(\"agg\")\n import matplotlib.pyplot as plt\n\n plt.rcParams[\"figure.figsize\"] = (24, 24)\n\n sns.scatterplot(\n data=df,\n y=\"imputed_NM_rel_abund\",\n x=\"midas_relative_abundance\",\n hue=color,\n palette=\"colorblind\",\n ).set(xscale=\"log\", yscale=\"log\")\n # ylim=(1e-5, 1e2), xlim=(1e-5, 1e2)\n plt.plot([1e-4, 1e2], [1e-4, 1e2], linewidth=2, color=\"red\", linestyle=\"dashed\")\n plt.savefig(\n f\"{file_prefix}.png\", bbox_inches=\"tight\",\n )\n plt.close()\n\n\ndef 
plot_bucket_relative_abundance(\n df, sample_order, file_prefix, color=\"Final_Prediction\"\n):\n import seaborn as sns\n\n sns.set()\n sns.set_style(\"whitegrid\")\n import matplotlib\n\n matplotlib.use(\"agg\")\n import matplotlib.pyplot as plt\n\n plt.rcParams[\"figure.figsize\"] = (24, 12)\n\n # mouse_order = [f\"W8M{i}\" for i in range(1, 18)]\n sns.stripplot(\n data=df,\n y=\"midas_relative_abundance\",\n x=\"sample_name\",\n hue=color,\n palette=\"colorblind\",\n order=sample_order,\n dodge=True,\n ).set(\n yscale=\"log\", title=\"\", ylim=(1e-4, 1e2),\n )\n plt.savefig(\n f\"{file_prefix}.png\", bbox_inches=\"tight\",\n )\n plt.close()\n\n\ndef add_strain_context_back(\n df, strain_weighted_abundance, invader_check_df, ninjamap_df\n):\n # add_strain_context_back(mixed_df, strain_weighted_bucket_nm_abundance, ic_mice_df, nm_df)\n data = (\n df\n # .query(f\"midas_bucket == '{bucket}'\")\n .merge(\n right=strain_weighted_abundance, # strain_weighted_bucket_nm_abundance,\n on=[\"sample_name\", \"midas_bucket\"],\n how=\"left\",\n )\n .merge(\n right=invader_check_df[\n [\n \"sample_name\",\n \"Organism\",\n \"Prediction\",\n \"Extreme_Positions\",\n \"missed_2_EP\",\n ]\n ],\n on=[\"sample_name\", \"Organism\"],\n how=\"left\",\n )\n .merge(right=ninjamap_df, on=[\"sample_name\", \"Organism\"], how=\"left\",)[\n [\n \"unique_name\",\n \"sample_name\",\n \"midas_bucket\",\n \"Organism\",\n \"count_reads\",\n \"midas_relative_abundance\",\n \"imputed_NM_rel_abund\",\n \"proportional_rel_abund\",\n \"NM_Norm_Read_Fraction\",\n \"Prediction\",\n \"missed_2_EP\",\n \"Final_Prediction\",\n \"Extreme_Positions\",\n \"Percent_Coverage\",\n \"Coverage_Depth\",\n ]\n ]\n .rename(\n columns={\n \"Prediction\": \"Org_initial_prediction\",\n \"Final_Prediction\": \"Bucket_final_prediction\",\n \"missed_2_EP\": \"Org_prob_missed_2_EP\",\n \"count_reads\": \"Bucket_midas_num_reads_assigned\",\n \"midas_relative_abundance\": \"Bucket_midas_relative_abundance\",\n \"imputed_NM_rel_abund\": \"Bucket_imputed_NM_rel_abund\",\n \"proportional_rel_abund\": \"Org_proportional_rel_abund\",\n \"NM_Norm_Read_Fraction\": \"Org_NM_Norm_Read_Fraction\",\n \"Extreme_Positions\": \"Org_Extreme_Positions\",\n \"Percent_Coverage\": \"Org_Percent_Coverage\",\n \"Coverage_Depth\": \"Org_Coverage_Depth\",\n },\n )\n )\n return data\n\n\ndef remove_mismapped_midas_buckets(\n df, linked_buckets, control_samples, remaining_samples\n):\n logging.info(\n f\"Searching for mismapped buckets in {len(control_samples)} control samples\"\n )\n # sample_order = control_samples + remaining_samples\n contaminant_buckets = set(\n df.query(\n \"(sample_name in @control_samples) and (tool_agreement == 'In_Midas_Only')\"\n )[\"midas_bucket\"].unique()\n )\n logging.info(\n f\"{len(contaminant_buckets)} potential mismapped buckets found in the control samples.\"\n )\n logging.info(\n f\"Searching for these buckets in {len(remaining_samples)} remaining samples\"\n )\n # Remove buckets that are only present in control samples\n present_outside_saline = set(\n df.query(\n \"(midas_bucket in @contaminant_buckets) and (sample_name in @remaining_samples)\"\n )[\"midas_bucket\"].unique()\n )\n\n remove_buckets = set()\n if len(present_outside_saline) > 0:\n # contaminants that are not present outside saline\n remove_buckets = contaminant_buckets.difference(present_outside_saline)\n logging.info(\n f\"Computing pearson's correlations for {len(present_outside_saline)} buckets with other related major buckets:\"\n )\n for bucket in 
present_outside_saline:\n if is_correlated_with_another(\n df, linked_buckets, bucket, remaining_samples\n ):\n remove_buckets |= {bucket}\n\n removed_buckets = \"\\n\\t\".join([i for i in remove_buckets])\n logging.info(\n f\"Removing the following {len(remove_buckets)} off-mapped buckets from analysis as they are highly correlated with other major buckets.\\n\\t\"\n f\"{removed_buckets}\"\n )\n clean_df = df.query(\"midas_bucket not in @remove_buckets\").copy()\n else:\n logging.info(\"No mismapping found across weeks\")\n clean_df = df\n\n logging.info(f\"Category counts:\\n{clean_df['unique_IC_pred'].value_counts()}\")\n logging.info(\n f\"Cleaned data finally contains {clean_df['unique_name'].nunique()} bucket-sample pairs from {clean_df['midas_bucket'].nunique()} buckets and {clean_df['sample_name'].nunique()} samples.\"\n )\n return clean_df, remove_buckets\n\n\ndef is_correlated_with_another(\n df, linked_buckets, bucket, sample_order, min_correlation=0.75\n):\n base = extract_bucket_sample_vector(bucket, df, sample_order)\n correlations = list()\n if bucket not in linked_buckets:\n return False\n for b in linked_buckets[bucket]:\n # for all the values in this bucket\n if b in linked_buckets:\n # if the value is also a key in this dict.\n continue\n query = extract_bucket_sample_vector(b, df, sample_order)\n corr = base.corr(query, min_periods=4)\n logging.info(f\"{bucket} vs {b} = {corr}\")\n correlations.append(corr > min_correlation)\n # correlations.append(base.corr(query, min_periods=4))\n\n return any(correlations)\n\n\ndef extract_bucket_sample_vector(bucket, df, sample_order):\n # return a pandas series object for midas bucket, ordered by samples\n this_bucket = df.query(f\"midas_bucket == '{bucket}'\").copy()\n this_bucket.set_index(\"sample_name\", inplace=True)\n return this_bucket[\"midas_relative_abundance\"].reindex(sample_order)\n\n\ndef check_prediction_consistency(df):\n # Logic in R\n # invader_check_df %>%\n # select(Mouse,Week,midas_bucket,Bucket_final_prediction) %>%\n # unique() %>%\n # group_by(Mouse,midas_bucket) %>%\n # pivot_wider(names_from = Week, values_from=Bucket_final_prediction)\n ####\n\n # df = df[[\"midas_bucket\", \"sample_name\", \"Final_Prediction\"]]\n df = all_categorized_df[[\"midas_bucket\", \"sample_name\", \"Final_Prediction\"]].copy()\n\n df[\"Mouse\"] = df[\"sample_name\"].apply(\n lambda x: int(x.replace(\"M\", \" \").split(\" \")[1])\n )\n df[\"Week\"] = df[\"sample_name\"].apply(lambda x: x.replace(\"M\", \" \").split(\" \")[0])\n\n # df.set_index(['midas_bucket', 'Mouse'], inplace=True)\n\n logging.info(f\"\\n{df.sort_values('midas_bucket').head()}\")\n\n # logging.info(f\"\\n{df.set_index(['midas_bucket', 'Mouse']).stack().head()}\")\n # logging.info(\n # f\"\\n{df.pivot_table(index=, columns='Week',values='Final_Prediction').head()}\"\n # )\n\n df.drop(columns=\"sample_name\").pivot(\n index=[\"midas_bucket\"], columns=[\"Week\"], values=\"Final_Prediction\"\n )\n\n\ndef parse_sample_metadata(\n sample_metadata, ignore_weeks=[\"Week0\", \"Week4\"], control_terms=[\"PBS\"]\n):\n df = (\n pd.read_csv(sample_metadata, header=0)\n .query(\"Week != @ignore_weeks\")\n .sort_values([\"Week\", \"MouseNum\"])\n .reset_index(drop=True)\n )\n sample_weeks = list(df[\"Week\"].unique())\n sample_names = list(df[\"sample_id\"])\n control_samples = list(df.query(\"Challenge in @control_terms\")[\"sample_id\"])\n sample_order = (\n df[[\"sample_id\", \"Week\", \"MouseOrder\"]]\n .sort_values([\"Week\", \"MouseOrder\"])\n 
.reset_index(drop=True)\n )\n\n return sample_weeks, sample_names, control_samples, sample_order\n\n\n##############################################################################\n##############################################################################\n##############################################################################\n\n# if __name__ == \"__main__\":\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s\\t[%(levelname)s]:\\t%(message)s\",\n)\ndate_prefix = 20201231\nmidas_min_abundance = 0\nnm_min_abundance = 0\nmidas_min_reads = 3\n\n# ic_pred_output = sys.argv[1]\n# midas_output = sys.argv[2]\n# nm_output = sys.argv[3]\n# nm_read_stats = sys.argv[4]\n# strain_midas_contribution_file = sys.argv[5]\n# sample_week = sys.argv[6] # use W#, example: W8\n\n## MBFv1 - Test\n# dataset_name = \"mbfv1\"\n# ic_pred_output = \"s3://czbiohub-microbiome/Sunit_Jain/scratch/immigrationCheck/output/v3_20201102/majority.03_predictions.csv\"\n# midas_output = \"/Users/sunit.jain/Research/Alice/in_vivo/Mouse_Backfill/Midas_Results/v1/mbfv1_dataframe_minRelAbund_0.csv\"\n# nm_output = \"/Users/sunit.jain/Research/Alice/in_vivo/Mouse_Backfill/NinjaMap/db_SCv1_2/MouseBackFill_V1/analysis/20201105_MouseBackFill_V1.long.csv\"\n# nm_read_stats = \"/Users/sunit.jain/Research/Alice/in_vivo/Mouse_Backfill/NinjaMap/db_SCv1_2/MouseBackFill_V1/analysis/20201105_MouseBackFill_V1.read_stats.csv\"\n# strain_midas_contribution_file = (\n# \"/Users/sunit.jain/Research/Sunit/Midas/art_reads_output.full_dataframe.csv\"\n# )\n# sample_metadata = \"/Users/sunit.jain/Research/Alice/Midas_NinjaMap_ImmiCheck_Compare/MBFv1/mbfv1_ic_metadata.csv\"\n# output_folder = (\n# \"/Users/sunit.jain/Research/Alice/Midas_NinjaMap_ImmiCheck_Compare/MBFv1\"\n# )\n# ignored_weeks = [\"W0\", \"W4\"]\n# challenge_control = [\"PBS\"]\n# infer_sample_names = True\n\n## MBFv2 - Test\ndataset_name = \"mbfv2\"\nic_pred_output = \"s3://czbiohub-microbiome/Synthetic_Community/invaderCheck/SCv2_3_20201208/Mouse_Backfill_v2/majority.ic_pred.csv\"\nmidas_output = \"/Users/sunit.jain/Research/Alice/in_vivo/Mouse_Backfill/Midas_Results/v2/mbfv2_dataframe_minRelAbund_0.csv\"\nnm_output = \"/Users/sunit.jain/Research/Alice/in_vivo/Mouse_Backfill/NinjaMap/db_SCv2_3/MouseBackFill_V2/analysis/20201231_MouseBackFill_V2.long.csv\"\nnm_read_stats = \"/Users/sunit.jain/Research/Alice/in_vivo/Mouse_Backfill/NinjaMap/db_SCv2_3/MouseBackFill_V2/analysis/20201231_MouseBackFill_V2.read_stats.csv\"\nstrain_midas_contribution_file = (\n \"/Users/sunit.jain/Research/Sunit/Midas/SCv2_3.art_reads_output.full_dataframe.csv\"\n)\nsample_metadata = \"s3://czbiohub-microbiome/Synthetic_Community/invaderCheck/SCv2_3_20201208/Mouse_Backfill_v2/00_metadata/mbfv2_metadata.csv\"\noutput_folder = (\n \"/Users/sunit.jain/Research/Alice/Midas_NinjaMap_ImmiCheck_Compare/MBFv2\"\n)\n\nignored_weeks = [\"Week0\", \"Week4\"]\nchallenge_control = [\"PBS\"]\ninfer_sample_names = False\n\noutput_file = (\n f\"{output_folder}/{date_prefix}_{dataset_name.lower()}.input_v_invader.csv\"\n)\n\nsample_weeks, sample_names, control_samples, sample_order = parse_sample_metadata(\n sample_metadata, ignore_weeks=ignored_weeks, control_terms=challenge_control\n)\n\nactual_samples = [sample for sample in sample_names if sample not in control_samples]\n\n# Strain contributions\nstrain_midas_df, linked_buckets = read_strain_midas_contributions(\n strain_midas_contribution_file, min_contrib=0\n)\n\n# Midas output\nmidas_df = read_midas_output(\n midas_output,\n sample_weeks,\n 
selected_samples=sample_names,\n min_abundance=midas_min_abundance,\n min_reads=midas_min_reads,\n infer_sample_name=infer_sample_names,\n)\n# midas_df.sort_values(\"count_reads\").head()\n\n# NinjaMap output\n## Normalize by num reads after trim (as opposed to num reads aligned)\nnm_df = read_nm_output(nm_output, nm_read_stats, sample_names, nm_min_abundance)\n\n# Transform NinjaMap strain level relative abundance values into Midas bucket\n# level relative abundances\nnm2midas_df, strain_weighted_bucket_nm_abundance = nm2midas(nm_df, strain_midas_df)\n\n# Invader Check\n# TODO @sunit: This is a temporary fix function;\n# the data needs to be run through the comparison\n# step again and changes needed there for a more final version\nic_df = read_ic_output(ic_pred_output, sample_names)\n# ic_df.to_csv(f\"{sample_week}.03_all_ic_predictions.csv\", index=False)\n\n# Transform Invader check Strain level outputs into Midas bucket terms\nic2midas_df = ic2midas(ic_df, strain_midas_df)\n\n# Combine all data\nall_cases = aggregate_across_tools(midas_df, nm2midas_df, ic2midas_df)\nlogging.info(\n f\"Found {all_cases['unique_name'].nunique()} 'midas bucket - sample' combinations.\"\n)\nall_cases.to_csv(f\"{output_folder}/{date_prefix}.all_cases.csv\", index=False)\n\nassert \"Error\" not in list(\n all_cases[\"tool_agreement\"].unique()\n), \"NM-Midas agreement anomaly detected. Found a case that hasn't been accounted for.\"\n\nunique_names_before_merger = len(\n set(midas_df[\"unique_name\"])\n | set(nm2midas_df[\"unique_name\"])\n | set(ic2midas_df[\"unique_name\"])\n)\n\nunique_names_after_merger = len(set(all_cases[\"unique_name\"]))\n\n# assert (\n# unique_names_before_merger == unique_names_after_merger\n# ), f\"Expected total number of unique names to stay the same before ({unique_names_before_merger}) and after ({unique_names_after_merger}) the merger.\"\n\n# Remove midas mismapped buckets\nclean_midas_buckets, removed_buckets = remove_mismapped_midas_buckets(\n all_cases, linked_buckets, control_samples, actual_samples\n)\ndel all_cases\n\n# Check: all_cases.query(\"(midas_bucket == 'Bacteroides_cellulosilyticus_58046') and (sample_name == 'SCV2-Week8-Mouse10')\")\n\nlogging.info(\n f\"Will attempt to categorize {clean_midas_buckets['unique_name'].nunique()} 'midas bucket - sample' combinations.\"\n)\nclean_midas_buckets.to_csv(\n f\"{output_folder}/{date_prefix}.clean_midas_buckets.csv\", index=False\n)\n\ninput_df = identify_inputs(clean_midas_buckets)\ninvader_df = identify_invaders(clean_midas_buckets)\n\nassert (\n len(set(input_df[\"unique_name\"]) & set(invader_df[\"unique_name\"])) == 0\n), \"Overlap between input and invader calls found!\"\n\nalready_identified = set(input_df[\"unique_name\"]) | set(invader_df[\"unique_name\"])\nmixed_df = identify_mixed(clean_midas_buckets, already_identified)\n\nalready_identified = already_identified | set(mixed_df[\"unique_name\"])\n\nunclear_df = identify_unclear(clean_midas_buckets, already_identified)\n\nall_categorized_df = pd.concat([input_df, invader_df, mixed_df, unclear_df])\n# all_categorized_df.shape\nlogging.info(\n f\"Successfully categorized {all_categorized_df['unique_name'].nunique()} 'midas bucket - sample' combinations for {all_categorized_df['midas_bucket'].nunique()} buckets.\"\n)\n\nlogging.info(\n f\"Adding strain contribution weighted abundances to 'midas bucket - sample' combinations.\"\n)\nadd_strain_context_back(\n all_categorized_df, strain_weighted_bucket_nm_abundance, ic_df, nm_df\n).to_csv(\n output_file, 
index=False,\n)\n\n# check_prediction_consistency(all_categorized_df)\n# .to_csv(\n# f\"{output_folder}/prediction_consistency.csv\"\n# )\n\nlogging.info(f\"Making some very interesting plots ...\")\nfor week in sample_weeks:\n plot_sample_names = list(sample_order.query(\"Week == @week\")[\"sample_id\"])\n week_df = all_categorized_df.query(\"sample_name in @plot_sample_names\")\n logging.info(\n f\"Final predictions breakdown for {week}:\\n{week_df['Final_Prediction'].value_counts()}\"\n )\n logging.info(\n f\"Final predictions buckets for {week}:\\n{week_df['midas_bucket'].nunique()}\"\n )\n plot_relative_abundance_comparison(\n all_categorized_df,\n f\"{output_folder}/{date_prefix}.{week}.imputed_nm_vs_midas_abundance\",\n color=\"Final_Prediction\",\n )\n\n plot_bucket_relative_abundance(\n all_categorized_df,\n sample_names,\n f\"{output_folder}/{date_prefix}.{week}.midas_abundance_by_prediction\",\n )\n\n# combine midas, ninjamap and ic data\n# take final input/invader/mixed/unclear calls\n# report proportions for mixed buckets\nmidas_df.query(\"midas_bucket == 'Bacteroides_stercorirosoris_59710'\").sort_values(\n [\"count_reads\"]\n)\n# visualize_predictions(df)\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.isnan",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
acidburn0zzz/beam | [
"13d7c5c54daacd4536b533c072f72e80e5e86c78"
] | [
"sdks/python/apache_beam/io/tfrecordio_test.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport binascii\nimport cStringIO\nimport glob\nimport gzip\nimport logging\nimport os\nimport pickle\nimport random\nimport shutil\nimport tempfile\nimport unittest\n\nimport crcmod\n\nimport apache_beam as beam\nfrom apache_beam import coders\nfrom apache_beam.io.filesystem import CompressionTypes\nfrom apache_beam.io.tfrecordio import ReadFromTFRecord\nfrom apache_beam.io.tfrecordio import WriteToTFRecord\nfrom apache_beam.io.tfrecordio import _TFRecordSink\nfrom apache_beam.io.tfrecordio import _TFRecordSource\nfrom apache_beam.io.tfrecordio import _TFRecordUtil\nfrom apache_beam.testing.test_pipeline import TestPipeline\nfrom apache_beam.testing.util import assert_that\nfrom apache_beam.testing.util import equal_to\n\ntry:\n import tensorflow as tf # pylint: disable=import-error\nexcept ImportError:\n tf = None # pylint: disable=invalid-name\n logging.warning('Tensorflow is not installed, so skipping some tests.')\n\n# Created by running following code in python:\n# >>> import tensorflow as tf\n# >>> import base64\n# >>> writer = tf.python_io.TFRecordWriter('/tmp/python_foo.tfrecord')\n# >>> writer.write('foo')\n# >>> writer.close()\n# >>> with open('/tmp/python_foo.tfrecord', 'rb') as f:\n# ... data = base64.b64encode(f.read())\n# ... 
print data\nFOO_RECORD_BASE64 = 'AwAAAAAAAACwmUkOZm9vYYq+/g=='\n\n# Same as above but containing two records ['foo', 'bar']\nFOO_BAR_RECORD_BASE64 = 'AwAAAAAAAACwmUkOZm9vYYq+/gMAAAAAAAAAsJlJDmJhckYA5cg='\n\n\nclass TestTFRecordUtil(unittest.TestCase):\n\n def setUp(self):\n self.record = binascii.a2b_base64(FOO_RECORD_BASE64)\n\n def _as_file_handle(self, contents):\n result = cStringIO.StringIO()\n result.write(contents)\n result.reset()\n return result\n\n def _increment_value_at_index(self, value, index):\n l = list(value)\n l[index] = chr(ord(l[index]) + 1)\n return ''.join(l)\n\n def _test_error(self, record, error_text):\n with self.assertRaises(ValueError) as context:\n _TFRecordUtil.read_record(self._as_file_handle(record))\n self.assertIn(error_text, context.exception.message)\n\n def test_masked_crc32c(self):\n self.assertEqual(0xfd7fffa, _TFRecordUtil._masked_crc32c('\\x00' * 32))\n self.assertEqual(0xf909b029, _TFRecordUtil._masked_crc32c('\\xff' * 32))\n self.assertEqual(0xfebe8a61, _TFRecordUtil._masked_crc32c('foo'))\n self.assertEqual(\n 0xe4999b0,\n _TFRecordUtil._masked_crc32c('\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00'))\n\n def test_masked_crc32c_crcmod(self):\n crc32c_fn = crcmod.predefined.mkPredefinedCrcFun('crc-32c')\n self.assertEqual(\n 0xfd7fffa,\n _TFRecordUtil._masked_crc32c(\n '\\x00' * 32, crc32c_fn=crc32c_fn))\n self.assertEqual(\n 0xf909b029,\n _TFRecordUtil._masked_crc32c(\n '\\xff' * 32, crc32c_fn=crc32c_fn))\n self.assertEqual(\n 0xfebe8a61, _TFRecordUtil._masked_crc32c(\n 'foo', crc32c_fn=crc32c_fn))\n self.assertEqual(\n 0xe4999b0,\n _TFRecordUtil._masked_crc32c(\n '\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00', crc32c_fn=crc32c_fn))\n\n def test_write_record(self):\n file_handle = cStringIO.StringIO()\n _TFRecordUtil.write_record(file_handle, 'foo')\n self.assertEqual(self.record, file_handle.getvalue())\n\n def test_read_record(self):\n actual = _TFRecordUtil.read_record(self._as_file_handle(self.record))\n self.assertEqual('foo', actual)\n\n def test_read_record_invalid_record(self):\n self._test_error('bar', 'Not a valid TFRecord. 
Fewer than 12 bytes')\n\n def test_read_record_invalid_length_mask(self):\n record = self._increment_value_at_index(self.record, 9)\n self._test_error(record, 'Mismatch of length mask')\n\n def test_read_record_invalid_data_mask(self):\n record = self._increment_value_at_index(self.record, 16)\n self._test_error(record, 'Mismatch of data mask')\n\n def test_compatibility_read_write(self):\n for record in ['', 'blah', 'another blah']:\n file_handle = cStringIO.StringIO()\n _TFRecordUtil.write_record(file_handle, record)\n file_handle.reset()\n actual = _TFRecordUtil.read_record(file_handle)\n self.assertEqual(record, actual)\n\n\nclass _TestCaseWithTempDirCleanUp(unittest.TestCase):\n \"\"\"Base class for TestCases that deals with TempDir clean-up.\n\n Inherited test cases will call self._new_tempdir() to start a temporary dir\n which will be deleted at the end of the tests (when tearDown() is called).\n \"\"\"\n\n def setUp(self):\n self._tempdirs = []\n\n def tearDown(self):\n for path in self._tempdirs:\n if os.path.exists(path):\n shutil.rmtree(path)\n self._tempdirs = []\n\n def _new_tempdir(self):\n result = tempfile.mkdtemp()\n self._tempdirs.append(result)\n return result\n\n\nclass TestTFRecordSink(_TestCaseWithTempDirCleanUp):\n\n def _write_lines(self, sink, path, lines):\n f = sink.open(path)\n for l in lines:\n sink.write_record(f, l)\n sink.close(f)\n\n def test_write_record_single(self):\n path = os.path.join(self._new_tempdir(), 'result')\n record = binascii.a2b_base64(FOO_RECORD_BASE64)\n sink = _TFRecordSink(\n path,\n coder=coders.BytesCoder(),\n file_name_suffix='',\n num_shards=0,\n shard_name_template=None,\n compression_type=CompressionTypes.UNCOMPRESSED)\n self._write_lines(sink, path, ['foo'])\n\n with open(path, 'r') as f:\n self.assertEqual(f.read(), record)\n\n def test_write_record_multiple(self):\n path = os.path.join(self._new_tempdir(), 'result')\n record = binascii.a2b_base64(FOO_BAR_RECORD_BASE64)\n sink = _TFRecordSink(\n path,\n coder=coders.BytesCoder(),\n file_name_suffix='',\n num_shards=0,\n shard_name_template=None,\n compression_type=CompressionTypes.UNCOMPRESSED)\n self._write_lines(sink, path, ['foo', 'bar'])\n\n with open(path, 'r') as f:\n self.assertEqual(f.read(), record)\n\n\[email protected](tf is None, 'tensorflow not installed.')\nclass TestWriteToTFRecord(TestTFRecordSink):\n\n def test_write_record_gzip(self):\n file_path_prefix = os.path.join(self._new_tempdir(), 'result')\n with TestPipeline() as p:\n input_data = ['foo', 'bar']\n _ = p | beam.Create(input_data) | WriteToTFRecord(\n file_path_prefix, compression_type=CompressionTypes.GZIP)\n\n actual = []\n file_name = glob.glob(file_path_prefix + '-*')[0]\n for r in tf.python_io.tf_record_iterator(\n file_name, options=tf.python_io.TFRecordOptions(\n tf.python_io.TFRecordCompressionType.GZIP)):\n actual.append(r)\n self.assertEqual(actual, input_data)\n\n def test_write_record_auto(self):\n file_path_prefix = os.path.join(self._new_tempdir(), 'result')\n with TestPipeline() as p:\n input_data = ['foo', 'bar']\n _ = p | beam.Create(input_data) | WriteToTFRecord(\n file_path_prefix, file_name_suffix='.gz')\n\n actual = []\n file_name = glob.glob(file_path_prefix + '-*.gz')[0]\n for r in tf.python_io.tf_record_iterator(\n file_name, options=tf.python_io.TFRecordOptions(\n tf.python_io.TFRecordCompressionType.GZIP)):\n actual.append(r)\n self.assertEqual(actual, input_data)\n\n\nclass TestTFRecordSource(_TestCaseWithTempDirCleanUp):\n\n def _write_file(self, path, base64_records):\n 
record = binascii.a2b_base64(base64_records)\n with open(path, 'wb') as f:\n f.write(record)\n\n def _write_file_gzip(self, path, base64_records):\n record = binascii.a2b_base64(base64_records)\n with gzip.GzipFile(path, 'wb') as f:\n f.write(record)\n\n def test_process_single(self):\n path = os.path.join(self._new_tempdir(), 'result')\n self._write_file(path, FOO_RECORD_BASE64)\n with TestPipeline() as p:\n result = (p\n | beam.io.Read(\n _TFRecordSource(\n path,\n coder=coders.BytesCoder(),\n compression_type=CompressionTypes.AUTO,\n validate=True)))\n assert_that(result, equal_to(['foo']))\n\n def test_process_multiple(self):\n path = os.path.join(self._new_tempdir(), 'result')\n self._write_file(path, FOO_BAR_RECORD_BASE64)\n with TestPipeline() as p:\n result = (p\n | beam.io.Read(\n _TFRecordSource(\n path,\n coder=coders.BytesCoder(),\n compression_type=CompressionTypes.AUTO,\n validate=True)))\n assert_that(result, equal_to(['foo', 'bar']))\n\n def test_process_gzip(self):\n path = os.path.join(self._new_tempdir(), 'result')\n self._write_file_gzip(path, FOO_BAR_RECORD_BASE64)\n with TestPipeline() as p:\n result = (p\n | beam.io.Read(\n _TFRecordSource(\n path,\n coder=coders.BytesCoder(),\n compression_type=CompressionTypes.GZIP,\n validate=True)))\n assert_that(result, equal_to(['foo', 'bar']))\n\n def test_process_auto(self):\n path = os.path.join(self._new_tempdir(), 'result.gz')\n self._write_file_gzip(path, FOO_BAR_RECORD_BASE64)\n with TestPipeline() as p:\n result = (p\n | beam.io.Read(\n _TFRecordSource(\n path,\n coder=coders.BytesCoder(),\n compression_type=CompressionTypes.AUTO,\n validate=True)))\n assert_that(result, equal_to(['foo', 'bar']))\n\n\nclass TestReadFromTFRecordSource(TestTFRecordSource):\n\n def test_process_gzip(self):\n path = os.path.join(self._new_tempdir(), 'result')\n self._write_file_gzip(path, FOO_BAR_RECORD_BASE64)\n with TestPipeline() as p:\n result = (p\n | ReadFromTFRecord(\n path, compression_type=CompressionTypes.GZIP))\n assert_that(result, equal_to(['foo', 'bar']))\n\n def test_process_gzip_auto(self):\n path = os.path.join(self._new_tempdir(), 'result.gz')\n self._write_file_gzip(path, FOO_BAR_RECORD_BASE64)\n with TestPipeline() as p:\n result = (p\n | ReadFromTFRecord(\n path, compression_type=CompressionTypes.AUTO))\n assert_that(result, equal_to(['foo', 'bar']))\n\n\nclass TestEnd2EndWriteAndRead(_TestCaseWithTempDirCleanUp):\n\n def create_inputs(self):\n input_array = [[random.random() - 0.5 for _ in xrange(15)]\n for _ in xrange(12)]\n memfile = cStringIO.StringIO()\n pickle.dump(input_array, memfile)\n return memfile.getvalue()\n\n def test_end2end(self):\n file_path_prefix = os.path.join(self._new_tempdir(), 'result')\n\n # Generate a TFRecord file.\n with TestPipeline() as p:\n expected_data = [self.create_inputs() for _ in range(0, 10)]\n _ = p | beam.Create(expected_data) | WriteToTFRecord(file_path_prefix)\n\n # Read the file back and compare.\n with TestPipeline() as p:\n actual_data = p | ReadFromTFRecord(file_path_prefix + '-*')\n assert_that(actual_data, equal_to(expected_data))\n\n def test_end2end_auto_compression(self):\n file_path_prefix = os.path.join(self._new_tempdir(), 'result')\n\n # Generate a TFRecord file.\n with TestPipeline() as p:\n expected_data = [self.create_inputs() for _ in range(0, 10)]\n _ = p | beam.Create(expected_data) | WriteToTFRecord(\n file_path_prefix, file_name_suffix='.gz')\n\n # Read the file back and compare.\n with TestPipeline() as p:\n actual_data = p | 
ReadFromTFRecord(file_path_prefix + '-*')\n assert_that(actual_data, equal_to(expected_data))\n\n def test_end2end_auto_compression_unsharded(self):\n file_path_prefix = os.path.join(self._new_tempdir(), 'result')\n\n # Generate a TFRecord file.\n with TestPipeline() as p:\n expected_data = [self.create_inputs() for _ in range(0, 10)]\n _ = p | beam.Create(expected_data) | WriteToTFRecord(\n file_path_prefix + '.gz', shard_name_template='')\n\n # Read the file back and compare.\n with TestPipeline() as p:\n actual_data = p | ReadFromTFRecord(file_path_prefix + '.gz')\n assert_that(actual_data, equal_to(expected_data))\n\n @unittest.skipIf(tf is None, 'tensorflow not installed.')\n def test_end2end_example_proto(self):\n file_path_prefix = os.path.join(self._new_tempdir(), 'result')\n\n example = tf.train.Example()\n example.features.feature['int'].int64_list.value.extend(range(3))\n example.features.feature['bytes'].bytes_list.value.extend(\n [b'foo', b'bar'])\n\n with TestPipeline() as p:\n _ = p | beam.Create([example]) | WriteToTFRecord(\n file_path_prefix, coder=beam.coders.ProtoCoder(example.__class__))\n\n # Read the file back and compare.\n with TestPipeline() as p:\n actual_data = (p | ReadFromTFRecord(\n file_path_prefix + '-*',\n coder=beam.coders.ProtoCoder(example.__class__)))\n assert_that(actual_data, equal_to([example]))\n\n def test_end2end_read_write_read(self):\n path = os.path.join(self._new_tempdir(), 'result')\n with TestPipeline() as p:\n # Initial read to validate the pipeline doesn't fail before the file is\n # created.\n _ = p | ReadFromTFRecord(path + '-*', validate=False)\n expected_data = [self.create_inputs() for _ in range(0, 10)]\n _ = p | beam.Create(expected_data) | WriteToTFRecord(\n path, file_name_suffix='.gz')\n\n # Read the file back and compare.\n with TestPipeline() as p:\n actual_data = p | ReadFromTFRecord(path+'-*', validate=True)\n assert_that(actual_data, equal_to(expected_data))\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n unittest.main()\n"
] | [
[
"tensorflow.python_io.TFRecordOptions",
"tensorflow.train.Example"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
micimize/DigitalHistoPath | [
"f2a4dd03761e321c35b1b2e17de3aa4b3ba49511"
] | [
"code_cm17/trainer/auto_hardmine.py"
] | [
"import sys\nimport os\nimport argparse\nimport logging\nimport json\nimport time\nimport numpy as np\nimport openslide\nimport PIL\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nfrom torch.utils.data import DataLoader\nimport math\nimport json\nimport logging\nimport time\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.utils import multi_gpu_model\nfrom skimage.transform import resize, rescale\nimport gzip\nimport time\nimport pandas as pd\nimport csv\nimport tensorflow as tf\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')\nfrom helpers.utils import *\nfrom data.automine_data_loader import WSIStridedPatchDataset\nfrom models.seg_models import *\nnp.random.seed(0)\n\n\n# python3 auto_hardmine.py /media/mak/mirlproject1/CAMELYON17/training/dataset/center_0/patient_004_node_4.tif /media/mak/Data/Projects/Camelyon17/saved_models/keras_models/segmentation/CM16/unet_densenet121_imagenet_pretrained_L0_20190712-173828/Model_Stage2.h5 ../configs/DenseNet121_UNET_NCRF_CM16_COORDS_CDL_AUTOMINE.json ./patient_004_node_4_mask.npy --label_path='/media/mak/mirlproject1/CAMELYON17/training/groundtruth/lesion_annotations/Mask/patient_004_node_4_mask.tif' --mask_path='/media/mak/mirlproject1/CAMELYON17/training/groundtruth/lesion_annotations/Mask/patient_004_node_4_mask.tif'\n\nparser = argparse.ArgumentParser(description='Hardmine points from CM17 training WSI')\nparser.add_argument('wsi_path', default=None, metavar='WSI_PATH', type=str,\n help='Path to the input WSI file')\nparser.add_argument('model_path', default=None, metavar='MODEL_PATH', type=str,\n help='Path to the saved model weights file of a Keras model')\nparser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,\n help='Path to the config file in json format related to'\n ' the ckpt file')\nparser.add_argument('out_csv_path', default=None, metavar='OUT_CSV_PATH',\n type=str, help='Path to the output csv file')\nparser.add_argument('--mask_path', default=None, metavar='MASK_PATH', type=str,\n help='Path to the tissue mask of the input WSI file')\nparser.add_argument('--label_path', default=None, metavar='LABEL_PATH', type=str,\n help='Path to the Ground-Truth label image')\nparser.add_argument('--GPU', default='0,1', type=str, help='which GPU to use'\n ', default 0')\nparser.add_argument('--num_workers', default=8, type=int, help='number of '\n 'workers to use to make batch, default 5')\nparser.add_argument('--level', default=5, type=int, help='heatmap generation level,'\n ' default 5')\nparser.add_argument('--sampling_stride', default=16, type=int, help='Sampling pixels in tissue mask,'\n ' default 16')\nparser.add_argument('--roi_masking', default=True, type=int, help='Sample pixels from tissue mask region,'\n ' default True, points are not sampled from glass region')\n\n\ndef dice(im1, im2, empty_score=1.0):\n \"\"\"\n Computes the Dice coefficient, a measure of set similarity.\n Parameters\n ----------\n im1 : array-like, bool\n Any array of arbitrary size. If not boolean, will be converted.\n im2 : array-like, bool\n Any other array of identical size. If not boolean, will be converted.\n Returns\n -------\n dice : float\n Dice coefficient as a float on range [0,1].\n Maximum similarity = 1\n No similarity = 0\n Both are empty (sum eq to zero) = empty_score\n \n Notes\n -----\n The order of inputs for `dice` is irrelevant. 
The result will be\n identical if `im1` and `im2` are switched.\n \"\"\"\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n\n if im1.shape != im2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n im_sum = im1.sum() + im2.sum()\n if im_sum == 0:\n return empty_score\n\n # Compute Dice coefficient\n intersection = np.logical_and(im1, im2)\n\n return 2. * intersection.sum() / im_sum\n\n\n\ndef get_probs_map(model, dataloader):\n \"\"\"\n Generate probability map\n \"\"\"\n probs_map = np.zeros(dataloader.dataset._mask.shape)\n count_map = np.zeros(dataloader.dataset._mask.shape)\n num_batch = len(dataloader)\n batch_size = dataloader.batch_size\n map_x_size = dataloader.dataset._mask.shape[0]\n map_y_size = dataloader.dataset._mask.shape[1]\n level = dataloader.dataset._level\n factor = dataloader.dataset._sampling_stride\n flip = dataloader.dataset._flip\n rotate = dataloader.dataset._rotate\n wsi_name = os.path.basename(dataloader.dataset._wsi_path) \n down_scale = 1.0 / pow(2, level)\n\n count = 0\n time_now = time.time()\n probs_map = []\n\n DICE_THRESHOLD = 0.90\n # label_mask is not utilized \n print ('Started Mining')\n try:\n model = multi_gpu_model(model, gpus=2, cpu_merge=False)\n print(\"Inference on multiple GPUs..\")\n except:\n print(\"Inference on single GPU or CPU..\")\n\n for (image_patches, x_coords, y_coords, label_patches) in dataloader:\n\n image_patches = image_patches.cpu().data.numpy()\n label_patches = label_patches.cpu().data.numpy()\n x_coords = x_coords.cpu().data.numpy()*pow(2,level)\n y_coords = y_coords.cpu().data.numpy()*pow(2,level)\n\n y_preds = model.predict(image_patches, batch_size=batch_size, verbose=0, steps=None)\n\n for i in range(batch_size):\n y_pred_mask = labelthreshold(y_preds[i][:,:,1], threshold=0.45)\n y_true_mask = label_patches[i]\n dice_score = dice(y_pred_mask, y_true_mask)\n if dice_score < DICE_THRESHOLD:\n # print (wsi_name, str(x_coords[i]), str(y_coords[i]), str(dice_score))\n probs_map.append((wsi_name, str(x_coords[i]), str(y_coords[i]), str(dice_score)))\n # imshow(normalize_minmax(image_patches[i]), y_pred_mask, y_true_mask)\n count += 1\n time_spent = time.time() - time_now\n time_now = time.time()\n print ('{}, flip : {}, rotate : {}, batch : {}/{}, Run Time : {:.2f}'\n .format(\n time.strftime(\"%Y-%m-%d %H:%M:%S\"), dataloader.dataset._flip,\n dataloader.dataset._rotate, count, num_batch, time_spent))\n\n # logging.info(\n # '{}, flip : {}, rotate : {}, batch : {}/{}, Run Time : {:.2f}'\n # .format(\n # time.strftime(\"%Y-%m-%d %H:%M:%S\"), dataloader.dataset._flip,\n # dataloader.dataset._rotate, count, num_batch, time_spent))\n # imshow(count_map)\n return probs_map\n\ndef make_dataloader(args, cfg, flip='NONE', rotate='NONE'):\n batch_size = cfg['batch_size']\n dataloader = DataLoader(WSIStridedPatchDataset(args.wsi_path, args.mask_path,\n args.label_path,\n image_size=cfg['image_size'],\n normalize=True, flip=flip, rotate=rotate,\n level=args.level, sampling_stride=args.sampling_stride, roi_masking=args.roi_masking),\n batch_size=batch_size, num_workers=args.num_workers, drop_last=True)\n return dataloader\n\ndef run(args):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.GPU\n logging.basicConfig(level=logging.INFO)\n\n with open(args.cfg_path) as f:\n cfg = json.load(f)\n\n core_config = tf.compat.v1.ConfigProto()\n core_config.gpu_options.allow_growth = True \n session = tf.compat.v1.Session(config=core_config) \n 
tf.compat.v1.keras.backend.set_session(session)\n\n # Instantiate the base model (or \"template\" model).\n # We recommend doing this with under a CPU device scope,\n # so that the model's weights are hosted on CPU memory.\n # Otherwise they may end up hosted on a GPU, which would\n # complicate weight sharing.\n # with tf.device('/cpu:0'):\n model = unet_densenet121((None, None), weights=None)\n \n model.load_weights(args.model_path)\n print (\"Loaded Model Weights\")\n\n save_dir = os.path.dirname(args.out_csv_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n dataloader = make_dataloader(\n args, cfg, flip='NONE', rotate='NONE')\n probs_map = get_probs_map(model, dataloader)\n\n\n with open(args.out_csv_path, 'w') as out:\n csv_out = csv.writer(out)\n for row in probs_map:\n csv_out.writerow(row) \n\ndef main():\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.compat.v1.ConfigProto",
"tensorflow.keras.utils.multi_gpu_model",
"numpy.random.seed",
"numpy.asarray",
"tensorflow.compat.v1.keras.backend.set_session",
"tensorflow.compat.v1.Session",
"numpy.logical_and",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
troymoench/naccbis | [
"87fd79a79e9ae189236781fa4682811c1da6480f"
] | [
"naccbis/Cleaning/CleanTeamOffense.py"
] | [
"\"\"\" This script is used to clean team offense data and load into database \"\"\"\n# Standard library imports\nimport logging\nfrom pathlib import Path\n\n# Third party imports\nimport pandas as pd\n\n# Local imports\nfrom naccbis.Common import utils, metrics\nfrom naccbis.Common.splits import Split\n\n\nclass TeamOffenseETL:\n \"\"\"ETL class for team offense\"\"\"\n\n CSV_DIR = Path(\"csv/\")\n\n def __init__(\n self,\n year: int,\n split: Split,\n load_db: bool,\n conn: object,\n inseason: bool = False,\n ) -> None:\n self.year = year\n self.split = split\n self.load_db = load_db\n self.conn = conn\n self.inseason = inseason\n self.data: pd.DataFrame\n\n def extract(self) -> None:\n table = \"raw_team_offense_{}\".format(self.split)\n if self.inseason:\n table += \"_inseason\"\n logging.info(\"Reading data from %s\", table)\n self.data = pd.read_sql_table(table, self.conn)\n logging.info(\"Read %s records from %s\", len(self.data), table)\n if self.year:\n self.data = self.data[self.data[\"season\"] == self.year]\n\n def transform(self) -> None:\n self.data = metrics.basic_offensive_metrics(self.data)\n columns = [\n \"name\",\n \"season\",\n \"g\",\n \"pa\",\n \"ab\",\n \"r\",\n \"h\",\n \"x2b\",\n \"x3b\",\n \"hr\",\n \"rbi\",\n \"bb\",\n \"so\",\n \"hbp\",\n \"tb\",\n \"xbh\",\n \"sf\",\n \"sh\",\n \"gdp\",\n \"sb\",\n \"cs\",\n \"go\",\n \"fo\",\n \"go_fo\",\n \"hbp_p\",\n \"bb_p\",\n \"so_p\",\n \"iso\",\n \"babip\",\n \"avg\",\n \"obp\",\n \"slg\",\n \"ops\",\n \"sar\",\n ]\n if self.inseason:\n columns.insert(2, \"date\")\n self.data = self.data[columns]\n\n def load(self) -> None:\n table = f\"team_offense_{self.split}\"\n if self.inseason:\n table += \"_inseason\"\n\n if self.load_db:\n logging.info(\"Loading data into database\")\n utils.db_load_data(\n self.data, table, self.conn, if_exists=\"append\", index=False\n )\n else:\n filename = f\"{table}.csv\"\n logging.info(\"Dumping to csv\")\n self.data.to_csv(self.CSV_DIR / filename, index=False)\n\n def run(self) -> None:\n logging.info(\"Running %s\", type(self).__name__)\n logging.info(\"Year: %s Split: %s Load: %s\", self.year, self.split, self.load_db)\n self.extract()\n self.transform()\n self.load()\n"
] | [
[
"pandas.read_sql_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rafaelsanin/esctoolbox-python | [
"c5aed852ba39ee24ffb348efd864e3c6812265e2"
] | [
"dyn_model/funcs.py"
] | [
"\"\"\"\nFunctions used by the dyn_model\n\"\"\"\n\n# Modules\n# ------------------------------------------------------------------------------\n\nimport ipdb\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import fminbound, nnls, minimize_scalar\nfrom scipy.signal import dlsim, dlti\nfrom models import ModelDyn\n\n# Functions\n# ------------------------------------------------------------------------------\n\ndef OCVfromSOCtemp(soc, temp, model):\n \"\"\" OCV function \"\"\"\n SOC = model.SOC # force to be column vector\n OCV0 = model.OCV0 # force to be column vector\n OCVrel = model.OCVrel # force to be column vector\n\n # if soc is scalar then make it a vector\n soccol = np.asarray(soc)\n if soccol.ndim == 0:\n soccol = soccol[None]\n\n tempcol = temp * np.ones(np.size(soccol))\n\n diffSOC = SOC[1] - SOC[0] # spacing between SOC points - assume uniform\n ocv = np.zeros(np.size(soccol)) # initialize output to zero\n I1, = np.where(soccol <= SOC[0]) # indices of socs below model-stored data\n I2, = np.where(soccol >= SOC[-1]) # and of socs above model-stored data\n I3, = np.where((soccol > SOC[0]) & (soccol < SOC[-1])) # the rest of them\n I6 = np.isnan(soccol) # if input is \"not a number\" for any locations\n\n # for voltages less than lowest stored soc datapoint, extrapolate off\n # low end of table\n if I1.any():\n dv = (OCV0[1] + tempcol*OCVrel[1]) - (OCV0[0] + tempcol*OCVrel[0])\n ocv[I1] = (soccol[I1] - SOC[0])*dv[I1]/diffSOC + OCV0[0] + tempcol[I1]*OCVrel[0]\n\n # for voltages greater than highest stored soc datapoint, extrapolate off\n # high end of table\n if I2.any():\n dv = (OCV0[-1] + tempcol*OCVrel[-1]) - (OCV0[-2] + tempcol*OCVrel[-2])\n ocv[I2] = (soccol[I2] - SOC[-1])*dv[I2]/diffSOC + OCV0[-1] + tempcol[I2]*OCVrel[-1]\n\n # for normal soc range, manually interpolate (10x faster than \"interp1\")\n I4 = (soccol[I3] - SOC[0])/diffSOC # using linear interpolation\n I5 = np.floor(I4)\n I5 = I5.astype(int)\n I45 = I4 - I5\n omI45 = 1 - I45\n ocv[I3] = OCV0[I5]*omI45 + OCV0[I5+1]*I45\n ocv[I3] = ocv[I3] + tempcol[I3]*(OCVrel[I5]*omI45 + OCVrel[I5+1]*I45)\n ocv[I6] = 0 # replace NaN SOCs with zero voltage\n return ocv\n\n\ndef SISOsubid(y, u, n):\n \"\"\"\n Identify state-space \"A\" matrix from input-output data.\n y: vector of measured outputs\n u: vector of measured inputs\n n: number of poles in solution\n A: discrete-time state-space state-transition matrix.\n\n Theory from \"Subspace Identification for Linear Systems Theory - Implementation\n - Applications\" Peter Van Overschee / Bart De Moor (VODM) Kluwer Academic\n Publishers, 1996. Combined algorithm: Figure 4.8 page 131 (robust). Robust\n implementation: Figure 6.1 page 169.\n\n Code adapted from \"subid.m\" in \"Subspace Identification for Linear Systems\"\n toolbox on MATLAB CENTRAL file exchange, originally by Peter Van Overschee,\n Dec. 
1995\n \"\"\"\n \n ny = len(y)\n i = 2*n\n twoi = 4*n\n\n # Determine the number of columns in the Hankel matrices\n j = ny - twoi + 1\n\n # Make Hankel matrices Y and U\n Y = np.zeros((twoi, j))\n U = np.zeros((twoi, j))\n\n for k in range(2*i):\n Y[k] = y[k:k+j]\n U[k] = u[k:k+j]\n\n # Compute the R factor\n UY = np.concatenate((U, Y)) # combine U and Y into one array\n _, r = np.linalg.qr(UY.T) # QR decomposition\n R = r.T # transpose of upper triangle\n\n # STEP 1: Calculate oblique and orthogonal projections\n # ------------------------------------------------------------------\n\n Rf = R[-i:] # future outputs\n Rp = np.concatenate((R[:i], R[2*i:3*i])) # past inputs and outputs\n Ru = R[i:twoi, :twoi] # future inputs\n\n RfRu = np.linalg.lstsq(Ru.T, Rf[:, :twoi].T, rcond=None)[0].T\n RfRuRu = RfRu.dot(Ru)\n tm1 = Rf[:, :twoi] - RfRuRu\n tm2 = Rf[:, twoi:4*i]\n Rfp = np.concatenate((tm1, tm2), axis=1) # perpendicular future outputs\n\n RpRu = np.linalg.lstsq(Ru.T, Rp[:, :twoi].T, rcond=None)[0].T\n RpRuRu = RpRu.dot(Ru)\n tm3 = Rp[:, :twoi] - RpRuRu\n tm4 = Rp[:, twoi:4*i]\n Rpp = np.concatenate((tm3, tm4), axis=1) # perpendicular past inputs and outputs\n\n # The oblique projection is computed as (6.1) in VODM, page 166.\n # obl/Ufp = Yf/Ufp * pinv(Wp/Ufp) * (Wp/Ufp)\n # The extra projection on Ufp (Uf perpendicular) tends to give\n # better numerical conditioning (see algo on VODM page 131)\n\n # Funny rank check (SVD takes too long)\n # This check is needed to avoid rank deficiency warnings\n\n nmRpp = np.linalg.norm(Rpp[:, 3*i-3:-i], ord='fro')\n if nmRpp < 1e-10:\n # oblique projection as (Rfp*pinv(Rpp')') * Rp\n Ob = Rfp.dot(np.linalg.pinv(Rpp.T).T).dot(Rp)\n else:\n # oblique projection as (Rfp/Rpp) * Rp\n Ob = (np.linalg.lstsq(Rpp.T, Rfp.T, rcond=None)[0].T).dot(Rp)\n\n # STEP 2: Compute weighted oblique projection and its SVD\n # Extra projection of Ob on Uf perpendicular\n # ------------------------------------------------------------------\n\n ObRu = np.linalg.lstsq(Ru.T, Ob[:, :twoi].T, rcond=None)[0].T\n ObRuRu = ObRu.dot(Ru)\n tm5 = Ob[:, :twoi] - ObRuRu\n tm6 = Ob[:, twoi:4*i]\n WOW = np.concatenate((tm5, tm6), axis=1)\n\n U, S, _ = np.linalg.svd(WOW, full_matrices=False)\n ss = S # In np.linalg.svd S is already the diagonal, generally ss = diag(S)\n\n # STEP 3: Partitioning U into U1 and U2 (the latter is not used)\n # ------------------------------------------------------------------\n\n U1 = U[:, :n] # determine U1\n\n # STEP 4: Determine gam = Gamma(i) and gamm = Gamma(i-1)\n # ------------------------------------------------------------------\n\n gam = U1 @ np.diag(np.sqrt(ss[:n]))\n gamm = gam[0:(i-1),:]\n gam_inv = np.linalg.pinv(gam) # pseudo inverse of gam\n gamm_inv = np.linalg.pinv(gamm) # pseudo inverse of gamm\n\n # STEP 5: Determine A matrix (also C, which is not used)\n # ------------------------------------------------------------------\n\n tm7 = np.concatenate((gam_inv @ R[3*i:4*i, 0:3*i], np.zeros((n,1))), axis=1)\n tm8 = R[i:twoi, 0:3*i+1]\n Rhs = np.vstack((tm7, tm8))\n tm9 = gamm_inv @ R[3*i+1:4*i, 0:3*i+1]\n tm10 = R[3*i:3*i+1, 0:3*i+1]\n Lhs = np.vstack((tm9, tm10))\n sol = np.linalg.lstsq(Rhs.T, Lhs.T, rcond=None)[0].T # solve least squares for [A; C]\n A = sol[0:n, 0:n] # extract A\n\n return A\n\n\ndef minfn(data, model, theTemp, doHyst):\n \"\"\"\n Using an assumed value for gamma (already stored in the model), find optimum\n values for remaining cell parameters, and compute the RMS error between true\n and predicted cell voltage\n \"\"\"\n\n 
alltemps = [d.temp for d in data]\n ind, = np.where(np.array(alltemps) == theTemp)[0]\n\n G = abs(model.GParam[ind])\n\n Q = abs(model.QParam[ind])\n eta = abs(model.etaParam[ind])\n RC = abs(model.RCParam[ind])\n numpoles = len(RC)\n\n ik = data[ind].s1.current.copy()\n vk = data[ind].s1.voltage.copy()\n tk = np.arange(len(vk))\n etaik = ik.copy()\n etaik[ik < 0] = etaik[ik < 0] * eta\n\n hh = 0*ik\n sik = 0*ik\n fac = np.exp(-abs(G * etaik/(3600*Q)))\n\n for k in range(1, len(ik)):\n hh[k] = (fac[k-1]*hh[k-1]) - ((1-fac[k-1])*np.sign(ik[k-1]))\n sik[k] = np.sign(ik[k])\n if abs(ik[k]) < Q/100:\n sik[k] = sik[k-1]\n\n # First modeling step: Compute error with model = OCV only\n vest1 = data[ind].OCV\n verr = vk - vest1\n\n # Second modeling step: Compute time constants in \"A\" matrix\n y = -np.diff(verr)\n u = np.diff(etaik)\n A = SISOsubid(y, u, numpoles)\n\n # Modify results to ensure real, preferably distinct, between 0 and 1\n\n eigA = np.linalg.eigvals(A)\n eigAr = eigA + 0.001 * np.random.normal(loc=0.0, scale=1.0, size=eigA.shape)\n eigA[eigA != np.conj(eigA)] = abs(eigAr[eigA != np.conj(eigA)]) # Make sure real\n eigA = np.real(eigA) # Make sure real\n eigA[eigA<0] = abs(eigA[eigA<0]) # Make sure in range \n eigA[eigA>1] = 1 / eigA[eigA>1]\n RCfact = np.sort(eigA)\n RCfact = RCfact[-numpoles:]\n RC = -1 / np.log(RCfact)\n\n # Compute RC time constants as Plett's Matlab ESCtoolbox \n # nup = numpoles\n # while 1:\n # A = SISOsubid(y, u, nup)\n\n # # Modify results to ensure real, preferably distinct, between 0 and 1\n # eigA = np.linalg.eigvals(A)\n # eigA = np.real(eigA[eigA == np.conj(eigA)]) # Make sure real\n # eigA = eigA[(eigA>0) & (eigA<1)] # Make sure in range \n # okpoles = len(eigA)\n # nup = nup + 1\n # if okpoles >= numpoles:\n # break\n # # print(nup)\n\n # RCfact = np.sort(eigA)\n # RCfact = RCfact[-numpoles:]\n # RC = -1 / np.log(RCfact)\n\n # Simulate the R-C filters to find R-C currents\n stsp = dlti(np.diag(RCfact), np.vstack(1-RCfact), np.eye(numpoles), np.zeros((numpoles, 1))) \n [tout, vrcRaw, xout] = dlsim(stsp, etaik)\n\n # Third modeling step: Hysteresis parameters\n if doHyst:\n H = np.column_stack((hh, sik, -etaik, -vrcRaw))\n W = nnls(H, verr)\n M = W[0][0]\n M0 = W[0][1]\n R0 = W[0][2]\n Rfact = W[0][3:].T\n else:\n H = np.column_stack((-etaik, -vrcRaw))\n W = np.linalg.lstsq(H,verr, rcond=None)[0]\n M = 0\n M0 = 0\n R0 = W[0]\n Rfact = W[1:].T\n\n idx, = np.where(np.array(model.temps) == data[ind].temp)[0]\n model.R0Param[idx] = R0\n model.M0Param[idx] = M0\n model.MParam[idx] = M\n model.RCParam[idx] = RC.T\n model.RParam[idx] = Rfact.T\n\n vest2 = vest1 + M*hh + M0*sik - R0*etaik - vrcRaw @ Rfact.T\n verr = vk - vest2\n\n # plot voltages\n plt.figure(1)\n plt.plot(tk[::10]/60, vk[::10], label='voltage')\n plt.plot(tk[::10]/60, vest1[::10], label='vest1 (OCV)')\n plt.plot(tk[::10]/60, vest2[::10], label='vest2 (DYN)')\n plt.xlabel('Time (min)')\n plt.ylabel('Voltage (V)')\n plt.title(f'Voltage and estimates at T = {data[ind].temp} C')\n plt.legend(loc='best', numpoints=1)\n #plt.show()\n\n # plot modeling errors\n plt.figure(2)\n plt.plot(tk[::10]/60, verr[::10], label='verr')\n plt.xlabel('Time (min)')\n plt.ylabel('Error (V)')\n plt.title(f'Modeling error at T = {data[ind].temp} C')\n #plt.show()\n\n # Compute RMS error only on data roughly in 5% to 95% SOC\n v1 = OCVfromSOCtemp(0.95, data[ind].temp, model)[0]\n v2 = OCVfromSOCtemp(0.05, data[ind].temp, model)[0]\n N1 = np.where(vk < v1)[0][0]\n N2 = np.where(vk < v2)[0][0]\n\n rmserr = 
np.sqrt(np.mean(verr[N1:N2]**2))\n cost = np.sum(rmserr)\n print(f'RMS error = {cost*1000:.2f} mV')\n\n return cost, model\n\n\ndef optfn(x, data, model, theTemp, doHyst):\n \"\"\"\n This minfn works for the enhanced self-correcting cell model\n \"\"\"\n\n idx, = np.where(np.array(model.temps) == theTemp)\n model.GParam[idx] = abs(x)\n\n cost, _ = minfn(data, model, theTemp, doHyst)\n return cost\n\n\ndef processDynamic(data, modelocv, numpoles, doHyst):\n \"\"\"\n Technical note: PROCESSDYNAMIC assumes that specific Arbin test scripts have\n been executed to generate the input files. \"makeMATfiles.m\" converts the raw\n Excel data files into \"MAT\" format where the MAT files have fields for time,\n step, current, voltage, chgAh, and disAh for each script run.\n\n The results from three scripts are required at every temperature.\n The steps in each script file are assumed to be:\n Script 1 (thermal chamber set to test temperature):\n Step 1: Rest @ 100% SOC to acclimatize to test temperature\n Step 2: Discharge @ 1C to reach ca. 90% SOC\n Step 3: Repeatedly execute dynamic profiles (and possibly intermediate\n rests) until SOC is around 10%\n Script 2 (thermal chamber set to 25 degC):\n Step 1: Rest ca. 10% SOC to acclimatize to 25 degC\n Step 2: Discharge to min voltage (ca. C/3)\n Step 3: Rest\n Step 4: Constant voltage at vmin until current small (ca. C/30)\n Steps 5-7: Dither around vmin\n Step 8: Rest\n Script 3 (thermal chamber set to 25 degC):\n Step 2: Charge @ 1C to max voltage\n Step 3: Rest\n Step 4: Constant voltage at vmax until current small (ca. C/30)\n Steps 5-7: Dither around vmax\n Step 8: Rest\n\n All other steps (if present) are ignored by PROCESSDYNAMIC. The time step\n between data samples must be uniform -- we assume a 1s sample period in this\n code.\n\n The inputs:\n - data: An array, with one entry per temperature to be processed.\n One of the array entries must be at 25 degC. The fields of \"data\" are:\n temp (the test temperature), script1, script 2, and script 3, where the\n latter comprise data collected from each script. 
The sub-fields of\n these script structures that are used by PROCESSDYNAMIC are the\n vectors: current, voltage, chgAh, and disAh\n - model: The output from processOCV, comprising the OCV model\n - numpoles: The number of R-C pairs in the model\n - doHyst: 0 if no hysteresis model desired; 1 if hysteresis desired\n\n The output:\n - model: A modified model, which now contains the dynamic fields filled in.\n \"\"\"\n\n # used by minimize_scalar later on\n options = {\n 'xatol': 1e-08, \n 'maxiter': 1e5, \n 'disp': 0\n }\n\n # Step 1: Compute capacity and coulombic efficiency for every test\n # ------------------------------------------------------------------\n\n alltemps = [d.temp for d in data]\n alletas = np.zeros(len(alltemps))\n allQs = np.zeros(len(alltemps))\n\n ind25, = np.where(np.array(alltemps) == 25)[0]\n not25, = np.where(np.array(alltemps) != 25)\n\n k = ind25\n\n totDisAh = data[k].s1.disAh[-1] + data[k].s2.disAh[-1] + data[k].s3.disAh[-1]\n totChgAh = data[k].s1.chgAh[-1] + data[k].s2.chgAh[-1] + data[k].s3.chgAh[-1]\n eta25 = totDisAh/totChgAh\n data[k].eta = eta25\n alletas[k] = eta25\n data[k].s1.chgAh = data[k].s1.chgAh * eta25\n data[k].s2.chgAh = data[k].s2.chgAh * eta25\n data[k].s3.chgAh = data[k].s3.chgAh * eta25\n\n Q25 = data[k].s1.disAh[-1] + data[k].s2.disAh[-1] - data[k].s1.chgAh[-1] - data[k].s2.chgAh[-1]\n data[k].Q = Q25\n allQs[k] = Q25\n\n eta25 = np.mean(alletas[ind25])\n\n for k in not25:\n data[k].s2.chgAh = data[k].s2.chgAh*eta25\n data[k].s3.chgAh = data[k].s3.chgAh*eta25\n eta = (data[k].s1.disAh[-1] + data[k].s2.disAh[-1] + data[k].s3.disAh[-1] - data[k].s2.chgAh[-1] - data[k].s3.chgAh[-1])/data[k].s1.chgAh[-1]\n\n data[k].s1.chgAh = eta*data[k].s1.chgAh\n data[k].eta = eta\n alletas[k] = eta\n\n Q = data[k].s1.disAh[-1] + data[k].s2.disAh[-1] - data[k].s1.chgAh[-1] - data[k].s2.chgAh[-1]\n data[k].Q = Q\n allQs[k] = Q\n\n modeldyn = ModelDyn()\n modeldyn.temps = alltemps\n modeldyn.etaParam = alletas\n modeldyn.QParam = allQs\n\n # Step 2: Compute OCV for \"discharge portion\" of test\n # ------------------------------------------------------------------\n\n for k, _ in enumerate(data):\n etaParam = modeldyn.etaParam[k]\n etaik = data[k].s1.current.copy()\n etaik[etaik < 0] = etaParam*etaik[etaik < 0]\n data[k].Z = 1 - np.cumsum(etaik) * 1/(data[k].Q * 3600)\n data[k].OCV = OCVfromSOCtemp(data[k].Z, alltemps[k], modelocv)\n\n # Step 3: Now, optimize!\n # ------------------------------------------------------------------\n\n modeldyn.GParam = np.zeros(len(modeldyn.temps)) # gamma hysteresis parameter\n modeldyn.M0Param = np.zeros(len(modeldyn.temps)) # M0 hysteresis parameter\n modeldyn.MParam = np.zeros(len(modeldyn.temps)) # M hysteresis parameter\n modeldyn.R0Param = np.zeros(len(modeldyn.temps)) # R0 ohmic resistance parameter\n modeldyn.RCParam = np.zeros((len(modeldyn.temps), numpoles)) # time constant\n modeldyn.RParam = np.zeros((len(modeldyn.temps), numpoles)) # Rk\n\n modeldyn.SOC = modelocv.SOC # copy SOC values from OCV model\n modeldyn.OCV0 = modelocv.OCV0 # copy OCV0 values from OCV model\n modeldyn.OCVrel = modelocv.OCVrel # copy OCVrel values from OCV model\n\n for theTemp in range(len(modeldyn.temps)):\n temp = modeldyn.temps[theTemp]\n print('Processing temperature', temp, 'C')\n\n if doHyst:\n g = abs(minimize_scalar(optfn, bounds=(1, 250), args=(data, modeldyn, temp, doHyst), method='bounded', options=options).x)\n print('g =', g)\n\n else:\n modeldyn.GParam[theTemp] = 0\n theGParam = 0 \n optfn(theGParam, data, modeldyn, temp, 
doHyst)\n return modeldyn \n"
] | [
[
"numpy.diag",
"matplotlib.pyplot.legend",
"numpy.linalg.eigvals",
"numpy.sqrt",
"numpy.asarray",
"numpy.cumsum",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.linalg.qr",
"numpy.where",
"numpy.linalg.svd",
"numpy.eye",
"scipy.optimize.minimize_scalar",
"numpy.real",
"numpy.size",
"numpy.diff",
"numpy.column_stack",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.title",
"numpy.isnan",
"scipy.signal.dlsim",
"numpy.linalg.lstsq",
"numpy.floor",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.conj",
"scipy.optimize.nnls",
"numpy.linalg.norm",
"numpy.sort",
"numpy.sign",
"numpy.linalg.pinv",
"numpy.random.normal",
"matplotlib.pyplot.xlabel",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
KaylaBaum/astr-119-hw-1 | [
"1e01f8449d35c927d969086ff713e3e280e2bdf7"
] | [
"data_types.py"
] | [
"import numpy as np\t\t#import numpy library\r\n\r\n#integers\r\n\r\ni = 10\t\t\t#integer\r\nprint(type(i))\t#print out the data type of i\r\n\r\na_i = np.zeros(i,dtype=int)\t#declare an array of ints\r\nprint(type(a_i))\t\t\t#will return ndarray\r\nprint(type(a_i[0]))\t\t\t#will return int64\r\n\r\n#floats\r\n\r\nx = 119.0\t\t#floating point number\r\nprint(type(x))\t#print out the data type of x\r\n\r\ny = 1.19e2\t\t#float 119 in sci notation\r\nprint(type(y))\t#print out the data type of y\r\n\r\nz = np.zeros(i,dtype=float)\t#declare array of floats\r\nprint(type(z))\t\t\t\t#will return nd array\r\nprint(type(z[0]))\t\t\t#will return float64\r\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DistractionCrab/MachineLearningExamples | [
"9bb05bc0ba0c6b4c147ec70f557b9ae981df46fe"
] | [
"ml/reinforcement/__init__.py"
] | [
"import ml\nimport gym\nimport cv2\nimport operator\nimport random\nimport torch\nimport torchvision\nimport numpy as np\nimport functools as ftools\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.datasets as datasets\n\nfrom collections import deque\n\nclass RLModel:\n\tdef train(self):\n\t\tcriterion = self.criterion()\n\t\toptimizer = self.optimizer()\n\n\t\tfor e in range(self.epochs):\n\t\t\tprint(f\"--------- Epoch {e} -----------\")\n\t\t\tself.model.train()\n\t\t\trs = self._train_epoch(criterion, optimizer)\t\t\t\n\t\t\tprint(f'- Average Reward: {rs.mean()}')\n\t\t\tself.test()\n\n\tdef _train_epoch(self, criterion, optimizer):\n\t\t\"\"\"\n\t\tRuns an epoch of training. All environments will be reset after this returns.\n\t\t\"\"\"\n\t\traise NotImplementedError('Training not implemented for particular model.')\n\n\n\tdef test(self):\n\t\tdone = False\n\t\trwrd = 0.\n\t\tself.model.eval()\n\t\twhile not done:\n\t\t\taction = self.model(self.model.env.obsv).argmax().item()\n\t\t\t(r, done) = self.model.env.step(action)\n\t\t\trwrd += 1\n\t\tprint(f'Total Evaluation Reward: {rwrd}')\n\n\t@property\n\tdef model(self):\n\t\traise NotImplementedError('Subclass must define their model to be used.')\n\t\n\t@property\n\tdef epochs(self):\n\t\treturn 6\n\n\t@property\n\tdef learning_rate(self):\n\t\treturn 0.001\n\n\t@property\n\tdef regularization_beta(self):\n\t\treturn 1e-5\n\t\n\tdef criterion(self):\n\t\treturn torch.nn.MSELoss()\n\t\n\tdef optimizer(self):\n\t\treturn torch.optim.Adam(\n\t\t\tself.model.parameters(), \n\t\t\tlr=self.learning_rate)\n\t\n\nclass CartpoleV0:\n\tdef __init__(self, render=False):\n\t\tself.__env = gym.make('CartPole-v0')\n\t\tself.__obsv = self.__env.reset()\n\t\tself.__done = False\n\t\t\n\tdef reset(self):\n\t\tself.__done = False\n\t\tself.__obsv = self.__env.reset()\n\n\t@property\n\tdef env(self):\n\t\treturn self\n\n\t@property\n\tdef obsv(self):\n\t\treturn torch.from_numpy(self.__obsv.astype('float32'))\n\t\n\t@property\n\tdef num_act(self):\n\t\treturn 2\n\n\t@property\n\tdef obsv_shape(self):\n\t\treturn (4,)\n\t\n\t@property\n\tdef done(self):\n\t\treturn self.__done\n\t\n\tdef seed(self, val):\n\t\tself.__env.seed(val)\n\n\tdef step(self, action):\n\t\t(self.__obsv, reward, self.__done, _) = self.__env.step(action)\n\t\treturn (reward, self.__done)\n\nclass MsPacman:\n\tdef __init__(self, render=False):\n\t\tself.__env = gym.make('MsPacman-v0')\n\t\tself.__env.frame_skip = 4\n\t\tself.__render = render\n\t\tself.reset()\n\n\t\tif render:\n\t\t\tpygame.init()\n\t\t\tself.__env.render()\n\t\t\n\tdef reset(self):\n\t\tself.__done = False\n\t\tself.__obsv = self.__process(self.__env.reset())\n\t\tself.__frames = deque([self.__obsv]*self.frame_save, maxlen=self.frame_save)\n\t\tself.__rwrd = deque([0.0]*self.frame_save, maxlen=self.frame_save)\n\n\t@property\n\tdef frame_save(self):\n\t\treturn 4\n\t\n\n\t@property\n\tdef env(self):\n\t\treturn self\n\n\t@property\n\tdef obsv(self):\n\t\tarray = np.stack(self.__frames).astype('float32')\n\t\ttensor = torch.from_numpy(array)\n\t\treturn torch.reshape(tensor, (1, 4, 84, 84))\n\n\t@property\n\tdef rwrd(self):\n\t\treturn sum(self.__rwrd)\t\n\t\n\t@property\n\tdef num_act(self):\n\t\treturn 8\n\n\t@property\n\tdef obsv_shape(self):\n\t\treturn (84, 84, 4)\n\t\n\t@property\n\tdef resize_shape(self):\n\t\treturn (84, 84)\n\t\n\n\t@property\n\tdef done(self):\n\t\treturn self.__done\n\t\n\tdef seed(self, val):\n\t\tself.__env.seed(val)\n\n\tdef step(self, action):\n\t\t(obsv, 
reward, done, _) = self.__env.step(action)\n\t\tself.__obsv = self.__process(obsv)\n\n\t\tself.__frames.append(self.__obsv)\n\t\tself.__rwrd.append(reward)\n\n\t\treturn (self.rwrd, done)\n\n\tdef __process(self, obsv):\n\t\treturn cv2.cvtColor(cv2.resize(obsv, self.resize_shape), cv2.COLOR_RGB2GRAY)\n\n\nif __name__ == '__main__':\n\tgame = MsPacman()\n\tdone = False\n\twhile not done:\n\t\t(_, done) = game.step(0)"
] | [
[
"torch.reshape",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SergejVolkov/SR_base | [
"285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0",
"285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0"
] | [
"crop_figures.py",
"models/Algo.py"
] | [
"import cv2\nimport os\nimport numpy as np\n\nINIT_SIZE = 24\nFONT_WIDTH_FACTOR = 0.8\n\nc_h, c_w = 128, 128\nscale = 3\nimg_num = 7\nmax_row_w = 6\ni_size = (1920, 1080)\nstart_index = len('The_SoulTaker_01_a_sr_')\nfont_size = 24\nheader_h = font_size * 3\n\ncoords = [(780, 966), (579, 762), (482, 497),\n (385, 968), (485, 1000), (890, 1680), (420, 1250)]\n\n# coords = [(548, 1037), (482, 497), (442, 361), (170, 1220), (354, 630)]\n\nimg_dir = 'D:/Foma/Python/SR_base/data/output'\nsave_dir = 'D:/Foma/Documents/P&P/media/Projects/2 Term, Video Enhance CP2/images'\n\nsc_h = int(c_h * scale)\nsc_w = int(c_w * scale)\n\nimgs = [name for name in os.listdir(img_dir) if\n name.lower().endswith('.png') or\n name.lower().endswith('.jpg') or\n name.lower().endswith('.jpeg') or\n name.lower().endswith('.gif') or\n name.lower().endswith('.bmp')]\nimgs.sort()\n\nimg_num = len(imgs) // img_num\n\ndivide = 1\nwhile (divide + 1) * img_num <= max_row_w:\n divide += 1\n\nheight = len(imgs) // img_num\nif height % divide != 0:\n height = height // divide + 1\nelse:\n height = height // divide\n\nout_img = np.ones((sc_h * height + header_h, sc_w * img_num * divide, 3), dtype=np.uint8) * 255\n\nfont = cv2.FONT_HERSHEY_COMPLEX\nfor i in range(img_num * divide):\n name = imgs[i % img_num][start_index:][:-4]\n cv2.putText(\n out_img, name,\n (i * sc_w + int(sc_w - len(name) * font_size * FONT_WIDTH_FACTOR) // 2, font_size * 2),\n font, font_size / INIT_SIZE, (0, 0, 0), 1, cv2.LINE_AA\n )\n\nfor i in range(len(imgs) // img_num):\n div_idx = i % height\n column = i // height\n for j in range(img_num):\n img = cv2.imread(img_dir + '/' + imgs[i * img_num + j])\n img = cv2.resize(img, i_size, interpolation=cv2.INTER_CUBIC)\n crop = img[coords[i][0]:coords[i][0] + c_h, coords[i][1]:coords[i][1] + c_w, :]\n crop = cv2.resize(crop, (sc_w, sc_h), interpolation=cv2.INTER_LANCZOS4)\n out_img[sc_h * div_idx + header_h:sc_h * (div_idx + 1) + header_h,\n sc_w * j + column * sc_w * img_num:sc_w * (j + 1) + column * sc_w * img_num, :] = crop\n\n\ncv2.imwrite(save_dir + '/figure1.png', out_img)\n",
"import dl_modules.dataset as ds\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass Bicubic(nn.Module):\n def __init__(self):\n super(Bicubic, self).__init__()\n self.eval()\n\n def forward(self, x):\n return F.interpolate(x, scale_factor=(ds.scale, ds.scale), mode='bicubic', align_corners=True)\n"
] | [
[
"numpy.ones"
],
[
"torch.nn.functional.interpolate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qua-platform/qcodes-driver | [
"0fa0035705a457c68fa88e8c74bac2bb492d4373"
] | [
"demo.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport qcodes as qc\nfrom qcodes import (\n Measurement,\n load_or_create_experiment,\n)\nfrom qcodes.dataset.plotting import plot_dataset\n\nfrom qcodes_driver.driver import *\n\npulse_len = 1000\nconfig = {\n \"version\": 1,\n \"controllers\": {\n \"con1\": {\n \"type\": \"opx1\",\n \"analog_outputs\": {\n 1: {\"offset\": +0.0},\n 2: {\"offset\": +0.0},\n },\n \"analog_inputs\": {\n 1: {\"offset\": +0.0},\n },\n }\n },\n \"elements\": {\n \"qe1\": {\n \"mixInputs\": {\"I\": (\"con1\", 1), \"Q\": (\"con1\", 2)},\n \"outputs\": {\"output1\": (\"con1\", 1)},\n \"intermediate_frequency\": 5e6,\n \"operations\": {\"playOp\": \"constPulse\", \"readout\": \"readoutPulse\"},\n \"time_of_flight\": 180,\n \"smearing\": 0,\n },\n },\n \"pulses\": {\n \"constPulse\": {\n \"operation\": \"control\",\n \"length\": pulse_len, # in ns\n \"waveforms\": {\"I\": \"const_wf\", \"Q\": \"const_wf\"},\n },\n \"readoutPulse\": {\n \"operation\": \"measure\",\n \"length\": pulse_len,\n \"waveforms\": {\"I\": \"const_wf\", \"Q\": \"const_wf\"},\n \"digital_marker\": \"ON\",\n \"integration_weights\": {\"x\": \"xWeights\", \"y\": \"yWeights\"},\n },\n },\n \"waveforms\": {\n \"const_wf\": {\"type\": \"constant\", \"sample\": 0.2},\n },\n \"digital_waveforms\": {\n \"ON\": {\"samples\": [(1, 0)]},\n },\n \"integration_weights\": {\n \"xWeights\": {\n \"cosine\": [1.0] * (pulse_len // 4),\n \"sine\": [0.0] * (pulse_len // 4),\n },\n \"yWeights\": {\n \"cosine\": [0.0] * (pulse_len // 4),\n \"sine\": [1.0] * (pulse_len // 4),\n },\n },\n}\n\nf_pts = 10\nvoltage_range = np.linspace(0, 10, 3)\nf_range = np.linspace(0, 100, f_pts)\nopx = OPX_SpectrumScan(config)\nopx.f_start(0)\nopx.f_stop(100)\nopx.sim_time(100000)\nopx.n_points(f_pts)\nstation = qc.Station()\nstation.add_component(opx)\nexp = load_or_create_experiment(\n experiment_name=\"my experiment\", sample_name=\"this sample\"\n)\n\nmeas = Measurement(exp=exp, station=station)\n\nmeas.register_parameter(opx.ext_v) # register the independent parameter\nmeas.register_parameter(\n opx.spectrum, setpoints=(opx.ext_v,)\n) # now register the dependent one\n\n\nwith meas.run() as datasaver:\n for v in voltage_range:\n opx.ext_v(v)\n # interact with external device here\n datasaver.add_result((opx.ext_v, v), (opx.spectrum, opx.spectrum()))\n\n dataset = datasaver.dataset\n\nplot_dataset(dataset)\n"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
heistermann/trmmlib | [
"b32cf623737285073e4c61bd0e01a0fe8b26c329"
] | [
"trmmlib/util.py"
] | [
"# -*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------\n# Name: util\n# Purpose:\n#\n# Authors: Maik Heistermann\n#\n# Created: 2015-11-6\n# Copyright: (c) Maik Heistermann\n# Licence: The MIT License\n#-------------------------------------------------------------------------------\n#!/usr/bin/env python\n\n\nimport numpy as np\nfrom scipy.spatial import cKDTree\n\n\ndef mask_from_bbox(x, y, bbox):\n \"\"\"Return index array based on spatial selection from a bounding box.\n \"\"\"\n ny, nx = x.shape\n \n ix = np.arange(x.size).reshape(x.shape)\n\n # Find bbox corners\n # Plant a tree\n tree = cKDTree(np.vstack((x.ravel(),y.ravel())).transpose())\n # find lower left corner index\n dists, ixll = tree.query([bbox[\"left\"], bbox[\"bottom\"]], k=1)\n ill, jll = np.array(np.where(ix==ixll))[:,0]\n ill = (ixll / nx)#-1\n jll = (ixll % nx)#-1\n # find lower left corner index\n dists, ixur = tree.query([bbox[\"right\"],bbox[\"top\"]], k=1)\n iur, jur = np.array(np.where(ix==ixur))[:,0]\n iur = (ixur / nx)#+1\n jur = (ixur % nx)#+1\n \n mask = np.repeat(False, ix.size).reshape(ix.shape)\n if iur>ill:\n iur += 1\n jur += 1\n mask[ill:iur,jll:jur] = True\n shape = (iur-ill, jur-jll)\n else:\n ill += 1\n jur += 1\n mask[iur:ill,jll:jur] = True\n shape = (ill-iur, jur-jll)\n \n return mask, shape\n \n# return ix[ill:iur,jll:jur].ravel() \n \n\ndef reduce_multipolygons(verts):\n \"\"\"\n \"\"\"\n for i, vert in enumerate(verts):\n if vert.ndim==1:\n # Multi-Polygons - keep only the largest polygon \n verts[i] = vert[np.argmax([len(subpoly) for subpoly in vert])]\n return verts\n \n \ndef make_ids_unique(verts, ids):\n \"\"\"Selects the longest polygon in case of duplicate IDs.\n \"\"\"\n ids = np.array(ids)\n mask = np.repeat(False, len(ids))\n for id in np.unique(ids):\n ix = np.where( ids==id)[0]\n if len(ix) > 1:\n # More than one key matching? Find largest matching polygon\n mask[ ix[np.argmax([len(verts[i]) for i in ix])] ] = True\n else:\n mask[ix[0]] = True\n return verts[mask], ids[mask]\n\n \n\n\n\nif __name__ == '__main__':\n \n pass"
] | [
[
"numpy.unique",
"numpy.arange",
"numpy.repeat",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bradyneal/Bias-Variance | [
"4c759d978f09475e7f2fbfaac3a6154f91a12a87"
] | [
"test_y_onehot.py"
] | [
"'''Returns the one hot vectors for the y label'''\n\nimport torch\nfrom torchvision import datasets, transforms\n\nMNIST_TEST_SIZE = 10000\nNUM_MNIST_CLASSES = 10\n\n\ndef get_test_y_onehot():\n # Return onehot matrix of test y labels\n\n test = datasets.MNIST('./data', train=False, download=True, transform=transforms.ToTensor())\n test_loader = torch.utils.data.DataLoader(test, batch_size=MNIST_TEST_SIZE)\n _, test_y = next(iter(test_loader))\n\n # get one-hot encoding (should be a separate function)\n test_y_onehot = torch.FloatTensor(MNIST_TEST_SIZE, NUM_MNIST_CLASSES)\n test_y_onehot.zero_()\n test_y_onehot.scatter_(1, test_y.unsqueeze(1), 1)\n test_y_onehot = test_y_onehot.cpu().numpy()\n return test_y_onehot\n"
] | [
[
"torch.FloatTensor",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
incognitoRepo/youtube-dl | [
"e9dbbbd87e200d37f15531973fe653c3573cfd0b"
] | [
"bin/fmtutil/dataframe.py"
] | [
"import sys\nimport ast\nimport re,os\nimport pandas as pd\nimport numpy as np\nimport itertools\nfrom fmtutil.row.parse_verbose_argvars import ParsedTuple\n\n\nfrom pathlib import Path\nfrom pdb import set_trace as st\nfrom typing import Dict, List, Any, Iterable\nfrom toolz.functoolz import compose_left\nfrom dataclasses import dataclass, field, InitVar\nfrom collections import OrderedDict, namedtuple\nfrom types import SimpleNamespace\nfrom functools import partial\nfrom textwrap import TextWrapper\nfrom beautifultable import BeautifulTable\nfrom itertools import accumulate\nfrom IPython.core import ultratb\nsys.excepthook = ultratb.VerboseTB()\nimport better_exceptions\n# better_exceptions.hook()\nimport traceback as tb\n# exc = sys.exc_info()\n# exc_tb = exc[2]\n# tb.print_exception(*exc)\n# tb.print_tb(exc_tb)\n\n# def colorizetxt(txt,bgc=None,fgc=None):\n# d = {\n# \"blu\":f'\\x1b[0;34m',\n# \"grn\":f'\\x1b[0;32m',\n# \"cyn\":f'\\x1b[0;36m'\n# }\n# reset = f'\\x1b[0m'\n# rv = f\"{d.get(bgc)}{txt}{reset}\"\n# return rv\n\ndef _format_snoop_datacell():\n ledger = {}\n space = \"\\u0020\"\n hard_coded_replacements = [\n ('/Users/alberthan/VSCodeProjects/vytd','vytd'),\n ('object at','@')\n ]\n d = {\n \"1\":f'|\\x1b[0;34m',\n \"2\":f'|\\x1b[0;32m',\n \"3\":f'|\\x1b[0;36m',\n \"4\":f'|\\x1b[0;33m'\n }\n\n def entry(cell):\n assert isinstance(cell,list) and isinstance(cell[0],str), f'{type(cell)=}{cell=}'\n cleaned_cell = [elm.strip() for elm in cell]\n paired_cell = [(elm.split(':=',1) if ':=' in elm else elm.split(':',1)) for elm in cleaned_cell]\n labeled_cell = {k.strip():v.strip() for k,v in paired_cell}\n fmtd_lst_o_str_nc, fmtd_lst_o_str_c = process_snoop_dct(labeled_cell,'nc'), process_snoop_dct(labeled_cell,'c')\n return (fmtd_lst_o_str_nc, fmtd_lst_o_str_c)\n\n implies_iterable = lambda v: v.startswith('[')\n def hard_coded_mods(v):\n og_v = v\n for hcr in hard_coded_replacements:\n v = v.replace(*hcr)\n return v\n\n def process_snoop_dct(cell:Dict,color:str):\n lst_o_fmtd_snoop_lines = []\n for indent_flag,(k,v) in enumerate(cell.items()):\n if not indent_flag:\n fmtd_kv_str = f\"{process_key(k,color)}: {process_val(v)}\"\n else:\n fmtd_kv_str = f\"{space*2}{process_key(k,color)}: {process_val(v)}\"\n lst_o_fmtd_snoop_lines.append(fmtd_kv_str)\n return lst_o_fmtd_snoop_lines\n\n def process_key(k:str,c:str):\n if c == 'nc':\n rv = f\"{k[1:]}\"\n else:\n rv = f\"{d[k[0]]}{(k[1:])}\\x1b[0m\"\n return rv\n\n def process_val(v:str):\n v = hard_coded_mods(v)\n if implies_iterable(v):\n rv = process_implied_iterable(v)\n else:\n rv = process_str(v)\n return rv\n\n def process_implied_iterable(v:str):\n viterlen = v.count(',')+1\n viter = v[1:-1].split(',')\n viter_first,viter_rest_len = viter[0], viterlen-1 # n_rest = number of the rest of args\n assert viterlen == len(viter), f\"{v=}, {viter=}\"\n rv = f\"{viter_first}, +({viter_rest_len})…,\"\n return rv\n\n def process_str(v:str):\n viterlen = v.count(',')\n viter_first,viter_rest_len = v[0], viterlen-1\n rv = f\"{v}, +({viterlen})...,\"\n return rv\n\n return entry\n\nformat_snoop_datacell = _format_snoop_datacell()\n\ndef _format_call_datacell():\n ledger = {}\n space = \"\\u0020\"\n hard_coded_replacements = [\n ('/Users/alberthan/VSCodeProjects/vytd','vytd'),\n ('object at','@')\n ]\n rgx1 = re.compile(r\"(?P<funkname>.+?)(?P<fargs>\\(.*\\))\")\n rgx2 = re.compile(r\"(?P<funkname>.+?):\\s(?P<fargs>\\(.+\\))\")\n d = {\n \"1\":f'\\x1b[0;34m',\n \"2\":f'\\x1b[0;32m',\n \"3\":f'\\x1b[0;36m',\n \"4\":f'\\x1b[0;33m'\n }\n\n def 
entry(cell,event_kind):\n assert isinstance(cell,list) and len(cell) == 1, f'{type(cell)=}{cell=}'\n cell0 = cell[0]\n if event_kind.strip() not in ['exception','call']:\n return cell0, cell0\n fmtd_str_nc,fmtd_str_c = (process_exc_event_cell(cell0) # TODO: add a var here to cause an error\n if ('exception' in event_kind)\n else process_call_event_cell(cell0)\n if (event_kind == 'call' or '=' in cell0)\n else st())\n return fmtd_str_nc,fmtd_str_c # str\n\n def process_call_event_cell(cell) -> str:\n funkname,fargs = rgx1.search(cell).groupdict().values()\n # ['argv=None)','argv=None)']\n nicely_prepared_cell_args = _split_logic_for_call_datacell(fargs)\n if fargs == '()':\n return f\"{funkname}{fargs}\",f\"{funkname}{fargs}\"\n cleaned_cell_args = [elm.strip() for elm in nicely_prepared_cell_args]\n paired_cell_args = [elm.split('=',1) for elm in cleaned_cell_args]\n labeled_cell_args = {k.strip():v.strip() for k,v in paired_cell_args}\n lst_o_keys_nc, lst_o_keys_c = process_cell_dct(labeled_cell_args,'nc'), process_cell_dct(labeled_cell_args,'c')\n joined_loks_nc,joined_loks_c = \",\".join(lst_o_keys_nc),\",\".join(lst_o_keys_c)\n fmtd_str_nc, fmtd_str_c = f\"{funkname}({joined_loks_nc})\",f\"{funkname}({joined_loks_c})\"\n return fmtd_str_nc, fmtd_str_c\n\n def process_cell_dct(cell:Dict,color:str) -> List[str]:\n lst_o_keys = []\n for i,(k,v) in enumerate(cell.items()):\n k = process_key(k,i,color)\n lst_o_keys.append(k)\n return lst_o_keys\n\n def process_key(k:str,i:int,c:str):\n if c == 'nc':\n if i == 1:\n rv = f\"{k}\"\n else:\n rv = f\"{k}\"\n else:\n if i == 1:\n rv = f\"{d['1']}{k}\\x1b[0m\"\n else:\n rv = f\"{d['2']}{k}\\x1b[0m\"\n return rv\n\n def process_exc_event_cell(cell) -> str:\n funkname,fargs = rgx2.search(cell).groupdict().values()\n # (<class 'OSError'>, OSError('dlopen(libc.so.6, 6): image not found'), <traceback object at 0x104bfa500>)\n split_cell = fargs[1:-1].split(',')\n exc_info_value = \"\".join(split_cell[1:3]) # sys.exc_info[1]\n fmtd_str_nc,fmtd_str_c = f\"{funkname}({exc_info_value})\",f\"{funkname}({exc_info_value})\"\n return fmtd_str_nc,fmtd_str_c\n\n def _split_logic_for_call_datacell(extracted_args_cell) -> List[str]:\n _split1 = extracted_args_cell.split(',')\n draft,record = [],[]\n for elm in _split1:\n if elm.startswith('('): elm = elm[1:]\n if elm.endswith(')'): elm = elm[:-1]\n if ('=') in elm:\n if draft:\n # join the current draft, append to record, start new draft with elm\n final = \"\".join(draft)\n record.append(final)\n draft = [elm]\n else:\n draft.append(elm)\n else:\n draft.append(elm)\n final = \"\".join(draft)\n record.append(final)\n return record\n\n\n sns = SimpleNamespace(entry=entry)\n return sns.entry\n\nformat_call_datacell = _format_call_datacell()\n\nclass LiterateUtil:\n def __init__(self):\n self.FormatLengths = namedtuple('FormatLengths', ['Index', 'filepath', 'line_number', 'event_kind', 'call_data', 'snoop_data'])\n self.FormatStrings = namedtuple('FormatStrings', ['Index', 'filepath', 'line_number', 'event_kind', 'call_data', 'snoop_data'])\n self.rgx1 = re.compile(r\"(\u001b|\\\\x1b)\\[\\d;\\d+m\") # 10\n self.rgx2 = re.compile(r\"(\u001b|\\\\x1b)\\[0m\") # 7\n self.lengths = [9,20,9,10,80,180]\n\n def typeset(self,dnc,dc):\n \"\"\"copy editor\n ['Index', 'filepath', 'line_number', 'event_kind', 'call_data', 'snoop_data']\n Arguments:\n strings {iterable[str]}\n Keyword Arguments:\n lengths {iterable[int]}\n lst_o_vals = 
[f\"{str(r.Index):9.9}\",f\"{r.filepath:>20.20}\",f\"{str(r.line_number):<9.9}\",f\"{r.event_kind:>10.10}\",f\"{next(caitrr,'^'):<80.80}\",f\"{next(snitrr,'&'):<180.180}\"]\n \"\"\"\n stringsnc,stringsc = dnc['strings'],dc['strings']\n # sc = [s for s in strings[:5]] + [process_snoop_]\n # rgxcnt = [len(self.rgx1.findall(str(s))) for s in strings]\n # lengths = [r * 17 + l for l,r in zip(lengths,rgxcnt)]\n flens = self.FormatLengths(*self.lengths)\n _borders = list(accumulate([l for l in flens])) # 9+20+9+80=118 (snp starts on 119)\n # if len(strings) != 6: st()\n fstrsnc,fstrsc = self.FormatStrings(*stringsnc),self.FormatStrings(*stringsc)\n padtrunc = [f\"{pad}.{trunc}\" for pad,trunc in zip(flens,flens)]\n fmtdlstnc = [f\"{str(s):<{pt}}\" for s,pt in zip(fstrsnc,padtrunc)]\n fmtdlstc = [f\"{str(s):<{pt}}\" for s,pt in zip(fstrsc,padtrunc)]\n fmtdstrnc,fmtdstrc = \"\".join(fmtdlstnc),\"\".join(fmtdlstnc)\n return fmtdstrnc,fmtdstrc\n\ndef _write_literate_style_df():\n util = LiterateUtil()\n space = \"\\u0020\"\n typeset_args_lengths = [9,20,9,10,80,180]\n\n def typeset_args1(r):\n ca_nc,ca_c = format_call_datacell(r.call_data,r.event_kind)\n sn_nc,sn_c = format_snoop_datacell(r.snoop_data)\n stringsnc = [r.Index, r.filepath, r.line_number, r.event_kind, ca_nc, sn_nc]\n stringsc = [r.Index, r.filepath, r.line_number, r.event_kind, ca_c, sn_c]\n dnc = {\n \"strings\": stringsnc,\n \"lengths\": typeset_args_lengths #[clrcnt(s)*15+l for s,l in zip(strings,typeset_args_lengths)]\n }\n dc = {\n \"strings\": stringsc,\n \"lengths\": typeset_args_lengths\n }\n return dnc,dc\n\n def typeset_args2(r):\n sn_nc,sn_c = format_snoop_datacell(r.snoop_data)\n stringsc = [space]*5 + [sn_c]\n stringsnc = [space]*5 + [sn_nc]\n dnc = {\n \"strings\": stringsnc,\n \"lengths\": typeset_args_lengths #[clrcnt(s)*15+l for s,l in zip(strings,typeset_args_lengths)]\n }\n dc = {\n \"strings\": stringsc,\n \"lengths\": typeset_args_lengths\n }\n return dnc,dc\n\n def entry(df,filename,color=True):\n return write_literate_style_df(df,filename,color=color)\n\n def write_literate_style_df(df,filename,color=True):\n _nclst,_clst = [],[]\n for rowtpl in df.itertuples():\n r = rowtpl\n # assert r._fields == ('Index', 'filepath', 'line_number', 'symbol', 'event_kind', 'call_data', 'code_data', 'snoop_data'), r\n lines_for_row = len(r.snoop_data) - 1\n _ncsublst,_csublst=[],[]\n dnc,dc = typeset_args1(r)\n fmtdstr_nc,fmtdstr_c = util.typeset(dnc,dc)\n _ncsublst.append(fmtdstr_nc);_csublst.append(fmtdstr_c)\n for line in range(lines_for_row):\n dnc,dc = typeset_args2(r)\n fmtdstr_nc,fmtdstr_c = util.typeset(dnc,dc)\n _ncsublst.append(fmtdstr_nc);_csublst.append(fmtdstr_c)\n _ncsublst_as_str = \"\\n\".join(_ncsublst);_csublst_as_str = \"\\n\".join(_csublst)\n _nclst.append(_ncsublst_as_str);_clst.append(_csublst_as_str)\n _retvalnc,_retvalc = \"\\n\".join(_nclst),\"\\n\".join(_clst)\n name_file_tpls = [(f\"{filename}.c\",_retvalc),(f\"{filename}.nc\",_retvalnc)]\n write_to_file(name_file_tpls)\n return _retvalnc,_retvalc\n\n def write_to_file(name_file_tpls):\n for name,filedata in name_file_tpls:\n p = Path(name).resolve()\n if not p.parent.exists():\n p.parent.mkdir(parents=True,exist_ok=True)\n with open(f\"{name}.lit.log\", 'w') as f:\n f.write(filedata)\n\n def noco(s):\n rgx1 = re.compile(r\"(\u001b|\\\\x1b)\\[\\d;\\d+m\")\n rgx2 = re.compile(r\"(\u001b|\\\\x1b)\\[0m\")\n s = rgx1.sub(repl=\"\",string=s)\n s = rgx2.sub(repl=\"\",string=s)\n return s\n\n sns = SimpleNamespace(entry=entry)\n return 
sns.entry\n\nwrite_literate_style_df = _write_literate_style_df()\nclass BeautifulUtil:\n def __init__(self):\n self.cols_and_widths = [(\"Index\",9),(\"filepath\",20),(\"line_number\",9),(\"call_data\",80),(\"snoop_data\",80)]\n self.columns = [\"Index\",\"filepath\",\"line_number\",\"call_data\",\"snoop_data\"]\n self.widths = [9,20,9,80,80]\n self.bt_widths = [9,20,9,80 + 9,80 + 9]\n self.textwrapper_width = 80\n self.safecolumns = [\"filepath\",\"line_number\",\"symbol\",\"event_kind\",\"call_data\",\"code_data\",\"snoop_data\"]\n\ndef _create_beautiful_table():\n util = BeautifulUtil()\n\n def entry(df):\n return create_beautiful_table(df)\n\n def create_beautiful_table(df):\n table = get_cfgd_table()\n rows = process_df_rows(df)\n for row in rows:\n table.append_row(row)\n return table.get_string(recalculate_width=False)\n\n def get_cfgd_table():\n nt = new_table = BeautifulTable(\n max_width=200,default_alignment=BeautifulTable.ALIGN_LEFT,default_padding=1\n )\n nt.column_headers,nt.column_widths = util.columns,util.widths\n nt.set_style(BeautifulTable.STYLE_COMPACT)\n return nt\n\n def process_df_rows(df):\n newrows,i,si = [],0,0\n for rawrow in df[util.safecolumns].itertuples():\n row:namedtuple = compose_left(\n wrapdatalines,\n partial(colorize,clrs=False),\n )(rawrow)\n newrows.append([f\"{row.Index}\",f\"{row.filepath}\",f\"({row.line_number}):\",f\"{row.call_data}\",f\"{row.snoop_data}\"])\n return newrows\n\n def wrapdatalines(rawrow):\n def trunc_func(line): return f\"{line[:util.textwrapper_width]}…\" if len(line) > util.textwrapper_width else line\n datacells = [rawrow.call_data, rawrow.code_data, rawrow.snoop_data]\n cad,cod,snp = [[trunc_func(elm) for elm in datacell] for datacell in datacells]\n rv = rawrow._make([rawrow.Index,rawrow.filepath,rawrow.line_number,rawrow.symbol,rawrow.event_kind,cad,cod,snp])\n return rv\n\n def colorize(rowlst,clrs=True):\n if not clrs: return rowlst\n blu,grn,cyn = f'\\x1b[0;34m',f'\\x1b[0;32m',f'\\x1b[0;36m'\n prefixes = iter([f'\\x1b[0;34m',f'\\x1b[0;32m',f'\\x1b[0;36m'])\n suffix = f'\\x1b[0m'\n newlst = rowlst[:2] + [f\"{next(prefixes)}{elm}{suffix}\" for elm in rowlst[-3:]]\n return newlst\n\n sns = SimpleNamespace(entry=entry)\n return sns.entry\ncreate_beautiful_table = _create_beautiful_table()\n\nclass GroupByUtil:\n def __init__(self):\n self.dfpath = \"\"\n self.gbs_dir = \"\"\n\n def write_df_logfiles(self,gbdfs_dct,dfpath):\n assert self.dfpath.exists(), self.dfpath\n self.mk_gbs_dir()\n def get_path_for_gbdf(filename): return self.gbs_dir.joinpath(filename)\n for filename,gbdf in gbdfs_dct.items():\n gbdf_path = get_path_for_gbdf(filename)\n compose_left(\n create_beautiful_table,\n partial(self.write_to_disk,name=gbdf_path)\n )(gbdf)\n\n def write_to_disk(self,table:str,name:str):\n with open(name,'w') as f:\n f.write(table)\n\n def mk_gbs_dir(self):\n self.gbs_dir = self.dfpath.parent.joinpath('gbs')\n self.gbs_dir.mkdir(parents=True,exist_ok=True)\n\n def get_path_for_gbdf(self,filename):\n gbs_dfpath = self.gbs_dir.joinpath(self.filter_type)\n return gbs_dfpath\n\ndef _groupby_filename() -> Dict[str,pd.DataFrame]:\n util = GroupByUtil()\n\n def entry(df,dfpath):\n util.dfpath = dfpath\n dct_o_dfs = group_df_by_filename(df)\n return dct_o_dfs\n\n def group_df_by_filename(df):\n gbs = get_groups_as_lst_o_dfs(df.copy())\n def filename_from_df(df): return Path(df.iloc[0].filepath).name.replace('.py','')\n dct_o_filename_df_pairs = {filename_from_df(g):g for g in gbs}\n return dct_o_filename_df_pairs\n for gb,filename in 
zip(gbs,filenames):\n write_df_logfile(gb,filename)\n return gbs\n\n def get_groups_as_lst_o_dfs(df):\n gb = df.groupby(['filepath'])\n gbs = [gb.get_group(g) for g in gb.groups]\n return gbs\n\n sns = SimpleNamespace(entry=entry)\n return sns.entry\ngroupby_filename = _groupby_filename()\n\nclass FilterUtil:\n def __init__(self,filter_type):\n self.filter_type = filter_type\n\n def write_df_logfile(self,fltrd_df,dfpath):\n fltrd_dfpath = self.get_path_for_fltrd_df(dfpath)\n compose_left(\n create_beautiful_table, # -> str\n partial(self.write_to_disk,name=fltrd_dfpath)\n )(fltrd_df)\n\n def write_to_disk(self,table:str,name:str):\n with open(name,'w') as f:\n f.write(table)\n\n def get_path_for_fltrd_df(self,dfpath):\n filter_dir = dfpath.parent.joinpath('fltrd')\n filter_dir.mkdir(parents=True,exist_ok=True)\n filter_dfpath = filter_dir.joinpath(self.filter_type)\n return filter_dfpath\n\ndef _filter_line_events():\n util = FilterUtil(filter_type='line')\n\n def entry(df,dfpath):\n df = df.copy()\n fltrd_df = compose_left(\n get_line_event_mask,\n lambda mask: df[mask]\n )(df)\n return fltrd_df\n\n def get_line_event_mask(df):\n mask = df.event_kind.str.strip() != 'line'\n return mask\n\n sns = SimpleNamespace(entry=entry)\n return sns.entry\nfilter_line_events = _filter_line_events()\n\nclass AggregateUtil:\n def __init__(self):\n self.columns = [\"filepath\",\"line_number\",\"symbol\",\"event_kind\",\"call_data\",\"code_data\",\"snoop_data\"]\n\n def base_dct_factory(self):\n d = OrderedDict({\n \"home\": None,\n \"interpaths\": None,\n \"filename\": None,\n \"line_number\": None,\n \"symbol\": None,\n \"event_kind\": None,\n \"call_data\": None,\n \"code_data\": None,\n \"snoop_data\": None\n })\n return d\n\n def base_dct_factory_call_only(self):\n d = OrderedDict({\n \"home\": None,\n \"interpaths\": None,\n \"filename\": None,\n \"line_number\": None,\n \"symbol\": None,\n \"event_kind\": None,\n \"call_data\": None,\n })\n return d\n\n def process_snoop_data(self, snp_dtacell):\n \"\"\"snp_dtacell: List[Datum]\n Datum.split(':='): Mapping[str,StrWithBrackets]\n StrWithBrackets[1:-1].split(',') = ValList = List[str]\n \"\\n\".join(ValList): str\n \"\"\"\n space,cs = \"\\u0020\",iter(['blu','grn','cyn'])\n sublst:str\n snp_dtacell:List\n for sublst in snp_dtacell:\n if (ce:=':=' in sublst) or (c:=':' in sublst):\n def mapfunk(sep): return map(str.strip,sublst.split(sep))\n sublstkey,sublstval = mapfunk(':=') if ce else mapfunk(':') if c else ('ERRR','RRROR')\n sublstval = sublstval.replace(\"None\", \"Mome\")\n sublstval,sublstvallen = [sublstval] if not (isinstance(sublstval,list)) else sublstval, sublstval.count(',') + 1\n valsplitaslst = sublstval[1:-1].split(',') if ',' in sublstval else [sublstval]\n linestrfmtd = f\"{sublstkey}({sublstvallen}): [{valsplitaslst[0][0]:<80.80}{',…' if sublstvallen > 1 else ''}]\"\n else:\n print(sublst)\n st()\n return linestrfmtd\n\ndef _aggregate_aggdfs():\n util = AggregateUtil()\n\n def entry(input_dfs,\n columns=util.columns,\n verbose=False\n ):\n util.columns = columns\n assert isinstance(input_dfs[0],pd.DataFrame) and len(input_dfs[0]) > 1, input_dfs\n aggdf = compose_left(\n rename_data_columns,\n merge_dfs_into_one,\n )(input_dfs)\n if verbose:\n return aggdf\n return aggdf[columns]\n\n def rename_data_columns(input_dfs):\n \"\"\"\n ..input_dfs:: {calldf,codedf,snoopdf}\n ..returns :: {\"call\":calldf,\"code\":codedf,\"snoop\":snoopdf}`\n \"\"\"\n source_columns = []\n for elm in ('call','code','snoop'):\n for col in util.columns:\n if 
col.startswith(elm): source_columns.append(elm)\n else: continue\n def rename(df,name): return df.rename(columns={\"source_data\":f\"{name}_data\"},inplace=False)\n dct_o_renamed_dfs = {name:rename(df,name) for name,df in zip(source_columns,input_dfs)}\n assert all([\n (colname in df.columns and len(df) > 1)\n for colname,df\n in zip([f\"{col}_data\" for col in source_columns],dct_o_renamed_dfs.values())\n ])\n return dct_o_renamed_dfs\n\n def merge_dfs_into_one(dct_o_renamed_dfs):\n aggdf = compose_left(\n iterate_over_rows,\n aggregate_lst_o_merged_rowdcts,\n )(dct_o_renamed_dfs)\n return aggdf\n\n def colorize_lst(l,bc=None,fc=None,c=False):\n \"\"\"usage: c=True for colors\"\"\"\n if not c: return l\n bgc = intense_background_black = ('\\x1b[0;100m','\\x1b[0m')\n strt = [bgc[0] for _ in range(len(l))]\n stop = [bgc[1] for _ in range(len(l))]\n zipd = zip(strt,l,stop)\n _jmp = _joinmepls = []\n for rt,txt,op in zipd:\n s = f\"{rt}{txt}{op}\"\n _jmp.append(s)\n # clrzd_str = \"\\n\".join(_jmp)\n return _jmp\n\n def create_filepath(home,interpath,filename):\n # from pdb import set_trace as st; st()\n h = home\n i = f\"{interpath}/\" if interpath else \"\"\n f = filename\n rv = f\"{h}/{i}{f}\"\n return rv\n\n def iterate_over_rows(dct_o_rnd_dfs):\n lst_o_merged_rowdcts = []\n try:\n cadf,codf,sndf = [dct_o_rnd_dfs.get(k) for k in (\"call\",\"code\",\"snoop\")]\n except BaseException:\n raise\n if not (codf and sndf):\n for i in range(len(cadf)):\n base_dct = util.base_dct_factory_call_only()\n car = cadf.iloc[i]\n cadata = car.call_data\n try:\n base_dct.update({\n \"og_index\": car.og_index,\n \"home\": car.home,\n \"interpaths\": car.interpaths,\n \"filename\": car.filename,\n \"filepath\": create_filepath(car.home,car.get('interpath',''),car.filename),\n \"line_number\": car.line_number,\n \"event_kind\": car.event_kind,\n \"symbol\": [getattr(car.symbol,'symbol',None)],\n \"call_data\": colorize_lst([elm for elm in cadata]),\n })\n lst_o_merged_rowdcts.append(base_dct)\n except BaseException:\n from IPython.core.ultratb import ColorTB,VerboseTB\n print(ColorTB().text(*sys.exc_info()))\n raise\n else:\n si,slen,sdone = 0, len(sndf), False\n for i in range(len(cadf)):\n base_dct = util.base_dct_factory()\n car,cor,snr = cadf.iloc[i],codf.iloc[i],sndf.iloc[si]\n assert (caln:=car.line_number) == (coln:=cor.line_number), f\"{caln=},{coln=}\"\n cadata,codata = car.call_data,cor.code_data\n if (not sdone and (snr.get('line_number',None) == caln)):\n sndata = snr.snoop_data\n assert isinstance(sndata,list) and isinstance(sndata[0],str), sndata[0]\n sdone,si = (end:=bool(si == slen)), (si if end else si + 1)\n try:\n base_dct.update({\n \"home\": car.home,\n \"interpaths\": car.interpaths,\n \"filename\": car.filename,\n \"filepath\": create_filepath(car.home,car.get('interpath',''),car.filename),\n \"line_number\": car.line_number,\n \"event_kind\": car.event_kind,\n \"symbol\": [sym for sym in (getattr(car.symbol,'symbol',None),getattr(cor.symbol,'symbol',None))],\n \"call_data\": colorize_lst([elm for elm in cadata]),\n \"code_data\": colorize_lst([elm for elm in codata]),\n \"snoop_data\": sndata if sndata else \"<None>\",\n })\n lst_o_merged_rowdcts.append(base_dct)\n except BaseException:\n from IPython.core.ultratb import ColorTB,VerboseTB\n print(ColorTB().text(*sys.exc_info()))\n return lst_o_merged_rowdcts\n\n def aggregate_lst_o_merged_rowdcts(merged_rowdcts):\n aggdf = pd.DataFrame(merged_rowdcts)\n return aggdf\n\n sns = SimpleNamespace(entry=entry)\n return 
sns.entry\naggregate_aggdfs = _aggregate_aggdfs()\n\n\nclass TargetFuncUtil:\n def __init__(self):\n self.columns = [\"filepath\",\"line_number\",\"symbol\",\"event_kind\",\"call_data\",\"snoop_data\"]\n\n def base_dct_factory(self):\n d = OrderedDict({\n \"home\": None,\n \"interpaths\": None,\n \"filename\": None,\n \"line_number\": None,\n \"symbol\": None,\n \"event_kind\": None,\n \"call_data\": None,\n \"snoop_data\": None\n })\n return d\n\n def process_snoop_data(self, snp_dtacell):\n \"\"\"snp_dtacell: List[Datum]\n Datum.split(':='): Mapping[str,StrWithBrackets]\n StrWithBrackets[1:-1].split(',') = ValList = List[str]\n \"\\n\".join(ValList): str\n \"\"\"\n space,cs = \"\\u0020\",iter(['blu','grn','cyn'])\n sublst:str\n snp_dtacell:List\n for sublst in snp_dtacell:\n if (ce:=':=' in sublst) or (c:=':' in sublst):\n def mapfunk(sep): return map(str.strip,sublst.split(sep))\n sublstkey,sublstval = mapfunk(':=') if ce else mapfunk(':') if c else ('ERRR','RRROR')\n sublstval = sublstval.replace(\"None\", \"Mome\")\n sublstval,sublstvallen = [sublstval] if not (isinstance(sublstval,list)) else sublstval, sublstval.count(',') + 1\n valsplitaslst = sublstval[1:-1].split(',') if ',' in sublstval else [sublstval]\n linestrfmtd = f\"{sublstkey}({sublstvallen}): [{valsplitaslst[0][0]:<80.80}{',…' if sublstvallen > 1 else ''}]\"\n else:\n print(sublst)\n st()\n return linestrfmtd\n\ndef _aggregate_tfdfs():\n util = TargetFuncUtil()\n\n def entry(input_dfs,verbose=False):\n # at this point we have 3 input_dfs\n # rename data columns, merge into one df>to_dict,iterate_over_rows,\n assert isinstance(input_dfs[0],pd.DataFrame) and len(input_dfs[0]) > 1, input_dfs\n tfdf = compose_left(\n rename_data_columns,\n merge_dfs_into_one,\n )(input_dfs)\n if verbose:\n return tfdf\n assert all([col in tfdf.columns for col in util.columns]), tfdf.columns\n return tfdf[[\"filepath\",\"line_number\",\"symbol\",\"event_kind\",\"call_data\",\"snoop_data\",]]\n\n def rename_data_columns(input_dfs):\n \"\"\"\n ..input_dfs:: {calldf,codedf,snoopdf}\n ..returns :: {\"call\":calldf,\"code\":codedf,\"snoop\":snoopdf}`\n \"\"\"\n def rename(df,name): return df.rename(columns={\"source_data\":f\"{name}_data\"},inplace=False)\n dct_o_renamed_dfs = {name:rename(df,name) for name,df in zip([\"call\",\"snoop\"],input_dfs)}\n assert all([(colname in df.columns and len(df) > 1) for colname,df in zip([\"call_data\",\"snoop_data\"],dct_o_renamed_dfs.values())])\n return dct_o_renamed_dfs\n\n def merge_dfs_into_one(dct_o_renamed_dfs):\n tfdf = compose_left(\n iterate_over_rows,\n aggregate_lst_o_merged_rowdcts,\n )(dct_o_renamed_dfs)\n return tfdf\n\n def colorize_lst(l,bc=None,fc=None,c=False):\n \"\"\"usage: c=True for colors\"\"\"\n if not c: return l\n bgc = intense_background_black = ('\\x1b[0;100m','\\x1b[0m')\n strt = [bgc[0] for _ in range(len(l))]\n stop = [bgc[1] for _ in range(len(l))]\n zipd = zip(strt,l,stop)\n _jmp = _joinmepls = []\n for rt,txt,op in zipd:\n s = f\"{rt}{txt}{op}\"\n _jmp.append(s)\n # clrzd_str = \"\\n\".join(_jmp)\n return _jmp\n\n def create_filepath(home,interpath,filename):\n # from pdb import set_trace as st; st()\n h = home\n i = f\"{interpath}/\" if interpath else \"\"\n f = filename\n rv = f\"{h}/{i}{f}\"\n return rv\n\n def iterate_over_rows(dct_o_rnd_dfs):\n lst_o_merged_rowdcts = []\n try:\n cadf,sndf = [dct_o_rnd_dfs.get(k) for k in (\"call\",\"snoop\")]\n si,slen,sdone = 0, len(sndf), False\n except BaseException:\n import IPython\n from IPython.core.ultratb import 
ColorTB,VerboseTB\n from inspect import getfile\n print(getfile(IPython.core.ultratb))\n print(ColorTB().text(*sys.exc_info()))\n for i in range(len(cadf)):\n base_dct = util.base_dct_factory()\n car,snr = cadf.iloc[i],sndf.iloc[si]\n cadata = car.call_data\n if (not sdone and (snr.get('line_number',None) == car.line_number)):\n sndata = snr.snoop_data\n assert isinstance(sndata,list) and isinstance(sndata[0],str), sndata[0]\n sdone,si = (end:=bool(si == slen)), (si if end else si + 1)\n try:\n base_dct.update({\n \"home\": car.home,\n \"interpaths\": car.interpaths,\n \"filename\": car.filename,\n \"filepath\": create_filepath(car.home,car.get('interpath',''),car.filename),\n \"line_number\": car.line_number,\n \"event_kind\": car.event_kind,\n \"symbol\": [sym for sym in (getattr(car.symbol,'symbol',None))] if (getattr(car.symbol,'symbol',None)) else [],\n \"call_data\": colorize_lst([elm for elm in cadata]),\n \"snoop_data\": sndata if sndata else \"<None>\",\n })\n lst_o_merged_rowdcts.append(base_dct)\n except BaseException:\n from IPython.core.ultratb import ColorTB,VerboseTB\n print(ColorTB().text(*sys.exc_info()))\n assert len(lst_o_merged_rowdcts) > 1, lst_o_merged_rowdcts\n return lst_o_merged_rowdcts\n\n def aggregate_lst_o_merged_rowdcts(merged_rowdcts):\n tfdf = pd.DataFrame(merged_rowdcts)\n return tfdf\n\n sns = SimpleNamespace(entry=entry)\n return sns.entry\naggregate_tfdfs = _aggregate_tfdfs()\n\nclass LitUtil:\n def __init__(self):\n self.row_cols = [\n 'filepath', 'line_number', 'symbol', 'event_kind', 'call_data', 'snoop_data'\n ]\n\ndef _write_lit_file():\n util = LitUtil()\n\n def entry(df,tfdfpath):\n df = df.copy()\n writeable_string = compose_left(\n iterate_over_rows,\n )(df)\n return writeable_string\n\n def iterate_over_rows(df):\n for row in df.itertuples():\n # fmtd = format_line(row.filepath, row.line_number, row.call_data)\n try: filepath,lineno,call_data = row.filepath,row.line_number,row.call_data\n except: st()\n if isinstance(call_data, ParsedTuple):\n call_data = str(call_data)\n elif isinstance(call_data, tuple):\n if isinstance(call_data[0], ParsedTuple):\n call_data = tuple([str(cd) for cd in call_data])\n elif call_data[1]:\n call_data = \": \".join(call_data)\n else:\n call_data = call_data[0]\n fmtd = f\"{filepath},{lineno},{call_data}\"\n return fmtd\n\n def format_line(filename,line_number,call_data):\n # if len(call_data) < 2000:\n rgx = re.compile(r\"(?P<funcname>[A-z0-9_]+)\\s?=\\s?\\((?P<funkargs>[A-z0-9_]+)\\s?=\\s?\\(\")\n try: m = rgx.search(\": \".join(call_data))\n except: st()\n funcname,args = gd = m.groupdict()\n if not isinstance(call_data,FmtdCellData):\n\n pass\n else:\n funcname = call_data.funcname\n keys = call_data.get_keys\n args = {k:call_data.get_arg(k) for k in keys}\n\n fn = f'{Path(filename).stem:>10.10}' # len is 8 or 10 'd.__init__'\n ln = f'{line_number:0>3}'\n funk = call_data.funcname\n args = call_data.get_arg('info_dict'),call_data.get_arg('params')\n metadata = f\"{fn}:{ln}\"\n\n sns = SimpleNamespace(entry=entry)\n return sns.entry\nfilter_line_events = _filter_line_events()\n\nwrite_lit_file = _write_lit_file()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cstsunfu/dlkit | [
"69e0efd372fa5c0ae5313124d0ba1ef55b535196"
] | [
"dlk/core/modules/biaffine.py"
] | [
"# Copyright 2021 cstsunfu. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch.nn as nn\nimport torch\nfrom typing import Dict, List\nfrom . import module_register, module_config_register, Module\nfrom dlk.utils.config import BaseConfig\n\n\n@module_config_register(\"biaffine\")\nclass BiAffineConfig(BaseConfig):\n \"\"\"Config for BiAffine\n\n Config Example:\n >>> {\n >>> \"config\": {\n >>> \"input_size\": 256,\n >>> \"output_size\": 2,\n >>> \"dropout\": 0.0, //generally no need dropout\n >>> \"bias\": true, // use bias or not in biaffine\n >>> },\n >>> \"_name\": \"biaffine\",\n >>> }\n \"\"\"\n def __init__(self, config: Dict):\n super(BiAffineConfig, self).__init__(config)\n config = config['config']\n self.input_size = config['input_size']\n self.output_size = config['output_size']\n self.dropout = float(config['dropout'])\n self.bias = config['bias']\n self.post_check(config, used=[\n \"input_size\",\n \"output_size\",\n \"dropout\",\n \"bias\",\n ])\n\n\n@module_register(\"biaffine\")\nclass BiAffine(Module):\n \"\"\"wrap for nn.BiAffine\"\"\"\n def __init__(self, config: BiAffineConfig):\n super(BiAffine, self).__init__()\n if config.bias:\n self.biaffine = nn.Parameter(torch.randn(config.input_size+1, config.output_size, config.input_size+1))\n else:\n self.biaffine = nn.Parameter(torch.randn(config.input_size, config.output_size, config.input_size))\n\n self.dropout = nn.Dropout(p=float(config.dropout))\n self.config = config\n\n def init_weight(self, method):\n \"\"\"init the weight of submodules by 'method'\n\n Args:\n method: init method\n\n Returns: \n None\n\n \"\"\"\n torch.nn.init.xavier_uniform_(self.biaffine)\n\n def forward(self, input_a: torch.Tensor, input_b: torch.Tensor)->torch.Tensor:\n \"\"\"do forward on a mini batch\n\n Args:\n input_a: a mini batch inputs_a, shape==(batch_size, input_a_len, input_size)\n input_b: a mini batch inputs_b, shape==(batch_size, input_b_len, input_size)\n\n Returns: \n input_a x biaffine x input_b, shape==(batch_size, input_a_len, input_b_len, output_size)\n\n \"\"\"\n if self.config.bias:\n output = self.dropout(torch.einsum('bmi,ioj,bnj->bmno', \n torch.cat((input_a, torch.ones_like(input_a[..., :1])), dim=-1), \n self.biaffine, \n torch.cat((input_b, torch.ones_like(input_b[..., :1])), dim=-1)\n ))\n else:\n output = self.dropout(torch.einsum('bmi,ioj,bnj->bmno', \n input_a,\n self.biaffine, \n input_b,\n ))\n return output\n"
] | [
[
"torch.randn",
"torch.einsum",
"torch.ones_like",
"torch.nn.init.xavier_uniform_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rezahojabr/taichi | [
"122c0352ec480b740a4118819458cbf08d2e5ddb"
] | [
"tests/python/test_loops.py"
] | [
"import taichi as ti\n\n\[email protected]_archs\ndef test_loops():\n x = ti.var(ti.f32)\n y = ti.var(ti.f32)\n\n N = 512\n\n @ti.layout\n def place():\n ti.root.dense(ti.i, N).place(x)\n ti.root.dense(ti.i, N).place(y)\n ti.root.lazy_grad()\n\n for i in range(N // 2, N):\n y[i] = i - 300\n\n @ti.kernel\n def func():\n for i in range(ti.static(N // 2 + 3), N):\n x[i] = ti.abs(y[i])\n\n func()\n\n for i in range(N // 2 + 3):\n assert x[i] == 0\n\n for i in range(N // 2 + 3, N):\n assert x[i] == abs(y[i])\n\n\[email protected]_archs\ndef test_numpy_loops():\n x = ti.var(ti.f32)\n y = ti.var(ti.f32)\n\n N = 512\n\n @ti.layout\n def place():\n ti.root.dense(ti.i, N).place(x)\n ti.root.dense(ti.i, N).place(y)\n ti.root.lazy_grad()\n\n for i in range(N // 2, N):\n y[i] = i - 300\n\n import numpy as np\n begin = (np.ones(1) * (N // 2 + 3)).astype(np.int32)\n end = (np.ones(1) * N).astype(np.int32)\n\n @ti.kernel\n def func():\n for i in range(begin, end):\n x[i] = ti.abs(y[i])\n\n func()\n\n for i in range(N // 2 + 3):\n assert x[i] == 0\n\n for i in range(N // 2 + 3, N):\n assert x[i] == abs(y[i])\n\n\[email protected]_archs\ndef test_nested_loops():\n # this may crash if any LLVM allocas are called in the loop body\n x = ti.var(ti.i32)\n\n n = 2048\n\n @ti.layout\n def layout():\n ti.root.dense(ti.ij, n).place(x)\n\n @ti.kernel\n def paint():\n for i in range(n):\n for j in range(n):\n x[0, 0] = i\n\n paint()\n \[email protected]_archs\ndef test_zero_outer_loop():\n x = ti.var(ti.i32, shape=())\n \n @ti.kernel\n def test():\n for i in range(0):\n x[None] = 1\n \n test()\n \n assert x[None] == 0\n \[email protected]_archs\ndef test_zero_inner_loop():\n x = ti.var(ti.i32, shape=())\n \n @ti.kernel\n def test():\n for i in range(1):\n for j in range(0):\n x[None] = 1\n \n test()\n \n assert x[None] == 0\n"
] | [
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marami52/sweetviz | [
"d06df4a2741c73985c574eb63e913a98c4066592"
] | [
"sweetviz/graph_associations.py"
] | [
"import math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sweetviz.sv_types import FeatureType\nimport sweetviz.graph\nfrom sweetviz.config import config\nimport itertools\nimport matplotlib.patches as patches\nfrom textwrap import wrap\n\n# Portions of this file contain code from the following repository:\n# https://github.com/dylan-profiler/heatmaps\n#\n# Used under the following license:\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2019, Drazen Zaric\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# A name for a custom index column that likely will not be used by users\nUNIQUE_INDEX_NAME = 'indexZZ8vr$#RVwadfaFASDFSA'\n\n# Something to detect correlation errors to display\n# TODO: Better/more intuitive display of correlation errors (right now just show up as empty)\nCORRELATION_ERROR = 83572398457329.0\nCORRELATION_IDENTICAL = 1357239845732.0\n\ndef wrap_custom(source_text, separator_chars, width=70, keep_separators = True):\n current_length = 0\n latest_separator = -1\n current_chunk_start = 0\n output = \"\"\n char_index = 0\n while char_index < len(source_text):\n if source_text[char_index] in separator_chars:\n latest_separator = char_index\n output += source_text[char_index]\n current_length += 1\n if current_length == width:\n if latest_separator >= current_chunk_start:\n # Valid earlier separator, cut there\n cutting_length = char_index - latest_separator\n if not keep_separators:\n cutting_length += 1\n if cutting_length:\n output = output[:-cutting_length]\n output += \"\\n\"\n current_chunk_start = latest_separator + 1\n char_index = current_chunk_start\n else:\n # No separator found, hard cut\n output += \"\\n\"\n current_chunk_start = char_index + 1\n latest_separator = current_chunk_start - 1\n char_index += 1\n current_length = 0\n else:\n char_index += 1\n return output\n\nclass GraphAssoc(sweetviz.graph.Graph):\n def __init__(self, dataframe_report, which_graph: str, association_data):\n self.set_style([\"graph_base.mplstyle\"])\n\n # Set categories to use first (some may be unused 
but no need to optimize this)\n categoricals = [dataframe_report[feature][\"name\"] for feature in dataframe_report._features \\\n if dataframe_report[feature][\"type\"] in [FeatureType.TYPE_CAT,\n FeatureType.TYPE_BOOL]]\n nums = [dataframe_report[feature][\"name\"] for feature in dataframe_report._features \\\n if dataframe_report[feature][\"type\"] == FeatureType.TYPE_NUM]\n combined = [dataframe_report[feature][\"name\"] for feature in dataframe_report._features \\\n if dataframe_report[feature][\"type\"] in [FeatureType.TYPE_CAT,\n FeatureType.TYPE_BOOL,\n FeatureType.TYPE_NUM] and \\\n feature in association_data]\n # Add target at beginning\n if dataframe_report._target is not None and dataframe_report._target[\"name\"] in association_data:\n for list_of_features in [categoricals, nums, combined]:\n list_of_features.insert(0, dataframe_report._target[\"name\"])\n\n if len(association_data) == 0 or len(combined) == 0:\n f, axs = plt.subplots(1, 1, figsize=(1,1))\n self.graph_base64 = self.get_encoded_base64(f)\n plt.close(f)\n return\n\n # Build graph_data dataframe with the information we need for the type of graph we want\n if which_graph == \"all\":\n # ALL\n graph_data = make_zero_square_dataframe(combined)\n\n for feature in combined:\n for associated_feature_name in combined:\n associated_feature_val = association_data[feature].get( \\\n associated_feature_name)\n if associated_feature_val is not None:\n graph_data.at[combined.index(feature), associated_feature_name] = \\\n associated_feature_val\n # Workaround\n graph_data[UNIQUE_INDEX_NAME] = combined\n graph_data.set_index(UNIQUE_INDEX_NAME, inplace=True)\n # matplotlib.use('tkagg')\n # corrplot(graph_data)\n # plt.show()\n\n\n elif which_graph == \"cat-cat\":\n # CATEGORY-CATEGORY\n # Associations: _associations[FEATURE][GIVES INFORMATION ABOUT THIS FEATURE]\n graph_data = make_zero_square_dataframe(categoricals)\n\n for feature in categoricals:\n for associated_feature_name in categoricals:\n associated_feature_val = association_data[feature].get( \\\n associated_feature_name)\n if associated_feature_val is not None:\n graph_data.at[categoricals.index(feature), associated_feature_name] = \\\n associated_feature_val\n # Workaround\n graph_data['index'] = categoricals\n graph_data.set_index('index', inplace=True)\n\n elif which_graph == \"num-num\":\n # NUM-NUM\n graph_data = make_zero_square_dataframe(nums)\n\n for feature in nums:\n for associated_feature_name in nums:\n associated_feature_val = association_data[feature].get( \\\n associated_feature_name)\n if associated_feature_val is not None:\n # Make symmetrical, values in both\n graph_data.at[nums.index(feature), associated_feature_name] = \\\n associated_feature_val\n graph_data.at[nums.index(associated_feature_name), feature] = \\\n associated_feature_val\n # Workaround\n graph_data['index'] = nums\n graph_data.set_index('index', inplace=True)\n\n elif which_graph == \"cat-num\":\n # CAT-NUM\n\n # RECTANGULAR: rows are categories. 
Still, make a square, with categories first\n # (we will just not render the Unused rows/cols)\n graph_data = pd.DataFrame()\n # Add columns\n empty_row_dict = dict()\n for feature in nums:\n graph_data[feature] = pd.Series()\n empty_row_dict[feature] = 0.0\n if len(nums) > len(categoricals):\n for i in range(len(categoricals), len(nums)):\n graph_data[str(i)+\"PAD\"] = pd.Series()\n empty_row_dict[str(i)+\"PAD\"] = 0.0\n\n # Add series\n for categorical in categoricals:\n graph_data = graph_data.append(pd.Series(empty_row_dict, name=categorical))\n if len(categoricals) > len(nums):\n for i in range(len(nums), len(categoricals)):\n graph_data = graph_data.append(pd.Series(empty_row_dict, name=str(i)+\"RPAD\"))\n\n # MUST DROP INDEX GRRRR\n orig_index = graph_data.index.values\n graph_data.reset_index(drop=True, inplace=True)\n\n for feature in categoricals:\n for associated_feature_name in nums:\n associated_feature_val = association_data[feature].get( \\\n associated_feature_name)\n if associated_feature_val is not None:\n graph_data.at[categoricals.index(feature), associated_feature_name] = \\\n associated_feature_val\n # Workaround\n graph_data['index'] = orig_index\n graph_data.set_index('index', inplace=True)\n\n # Finalize Graph\n #plt.subplots_adjust(bottom=0.15, right=0.85, top=0.97, left=0.15)\n f = corrplot(graph_data, dataframe_report)\n self.graph_base64 = self.get_encoded_base64(f)\n plt.close(f)\n return\n\n\ndef make_zero_square_dataframe(features):\n new_dataframe = pd.DataFrame()\n # Add columns\n empty_row_dict = dict()\n for feature in features:\n new_dataframe[feature] = pd.Series(dtype=float)\n empty_row_dict[feature] = 0.0\n # Add series\n for categorical in features:\n new_dataframe = new_dataframe.append(pd.Series(empty_row_dict, name=feature))\n # MUST DROP INDEX GRRRR\n return new_dataframe.reset_index(drop=True)\n\ndef heatmap(y, x, figure_size, **kwargs):\n if 'color' in kwargs:\n color = kwargs['color']\n else:\n color = [1]*len(x)\n\n palette = []\n n_colors = 256\n for i in range(0,128):\n palette.append( (0.85, (0.85/128)*i, (0.85/128)*i ))\n for i in range(128,256):\n palette.append( (0.85 - 0.85*(i-128.0)/128.0, 0.85 - 0.85*(i-128.0)/128.0, 0.85 ))\n\n if 'color_range' in kwargs:\n color_min, color_max = kwargs['color_range']\n else:\n color_min, color_max = min(color), max(color) # Range of values that will be mapped to the palette, i.e. 
min and max possible correlation\n\n def value_to_color(val):\n if color_min == color_max:\n return palette[-1]\n else:\n # For now, return \"max positive\" correlation color\n if val == CORRELATION_IDENTICAL:\n return palette[(n_colors - 1)]\n if val == CORRELATION_ERROR:\n return palette[(n_colors - 1)]\n val_position = float((val - color_min)) / (color_max - color_min) # position of value in the input range, relative to the length of the input range\n val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1\n # LOG IT\n val_position = math.pow(val_position, 0.925)\n ind = int(val_position * (n_colors - 1)) # target index in the color palette\n return palette[ind]\n\n if 'size' in kwargs:\n size = kwargs['size']\n else:\n size = [1]*len(x)\n\n if 'size_range' in kwargs:\n size_min, size_max = kwargs['size_range'][0], kwargs['size_range'][1]\n else:\n size_min, size_max = min(size), max(size)\n\n size_scale = kwargs.get('size_scale', 500)\n\n # Scale with num squares\n size_scale = size_scale / len(x)\n def value_to_size(val):\n if val == 0:\n return 0.0\n if val == abs(CORRELATION_IDENTICAL):\n return 1.0\n # TODO: Better/more intuitive display of correlation errors\n if val == abs(CORRELATION_ERROR):\n return 0.0\n if size_min == size_max:\n return 1 * size_scale\n else:\n val_position = (val - size_min) * 0.999 / (size_max - size_min) + 0.001 # position of value in the input range, relative to the length of the input range\n val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1\n # LOG IT\n val_position = math.pow(val_position, 0.5)\n return val_position\n # val_position = int(val_position*2)+4\n # return int(size_scale)\n # return val_position * int(size_scale)\n\n def do_wrapping(label, length):\n return wrap_custom(label, [\"_\", \"-\"], length)\n # return '\\n'.join(wrap(label, 15))\n wrap_x = 12 # at top/bottom\n wrap_y = 13\n if 'x_order' in kwargs:\n x_names = [t for t in kwargs['x_order']]\n else:\n x_names = [t for t in sorted(set([v for v in x]))]\n # Wrap to help avoid overflow\n x_names = [do_wrapping(label, wrap_x) for label in x_names]\n\n x_to_num = {p[1]:p[0] for p in enumerate(x_names)}\n\n if 'y_order' in kwargs:\n y_names = [t for t in kwargs['y_order']]\n else:\n y_names = [t for t in sorted(set([v for v in y]))]\n # Wrap to help avoid overflow\n y_names = [do_wrapping(label, wrap_y) for label in y_names]\n\n y_to_num = {p[1]:p[0] for p in enumerate(y_names)}\n\n figure, axs = plt.subplots(1, 1, figsize=figure_size)\n\n plot_grid = plt.GridSpec(1, 15, figure = figure) # Setup a 1x10 grid\n # plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1, figure = f) # Setup a 1x10 grid\n ax = plt.subplot(plot_grid[:,:-1]) # Use the left 14/15ths of the grid for the main plot\n\n marker = kwargs.get('marker', 's')\n\n kwargs_pass_on = {k:v for k,v in kwargs.items() if k not in [\n 'color', 'palette', 'color_range', 'size', 'size_range', 'size_scale', 'marker', 'x_order', 'y_order'\n ]}\n\n ax.tick_params(labelbottom='on', labeltop='on')\n ax.set_xticks([v for k,v in x_to_num.items()])\n ax.set_xticklabels([k for k in x_to_num], rotation=90, horizontalalignment='center', linespacing=0.8)\n ax.set_yticks([v for k,v in y_to_num.items()])\n ax.set_yticklabels([k for k in y_to_num], linespacing=0.8)\n\n ax.grid(False, 'major')\n ax.grid(True, 'minor')\n ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\n ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\n\n ax.set_xlim([-0.5, max([v for v in 
x_to_num.values()]) + 0.5])\n ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])\n ax.set_facecolor('#F1F1F1')\n # figure.show()\n #figure.savefig(\"ASSOCTEST\")\n delta_in_pix = ax.transData.transform((1, 1)) - ax.transData.transform((0, 0))\n\n index = 0\n for cur_x, cur_y in zip(x,y):\n wrapped_x_name = do_wrapping(cur_x, wrap_x)\n wrapped_y_name = do_wrapping(cur_y, wrap_y)\n before_coordinate = np.array(ax.transData.transform((x_to_num[wrapped_x_name]-0.5, y_to_num[wrapped_y_name] -0.5)))\n after_coordinate = np.array(ax.transData.transform((x_to_num[wrapped_x_name]+0.5, y_to_num[wrapped_y_name] +0.5)))\n before_pixels = np.round(before_coordinate, 0)\n after_pixels = np.round(after_coordinate, 0)\n desired_fraction = value_to_size(size[index])\n if desired_fraction == 0.0:\n index = index + 1\n continue\n if kwargs[\"dataframe_report\"][cur_x][\"type\"] == FeatureType.TYPE_NUM and \\\n kwargs[\"dataframe_report\"][cur_y][\"type\"] == FeatureType.TYPE_NUM:\n use_rectangle = False\n else:\n use_rectangle = True\n # desired_fraction = desired_fraction / 0.707\n delta_in_pix = after_pixels - before_pixels\n gap = np.round((1.0 - desired_fraction) * delta_in_pix / 2, 0)\n start = before_pixels + gap[0]\n ending = after_pixels - gap[0]\n start[0] = start[0] + 1\n ending[1] = ending[1] - 1\n start_doc = ax.transData.inverted().transform(start)\n ending_doc = ax.transData.inverted().transform(ending)\n cur_size = ending_doc - start_doc\n # cur_size = 0.50\n # bottom_left = ax.transData.transform((x_to_num[cur_x]-0.5, y_to_num[cur_y]))\n # print(f\"{bottom_left[0]}\")\n if use_rectangle:\n cur_rect = patches.Rectangle((start_doc[0], start_doc[1]),\n cur_size[0], cur_size[1], facecolor=value_to_color(color[index]),\n antialiased=True)\n else:\n cur_rect = patches.Circle((start_doc[0] + cur_size[0] / 2, start_doc[1] + cur_size[1] / 2),\n cur_size[1] / 2, facecolor=value_to_color(color[index]),\n antialiased=True)\n cur_rect.set_antialiased(True)\n ax.add_patch(cur_rect)\n index = index + 1\n # ax.scatter(\n # x=[x_to_num[v] for v in x],\n # y=[y_to_num[v] for v in y],\n # marker=marker,\n # s=[value_to_size(v) for v in size],\n # c=[value_to_color(v) for v in color],\n # **kwargs_pass_on\n # )\n\n # Add color legend on the right side of the plot\n if color_min < color_max:\n ax = plt.subplot(plot_grid[:,-1]) # Use the rightmost column of the plot\n\n col_x = [0]*len(palette) # Fixed x coordinate for the bars\n bar_y=np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars\n ax.set_ylim(-1, 1)\n bar_height = bar_y[1] - bar_y[0]\n ax.barh(\n y=bar_y,\n width=[5]*len(palette), # Make bars 5 units wide\n left=col_x, # Make bars start at 0\n height=bar_height,\n color=palette,\n linewidth=0\n )\n ax.set_xlim(1, 2) # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle\n ax.grid(False) # Hide grid\n ax.set_facecolor('white') # Make background white\n ax.set_xticks([]) # Remove horizontal ticks\n ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max\n ax.yaxis.tick_right() # Show vertical ticks on the right\n return figure\n\ndef filter_best_corr(correlation_dataframe):\n top_values = dict()\n for features in itertools.product(correlation_dataframe.index.values, \\\n correlation_dataframe.columns):\n val = correlation_dataframe[features[0]][features[1]]\n for f in features:\n if f not in top_values.keys():\n top_values[f] = val\n elif val > top_values[f]:\n top_values[f] = val\n ordered = 
{k: v for k, v in sorted(top_values.items(), key=lambda item: item[1])}\n\ndef corrplot(correlation_dataframe, dataframe_report, size_scale=100, marker='s'):\n # PassengerId Survived Pclass ... SibSp Parch Fare\n # PassengerId 1.000000 -0.005007 -0.035144 ... -0.057527 -0.001652 0.012658\n # Survived -0.005007 1.000000 -0.338481 ... -0.035322 0.081629 0.257307\n # Pclass -0.035144 -0.338481 1.000000 ... 0.083081 0.018443 -0.549500\n # Age 0.036847 -0.077221 -0.369226 ... -0.308247 -0.189119 0.096067\n # SibSp -0.057527 -0.035322 0.083081 ... 1.000000 0.414838 0.159651\n # Parch -0.001652 0.081629 0.018443 ... 0.414838 1.000000 0.216225\n # Fare 0.012658 0.257307 -0.549500 ... 0.159651 0.216225 1.000000\n # filter_best_corr(correlation_dataframe)\n sweetviz.graph.Graph.set_style([\"graph_base.mplstyle\"])\n corr = pd.melt(correlation_dataframe.reset_index(), id_vars=UNIQUE_INDEX_NAME)\n corr.columns = ['x', 'y', 'value']\n # e.g.:\n # x y value\n # 0 PassengerId PassengerId 1.000000\n # 1 Survived PassengerId -0.005007\n # 2 Pclass PassengerId -0.035144\n # 3 Age PassengerId 0.036847\n # 4 SibSp PassengerId -0.057527\n # 5 Parch PassengerId -0.001652\n # 6 Fare PassengerId 0.012658\n # 7 PassengerId Survived -0.005007\n # 8 Survived Survived 1.000000\n # 9 Pclass Survived -0.338481\n # 10 Age Survived -0.077221\n # 11 SibSp Survived -0.035322\n # 12 Parch Survived 0.081629\n # 13 Fare Survived 0.257307\n # 14 PassengerId Pclass -0.035144\n\n return heatmap(\n corr['x'], corr['y'],\n figure_size=(config[\"Associations\"].getfloat(\"association_graph_width\"),\n config[\"Associations\"].getfloat(\"association_graph_height\")),\n color=corr['value'], color_range=[-1, 1],\n palette=None,\n size=corr['value'].abs(), size_range=[0,1],\n marker=marker,\n x_order=correlation_dataframe.columns,\n y_order=correlation_dataframe.columns[::-1],\n size_scale=config[\"Associations\"].getfloat(\"association_graph_size_scale\"),\n dataframe_report = dataframe_report\n )\n\n"
] | [
[
"pandas.Series",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.round",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.GridSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
SEE-MOF/QRNN-CloudCorrection | [
"ba58f1f4f70ec0f7264d5e98d80552d2fba1bb4d"
] | [
"ICI/plot_calibration.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 7 20:46:41 2020\n\n@author: inderpreet\nThis code plots the calibration curves for both QRNN-single and QRNN-all\n\nThis script is used to plot Figure 11 of the article.\n\n\"\"\"\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport netCDF4\n\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator)\nfrom typhon.retrieval.qrnn import set_backend, QRNN\nset_backend(\"pytorch\")\nimport stats as S\nfrom ici import iciData\nfrom calibration import calibration\nimport random\nplt.rcParams.update({'font.size': 26})\n\n\n#%% input parameters\ndepth = 4\nwidth = 128\nquantiles = np.array([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])\nbatchSize = 128\n\ntarget = 'I1V'\n\n\ninChannels = np.array(['I1V', 'I2V', 'I3V', 'I5V' , 'I6V', 'I7V', 'I8V', 'I9V', 'I10V', 'I11V'])\n#inChannels = np.array(['I1V', 'I2V', 'I3V', 'MWI-15', 'MWI-16', 'I5V', 'I6V', 'I7V', 'I8V', 'I9V', 'I10V', 'I11V', 'I11H'])\ninChannels = np.array([target, 'I5V' , 'I6V', 'I7V', 'I8V', 'I9V', 'I10V', 'I11V'])\ni183, = np.argwhere(inChannels == target)[0]\n\nbinstep = 0.5\nbins = np.arange(-20, 15, binstep)\niq = np.argwhere(quantiles == 0.5)[0,0]\ninpath = os.path.expanduser(\"~/Dendrite/Projects/AWS-325GHz/ICI/\")\ntest_file = os.path.join(inpath, \"data/TB_ICI_test.nc\")\n#%% read input data\ndata = iciData(test_file, \n inChannels, target, \n batch_size = batchSize) \n\nfile = os.path.join(inpath, 'qrnn_output/qrnn_ici_%s_%s_%s_single.nc'%(depth, width, target))\nprint (file)\nqrnn = QRNN.load(file)\n\ny_pre, y_prior, y0, y, y_pos_mean = S.predict(data, qrnn, add_noise = True)\n\n# calibration plot data with correction greater than 15K\n\nfig, ax = plt.subplots(1, 1, figsize = [8,8]) \n\nim = np.arange(0, y0.size, 1)\na1, a2, a3, a4, a5, a6, intervals = calibration(y_pre, y0, im, quantiles)\n \n\n(ax.plot(intervals[:], [ a1/len(y0[:]), a2/len(y0[:]), a3/len(y0[:]), \n a4/len(y0[:]), a5/len(y0[:])\n ], 'r.-', ms = 15, linewidth = 2.5, label =\"All data\"))\n\nim = np.where(np.abs(y_pre[:, iq] - y_prior[:, i183]) >= 5)[0]\na1, a2, a3, a4, a5, a6, intervals = calibration(y_pre, y0, im, quantiles) \n\n(ax.plot(intervals[:], [ a1/len(y0[im]), a2/len(y0[im]), a3/len(y0[im]), \n a4/len(y0[im]), a5/len(y0[im])\n ], 'b.-', ms = 15, linewidth = 2.5, label = \"Correction > 10K\"))\n\n\n#%% set the plot parameters\n\nx = np.arange(0,1.2,0.2)\ny = x\nax.plot(x, y, 'k:', linewidth = 1.5)\nax.set(xlim = [0, 1], ylim = [0,1])\nax.set_aspect(1.0)\nax.set_xlabel(\"Predicted frequency\")\nax.set_ylabel(\"Observed frequency\")\nax.xaxis.set_minor_locator(MultipleLocator(0.2))\nax.grid(which = 'both', alpha = 0.2)\nax.set_title(\"Channel:%s\"%str(target), fontsize = 24)\nfig.savefig('Figures/calibration_plot_%s'%target)\n\n(ax.legend(prop={'size': 22}, frameon = False)) \n\nfig.savefig(\"Figures/calibration_QRNN_%s.pdf\"%target, bbox_inches = 'tight')\nfig.savefig(\"Figures/calibration_QRNN_%s.png\"%target, bbox_inches = 'tight')\n"
] | [
[
"matplotlib.ticker.MultipleLocator",
"numpy.abs",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.argwhere",
"matplotlib.pyplot.rcParams.update",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Cahlil-Togonon/Deep-Learning-Experiments | [
"8048b91f382667e9b43078460fb792b369f8af49",
"501ae610b0a8fb7fb75a53dcfdab71be49274b58",
"501ae610b0a8fb7fb75a53dcfdab71be49274b58"
] | [
"versions/2020/keras/seq2seq/seq2seq_translate.py",
"versions/2022/tools/python/accelerate_demo.py",
"versions/2020/transformer/code/transformer-mnist.py"
] | [
"'''Sequence to sequence example in Keras \n\nEnglish to Tagalog sentence pairs.\nhttp://www.manythings.org/anki/tgl-eng.zip\n\nLots of neat sentence pairs datasets can be found at:\nhttp://www.manythings.org/anki/\n\n# References\n\n- Sequence to Sequence Learning with Neural Networks\n https://arxiv.org/abs/1409.3215\n- Learning Phrase Representations using\n RNN Encoder-Decoder for Statistical Machine Translation\n https://arxiv.org/abs/1406.1078\n'''\nfrom __future__ import print_function\n\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense\nimport numpy as np\n\n\ndef read_data(fname):\n with open(fname) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n content = [content[i].split() for i in range(len(content))]\n content = np.array(content)\n content = np.reshape(content, [-1, ])\n return content\n\ndef build_dicts(words):\n dictionary = dict()\n for word in words:\n dictionary[word] = len(dictionary)\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return dictionary, reverse_dictionary\n\ndef build_seq2seq(latent_dim=256):\n # Define an input sequence and process it.\n encoder_inputs = Input(shape=(None,))\n x = Embedding(num_encoder_tokens, latent_dim)(encoder_inputs)\n x, state_h, state_c = LSTM(latent_dim,\n return_state=True)(x)\n encoder_states = [state_h, state_c]\n\n # Set up the decoder, using `encoder_states` as initial state.\n decoder_inputs = Input(shape=(None,))\n x = Embedding(num_decoder_tokens, latent_dim)(decoder_inputs)\n x = LSTM(latent_dim, return_sequences=True)(x, initial_state=encoder_states)\n decoder_outputs = Dense(num_decoder_tokens, activation='softmax')(x)\n\n # Define the model that will turn\n # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\n model = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n return model\n\ndef build_models(latent_dim=256):\n return \n\ndef train_model():\n batch_size = 64 # Batch size for training.\n epochs = 1 # Number of epochs to train for.\n latent_dim = 256 # Latent dimensionality of the encoding space.\n # Path to the data txt file on disk.\n\n model = build_seq2seq()\n\n # Compile & run training\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy')\n # Note that `decoder_target_data` needs to be one-hot encoded,\n # rather than sequences of integers like `decoder_input_data`!\n model.fit([encoder_input_data, decoder_input_data],\n decoder_target_data,\n batch_size=batch_size,\n epochs=epochs,\n validation_split=0.2)\n\ndef input2target(data_path, sos, eos):\n input_texts = []\n target_texts = []\n\n with open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().split('\\n')\n for line in lines:\n if len(line) <= 0:\n continue\n line = line.replace(\",\", \" ,\")\n line = line.replace(\".\", \" .\")\n line = line.replace(\"!\", \" !\")\n line = line.replace(\"?\", \" ?\")\n line = line.lower()\n target_text, input_text = line.split('\\t')\n # print(input_text , \" : \", target_text)\n target_text = \"%s %s %s\" % (sos, target_text, eos)\n input_texts.append(input_text)\n target_texts.append(target_text)\n\n return input_texts, target_texts\n\ndef get_words(sentences):\n words = []\n for sen in sentences:\n tokens = sen.split()\n for token in tokens:\n if token not in words:\n words.append(token)\n print(len(words))\n return words\n\ndef sentence2tensor(input_texts, input_dict):\n return\n\ndef max_wordnum(texts):\n count = 0\n for text in texts:\n if len(text.split()) > count:\n count = 
len(text.split())\n return count\n \n\ndata_path = 'tgl-eng/tgl.txt'\neos = \"<EOS>\"\nsos = \"<SOS>\"\n\ninput_texts, target_texts = input2target(data_path, sos, eos)\n\ninput_words = get_words(input_texts)\ninput_dict, input_rev_dict = build_dicts(input_words)\n\ntarget_words = get_words(target_texts)\nif sos in target_words:\n print(\"Present\")\n\ntarget_dict, target_rev_dict = build_dicts(target_words)\n\n\nnum_encoder_tokens = len(input_words)\nnum_decoder_tokens = len(target_words)\nmax_encoder_seq_length = max([len(words.split()) for words in input_texts])\nmax_decoder_seq_length = max([len(words.split()) for words in target_texts])\n\nprint('Number of samples:', len(input_texts))\nprint('Number of unique input tokens:', num_encoder_tokens)\nprint('Number of unique output tokens:', num_decoder_tokens)\nprint('Max sequence length for inputs:', max_encoder_seq_length)\nprint('Max sequence length for outputs:', max_decoder_seq_length)\n\nencoder_input_data = np.zeros(\n (len(input_texts), max_encoder_seq_length, num_encoder_tokens),\n dtype='float32')\ndecoder_input_data = np.zeros(\n (len(input_texts), max_decoder_seq_length, num_decoder_tokens),\n dtype='float32')\ndecoder_target_data = np.zeros(\n (len(input_texts), max_decoder_seq_length, num_decoder_tokens),\n dtype='float32')\n\nfor i, text, in enumerate(input_texts):\n words = text.split()\n for t, word in enumerate(words):\n encoder_input_data[i, t, input_dict[word]] = 1.\n\nfor i, text, in enumerate(target_texts):\n words = text.split()\n for t, word in enumerate(words):\n # decoder_target_data is ahead of decoder_input_data by one timestep\n decoder_input_data[i, t, target_dict[word]] = 1.\n if t > 0:\n # decoder_target_data will be ahead by one timestep\n # and will not include the start character.\n decoder_target_data[i, t - 1, target_dict[word]] = 1.\n\n# indexes = np.random.randint(0, len(input_texts), 40)\n# encoder_test_data = encoder_input_data[indexes]\n# encoder_input_data = np.delete(encoder_input_data, indexes, axis=0)\n# decoder_input_data = np.delete(decoder_input_data, indexes, axis=0)\n# decoder_target_data = np.delete(decoder_target_data, indexes, axis=0)\n\nbatch_size = 64 # Batch size for training.\nepochs = 100 # Number of epochs to train for.\nlatent_dim = 512 # Latent dimensionality of the encoding space.\n\n# Path to the data txt file on disk.\n# Define an input sequence and process it.\nencoder_inputs = Input(shape=(None, num_encoder_tokens))\nencoder = LSTM(latent_dim, return_sequences=True)(encoder_inputs)\nencoder_outputs, state_h, state_c = LSTM(latent_dim, return_state=True)(encoder)\n# We discard `encoder_outputs` and only keep the states.\nencoder_states = [state_h, state_c]\n\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None, num_decoder_tokens))\n# We set up our decoder to return full output sequences,\n# and to return internal states as well. 
We don't use the\n# return states in the training model, but we will use them in inference.\ndecoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\ndecoder_dense = Dense(num_decoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\nmodel.summary()\n\n# Compile & run training\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy')\n# Note that `decoder_target_data` needs to be one-hot encoded,\n# rather than sequences of integers like `decoder_input_data`!\nmodel.fit([encoder_input_data, decoder_input_data],\n decoder_target_data,\n batch_size=batch_size,\n epochs=epochs,\n shuffle=True,\n validation_split=0.05)\n\n# Save model\nmodel.save('s2s.h5')\n\n# Next: inference mode (sampling).\n# Here's the drill:\n# 1) encode input and retrieve initial decoder state\n# 2) run one step of decoder with this initial state\n# and a \"start of sequence\" token as target.\n# Output will be the next target token\n# 3) Repeat with the current target token and current states\n\n# Define sampling models\nencoder_model = Model(encoder_inputs, encoder_states)\nencoder_model.summary()\n\ndecoder_state_input_h = Input(shape=(latent_dim,))\ndecoder_state_input_c = Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs, state_h, state_c = decoder_lstm(\n decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\ndecoder_model.summary()\n\ndef decode_sequence(input_seq):\n # Encode the input as state vectors.\n states_value = encoder_model.predict(input_seq)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n # Populate the first character of target sequence with the start character.\n target_seq[0, 0, target_dict[sos]] = 1.\n\n # Sampling loop for a batch of sequences\n # (to simplify, here we assume a batch of size 1).\n stop_condition = False\n decoded_sentence = ''\n while not stop_condition:\n output_tokens, h, c = decoder_model.predict(\n [target_seq] + states_value)\n\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_word = target_rev_dict[sampled_token_index]\n decoded_sentence += sampled_word + \" \"\n\n # Exit condition: either hit max length\n # or find stop character.\n # if sampled_word in [\".\", \"?\", \"!\"] or\n if (sampled_word == eos or\n len(decoded_sentence) > max_decoder_seq_length):\n stop_condition = True\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n target_seq[0, 0, sampled_token_index] = 1.\n\n # Update states\n states_value = [h, c]\n\n return decoded_sentence\n\n# print(\"-------------------- TEST ---------------------------\")\n# for seq_index in range(40):\n# # Take one sequence (part of the training set)\n# # for trying out decoding.\n# input_seq = encoder_test_data[seq_index: seq_index + 1]\n# decoded_sentence = decode_sequence(input_seq)\n# print('Input sentence:', input_texts[seq_index])\n# print('Decoded sentence:', decoded_sentence)\n\n\n\nprint(\"-------------------- 
TRAIN ---------------------------\")\nindexes = np.random.randint(0, len(input_texts), 40)\nfor seq_index in indexes:\n # Take one sequence (part of the training set)\n # for trying out decoding.\n input_seq = encoder_input_data[seq_index: seq_index + 1]\n decoded_sentence = decode_sequence(input_seq)\n print('Input sentence:', input_texts[seq_index])\n print('Decoded sentence:', decoded_sentence)\n print(\"----\")\n",
"'''\nAccelerate demo with fp16 and multi-gpu support.\nSingle CPU:\n python accelerate_demo.py --cpu\n\n16-bit Floating Point:\n python accelerate_demo.py --fp16\n\nModel from timm:\n python accelerate_demo.py --timm\n\nSinge-GPU:\n python accelerate_demo.py \n\nMulti-GPU or Multi-CPU:\n accelerate config\n accelerate launch accelerate_demo.py\n'''\n\nimport torch\nimport wandb\nimport datetime\nimport timm\nimport torchvision\nimport argparse\nfrom torch.optim import SGD\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom ui import progress_bar\nfrom accelerate import Accelerator\n\n\ndef init_wandb():\n wandb.login()\n config = {\n \"learning_rate\": 0.1,\n \"epochs\": 100,\n \"batch_size\": 128,\n \"dataset\": \"cifar10\"\n }\n run = wandb.init(project=\"accelerate-options-project\", entity=\"upeee\", config=config)\n\n return run\n\n\ndef run_experiment(args):\n accelerator = Accelerator(fp16=args.fp16, cpu=args.cpu)\n _ = init_wandb()\n\n # With timm, no need to manually replace the classifier head.\n # Just initialize the model with the correct number of classes.\n # However, timm model has a lower accuracy (TODO: why?)\n if args.timm:\n model = timm.create_model('resnet18', pretrained=False, num_classes=10)\n else:\n model = torchvision.models.resnet18(pretrained=False, progress=True)\n model.fc = torch.nn.Linear(model.fc.in_features, 10) \n \n # wandb will automatically log the model gradients.\n wandb.watch(model)\n\n loss = torch.nn.CrossEntropyLoss()\n optimizer = SGD(model.parameters(), lr=wandb.config.learning_rate)\n scheduler = CosineAnnealingLR(optimizer, T_max=wandb.config.epochs)\n\n x_train = datasets.CIFAR10(root='./data', train=True, \n download=True, \n transform=transforms.ToTensor())\n x_test = datasets.CIFAR10(root='./data',\n train=False, \n download=True, \n transform=transforms.ToTensor())\n train_loader = DataLoader(x_train, \n batch_size=wandb.config.batch_size, \n shuffle=True, \n num_workers=2)\n test_loader = DataLoader(x_test, \n batch_size=wandb.config.batch_size, \n shuffle=False, \n num_workers=2)\n\n\n\n label_human = [\"airplane\", \"automobile\", \"bird\", \"cat\", \"deer\", \"dog\", \"frog\", \"horse\", \"ship\", \"truck\"]\n\n table_test = wandb.Table(columns=['Image', \"Ground Truth\", \"Initial Pred Label\",])\n\n image, label = iter(test_loader).next()\n image = image.to(accelerator.device)\n\n # Accelerate API\n model = accelerator.prepare(model)\n optimizer = accelerator.prepare(optimizer)\n scheduler = accelerator.prepare(scheduler)\n train_loader = accelerator.prepare(train_loader)\n test_loader = accelerator.prepare(test_loader)\n\n model.eval()\n with torch.no_grad():\n pred = torch.argmax(model(image), dim=1).cpu().numpy()\n\n for i in range(8):\n table_test.add_data(wandb.Image(image[i]),\n label_human[label[i]], \n label_human[pred[i]])\n accelerator.print(label_human[label[i]], \"vs. 
\", label_human[pred[i]])\n\n\n\n start_time = datetime.datetime.now()\n\n best_acc = 0\n for epoch in range(wandb.config[\"epochs\"]):\n train_acc, train_loss = train(epoch, model, optimizer, scheduler, train_loader, loss, accelerator)\n test_acc, test_loss = test(model, test_loader, loss, accelerator)\n if test_acc > best_acc:\n wandb.run.summary[\"Best accuracy\"] = test_acc\n best_acc = test_acc\n if args.fp16:\n accelerator.save(model.state_dict(), \"./resnet18_best_acc_fp16.pth\")\n else:\n accelerator.save(model, \"./resnet18_best_acc.pth\")\n wandb.log({\n \"Train accuracy\": train_acc,\n \"Test accuracy\": test_acc,\n \"Train loss\": train_loss,\n \"Test loss\": test_loss,\n \"Learning rate\": optimizer.param_groups[0]['lr']\n })\n\n elapsed_time = datetime.datetime.now() - start_time\n accelerator.print(\"Elapsed time: %s\" % elapsed_time)\n wandb.run.summary[\"Elapsed train time\"] = str(elapsed_time)\n wandb.run.summary[\"Fp16 enabled\"] = str(args.fp16)\n wandb.run.summary[\"Using timm\"] = str(args.timm)\n wandb.run.summary[\"Using CPU\"] = str(args.cpu)\n\n model.eval()\n with torch.no_grad():\n pred = torch.argmax(model(image), dim=1).cpu().numpy()\n\n final_pred = []\n for i in range(8):\n final_pred.append(label_human[pred[i]])\n accelerator.print(label_human[label[i]], \"vs. \", final_pred[i])\n\n table_test.add_column(name=\"Final Pred Label\", data=final_pred)\n\n wandb.log({\"Test data\": table_test})\n\n wandb.finish()\n\n\ndef train(epoch, model, optimizer, scheduler, train_loader, loss, accelerator):\n model.train()\n train_loss = 0\n correct = 0\n train_samples = 0\n\n # sample a batch. compute loss and backpropagate\n for batch_idx, (data, target) in enumerate(train_loader):\n optimizer.zero_grad()\n output = model(data)\n loss_value = loss(output, target)\n accelerator.backward(loss_value)\n optimizer.step()\n scheduler.step(epoch)\n train_loss += loss_value.item()\n train_samples += len(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n if batch_idx % 10 == 0:\n accuracy = 100. * correct / len(train_loader.dataset)\n progress_bar(batch_idx,\n len(train_loader),\n 'Train Epoch: {}, Loss: {:0.2e}, Acc: {:.2f}%'.format(epoch+1, \n train_loss/train_samples, accuracy))\n \n train_loss /= len(train_loader.dataset)\n accuracy = 100. * correct / len(train_loader.dataset)\n\n return accuracy, train_loss\n\n\ndef test(model, test_loader, loss, accelerator):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n\n output = model(data)\n test_loss += loss(output, target).item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n accuracy = 100. * correct / len(test_loader.dataset)\n\n accelerator.print('\\nTest Loss: {:.4f}, Acc: {:.2f}%\\n'.format(test_loss, accuracy))\n\n return accuracy, test_loss\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Simple example of training script.\")\n parser.add_argument(\"--timm\", action=\"store_true\", help=\"If passed, build model using timm library.\")\n parser.add_argument(\"--fp16\", action=\"store_true\", help=\"If passed, will use FP16 training.\")\n\n # Seems that this is not supported in the Accelerator version installed\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"],\n help=\"Whether to use mixed precision. 
Choose\"\n \"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\n \"and an Nvidia Ampere GPU.\",\n )\n\n parser.add_argument(\"--cpu\", action=\"store_true\", help=\"If passed, will train on the CPU.\")\n args = parser.parse_args()\n run_experiment(args)\n\n\nif __name__ == \"__main__\":\n main()",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport numpy as np\n\nimport datetime\nimport argparse\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom PIL import Image\nfrom vit_pytorch import ViT\nfrom ui import AverageMeter, accuracy, progress_bar\n\nclass CNNModel(nn.Module):\n\n def __init__(self, n_classes=10):\n super(CNNModel, self).__init__()\n # (channel, filters, kernel_size)\n self.classifier = nn.Sequential(\n nn.Conv2d(1, 64, 3),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 64, 3),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 64, 3),\n nn.Dropout(0.2),\n nn.Flatten(),\n nn.Linear(64 * 3 * 3, n_classes),\n )\n\n def forward(self, x):\n x = self.classifier(x)\n return x\n\ndef train(args,\n model, \n device, \n train_loader, \n test_loader, \n optimizer, \n epoch):\n\n model.train()\n lr = optimizer.param_groups[0]['lr']\n correct = 0\n total = 0\n losses = AverageMeter()\n \n for i, data in enumerate(train_loader):\n inputs, labels = data[0].to(device), data[1].to(device)\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = nn.CrossEntropyLoss()(outputs, labels)\n loss.backward()\n optimizer.step()\n losses.update(loss.float().mean().item())\n\n _, predicted = outputs.max(1)\n total += labels.size(0)\n correct += predicted.eq(labels).sum().item()\n acc = correct * 100. / total\n\n progress_bar(i,\n len(train_loader),\n '[Epoch %d] CE: %.4f | Top 1 Acc: %0.2f%% | LR: %.2e'\n % (epoch, losses.avg, acc, lr))\n\n return test(args, model, device, test_loader)\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n top1 = AverageMeter()\n top5 = AverageMeter()\n with torch.no_grad():\n for i, data in enumerate(test_loader):\n inputs, labels = data[0].to(device), data[1].to(device)\n outputs = model(inputs)\n\n acc1, acc5 = accuracy(outputs, labels, (1, 5))\n top1.update(acc1[0], inputs.size(0))\n top5.update(acc5[0], inputs.size(0))\n\n progress_bar(i,\n len(test_loader),\n 'Test accuracy Top 1: %0.2f%%, Top 5: %0.2f%%'\n % (top1.avg, top5.avg))\n return top1.avg, top5.avg\n\n\nclass SaveOutput:\n def __init__(self):\n self.outputs = []\n \n def __call__(self, module, module_in, module_out):\n self.outputs.append(module_out)\n \n def clear(self):\n self.outputs = []\n\ndef viz_features(args, model):\n save_output = SaveOutput()\n hook_handles = []\n kernels = []\n n_layers = 0 \n for layer in model.modules():\n if isinstance(layer, torch.nn.modules.conv.Conv2d):\n handle = layer.register_forward_hook(save_output)\n hook_handles.append(handle)\n kernels.append(layer.weight)\n n_layers += 1\n\n print(f\"Total convolutional layers: {n_layers}\")\n\n image = np.array(Image.open(args.image))\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n transform = transforms.Compose([transforms.ToTensor(),])\n image = transform(image)\n image = image.to(device)\n image = image.unsqueeze(0)\n pred = model(image)\n features = save_output.outputs\n\n for layer, data in enumerate(zip(features, kernels)):\n maps, kernel = data\n maps = maps.squeeze().detach().cpu().numpy()\n dim = int( np.sqrt( len(maps) ) )\n title = \"Feature maps at CNN layer %d\" % layer\n plot_data(args, maps, dim, title)\n\n kernel = kernel.detach().cpu().numpy()\n kernel = kernel[:,args.feature_num,:,:]\n kernel = kernel.squeeze()\n dim = int( np.sqrt( len(kernel) ) )\n title = \"Kernel weights layer %d filter 
%d\" % (layer, args.feature_num)\n plot_data(args, kernel, dim, title)\n\n\ndef plot_data(args, maps, dim, title=\"\"):\n fig = plt.figure(figsize=(10, 10))\n\n maps = maps - np.amin(maps)\n maps = maps / np.amax(maps)\n\n axes = []\n for i, m in enumerate(maps):\n ax = plt.subplot(dim, dim, i+1)\n axes.append(ax)\n im = plt.imshow(m, cmap=\"gray\")\n plt.axis('off')\n \n fig.colorbar(im, ax=axes)\n plt.suptitle(title, fontsize=14)\n plt.show()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--image',\n default='dataset/test/0_000.png',\n help='image to be classified')\n parser.add_argument('--lr',\n type=float,\n default=1e-3,\n metavar='S',\n help='learning rate (default: 1e-3)')\n parser.add_argument('--batch-size',\n type=int,\n default=128,\n metavar='N',\n help='input batch size for training (default: 128)')\n parser.add_argument('--epochs',\n type=int,\n default=10,\n metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--layer-num',\n type=int,\n default=0,\n metavar='N',\n help='which layer to visualize (default: 0)')\n parser.add_argument('--feature-num',\n type=int,\n default=0,\n metavar='N',\n help='which feature of a layer to visualize (default: 0)')\n parser.add_argument('--train',\n action='store_true',\n default=False,\n help='train the model (default: False)')\n parser.add_argument('--save-model',\n action='store_true',\n default=False,\n help='save the current model (default: False)')\n parser.add_argument('--restore-model',\n default=None,\n help='restore & eval this model file (default: False)')\n parser.add_argument('--normalize',\n action='store_true',\n default=False,\n help='normalize input dataset (default: False)')\n parser.add_argument('--cnn',\n action='store_true',\n default=False,\n help='use cnn model instead of transformer (default: False)')\n parser.add_argument('--visualize',\n action='store_true',\n default=False,\n help='plot kernel and feature maps (default: False)')\n \n args = parser.parse_args()\n use_cuda = torch.cuda.is_available()\n\n kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}\n\n if args.normalize:\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])\n else:\n transform = transforms.Compose([transforms.ToTensor()])\n\n x_train = datasets.MNIST(root='./data',\n train=True,\n download=True,\n transform=transform)\n\n x_test = datasets.MNIST(root='./data',\n train=False,\n download=True,\n transform=transform)\n\n DataLoader = torch.utils.data.DataLoader\n train_loader = DataLoader(x_train,\n shuffle=True,\n batch_size=args.batch_size,\n **kwargs)\n\n test_loader = DataLoader(x_test,\n shuffle=False,\n batch_size=args.batch_size,\n **kwargs)\n\n\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n if args.cnn:\n model = CNNModel().to(device)\n else:\n model = ViT(image_size=28,\n patch_size=14,\n num_classes=10,\n dim=128,\n depth=6,\n heads=8,\n mlp_dim=128,\n channels=1,\n ).to(device)\n\n if torch.cuda.device_count() > 1:\n print(\"Available GPUs:\", torch.cuda.device_count())\n model = nn.DataParallel(model)\n print(\"Model:\", model)\n print(\"Device:\", device)\n optimizer = optim.Adam(model.parameters())\n \n start_time = datetime.datetime.now()\n best_top1 = 0\n best_top5 = 0\n if args.restore_model is not None:\n model.load_state_dict(torch.load(args.restore_model)) \n best_top1, best_top5 = test(args, model, device, 
test_loader)\n print(\"Best Top 1: %0.2f%%, Top 5: %0.2f%%\" % (best_top1, best_top5))\n\n if args.train:\n for epoch in range(1, args.epochs + 1):\n top1, top5 = train(args, model, device, train_loader, test_loader, optimizer, epoch)\n if top1 > best_top1:\n print(\"New best Top 1: %0.2f%%, Top 5: %0.2f%%\" % (top1, top5))\n best_top1 = top1\n best_top5 = top5\n if args.save_model:\n filename = \"cnn-mnist.pth\" if args.cnn else \"transformer-mnist.pth\"\n torch.save(model.state_dict(), filename)\n print(\"Saving best model on file: \", filename)\n\n print(\"Best Top 1: %0.2f%%, Top 5: %0.2f%% in %d epochs\" % (best_top1, best_top5, args.epochs))\n\n elapsed_time = datetime.datetime.now() - start_time\n print(\"Elapsed time (train): %s\" % elapsed_time)\n\n if args.visualize:\n viz_features(args, model)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.reshape",
"numpy.array",
"numpy.zeros",
"numpy.argmax"
],
[
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.no_grad"
],
[
"numpy.amax",
"matplotlib.pyplot.imshow",
"torch.load",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.amin",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.DataParallel",
"torch.cuda.device_count",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"torch.nn.Flatten",
"torch.nn.MaxPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ernewton/starspot | [
"5cf7f0e4d9ce8d39e7768e63b0d45fbaf89955a7"
] | [
"tests/PDM_test.py"
] | [
"import numpy as np\nfrom starspot import phase_dispersion_minimization as pdm\nimport matplotlib.pyplot as plt\nimport starspot as ss\n\n\ndef test_sj2():\n np.random.seed(42)\n N = 10000\n t = np.linspace(0, .1, N)\n x = np.random.randn(N)\n sj2 = pdm.sj2(x, 0, N)\n assert np.isclose(sj2, 1, atol=.01)\n\n\ndef test_s2():\n np.random.seed(42)\n N = 10000\n M = 10\n nj = np.ones(M) * N\n sj2 = np.zeros(M)\n for j in range(M):\n t = np.linspace(0, .1, N)\n x = np.random.randn(N)\n sj2[j] = pdm.sj2(x, 0, nj[j])\n s2 = pdm.s2(nj, sj2, M)\n assert np.isclose(s2, 1, atol=.01)\n\n\ndef test_phase():\n\n # Generate some data\n t = np.linspace(0, 100, 1000)\n p = 10\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n\n phase = pdm.calc_phase(10, t)\n\n # plt.plot(phase, x, \".\")\n # plt.savefig(\"phase_test\")\n # plt.close()\n\n\ndef test_phase_bins():\n \"\"\"\n Make sure that phased light curves are binned correctly.\n \"\"\"\n\n # Generate some data\n t = np.linspace(0, 100, 1000)\n p = 10\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n\n nbins = 10\n\n # Try a period of 2.5\n phase = pdm.calc_phase(2.5, t)\n x_means, phase_bins, Ns, sj2s, x_binned, phase_binned = \\\n pdm.phase_bins(nbins, phase, x)\n mid_phase_bins = np.diff(phase_bins) * .5 + phase_bins[:-1]\n s225 = pdm.s2(Ns, sj2s, nbins)\n\n # Try a period of 5\n phase = pdm.calc_phase(5, t)\n x_means, phase_bins, Ns, sj2s, x_binned, phase_binned = \\\n pdm.phase_bins(nbins, phase, x)\n mid_phase_bins = np.diff(phase_bins) * .5 + phase_bins[:-1]\n s25 = pdm.s2(Ns, sj2s, nbins)\n\n # Try a period of 10\n phase = pdm.calc_phase(10, t)\n x_means, phase_bins, Ns, sj2s, x_binned, phase_binned = \\\n pdm.phase_bins(nbins, phase, x)\n mid_phase_bins = np.diff(phase_bins) * .5 + phase_bins[:-1]\n s210 = pdm.s2(Ns, sj2s, nbins)\n\n # Plot each bin\n for j in range(nbins):\n plt.plot(phase_binned[j], x_binned[j], \".\", alpha=.1, zorder=3)\n\n # Make sure that a period of 10 has the smallest s2 value.\n assert s210 < s25\n assert s210 < s225\n\n # Calculate the total variance and phi statistic and test that too.\n total_variance = pdm.sj2(x, np.mean(x), len(x))\n phi10 = s210/total_variance\n phi5 = s25/total_variance\n phi25 = s225/total_variance\n\n assert phi10 < phi5\n assert phi10 < phi25\n\n assert pdm.phi(10, 10, t, x) == s210/total_variance\n assert pdm.phi(10, 5, t, x) == s25/total_variance\n assert pdm.phi(10, 2.5, t, x) == s225/total_variance\n\n # plt.plot(phase, x, \".\", zorder=0)\n # plt.errorbar(mid_phase_bins, x_means, fmt=\".\", yerr=sj2s, zorder=1)\n # plt.savefig(\"phase_test2\")\n # plt.close()\n\n assert np.isclose(max(x_means), 1, atol=.02)\n\n\ndef test_phi():\n\n # Generate some data\n t = np.linspace(0, 100, 1000)\n p = 10\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n\n # Generate some data\n # t = np.linspace(0, 100, 1000)\n # p = 10\n # w = 2*np.pi/p\n # x1 = np.sin(w*t)\n # x2 = .4*np.sin(w*t + np.pi/2)\n # x3 = .3*np.sin(w*t + np.pi/3)\n # x = x1 #+ x2 + x3\n # x += np.random.randn(len(x)) * .1\n\n # plt.plot(t, x1)\n # plt.plot(t, x2)\n # plt.plot(t, x3)\n x += np.random.randn(len(x))*.1\n # plt.plot(t, x)\n # plt.savefig(\"test\")\n\n # Calculate the Phi statistic over a range of periods\n nperiods = 200\n nbins = 10\n periods = np.linspace(1, 20, nperiods)\n phis = np.zeros(nperiods)\n for i, p in enumerate(periods):\n phis[i] = pdm.phi(nbins, p, t, x)\n\n # Find period with the lowest Phi\n ind = np.argmin(phis)\n pplot = periods[ind]\n # pplot = 10\n\n # Get 
variances for that period\n phase = pdm.calc_phase(pplot, t)\n x_means, phase_bs, Ns, sj2s, xb, pb = pdm.phase_bins(nbins, phase, x)\n mid_phase_bins = np.diff(phase_bs)*.5 + phase_bs[:-1]\n\n # Calculate the phase at that period (for plotting)\n phase = pdm.calc_phase(pplot, t)\n\n # Make the plot\n # fig = plt.figure(figsize=(16, 9))\n # ax1 = fig.add_subplot(311)\n # ax1.plot(t, x, \".\")\n # ax1.set_xlabel(\"Time\")\n # ax1.set_ylabel(\"Flux\")\n\n # ax2 = fig.add_subplot(312)\n # ax2.plot(phase, x, \".\")\n # ax2.errorbar(mid_phase_bins, x_means, yerr=sj2s, fmt=\".\")\n # ax2.set_xlabel(\"Phase\")\n # ax2.set_ylabel(\"Flux\")\n\n # ax3 = fig.add_subplot(313)\n # ax3.plot(periods, phis) # *pplot)\n # ax3.set_xlabel(\"Period [days]\")\n # ax3.set_ylabel(\"Dispersion\")\n # ax3.axvline(periods[ind], color=\"C1\")\n\n # fig.savefig(\"phi_test\")\n\n assert np.isclose(periods[ind], 10, atol=.1)\n\n\ndef test_uncertainty():\n\n # Generate some data\n np.random.seed(42)\n t = np.linspace(0, 100, 1000)\n p = 10\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n xerr = np.ones_like(x)*1e-2\n\n rm = ss.RotationModel(t, x, xerr)\n nperiods = 200\n period_grid = np.linspace(1, 20, nperiods)\n pdm_period, period_err = rm.pdm_rotation(period_grid)\n print(pdm_period, period_err)\n fig = rm.pdm_plot()\n plt.savefig(\"pdm_test\")\n\n # 2 day period\n np.random.seed(42)\n p = 2\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n xerr = np.ones_like(x)*1e-2\n period_grid = np.linspace(.1, 5, nperiods)\n rm = ss.RotationModel(t, x, xerr)\n pdm_period, period_err = rm.pdm_rotation(period_grid)\n print(pdm_period, period_err)\n fig = rm.pdm_plot()\n plt.savefig(\"pdm_test_2\")\n\n # 5 day period\n np.random.seed(42)\n p = 5\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n xerr = np.ones_like(x)*1e-2\n period_grid = np.linspace(.1, 10, nperiods)\n rm = ss.RotationModel(t, x, xerr)\n pdm_period, period_err = rm.pdm_rotation(period_grid)\n print(pdm_period, period_err)\n fig = rm.pdm_plot()\n plt.savefig(\"pdm_test_5\")\n\n # 20 day period\n p = 20\n np.random.seed(42)\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n xerr = np.ones_like(x)*1e-2\n period_grid = np.linspace(5, 30, nperiods)\n rm = ss.RotationModel(t, x, xerr)\n pdm_period, period_err = rm.pdm_rotation(period_grid)\n print(pdm_period, period_err)\n fig = rm.pdm_plot()\n plt.savefig(\"pdm_test_20\")\n\n # 50 day period\n np.random.seed(42)\n t = np.linspace(0, 100, 1000)\n p = 50\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n xerr = np.ones_like(x)*1e-2\n period_grid = 10**np.linspace(0, np.log10(200), nperiods)\n rm = ss.RotationModel(t, x, xerr)\n pdm_period, period_err = rm.pdm_rotation(period_grid)\n print(pdm_period, period_err)\n fig = rm.pdm_plot()\n plt.savefig(\"pdm_test_50\")\n\n # 100 day period\n np.random.seed(42)\n p = 100\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n xerr = np.ones_like(x)*1e-2\n period_grid = 10**np.linspace(0, np.log10(200), nperiods)\n rm = ss.RotationModel(t, x, xerr)\n pdm_period, period_err = rm.pdm_rotation(period_grid)\n print(pdm_period, period_err)\n fig = rm.pdm_plot()\n plt.savefig(\"pdm_test_100\")\n\n\nif __name__ == \"__main__\":\n test_sj2()\n test_s2()\n\n # Generate some data\n t = np.linspace(0, 100, 1000)\n p = 10\n w = 2*np.pi/p\n x = np.sin(w*t) + np.random.randn(len(t))*1e-2\n\n test_phase()\n test_phase_bins()\n test_phi()\n test_uncertainty()\n"
] | [
[
"numpy.ones_like",
"numpy.random.seed",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.log10",
"numpy.random.randn",
"numpy.argmin",
"numpy.mean",
"numpy.diff",
"numpy.zeros",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
binshengliu/pytorch-lightning | [
"8f6b7a2b4fea9b7bd0b873f5973e6364b3981412"
] | [
"tests/trainer/test_dataloaders.py"
] | [
"import platform\n\nimport pytest\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Subset\n\nimport tests.base.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import EvalModelTemplate\n\n\ndef test_fit_train_loader_only(tmpdir):\n\n model = EvalModelTemplate()\n train_dataloader = model.train_dataloader()\n\n model.train_dataloader = None\n model.val_dataloader = None\n model.test_dataloader = None\n\n model.validation_step = None\n model.validation_epoch_end = None\n\n model.test_step = None\n model.test_epoch_end = None\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dataloader=train_dataloader)\n\n\ndef test_fit_val_loader_only(tmpdir):\n\n model = EvalModelTemplate()\n train_dataloader = model.train_dataloader()\n val_dataloader = model.val_dataloader()\n\n model.train_dataloader = None\n model.val_dataloader = None\n model.test_dataloader = None\n\n model.test_step = None\n model.test_epoch_end = None\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)\n\n\[email protected](\"dataloader_options\", [\n dict(train_percent_check=-0.1),\n dict(train_percent_check=1.1),\n dict(val_check_interval=1.1),\n dict(val_check_interval=10000),\n])\ndef test_dataloader_config_errors(tmpdir, dataloader_options):\n\n model = EvalModelTemplate()\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n **dataloader_options,\n )\n\n with pytest.raises(ValueError):\n trainer.fit(model)\n\n\ndef test_multiple_val_dataloader(tmpdir):\n \"\"\"Verify multiple val_dataloader.\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple\n model.validation_step = model.validation_step__multiple_dataloaders\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=1.0,\n )\n result = trainer.fit(model)\n\n # verify training completed\n assert result == 1\n\n # verify there are 2 val loaders\n assert len(trainer.val_dataloaders) == 2, \\\n 'Multiple val_dataloaders not initiated properly'\n\n # make sure predictions are good for each val set\n for dataloader in trainer.val_dataloaders:\n tutils.run_prediction(dataloader, trainer.model)\n\n\ndef test_multiple_test_dataloader(tmpdir):\n \"\"\"Verify multiple test_dataloader.\"\"\"\n\n model = EvalModelTemplate()\n model.test_dataloader = model.test_dataloader__multiple\n model.test_step = model.test_step__multiple_dataloaders\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n trainer.fit(model)\n trainer.test()\n\n # verify there are 2 test loaders\n assert len(trainer.test_dataloaders) == 2, \\\n 'Multiple test_dataloaders not initiated properly'\n\n # make sure predictions are good for each test set\n for dataloader in trainer.test_dataloaders:\n tutils.run_prediction(dataloader, trainer.model)\n\n # run the test method\n trainer.test()\n\n\ndef test_train_dataloader_passed_to_fit(tmpdir):\n \"\"\"Verify that train dataloader can be passed to fit \"\"\"\n\n # only train passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n fit_options = 
dict(train_dataloader=model.dataloader(train=True))\n result = trainer.fit(model, **fit_options)\n\n assert result == 1\n\n\ndef test_train_val_dataloaders_passed_to_fit(tmpdir):\n \"\"\" Verify that train & val dataloader can be passed to fit \"\"\"\n\n # train, val passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=model.dataloader(train=False))\n\n result = trainer.fit(model, **fit_options)\n assert result == 1\n assert len(trainer.val_dataloaders) == 1, \\\n f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n\n\ndef test_all_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Verify train, val & test dataloader(s) can be passed to fit and test method\"\"\"\n\n model = EvalModelTemplate()\n\n # train, val and test passed to fit\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=model.dataloader(train=False))\n test_options = dict(test_dataloaders=model.dataloader(train=False))\n\n result = trainer.fit(model, **fit_options)\n trainer.test(**test_options)\n\n assert result == 1\n assert len(trainer.val_dataloaders) == 1, \\\n f'val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 1, \\\n f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\ndef test_multiple_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Verify that multiple val & test dataloaders can be passed to fit.\"\"\"\n\n model = EvalModelTemplate()\n model.validation_step = model.validation_step__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n\n # train, multiple val and multiple test passed to fit\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=[model.dataloader(train=False),\n model.dataloader(train=False)])\n test_options = dict(test_dataloaders=[model.dataloader(train=False),\n model.dataloader(train=False)])\n\n trainer.fit(model, **fit_options)\n trainer.test(**test_options)\n\n assert len(trainer.val_dataloaders) == 2, \\\n f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 2, \\\n f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\ndef test_mixing_of_dataloader_options(tmpdir):\n \"\"\"Verify that dataloaders can be passed to fit\"\"\"\n\n model = EvalModelTemplate()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))\n assert results\n\n # fit model\n trainer = Trainer(**trainer_options)\n results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))\n assert results\n trainer.test(test_dataloaders=model.dataloader(train=False))\n\n assert len(trainer.val_dataloaders) == 1, \\\n f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 1, \\\n f'`test_dataloaders` not initiated properly, got 
{trainer.test_dataloaders}'\n\n\ndef test_train_inf_dataloader_error(tmpdir):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__infinite\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.fit(model)\n\n\ndef test_val_inf_dataloader_error(tmpdir):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__infinite\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_percent_check=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.fit(model)\n\n\ndef test_test_inf_dataloader_error(tmpdir):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.test_dataloader = model.test_dataloader__infinite\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, test_percent_check=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.test(model)\n\n\[email protected]('check_interval', [50, 1.0])\ndef test_inf_train_dataloader(tmpdir, check_interval):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__infinite\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=check_interval\n )\n result = trainer.fit(model)\n # verify training completed\n assert result == 1\n\n\[email protected]('check_interval', [1.0])\ndef test_inf_val_dataloader(tmpdir, check_interval):\n \"\"\"Test inf val data loader (e.g. 
IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__infinite\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=check_interval,\n )\n result = trainer.fit(model)\n\n # verify training completed\n assert result == 1\n\n\ndef test_error_on_zero_len_dataloader(tmpdir):\n \"\"\" Test that error is raised if a zero-length dataloader is defined \"\"\"\n\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__zero_length\n\n # fit model\n with pytest.raises(ValueError):\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n test_percent_check=0.5\n )\n trainer.fit(model)\n\n\[email protected](platform.system() == 'Windows', reason='Does not apply to Windows platform.')\ndef test_warning_with_few_workers(tmpdir):\n \"\"\" Test that error is raised if dataloader with only a few workers is used \"\"\"\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.2\n )\n\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=model.dataloader(train=False))\n test_options = dict(test_dataloaders=model.dataloader(train=False))\n\n trainer = Trainer(**trainer_options)\n\n # fit model\n with pytest.warns(UserWarning, match='train'):\n trainer.fit(model, **fit_options)\n\n with pytest.warns(UserWarning, match='val'):\n trainer.fit(model, **fit_options)\n\n with pytest.warns(UserWarning, match='test'):\n trainer.test(**test_options)\n\n\[email protected](torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')\ndef test_dataloader_reinit_for_subclass():\n\n class CustomDataLoader(torch.utils.data.DataLoader):\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=None,\n pin_memory=False, drop_last=False, timeout=0,\n worker_init_fn=None, dummy_kwarg=None):\n super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last, timeout,\n worker_init_fn)\n\n self.dummy_kwarg = dummy_kwarg\n\n trainer = Trainer(\n gpus=[0, 1],\n num_nodes=1,\n distributed_backend='ddp',\n )\n\n class CustomDummyObj:\n sampler = None\n\n result = trainer.auto_add_sampler(CustomDummyObj(), train=True)\n assert isinstance(result, CustomDummyObj), \"Wrongly reinstantiated data loader\"\n\n result = trainer.auto_add_sampler(CustomDataLoader(list(range(1000))), train=True)\n assert isinstance(result, torch.utils.data.DataLoader)\n assert isinstance(result, CustomDataLoader)\n assert hasattr(result, 'dummy_kwarg')\n\n\[email protected](torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')\ndef test_batch_size_smaller_than_num_gpus():\n # we need at least 3 gpus for this test\n num_gpus = 3\n batch_size = 3\n\n class CurrentTestModel(EvalModelTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # batch norm doesn't work with batch size 1, we replace it\n self.c_d1_bn = torch.nn.ReLU()\n\n def training_step(self, *args, **kwargs):\n output = super().training_step(*args, **kwargs)\n loss = output['loss']\n # we make sure to add some metrics to the output dict,\n # this is essential for this test\n output['progress_bar'] = {'train_loss': loss}\n return output\n\n def train_dataloader(self):\n dataloader = super().train_dataloader()\n # construct a dataset with a 
size that is not divisible by num_gpus\n # therefore the last batch will have a size < num_gpus\n size = num_gpus * batch_size + (num_gpus - 1)\n dataset = Subset(dataloader.dataset, range(size))\n dataloader = DataLoader(\n dataset,\n batch_size=self.hparams.batch_size,\n drop_last=False,\n )\n return dataloader\n\n hparams = EvalModelTemplate.get_default_hparams()\n hparams.batch_size = batch_size\n model = CurrentTestModel(hparams)\n\n trainer = Trainer(\n max_epochs=1,\n val_percent_check=0,\n gpus=num_gpus,\n )\n\n # we expect the reduction for the metrics also to happen on the last batch\n # where we will get fewer metrics than gpus\n result = trainer.fit(model)\n assert 1 == result\n"
] | [
[
"torch.cuda.device_count",
"torch.utils.data.dataloader.DataLoader",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OSUrobotics/sim-to-real-kinova | [
"334bd824631b0f42bb060c9f1b7ced3137aae294",
"334bd824631b0f42bb060c9f1b7ced3137aae294"
] | [
"object_detection_pkg/src/reward_detection.py",
"learning_code/DDPG.py"
] | [
"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String, Float32\nimport numpy as np\nimport cv2.aruco as aruco\nimport sys\n\n\nclass ImageProcessor():\n def __init__(self):\n\n #cv bridge class\n self.bridge = CvBridge()\n\n # This is the image message subcriber. Change the topic to your camera topic (most likely realsense)\n self.image_sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.get_object_pose)\n \n \n # Callback for image processing\n def get_object_pose(self, img_msg):\n \n ##Box Pixel values in the Image##\n x = 100 # Start Pixel in Height\n y = 350 # Start Pixel in Width\n h = 600 # Height in pixels \n w = 550 # Width in pixels\n \n # Aruko Marker Info\n marker_size = 2 #cm \n\n # Marker IDs\n ee_marker_id = 0 # end-effector\n obj_marker_id = 1 # object \n\n # Get the saved camera and distortion matrices from calibration\n mtx = np.load('/home/nuha/kinova_ws/src/traj-control/src/camera_mtx.npy') # camera matrix\n dist = np.load('/home/nuha/kinova_ws/src/traj-control/src/dist_mtx.npy') # distortion matrix\n\n # Define Aruco Dictionary \n aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_1000)\n parameters = aruco.DetectorParameters_create()\n\n #Lists for storing marker positions\n ee_marker = []\n obj_marker = []\n \n #Convert ros image message to opencv image\n try:\n cv_image = self.bridge.imgmsg_to_cv2(img_msg, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n \n #Cropping Image\n cv_image = cv_image[y:y+h, x:x+w]\n \n #Convert in gray scale\n gray = cv2.cvtColor(cv_image,cv2.COLOR_BGR2GRAY)\n \n #Find markers in the image \n corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=aruco_dict, parameters=parameters, cameraMatrix=mtx, distCoeff=dist)\n\n if np.all(ids != None):\n\n rvec, tvec, __ = aruco.estimatePoseSingleMarkers(corners,marker_size, mtx, dist)\n \n for i in range(ids.size):\n #Draw reference frame for the marker\n \n # Save end-effector marker pose\n if ids[i] == ee_marker_id:\n ee_marker = tvec[i]\n\n # Save object marker pose\n if ids[i] == obj_marker_id:\n obj_marker = tvec[i]\n\n aruco.drawAxis(cv_image, mtx, dist, rvec[i], tvec[i], 10)\n\n #Draw Markers\n aruco.drawDetectedMarkers(cv_image, corners, ids)\n\n if len(obj_marker) > 0 :\n rospy.set_param('Goal', \"true\")\n print(\"Lift Detected\")\n else:\n rospy.set_param('Goal', \"false\") \n print(\"No Marker Found\")\n\n #Display\n cv2.imshow('Window', cv_image)\n cv2.waitKey(3)\n \n \ndef main(args):\n f = ImageProcessor()\n rospy.init_node('reward_detection', anonymous=True)\n\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n",
"import copy\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n# Implementation of Deep Deterministic Policy Gradients (DDPG)\r\n# Paper: https://arxiv.org/abs/1509.02971\r\n# [Not the implementation used in the TD3 paper]\r\n\r\n\r\nclass Actor(nn.Module):\r\n\tdef __init__(self, state_dim, action_dim, max_action):\r\n\t\tsuper(Actor, self).__init__()\r\n\r\n\t\tself.l1 = nn.Linear(state_dim, 400)\r\n\t\tself.l2 = nn.Linear(400, 300)\r\n\t\tself.l3 = nn.Linear(300, action_dim)\r\n\t\t\r\n\t\tself.max_action = max_action\r\n\r\n\t\r\n\tdef forward(self, state):\r\n\t\ta = F.relu(self.l1(state))\r\n\t\ta = F.relu(self.l2(a))\r\n\t\treturn self.max_action * torch.tanh(self.l3(a))\r\n\r\n\r\nclass Critic(nn.Module):\r\n\tdef __init__(self, state_dim, action_dim):\r\n\t\tsuper(Critic, self).__init__()\r\n\r\n\t\tself.l1 = nn.Linear(state_dim, 400)\r\n\t\tself.l2 = nn.Linear(400 + action_dim, 300)\r\n\t\tself.l3 = nn.Linear(300, 1)\r\n\r\n\r\n\tdef forward(self, state, action):\r\n\t\tq = F.relu(self.l1(state))\r\n\t\tq = F.relu(self.l2(torch.cat([q, action], 1)))\r\n\t\treturn self.l3(q)\r\n\r\n\r\nclass DDPG(object):\r\n\tdef __init__(self, state_dim, action_dim, max_action, discount=0.99, tau=0.001):\r\n\t\tself.actor = Actor(state_dim, action_dim, max_action).to(device)\r\n\t\tself.actor_target = copy.deepcopy(self.actor)\r\n\t\tself.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-4)\r\n\r\n\t\tself.critic = Critic(state_dim, action_dim).to(device)\r\n\t\tself.critic_target = copy.deepcopy(self.critic)\r\n\t\tself.critic_optimizer = torch.optim.Adam(self.critic.parameters(), weight_decay=1e-2)\r\n\r\n\t\tself.discount = discount\r\n\t\tself.tau = tau\r\n\r\n\r\n\tdef select_action(self, state):\r\n\t\tstate = torch.FloatTensor(state.reshape(1, -1)).to(device)\r\n\t\treturn self.actor(state).cpu().data.numpy().flatten()\r\n\r\n\r\n\tdef train(self, replay_buffer, batch_size=64):\r\n\t\t# Sample replay buffer \r\n\t\tstate, action, next_state, reward, not_done = replay_buffer.sample(batch_size)\r\n\r\n\t\t# Compute the target Q value\r\n\t\ttarget_Q = self.critic_target(next_state, self.actor_target(next_state))\r\n\t\ttarget_Q = reward + (not_done * self.discount * target_Q).detach()\r\n\r\n\t\t# Get current Q estimate\r\n\t\tcurrent_Q = self.critic(state, action)\r\n\r\n\t\t# Compute critic loss\r\n\t\tcritic_loss = F.mse_loss(current_Q, target_Q)\r\n\r\n\t\t# Optimize the critic\r\n\t\tself.critic_optimizer.zero_grad()\r\n\t\tcritic_loss.backward()\r\n\t\tself.critic_optimizer.step()\r\n\r\n\t\t# Compute actor loss\r\n\t\tactor_loss = -self.critic(state, self.actor(state)).mean()\r\n\t\t\r\n\t\t# Optimize the actor \r\n\t\tself.actor_optimizer.zero_grad()\r\n\t\tactor_loss.backward()\r\n\t\tself.actor_optimizer.step()\r\n\r\n\t\t# Update the frozen target models\r\n\t\tfor param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\r\n\t\t\ttarget_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\r\n\r\n\t\tfor param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\r\n\t\t\ttarget_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\r\n\r\n\t\treturn critic_loss, actor_loss\r\n\r\n\tdef save(self, filename):\r\n\t\ttorch.save(self.critic.state_dict(), filename + \"_critic\")\r\n\t\ttorch.save(self.critic_optimizer.state_dict(), 
filename + \"_critic_optimizer\")\r\n\t\ttorch.save(self.actor.state_dict(), filename + \"_actor\")\r\n\t\ttorch.save(self.actor_optimizer.state_dict(), filename + \"_actor_optimizer\")\r\n\r\n\r\n\tdef load(self, filename):\r\n\t\tself.critic.load_state_dict(torch.load(filename + \"_critic\"))\r\n\t\tself.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\r\n\t\tself.actor.load_state_dict(torch.load(filename + \"_actor\"))\r\n\t\tself.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))"
] | [
[
"numpy.all",
"numpy.load"
],
[
"torch.cat",
"torch.load",
"torch.nn.functional.mse_loss",
"torch.nn.Linear",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZurichNLP/understanding-mbr | [
"4052f6feef783fd851fdaf0acd6bf6ad71dc58ef"
] | [
"scripts/measure_overlaps.py"
] | [
"#! /usr/bin/python3\n\nimport numpy\nimport argparse\nimport logging\nimport sacrebleu\n\n# local dependency\n\n# noinspection PyUnresolvedReferences\nfrom bleu_weighted_precision import WeightedBLEU\n\n\nOVERLAP_FUNCTIONS = [\"word\", \"bleu2\", \"chrf\"]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--compare\", type=str, help=\"Path to sentences to compare to.\", required=True)\n parser.add_argument(\"--input\", type=str, help=\"Path to sentences to analyze.\", required=True)\n\n parser.add_argument(\"--output\", type=str, help=\"Where to save numpy array of overlaps.\", required=True)\n parser.add_argument(\"--overlap-function\", type=str, help=\"How to compute overlap.\", required=True,\n choices=OVERLAP_FUNCTIONS)\n\n args = parser.parse_args()\n\n return args\n\n\nclass Measurer(object):\n\n def __init__(self,\n overlap_function: str) -> None:\n \"\"\"\n\n :param overlap_function:\n \"\"\"\n\n if overlap_function == \"word\":\n self.overlap_function = self.measure_overlap_word\n elif overlap_function == \"bleu-2\":\n self.overlap_function = self.measure_overlap_sacrebleu\n\n # set weights for ngram precisions\n\n precision_weights = [0.55, 0.45, 0.0, 0.0]\n\n args = argparse.Namespace(smooth_method=\"floor\", smooth_value=0.01, force=False,\n short=False, lc=False, tokenize=sacrebleu.DEFAULT_TOKENIZER,\n precision_weights=precision_weights)\n\n self.scorer = WeightedBLEU(args)\n\n else:\n self.overlap_function = self.measure_overlap_sacrebleu\n\n args = argparse.Namespace(chrf_order=6, chrf_beta=2, chrf_whitespace=False, short=False)\n\n self.scorer = sacrebleu.metrics.CHRF(args)\n\n self.tokenize = sacrebleu.tokenizers.tokenizer_13a.Tokenizer13a()\n\n def measure(self,\n input_string: str,\n compare_string: str) -> float:\n \"\"\"\n\n :param input_string:\n :param compare_string:\n :return:\n \"\"\"\n\n return self.overlap_function(input_string.strip(), compare_string.strip())\n\n def measure_overlap_sacrebleu(self,\n input_string: str,\n compare_string: str) -> float:\n \"\"\"\n\n This method is taken from Lee et al (2019):\n https://openreview.net/pdf?id=SkxJ-309FQ\n\n :param input_string:\n :param compare_string:\n :return:\n \"\"\"\n\n score = self.scorer.sentence_score(input_string, [compare_string]).score\n\n # sacrebleu score is 100-based, need a fraction\n\n return score / 100.\n\n def measure_overlap_word(self,\n input_string: str,\n compare_string: str) -> float:\n \"\"\"\n\n :param input_string:\n :param compare_string:\n :return:\n \"\"\"\n input_tokens = self.tokenize(input_string).split(\" \")\n compare_tokens = self.tokenize(compare_string).split(\" \")\n\n input_length = len(input_tokens)\n\n if input_length == 0:\n return 0.0\n\n intersection = set(input_tokens) & set(compare_tokens)\n\n return len(intersection) / input_length\n\n\ndef main():\n args = parse_args()\n\n logging.basicConfig(level=logging.DEBUG)\n logging.debug(args)\n\n overlaps = []\n\n m = Measurer(overlap_function=args.overlap_function)\n\n with open(args.input, \"r\") as input_handle, open(args.compare) as compare_handle:\n\n for line_input, line_compare in zip(input_handle, compare_handle):\n\n overlap = m.measure(line_input, line_compare)\n overlaps.append(overlap)\n\n overlaps_array = numpy.asarray(overlaps, dtype=\"float32\")\n\n numpy.save(args.output, overlaps_array)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.asarray",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XiaociZhang/Paddle | [
"f551d9fe13fa061d40a326e07941af9f6323b4dd"
] | [
"python/paddle/fluid/tests/unittests/auto_parallel/engine_api.py"
] | [
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport time\nimport tempfile\nimport copy\nimport os\nimport numpy as np\nimport subprocess\nimport paddle\nimport paddle.nn as nn\nimport paddle.fluid as fluid\nimport paddle.static as static\nimport paddle.nn.functional as F\nimport paddle.utils as utils\nfrom paddle.fluid import layers\nfrom paddle.io import Dataset, IterableDataset, DataLoader\nfrom paddle.static import InputSpec\nfrom paddle.distributed import fleet\nimport paddle.distributed.auto_parallel as auto\nfrom paddle.distributed.auto_parallel.engine import Engine\n\npaddle.enable_static()\nglobal_process_mesh = auto.ProcessMesh(mesh=[0, 1])\nPP_MESH_0 = auto.ProcessMesh([0])\nPP_MESH_1 = auto.ProcessMesh([1])\nbatch_size = 1\nbatch_num = 10\nhidden_size = 1024\nsequence_len = 512\nimage_size = hidden_size\nclass_num = 10\n\npaddle.seed(44)\n\n\nclass MyDataset(Dataset):\n\n def __init__(self, num_samples):\n super(MyDataset, self).__init__()\n self.num_samples = num_samples\n\n def __getitem__(self, index):\n input = np.random.uniform(size=image_size).astype(\"float32\")\n label = np.random.randint(0, class_num - 1, dtype=\"int64\")\n return input, label\n\n def __len__(self):\n return self.num_samples\n\n\nclass MLPLayer(nn.Layer):\n\n def __init__(self,\n hidden_size=1024,\n intermediate_size=4 * 1024,\n dropout_ratio=0.1,\n initializer_range=0.02):\n super(MLPLayer, self).__init__()\n d_model = hidden_size\n dim_feedforward = intermediate_size\n weight_attr = paddle.ParamAttr(\n initializer=nn.initializer.Normal(mean=0.0, std=initializer_range))\n bias_attr = None\n\n self.linear0 = nn.Linear(d_model,\n dim_feedforward,\n weight_attr,\n bias_attr=bias_attr)\n self.linear1 = nn.Linear(dim_feedforward,\n d_model,\n weight_attr,\n bias_attr=bias_attr)\n self.linear2 = nn.Linear(d_model, 1, weight_attr, bias_attr=bias_attr)\n self.norm = nn.LayerNorm(d_model, epsilon=1e-5)\n self.dropout = nn.Dropout(dropout_ratio, mode=\"upscale_in_train\")\n\n def forward(self, input):\n out = auto.shard_op(self.norm, dist_attr={\"process_mesh\":\n PP_MESH_0})(input)[0]\n out = self.linear0(input)\n out = F.gelu(out, approximate=True)\n out = auto.shard_op(self.linear1, dist_attr={\"process_mesh\":\n PP_MESH_1})(out)[0]\n out = self.dropout(out)\n out = self.linear2(out)\n return out\n\n\ndef train():\n mlp = MLPLayer(hidden_size=hidden_size,\n intermediate_size=4 * hidden_size,\n dropout_ratio=0.1,\n initializer_range=0.02)\n loss = paddle.nn.CrossEntropyLoss()\n optimizer = paddle.fluid.optimizer.AdamOptimizer(learning_rate=0.00001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-08,\n grad_clip=None)\n\n inputs_spec = InputSpec([batch_size, hidden_size], 'float32', 'x')\n labels_spec = InputSpec([batch_size], 'int64', 'label')\n\n dist_strategy = fleet.DistributedStrategy()\n dist_strategy.amp = False\n dist_strategy.pipeline = False\n dist_strategy.recompute = False\n # init parallel optimizer\n 
dist_strategy.semi_auto = True\n fleet.init(is_collective=True, strategy=dist_strategy)\n\n # init engine\n engine = Engine(mlp,\n inputs_spec=inputs_spec,\n labels_spec=labels_spec,\n strategy=dist_strategy)\n engine.prepare(optimizer, loss, metrics=paddle.metric.Accuracy())\n\n # train\n train_dataset = MyDataset(batch_num * batch_size)\n engine.fit(train_dataset,\n batch_size=batch_size,\n steps_per_epoch=batch_num * batch_size,\n fetch_list=['label'])\n\n # eval\n eval_dataset = MyDataset(batch_size)\n engine.evaluate(eval_dataset, batch_size, fetch_list=['label'])\n\n # predict\n test_dataset = MyDataset(batch_size)\n engine.predict(test_dataset, batch_size, fetch_list=['label'])\n\n # save\n temp_dir = tempfile.TemporaryDirectory()\n model_filename = os.path.join(temp_dir.name, 'mlp_inf')\n engine.save(model_filename, training=False, mode='predict')\n temp_dir.cleanup()\n\n\nif __name__ == \"__main__\":\n train()\n"
] | [
[
"numpy.random.uniform",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZJaume/paraphrasing | [
"66f38b30a52d5513b2426dc515373a6f26b62dac"
] | [
"datagen.py"
] | [
"from keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nimport numpy as np\nimport keras\nimport pickle\n\nOOV = '<UNK>'\nBOS = '<BOS>'\nEOS = '<EOS>'\n\ndef tokenize(tknzr, text, maxlen=40, pad=True):\n '''\n Tokenize a list of sentences\n '''\n tok = tknzr.texts_to_sequences(text)\n if pad:\n tok = pad_sequences(tok, maxlen=maxlen, padding='post', truncating='post')\n return tok\n\ndef load_tokenizer(path):\n '''\n Create a tokenizer object from pickle file\n '''\n if not path.endswith('.pkl'):\n raise Exception('File extension must be pkl')\n f = open(path, 'rb')\n tmp = pickle.load(f)\n f.close()\n tknzr = Tokenizer()\n tknzr.__dict__.update(tmp)\n\n return tknzr\n\ndef save_tokenizer(tknzr, path):\n '''\n Save the tokenizer object to a pickle file\n '''\n f = open(path, 'wb')\n pickle.dump(tknzr.__dict__, f)\n f.close()\n\n\n\nclass SentencesGenerator(keras.utils.Sequence):\n '''\n Generates batches of sentences\n '''\n def __init__(self, path, tknzr, batch_size=64,\n max_len=40, shuffle=False, keep_original=False):\n '''\n Instantiate parameters and load dataset\n '''\n self.path = path\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.maxlen = max_len\n self.tknzr = tknzr\n self.tknzr.filters = ''\n self.tknzr.lower = False\n self.keep_original = keep_original\n if keep_original:\n self.data, self.source, self.source_t, self.target = SentencesGenerator.load_data(path, max_len, tknzr, keep_original)\n else:\n self.source, self.source_t, self.target = SentencesGenerator.load_data(path, max_len, tknzr)\n self.index = list(range(0,len(self.source),batch_size))\n\n def __len__(self):\n '''\n Lenght of epochs\n '''\n return int(np.floor(len(self.source) / self.batch_size))\n\n def __getitem__(self, index):\n '''\n Create a batch of source sentence and target sentence\n '''\n if len(self)-1 == index: # Avoid out of range\n idx = None\n else:\n idx = self.index[index+1]\n X = self.source[self.index[index]:idx]\n X_y = self.source_t[self.index[index]:idx]\n Y = []\n for y in self.target[self.index[index]:idx]:\n Y.append(np.reshape(y,y.shape+(1,)))\n\n return [X, X_y], np.array(Y)\n\n def load_data(path, maxlen, tknzr, keep=False):\n '''\n Read corpus file, tokenize words and encode to sequences\n keep=True to keep the data without tokenizing\n '''\n # Read file and append end anf begin of sentence tags\n print(' Reading data file')\n f = open(path,'r')\n X = []\n X_y = []\n Y = []\n if keep:\n data = ([],[])\n for line in f:\n xy = line[:-1].split('|||')\n X.append(BOS+' '+xy[0]+' ' + EOS)\n X_y.append(BOS+' '+xy[1])\n Y.append(xy[1]+' ' + EOS)\n if keep:\n data[0].append(xy[0])\n data[1].append(xy[1])\n f.close()\n\n print(' Word2idx len:',len(tknzr.word_index))\n\n # Create one_hot vectors\n print(' Creating one-hot vectors')\n X = tokenize(tknzr, X, maxlen)\n X_y = tokenize(tknzr, X_y, maxlen)\n Y = tokenize(tknzr, Y, maxlen)\n\n if keep:\n return data, X, X_y, Y\n else:\n return X, X_y, Y\n\nclass TripletsGenerator(keras.utils.Sequence):\n '''\n Generates triplets of source backward and forward sentences\n '''\n\n def __init__(self, path, vocab_size=20000, batch_size=64,\n max_len=30, shuffle=False, window=1, filters=True, tokenizer=None):\n '''\n Instantiate parameters and load dataset\n '''\n self.path = path\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.maxlen = max_len\n self.vocab_size = vocab_size\n self.window = window\n self.data, self.tknzr = 
TripletsGenerator.load_data(path, vocab_size, max_len, filters, tknzr=tokenizer)\n # Create the indexes for the tirplets of data\n self.source_index = list(range(window,self.data.shape[0]-window,batch_size))\n self.backward_index = [None]*window\n self.forward_index = [None]*window\n for i in range(window):\n self.backward_index[i] = list(range(0,self.data.shape[0]-i+window,batch_size))\n self.forward_index[i] = list(range(i+window,self.data.shape[0],batch_size))\n\n def __len__(self):\n '''\n Lenght of epochs\n '''\n return int(np.floor((len(self.data)-self.window) / self.batch_size))\n\n def __getitem__(self,index):\n '''\n Create a batch of source, forward and backward sentences as input\n and forward and backward sentences as output\n '''\n source_idx = self.source_index[index+1]\n backward_idx, forward_idx = [], []\n for i in range(self.window):\n backward_idx.append(self.backward_index[i][index+1])\n forward_idx.append(self.forward_index[i][index+1])\n\n # Grab batches\n batch_source = self.data[self.source_index[index]:source_idx]\n batch_backward = [None]*self.window\n batch_forward = [None]*self.window\n for i in range(self.window):\n batch_backward[i] = self.data[self.backward_index[i][index]:backward_idx[i]]\n batch_forward[i] = self.data[self.forward_index[i][index]:forward_idx[i]]\n\n X = [batch_source]\n for y in batch_backward + batch_forward: # Make offset for the input of decoders\n X.append(np.where(y == self.tknzr.word_index[EOS], 0, y))\n Y = []\n for y in batch_backward + batch_forward: # Remove offset for the output\n shifted = pad_sequences(y[:,1:], maxlen=self.maxlen, padding='post', truncating='post')\n Y.append(np.reshape(shifted,shifted.shape+(1,)))\n\n return X,Y\n\n def on_epoch_end(self):\n pass\n\n def load_data(path, vocab_size, maxlen, filters, ids=False, tknzr=None):\n '''\n Read corpus file, tokenize words and encode to sequences\n '''\n # Read file and append end anf begin of sentence tags\n print(' Reading data file')\n f = open(path,'r')\n text = []\n if ids: # Open ids file\n # Change file extension to .ids\n name = ''.join(path.split('.')[:-1]) + 'ids'\n idfile = open(name)\n idname = ''\n\n for line in f:\n text.append(BOS +' '+line[:-1]+' ' +EOS)\n if ids: # Add context separator\n read = idfile.readline()\n if read != idname:\n idname = read\n text.append('<EOC>')\n f.close()\n\n # Create vocabulary\n if tknzr is None:\n print(' Generating vocabulary')\n tknzr = Tokenizer(num_words=vocab_size, lower=False, oov_token=OOV)\n if not filters:\n tknzr.filters = ''\n else:\n tknzr.filters = tknzr.filters.replace('<','') #need keep tags\n tknzr.filters = tknzr.filters.replace('>','')\n tknzr.fit_on_texts(text)\n print(' Word2idx len:',len(tknzr.word_index))\n\n # Create one_hot vectors\n print(' Creating one-hot vectors')\n data = tokenize(tknzr, text, maxlen)\n\n return data, tknzr\n"
] | [
[
"numpy.reshape",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MoritzTaylor/maml-rl-tf2 | [
"1c9237e7f18d688027a1db56b3cbb27c059a7ff1",
"1c9237e7f18d688027a1db56b3cbb27c059a7ff1"
] | [
"maml_rl/policies/policy.py",
"maml_rl/utils/tf_utils.py"
] | [
"from collections import OrderedDict\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\n\ndef weight_init(module):\n if isinstance(module, keras.layers.Dense):\n keras.initializers.glorot_uniform(module.weight)\n module.bias.data.zero_()\n\n\nclass Policy(tf.keras.Model):\n def __init__(self, input_size, output_size, name=None):\n \"\"\"\n Base class for the policies.\n\n Args:\n input_size: Shape of the observations\n output_size: Shape of the resulting actions\n name: Name of the scope this policy created in\n \"\"\"\n super(Policy, self).__init__(name=name)\n self.input_size = input_size\n self.output_size = output_size\n self.all_params = OrderedDict()\n\n def update_params(self, grads, step_size=0.5):\n \"\"\"Apply one step of gradient descent on the loss function `loss`, with \n step-size `step_size`, and returns the updated parameters of the neural \n network.\n\n Arguments:\n grads: The gradients calculated w.r.t the parameters\n step_size: Learning rate\n \"\"\"\n updated_params = OrderedDict()\n params_with_name = [(x.name, x) for x in self.get_trainable_variables()]\n for (name, param), grad in zip(params_with_name, grads):\n updated_params[name] = tf.subtract(param, tf.multiply(step_size, grad))\n\n return updated_params\n\n def set_params_with_name(self, var_list):\n old_var_list = self.get_trainable_variables()\n for (name, var), old_var in zip(var_list.items(), old_var_list):\n old_var.assign(var)\n\n def set_params(self, var_list):\n old_var_list = self.get_trainable_variables()\n for var, old_var in zip(var_list, old_var_list):\n old_var.assign(var)\n\n def get_trainable_variables(self):\n return NotImplementedError\n\n def __call__(self, x, params=None):\n return self.forward(x, params)\n\n def forward(self, x, params=None):\n raise NotImplementedError\n",
"import numpy as np\nimport tensorflow as tf\n\nfrom maml_rl.policies import CategoricalMLPPolicy, NormalMLPPolicy\nfrom maml_rl.policies.distributions import CategoricalPd, DiagGaussianPd, CategoricalPdType, DiagGaussianPdType\n\n\"\"\"\nCode partially adapted from\nhttps://github.com/openai/baselines/blob/tf2/baselines/common/distributions.py\n\"\"\"\n\n\ndef weighted_mean(tensor, axis=None, weights=None):\n if weights is None:\n out = tf.reduce_mean(tensor)\n if axis is None:\n out = tf.reduce_sum(tensor * weights)\n out = out / tf.reduce_sum(weights)\n else:\n mean_dim = tf.reduce_sum(tensor * weights, axis=axis)\n mean_dim = mean_dim/(tf.reduce_sum(weights, axis=axis))\n out = tf.reduce_mean(mean_dim)\n return out\n\n\ndef weighted_normalize(tensor, axis=None, weights=None, epsilon=1e-8):\n mean = weighted_mean(tensor, axis=axis, weights=weights)\n out = tensor * (1 if weights is None else weights) - mean\n std = tf.math.sqrt(weighted_mean(out ** 2, axis=axis, weights=weights))\n out = out/(std + epsilon)\n return out\n\n\n# ================================================================\n# Flat vectors (from OpenAI Baselines)\n# ================================================================\n\ndef var_shape(x):\n out = x.get_shape().as_list()\n assert all(isinstance(a, int) for a in out), \\\n \"shape function assumes that shape is fully known\"\n return out\n\n\ndef numel(x):\n return intprod(var_shape(x))\n\n\ndef intprod(x):\n return int(np.prod(x))\n\n\ndef flatgrad(grads, var_list, clip_norm=None):\n if clip_norm is not None:\n grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]\n return tf.concat(axis=0, values=[\n tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])\n for (v, grad) in zip(var_list, grads)\n ])\n\n\nclass SetFromFlat(object):\n def __init__(self, var_list, dtype=tf.float32):\n assigns = []\n self.shapes = list(map(var_shape, var_list))\n self.total_size = np.sum([intprod(shape) for shape in self.shapes])\n self.var_list = var_list\n\n def __call__(self, theta):\n start = 0\n for (shape, v) in zip(self.shapes, self.var_list):\n size = intprod(shape)\n v.assign(tf.reshape(theta[start:start + size], shape))\n start += size\n\n\nclass GetFlat(object):\n def __init__(self, var_list):\n self.var_list = var_list\n\n def __call__(self):\n return tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in self.var_list]).numpy()\n\n\ndef flattenallbut0(x):\n return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])\n\n\n# ================================================================\n# Distributions\n# ================================================================\n\ndef make_pdtype(latent_shape, ac_space, init_scale=1.0):\n from gym import spaces\n if isinstance(ac_space, spaces.Box):\n assert len(ac_space.shape) == 1\n return DiagGaussianPdType(latent_shape, ac_space.shape[0], init_scale)\n elif isinstance(ac_space, spaces.Discrete):\n return CategoricalPdType(latent_shape, ac_space.n, init_scale)\n else:\n raise ValueError('No implementation for {}'.format(ac_space))\n\n\ndef detach_distribution(pi):\n if isinstance(pi, CategoricalPd):\n distribution = CategoricalPd(logits=tf.identity(pi.logits.numpy()))\n elif isinstance(pi, DiagGaussianPd):\n mean = tf.identity(pi.mean.numpy())\n logstd = tf.Variable(tf.identity(pi.logstd.numpy()), name='old_pi/logstd', trainable=False, dtype=tf.float32) # TODO: trainable=True?\n pdparam = tf.concat([mean, tf.zeros_like(mean) + logstd], axis=-1)\n distribution = 
DiagGaussianPd(pdparam)\n else:\n raise NotImplementedError('Only `Categorical` and `Normal` '\n 'policies are valid policies.')\n return distribution\n\n\ndef clone_policy(policy, params=None, with_names=False):\n\n if params is None:\n params = policy.get_trainable_variables()\n\n if isinstance(policy, CategoricalMLPPolicy):\n cloned_policy = CategoricalMLPPolicy(input_size=policy.input_size,\n output_size=policy.output_size,\n hidden_sizes=policy.hidden_sizes,\n nonlinearity=policy.nonlinearity)\n elif isinstance(policy, NormalMLPPolicy):\n cloned_policy = NormalMLPPolicy(input_size=policy.input_size,\n output_size=policy.output_size,\n hidden_sizes=policy.hidden_sizes,\n nonlinearity=policy.nonlinearity)\n else:\n raise NotImplementedError('Only `Categorical` and `Normal` '\n 'policies are valid policies at the moment.')\n\n #x = tf.zeros(shape=(1, cloned_policy.input_size))\n #cloned_policy(x)\n\n if with_names:\n cloned_policy.set_params_with_name(params)\n else:\n cloned_policy.set_params(params)\n\n return cloned_policy\n\n"
] | [
[
"tensorflow.multiply",
"tensorflow.keras.initializers.glorot_uniform"
],
[
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.clip_by_norm",
"tensorflow.zeros_like",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
angelvalay/asr_commands | [
"281c40ee2ad4b270fa935f6eb91bc29ea4efde11"
] | [
"data/data_with_roise/get_voice.py"
] | [
"import numpy as np\nimport librosa.display\nimport librosa.output\nimport matplotlib.pyplot as plt\nimport sys\n\ndef get_file_audio(in_path, out_path):\n data, sr = librosa.load(in_path, sr=44100)\n _noise = np.array([])\n _data = np.array([])\n _flag_start = False\n for d in data:\n if _flag_start:\n _data = np.append(_data, [d])\n else:\n _noise = np.append(_data, [d])\n if d > 0.08 and not _flag_start:\n _flag_start = True\n _n_data = np.array([])\n _flag_start = False\n for item in reversed(_data):\n if _flag_start:\n _n_data = np.append(_n_data, [item])\n if item > 0.08 and not _flag_start:\n _flag_start = True\n _n_data = _n_data[::-1]\n librosa.output.write_wav(out_path, np.asfortranarray(_n_data), sr)\n\n\ndef main():\n # print command line arguments\n get_file_audio(sys.argv[1], sys.argv[2])\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.asfortranarray",
"numpy.array",
"numpy.append"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dangeng/infiniteGANorama | [
"92c9cbe0638cf9fcdc05020759772e36aebf788c"
] | [
"ranker.py"
] | [
"'''\nScript to train the ranker\nShould add some sort of image pool someday...?\n'''\n\nimport time\nfrom options.train_options import TrainOptions\nfrom data import CreateDataLoader\nfrom models import create_model\nfrom util.visualizer import Visualizer\nfrom models import networks\n\nimport pdb\nimport torch\nfrom collections import OrderedDict\n\ndef load_chkpt(network, fname):\n chkpt = torch.load(fname)\n new_chkpt = OrderedDict()\n\n for k, v in chkpt.items():\n name = 'module.' + k # add `module`\n new_chkpt[name] = v\n\n network.load_state_dict(new_chkpt)\n\nif __name__ == '__main__':\n opt = TrainOptions().parse()\n data_loader = CreateDataLoader(opt)\n dataset = data_loader.load_data()\n dataset_size = len(data_loader)\n\n model = create_model(opt)\n model.setup(opt)\n visualizer = Visualizer(opt)\n total_steps = 0\n\n '''\n chkpt_D = torch.load('checkpoints/streetview_throttled/15_net_D.pth')\n chkpt_G = torch.load('checkpoints/streetview_throttled/15_net_G.pth')\n\n new_chkpt_D = OrderedDict()\n new_chkpt_G = OrderedDict()\n for k, v in chkpt_D.items():\n name = 'module.' + k # add `module`\n new_chkpt_D[name] = v\n for k, v in chkpt_G.items():\n name = 'module.' + k # add `module`\n new_chkpt_G[name] = v\n\n model.netD.load_state_dict(new_chkpt_D)\n model.netG.load_state_dict(new_chkpt_G)\n '''\n\n G_model_chkpts = ['checkpoints/street_decaythrottle45_halflr/1_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/2_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/3_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/4_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/5_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/6_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/7_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/8_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/9_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/10_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/11_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/12_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/13_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/14_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/15_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/16_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/17_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/18_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/19_net_G.pth',\n 'checkpoints/street_decaythrottle45_halflr/20_net_G.pth']\n\n G_networks = []\n for i in range(len(G_model_chkpts)):\n netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)\n load_chkpt(netG, G_model_chkpts[i])\n G_networks.append(netG)\n netGs = networks.RandomNetwork(G_networks)\n\n #load_chkpt(model.netG, 'checkpoints/streetview_throttled/15_net_G.pth')\n model.netG = netGs\n load_chkpt(model.netD, 'checkpoints/street_decaythrottle45_halflr/20_net_D.pth')\n\n for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time()\n iter_data_time = time.time()\n epoch_iter = 0\n\n for i, data in enumerate(dataset):\n iter_start_time = time.time()\n if total_steps % opt.print_freq == 0:\n t_data = iter_start_time - iter_data_time\n visualizer.reset()\n total_steps += opt.batch_size\n epoch_iter += opt.batch_size\n model.set_input(data)\n\n # optimize only discriminator\n model.forward()\n model.set_requires_grad(model.netD, 
True)\n model.optimizer_D.zero_grad()\n model.backward_D()\n model.optimizer_D.step()\n model.set_requires_grad(model.netD, False)\n\n # need this to prevent logger from complaining\n # because it wants to log the G loss, even though\n # we aren't updating it\n model.backward_G()\n\n if total_steps % opt.display_freq == 0:\n save_result = total_steps % opt.update_html_freq == 0\n visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\n if total_steps % opt.print_freq == 0:\n losses = model.get_current_losses()\n t = (time.time() - iter_start_time) / opt.batch_size\n visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)\n if opt.display_id > 0:\n visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)\n\n if total_steps % opt.save_latest_freq == 0:\n print('saving the latest model (epoch %d, total_steps %d)' %\n (epoch, total_steps))\n model.save_networks('latest', saveG=False)\n\n iter_data_time = time.time()\n if epoch % opt.save_epoch_freq == 0:\n print('saving the model at the end of epoch %d, iters %d' %\n (epoch, total_steps))\n model.save_networks('latest', saveG=False)\n model.save_networks(epoch, saveG=False)\n\n print('End of epoch %d / %d \\t Time Taken: %d sec' %\n (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n model.update_learning_rate()\n"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lkadin/pandera | [
"247d020ded7a3f160f05c4fc8bb9a626e8570846"
] | [
"tests/io/test_io.py"
] | [
"\"\"\"Unit tests for io module\"\"\"\n\nimport platform\nimport tempfile\nimport unittest.mock as mock\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\nfrom packaging import version\n\nimport pandera as pa\nimport pandera.extensions as pa_ext\nimport pandera.typing as pat\n\ntry:\n from pandera import io\nexcept ImportError:\n HAS_IO = False\nelse:\n HAS_IO = True\n\n\ntry:\n import yaml\nexcept ImportError: # pragma: no cover\n PYYAML_VERSION = None\nelse:\n PYYAML_VERSION = version.parse(yaml.__version__) # type: ignore\n\n\nSKIP_YAML_TESTS = PYYAML_VERSION is None or PYYAML_VERSION.release < (5, 1, 0) # type: ignore\n\n\n# skip all tests in module if \"io\" depends aren't installed\npytestmark = pytest.mark.skipif(\n not HAS_IO, reason='needs \"io\" module dependencies'\n)\n\n\ndef _create_schema(index=\"single\"):\n\n if index == \"multi\":\n index = pa.MultiIndex(\n [\n pa.Index(pa.Int, name=\"int_index0\"),\n pa.Index(pa.Int, name=\"int_index1\"),\n pa.Index(pa.Int, name=\"int_index2\"),\n ]\n )\n elif index == \"single\":\n # make sure io modules can handle case when index name is None\n index = pa.Index(pa.Int, name=None)\n else:\n index = None\n\n return pa.DataFrameSchema(\n columns={\n \"int_column\": pa.Column(\n pa.Int,\n checks=[\n pa.Check.greater_than(0),\n pa.Check.less_than(10),\n pa.Check.in_range(0, 10),\n ],\n ),\n \"float_column\": pa.Column(\n pa.Float,\n checks=[\n pa.Check.greater_than(-10),\n pa.Check.less_than(20),\n pa.Check.in_range(-10, 20),\n ],\n ),\n \"str_column\": pa.Column(\n pa.String,\n checks=[\n pa.Check.isin([\"foo\", \"bar\", \"x\", \"xy\"]),\n pa.Check.str_length(1, 3),\n ],\n ),\n \"datetime_column\": pa.Column(\n pa.DateTime,\n checks=[\n pa.Check.greater_than(pd.Timestamp(\"20100101\")),\n pa.Check.less_than(pd.Timestamp(\"20200101\")),\n ],\n ),\n \"timedelta_column\": pa.Column(\n pa.Timedelta,\n checks=[\n pa.Check.greater_than(pd.Timedelta(1000, unit=\"ns\")),\n pa.Check.less_than(pd.Timedelta(10000, unit=\"ns\")),\n ],\n ),\n \"optional_props_column\": pa.Column(\n pa.String,\n nullable=True,\n allow_duplicates=True,\n coerce=True,\n required=False,\n regex=True,\n checks=[pa.Check.str_length(1, 3)],\n ),\n \"notype_column\": pa.Column(\n checks=pa.Check.isin([\"foo\", \"bar\", \"x\", \"xy\"]),\n ),\n },\n index=index,\n coerce=False,\n strict=True,\n )\n\n\nYAML_SCHEMA = f\"\"\"\nschema_type: dataframe\nversion: {pa.__version__}\ncolumns:\n int_column:\n pandas_dtype: int\n nullable: false\n checks:\n greater_than: 0\n less_than: 10\n in_range:\n min_value: 0\n max_value: 10\n allow_duplicates: true\n coerce: false\n required: true\n regex: false\n float_column:\n pandas_dtype: float\n nullable: false\n checks:\n greater_than: -10\n less_than: 20\n in_range:\n min_value: -10\n max_value: 20\n allow_duplicates: true\n coerce: false\n required: true\n regex: false\n str_column:\n pandas_dtype: str\n nullable: false\n checks:\n isin:\n - foo\n - bar\n - x\n - xy\n str_length:\n min_value: 1\n max_value: 3\n allow_duplicates: true\n coerce: false\n required: true\n regex: false\n datetime_column:\n pandas_dtype: datetime64[ns]\n nullable: false\n checks:\n greater_than: '2010-01-01 00:00:00'\n less_than: '2020-01-01 00:00:00'\n allow_duplicates: true\n coerce: false\n required: true\n regex: false\n timedelta_column:\n pandas_dtype: timedelta64[ns]\n nullable: false\n checks:\n greater_than: 1000\n less_than: 10000\n allow_duplicates: true\n coerce: false\n required: true\n regex: false\n optional_props_column:\n 
pandas_dtype: str\n nullable: true\n checks:\n str_length:\n min_value: 1\n max_value: 3\n allow_duplicates: true\n coerce: true\n required: false\n regex: true\n notype_column:\n pandas_dtype: null\n nullable: false\n checks:\n isin:\n - foo\n - bar\n - x\n - xy\n allow_duplicates: true\n coerce: false\n required: true\n regex: false\nchecks: null\nindex:\n- pandas_dtype: int\n nullable: false\n checks: null\n name: null\n coerce: false\ncoerce: false\nstrict: true\n\"\"\"\n\n\ndef _create_schema_null_index():\n\n return pa.DataFrameSchema(\n columns={\n \"float_column\": pa.Column(\n pa.Float,\n checks=[\n pa.Check.greater_than(-10),\n pa.Check.less_than(20),\n pa.Check.in_range(-10, 20),\n ],\n ),\n \"str_column\": pa.Column(\n pa.String,\n checks=[\n pa.Check.isin([\"foo\", \"bar\", \"x\", \"xy\"]),\n pa.Check.str_length(1, 3),\n ],\n ),\n },\n index=None,\n )\n\n\nYAML_SCHEMA_NULL_INDEX = f\"\"\"\nschema_type: dataframe\nversion: {pa.__version__}\ncolumns:\n float_column:\n pandas_dtype: float\n nullable: false\n checks:\n greater_than: -10\n less_than: 20\n in_range:\n min_value: -10\n max_value: 20\n str_column:\n pandas_dtype: str\n nullable: false\n checks:\n isin:\n - foo\n - bar\n - x\n - xy\n str_length:\n min_value: 1\n max_value: 3\nindex: null\nchecks: null\ncoerce: false\nstrict: false\n\"\"\"\n\n\ndef _create_schema_python_types():\n return pa.DataFrameSchema(\n {\n \"int_column\": pa.Column(int),\n \"float_column\": pa.Column(float),\n \"str_column\": pa.Column(str),\n \"object_column\": pa.Column(object),\n }\n )\n\n\nYAML_SCHEMA_PYTHON_TYPES = f\"\"\"\nschema_type: dataframe\nversion: {pa.__version__}\ncolumns:\n int_column:\n pandas_dtype: int64\n float_column:\n pandas_dtype: float64\n str_column:\n pandas_dtype: str\n object_column:\n pandas_dtype: object\nchecks: null\nindex: null\ncoerce: false\nstrict: false\n\"\"\"\n\n\nYAML_SCHEMA_MISSING_GLOBAL_CHECK = f\"\"\"\nschema_type: dataframe\nversion: {pa.__version__}\ncolumns:\n int_column:\n pandas_dtype: int64\n float_column:\n pandas_dtype: float64\n str_column:\n pandas_dtype: str\n object_column:\n pandas_dtype: object\nchecks:\n unregistered_check:\n stat1: missing_str_stat\n stat2: 11\nindex: null\ncoerce: false\nstrict: false\n\"\"\"\n\n\nYAML_SCHEMA_MISSING_COLUMN_CHECK = f\"\"\"\nschema_type: dataframe\nversion: {pa.__version__}\ncolumns:\n int_column:\n pandas_dtype: int64\n checks:\n unregistered_check:\n stat1: missing_str_stat\n stat2: 11\n float_column:\n pandas_dtype: float64\n str_column:\n pandas_dtype: str\n object_column:\n pandas_dtype: object\nindex: null\ncoerce: false\nstrict: false\n\"\"\"\n\n\[email protected](\n SKIP_YAML_TESTS,\n reason=\"pyyaml >= 5.1.0 required\",\n)\ndef test_inferred_schema_io():\n \"\"\"Test that inferred schema can be written to yaml.\"\"\"\n df = pd.DataFrame(\n {\n \"column1\": [5, 10, 20],\n \"column2\": [5.0, 1.0, 3.0],\n \"column3\": [\"a\", \"b\", \"c\"],\n }\n )\n schema = pa.infer_schema(df)\n schema_yaml_str = schema.to_yaml()\n schema_from_yaml = io.from_yaml(schema_yaml_str)\n assert schema == schema_from_yaml\n\n\[email protected](\n SKIP_YAML_TESTS,\n reason=\"pyyaml >= 5.1.0 required\",\n)\ndef test_to_yaml():\n \"\"\"Test that to_yaml writes to yaml string.\"\"\"\n schema = _create_schema()\n yaml_str = io.to_yaml(schema)\n assert yaml_str.strip() == YAML_SCHEMA.strip()\n\n yaml_str_schema_method = schema.to_yaml()\n assert yaml_str_schema_method.strip() == YAML_SCHEMA.strip()\n\n\[email protected](\n SKIP_YAML_TESTS,\n reason=\"pyyaml >= 5.1.0 
required\",\n)\[email protected](\n \"yaml_str, schema_creator\",\n [\n [YAML_SCHEMA, _create_schema],\n [YAML_SCHEMA_NULL_INDEX, _create_schema_null_index],\n [YAML_SCHEMA_PYTHON_TYPES, _create_schema_python_types],\n ],\n)\ndef test_from_yaml(yaml_str, schema_creator):\n \"\"\"Test that from_yaml reads yaml string.\"\"\"\n schema_from_yaml = io.from_yaml(yaml_str)\n expected_schema = schema_creator()\n assert schema_from_yaml == expected_schema\n assert expected_schema == schema_from_yaml\n\n\ndef test_from_yaml_unregistered_checks():\n \"\"\"Test that from_yaml raises an exception when deserializing unregistered checks.\"\"\"\n\n with pytest.raises(AttributeError, match=\".*custom checks.*\"):\n io.from_yaml(YAML_SCHEMA_MISSING_COLUMN_CHECK)\n\n with pytest.raises(AttributeError, match=\".*custom checks.*\"):\n io.from_yaml(YAML_SCHEMA_MISSING_GLOBAL_CHECK)\n\n\ndef test_from_yaml_load_required_fields():\n \"\"\"Test that dataframe schemas do not require any field.\"\"\"\n io.from_yaml(\"\")\n\n with pytest.raises(\n pa.errors.SchemaDefinitionError, match=\".*must be a mapping.*\"\n ):\n io.from_yaml(\n \"\"\"\n - value\n \"\"\"\n )\n\n\ndef test_io_yaml_file_obj():\n \"\"\"Test read and write operation on file object.\"\"\"\n schema = _create_schema()\n\n # pass in a file object\n with tempfile.NamedTemporaryFile(\"w+\") as f:\n output = schema.to_yaml(f)\n assert output is None\n f.seek(0)\n schema_from_yaml = pa.DataFrameSchema.from_yaml(f)\n assert schema_from_yaml == schema\n\n\[email protected](\n platform.system() == \"Windows\",\n reason=\"skipping due to issues with opening file names for temp files.\",\n)\[email protected](\"index\", [\"single\", \"multi\", None])\ndef test_io_yaml(index):\n \"\"\"Test read and write operation on file names.\"\"\"\n schema = _create_schema(index)\n\n # pass in a file name\n with tempfile.NamedTemporaryFile(\"w+\") as f:\n output = io.to_yaml(schema, f.name)\n assert output is None\n schema_from_yaml = io.from_yaml(f.name)\n assert schema_from_yaml == schema\n\n # pass in a Path object\n with tempfile.NamedTemporaryFile(\"w+\") as f:\n output = schema.to_yaml(Path(f.name))\n assert output is None\n schema_from_yaml = pa.DataFrameSchema.from_yaml(Path(f.name))\n assert schema_from_yaml == schema\n\n\[email protected](\n platform.system() == \"Windows\",\n reason=\"skipping due to issues with opening file names for temp files.\",\n)\[email protected](\"index\", [\"single\", \"multi\", None])\ndef test_to_script(index):\n \"\"\"Test writing DataFrameSchema to a script.\"\"\"\n schema_to_write = _create_schema(index)\n\n for script in [io.to_script(schema_to_write), schema_to_write.to_script()]:\n\n local_dict = {}\n # pylint: disable=exec-used\n exec(script, globals(), local_dict)\n\n schema = local_dict[\"schema\"]\n\n # executing script should result in a variable `schema`\n assert schema == schema_to_write\n\n with tempfile.NamedTemporaryFile(\"w+\") as f:\n schema_to_write.to_script(Path(f.name))\n # pylint: disable=exec-used\n exec(f.read(), globals(), local_dict)\n schema = local_dict[\"schema\"]\n assert schema == schema_to_write\n\n\ndef test_to_script_lambda_check():\n \"\"\"Test writing DataFrameSchema to a script with lambda check.\"\"\"\n schema1 = pa.DataFrameSchema(\n {\n \"a\": pa.Column(\n pa.Int,\n checks=pa.Check(lambda s: s.mean() > 5, element_wise=False),\n ),\n }\n )\n\n with pytest.warns(UserWarning):\n pa.io.to_script(schema1)\n\n schema2 = pa.DataFrameSchema(\n {\n \"a\": pa.Column(\n pa.Int,\n ),\n },\n 
checks=pa.Check(lambda s: s.mean() > 5, element_wise=False),\n )\n\n with pytest.warns(UserWarning, match=\".*registered checks.*\"):\n pa.io.to_script(schema2)\n\n\ndef test_to_yaml_lambda_check():\n \"\"\"Test writing DataFrameSchema to a yaml with lambda check.\"\"\"\n schema = pa.DataFrameSchema(\n {\n \"a\": pa.Column(\n pa.Int,\n checks=pa.Check(lambda s: s.mean() > 5, element_wise=False),\n ),\n }\n )\n\n with pytest.warns(UserWarning):\n pa.io.to_yaml(schema)\n\n\ndef test_format_checks_warning():\n \"\"\"Test that unregistered checks raise a warning when formatting checks.\"\"\"\n with pytest.warns(UserWarning):\n io._format_checks({\"my_check\": None})\n\n\[email protected](\"pandera.Check.REGISTERED_CUSTOM_CHECKS\", new_callable=dict)\ndef test_to_yaml_registered_dataframe_check(_):\n \"\"\"\n Tests that writing DataFrameSchema with a registered dataframe check works.\n \"\"\"\n ncols_gt_called = False\n\n @pa_ext.register_check_method(statistics=[\"column_count\"])\n def ncols_gt(pandas_obj: pd.DataFrame, column_count: int) -> bool:\n \"\"\"test registered dataframe check\"\"\"\n\n # pylint: disable=unused-variable\n nonlocal ncols_gt_called\n ncols_gt_called = True\n assert isinstance(column_count, int), \"column_count must be integral\"\n assert isinstance(\n pandas_obj, pd.DataFrame\n ), \"ncols_gt should only be applied to DataFrame\"\n return len(pandas_obj.columns) > column_count\n\n assert (\n len(pa.Check.REGISTERED_CUSTOM_CHECKS) == 1\n ), \"custom check is registered\"\n\n schema = pa.DataFrameSchema(\n {\n \"a\": pa.Column(\n pa.Int,\n ),\n },\n checks=[pa.Check.ncols_gt(column_count=5)],\n )\n\n serialized = pa.io.to_yaml(schema)\n loaded = pa.io.from_yaml(serialized)\n\n assert len(loaded.checks) == 1, \"global check was stripped\"\n\n with pytest.raises(pa.errors.SchemaError):\n schema.validate(pd.DataFrame(data={\"a\": [1]}))\n\n assert ncols_gt_called, \"did not call ncols_gt\"\n\n\ndef test_to_yaml_custom_dataframe_check():\n \"\"\"Tests that writing DataFrameSchema with an unregistered check raises.\"\"\"\n\n schema = pa.DataFrameSchema(\n {\n \"a\": pa.Column(\n pa.Int,\n ),\n },\n checks=[pa.Check(lambda obj: len(obj.index) > 1)],\n )\n\n with pytest.warns(UserWarning, match=\".*registered checks.*\"):\n pa.io.to_yaml(schema)\n\n # the unregistered column check case is tested in\n # `test_to_yaml_lambda_check`\n\n\ndef test_to_yaml_bugfix_419():\n \"\"\"Ensure that GH#419 is fixed\"\"\"\n # pylint: disable=no-self-use\n\n class CheckedSchemaModel(pa.SchemaModel):\n \"\"\"Schema with a global check\"\"\"\n\n a: pat.Series[pat.Int64]\n b: pat.Series[pat.Int64]\n\n @pa.dataframe_check()\n def unregistered_check(self, _):\n \"\"\"sample unregistered check\"\"\"\n ...\n\n with pytest.warns(UserWarning, match=\".*registered checks.*\"):\n CheckedSchemaModel.to_yaml()\n"
] | [
[
"pandas.Timedelta",
"pandas.Timestamp",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lidegao899/Evolutionary-Algorithm | [
"2b36038ecfe6d7bc848eb8ee72d66f9b0f5ff265"
] | [
"tutorial-contents/DimAutoLayout/Lab.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nN_DIMS = 5 # DNA size\nDNA_SIZE = 10 # DNA (real number)\nDNA_BOUND = [0, 20] # solution upper and lower bounds\nN_GENERATIONS = 200\nPOP_SIZE = 100 # population size\nN_KID = 50 # n kids per generation\n\nTargePos=[10,10]\n\ndef MakePnt():\n return np.random.rand(N_DIMS, 2)\n\ndef GetFitness(lens):\n arr=[]\n for len in lens:\n arr.append(1/(len-5))\n return arr\n\n# 获取所有样本的长度\ndef GetLen(xys):\n # 样本所有点到(0,0)的距离\n sum=[]\n for xy in xys:\n xl,yl = xy.reshape((2, 5))\n len=np.sum(np.sqrt((xl-TargePos[0])**2+(yl-TargePos[1])**2))\n sum.append(len)\n return sum\n\ndef get_fitness(pred):\n return pred.flatten()\n\n# 生小孩\ndef make_kid(pop, n_kid):\n # generate empty kid holder\n kids = {'DNA': np.empty((n_kid, DNA_SIZE))}\n kids['mut_strength'] = np.empty_like(kids['DNA'])\n for kv, ks in zip(kids['DNA'], kids['mut_strength']):\n # crossover (roughly half p1 and half p2)\n # 选父母\n p1, p2 = np.random.choice(np.arange(POP_SIZE), size=2, replace=False)\n # 交叉点\n cp = np.random.randint(0, 2, DNA_SIZE, dtype=np.bool) # crossover points\n # 分别选择父母的部分DNA\n kv[cp] = pop['DNA'][p1, cp]\n kv[~cp] = pop['DNA'][p2, ~cp]\n # 合并到一个样本中\n ks[cp] = pop['mut_strength'][p1, cp]\n ks[~cp] = pop['mut_strength'][p2, ~cp]\n\n # 正态分布标准差\n # mutate (change DNA based on normal distribution)\n ks[:] = np.maximum(ks + (np.random.rand(*ks.shape)-0.5), 0.) # must > 0\n # 正态分布\n kv += ks * np.random.randn(*kv.shape)\n # 限制范围\n kv[:] = np.clip(kv, *DNA_BOUND) # clip the mutated value\n return kids\n\n# 移除不好样本\ndef kill_bad(pop, kids):\n # 新老合并\n for key in ['DNA', 'mut_strength']:\n pop[key] = np.vstack((pop[key], kids[key]))\n\n # 获取所有适应度\n lens=GetLen(pop['DNA'])\n fitness = GetFitness(lens) # calculate global fitness\n idx = np.arange(pop['DNA'].shape[0])\n # 递增排列,取后POP_SIZE位\n good_idx = idx[np.argsort(fitness)][-POP_SIZE:] # selected by fitness ranking (not value)\n for key in ['DNA', 'mut_strength']:\n pop[key] = pop[key][good_idx]\n return pop\n\nclass SmartDim(object):\n def __init__(self):\n self.pop = dict(DNA=10 * np.random.rand(1, DNA_SIZE).repeat(POP_SIZE, axis=0), # initialize the pop DNA values\n mut_strength=np.random.rand(POP_SIZE, DNA_SIZE)) # initialize the pop mutation strength values\n\n def Myplotting(self):\n plt.cla()\n\n # plt.scatter(self.city_pos[:, 0].T, self.city_pos[:, 1].T, s=100, c='k')\n # plt.scatter(self.pop.)\n plt.xlim((-0.1, 1.1))\n plt.ylim((-0.1, 1.1))\n plt.pause(0.01)\n\nsd =SmartDim()\nprint(GetLen(sd.pop['DNA']))\n\nfor i in range(N_GENERATIONS):\n\n\n # print(xl)\n # print(yl)\n # print(GetLen(sd.pop['DNA'][i]))\n plt.pause(0.2)\n\n kids = make_kid(sd.pop, N_KID)\n xl,yl = sd.pop['DNA'][1].reshape((2, 5))\n sd.pop = kill_bad(sd.pop,kids)\n if 'sca' in globals(): sca.remove()\n sca = plt.scatter(xl, yl, s=200, lw=0, c='red',alpha=0.5);\n\n# print(sd.pop['DNA'])\nplt.ioff(); plt.show()"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"numpy.clip",
"numpy.arange",
"numpy.empty_like",
"numpy.vstack",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.xlim",
"numpy.random.randn",
"numpy.random.rand",
"numpy.argsort",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"numpy.empty",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yanxp/MetaR-CNN | [
"5a4487e78e4014e18d32e59e6edc4e3bf0f10e16"
] | [
"test_metarcnn.py"
] | [
"# --------------------------------------------------------\n# Pytorch Meta R-CNN\n# Written by Anny Xu, Xiaopeng Yan, based on the code from Jianwei Yang\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport torch\nimport cv2\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections, vis_detections_label_only\n\nfrom matplotlib import pyplot as plt\nimport torch.utils.data as Data\nfrom model.utils.net_utils import weights_normal_init, save_net, load_net, \\\n adjust_learning_rate, save_checkpoint, clip_gradient\n#from tsne import plot_embedding\nimport collections\n\nimport pickle\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Test a Meta R-CNN network')\n # Define Model and data\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset:coco2017,coco,pascal_07_12',\n default='pascal_07_12', type=str)\n parser.add_argument('--net', dest='net',\n help='metarcnn',\n default='metarcnn', type=str)\n # Define testing parameters\n parser.add_argument('--cuda', dest='cuda',\n default=True, type=bool,\n help='whether use CUDA')\n parser.add_argument('--cag', dest='class_agnostic',\n default=False, type=bool,\n help='whether perform class_agnostic bbox regression')\n # Define meta parameters\n parser.add_argument('--meta_test', dest='meta_test', default=False, type=bool,\n help='whether perform meta testing')\n parser.add_argument('--meta_loss', dest='meta_loss', default=False, type=bool,\n help='whether perform adding meta loss')\n parser.add_argument('--shots', dest='shots',\n help='the number of meta input',\n default=1, type=int)\n parser.add_argument('--meta_type', dest='meta_type', default=1, type=int,\n help='choose which sets of metaclass')\n parser.add_argument('--phase', dest='phase',\n help='the phase of training process',\n default=1, type=int)\n # resume trained model\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"exps\",\n type=str)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=3256, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=12, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=21985, type=int)\n # Others\n parser.add_argument('--bs', dest='batch_size',\n help='batch_size',\n default=1, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n parser.add_argument('--save', dest='save_dir',\n help='directory to save logs', default='models',\n type=str)\n args = parser.parse_args()\n return args\n\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = 
cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n args = parse_args()\n\n if args.net == 'metarcnn':\n from model.faster_rcnn.resnet import resnet\n print('Called with args:')\n print(args)\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n np.random.seed(cfg.RNG_SEED)\n if args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n args.imdbval_name = \"coco_2014_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']\n elif args.dataset == \"pascal_voc_0712\":\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']\n # the number of sets of metaclass\n cfg.TRAIN.META_TYPE = args.meta_type\n args.cfg_file = \"cfgs/res101_ms.yml\"\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n cfg.TRAIN.USE_FLIPPED = False\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n input_dir = args.load_dir\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n '{}_{}_{}_{}_{}.pth'.format(args.dataset, str(args.net), args.checksession,\n args.checkepoch, args.checkpoint))\n # initilize the network here.\n if args.net == 'metarcnn':\n fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic, meta_train=False,\n meta_test=args.meta_test, meta_loss=args.meta_loss)\n else:\n print('No module define')\n\n load_name = os.path.join(input_dir,\n '{}_{}_{}_{}_{}.pth'.format(args.dataset, str(args.net), args.checksession,\n args.checkepoch, args.checkpoint))\n fasterRCNN.create_architecture()\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n print('load model successfully!')\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n fasterRCNN.cuda()\n\n start = time.time()\n max_per_image = 100\n\n vis = args.vis\n if vis:\n thresh = 0.5\n else:\n thresh = 0.0001\n\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))\n\n # if meta test\n mean_class_attentions = None\n if args.meta_test:\n print('loading mean class attentions!')\n mean_class_attentions = pickle.load(open(os.path.join('attentions',str(args.phase) + '_shots_' + str(args.shots) + '_mean_class_attentions.pkl'), 'rb'))\n\n save_name = '{}_{}'.format(args.save_dir, args.checkepoch)\n num_images = len(imdb.image_index)\n all_boxes = [[[] for _ in range(num_images)] for _ in range(imdb.num_classes)]\n\n output_dir = get_output_dir(imdb, save_name)\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size,\n imdb.num_classes, training=False, normalize=False)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=0, pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n det_file = os.path.join(output_dir, 'detections.pkl')\n\n for i in 
range(num_images):\n data = next(data_iter)\n im_data_list = []\n im_info_list = []\n gt_boxes_list = []\n num_boxes_list = []\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n # make variable\n im_data = Variable(im_data, volatile=True)\n im_info = Variable(im_info, volatile=True)\n num_boxes = Variable(num_boxes, volatile=True)\n gt_boxes = Variable(gt_boxes, volatile=True)\n im_data.data.resize_(data[0].size()).copy_(data[0])\n im_info.data.resize_(data[1].size()).copy_(data[1])\n gt_boxes.data.resize_(data[2].size()).copy_(data[2])\n num_boxes.data.resize_(data[3].size()).copy_(data[3])\n\n im_data_list.append(im_data)\n im_info_list.append(im_info)\n gt_boxes_list.append(gt_boxes)\n num_boxes_list.append(num_boxes)\n det_tic = time.time()\n rois, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label, cls_prob_list, bbox_pred_list, _ = fasterRCNN(im_data_list, im_info_list, gt_boxes_list,\n num_boxes_list,mean_class_attentions=mean_class_attentions)\n if args.meta_test:\n for clsidx in range(len(cls_prob_list)):\n cls_prob = cls_prob_list[clsidx]\n bbox_pred = bbox_pred_list[clsidx]\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(\n cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(\n cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= data[1][0][2]\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n if clsidx == 0:\n allscores = scores[:, clsidx].unsqueeze(1)\n allpredboxes = pred_boxes[:, (clsidx) * 4:(clsidx + 1) * 4]\n allscores = torch.cat([allscores, scores[:, (clsidx + 1)].unsqueeze(1)], dim=1)\n allpredboxes = torch.cat([allpredboxes, pred_boxes[:, (clsidx + 1) * 4:(clsidx + 2) * 4]], dim=1)\n else:\n allscores = torch.cat([allscores, scores[:, (clsidx + 1)].unsqueeze(1)], dim=1)\n allpredboxes = torch.cat([allpredboxes, pred_boxes[:, (clsidx + 1) * 4:(clsidx + 2) * 4]], dim=1)\n scores = allscores\n pred_boxes = allpredboxes\n else:\n scores = cls_prob_list.data\n boxes = rois.data[:, :, 1:5]\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred_list.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = 
box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n pred_boxes /= data[1][0][2]\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im = cv2.imread(imdb.image_path_at(int(data[4])))\n im2show = np.copy(im)\n for j in range(1, imdb.num_classes):\n inds = torch.nonzero(scores[:, j] > thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections_label_only(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)\n all_boxes[j][i] = cls_dets.cpu().numpy()\n else:\n all_boxes[j][i] = empty_array\n\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in range(1, imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\n'.\n format(i + 1, num_images, detect_time, nms_time))\n sys.stdout.flush()\n\n if vis:\n im_dir = 'vis/' + str(data[4].numpy()[0]) + '_metarcnn.png'\n cv2.imwrite(im_dir, im2show)\n plt.imshow(im2show[:, :, ::-1])\n plt.show()\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n print('Evaluating detections')\n ############################### changed by Anny Xu 2019/1/29 begin################################\n imdb.evaluate_detections(all_boxes, output_dir, **vars(args))\n ############################## end ###########################################################\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n"
] | [
[
"torch.LongTensor",
"matplotlib.pyplot.imshow",
"numpy.random.seed",
"torch.load",
"torch.cat",
"torch.utils.data.DataLoader",
"numpy.tile",
"numpy.sort",
"numpy.copy",
"torch.FloatTensor",
"torch.sort",
"torch.cuda.is_available",
"torch.nonzero",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.show",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sevagh/headbang.py | [
"50b76d4a85a857e879513c2bdf86be7347da332e"
] | [
"headbang/util.py"
] | [
"from madmom.io.audio import load_audio_file\nimport numpy\nimport librosa\n\n\ndef load_wav(wav_in, stereo=False):\n x, fs = load_audio_file(wav_in, sample_rate=44100)\n\n if not stereo:\n # stereo to mono if necessary\n if len(x.shape) > 1 and x.shape[1] == 2:\n x = x.sum(axis=1) / 2\n\n # cast to float\n x = x.astype(numpy.single)\n\n # normalize between -1.0 and 1.0\n x /= numpy.max(numpy.abs(x))\n\n return x\n\n\ndef overlay_clicks(x, beats):\n clicks = librosa.clicks(beats, sr=44100, length=len(x))\n\n if len(x.shape) > 1 and x.shape[1] == 2:\n clicks = numpy.column_stack((clicks, clicks)) # convert to stereo\n\n return (x + clicks).astype(numpy.single)\n\n\ndef find_closest(A, target):\n idx = A.searchsorted(target)\n idx = numpy.clip(idx, 1, len(A) - 1)\n left = A[idx - 1]\n right = A[idx]\n idx -= target - left < right - target\n return idx\n"
] | [
[
"numpy.abs",
"numpy.column_stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
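For reference, a minimal sketch (not part of the record above) of how the find_closest helper in headbang/util.py behaves; the numeric values here are made up purely for illustration.

import numpy as np

# find_closest(A, target) returns, for each target value, the index of the
# nearest element of the sorted array A (same logic as in headbang/util.py).
A = np.array([0.0, 1.0, 2.5, 4.0])        # e.g. detected beat locations, sorted
target = np.array([0.9, 2.6, 3.9])        # query positions

idx = A.searchsorted(target)
idx = np.clip(idx, 1, len(A) - 1)
left, right = A[idx - 1], A[idx]
idx -= target - left < right - target     # step back when the left neighbour is closer
print(idx)                                # -> [1 2 3], i.e. A[1], A[2], A[3]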
UBC-MDS/DMC_Portuguese | [
"6a301c29a558534c455549af8abd52362082c6e9"
] | [
"src/ml_analysis.py"
] | [
"# author: Karlos Muradyan\n# date: 2020-01-24\n\n'''This script does ML analysis by training multiple models, doing hyperparameter\ntuning and reporting the results in a csv file.\n\nUsage: \n ml_analysis.py --train_csv=<train_csv> --test_csv=<test_csv> --output_csv=<output_csv> --output_png=<output_png>\n ml_analysis.py --test_csv=<test_csv> --output_csv=<output_csv>\n ml_analysis.py --train_csv=<train_csv> --output_csv=<output_csv>\n ml_analysis.py --train_csv=<train_csv> --test_csv=<test_csv>\n ml_analysis.py --train_csv=<train_csv>\n ml_analysis.py --test_csv=<test_csv>\n ml_analysis.py --output_csv=<output_csv>\n ml_analysis.py\n\nOptions:\n--train_csv=<train_csv> csv path for training [Default: ./data/clean/bank_train.csv].\n--test_csv=<test_csv> csv path for testing [Default: ./data/clean/bank_test.csv].\n--output_csv=<output_csv> csv path for outputting the result of training and hyperparameter tuning.\n [Default: ./reports/training_report.csv]\n--output_png=<output_png> png path for outputting the result of figure containing all trainings\n [Default: ./reports/training_report.png]\n'''\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix, f1_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nimport lightgbm as lgb\nfrom docopt import docopt\nfrom itertools import accumulate\nimport os\nimport altair as alt\nimport selenium\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\nos.environ[\"PYTHONWARNINGS\"] = \"ignore::UserWarning\"\nnp.random.RandomState(42)\nnp.random.seed(2020)\n\ndef check_filepath(save_dir):\n \"\"\"\n Checks if all subfolders of save_dir exist or not. If not, creates\n\n Parameters\n ----------\n save_dir: str containing the path where the files hould be saved\n\n Returns:\n None\n\n Usage\n -----\n check_filepath('./unknown_dir')\n \"\"\"\n for subdir in accumulate(save_dir.split('/'), lambda x, y: os.path.join(x, y)):\n if not os.path.exists(subdir):\n os.mkdir(subdir)\n print(f\"Directory {subdir} Created \")\n\n\ndef train_lgb(X_train, y_train, X_test, y_test, epochs = 1000, early_stopping = 100):\n \"\"\"\n Function that handles the training process of Lightgbm.\n\n Parameters\n ----------\n X_train: 2D numpy.ndarray containing predictors for training\n y_train: 1D numpy.ndarray containing response for training\n X_test: 2D numpy.ndarray containing predictors for testing\n y_test: 1D numpy.ndarray containing response for testing\n epochs: positive integer specifying number of weak learners in the model\n Default: 1000\n early_stopping: represents number of epochs that is required to pass without\n model improvement to stop the training earlier. 
Default: 100\n\n Returns\n -------\n tuple: (model, parameters, Train F1 score, Test F1 score, Test accuracy, confusion_matrix)\n \n Examples\n --------\n >>>train_lgb(X_train, y_train, X_test, y_test) \n (<lightgbm.basic.Booster at 0x7fd9f06b3470>,\n {'learning_rate': 0.01, 'lambda_l2': 0.5},\n 0.6242424242424243,\n 0.3972602739726027,\n 0.9027624309392265)\n \"\"\"\n # Defining custom f1 score for LightGBM model\n def lgb_f1_score(y_hat, data):\n y_true = data.get_label()\n y_hat = np.round(y_hat) # scikits f1 doesn't like probabilities\n return 'f1', f1_score(y_true, y_hat), True\n\n # Defining validation set\n X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2)\n\n # Creating dataset objects for LightGBM\n lgb_train = lgb.Dataset(X_train, label=y_train)\n lgb_valid = lgb.Dataset(X_valid, label=y_valid)\n\n # For all possible parameters see https://lightgbm.readthedocs.io/en/latest/Parameters.html\n params = {\n \"learning_rate\" : 0.1,\n \"lambda_l1\": 0.5,\n \"max_depth\": 64,\n \"num_leaves\": 32,\n \"bagging_fraction\" : 0.9,\n \"bagging_freq\": 3,\n \"bagging_seed\": 42,\n \"seed\": 42\n }\n\n # Training model\n model = lgb.train(params,\n lgb_train,\n valid_sets=[lgb_train, lgb_valid],\n feval = lgb_f1_score,\n num_boost_round=epochs,\n early_stopping_rounds=early_stopping,\n verbose_eval=False)\n\n # Getting train F1 score\n lgb_train_preds = np.round(model.predict(X_train))\n train_f1 = f1_score(y_train, lgb_train_preds)\n\n # Getting test F1 score\n lgb_test_preds = np.round(model.predict(X_test))\n test_f1 = f1_score(y_test, lgb_test_preds)\n \n # Getting test accuracy\n test_acc = sum(lgb_test_preds == y_test)/len(y_test)\n\n # Getting confusion matrix\n cm = confusion_matrix(y_test, lgb_test_preds)\n\n # Returning whatever is important\n return model, params, train_f1, test_f1, test_acc, [train_f1], [test_f1], cm\n\n\ndef hyperparameter_tuning_and_report(classifier, parameters, X, y, X_test=None, y_test=None, scoring='f1'):\n \"\"\"\n Tunes hyperparameters of given model from the list of parameters.\n \n Uses GridSearchCV to find best hyperparameter. Optionally can\n calculate F1 score and accuracy of test set. Scoring function\n can be changed that GridSearchCV is using. Default scoring function\n is F1.\n\n Parameters\n ----------\n X: 2D numpy.ndarray containing predictors for training\n y: 1D numpy.ndarray containing response for training\n X_test: 2D numpy.ndarray containing predictors for testing. If None, test\n scores will not be computed. Default: None\n y_test: 1D numpy.ndarray containing response for testing. If None, test\n scores will not be computed. Default: None\n scoring: Scoring function used in GridSearchCV. Default is 'f1'. 
For all\n possibilities, please check documentation of GridSearchCV.\n\n Returns\n -------\n tuple: (best model, best parameters, Train F1 score, Test F1 score, Test accuracy, Mean Train scores, Mean Test scores, confusion_matrices)\n \n Examples\n --------\n >>>hyperparameter_tuning_and_report(LogisticRegression(),\n {'penalty': ['l1', 'l2'], 'C': [0.1, 1, 10]},\n X_train, y_train, X_test, y_test)\n (LogisticRegression(C=10, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, l1_ratio=None, max_iter=100,\n multi_class='auto', n_jobs=None, penalty='l2',\n random_state=None, solver='lbfgs', tol=0.0001, verbose=0,\n warm_start=False),\n {'C': 10, 'penalty': 'l2'},\n 0.43209194041441296,\n 0.3776223776223776,\n 0.901657458563536,\n array([ nan, 0.37217253, nan, 0.44838868, nan,\n 0.45292401]),\n array([ nan, 0.34413357, nan, 0.42585911, nan,\n 0.43209194]))\n \"\"\"\n # Find the best model\n try:\n grid_search = GridSearchCV(classifier, parameters, n_jobs=-1, scoring=scoring, return_train_score=True)\n grid_search.fit(X, y)\n except ValueError:\n pass\n \n test_f1 = None\n test_accuracy = None\n\n y_train_pred = grid_search.predict(X)\n train_report = classification_report(y, y_train_pred, output_dict=True)\n\n # Test best model on test set and produce classification report\n if X_test is not None and y_test is not None:\n y_test_pred = grid_search.predict(X_test)\n report = classification_report(y_test, y_test_pred, output_dict=True)\n test_f1 = report['1.0']['f1-score'], \n test_accuracy = report['accuracy'], \n cm = confusion_matrix(y_test, y_test_pred)\n\n # Return whatever is important\n return (grid_search.best_estimator_, \n grid_search.best_params_, \n train_report['1.0']['f1-score'], \n test_f1,\n test_accuracy,\n grid_search.cv_results_['mean_train_score'], \n grid_search.cv_results_['mean_test_score'],\n cm)\n\ndef save_confusion_matrix(model_names, cms, save_path):\n \"\"\"\n Creates and saves confusion matrix figures containing all models\n mentioned in the input arrays.\n\n Parameters\n ----------\n model_names: array containing names of the models tested\n csm: confusion matrices the models trained\n save_path: str showing the filepath, where the figure should be saved\n\n Returns\n -------\n None\n \"\"\"\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(16, 10))\n\n (ax1, ax2), (ax3, ax4) = axes\n\n cols = ListedColormap([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]])\n\n im1 = ax1.matshow(cms[0], cmap=cols)\n im2 = ax2.matshow(cms[1], cmap=cols)\n im3 = ax3.matshow(cms[2], cmap=cols)\n im4 = ax4.matshow(cms[3], cmap=cols)\n\n ax = (ax1, ax2, ax3, ax4)\n\n for i, model_name in enumerate(model_names):\n\n for y in range(cms[i].shape[0]):\n for x in range(cms[i].shape[1]):\n ax[i].text(x, y, f'{cms[i][y, x]}',\n horizontalalignment='center',\n verticalalignment='center',\n fontdict = {'size': 16, 'weight': 'bold'}\n )\n ax[i].set_xticklabels(['', 'Predicted 0', 'Predicted 1'])\n ax[i].set_yticklabels(['', 'Actual 0', 'Actual 1'])\n\n ax[i].title.set_text(f'Confusion matrix of {model_name}')\n # for y in range(a.shape[0]):\n # for x in range(a.shape[1]):\n # ax2.text(x, y, '%.4f' % a[y, x],\n # horizontalalignment='center',\n # verticalalignment='center',\n # )\n # \n # for y in range(a.shape[0]):\n # for x in range(a.shape[1]):\n # ax3.text(x, y, '%.4f' % a[y, x],\n # horizontalalignment='center',\n # verticalalignment='center',\n # )\n\n # for y in range(a.shape[0]):\n # for x in range(a.shape[1]):\n # ax4.text(x, y, '%.4f' % a[y, x],\n # 
horizontalalignment='center',\n # verticalalignment='center',\n # )\n\n # model_name = 'aa'\n # \n # ax1.title.set_text(f'confusion matrix of {model_name}')\n # ax2.title.set_text(f'confusion matrix of {model_name}')\n # ax3.title.set_text(f'confusion matrix of {model_name}')\n # ax4.title.set_text(f'confusion matrix of {model_name}') \n\n check_filepath(save_path.rsplit('/', 1)[0])\n\n fig.savefig(save_path)\n\ndef generate_csv_and_figure_reports(arr, csv_filepath, figure_filepath):\n \"\"\"\n Generates csv report from the results obtained from hyperparameter tuning.\n\n Given the array of results of hyperparameter_tuning_and_report for different\n models, generates csv file and saves in the given path.\n\n Parameters\n ----------\n arr: 1D array containing a tuple returned from hyperparameter_tuning_and_report or\n train_lgb() function.\n csv_filepath: str containing path where the report should be saved.\n figure_filepath: str containing path where the figure report should be saved. Should have extension .png\n\n Returns\n -------\n None\n\n Notes:\n Please be sure that arr is array of tuples.\n \"\"\"\n names = []\n best_params = []\n train_f1s = []\n test_f1s = []\n test_accuracies = []\n\n train_score_results = []\n test_score_results = []\n score_res_names = []\n\n cms = []\n\n # Gathering names of the models and other information\n for model in tqdm(arr):\n names.append(model[0].__class__.__name__)\n best_params.append(model[1])\n train_f1s.append(model[2])\n test_f1s.append(model[3])\n test_accuracies.append(model[4])\n cms.append(model[7].astype(int))\n \n if model[0].__class__.__name__ != 'Booster':\n train_score_results.extend(model[5])\n test_score_results.extend(model[6])\n score_res_names.extend([model[0].__class__.__name__]*len(model[5]))\n\n # Creating dataframe from the results\n csv_report = pd.DataFrame({'Model name': names,\n 'Best parameters': best_params,\n 'Train F1': train_f1s,\n 'Test F1': test_f1s,\n 'Test accuracies': test_accuracies,\n 'Confusion matrix': cms})\n \n # Creating dataframe from the results for figure generation\n figure_report = pd.DataFrame({'models': score_res_names,\n 'train_scores': train_score_results,\n 'test_scores': test_score_results})\n\n # Check for existance of a filepath\n check_filepath(csv_filepath.rsplit('/', 1)[0])\n check_filepath(figure_filepath.rsplit('/', 1)[0])\n\n # Saving the report\n csv_report.to_csv(csv_filepath)\n\n # Saving figure\n alt.Chart(figure_report).mark_circle(size=100).encode(\n x = alt.X('test_scores',\n axis = alt.Axis(title='Cross-Val Test F1 score'),\n scale = alt.Scale(domain = (0., 1.))),\n y = alt.Y('train_scores',\n axis = alt.Axis(title='Cross-Val Train F1 score'),\n scale = alt.Scale(domain=(0., 1.))),\n color = 'models').properties(\n title = 'Train and Test scores of all methods tested').\\\n configure_axis(\n labelFontSize=15,\n titleFontSize=15\n ).\\\n configure_legend(labelFontSize = 15,\n titleFontSize=15).\\\n configure_title(fontSize=20).\\\n save(figure_filepath)\n # Saving confusion matrix\n save_confusion_matrix(names, cms, './reports/confusion_matrix.png')\n\n\ndef read_data_and_split(train_csv_path = '../data/clean/bank_train.csv',\n test_csv_path = '../data/clean/bank_test.csv'):\n \"\"\"\n Reads the data from the given paths and returns predictors and response\n variables separately for train and test sets\n\n Parameters\n ----------\n train_csv_path: str containing the path of train csv. Default: '../data/clean/bank_train.csv'\n test_csv_path: str containing the path of test csv. 
Default: '../data/clean/bank_test.csv'\n\n Returns\n -------\n tuple: (X_train, y_train, X_test, y_test)\n \"\"\"\n try:\n train_ds = pd.read_csv(train_csv_path)\n test_ds = pd.read_csv(test_csv_path)\n except (FileNotFoundError) as e:\n print('Please check train and test filepaths')\n raise(e)\n\n try:\n X_train, y_train = train_ds.drop('y_yes', axis=1), train_ds['y_yes']\n X_test, y_test = test_ds.drop('y_yes', axis=1), test_ds['y_yes']\n except KeyError:\n print('Corrupted csv files. Please check the columns')\n raise KeyError\n\n return X_train, y_train, X_test, y_test\n\ndef main(train_csv, test_csv, output_csv, output_png):\n try:\n X_train, y_train, X_test, y_test = read_data_and_split(train_csv, test_csv)\n except:\n return\n\n # Define models that should be passed to hyperparameter tuning\n models = [LogisticRegression(class_weight = 'balanced', random_state=42),\n SVC(class_weight = 'balanced', random_state=42),\n RandomForestClassifier(class_weight = 'balanced', random_state=42)\n ]\n \n\n # Define parameters that should be tested for each of the models. \n # Note: Be sure that indices of models and its parameters correspond.\n # Note2: Each model should have some valid dictionalry associated with it.\n parameters = [\n [{'solver': ['saga', 'liblinear'], 'penalty': ['l1', 'l2'], 'C': [0.01, 0.1, 1, 10]},\n {'solver': ['lbfgs', 'newton-cg', 'sag'], 'penalty': ['l2'], 'C': [0.01, 0.1, 1, 10]}],\n [{'C': [0.01, 0.1, 1, 10], 'kernel': ['rbf']},\n {'C': [0.01, 0.1, 1, 10], 'kernel': ['poly'], 'degree': [2, 3, 4]}],\n {'n_estimators': [25, 50, 75], 'max_depth': [None, 16, 32], 'criterion': ['gini', 'entropy']}\n ]\n\n if len(models) != len(parameters):\n print('Check models and corresponding parameters. Each model should have a dictionary of parameters to test.')\n return\n\n # Performing hyperparameter tuning and getting reports of each of the models\n # defined above\n all_results = []\n for model_id, model in enumerate(tqdm(models)):\n model_params = parameters[model_id]\n res = hyperparameter_tuning_and_report(model, model_params, X_train, y_train, X_test, y_test)\n all_results.append(res)\n\n # Training LightGBM model and appending results to other results\n lgb_model_res = train_lgb(X_train, y_train, X_test, y_test)\n all_results.append(lgb_model_res)\n\n # Generating and saving model results\n generate_csv_and_figure_reports(all_results, output_csv, output_png)\n\nif __name__ == '__main__':\n opt = docopt(__doc__)\n main(opt[\"--train_csv\"], opt[\"--test_csv\"], opt[\"--output_csv\"], opt[\"--output_png\"])\n"
] | [
[
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"numpy.random.seed",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.round",
"matplotlib.colors.ListedColormap",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"numpy.random.RandomState",
"sklearn.metrics.classification_report"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
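As a quick, self-contained illustration of the grid-search pattern used in src/ml_analysis.py above (F1 scoring, class-balanced models, return_train_score), here is a sketch on synthetic data; the dataset and parameter grid are placeholders, not the project's own.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, train_test_split

# Synthetic, imbalanced binary problem standing in for the bank-marketing data.
X, y = make_classification(n_samples=500, weights=[0.9, 0.1], random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)

grid = GridSearchCV(
    LogisticRegression(class_weight="balanced", max_iter=1000),
    {"C": [0.01, 0.1, 1, 10]},
    scoring="f1",
    return_train_score=True,
)
grid.fit(X_tr, y_tr)
print(grid.best_params_, grid.score(X_te, y_te))   # best C and test-set F1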
leidix/images-to-osm | [
"5375d69c0f78aba9c153b0ada7ef7d6b8fb87b13"
] | [
"osmmodelconfig.py"
] | [
"import sys\nsys.path.append(\"Mask_RCNN\")\n\nimport os\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport random\nimport glob\nimport skimage\n\nfrom config import Config\nimport imagestoosm.config as osmcfg\nimport utils\nimport model as modellib\nimport visualize\nfrom model import log\n\nfeatureNames = {\n \"baseball\":1,\n \"basketball\":2,\n \"tennis\":3\n# \"american_football\":4,\n# \"soccer\":5,\n}\n\nclass OsmModelConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"OSM Images Baseball,Basketball,Tennis\"\n\n # Batch size is (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 2\n LEARNING_RATE = 0.001\n\n # 2 minutes\n #STEPS_PER_EPOCH = 100 // IMAGES_PER_GPU\n\n # 1 hour epoch\n STEPS_PER_EPOCH = 12000 // IMAGES_PER_GPU\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + len(featureNames) # background + featureType's\n\n # Each tile is 256 pixels across, training data is 3x3 tiles\n TILES=3\n IMAGE_MIN_DIM = 256*TILES\n IMAGE_MAX_DIM = 256*TILES\n\n MINI_MASK_SHAPE = (128, 128) \n #MASK_SHAPE = (IMAGE_MIN_DIM, IMAGE_MIN_DIM) \n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n #TRAIN_ROIS_PER_IMAGE = 64\n #DETECTION_MAX_INSTANCES = 64\n\n VALIDATION_STEPS = 100\n\nclass OsmImagesDataset(utils.Dataset):\n\n def __init__(self, rootDir):\n utils.Dataset.__init__(self)\n self.ROOT_DIR = rootDir\n\n def load(self, imageDirs, height, width):\n \"\"\"Generate the requested number of synthetic images.\n count: number of images to generate.\n height, width: the size of the generated images.\n \"\"\"\n\n for feature in featureNames:\n self.add_class(\"osm\", featureNames[feature],feature)\n\n # Add images\n for i in range(len(imageDirs)):\n imgPath = os.path.join( self.ROOT_DIR, osmcfg.trainDir,imageDirs[i],imageDirs[i] + \".jpg\")\n self.add_image(\"osm\", image_id=imageDirs[i], path=imgPath, width=width, height=height)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for shapes of the given image ID.\n \"\"\"\n info = self.image_info[image_id]\n\n imgDir = os.path.join( self.ROOT_DIR, osmcfg.trainDir,info['id'])\n wildcard = os.path.join( imgDir,\"*.png\")\n\n # 00015-american_football-0.png 00015-baseball-0.png 00015-baseball-1.png 00015-baseball-2.png 00015-baseball-3.png 00015-basketball-0.png 00015-basketball-1.png 00015.jpg 00015.txt\n\n maskCount = 0\n for filePath in glob.glob(wildcard): \n filename = os.path.split(filePath)[1] \n parts = filename.split( \"-\")\n if ( len(parts) == 3) and parts[1] in featureNames: \n maskCount += 1\n\n mask = np.zeros([info['height'], info['width'], maskCount], dtype=np.uint8)\n class_ids = np.zeros((maskCount), np.int32)\n \n count = 0\n for filePath in glob.glob(wildcard): \n filename = os.path.split(filePath)[1] \n parts = filename.split( \"-\")\n if ( len(parts) == 3) and parts[1] in featureNames: \n imgPath = filePath\n mask[:, :, count] = skimage.io.imread(filePath)\n class_ids[count] = featureNames[parts[1]]\n count += 1 \n \n return mask, class_ids\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LM095/DDPG-implementation | [
"fc7ca3454520f925d4973dbef57728e139fc3c64"
] | [
"utils.py"
] | [
"import numpy as np\nimport gym\nfrom collections import deque\nimport random\n\n# Ornstein-Ulhenbeck Process\n# Taken from #https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py\nclass OUNoise(object):\n def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n self.action_dim = action_space.shape[0]\n self.low = action_space.low\n self.high = action_space.high\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.action_dim) * self.mu\n\n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)\n self.state = x + dx\n return self.state\n\n def get_action(self, action, t=0):\n ou_state = self.evolve_state()\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)\n return np.clip(action + ou_state, self.low, self.high)\n\n\n# https://github.com/openai/gym/blob/master/gym/core.py\nclass NormalizedEnv(gym.ActionWrapper):\n \"\"\" Wrap action \"\"\"\n\n def _action(self, action):\n act_k = (self.action_space.high - self.action_space.low)/ 2.\n act_b = (self.action_space.high + self.action_space.low)/ 2.\n return act_k * action + act_b\n\n def _reverse_action(self, action):\n act_k_inv = 2./(self.action_space.high - self.action_space.low)\n act_b = (self.action_space.high + self.action_space.low)/ 2.\n return act_k_inv * (action - act_b)\n\n\nclass ReplayBuffer:\n def __init__(self, max_size):\n self.max_size = max_size\n self.buffer = deque(maxlen=max_size)\n\n def push(self, state, action, reward, next_state, done):\n experience = (state, action, np.array([reward]), next_state, done)\n self.buffer.append(experience)\n\n def sample(self, batch_size):\n state_batch = []\n action_batch = []\n reward_batch = []\n next_state_batch = []\n done_batch = []\n\n batch = random.sample(self.buffer, batch_size)\n\n for experience in batch:\n state, action, reward, next_state, done = experience\n state_batch.append(state)\n action_batch.append(action)\n reward_batch.append(reward)\n next_state_batch.append(next_state)\n done_batch.append(done)\n\n return state_batch, action_batch, reward_batch, next_state_batch, done_batch\n\n def __len__(self):\n return len(self.buffer)\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.random.randn",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
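A minimal usage sketch (not from the repository) for the OUNoise and ReplayBuffer classes in the record above, assuming the classic gym API where env.reset() returns only the observation and env.step() returns four values; the environment id and loop length are arbitrary choices for illustration.

import gym
from utils import OUNoise, ReplayBuffer   # the module shown in this record

env = gym.make("Pendulum-v0")
noise = OUNoise(env.action_space)
buffer = ReplayBuffer(max_size=100_000)

state = env.reset()
noise.reset()
for t in range(200):
    action = env.action_space.sample()      # stand-in for the actor network's output
    action = noise.get_action(action, t)    # add OU exploration noise, clipped to action bounds
    next_state, reward, done, _ = env.step(action)
    buffer.push(state, action, reward, next_state, done)
    state = next_state
    if done:
        break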
Stanford-ILIAD/reward-learning-scale-feedback | [
"6f8a670444080d62f785b2dfb3f47081a4da616c"
] | [
"models.py"
] | [
"from simulator import DrivingSimulation, FetchSimulation\nimport numpy as np\n\n\n\nclass Driver(DrivingSimulation):\n \"\"\"\n Original Driver model from 'Asking easy questions: A user-friendly approach to active reward learning'\n Bıyık, E., Palan, M., Landolfi, N. C., Losey, D. P., & Sadigh, D. (2019).arXiv preprint arXiv:1910.04365.\n \"\"\"\n def __init__(self, total_time=50, recording_time=[0,50]):\n super(Driver ,self).__init__(name='driver', total_time=total_time, recording_time=recording_time)\n self.ctrl_size = 10\n self.state_size = 0\n self.feed_size = self.ctrl_size + self.state_size\n self.ctrl_bounds = [(-1,1)]*self.ctrl_size\n self.state_bounds = []\n self.feed_bounds = self.state_bounds + self.ctrl_bounds\n self.num_of_features = 4\n\n def get_features(self):\n recording = self.get_recording(all_info=False)\n recording = np.array(recording)\n # staying in lane (higher is better)\n staying_in_lane = np.mean(np.exp(-30 * np.min(\n [np.square(recording[:, 0, 0] - 0.17), np.square(recording[:, 0, 0]), np.square(recording[:, 0, 0] + 0.17)],\n axis=0))) / 0.15343634\n # keeping speed (lower is better)\n # keeping_speed = np.mean(np.square(recording[:, 0, 3] - 1)) / 0.42202643\n keeping_speed = np.mean(np.square(recording[:, 0, 3] - 1)) / 0.42202643\n # heading (higher is better)\n heading = np.mean(np.sin(recording[:, 0, 2])) / 0.06112367\n # collision avoidance (lower is better)\n collision_avoidance = np.mean(np.exp(-(7 * np.square(recording[:, 0, 0] - recording[:, 1, 0]) + 3 * np.square(\n recording[:, 0, 1] - recording[:, 1, 1])))) / 0.15258019\n return [staying_in_lane, keeping_speed, heading, collision_avoidance]\n\n @property\n def state(self):\n return [self.robot.x, self.human.x]\n @state.setter\n def state(self, value):\n self.reset()\n self.initial_state = value.copy()\n\n def set_ctrl(self, value):\n arr = [[0]*self.input_size]*self.total_time\n interval_count = len(value)//self.input_size\n interval_time = int(self.total_time / interval_count)\n arr = np.array(arr).astype(float)\n j = 0\n for i in range(interval_count):\n arr[i*interval_time:(i+1)*interval_time] = [value[j], value[j+1]]\n j += 2\n self.ctrl = list(arr)\n\n def feed(self, value):\n ctrl_value = value[:]\n self.set_ctrl(ctrl_value)\n\n def get_cost_given_input(self, input):\n \"\"\"\n\n :param input:\n :param weight:\n :return:\n \"\"\"\n self.feed(list(input))\n features = np.array(self.get_features())\n return -np.dot(self.weights, features) # minus as we want to maximize\n\n def find_optimal_path(self, weights):\n \"\"\"\n New function to numerically find an optimal trajectory given weights\n Note: Using a generic numerical solver can lead to suboptimal solutions.\n :param weights:\n :param lb_input:\n :param ub_input:\n :return: optimal_controls, path_features, path_cost\n \"\"\"\n from scipy.optimize import minimize\n self.weights = weights[0:self.num_of_features]\n lb_input = [x[0] for x in self.feed_bounds]\n ub_input = [x[1] for x in self.feed_bounds]\n random_start = [0] * self.feed_size\n random_start = np.random.rand(self.feed_size)\n bounds = np.transpose([lb_input, ub_input])\n res = minimize(self.get_cost_given_input, x0=random_start, bounds=bounds, method='L-BFGS-B')\n self.feed(list(res.x))\n features = np.array(self.get_features())\n controls = res.x\n return controls, features, -res.fun\n\nclass DriverExtended(Driver):\n \"\"\"\n Extended 10 dimensional driver model\n \"\"\"\n def __init__(self, total_time=50, recording_time=[0,50]):\n super(Driver 
,self).__init__(name='driverextended', total_time=total_time, recording_time=recording_time)\n self.ctrl_size = 10\n self.state_size = 0\n self.feed_size = self.ctrl_size + self.state_size\n self.ctrl_bounds = [(-1,1)]*self.ctrl_size\n self.state_bounds = []\n self.feed_bounds = self.state_bounds + self.ctrl_bounds\n self.num_of_features = 10\n\n def get_features(self):\n recording = self.get_recording(all_info=False)\n recording = np.array(recording)\n # staying in lane (higher is better)\n staying_in_lane = np.mean(np.exp(-30*np.min([np.square(recording[:,0,0]-0.17), np.square(recording[:,0,0]), np.square(recording[:,0,0]+0.17)], axis=0))) / 0.15343634\n\n # keeping speed (lower is better)\n keeping_speed = -np.mean(np.square(recording[:,0,3]-1)) / 0.42202643\n\n # heading (higher is better)\n heading = np.mean(np.sin(recording[:,0,2])) / 0.06112367\n\n # collision avoidance (lower is better)\n collision_avoidance = -np.mean(np.exp(-(7*np.square(recording[:,0,0]-recording[:,1,0])+3*np.square(recording[:,0,1]-recording[:,1,1])))) / 0.15258019\n\n # min collision avoidance over time (lower is better)\n min_collision_avoidance = -np.max(np.exp(-(7*np.square(recording[:,0,0]-recording[:,1,0])+3*np.square(recording[:,0,1]-recording[:,1,1])))) / 0.10977646\n\n # average jerk (lower is better)\n acceleration = recording[1:,0,3] - recording[:-1,0,3]\n average_jerk = -np.mean(np.abs(acceleration[1:] - acceleration[:-1])) / 0.00317041\n\n # vertical displacement (higher is better)\n vertical_displacement = (recording[-1,0,1] - recording[0,0,1]) / 1.01818467\n\n\n final_left_lane = (recording[-1, 0, 0] > -.25) and (recording[-1, 0, 0] < -.09)\n final_right_lane = (recording[-1, 0, 0] > .09) and (recording[-1, 0, 0] < .25)\n final_center_lane = (recording[-1, 0, 0] > -.09) and (recording[-1, 0, 0] < .09)\n\n return [staying_in_lane,\n keeping_speed,\n heading,\n collision_avoidance,\n min_collision_avoidance,\n average_jerk,\n vertical_displacement,\n final_left_lane,\n final_right_lane,\n final_center_lane\n ]\n\n\nclass Fetch(FetchSimulation):\n def __init__(self, total_time=1, recording_time=[0,1]):\n super(Fetch ,self).__init__(total_time=total_time, recording_time=recording_time)\n self.ctrl_size = 1\n self.state_size = 0\n self.feed_size = self.ctrl_size + self.state_size\n self.num_of_features = 8\n\n def get_features(self):\n A = np.load('ctrl_samples/fetch.npz')\n return list(A['feature_set'][self.ctrl,:])\n\n @property\n def state(self):\n return 0\n @state.setter\n def state(self, value):\n pass\n\n def set_ctrl(self, value):\n self.ctrl = value\n\n def feed(self, value):\n self.set_ctrl(value)\n"
] | [
[
"numpy.square",
"numpy.dot",
"numpy.abs",
"numpy.sin",
"scipy.optimize.minimize",
"numpy.random.rand",
"numpy.transpose",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
honnorat/skillful_nowcasting | [
"d2c99dd1fef780da349af175262a395faa9b3ad3"
] | [
"skillful_nowcasting/generators.py"
] | [
"import einops\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.pixelshuffle import PixelShuffle\nfrom torch.nn.utils.parametrizations import spectral_norm\nfrom typing import List\nfrom skillful_nowcasting.common import GBlock, UpsampleGBlock\nfrom skillful_nowcasting.layers import ConvGRU\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARN)\n\n\nclass Sampler(torch.nn.Module):\n def __init__(\n self,\n forecast_steps: int = 18,\n latent_channels: int = 768,\n context_channels: int = 384,\n output_channels: int = 1,\n ):\n \"\"\"\n Sampler from the Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf\n\n The sampler takes the output from the Latent and Context conditioning stacks and\n creates one stack of ConvGRU layers per future timestep.\n Args:\n forecast_steps: Number of forecast steps\n latent_channels: Number of input channels to the lowest ConvGRU layer\n \"\"\"\n super().__init__()\n self.forecast_steps = forecast_steps\n\n self.convGRU1 = ConvGRU(\n input_channels=latent_channels + context_channels,\n output_channels=context_channels,\n kernel_size=3,\n )\n self.gru_conv_1x1 = spectral_norm(\n torch.nn.Conv2d(\n in_channels=context_channels, out_channels=latent_channels, kernel_size=(1, 1)\n )\n )\n self.g1 = GBlock(input_channels=latent_channels, output_channels=latent_channels)\n self.up_g1 = UpsampleGBlock(\n input_channels=latent_channels, output_channels=latent_channels // 2\n )\n\n self.convGRU2 = ConvGRU(\n input_channels=latent_channels // 2 + context_channels // 2,\n output_channels=context_channels // 2,\n kernel_size=3,\n )\n self.gru_conv_1x1_2 = spectral_norm(\n torch.nn.Conv2d(\n in_channels=context_channels // 2,\n out_channels=latent_channels // 2,\n kernel_size=(1, 1),\n )\n )\n self.g2 = GBlock(input_channels=latent_channels // 2, output_channels=latent_channels // 2)\n self.up_g2 = UpsampleGBlock(\n input_channels=latent_channels // 2, output_channels=latent_channels // 4\n )\n\n self.convGRU3 = ConvGRU(\n input_channels=latent_channels // 4 + context_channels // 4,\n output_channels=context_channels // 4,\n kernel_size=3,\n )\n self.gru_conv_1x1_3 = spectral_norm(\n torch.nn.Conv2d(\n in_channels=context_channels // 4,\n out_channels=latent_channels // 4,\n kernel_size=(1, 1),\n )\n )\n self.g3 = GBlock(input_channels=latent_channels // 4, output_channels=latent_channels // 4)\n self.up_g3 = UpsampleGBlock(\n input_channels=latent_channels // 4, output_channels=latent_channels // 8\n )\n\n self.convGRU4 = ConvGRU(\n input_channels=latent_channels // 8 + context_channels // 8,\n output_channels=context_channels // 8,\n kernel_size=3,\n )\n self.gru_conv_1x1_4 = spectral_norm(\n torch.nn.Conv2d(\n in_channels=context_channels // 8,\n out_channels=latent_channels // 8,\n kernel_size=(1, 1),\n )\n )\n self.g4 = GBlock(input_channels=latent_channels // 8, output_channels=latent_channels // 8)\n self.up_g4 = UpsampleGBlock(\n input_channels=latent_channels // 8, output_channels=latent_channels // 16\n )\n\n self.bn = torch.nn.BatchNorm2d(latent_channels // 16)\n self.relu = torch.nn.ReLU()\n self.conv_1x1 = spectral_norm(\n torch.nn.Conv2d(\n in_channels=latent_channels // 16,\n out_channels=4 * output_channels,\n kernel_size=(1, 1),\n )\n )\n\n self.depth2space = PixelShuffle(upscale_factor=2)\n\n def forward(\n self, conditioning_states: List[torch.Tensor], latent_dim: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Perform the sampling from Skillful Nowcasting with GANs\n Args:\n conditioning_states: 
Outputs from the `ContextConditioningStack` with the 4 input states, ordered from largest to smallest spatially\n latent_dim: Output from `LatentConditioningStack` for input into the ConvGRUs\n\n Returns:\n forecast_steps-length output of images for future timesteps\n\n \"\"\"\n # Iterate through each forecast step\n # Initialize with conditioning state for first one, output for second one\n init_states = conditioning_states\n # Expand latent dim to match batch size\n latent_dim = einops.repeat(\n latent_dim, \"b c h w -> (repeat b) c h w\", repeat=init_states[0].shape[0]\n )\n hidden_states = [latent_dim] * self.forecast_steps\n\n # Layer 4 (bottom most)\n hidden_states = self.convGRU1(hidden_states, init_states[3])\n hidden_states = [self.gru_conv_1x1(h) for h in hidden_states]\n hidden_states = [self.g1(h) for h in hidden_states]\n hidden_states = [self.up_g1(h) for h in hidden_states]\n\n # Layer 3.\n hidden_states = self.convGRU2(hidden_states, init_states[2])\n hidden_states = [self.gru_conv_1x1_2(h) for h in hidden_states]\n hidden_states = [self.g2(h) for h in hidden_states]\n hidden_states = [self.up_g2(h) for h in hidden_states]\n\n # Layer 2.\n hidden_states = self.convGRU3(hidden_states, init_states[1])\n hidden_states = [self.gru_conv_1x1_3(h) for h in hidden_states]\n hidden_states = [self.g3(h) for h in hidden_states]\n hidden_states = [self.up_g3(h) for h in hidden_states]\n\n # Layer 1 (top-most).\n hidden_states = self.convGRU4(hidden_states, init_states[0])\n hidden_states = [self.gru_conv_1x1_4(h) for h in hidden_states]\n hidden_states = [self.g4(h) for h in hidden_states]\n hidden_states = [self.up_g4(h) for h in hidden_states]\n\n # Output layer.\n hidden_states = [F.relu(self.bn(h)) for h in hidden_states]\n hidden_states = [self.conv_1x1(h) for h in hidden_states]\n hidden_states = [self.depth2space(h) for h in hidden_states]\n\n # Convert forecasts to a torch Tensor\n forecasts = torch.stack(hidden_states, dim=1)\n return forecasts\n\n\nclass Generator(torch.nn.Module):\n def __init__(\n self,\n conditioning_stack: torch.nn.Module,\n latent_stack: torch.nn.Module,\n sampler: torch.nn.Module,\n ):\n \"\"\"\n Wraps the three parts of the generator for simpler calling\n Args:\n conditioning_stack:\n latent_stack:\n sampler:\n \"\"\"\n super().__init__()\n self.conditioning_stack = conditioning_stack\n self.latent_stack = latent_stack\n self.sampler = sampler\n\n def forward(self, x):\n conditioning_states = self.conditioning_stack(x)\n latent_dim = self.latent_stack(x)\n x = self.sampler(conditioning_states, latent_dim)\n return x\n"
] | [
[
"torch.nn.modules.pixelshuffle.PixelShuffle",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xxiao-1/gtsam | [
"8b1516f43ffdf6b5098fc282b566f2ee1edb50f6"
] | [
"python/gtsam/tests/test_Point3.py"
] | [
"\"\"\"\nGTSAM Copyright 2010-2019, Georgia Tech Research Corporation,\nAtlanta, Georgia 30332-0415\nAll Rights Reserved\n\nSee LICENSE for the license information\n\nPoint3 unit tests.\nAuthor: Frank Dellaert & Fan Jiang\n\"\"\"\nimport unittest\n\nimport gtsam\nimport numpy as np\nfrom gtsam.utils.test_case import GtsamTestCase\n\n\nclass TestPoint3(GtsamTestCase):\n \"\"\"Test selected Point3 methods.\"\"\"\n\n def test_constructors(self):\n \"\"\"Test constructors from doubles and vectors.\"\"\"\n expected = gtsam.Point3(1, 2, 3)\n actual = gtsam.Point3(np.array([1, 2, 3]))\n np.testing.assert_array_equal(actual, expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rhayes777/AutoFit | [
"f5d769755b85a6188ec1736d0d754f27321c2f06",
"f5d769755b85a6188ec1736d0d754f27321c2f06"
] | [
"autofit/graphical/utils.py",
"autofit/messages/normal.py"
] | [
"import collections\nfrom enum import Enum\nfrom functools import reduce\nfrom operator import mul\nfrom typing import Iterable, Tuple, TypeVar, Dict, NamedTuple, Optional, Union\nimport warnings\nimport logging\n\nimport numpy as np\nimport six\nfrom scipy.linalg import block_diag\nfrom scipy.optimize import OptimizeResult\nfrom collections import abc\n\nfrom autofit.mapper.variable import Variable, VariableData\n\ndef try_getitem(value, index, default=None):\n try:\n return value[index]\n except TypeError:\n return default\n\nclass LogWarnings(warnings.catch_warnings):\n def __init__(self, *, module=None, messages=None, action=None, logger=logging.warning):\n super().__init__(record=True, module=module)\n self.messages = [] if messages is None else messages\n self.log = []\n self.action = action \n self.logger = logger\n\n def log_warning(self, warn):\n self.log.append(warn)\n warn_message = f\"{warn.filename}:{warn.lineno}: {warn.message}\"\n self.messages.append(warn_message)\n self.logger(warn_message)\n\n def __enter__(self):\n self.log = super().__enter__()\n self._module._showwarnmsg_impl = self.log_warning\n if self.action:\n warnings.simplefilter(self.action)\n\n return self\n \n\ndef is_variable(v, *args):\n return isinstance(v, Variable)\n\n\ndef is_iterable(arg):\n return isinstance(arg, abc.Iterable) and not isinstance(\n arg, six.string_types\n )\n\n\ndef nested_filter(func, *args):\n \"\"\" Iterates through a potentially nested set of list, tuples and dictionaries, \n recursively looping through the structure and returning the arguments\n that func return true on, \n\n Example\n -------\n >>> list(nested_filter(\n ... lambda x, *args: x==2,\n ... [1, (2, 3), [3, 2, {1, 2}]]\n ... ))\n [(2,), (2,), (2,)]\n\n >>> list(nested_filter(\n ... lambda x, *args: x==2,\n ... [1, (2, 3), [3, 2, {1, 2}]],\n ... [1, ('a', 3), [3, 'b', {1, 'c'}]]\n ... ))\n [(2, 'a'), (2, 'b'), (2, 'c')]\n \"\"\"\n out, *_ = args\n if isinstance(out, dict):\n for k in out:\n yield from nested_filter(func, *(out[k] for out in args))\n elif is_iterable(out):\n for elems in zip(*args):\n yield from nested_filter(func, *elems)\n else:\n if func(*args):\n yield args\n\n\ndef nested_update(out, to_replace: dict, replace_keys=False):\n \"\"\"\n Given a potentially nested set of list, tuples and dictionaries, recursively loop through the structure and\n replace any values that appear in the dict to_replace\n can set to replace dictionary keys optionally,\n\n Example\n -------\n >>> nested_update([1, (2, 3), [3, 2, {1, 2}]], {2: 'a'})\n [1, ('a', 3), [3, 'a', {1, 'a'}]]\n\n >>> nested_update([{2: 2}], {2: 'a'})\n [{2: 'a'}]\n\n >>> nested_update([{2: 2}], {2: 'a'}, True)\n [{'a': 'a'}]\n \"\"\"\n try:\n return to_replace[out]\n except KeyError:\n pass\n\n if isinstance(out, dict):\n if replace_keys:\n return type(out)(\n {\n nested_update(k, to_replace, replace_keys): nested_update(\n v, to_replace, replace_keys\n )\n for k, v in out.items()\n }\n )\n else:\n return type(out)(\n {k: nested_update(v, to_replace, replace_keys) for k, v in out.items()}\n )\n elif is_iterable(out):\n return type(out)(nested_update(elem, to_replace, replace_keys) for elem in out)\n\n return out\n\n\nclass StatusFlag(Enum):\n FAILURE = 0\n SUCCESS = 1\n NO_CHANGE = 2\n BAD_PROJECTION = 3\n\n @classmethod\n def get_flag(cls, success, n_iter):\n if success:\n if n_iter > 0:\n return cls.SUCCESS\n else:\n return cls.NO_CHANGE\n\n return cls.FAILURE\n\n\nclass Status(NamedTuple):\n success: bool = True\n messages: Tuple[str, ...] 
= ()\n updated: bool = True\n flag: StatusFlag = StatusFlag.SUCCESS\n\n def __bool__(self):\n return self.success\n\n def __str__(self):\n if self.success:\n return \"Optimisation succeeded\"\n return f\"Optimisation failed: {self.messages}\"\n\n\nclass FlattenArrays(dict):\n \"\"\"\n >>> shapes = FlattenArrays(a=(1, 2), b=(2, 3))\n >>> shapes\n FlattenArrays(a=(1, 2), b=(2, 3))\n >>> shapes.flatten(\n a = np.arange(2).reshape(1, 2),\n b = np.arange(6).reshape(2, 3)**2)\n array([ 0, 1, 0, 1, 4, 9, 16, 25])\n >>> shapes.unflatten(\n [ 0, 1, 0, 1, 4, 9, 16, 25])\n {'a': array([[0, 1]]), 'b': array([[ 0, 1, 4],\n [ 9, 16, 25]])}\n \"\"\"\n\n def __init__(self, dict_: Dict[Variable, Tuple[int, ...]]):\n super().__init__()\n\n self.update(dict_)\n self.splits = np.cumsum([np.prod(s) for s in self.values()], dtype=int)\n self.inds = [\n slice(i0, i1)\n for i0, i1 in\n # np.arange(i0, i1, dtype=int) for i0, i1 in\n zip(np.r_[0, self.splits[:-1]], self.splits)\n ]\n self.sizes = {k: np.prod(s, dtype=int) for k, s in self.items()}\n self.k_inds = dict(zip(self, self.inds))\n\n @classmethod\n def from_arrays(cls, arrays: Dict[str, np.ndarray]) -> \"FlattenArrays\":\n return cls({k: np.shape(arr) for k, arr in arrays.items()})\n\n def flatten(self, arrays_dict: Dict[Variable, np.ndarray]) -> np.ndarray:\n assert all(np.shape(arrays_dict[k]) == shape for k, shape in self.items())\n return np.concatenate([np.ravel(arrays_dict[k]) for k in self.keys()])\n\n def extract(self, key, flat, ndim=None):\n if ndim is None:\n ndim = len(flat.shape)\n\n ind = self.k_inds[key]\n return flat[(ind,) * ndim]\n\n def unflatten(self, arr: np.ndarray, ndim=None) -> Dict[str, np.ndarray]:\n arr = np.asanyarray(arr)\n if ndim is None:\n ndim = arr.ndim\n arrays = [arr[(ind,) * ndim] for ind in self.inds]\n arr_shapes = [arr.shape[ndim:] for arr in arrays]\n return VariableData({\n k: arr.reshape(shape * ndim + arr_shape)\n if shape or arr_shape\n else arr.item()\n for (k, shape), arr_shape, arr in zip(self.items(), arr_shapes, arrays)\n })\n\n def flatten2d(self, values: Dict[Variable, np.ndarray]) -> np.ndarray:\n assert all(np.shape(values[k]) == shape * 2 for k, shape in self.items())\n\n return block_diag(\n *(np.reshape(values[k], (n, n)) for k, n in self.sizes.items())\n )\n\n unflatten2d = unflatten\n\n def __repr__(self):\n shapes = \", \".join(map(\"{0[0]}={0[1]}\".format, self.items()))\n return f\"{type(self).__name__}({shapes})\"\n\n @property\n def size(self):\n return self.splits[-1]\n\n\nclass OptResult(NamedTuple):\n mode: Dict[Variable, np.ndarray]\n hess_inv: Dict[Variable, np.ndarray]\n log_norm: float\n full_hess_inv: np.ndarray\n result: OptimizeResult\n status: Status = Status()\n\n\ndef gen_subsets(n, x, n_iters=None, rng=None):\n \"\"\"\n Generates random subsets of length n of the array x, if the elements of\n x are unique then each subset will not contain repeated elements. Each \n element is guaranteed to reappear after at most 2*len(x) new elements. \n\n If `x` is a multi-dimensional array, it is only shuffled along its\nfirst index.\n\n if x is an integer, generate subsets of ``np.arange(x)``.\n\n generates n_iters subsets before stopping. 
If n_iters is None then\n generates random subsets for ever\n\n rng is an optionally passed random number generator\n\n Examples\n --------\n >>> list(gen_subsets(3, 5, n_iters=3))\n [array([0, 2, 3]), array([1, 4, 0]), array([2, 3, 4])]\n >>> list(gen_subsets(3, [1,10,5,3], n_iters=3))\n [array([ 5, 10, 1]), array([3, 5, 1]), array([10, 3, 5])]\n \"\"\"\n rng = rng or np.random.default_rng()\n x_shuffled = rng.permutation(x)\n tot = len(x_shuffled)\n\n i = 0 \n stop = tot - n + 1\n iters = iter(int, 1) if n_iters is None else range(n_iters)\n for j in iters:\n if i < stop:\n yield x_shuffled[i : i + n]\n i += n\n else:\n x_shuffled = np.r_[x_shuffled[i:], rng.permutation(x_shuffled[:i])]\n yield x_shuffled[:n]\n i = n\n\ndef gen_dict(dict_gen):\n \"\"\"\n Examples\n --------\n >>> list(gen_dict({1: gen_subsets(3, 4, 3), 2: gen_subsets(2, 5, 3)}))\n [{1: array([2, 1, 3]), 2: array([2, 0])},\n {1: array([0, 3, 1]), 2: array([3, 1])},\n {1: array([2, 0, 1]), 2: array([4, 2])}]\n \"\"\"\n keys = tuple(dict_gen.keys())\n for val in zip(*dict_gen.values()):\n yield dict(zip(keys, val))\n\n\n_M = TypeVar(\"_M\")\n\n\ndef prod(iterable: Iterable[_M], *arg: Tuple[_M]) -> _M:\n \"\"\"calculates the product of the passed iterable,\n much like sum, if a second argument is passed,\n this is the initial value of the calculation\n\n Examples\n --------\n >>> prod(range(1, 3))\n 2\n\n >>> prod(range(1, 3), 2.)\n 4.\n \"\"\"\n iterable = list(iterable)\n return reduce(mul, iterable, *arg)\n\n\ndef r2_score(y_true, y_pred, axis=None):\n y_true = np.asanyarray(y_true)\n y_pred = np.asanyarray(y_pred)\n\n mse = np.square(y_true - y_pred).mean(axis=axis)\n var = y_true.var(axis=axis)\n\n return 1 - mse / var\n\n\ndef propagate_uncertainty(cov: np.ndarray, jac: np.ndarray) -> np.ndarray:\n \"\"\"Propagates the uncertainty of a covariance matrix given the\n passed Jacobian\n\n If the variable arrays are multidimensional then will output in\n the shape of the arrays\n\n see https://en.wikipedia.org/wiki/Propagation_of_uncertainty\n \"\"\"\n cov = np.asanyarray(cov)\n\n var_ndim = cov.ndim // 2\n det_ndim = jac.ndim - var_ndim\n det_shape, var_shape = jac.shape[:det_ndim], jac.shape[det_ndim:]\n assert var_shape == cov.shape[:var_ndim] == cov.shape[var_ndim:]\n\n var_size = np.prod(var_shape, dtype=int)\n det_size = np.prod(det_shape, dtype=int)\n\n cov2d = cov.reshape((var_size, var_size))\n jac2d = jac.reshape((det_size, var_size))\n\n det_cov2d = np.linalg.multi_dot((jac2d, cov2d, jac2d.T))\n det_cov = det_cov2d.reshape(det_shape + det_shape)\n return det_cov\n\n\ndef rescale_to_artists(artists, ax=None):\n import matplotlib.pyplot as plt\n\n ax = ax or plt.gca()\n while True:\n r = ax.figure.canvas.get_renderer()\n extents = [\n t.get_window_extent(renderer=r).transformed(ax.transData.inverted())\n for t in artists\n ]\n min_extent = np.min([e.min for e in extents], axis=0)\n max_extent = np.max([e.max for e in extents], axis=0)\n min_lim, max_lim = zip(ax.get_xlim(), ax.get_ylim())\n\n # Sometimes the window doesn't always rescale first time around\n if (min_extent < min_lim).any() or (max_extent > max_lim).any():\n extent = max_extent - min_extent\n max_extent += extent * 0.05\n min_extent -= extent * 0.05\n xlim, ylim = zip(\n np.minimum(min_lim, min_extent), np.maximum(max_lim, max_extent)\n )\n ax.set_xlim(*xlim)\n ax.set_ylim(*ylim)\n else:\n break\n\n return xlim, ylim\n\n\n# These may no longer be needed?\ndef add_arrays(*arrays: np.ndarray) -> np.ndarray:\n \"\"\"Sums over broadcasting 
multidimensional arrays\n    whilst preserving the total sum\n\n    a = np.arange(10).reshape(1, 2, 1, 5)\n    b = np.arange(8).reshape(2, 2, 2, 1)\n\n    >>> add_arrays(a, b).sum()\n    73.0\n    >>> add_arrays(a, b).shape\n    (2, 2, 2, 5)\n    >>> a.sum() + b.sum()\n    73\n    \"\"\"\n    b = np.broadcast(*arrays)\n    return sum(a * np.size(a) / b.size for a in arrays)\n\n\nAxis = Optional[Union[bool, int, Tuple[int, ...]]]\n\n\ndef aggregate(array: np.ndarray, axis: Axis = None, **kwargs) -> np.ndarray:\n    \"\"\"\n    aggregates the values of array\n\n    if axis is False then aggregate returns the unmodified array\n\n    otherwise aggregate returns np.sum(array, axis=axis, **kwargs)\n    \"\"\"\n    if axis is False:\n        return array\n    \n    return np.sum(array, axis=axis, **kwargs)\n\n\ndef diag(array: np.ndarray, *ds: Tuple[int, ...]) -> np.ndarray:\n    array = np.asanyarray(array)\n    d1 = array.shape\n    if ds:\n        ds = (d1,) + ds\n    else:\n        ds = (d1, d1)\n\n    out = np.zeros(sum(ds, ()))\n    diag_inds = tuple(map(np.ravel, (i for d in ds for i in np.indices(d))))\n    out[diag_inds] = array.ravel()\n    return out\n",
"import math\nfrom typing import Tuple, Union\n\nimport numpy as np\nfrom scipy.special.cython_special import erfcinv\nfrom scipy.stats import norm\n\nfrom autoconf import cached_property\nfrom autofit.mapper.operator import LinearOperator\nfrom autofit.messages.abstract import AbstractMessage\nfrom .transform import (\n phi_transform,\n log_transform,\n multinomial_logit_transform,\n log_10_transform,\n)\nfrom .. import exc\n\n\ndef is_nan(value):\n is_nan_ = np.isnan(value)\n if isinstance(is_nan_, np.ndarray):\n is_nan_ = is_nan_.all()\n return is_nan_\n\n\nclass NormalMessage(AbstractMessage):\n @cached_property\n def log_partition(self):\n eta1, eta2 = self.natural_parameters\n return -(eta1 ** 2) / 4 / eta2 - np.log(-2 * eta2) / 2\n\n log_base_measure = -0.5 * np.log(2 * np.pi)\n _support = ((-np.inf, np.inf),)\n _parameter_support = ((-np.inf, np.inf), (0, np.inf))\n\n def __init__(\n self,\n mean,\n sigma,\n lower_limit=-math.inf,\n upper_limit=math.inf,\n log_norm=0.0,\n id_=None,\n ):\n if (np.array(sigma) < 0).any():\n raise exc.MessageException(\"Sigma cannot be negative\")\n\n # _is_nan = np.isnan(sigma)\n # if isinstance(_is_nan, np.ndarray):\n # _is_nan = _is_nan.all()\n # if _is_nan:\n # raise exc.MessageException(\n # \"nan parameter passed to NormalMessage\"\n # )\n\n super().__init__(\n mean,\n sigma,\n log_norm=log_norm,\n lower_limit=lower_limit,\n upper_limit=upper_limit,\n id_=id_,\n )\n self.mean, self.sigma = self.parameters\n\n def cdf(self, x):\n return norm.cdf(x, loc=self.mean, scale=self.sigma)\n\n def ppf(self, x):\n return norm.ppf(x, loc=self.mean, scale=self.sigma)\n\n @cached_property\n def natural_parameters(self):\n return self.calc_natural_parameters(self.mean, self.sigma)\n\n @staticmethod\n def calc_natural_parameters(mu, sigma):\n precision = sigma ** -2\n return np.array([mu * precision, -precision / 2])\n\n @staticmethod\n def invert_natural_parameters(natural_parameters):\n eta1, eta2 = natural_parameters\n mu = -0.5 * eta1 / eta2\n sigma = np.sqrt(-0.5 / eta2)\n return mu, sigma\n\n @staticmethod\n def to_canonical_form(x):\n return np.array([x, x ** 2])\n\n @classmethod\n def invert_sufficient_statistics(cls, suff_stats):\n m1, m2 = suff_stats\n sigma = np.sqrt(m2 - m1 ** 2)\n return cls.calc_natural_parameters(m1, sigma)\n\n @cached_property\n def variance(self):\n return self.sigma ** 2\n\n def sample(self, n_samples=None):\n if n_samples:\n x = np.random.randn(n_samples, *self.shape)\n if self.shape:\n return x * self.sigma[None, ...] 
+ self.mean[None, ...]\n else:\n x = np.random.randn(*self.shape)\n\n return x * self.sigma + self.mean\n\n def kl(self, dist):\n return (\n np.log(dist.sigma / self.sigma)\n + (self.sigma ** 2 + (self.mean - dist.mean) ** 2) / 2 / dist.sigma ** 2\n - 1 / 2\n )\n\n @classmethod\n def from_mode(\n cls, mode: np.ndarray, covariance: Union[float, LinearOperator] = 1.0, **kwargs\n ):\n if isinstance(covariance, LinearOperator):\n variance = covariance.diagonal()\n else:\n mode, variance = cls._get_mean_variance(mode, covariance)\n return cls(mode, np.abs(variance) ** 0.5, **kwargs)\n\n def _normal_gradient_hessian(\n self, x: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n # raise Exception\n shape = np.shape(x)\n if shape:\n x = np.asanyarray(x)\n deltax = x - self.mean\n hess_logl = -self.sigma ** -2\n grad_logl = deltax * hess_logl\n eta_t = 0.5 * grad_logl * deltax\n logl = self.log_base_measure + eta_t - np.log(self.sigma)\n\n if shape[1:] == self.shape:\n hess_logl = np.repeat(\n np.reshape(hess_logl, (1,) + np.shape(hess_logl)), shape[0], 0\n )\n\n else:\n deltax = x - self.mean\n hess_logl = -self.sigma ** -2\n grad_logl = deltax * hess_logl\n eta_t = 0.5 * grad_logl * deltax\n logl = self.log_base_measure + eta_t - np.log(self.sigma)\n\n return logl, grad_logl, hess_logl\n\n def logpdf_gradient(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return self._normal_gradient_hessian(x)[:2]\n\n def logpdf_gradient_hessian(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return self._normal_gradient_hessian(x)\n\n __name__ = \"gaussian_prior\"\n\n __default_fields__ = (\"log_norm\", \"id_\")\n\n def value_for(self, unit):\n \"\"\"\n Parameters\n ----------\n unit: Float\n A unit hypercube value between 0 and 1\n Returns\n -------\n value: Float\n A value for the attribute biased to the gaussian distribution\n \"\"\"\n return self.mean + (self.sigma * math.sqrt(2) * erfcinv(2.0 * (1.0 - unit)))\n\n def log_prior_from_value(self, value):\n \"\"\"\n Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a\n posterior as log_prior + log_likelihood.\n This is used by Emcee in the log likelihood function evaluation.\n Parameters\n ----------\n value\n The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample.\n \"\"\"\n return (value - self.mean) ** 2.0 / (2 * self.sigma ** 2.0)\n\n def __str__(self):\n \"\"\"\n The line of text describing this prior for the model_mapper.info file\n \"\"\"\n return f\"GaussianPrior, mean = {self.mean}, sigma = {self.sigma}\"\n\n def __repr__(self):\n return (\n \"<GaussianPrior id={} mean={} sigma={} \"\n \"lower_limit={} upper_limit={}>\".format(\n self.id, self.mean, self.sigma, self.lower_limit, self.upper_limit\n )\n )\n\n\nclass NaturalNormal(NormalMessage):\n \"\"\"Identical to the NormalMessage but allows non-normalised values,\n e.g negative or infinite variances\n \"\"\"\n _parameter_support = ((-np.inf, np.inf), (-np.inf, 0))\n\n def __init__(\n self,\n eta1,\n eta2,\n lower_limit=-math.inf,\n upper_limit=math.inf,\n log_norm=0.0,\n id_=None,\n ):\n AbstractMessage.__init__(\n self,\n eta1,\n eta2,\n log_norm=log_norm,\n lower_limit=lower_limit,\n upper_limit=upper_limit,\n id_=id_,\n )\n\n @cached_property\n def sigma(self):\n precision = -2 * self.parameters[1]\n return precision ** -0.5\n\n @cached_property\n def mean(self):\n return np.nan_to_num(- self.parameters[0] / self.parameters[1] / 2)\n\n @staticmethod\n def 
calc_natural_parameters(eta1, eta2):\n return np.array([eta1, eta2])\n\n @cached_property\n def natural_parameters(self):\n return self.calc_natural_parameters(*self.parameters)\n\n @classmethod\n def invert_sufficient_statistics(cls, suff_stats):\n m1, m2 = suff_stats\n precision = 1 / (m2 - m1 ** 2)\n return cls.calc_natural_parameters(m1 * precision, - precision / 2)\n\n @staticmethod\n def invert_natural_parameters(natural_parameters):\n return natural_parameters\n\n @classmethod\n def from_mode(\n cls, mode: np.ndarray, covariance: Union[float, LinearOperator] = 1.0, **kwargs\n ):\n if isinstance(covariance, LinearOperator):\n precision = covariance.inv().diagonal()\n else:\n mode, variance = cls._get_mean_variance(mode, covariance)\n precision = 1 / variance\n\n return cls(mode * precision, - precision / 2, **kwargs)\n\n\nUniformNormalMessage = NormalMessage.transformed(phi_transform, \"UniformNormalMessage\")\nUniformNormalMessage.__module__ = __name__\n\nLog10UniformNormalMessage = UniformNormalMessage.transformed(log_10_transform)\n\nLogNormalMessage = NormalMessage.transformed(log_transform, \"LogNormalMessage\")\n\n# Support is the simplex\nMultiLogitNormalMessage = NormalMessage.transformed(\n multinomial_logit_transform, \"MultiLogitNormalMessage\", ((0, 1),)\n)\n"
] | [
[
"numpy.square",
"matplotlib.pyplot.gca",
"numpy.minimum",
"numpy.maximum",
"numpy.min",
"numpy.reshape",
"numpy.linalg.multi_dot",
"numpy.indices",
"numpy.broadcast",
"numpy.max",
"numpy.asanyarray",
"numpy.shape",
"numpy.size",
"numpy.prod",
"numpy.ravel",
"numpy.sum",
"numpy.random.default_rng"
],
[
"scipy.stats.norm.ppf",
"numpy.log",
"scipy.stats.norm.cdf",
"numpy.sqrt",
"numpy.abs",
"numpy.isnan",
"numpy.nan_to_num",
"numpy.asanyarray",
"numpy.shape",
"numpy.random.randn",
"numpy.array",
"scipy.special.cython_special.erfcinv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rlouf/aesara | [
"150add2cde542805b69e74da235e5e7d9ff22d3c",
"150add2cde542805b69e74da235e5e7d9ff22d3c"
] | [
"tests/tensor/test_sharedvar.py",
"aesara/tensor/math_opt.py"
] | [
"from functools import update_wrapper\n\nimport numpy as np\nimport pytest\n\nimport aesara\nimport aesara.sparse\nimport aesara.tensor as at\nfrom aesara.misc.may_share_memory import may_share_memory\nfrom aesara.tensor import get_vector_length\nfrom aesara.tensor.basic import MakeVector\nfrom aesara.tensor.shape import Shape_i, specify_shape\nfrom tests import unittest_tools as utt\n\n\ndef makeSharedTester(\n shared_constructor_,\n dtype_,\n get_value_borrow_true_alias_,\n shared_borrow_true_alias_,\n set_value_borrow_true_alias_,\n set_value_inplace_,\n set_cast_value_inplace_,\n shared_constructor_accept_ndarray_,\n internal_type_,\n check_internal_type_,\n aesara_fct_,\n ref_fct_,\n cast_value_=np.asarray,\n expect_fail_fast_shape_inplace=True,\n):\n \"\"\"\n This is a generic fct to allow reusing the same test function\n for many shared variable of many types.\n\n :param shared_constructor_: The shared variable constructor to use\n :param dtype_: The dtype of the data to test\n :param get_value_borrow_true_alias_: Should a get_value(borrow=True) return the internal object\n :param shared_borrow_true_alias_: Should shared(val,borrow=True) reuse the val memory space\n :param set_value_borrow_true_alias_: Should set_value(val,borrow=True) reuse the val memory space\n :param set_value_inplace_: Should this shared variable overwrite the current\n memory when the new value is an ndarray\n :param set_cast_value_inplace_: Should this shared variable overwrite the\n current memory when the new value is of the same\n type as the internal type.\n :param shared_constructor_accept_ndarray_: Do the shared_constructor accept an ndarray as input?\n :param internal_type_: The internal type used.\n :param check_internal_type_: A function that tell if its input is of the same\n type as this shared variable internal type.\n :param aesara_fct_: A aesara op that will be used to do some computation on the shared variable\n :param ref_fct_: A reference function that should return the same value as the aesara_fct_\n :param cast_value_: A callable that cast an ndarray into the internal shared variable representation\n :param name: This string is used to set the returned class' __name__\n attribute. This is needed for tests to properly tag the\n test with its correct name, rather than use the generic\n SharedTester name. 
This parameter is mandatory (keeping the\n default None value will raise an error), and must be set to\n the name of the variable that will hold the returned class.\n :note:\n We must use /= as sparse type don't support other inplace operation.\n \"\"\"\n\n class m(type):\n pass\n\n class SharedTester:\n shared_constructor = staticmethod(shared_constructor_)\n dtype = dtype_\n get_value_borrow_true_alias = get_value_borrow_true_alias_\n shared_borrow_true_alias = shared_borrow_true_alias_\n internal_type = internal_type_\n check_internal_type = staticmethod(check_internal_type_)\n aesara_fct = staticmethod(aesara_fct_)\n ref_fct = staticmethod(ref_fct_)\n set_value_borrow_true_alias = set_value_borrow_true_alias_\n set_value_inplace = set_value_inplace_\n set_cast_value_inplace = set_cast_value_inplace_\n shared_constructor_accept_ndarray = shared_constructor_accept_ndarray_\n cast_value = staticmethod(cast_value_)\n\n def test_shared_dont_alias(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)\n x = self.cast_value(x)\n\n x_ref = self.ref_fct(x)\n x_shared = self.shared_constructor(x, borrow=False)\n total = self.aesara_fct(x_shared)\n\n total_func = aesara.function([], total)\n\n total_val = total_func()\n\n assert np.allclose(self.ref_fct(x), total_val)\n\n x /= 0.5\n total_val_2 = total_func()\n\n # value used to construct should not alias with internal\n assert np.allclose(total_val, total_val_2)\n\n x = x_shared.get_value(borrow=False)\n\n x /= 0.5\n\n total_val_3 = total_func()\n\n # value returned by access should not alias with internal\n assert np.allclose(total_val, total_val_3)\n\n # in this case we can alias\n x = x_shared.get_value(borrow=True)\n x /= 0.5\n\n # this is not required by the contract but it is a feature we've\n # implemented for some type of SharedVariable.\n if self.get_value_borrow_true_alias:\n assert np.allclose(self.ref_fct(x), total_func())\n else:\n assert np.allclose(x_ref, total_func())\n\n def test_shape(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)\n x = self.cast_value(x)\n\n self.ref_fct(x)\n x_shared = self.shared_constructor(x, borrow=False)\n self.aesara_fct(x_shared)\n\n f = aesara.function([], x_shared.shape)\n topo = f.maker.fgraph.toposort()\n\n assert np.all(f() == (2, 4))\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo) == 3\n assert isinstance(topo[0].op, Shape_i)\n assert isinstance(topo[1].op, Shape_i)\n assert isinstance(topo[2].op, MakeVector)\n\n def test_shape_i(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)\n x = self.cast_value(x)\n\n self.ref_fct(x)\n x_shared = self.shared_constructor(x, borrow=False)\n self.aesara_fct(x_shared)\n\n f = aesara.function([], x_shared.shape[1])\n topo = f.maker.fgraph.toposort()\n\n assert np.all(f() == (4))\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo) == 1\n assert isinstance(topo[0].op, Shape_i)\n\n def test_return_internal_type(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)\n x = self.cast_value(x)\n\n x_shared = 
self.shared_constructor(x, borrow=False)\n total = self.aesara_fct(x_shared)\n\n total_func = aesara.function([], total)\n\n # in this case we can alias with the internal value\n x = x_shared.get_value(borrow=True, return_internal_type=True)\n assert self.check_internal_type(x)\n\n x /= 0.5\n\n # this is not required by the contract but it is a feature we can\n # implement for some type of SharedVariable.\n assert np.allclose(self.ref_fct(x), total_func())\n\n x = x_shared.get_value(borrow=False, return_internal_type=True)\n assert self.check_internal_type(x)\n assert x is not x_shared.container.value\n x /= 0.5\n\n # this is required by the contract\n assert not np.allclose(self.ref_fct(x), total_func())\n\n def test_get_value(self):\n # Test that get_value returns a ndarray\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x_orig = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)\n x_cast = self.cast_value(x_orig)\n if self.shared_constructor_accept_ndarray:\n x_shared = self.shared_constructor(x_orig, borrow=False)\n assert isinstance(x_shared.get_value(), x_orig.__class__)\n\n x_shared = self.shared_constructor(x_cast, borrow=False)\n assert isinstance(x_shared.get_value(), x_cast.__class__)\n\n def test_set_value(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)\n x = self.cast_value(x)\n\n x_orig = x\n x_shared = self.shared_constructor(x, borrow=False)\n total = self.aesara_fct(x_shared)\n\n total_func = aesara.function([], total)\n total_func()\n\n # test if that aesara shared variable optimize set_value(borrow=True)\n get_x = x_shared.get_value(borrow=True)\n assert get_x is not x_orig # borrow=False to shared_constructor\n get_x /= 0.5\n x_shared.set_value(get_x, borrow=True)\n x = x_shared.get_value(borrow=True)\n if self.set_value_borrow_true_alias:\n assert x is get_x\n else:\n assert x is not get_x\n assert np.allclose(self.ref_fct(np.asarray(x_orig) / 0.5), self.ref_fct(x))\n\n # test optimized get set value on the gpu(don't pass data to the cpu)\n get_x = x_shared.get_value(borrow=True, return_internal_type=True)\n assert get_x is not x_orig # borrow=False to shared_constructor\n assert self.check_internal_type(get_x)\n\n get_x /= 0.5\n assert self.check_internal_type(get_x)\n x_shared.set_value(get_x, borrow=True)\n x = x_shared.get_value(borrow=True, return_internal_type=True)\n assert self.check_internal_type(x)\n assert x is get_x\n\n # TODO test Out.\n\n def test_shared_do_alias(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)\n x = self.cast_value(x)\n x_ref = self.ref_fct(x)\n\n x_shared = self.shared_constructor(x, borrow=True)\n\n total = self.aesara_fct(x_shared)\n\n total_func = aesara.function([], total)\n\n total_val = total_func()\n\n assert np.allclose(self.ref_fct(x), total_val)\n\n x /= 0.5\n\n # not required by the contract but it is a feature we've implemented\n if self.shared_borrow_true_alias:\n assert np.allclose(self.ref_fct(x), total_func())\n else:\n assert np.allclose(x_ref, total_func())\n\n def test_inplace_set_value(self):\n # We test that if the SharedVariable implement it we do inplace set_value\n # We also test this for partial inplace modification when accessing the internal of aesara.\n\n dtype = 
self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n shp = (100 // 4, 1024) # 100KB\n\n x = np.zeros(shp, dtype=dtype)\n x = self.cast_value(x)\n x_shared = self.shared_constructor(x, borrow=True)\n\n old_data = x_shared.container.storage[0]\n nd = np.ones(shp, dtype=dtype)\n\n if x.__class__.__name__ != \"csr_matrix\":\n # sparse matrix don't support inplace affectation\n x_shared.container.value[:] = nd\n assert (np.asarray(x_shared.get_value(borrow=True)) == nd).all()\n # This should always share value!\n assert may_share_memory(old_data, x_shared.container.storage[0])\n assert may_share_memory(\n old_data, x_shared.get_value(borrow=True, return_internal_type=True)\n )\n\n nd[0] += 1\n x_shared.container.value[0] = nd[0]\n assert (np.asarray(x_shared.get_value(borrow=True)[0]) == nd[0]).all()\n assert (np.asarray(x_shared.get_value(borrow=True)[1:]) == nd[1:]).all()\n # This should always share value!\n assert may_share_memory(old_data, x_shared.container.storage[0])\n assert may_share_memory(\n old_data, x_shared.get_value(borrow=True, return_internal_type=True)\n )\n\n if x.__class__.__name__ != \"csr_matrix\":\n # sparse matrix don't support inplace affectation\n nd += 1\n # THIS DOESN'T DO WHAT WE EXPECT the content of a is\n # not updated for GpuArray, but it is for ndarray\n x_shared.get_value(borrow=True)[:] = nd\n assert may_share_memory(old_data, x_shared.container.storage[0])\n x_shared.get_value(borrow=True)\n\n # Test by set_value with borrow=False\n nd += 1\n old_data = x_shared.container.storage[0]\n x_shared.set_value(nd, borrow=False)\n assert np.allclose(\n self.ref_fct(x_shared.get_value(borrow=True)),\n self.ref_fct(self.cast_value(nd)),\n )\n assert (\n may_share_memory(old_data, x_shared.container.storage[0])\n == self.set_value_inplace\n )\n\n # Test by set_value with borrow=False when new data cast.\n # specifically useful for gpu data\n nd += 1\n old_data = x_shared.container.storage[0]\n x_shared.set_value(self.cast_value(nd), borrow=False)\n assert np.allclose(\n self.ref_fct(x_shared.get_value(borrow=True)),\n self.ref_fct(self.cast_value(nd)),\n )\n assert (\n may_share_memory(old_data, x_shared.container.storage[0])\n == self.set_cast_value_inplace\n )\n\n # Test by set_value with borrow=True\n nd += 1\n old_data = x_shared.container.storage[0]\n x_shared.set_value(nd.copy(), borrow=True)\n assert np.allclose(\n self.ref_fct(x_shared.get_value(borrow=True)),\n self.ref_fct(self.cast_value(nd)),\n )\n assert (\n may_share_memory(old_data, x_shared.container.storage[0])\n == self.set_value_inplace\n )\n\n # Test by set_value with borrow=True when new data cast.\n nd += 1\n old_data = x_shared.container.storage[0]\n x_shared.set_value(self.cast_value(nd.copy()), borrow=True)\n assert np.allclose(\n self.ref_fct(x_shared.get_value(borrow=True)),\n self.ref_fct(self.cast_value(nd)),\n )\n assert (\n may_share_memory(old_data, x_shared.container.storage[0])\n == self.set_cast_value_inplace\n )\n\n def test_specify_shape(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x1_1 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)\n x1_1 = self.cast_value(x1_1)\n x1_2 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)\n x1_2 = self.cast_value(x1_2)\n x2 = np.asarray(rng.uniform(1, 2, [4, 3]), dtype=dtype)\n x2 = self.cast_value(x2)\n\n # Test that we can replace with values of the same shape\n x1_shared = self.shared_constructor(x1_1)\n x1_specify_shape = 
specify_shape(x1_shared, x1_1.shape)\n x1_shared.set_value(x1_2)\n assert np.allclose(\n self.ref_fct(x1_shared.get_value(borrow=True)), self.ref_fct(x1_2)\n )\n shape_op_fct = aesara.function([], x1_shared.shape)\n topo = shape_op_fct.maker.fgraph.toposort()\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo) == 3\n assert isinstance(topo[0].op, Shape_i)\n assert isinstance(topo[1].op, Shape_i)\n assert isinstance(topo[2].op, MakeVector)\n\n # Test that we forward the input\n specify_shape_fct = aesara.function([], x1_specify_shape)\n assert np.all(self.ref_fct(specify_shape_fct()) == self.ref_fct(x1_2))\n topo_specify = specify_shape_fct.maker.fgraph.toposort()\n assert len(topo_specify) == 2\n\n # Test that we put the shape info into the graph\n shape_constant_fct = aesara.function([], x1_specify_shape.shape)\n assert np.all(shape_constant_fct() == shape_op_fct())\n topo_cst = shape_constant_fct.maker.fgraph.toposort()\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo_cst) == 1\n topo_cst[0].op == aesara.compile.function.types.deep_copy_op\n\n # Test that we can take the grad.\n shape_grad = aesara.gradient.grad(x1_specify_shape.sum(), x1_shared)\n shape_constant_fct_grad = aesara.function([], shape_grad)\n # aesara.printing.debugprint(shape_constant_fct_grad)\n shape_constant_fct_grad()\n\n # Test that we can replace with values of the different shape\n # but that will raise an error in some case, but not all\n specify_shape_fct()\n x1_shared.set_value(x2)\n with pytest.raises(AssertionError):\n specify_shape_fct()\n\n def test_specify_shape_partial(self):\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n x1_1 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)\n x1_1 = self.cast_value(x1_1)\n x1_2 = np.asarray(rng.uniform(1, 2, [4, 2]), dtype=dtype)\n x1_2 = self.cast_value(x1_2)\n x2 = np.asarray(rng.uniform(1, 2, [5, 2]), dtype=dtype)\n x2 = self.cast_value(x2)\n\n # Test that we can replace with values of the same shape\n x1_shared = self.shared_constructor(x1_1)\n x1_specify_shape = specify_shape(\n x1_shared,\n (at.as_tensor_variable(x1_1.shape[0]), x1_shared.shape[1]),\n )\n x1_shared.set_value(x1_2)\n assert np.allclose(\n self.ref_fct(x1_shared.get_value(borrow=True)), self.ref_fct(x1_2)\n )\n shape_op_fct = aesara.function([], x1_shared.shape)\n topo = shape_op_fct.maker.fgraph.toposort()\n shape_op_fct()\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo) == 3\n assert isinstance(topo[0].op, Shape_i)\n assert isinstance(topo[1].op, Shape_i)\n assert isinstance(topo[2].op, MakeVector)\n\n # Test that we forward the input\n specify_shape_fct = aesara.function([], x1_specify_shape)\n specify_shape_fct()\n # aesara.printing.debugprint(specify_shape_fct)\n assert np.all(self.ref_fct(specify_shape_fct()) == self.ref_fct(x1_2))\n topo_specify = specify_shape_fct.maker.fgraph.toposort()\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo_specify) == 4\n\n # Test that we put the shape info into the graph\n shape_constant_fct = aesara.function([], x1_specify_shape.shape)\n # aesara.printing.debugprint(shape_constant_fct)\n assert np.all(shape_constant_fct() == shape_op_fct())\n topo_cst = shape_constant_fct.maker.fgraph.toposort()\n if aesara.config.mode != \"FAST_COMPILE\":\n assert len(topo_cst) == 2\n\n # Test that we can replace with values of the different shape\n # but that will raise an error in some case, but not all\n x1_shared.set_value(x2)\n with 
pytest.raises(AssertionError):\n specify_shape_fct()\n\n def test_specify_shape_inplace(self):\n # test that specify_shape don't break inserting inplace op\n\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n rng = np.random.default_rng(utt.fetch_seed())\n a = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)\n a = self.cast_value(a)\n a_shared = self.shared_constructor(a)\n b = np.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)\n b = self.cast_value(b)\n b_shared = self.shared_constructor(b)\n s = np.zeros((40, 40), dtype=dtype)\n s = self.cast_value(s)\n s_shared = self.shared_constructor(s)\n f = aesara.function(\n [],\n updates=[(s_shared, aesara.tensor.dot(a_shared, b_shared) + s_shared)],\n )\n topo = f.maker.fgraph.toposort()\n f()\n # [Gemm{inplace}(<TensorType(float64, (None, None))>, 0.01, <TensorType(float64, (None, None))>, <TensorType(float64, (None, None))>, 2e-06)]\n if aesara.config.mode != \"FAST_COMPILE\":\n assert (\n sum(\n [\n node.op.__class__.__name__\n in [\"Gemm\", \"GpuGemm\", \"StructuredDot\"]\n for node in topo\n ]\n )\n == 1\n )\n assert all(\n node.op == aesara.tensor.blas.gemm_inplace\n for node in topo\n if isinstance(node.op, aesara.tensor.blas.Gemm)\n )\n assert all(\n node.op.inplace\n for node in topo\n if node.op.__class__.__name__ == \"GpuGemm\"\n )\n # Their is no inplace gemm for sparse\n # assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == \"StructuredDot\")\n s_shared_specify = specify_shape(\n s_shared, s_shared.get_value(borrow=True).shape\n )\n\n # now test with the specify shape op in the output\n f = aesara.function(\n [],\n s_shared.shape,\n updates=[\n (s_shared, aesara.tensor.dot(a_shared, b_shared) + s_shared_specify)\n ],\n )\n topo = f.maker.fgraph.toposort()\n shp = f()\n assert np.all(shp == (40, 40))\n if aesara.config.mode != \"FAST_COMPILE\":\n assert (\n sum(\n [\n node.op.__class__.__name__\n in [\"Gemm\", \"GpuGemm\", \"StructuredDot\"]\n for node in topo\n ]\n )\n == 1\n )\n assert all(\n node.op == aesara.tensor.blas.gemm_inplace\n for node in topo\n if isinstance(node.op, aesara.tensor.blas.Gemm)\n )\n assert all(\n node.op.inplace\n for node in topo\n if node.op.__class__.__name__ == \"GpuGemm\"\n )\n # now test with the specify shape op in the inputs and outputs\n a_shared = specify_shape(a_shared, a_shared.get_value(borrow=True).shape)\n b_shared = specify_shape(b_shared, b_shared.get_value(borrow=True).shape)\n\n f = aesara.function(\n [],\n s_shared.shape,\n updates=[\n (s_shared, aesara.tensor.dot(a_shared, b_shared) + s_shared_specify)\n ],\n )\n topo = f.maker.fgraph.toposort()\n shp = f()\n assert np.all(shp == (40, 40))\n if aesara.config.mode != \"FAST_COMPILE\":\n assert (\n sum(\n [\n node.op.__class__.__name__\n in [\"Gemm\", \"GpuGemm\", \"StructuredDot\"]\n for node in topo\n ]\n )\n == 1\n )\n assert all(\n node.op == aesara.tensor.blas.gemm_inplace\n for node in topo\n if isinstance(node.op, aesara.tensor.blas.Gemm)\n )\n assert all(\n node.op.inplace\n for node in topo\n if node.op.__class__.__name__ == \"GpuGemm\"\n )\n\n if (\n aesara.config.cycle_detection == \"fast\"\n and expect_fail_fast_shape_inplace\n and aesara.config.mode != \"FAST_COMPILE\"\n ):\n test_specify_shape_inplace = pytest.mark.xfail(test_specify_shape_inplace)\n\n def test_values_eq(self):\n # Test the type.values_eq[_approx] function\n dtype = self.dtype\n if dtype is None:\n dtype = aesara.config.floatX\n\n # We need big shape as in the past there have been a bug in the\n # 
sparse values_eq_approx.\n shp = (1024, 1024)\n\n # Test the case with all zeros element\n rng = np.random.default_rng(utt.fetch_seed())\n for x in [\n np.asarray(rng.random(shp), dtype=dtype),\n np.zeros(shp, dtype=dtype),\n ]:\n zeros = (x == 0).all()\n x = self.cast_value(x)\n x_shared = self.shared_constructor(x, borrow=True)\n\n y = x.copy()\n y[0, 0], y[1, 0] = y[1, 0], y[0, 0]\n y = self.cast_value(y)\n\n assert x_shared.type.values_eq(x, x)\n assert x_shared.type.values_eq_approx(x, x)\n if not zeros:\n assert not np.allclose(self.ref_fct(x), self.ref_fct(y))\n assert not x_shared.type.values_eq(x, y)\n assert not x_shared.type.values_eq_approx(x, y)\n\n def f(cls):\n return update_wrapper(SharedTester, cls, updated=())\n\n return f\n\n\n@makeSharedTester(\n shared_constructor_=aesara.shared,\n dtype_=aesara.config.floatX,\n get_value_borrow_true_alias_=True,\n shared_borrow_true_alias_=True,\n set_value_borrow_true_alias_=True,\n set_value_inplace_=False,\n set_cast_value_inplace_=False,\n shared_constructor_accept_ndarray_=True,\n internal_type_=np.ndarray,\n check_internal_type_=lambda a: isinstance(a, np.ndarray),\n aesara_fct_=lambda a: a * 2,\n ref_fct_=lambda a: np.asarray(a * 2),\n cast_value_=np.asarray,\n)\nclass TestSharedOptions:\n pass\n\n\ndef test_scalar_shared_options():\n # Simple test to make sure we do not loose that fonctionality.\n aesara.shared(value=0.0, name=\"lk\", borrow=True)\n aesara.shared(value=np.float32(0.0), name=\"lk\", borrow=True)\n\n\ndef test_get_vector_length():\n x = aesara.shared(np.array((2, 3, 4, 5)))\n assert get_vector_length(x) == 4\n",
"\"\"\" Tensor optimizations addressing the ops in math.py.\"\"\"\n\nimport itertools\nimport logging\nimport operator\nfrom functools import partial, reduce\n\nimport numpy as np\n\nimport aesara.scalar.basic as aes\nimport aesara.scalar.math as aes_math\nfrom aesara.graph.basic import Constant, Variable\nfrom aesara.graph.opt import (\n LocalOptGroup,\n LocalOptimizer,\n PatternSub,\n copy_stack_trace,\n in2out,\n local_optimizer,\n)\nfrom aesara.graph.opt_utils import get_clients_at_depth\nfrom aesara.misc.safe_asarray import _asarray\nfrom aesara.raise_op import assert_op\nfrom aesara.tensor.basic import (\n Alloc,\n Join,\n MakeVector,\n alloc,\n as_tensor_variable,\n cast,\n constant,\n extract_constant,\n fill,\n get_scalar_constant_value,\n ones_like,\n switch,\n zeros_like,\n)\nfrom aesara.tensor.basic_opt import (\n FusionOptimizer,\n broadcast_like,\n encompasses_broadcastable,\n fuse_seqopt,\n local_fill_sink,\n register_canonicalize,\n register_specialize,\n register_specialize_device,\n register_stabilize,\n register_uncanonicalize,\n register_useless,\n)\nfrom aesara.tensor.elemwise import CAReduce, DimShuffle, Elemwise\nfrom aesara.tensor.exceptions import NotScalarConstantError\nfrom aesara.tensor.math import (\n All,\n Any,\n Dot,\n NonZeroCAReduce,\n Prod,\n ProdWithoutZeros,\n Sum,\n)\nfrom aesara.tensor.math import abs as at_abs\nfrom aesara.tensor.math import (\n add,\n dot,\n eq,\n erf,\n erfc,\n exp,\n expm1,\n ge,\n int_div,\n isinf,\n le,\n log,\n log1mexp,\n log1p,\n makeKeepDims,\n)\nfrom aesara.tensor.math import max as at_max\nfrom aesara.tensor.math import maximum, mul, neg\nfrom aesara.tensor.math import pow as at_pow\nfrom aesara.tensor.math import prod, reciprocal, sgn, sigmoid, softplus, sqr, sqrt, sub\nfrom aesara.tensor.math import sum as at_sum\nfrom aesara.tensor.math import true_div\nfrom aesara.tensor.shape import Shape, Shape_i\nfrom aesara.tensor.subtensor import Subtensor\nfrom aesara.tensor.type import (\n uint_dtypes,\n values_eq_approx_remove_inf,\n values_eq_approx_remove_inf_nan,\n values_eq_approx_remove_nan,\n)\nfrom aesara.tensor.var import TensorConstant, get_unique_value\nfrom aesara.utils import NoDuplicateOptWarningFilter\n\n\n_logger = logging.getLogger(\"aesara.tensor.math_opt\")\n_logger.addFilter(NoDuplicateOptWarningFilter())\n\n\ndef scalarconsts_rest(inputs, elemwise=True, only_process_constants=False):\n \"\"\"Partition a list of variables into two kinds:\n scalar constants, and the rest.\"\"\"\n consts = []\n origconsts = []\n nonconsts = []\n for i in inputs:\n try:\n v = get_scalar_constant_value(\n i, elemwise=elemwise, only_process_constants=only_process_constants\n )\n consts.append(v)\n origconsts.append(i)\n except NotScalarConstantError:\n nonconsts.append(i)\n return consts, origconsts, nonconsts\n\n\ndef get_constant(v):\n \"\"\"\n\n Returns\n -------\n object\n A numeric constant if v is a Constant or, well, a\n numeric constant. 
If v is a plain Variable, returns None.\n\n \"\"\"\n if isinstance(v, Constant):\n unique_value = get_unique_value(v)\n if unique_value is not None:\n data = unique_value\n else:\n data = v.data\n if data.ndim == 0:\n return data\n else:\n return None\n elif isinstance(v, Variable):\n return None\n else:\n return v\n\n\ndef fill_chain(new_out, orig_inputs):\n for i in orig_inputs:\n new_out = fill(i, new_out)\n return [new_out]\n\n\n@register_canonicalize\n@register_stabilize\n@local_optimizer([Dot])\ndef local_0_dot_x(fgraph, node):\n if not isinstance(node.op, Dot):\n return False\n\n x = node.inputs[0]\n y = node.inputs[1]\n replace = False\n try:\n if get_scalar_constant_value(x, only_process_constants=True) == 0:\n replace = True\n except NotScalarConstantError:\n pass\n\n try:\n if get_scalar_constant_value(y, only_process_constants=True) == 0:\n replace = True\n except NotScalarConstantError:\n pass\n\n if replace:\n constant_zero = constant(0, dtype=node.outputs[0].type.dtype)\n if x.ndim == 2 and y.ndim == 2:\n constant_zero = assert_op(constant_zero, eq(x.shape[1], y.shape[0]))\n return [alloc(constant_zero, x.shape[0], y.shape[1])]\n elif x.ndim == 1 and y.ndim == 2:\n constant_zero = assert_op(constant_zero, eq(x.shape[0], y.shape[0]))\n return [alloc(constant_zero, y.shape[1])]\n elif x.ndim == 2 and y.ndim == 1:\n constant_zero = assert_op(constant_zero, eq(x.shape[1], y.shape[0]))\n return [alloc(constant_zero, x.shape[0])]\n elif x.ndim == 1 and y.ndim == 1:\n constant_zero = assert_op(constant_zero, eq(x.shape[0], y.shape[0]))\n return [constant_zero]\n else:\n _logger.warning(\n \"Optimization Warning: \"\n \"Optimization aesara/opt.py:local_0_dot_x Found \"\n \"that it could apply, but was not implemented \"\n \"for dot product with these input types:\\n\"\n f\"({x.type}, {y.type})\"\n )\n\n\n@register_canonicalize\n@local_optimizer([DimShuffle])\ndef local_lift_transpose_through_dot(fgraph, node):\n \"\"\"Perform the rewrite ``dot(x,y).T -> dot(y.T, x.T)``\n\n These optimizations \"lift\" (propagate towards the inputs) DimShuffle\n through dot product. It allows to put the graph in a more standard shape,\n and to later merge consecutive DimShuffles.\n\n The transformation should be apply whether or not the transpose is\n inplace. 
The newly-introduced transpositions are not inplace, this will\n be taken care of in a later optimization phase.\n\n \"\"\"\n if not (isinstance(node.op, DimShuffle) and node.op.new_order == (1, 0)):\n return False\n if not (node.inputs[0].owner and isinstance(node.inputs[0].owner.op, Dot)):\n return False\n x, y = node.inputs[0].owner.inputs\n\n if x.ndim == y.ndim == 2:\n # Output is dot product of transposed inputs in reverse order\n ret = [dot(y.T, x.T)]\n\n # Copy over stack trace to output from result of dot-product\n copy_stack_trace(node.inputs[0], ret)\n return ret\n\n\ndef is_inverse_pair(node_op, prev_op, inv_pair):\n \"\"\"\n Given two consecutive operations, check if they are the\n provided pair of inverse functions.\n\n \"\"\"\n node_is_op0 = isinstance(node_op, inv_pair[0])\n node_is_op1 = isinstance(node_op, inv_pair[1])\n prev_is_op0 = isinstance(prev_op, inv_pair[0])\n prev_is_op1 = isinstance(prev_op, inv_pair[1])\n\n return (node_is_op0 and prev_is_op1) or (node_is_op1 and prev_is_op0)\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_func_inv(fgraph, node):\n \"\"\"\n Check for two consecutive operations that are functional inverses\n and remove them from the function graph.\n\n \"\"\"\n inv_pairs = (\n (aes.Deg2Rad, aes.Rad2Deg),\n (aes.Cosh, aes.ArcCosh),\n (aes.Tanh, aes.ArcTanh),\n (aes.Sinh, aes.ArcSinh),\n (aes.Conj, aes.Conj),\n (aes.Neg, aes.Neg),\n (aes.Reciprocal, aes.Reciprocal),\n )\n x = node.inputs[0]\n\n if not isinstance(node.op, Elemwise):\n return\n if not x.owner or not isinstance(x.owner.op, Elemwise):\n return\n\n prev_op = x.owner.op.scalar_op\n node_op = node.op.scalar_op\n\n for inv_pair in inv_pairs:\n if is_inverse_pair(node_op, prev_op, inv_pair):\n # We don't need to copy stack trace, because the optimization\n # is trivial and maintains the earlier stack trace\n ottype = node.out.dtype\n inp = x.owner.inputs[0]\n # Functions may have casted integer input to float\n if inp.dtype != ottype:\n inp = cast(inp, ottype)\n return [inp]\n\n return\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_exp_log(fgraph, node):\n x = node.inputs[0]\n\n if not isinstance(node.op, Elemwise):\n return\n if not x.owner or not isinstance(x.owner.op, Elemwise):\n return\n\n prev_op = x.owner.op.scalar_op\n node_op = node.op.scalar_op\n\n # Case for log(exp(x))\n if isinstance(prev_op, aes.Exp) and isinstance(node_op, aes.Log):\n new_out = x.owner.inputs[0]\n old_out = node.outputs[0]\n # Exp may have casted integer input to float\n if new_out.dtype != old_out.dtype:\n new_out = cast(new_out, old_out.dtype)\n return [new_out]\n\n # Case for exp(softplus(x)) aka exp(log1pexp)\n if isinstance(prev_op, aes_math.Softplus) and isinstance(node_op, aes.Exp):\n x = x.owner.inputs[0]\n old_out = node.outputs[0]\n new_out = add(1, exp(x))\n if not old_out.type.is_super(new_out.type):\n return\n return [new_out]\n\n\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_exp_log_nan_switch(fgraph, node):\n # Rewrites of the kind exp(log...(x)) that require a `nan` switch\n x = node.inputs[0]\n\n if not isinstance(node.op, Elemwise):\n return\n if not x.owner or not isinstance(x.owner.op, Elemwise):\n return\n\n prev_op = x.owner.op.scalar_op\n node_op = node.op.scalar_op\n\n # Case for exp(log(x))\n if isinstance(prev_op, aes.Log) and isinstance(node_op, aes.Exp):\n x = x.owner.inputs[0]\n old_out = node.outputs[0]\n new_out = switch(ge(x, 0), x, np.asarray(np.nan, old_out.dtype))\n if 
not old_out.type.is_super(new_out.type):\n return\n return [new_out]\n\n # Case for exp(log1p(x))\n if isinstance(prev_op, aes.Log1p) and isinstance(node_op, aes.Exp):\n x = x.owner.inputs[0]\n old_out = node.outputs[0]\n new_out = switch(ge(x, -1), add(1, x), np.asarray(np.nan, old_out.dtype))\n if not old_out.type.is_super(new_out.type):\n return\n return [new_out]\n\n # Case for exp(log1mexp(x))\n if isinstance(prev_op, aes_math.Log1mexp) and isinstance(node_op, aes.Exp):\n x = x.owner.inputs[0]\n old_out = node.outputs[0]\n new_out = switch(le(x, 0), sub(1, exp(x)), np.asarray(np.nan, old_out.dtype))\n if not old_out.type.is_super(new_out.type):\n return\n return [new_out]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Sum])\ndef local_sumsqr2dot(fgraph, node):\n \"\"\"\n This optimization detects\n ``at.sqr(W.dimshuffle(\"x\", 0, 1) * G.dimshuffle(0, \"x\", 1) ).sum(axis=(1, 2))``\n and converts it to ``at.dot(at.sqr(G), at.sqr(W).sum(axis=0))``.\n \"\"\"\n if (\n isinstance(node.op, Sum)\n and isinstance(node.op.scalar_op, aes.Add)\n and node.op.axis == (1, 2)\n ):\n in1 = node.inputs[0]\n out = node.outputs[0]\n\n if (\n in1.owner\n and isinstance(in1.owner.op, Elemwise)\n and isinstance(in1.owner.op.scalar_op, aes.Sqr)\n ):\n in_sqr = in1.owner.inputs[0]\n if (\n in_sqr.owner\n and isinstance(in_sqr.owner.op, Elemwise)\n and isinstance(in_sqr.owner.op.scalar_op, aes.Mul)\n and len(in_sqr.owner.inputs) == 2\n ):\n in_mul1, in_mul2 = in_sqr.owner.inputs\n\n if (\n isinstance(in_mul1.owner.op, DimShuffle)\n and in_mul1.owner.op.new_order == (\"x\", 0, 1)\n and isinstance(in_mul2.owner.op, DimShuffle)\n and in_mul2.owner.op.new_order == (0, \"x\", 1)\n ):\n W = in_mul1.owner.inputs[0]\n G = in_mul2.owner.inputs[0]\n\n new_out = dot(sqr(G), sqr(W).sum(axis=0))\n if new_out.dtype != out.dtype:\n new_out = cast(new_out, dtype=out.dtype)\n return [new_out]\n\n\n@register_stabilize\n@register_specialize\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_expm1(fgraph, node):\n \"\"\"\n This optimization detects exp(a)-1 and converts this to expm1(a).\n \"\"\"\n if isinstance(node.op, Elemwise) and isinstance(node.op.scalar_op, aes.Sub):\n in1, in2 = node.inputs\n out = node.outputs[0]\n\n if (\n in1.owner\n and isinstance(in1.owner.op, Elemwise)\n and isinstance(in1.owner.op.scalar_op, aes.Exp)\n and extract_constant(in2, only_process_constants=False) == 1\n ):\n in11 = in1.owner.inputs[0]\n new_out = expm1(in11)\n\n if new_out.dtype != out.dtype:\n new_out = cast(new_out, dtype=out.dtype)\n\n if not out.type.is_super(new_out.type):\n return\n return [new_out]\n\n\n@register_specialize\n@register_canonicalize\n@local_optimizer([mul])\ndef local_mul_switch_sink(fgraph, node):\n \"\"\"\n This optimization makes the following changes in the graph:\n ``at.mul(A, at.switch(cond, 0, iff), B)`` -> ``at.switch(cond, 0, at.mul(A, B, iff))``\n ``at.mul(A, at.switch(cond, ift, 0), B)`` -> ``at.switch(cond, at.mul(A, B, ift), 0)``\n ``A`` and ``B`` being several (or none) symbolic variables.\n This is useful because ``A`` and ``B`` may not be numerically stable and give\n NaN or inf values for cases where the switch returns 0.\n With this optimization ``at.grad(at.switch(...))`` has the right behavior.\n\n Examples\n --------\n\n x -> f(x)\n x -> g(x)\n y = at.switch(cond, f(x), g(x))\n\n without the optimization:\n\n at.grad(y, x) -> grad(f(x), x) * grad(y, f(x)) + grad(g(x), x) * grad(y, g(x))\n\n with the optimization\n\n at.grad(y, x) -> switch(cond, grad(f(x), 
x), 0) + switch(cond, 0, grad(g(x), x))\n\n This will be particularly useful for the lazy ``if`` because we skip an entire\n part of the graph.\n\n \"\"\"\n if node.op != mul:\n return False\n for idx, i in enumerate(node.inputs):\n if i.owner and i.owner.op == switch:\n switch_node = i.owner\n try:\n if (\n get_scalar_constant_value(\n switch_node.inputs[1], only_process_constants=True\n )\n == 0.0\n ):\n listmul = node.inputs[:idx] + node.inputs[idx + 1 :]\n fmul = mul(*(listmul + [switch_node.inputs[2]]))\n\n # Copy over stacktrace for elementwise multiplication op\n # from previous elementwise multiplication op.\n # An error in the multiplication (e.g. errors due to\n # inconsistent shapes), will point to the\n # multiplication op.\n copy_stack_trace(node.outputs, fmul)\n\n fct = [switch(switch_node.inputs[0], 0, fmul)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise multiplication op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch_node.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n try:\n if (\n get_scalar_constant_value(\n switch_node.inputs[2], only_process_constants=True\n )\n == 0.0\n ):\n listmul = node.inputs[:idx] + node.inputs[idx + 1 :]\n fmul = mul(*(listmul + [switch_node.inputs[1]]))\n # Copy over stacktrace for elementwise multiplication op\n # from previous elementwise multiplication op.\n # An error in the multiplication (e.g. errors due to\n # inconsistent shapes), will point to the\n # multiplication op.\n copy_stack_trace(node.outputs, fmul)\n\n fct = [switch(switch_node.inputs[0], fmul, 0)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise multiplication op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch_node.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n return False\n\n\n@register_canonicalize\n@local_optimizer([true_div, int_div])\ndef local_div_switch_sink(fgraph, node):\n \"\"\"\n This optimization makes the following changes in the graph:\n\n ``at.div(at.switch(cond, 0, iff), A)`` -> ``at.switch(cond, 0, at.div(iff, A))``\n ``at.div(at.switch(cond, ift, 0), A)`` -> ``at.switch(cond, at.div(ift, A), 0)``\n\n where ``A`` is a symbolic variable.\n\n This is useful because ``A`` may not be numerically stable and give\n ``nan`` or ``inf`` values for cases where the switch returns 0.\n\n See `local_mul_switch_sink` for more details.\n\n \"\"\"\n if node.op != true_div and node.op != int_div:\n return False\n op = node.op\n if node.inputs[0].owner and node.inputs[0].owner.op == switch:\n switch_node = node.inputs[0].owner\n try:\n if (\n get_scalar_constant_value(\n switch_node.inputs[1], only_process_constants=True\n )\n == 0.0\n ):\n fdiv = op(switch_node.inputs[2], node.inputs[1])\n # Copy over stacktrace for elementwise division op\n # from previous elementwise multiplication op.\n # An error in the division (e.g. 
errors due to\n # inconsistent shapes or division by zero),\n # will point to the new division op.\n copy_stack_trace(node.outputs, fdiv)\n\n fct = [switch(switch_node.inputs[0], 0, fdiv)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise division op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch_node.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n try:\n if (\n get_scalar_constant_value(\n switch_node.inputs[2], only_process_constants=True\n )\n == 0.0\n ):\n fdiv = op(switch_node.inputs[1], node.inputs[1])\n # Copy over stacktrace for elementwise division op\n # from previous elementwise multiplication op.\n # An error in the division (e.g. errors due to\n # inconsistent shapes or division by zero),\n # will point to the new division op.\n copy_stack_trace(node.outputs, fdiv)\n\n fct = [switch(switch_node.inputs[0], fdiv, 0)]\n fct[0].tag.values_eq_approx = values_eq_approx_remove_nan\n\n # Copy over stacktrace for switch op from both previous\n # elementwise division op and previous switch op,\n # because an error in this part can be caused by either\n # of the two previous ops.\n copy_stack_trace(node.outputs + switch_node.outputs, fct)\n return fct\n except NotScalarConstantError:\n pass\n return False\n\n\nclass AlgebraicCanonizer(LocalOptimizer):\n r\"\"\"Simplification tool.\n\n The variable is a ``local_optimizer``. It is best used\n with a ``TopoOptimizer`` in ``in_to_out`` order.\n\n Usage: ``AlgebraicCanonizer(main, inverse, reciprocal, calculate)``\n\n Parameters\n ----------\n main\n A suitable ``Op`` class that is commutative, associative and\n takes one to an arbitrary number of inputs, e.g. add or\n mul\n inverse\n An ``Op`` class such that ``inverse(main(x, y), y) == x``\n e.g. ``sub`` or true_div\n reciprocal\n A function such that ``main(x, reciprocal(y)) == inverse(x, y)``\n e.g. ``neg`` or ``reciprocal``\n calculate\n Function that takes a list of numpy.ndarray instances\n for the numerator, another list for the denumerator,\n and calculates ``inverse(main(\\*num), main(\\*denum))``. It\n takes a keyword argument, aslist. If True, the value\n should be returned as a list of one element, unless\n the value is such that value = main(). In that case,\n the return value should be an empty list.\n\n Examples\n --------\n >>> import aesara.tensor as at\n >>> from aesara.tensor.math_opt import AlgebraicCanonizer\n >>> add_canonizer = AlgebraicCanonizer(add, sub, neg, \\\\\n ... lambda n, d: sum(n) - sum(d))\n >>> mul_canonizer = AlgebraicCanonizer(mul, true_div, inv, \\\\\n ... 
lambda n, d: prod(n) / prod(d))\n\n Examples of optimizations ``mul_canonizer`` can perform:\n\n | x / x -> 1\n | (x * y) / x -> y\n | x / y / x -> 1 / y\n | x / y / z -> x / (y * z)\n | x / (y / z) -> (x * z) / y\n | (a / b) * (b / c) * (c / d) -> a / d\n | (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\n | 2 * x / 2 -> x\n | x * y * z -> Elemwise(mul){x,y,z} #only one pass over the memory.\n | !-> Elemwise(mul){x,Elemwise(mul){y,z}}\n\n \"\"\"\n\n def __init__(self, main, inverse_fn, reciprocal_fn, calculate, use_reciprocal=True):\n self.main = main\n self.inverse = inverse_fn\n self.reciprocal = reciprocal_fn\n self.calculate = calculate\n self.use_reciprocal = use_reciprocal\n\n self.external_simplifiers = []\n\n def add_simplifier(self, simplifier, reason):\n self.external_simplifiers.append((reason, simplifier))\n\n def tracks(self):\n return [self.main, self.inverse, self.reciprocal]\n\n def get_num_denum(self, inp):\n r\"\"\"\n This extract two lists, ``num`` and ``denum``, such that the input is:\n ``self.inverse(self.main(\\*num), self.main(\\*denum))``. It returns\n the two lists in a ``(num, denum)`` pair.\n\n For example, for main, inverse and ``reciprocal = \\*, / and inv()``,\n\n | input -> returned value (num, denum)\n\n | x*y -> ([x, y], [])\n | inv(x) -> ([], [x])\n | inv(x) * inv(y) -> ([], [x, y])\n | x*y/z -> ([x, y], [z])\n | log(x) / y * (z + x) / y -> ([log(x), z + x], [y, y])\n | (((a / b) * c) / d) -> ([a, c], [b, d])\n | a / (b / c) -> ([a, c], [b])\n | log(x) -> ([log(x)], [])\n | x**y -> ([x**y], [])\n | x * y * z -> ([x, y, z], [])\n\n \"\"\"\n # This function is recursive. The idea is that there is a\n # get_num_denum recursion in which the internal ops are all\n # one of (main, inverse, reciprocal, DimShuffle) and the\n # internal data nodes all have the dtype of the 'input'\n # argument. The leaf-Variables of the graph covered by the\n # recursion may be of any Variable type.\n\n if inp.owner is None or inp.owner.op not in [\n self.main,\n self.inverse,\n self.reciprocal,\n ]:\n if inp.owner and isinstance(inp.owner.op, DimShuffle):\n # If input is a DimShuffle of some input which does\n # something like this:\n\n # * change a vector of length N into a 1xN row matrix\n # * change a scalar into a 1x1x1 tensor\n # * in general, complete the shape of a tensor\n # with broadcastable 1s to the *left*\n # Then we will simply discard the DimShuffle and return\n # the num/denum of its input\n dsn = inp.owner # dimshuffle node\n dsop = dsn.op # dimshuffle op\n\n # the first input of the dimshuffle i.e. 
the ndarray to redim\n dsi0 = dsn.inputs[0]\n\n # The compatible order is a DimShuffle \"new_order\" of the form:\n # ('x', ..., 'x', 0, 1, 2, ..., dimshuffle_input.type.ndim)\n\n # That kind of DimShuffle only adds broadcastable\n # dimensions on the left, without discarding any\n # existing broadcastable dimension and is inserted\n # automatically by Elemwise when the inputs have\n # different numbers of dimensions (hence why we can\n # discard its information - we know we can retrieve it\n # later on).\n compatible_order = (\"x\",) * (inp.type.ndim - dsi0.type.ndim) + tuple(\n range(dsi0.type.ndim)\n )\n if dsop.new_order == compatible_order:\n # If the \"new_order\" is the one we recognize,\n # we return the num_denum of the dimshuffled input.\n return self.get_num_denum(inp.owner.inputs[0])\n else:\n # This is when the input isn't produced by main,\n # inverse or reciprocal.\n return [inp], []\n else:\n return [inp], []\n num = []\n denum = []\n parent = inp.owner\n\n # We get the (num, denum) pairs for each input\n # pairs = [self.get_num_denum(input2) if input2.type.dtype ==\n # input.type.dtype else ([input2], []) for input2 in\n # parent.inputs]\n pairs = [self.get_num_denum(input2) for input2 in parent.inputs]\n\n if parent.op == self.main:\n # If we have main(x, y, ...), numx, denumx, numy, denumy, ...\n # then num is concat(numx, numy, num...) and denum is\n # concat(denumx, denumy, denum...) note that main() can have any\n # number of arguments >= 0 concat is list concatenation\n num = reduce(list.__iadd__, map(operator.itemgetter(0), pairs))\n denum = reduce(list.__iadd__, map(operator.itemgetter(1), pairs))\n elif parent.op == self.inverse:\n # If we have inverse(x, y), numx, denumx, numy and denumy\n # then num is concat(numx, denumy) and denum is\n # concat(denumx, numy) note that inverse() is binary\n num = pairs[0][0] + pairs[1][1]\n denum = pairs[0][1] + pairs[1][0]\n elif parent.op == self.reciprocal:\n # If we have reciprocal(x), numx, denumx\n # then num is denumx and denum is numx\n # note that reciprocal() is unary\n num = pairs[0][1]\n denum = pairs[0][0]\n return num, denum\n\n def merge_num_denum(self, num, denum):\n r\"\"\"\n Utility function which takes two lists, num and denum, and\n returns something which is equivalent to inverse(main(\\*num),\n main(\\*denum)), but depends on the length of num and the length\n of denum (in order to minimize the number of operations).\n\n Let n = len(num) and d = len(denum):\n\n | n=0, d=0: neutral element (given by self.calculate([], []))\n | (for example, this would be 0 if main is addition\n | and 1 if main is multiplication)\n | n=1, d=0: num[0]\n | n=0, d=1: reciprocal(denum[0])\n | n=1, d=1: inverse(num[0], denum[0])\n | n=0, d>1: reciprocal(main(\\*denum))\n | n>1, d=0: main(\\*num)\n | n=1, d>1: inverse(num[0], main(\\*denum))\n | n>1, d=1: inverse(main(\\*num), denum[0])\n | n>1, d>1: inverse(main(\\*num), main(\\*denum))\n\n Given the values of n and d to which they are associated, all\n of the above are equivalent to:\n inverse(main(\\*num), main(\\*denum))\n\n \"\"\"\n\n ln, ld = len(num), len(denum)\n if not ln and not ld:\n return as_tensor_variable(self.calculate([], []))\n if not ln:\n if self.use_reciprocal:\n return self.reciprocal(self.merge_num_denum(denum, []))\n else:\n ln = [self.calculate([], [], aslist=False)]\n if not ld:\n if ln == 1:\n # num[0] should always be a variable\n assert isinstance(num[0], Variable)\n return num[0]\n else:\n return self.main(*num)\n return self.inverse(\n 
self.merge_num_denum(num, []), self.merge_num_denum(denum, [])\n )\n\n def simplify(self, num, denum, out_type):\n \"\"\"\n Shorthand for:\n\n .. code-block:: python\n\n self.simplify_constants(*self.simplify_factors(num, denum))\n\n \"\"\"\n rval = self.simplify_constants(\n *self.simplify_factors(num, denum), out_type=out_type\n )\n for reason, simplifier in self.external_simplifiers:\n # TODO: document that 'reason' is associated with this\n # simplification to help auditing when things go\n # wrong\n rval = simplifier(*rval)\n return rval\n\n def simplify_factors(self, num, denum):\n \"\"\"\n For any Variable r which is both in num and denum, removes it\n from both lists. Modifies the lists inplace. Returns the\n modified lists. For example:\n\n | [x], [x] -> [], []\n | [x, y], [x] -> [y], []\n | [a, b], [c, d] -> [a, b], [c, d]\n\n \"\"\"\n ln = len(num)\n ld = len(denum)\n if ld > 2 and ln > 2:\n # Faster version for \"big\" inputs.\n while True:\n s = set(num)\n # Inputs can appear multiple times\n redo = len(s) != len(num)\n inter = s.intersection(denum)\n for v in inter:\n num.remove(v)\n denum.remove(v)\n if not redo or not inter:\n break\n else:\n for v in list(num):\n if v in denum:\n num.remove(v)\n denum.remove(v)\n return num, denum\n\n def simplify_constants(self, orig_num, orig_denum, out_type=None):\n \"\"\"\n Find all constants and put them together into a single constant.\n\n Finds all constants in orig_num and orig_denum (using\n get_constant) and puts them together into a single\n constant. The constant is inserted as the first element of the\n numerator. If the constant is the neutral element, it is\n removed from the numerator.\n\n Examples\n --------\n Let main be multiplication:\n\n | [2, 3, x], [] -> [6, x], []\n | [x, y, 2], [4, z] -> [0.5, x, y], [z]\n | [x, 2, y], [z, 2] -> [x, y], [z]\n\n \"\"\"\n # Lists representing the numerator and denumerator\n num, denum = [], []\n\n # Lists representing the *constant* elements of num and denum\n numct, denumct = [], []\n\n for v in orig_num:\n ct = get_constant(v)\n if ct is not None:\n # We found a constant in the numerator!\n # We add it to numct\n numct.append(ct)\n else:\n num.append(v)\n for v in orig_denum:\n ct = get_constant(v)\n if ct is not None:\n denumct.append(ct)\n else:\n denum.append(v)\n\n if self.use_reciprocal or num:\n # This will calculate either:\n # [inverse(main(*numct), main(*denumct))]\n # [] - if inverse(main(*numct), main(*denumct)) is the\n # neutral element\n ct = self.calculate(numct, denumct, aslist=True, out_type=out_type)\n else:\n # This happens if we don't allow the reciprocal and the\n # numerator is empty. 
That means we will need to represent\n # reciprocal(x) like inverse(neutral_element, x) so\n # we can't allow ct == []\n # TODO: why is this branch needed when merge_num_denum\n # does it for us?\n ct = [self.calculate(numct, denumct, aslist=False, out_type=out_type)]\n\n # Wrapping ct in a Constant with the right dtype\n ct = [constant(c, dtype=out_type.dtype) for c in ct]\n\n if orig_num and len(numct) == 1 and len(denumct) == 0 and ct:\n # In that case we should only have one constant in `ct`.\n assert len(ct) == 1\n first_num_ct = get_constant(orig_num[0])\n if first_num_ct is not None and ct[0].type.values_eq(\n ct[0].data, first_num_ct\n ):\n # This is an important trick :( if it so happens that:\n # * there's exactly one constant on the numerator and none on\n # the denominator\n # * it's not the neutral element (ct is an empty list in that\n # case)\n # * the constant is the same as the first argument in the\n # numerator (we only check the first argument because the\n # canonizer puts the computed constants first)\n # -> then we return very exactly the original num/denum.\n # If we don't do that the optimizer will just loop\n # infinitely because it will not catch on that there are\n # no changes to be made and every time it will want to\n # replace something by the same thing...\n # Note that it is important to use `values_eq` instead of\n # the == operator, to handle NaN values correctly.\n return orig_num, orig_denum\n\n return ct + num, denum\n\n def transform(self, fgraph, node):\n op = node.op\n if op not in [self.main, self.inverse, self.reciprocal]:\n return False\n\n assert len(node.outputs) == 1\n out = node.outputs[0]\n\n out_clients = fgraph.clients.get(out)\n\n if not out_clients:\n return False\n\n # check if any of the clients of this node would be part of\n # this canonized graph... 
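(i.e. whether they are themselves main/inverse/reciprocal nodes,\n # possibly reached through single-client DimShuffles);\n # 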
if so, we do nothing and wait for\n # them to be transformed.\n for c, c_idx in out_clients:\n if c == \"output\":\n continue\n while (\n isinstance(getattr(c, \"op\", None), DimShuffle)\n and len(fgraph.clients[c.outputs[0]]) <= 1\n ):\n c = fgraph.clients[c.outputs[0]][0][0]\n if getattr(c, \"op\", \"\") in [self.main, self.inverse, self.reciprocal]:\n return False\n\n # Here we make the canonical version of the graph around this node\n # See the documentation of get_num_denum and simplify\n orig_num, orig_denum = self.get_num_denum(node.outputs[0])\n num, denum = self.simplify(list(orig_num), list(orig_denum), out.type)\n\n def same(x, y):\n return len(x) == len(y) and all(np.all(xe == ye) for xe, ye in zip(x, y))\n\n if (\n same(orig_num, num)\n and same(orig_denum, denum)\n and\n # Check to see if we've collapsed some nested ops.\n not (\n len(orig_denum) == 0\n and\n # Make sure this change would increase the number of vector\n # arguments--decreasing the number of unnecessary `self.main`\n # nodes.\n len(node.inputs) < len(orig_num)\n )\n and\n # Do a similar check for the reciprocal op.\n not (\n self.use_reciprocal\n and node.op == self.reciprocal\n and len(orig_num) == 0\n and node.inputs[0].owner\n and len(node.inputs[0].owner.inputs) < len(orig_denum)\n )\n ):\n return False\n\n new = self.merge_num_denum(num, denum)\n if new.type.dtype != out.type.dtype:\n new = cast(new, out.type.dtype)\n\n if new.type != out.type:\n new = fill_chain(new, node.inputs)[0]\n\n if new.type == out.type:\n # This happen with test\n # aesara/tensor/tests/test_opt.py:T_local_switch_sink\n new.tag.values_eq_approx = values_eq_approx_remove_inf_nan\n\n # We need to implement the copy over of the stacktrace.\n # See issue #5104.\n return [new]\n else:\n _logger.warning(\n \" \".join(\n (\n \"CANONIZE FAILED: new, out = \",\n new,\n \",\",\n out,\n \"types\",\n new.type,\n \",\",\n out.type,\n )\n )\n )\n return False\n\n def __str__(self):\n return getattr(\n self,\n \"name\",\n f\"AlgebraicCanonizer({self.main}, {self.inverse}, {self.reciprocal})\",\n )\n\n\ndef mul_calculate(num, denum, aslist=False, out_type=None):\n if not num and not denum:\n # Smallest 1 possible.\n if aslist:\n return []\n else:\n return np.int8(1)\n\n # Make sure we do not accidentally upcast data types.\n if out_type is None:\n out_dtype = aes.upcast(*[v.dtype for v in (num + denum)])\n else:\n out_dtype = out_type.dtype\n one = _asarray(1, dtype=out_dtype)\n\n v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)\n if aslist:\n if np.all(v == 1):\n return []\n else:\n return [v]\n return v\n\n\nlocal_mul_canonizer = AlgebraicCanonizer(\n mul, true_div, reciprocal, mul_calculate, False\n)\nregister_canonicalize(local_mul_canonizer, name=\"local_mul_canonizer\")\n\n\n@register_canonicalize\n@local_optimizer([neg])\ndef local_neg_to_mul(fgraph, node):\n if node.op == neg:\n return [mul(np.array(-1, dtype=node.inputs[0].dtype), node.inputs[0])]\n\n\n@register_specialize\n@local_optimizer([Sum, Prod])\ndef local_sum_prod_mul_by_scalar(fgraph, node):\n \"\"\"\n sum(scalar * smth) -> scalar * sum(smth)\n sum(-smth) -> -sum(smth)\n\n or\n\n prod(scalar * smth) -> scalar ** size(smth) * prod(smth)\n prod(-smth) -> -1 ** size(smth) * prod(smth)\n\n \"\"\"\n # TODO: if the the thing inside the Sum is a division,\n # we should get at the numerator....\n if isinstance(node.op, (Sum, Prod)):\n (node_inps,) = node.inputs\n if node_inps.owner and node_inps.owner.op == mul:\n terms = node_inps.owner.inputs\n scalars = 
[t.dimshuffle() for t in terms if np.all(t.type.broadcastable)]\n\n if len(scalars) == 0:\n # Nothing to optimize here\n return\n\n non_scalars = [t for t in terms if not np.all(t.broadcastable)]\n\n # Perform the op only on the non-scalar inputs, if applicable\n if len(non_scalars) == 0:\n new_op_input_nb_elements = 1\n new_op_output = 1\n elif len(non_scalars) == 1:\n new_op_input_nb_elements = non_scalars[0].size\n new_op_output = node.op(non_scalars[0])\n else:\n new_op_input = mul(*non_scalars)\n # We assume that errors always come from the prod/mul op in the\n # original computational graph, and therefore need to only\n # copy over its output stacktrace.\n copy_stack_trace(node.outputs, new_op_input)\n\n new_op_input_nb_elements = new_op_input.size\n new_op_output = node.op(new_op_input)\n\n if len(non_scalars) != 0:\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, new_op_output)\n\n # If `node.op` is a `Prod`, then the scalars need to be raised to\n # the power of the number of elements in the input to the `Prod`\n if isinstance(node.op, Prod) and new_op_input_nb_elements != 1:\n\n scalars = [s ** new_op_input_nb_elements for s in scalars]\n\n # Scale the output of the op by the scalars and return as\n # replacement for the original output\n mul_inputs = scalars\n if new_op_input_nb_elements != 1:\n mul_inputs.append(new_op_output)\n\n if len(mul_inputs) == 1:\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, mul_inputs)\n\n return mul_inputs\n else:\n ret = mul(*mul_inputs)\n # Copy over stacktrace from previous output to new mul op,\n # for same reason as above.\n copy_stack_trace(node.outputs, [ret] + mul_inputs)\n\n return [ret]\n\n if isinstance(node.op, Sum) and node_inps.owner and node_inps.owner.op == neg:\n s = node.op(node_inps.owner.inputs[0])\n ret = neg(s)\n # There are never errors in the negative op, thus\n # we need only to copy over stacktrace from previous output node to\n # the two new ops.\n copy_stack_trace(node.outputs, [s, ret])\n\n return [ret]\n\n\n@register_specialize\n@local_optimizer([Elemwise])\ndef local_elemwise_sub_zeros(fgraph, node):\n \"\"\"\n Elemwise{sub}(X,X) -> zeros_like(X)\n \"\"\"\n if (\n isinstance(node.op, Elemwise)\n and node.op.scalar_op.nin == 2\n and node.op.scalar_op == aes.sub\n and node.inputs[0] == node.inputs[1]\n ):\n res = zeros_like(node.inputs[0])\n # Copy over stacktrace from previous output.\n # This could help for failures due to out-of-memory.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n\n@register_useless\n@register_specialize\n@register_stabilize\n@register_canonicalize\n@local_optimizer([Elemwise])\ndef local_useless_elemwise_comparison(fgraph, node):\n \"\"\"...\n\n :note: These cases appear in the graph generated by scan.\n These optimizations will make the graph easier to read.\n # Comparing to itself is constant\n Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)\n Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)\n Elemwise[{minimum,maximum}](X, X) -> X\n\n # Comparing shape to 0 can be constant\n Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)\n Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)\n Elemwise[maximum](X.shape[i], 0) -> X.shape[i]\n Elemwise[maximum](0, X.shape[i]) -> X.shape[i]\n Elemwise[minimum](X.shape[i], 0) -> 0\n Elemwise[minimum](0, X.shape[i]) -> 0\n\n # The shape can be replaced with sum of shapes\n Elemwise[LT](add([anything that is 
shapes]), 0) -> Elemwise[zeros](X)\n Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)\n\n # Shapes are never negative\n # Needed by Reshape.infer_shape\n Elemwise[EQ](Subtensor(Shape(x)), -N) -> Elemwise[zeros](X)\n\n \"\"\"\n if not isinstance(node.op, Elemwise):\n return\n if node.op.scalar_op.nin != 2:\n return\n\n # We call zeros_like and one_like with opt=True to generate a\n # cleaner graph.\n dtype = node.outputs[0].dtype\n\n # Elemwise[{LT,GT}](X, X) -> Elemwise[zeros](X)\n if (\n isinstance(node.op.scalar_op, (aes.LT, aes.GT))\n and node.inputs[0] is node.inputs[1]\n ):\n res = zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[{LE,GE}](X, X) -> Elemwise[ones](X)\n if (\n isinstance(node.op.scalar_op, (aes.LE, aes.GE))\n and node.inputs[0] is node.inputs[1]\n ):\n res = ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[{minimum,maximum}](X, X) -> X\n if (\n isinstance(node.op.scalar_op, (aes.ScalarMinimum, aes.ScalarMaximum))\n and node.inputs[0] is node.inputs[1]\n ):\n res = node.inputs[0]\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[LT](X.shape[i], 0) -> Elemwise[zeros](X)\n if (\n isinstance(node.op.scalar_op, aes.LT)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[GE](X.shape[i], 0) -> Elemwise[ones](X)\n if (\n isinstance(node.op.scalar_op, aes.GE)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = ones_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[maximum](X.shape[i], 0) -> X.shape[i]\n if (\n isinstance(node.op.scalar_op, aes.ScalarMaximum)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n # No need to copy over stacktrace.\n return [node.inputs[0]]\n # Elemwise[maximum](0, X.shape[i]) -> X.shape[i]\n if (\n isinstance(node.op.scalar_op, aes.ScalarMaximum)\n and extract_constant(node.inputs[0], only_process_constants=True) == 0\n and node.inputs[1].owner\n and isinstance(node.inputs[1].owner.op, Shape_i)\n ):\n # No need to copy over stacktrace.\n return [node.inputs[1]]\n # Elemwise[minimum](X.shape[i], 0) -> 0\n if (\n isinstance(node.op.scalar_op, aes.ScalarMinimum)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Shape_i)\n and extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[minimum](0, X.shape[i]) -> 0\n if (\n isinstance(node.op.scalar_op, aes.ScalarMinimum)\n and extract_constant(node.inputs[0], only_process_constants=True) == 0\n and node.inputs[1].owner\n and isinstance(node.inputs[1].owner.op, Shape_i)\n ):\n res = zeros_like(node.inputs[1], 
dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[LT](add([anything that is shapes]), 0) -> Elemwise[zeros](X)\n if (\n isinstance(node.op.scalar_op, aes.LT)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Elemwise)\n and isinstance(node.inputs[0].owner.op.scalar_op, aes.Add)\n and all(\n [\n isinstance(var.owner and var.owner.op, Shape_i)\n for var in node.inputs[0].owner.inputs\n ]\n )\n and extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = zeros_like(node.inputs[0], dtype=dtype, opt=True)\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n # Elemwise[GE](add([anything that is shapes]), 0) -> Elemwise[ones](X)\n if (\n isinstance(node.op.scalar_op, aes.GE)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Elemwise)\n and isinstance(node.inputs[0].owner.op.scalar_op, aes.Add)\n and all(\n [\n isinstance(var.owner and var.owner.op, Shape_i)\n for var in node.inputs[0].owner.inputs\n ]\n )\n and extract_constant(node.inputs[1], only_process_constants=True) == 0\n ):\n res = ones_like(node.inputs[0], dtype=dtype, opt=True)\n\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n return [res]\n\n # Elemwise[EQ](Subtensor(Shape(x)), -N)\n # Elemwise[EQ](somegraph that only depend of shape, -N)\n # TODO: handle the case where the -N is on either side\n \"\"\"\n |Elemwise{eq,no_inplace} [id B] ''\n | |Subtensor{int64} [id C] ''\n | | |Join [id D] ''\n | | | |TensorConstant{0} [id E]\n | | | |Subtensor{int64:int64:} [id F] ''\n | | | | |Shape [id G] ''\n \"\"\"\n\n def investigate(node):\n \" Return True if values will be shapes, so >= 0\"\n if isinstance(node.op, (Shape, Shape_i)):\n return True\n elif isinstance(node.op, Subtensor) and node.inputs[0].owner:\n return investigate(node.inputs[0].owner)\n elif isinstance(node.op, Join):\n return all(v.owner and investigate(v.owner) for v in node.inputs[1:])\n elif isinstance(node.op, MakeVector):\n return all(v.owner and investigate(v.owner) for v in node.inputs)\n\n if (\n isinstance(node.op.scalar_op, aes.EQ)\n and node.inputs[0].owner\n and investigate(node.inputs[0].owner)\n ):\n try:\n cst = get_scalar_constant_value(node.inputs[1], only_process_constants=True)\n\n res = zeros_like(node.inputs[0], dtype=dtype, opt=True)\n\n if cst < 0:\n # Copy over stacktrace from previous output.\n copy_stack_trace(node.outputs, res)\n\n return [res]\n\n except NotScalarConstantError:\n pass\n return\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([Sum, Prod])\ndef local_sum_prod_div_dimshuffle(fgraph, node):\n \"\"\"\n sum(a / dimshuffle{...}(b), axis=l) -> sum(a, axis={...}) / b,\n if dimension l of the DimShuffle is 'x'\n\n or\n\n prod(a / dimshuffle{...}(b), axis=l) ->\n prod(a, axis={...}) / b ** a.shape[l],\n if dimension l of the DimShuffle is 'x'\n \"\"\"\n\n # It does not make much sense now to extend it to the case where the\n # dimshuffle is in the numerator, since elemwise inversion of the\n # denominator would still be needed before the summation or production.\n\n if isinstance(node.op, (Sum, Prod)):\n axis = node.op.axis\n if axis is None:\n axis = list(range(node.inputs[0].ndim))\n node_input = node.inputs[0]\n if node_input.owner and node_input.owner.op == true_div:\n numerator, denominator = node_input.owner.inputs\n\n if denominator.owner and isinstance(denominator.owner.op, 
DimShuffle):\n dimshuffle_input = denominator.owner.inputs[0]\n dimshuffle_order = denominator.owner.op.new_order\n\n compatible_dims = []\n incompatible_dims = []\n for ax in axis:\n if ax < len(dimshuffle_order) and dimshuffle_order[ax] == \"x\":\n compatible_dims.append(ax)\n else:\n incompatible_dims.append(ax)\n reordered_incompatible_dims = []\n for ic_ax in incompatible_dims:\n reordered_incompatible_dims.append(\n ic_ax - sum([1 for c_ax in compatible_dims if c_ax < ic_ax])\n )\n\n if len(compatible_dims) > 0:\n optimized_dimshuffle_order = list(\n ax\n for i, ax in enumerate(dimshuffle_order)\n if (i not in axis) or (ax != \"x\")\n )\n\n # Removing leading 'x' (since it will be done automatically)\n while (\n len(optimized_dimshuffle_order) > 0\n and optimized_dimshuffle_order[0] == \"x\"\n ):\n del optimized_dimshuffle_order[0]\n\n # if optimized_dimshuffle_order is sorted with\n # not 'x', then dimshuffle is useless.\n if all(i == e for i, e in enumerate(optimized_dimshuffle_order)):\n optimized_dimshuffle = dimshuffle_input\n else:\n optimized_dimshuffle = DimShuffle(\n dimshuffle_input.type.broadcastable,\n optimized_dimshuffle_order,\n )(dimshuffle_input)\n\n if isinstance(node.op, Sum):\n op_on_compatible_dims = at_sum(numerator, axis=compatible_dims)\n rval = true_div(op_on_compatible_dims, optimized_dimshuffle)\n if len(reordered_incompatible_dims) > 0:\n rval = at_sum(rval, axis=reordered_incompatible_dims)\n elif isinstance(node.op, Prod):\n op_on_compatible_dims = prod(numerator, axis=compatible_dims)\n dtype = numerator.dtype\n rval = true_div(\n op_on_compatible_dims,\n (\n optimized_dimshuffle\n ** prod(\n [\n numerator.shape[ax].astype(dtype)\n for ax in compatible_dims\n ]\n )\n ),\n )\n if len(reordered_incompatible_dims) > 0:\n rval = prod(rval, axis=reordered_incompatible_dims)\n return [rval]\n\n\n@register_canonicalize\n@local_optimizer([Sum, Prod])\ndef local_sum_prod_all_to_none(fgraph, node):\n \"\"\"\n Sum{0,1,...N} -> Sum{} or\n Prod{0,1,...N} -> Prod{}\n\n \"\"\"\n if isinstance(node.op, Sum) or isinstance(node.op, Prod):\n opt_type = Sum if isinstance(node.op, Sum) else Prod\n # if all the axes are named, then use None as a shorthand\n # this permits more merging\n if node.op.axis is None:\n return\n if set(node.op.axis) == set(range(node.inputs[0].type.ndim)):\n return [opt_type(axis=None, dtype=node.op.dtype)(node.inputs[0])]\n\n\n@register_canonicalize\n@local_optimizer([Sum, Prod])\ndef local_op_of_op(fgraph, node):\n \"\"\"\n Prod(Prod()) -> single Prod()\n or\n Sum(Sum()) -> single Sum()\n\n \"\"\"\n if isinstance(node.op, Prod) or isinstance(node.op, Sum):\n opt_type = Sum if isinstance(node.op, Sum) else Prod\n (node_inps,) = node.inputs\n out_dtype = node.op.dtype\n # We manipulate the graph so this is done to make sure the opt\n # doesn't affect other computations.\n if len(fgraph.clients[node_inps]) == 1:\n if node_inps.owner and (isinstance(node_inps.owner.op, node.op.__class__)):\n\n # check to see either the inner or outer prod is doing a\n # product over all axis, in which case we can remove it\n if node_inps.owner.op.axis is None or node.op.axis is None:\n return [opt_type(None, dtype=out_dtype)(node_inps.owner.inputs[0])]\n\n # figure out which axes were in the original sum\n newaxis = list(tuple(node_inps.owner.op.axis))\n for i in node.op.axis:\n new_i = i\n for ii in node_inps.owner.op.axis:\n if new_i >= ii:\n new_i += 1\n assert new_i not in newaxis\n newaxis.append(new_i)\n\n assert len(newaxis) == len(\n 
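# sanity check: e.g. for sum(sum(x, axis=1), axis=1) on a 3-D x, the inner axis is (1,)\n # and the outer axis 1 maps back to original axis 2, giving newaxis == [1, 2].\n 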
list(node_inps.owner.op.axis) + list(node.op.axis)\n )\n\n combined = opt_type(newaxis, dtype=out_dtype)\n return [combined(node_inps.owner.inputs[0])]\n\n\nALL_REDUCE = (\n [\n CAReduce,\n All,\n Any,\n Sum,\n Prod,\n ProdWithoutZeros,\n ]\n + CAReduce.__subclasses__()\n + NonZeroCAReduce.__subclasses__()\n)\n\n\n@register_canonicalize\n@register_uncanonicalize # Needed for MaxAndArgmax -> CAReduce\n@local_optimizer(ALL_REDUCE)\ndef local_reduce_join(fgraph, node):\n \"\"\"\n Reduce{scalar.op}(Join(axis=0, a, b), axis=0) -> Elemwise{scalar.op}(a, b)\n\n Notes\n -----\n Supported scalar.op are Maximum, Minimum in some cases and Add and Mul in\n all cases.\n\n Currently we must reduce on axis 0. It is probably extensible to the case\n where we join and reduce on the same set of axis.\n\n \"\"\"\n if (\n isinstance(node.op, CAReduce)\n and node.inputs[0].owner\n and isinstance(node.inputs[0].owner.op, Join)\n ):\n join_node = node.inputs[0].owner\n if extract_constant(join_node.inputs[0], only_process_constants=True) != 0:\n return\n\n if isinstance(node.op.scalar_op, (aes.ScalarMaximum, aes.ScalarMinimum)):\n # Support only 2 inputs for now\n if len(join_node.inputs) != 3:\n return\n elif not isinstance(node.op.scalar_op, (aes.Add, aes.Mul)):\n return\n elif len(join_node.inputs) <= 2:\n # This is a useless join, that will get removed by another opt.\n return\n\n new_inp = []\n for inp in join_node.inputs[1:]:\n inp = inp.owner\n if not inp:\n return\n if not isinstance(inp.op, DimShuffle) or inp.op.new_order != (\"x\",) + tuple(\n range(inp.inputs[0].ndim)\n ):\n return\n new_inp.append(inp.inputs[0])\n ret = Elemwise(node.op.scalar_op)(*new_inp)\n\n if ret.dtype != node.outputs[0].dtype:\n # The reduction do something about the dtype.\n return\n\n reduce_axis = node.op.axis\n if reduce_axis is None:\n reduce_axis = tuple(range(node.inputs[0].ndim))\n\n if len(reduce_axis) != 1 or 0 not in reduce_axis:\n return\n\n # We add the new check late to don't add extra warning.\n try:\n join_axis = get_scalar_constant_value(\n join_node.inputs[0], only_process_constants=True\n )\n\n if join_axis != reduce_axis[0]:\n return\n except NotScalarConstantError:\n return\n\n return [ret]\n\n\n@register_canonicalize(\"fast_compile\", \"local_cut_useless_reduce\")\n@register_useless(\"local_cut_useless_reduce\")\n@local_optimizer(ALL_REDUCE)\ndef local_useless_reduce(fgraph, node):\n \"\"\"Sum(a, axis=[]) -> a \"\"\"\n if isinstance(node.op, CAReduce):\n (summed,) = node.inputs\n # if reduce were doing anything, the output ndim would be reduced\n if summed.type == node.outputs[0].type:\n return [summed]\n\n\n@register_canonicalize\n@register_uncanonicalize\n@register_specialize\n@local_optimizer(ALL_REDUCE)\ndef local_reduce_broadcastable(fgraph, node):\n \"\"\"Remove reduction over broadcastable dimensions.\"\"\"\n if isinstance(node.op, CAReduce):\n (reduced,) = node.inputs\n odtype = node.outputs[0].dtype\n if node.op.axis is None:\n if all(reduced.broadcastable):\n return [reduced.dimshuffle().astype(odtype)]\n else:\n axis = list(node.op.axis)\n cuttable = [a for a in axis if reduced.broadcastable[a]]\n if cuttable:\n # -- we can remove some axes of summation,\n # which simplifies the codegen for sum, especially on GPU\n new_axis = []\n pattern = []\n ii = 0\n for p in range(reduced.ndim):\n if p not in cuttable:\n if p in axis:\n new_axis.append(ii)\n pattern.append(p)\n ii += 1\n new_reduced = reduced.dimshuffle(*pattern)\n if new_axis:\n if type(node.op) == CAReduce:\n # This case handles 
`CAReduce` instances\n # (e.g. generated by `scalar_elemwise`), and not the\n # scalar `Op`-specific subclasses\n # TODO FIXME: This highlights a major design flaw in\n # `CAReduce` (or at least our use of it), and it needs\n # to be fixed\n new_op = node.op.__class__(node.op.scalar_op, axis=new_axis)\n else:\n new_op = node.op.__class__(axis=new_axis)\n return [new_op(new_reduced)]\n else:\n # -- in this case we can remove the reduction completely\n return [new_reduced.astype(odtype)]\n\n\n@register_specialize\n@local_optimizer([Sum, Prod])\ndef local_opt_alloc(fgraph, node):\n \"\"\"\n sum(alloc(constant,shapes...)) => constant*prod(shapes)\n or\n prod(alloc(constant,shapes...)) => constant**prod(shapes)\n\n \"\"\"\n if isinstance(node.op, Sum) or isinstance(node.op, Prod):\n (node_inps,) = node.inputs\n if node_inps.owner and isinstance(node_inps.owner.op, Alloc):\n inp = node_inps.owner.inputs[0]\n shapes = node_inps.owner.inputs[1:]\n try:\n val = get_scalar_constant_value(inp, only_process_constants=True)\n assert val.size == 1\n val = val.reshape(1)[0]\n # check which type of op\n size = mul(*shapes)\n if inp.dtype in (\"float16\", \"float32\"):\n # shapes are ints and normally int64.\n # We don't want to have a float64 upcast\n # We don't want to downcast to float16\n # as we fear it could loose too much precision\n # that will be amplified by the mul/pow below.\n size = size.astype(\"float32\")\n if node.op.axis is None or node.op.axis == tuple(range(inp.ndim)):\n if isinstance(node.op, Sum):\n val = val * size\n else:\n val = val ** size\n # Sum can change the input dtype (upcast or bool\n # -> float32) by default or by user request.\n # We can ignore the acc_dtype, as there is only 1\n # elemwise we will do and not a sequence, so there is no\n # accumulation of errors.\n # So mostly, we just need to cast the output to the old\n # dtype.\n val = val.astype(node.outputs[0].dtype)\n return [val]\n to_prod = [shapes[i] for i in range(len(shapes)) if i in node.op.axis]\n if to_prod:\n size = mul(*to_prod)\n if isinstance(node.op, Sum):\n val *= size\n else:\n val = val ** size\n # See comments above.\n val = val.astype(node.outputs[0].dtype)\n return [\n alloc(\n val,\n *[\n shapes[i]\n for i in range(len(shapes))\n if i not in node.op.axis\n ],\n )\n ]\n except NotScalarConstantError:\n pass\n\n\n@register_specialize\n@local_optimizer([neg])\ndef local_neg_div_neg(fgraph, node):\n \"\"\"\n - (-a / b) -> a / b\n\n Also performs - (c / b) -> ((-c) / b) when c is a scalar constant.\n\n \"\"\"\n if node.op == neg:\n if node.inputs[0].owner and node.inputs[0].owner.op == true_div:\n frac = node.inputs[0]\n num, denom = frac.owner.inputs\n if num.owner and num.owner.op == neg:\n if len(fgraph.clients[frac]) == 1:\n # No other clients of the original division\n new_num = num.owner.inputs[0]\n return [true_div(new_num, denom)]\n elif np.all(num.broadcastable) and isinstance(num, Constant):\n if len(fgraph.clients[frac]) == 1:\n new_num = -num.data\n return [true_div(new_num, denom)]\n\n\n@register_canonicalize\n@local_optimizer([mul])\ndef local_mul_zero(fgraph, node):\n \"\"\"\n As part of canonicalization, we replace multiplication by zero\n with zero.\n\n \"\"\"\n if node.op == mul:\n otype = node.outputs[0].type\n\n for i in node.inputs:\n try:\n value = get_scalar_constant_value(i)\n except NotScalarConstantError:\n continue\n # print 'MUL by value', value, node.inputs\n if value == 0:\n # print '... 
returning zeros'\n return fill_chain(_asarray(0, dtype=otype.dtype), node.inputs)\n\n\n# TODO: Add this to the canonicalization to reduce redundancy.\n@register_specialize\n@local_optimizer([true_div])\ndef local_div_to_reciprocal(fgraph, node):\n if node.op == true_div and np.all(get_constant(node.inputs[0]) == 1.0):\n out = node.outputs[0]\n new_out = reciprocal(local_mul_canonizer.merge_num_denum(node.inputs[1:], []))\n # The ones could have forced upcasting\n if new_out.dtype != out.dtype:\n new_out = cast(new_out, dtype=out.dtype)\n # The ones could have forced a specific length\n if not out.type.is_super(new_out.type):\n new_out = broadcast_like(new_out, out, fgraph)\n return [new_out]\n else:\n return False\n\n\n@register_canonicalize\n@local_optimizer([reciprocal])\ndef local_reciprocal_canon(fgraph, node):\n if node.op == reciprocal:\n return [at_pow(node.inputs[0], -1.0)]\n else:\n return False\n\n\n@register_canonicalize\n@local_optimizer([at_pow])\ndef local_pow_canonicalize(fgraph, node):\n if node.op == at_pow:\n cst = get_constant(node.inputs[1])\n if cst == 0:\n return [broadcast_like(1, node.outputs[0], fgraph)]\n if cst == 1:\n return [broadcast_like(node.inputs[0], node.outputs[0], fgraph)]\n else:\n return False\n\n\n@register_specialize\n@local_optimizer([mul])\ndef local_mul_to_sqr(fgraph, node):\n \"\"\"\n x*x -> sqr(x)\n\n This is faster on the GPU when memory fetching is a big part of\n the computation time.\n\n \"\"\"\n if node.op == mul:\n if len(node.inputs) == 2:\n if node.inputs[0] is node.inputs[1]:\n return [sqr(node.inputs[0])]\n\n\n@register_canonicalize\n@local_optimizer([int_div])\ndef local_intdiv_by_one(fgraph, node):\n \"\"\"x // 1 -> x\"\"\"\n if node.op in [int_div]:\n if isinstance(node.inputs[1], TensorConstant) and np.all(\n node.inputs[1].value == 1\n ):\n return [node.inputs[0].astype(node.outputs[0].dtype)]\n\n\n@register_canonicalize\n@register_specialize\n@local_optimizer([int_div, true_div])\ndef local_zero_div(fgraph, node):\n \"\"\"0 / x -> 0\"\"\"\n if isinstance(node.op, Elemwise) and isinstance(\n node.op.scalar_op, (aes.IntDiv, aes.TrueDiv)\n ):\n if get_constant(node.inputs[0]) == 0:\n ret = broadcast_like(0, node.outputs[0], fgraph)\n ret.tag.values_eq_approx = values_eq_approx_remove_nan\n return [ret]\n\n\n@register_specialize\n@local_optimizer([at_pow])\ndef local_pow_specialize(fgraph, node):\n # here, we are past the point of canonicalization, so we don't want\n # to put in un-necessary fills.\n if node.op == at_pow:\n # the idea here is that we have pow(x, y)\n odtype = node.outputs[0].dtype\n xsym = node.inputs[0]\n ysym = node.inputs[1]\n y = get_constant(ysym)\n if (y is not None) and encompasses_broadcastable(\n xsym.type.broadcastable, ysym.type.broadcastable\n ):\n rval = None\n\n if np.all(y == 2):\n rval = [sqr(xsym)]\n if np.all(y == 1):\n rval = [xsym]\n if np.all(y == 0):\n rval = [fill(xsym, np.asarray(1, dtype=odtype))]\n if np.all(y == 0.5):\n rval = [sqrt(xsym)]\n if np.all(y == -0.5):\n rval = [reciprocal(sqrt(xsym))]\n if np.all(y == -1):\n rval = [reciprocal(xsym)]\n if np.all(y == -2):\n rval = [reciprocal(sqr(xsym))]\n if rval:\n rval[0] = cast(rval[0], odtype)\n assert rval[0].type == node.outputs[0].type, (rval, node.outputs)\n return rval\n else:\n return False\n\n\n@register_specialize_device\n@local_optimizer([at_pow])\ndef local_pow_specialize_device(fgraph, node):\n \"\"\"\n This optimization is not the same on all device. 
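When the exponent is an integer constant y with |y| <= 512, pow(x, y) is\n rewritten as repeated squaring, e.g. x ** 6 -> sqr(sqr(x)) * sqr(x), fused into a\n single Composite Elemwise when |y| > 2 and wrapped in reciprocal for negative y.\n 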
We do it only on cpu here.\n \"\"\"\n if node.op == at_pow:\n # the idea here is that we have pow(x, y)\n odtype = node.outputs[0].dtype\n xsym = node.inputs[0]\n ysym = node.inputs[1]\n y = get_constant(ysym)\n\n # the next line is needed to fix a strange case that I don't\n # know how to make a separate test.\n # That happen in the test_opt.py:test_log_erfc test.\n # y is a ndarray with dtype int8 and value 2,4 or 6. This make\n # the abs(y) <= 512 fail!\n # taking the value outside ndarray solve the problem.\n # it could be that in that case, numpy make the comparison\n # into the wrong type(do in int8 that overflow.)\n if isinstance(y, np.ndarray):\n assert y.size == 1\n try:\n y = y[0]\n except IndexError:\n pass\n if (y is not None) and encompasses_broadcastable(\n xsym.type.broadcastable, ysym.type.broadcastable\n ):\n rval = None\n # 512 is too small for the cpu and too big for some gpu!\n if abs(y) == int(abs(y)) and abs(y) <= 512:\n pow2 = [xsym]\n pow2_scal = [aes.get_scalar_type(xsym.dtype)()]\n y_to_do = abs(y)\n for i in range(int(np.log2(y_to_do))):\n pow2.append(sqr(pow2[i]))\n pow2_scal.append(aes.sqr(pow2_scal[i]))\n rval1 = None\n rval1_scal = None\n while y_to_do > 0:\n log_to_do = int(np.log2(y_to_do))\n if rval1:\n rval1 *= pow2[log_to_do]\n rval1_scal *= pow2_scal[log_to_do]\n else:\n rval1 = pow2[log_to_do]\n rval1_scal = pow2_scal[log_to_do]\n y_to_do -= 2 ** log_to_do\n\n if abs(y) > 2:\n # We fuse all the pow together here to make\n # compilation faster\n rval1 = Elemwise(\n aes.Composite([pow2_scal[0]], [rval1_scal])\n ).make_node(xsym)\n if y < 0:\n rval = [reciprocal(rval1)]\n else:\n rval = [rval1]\n if rval:\n rval[0] = cast(rval[0], odtype)\n assert rval[0].type == node.outputs[0].type, (rval, node.outputs)\n return rval\n\n\n@register_specialize\n@local_optimizer([mul])\ndef local_mul_specialize(fgraph, node):\n \"\"\"\n Remove special-case constants from mul arguments and useless neg in inputs.\n\n mul(-1, x) -> neg(x)\n mul(1, x, y) -> mul(x, y)\n mul(0, ...) 
-> alloc(0, shapes...)\n\n This is not done if we would add more nodes in the graph, like with:\n\n mul(-1, x, y) -/-> neg(mul(x, y))\n\n \"\"\"\n # here, we are past the point of canonicalization, so we don't\n # want to put in un-necessary fills.\n #\n # at this point [post canonicalize], mul() may have many inputs.\n if node.op == mul:\n # the idea here is that we have pow(x, y)\n has_neg = False\n new_inputs = []\n nb_neg_node = 0\n nb_cst = 0\n for inp in node.inputs:\n # remove any neg arguments\n while inp.owner and inp.owner.op == neg:\n has_neg ^= True\n inp = inp.owner.inputs[0]\n nb_neg_node += 1\n\n # remove special case arguments of 1, -1 or 0\n y = get_constant(inp)\n if y == 1.0:\n nb_cst += 1\n elif y == -1.0:\n nb_cst += 1\n has_neg ^= True # toggles\n elif y == 0.0:\n # if we find any zero, we just return right away\n return [broadcast_like(0, node.outputs[0], fgraph)]\n else:\n new_inputs.append(inp)\n\n if new_inputs != node.inputs:\n if new_inputs:\n if len(new_inputs) == 1:\n if has_neg:\n if new_inputs[0].dtype in (uint_dtypes + [\"bool\"]):\n return\n else:\n rval = -new_inputs[0]\n else:\n rval = new_inputs[0]\n else:\n # The next case would cause a replace by an equivalent case.\n if has_neg and nb_neg_node == 0 and nb_cst == 1:\n return\n elif has_neg:\n # Don't add an extra neg node as we can't\n # fully replace this mul by a neg.\n m1 = np.asarray(-1, dtype=node.outputs[0].dtype)\n new_inputs = [m1] + new_inputs\n rval = mul(*new_inputs)\n\n return [broadcast_like(rval, node.outputs[0], fgraph)]\n else:\n # there are no variable inputs to mul\n # N.B. this could have been constant-folded...\n if has_neg:\n return [broadcast_like(-1, node.outputs[0], fgraph)]\n else:\n return [broadcast_like(1, node.outputs[0], fgraph)]\n\n\n@register_specialize\n@local_optimizer([add])\ndef local_add_specialize(fgraph, node):\n \"\"\"Remove zeros from ``add``s.\n\n TODO: This should be a canonicalization, no?\n \"\"\"\n # here, we are past the point of canonicalization, so we don't want\n # to put in un-necessary fills.\n if node.op != add:\n return False\n\n new_inputs = []\n for inp in node.inputs:\n try:\n y = get_scalar_constant_value(inp)\n except NotScalarConstantError:\n y = inp\n if np.all(y == 0.0):\n continue\n new_inputs.append(inp)\n\n if len(new_inputs) == len(node.inputs):\n return False\n\n node_output = node.outputs[0]\n dtype = node_output.type.dtype\n\n if len(new_inputs) == 0:\n # we got rid of the entire expression!\n ndim = node_output.type.ndim\n # Reuse call to constant for cache()\n cst = constant(np.zeros((1,) * ndim, dtype=dtype))\n assert cst.type.broadcastable == (True,) * ndim\n return fill_chain(cst, node.inputs)\n\n if len(new_inputs) == 1:\n ret = fill_chain(new_inputs[0], node.inputs)\n else:\n ret = fill_chain(add(*new_inputs), node.inputs)\n\n # The dtype should not be changed. It can happen if the input\n # that was forcing upcasting was equal to 0.\n if ret[0].dtype != dtype:\n ret = [cast(ret[0], dtype)]\n\n return ret\n\n\nmul_canonizer = in2out(\n LocalOptGroup(local_mul_canonizer, local_fill_sink, apply_all_opts=True),\n name=\"mul_canonizer_groups\",\n)\n\n\ndef check_for_x_over_absX(numerators, denominators):\n \"\"\"Convert x/abs(x) into sign(x). 
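For example, numerators [x, y] and denominators [abs(x)] become [y, sgn(x)]\n and [], respectively; both lists are modified in place.\n 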
\"\"\"\n # TODO: this function should dig/search through dimshuffles\n # This won't catch a dimshuffled absolute value\n for den in list(denominators):\n if den.owner and den.owner.op == at_abs and den.owner.inputs[0] in numerators:\n if den.owner.inputs[0].type.dtype.startswith(\"complex\"):\n # TODO: Make an Op that projects a complex number to\n # have unit length but projects 0 to 0. That\n # would be a weird Op, but consistent with the\n # special case below. I heard there's some\n # convention in Matlab that is similar to\n # this... but not sure.\n pass\n else:\n denominators.remove(den)\n numerators.remove(den.owner.inputs[0])\n numerators.append(sgn(den.owner.inputs[0]))\n return numerators, denominators\n\n\nlocal_mul_canonizer.add_simplifier(check_for_x_over_absX, \"X_over_absX\")\n\n\n@register_canonicalize\n@local_optimizer([at_abs])\ndef local_abs_lift(fgraph, node):\n \"\"\"\n Move the abs toward the input.\n\n This is needed for check_for_x_over_absX to apply in more case.\n\n \"\"\"\n if node.op == at_abs and node.inputs[0].owner:\n assert node.nin == 1\n if node.inputs[0].owner.op == mul:\n return [mul(*[at_abs(i) for i in node.inputs[0].owner.inputs])]\n if node.inputs[0].owner.op == true_div:\n i = node.inputs[0].owner.inputs\n return [true_div(at_abs(i[0]), at_abs(i[1]))]\n\n\n@register_specialize\n@local_optimizer([mul, true_div])\ndef local_abs_merge(fgraph, node):\n \"\"\"\n Merge abs generated by local_abs_lift when the canonizer don't\n need it anymore\n\n \"\"\"\n if (\n node.op == mul\n and sum([i.owner.op == at_abs for i in node.inputs if i.owner]) > 1\n ):\n inputs = []\n for i in node.inputs:\n if i.owner and i.owner.op == at_abs:\n inputs.append(i.owner.inputs[0])\n elif isinstance(i, Constant):\n try:\n const = get_scalar_constant_value(i, only_process_constants=True)\n except NotScalarConstantError:\n return False\n if not (const >= 0).all():\n return False\n inputs.append(i)\n else:\n return False\n return [at_abs(mul(*inputs))]\n if (\n node.op == true_div\n and sum([i.owner.op == at_abs for i in node.inputs if i.owner]) == 2\n ):\n return [\n at_abs(\n true_div(node.inputs[0].owner.inputs[0], node.inputs[1].owner.inputs[0])\n )\n ]\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log1p(fgraph, node):\n # log(1+x) -> log1p(x)\n # log(1-x) -> log1p(-x)\n if node.op == log:\n (log_arg,) = node.inputs\n if log_arg.owner and log_arg.owner.op == add:\n scalars, scalar_inputs, nonconsts = scalarconsts_rest(\n log_arg.owner.inputs, only_process_constants=True\n )\n # scalar_inputs are potentially dimshuffled and fill'd scalars\n if scalars and np.allclose(np.sum(scalars), 1):\n if nonconsts:\n if len(nonconsts) > 1:\n ninp = add(*nonconsts)\n else:\n ninp = nonconsts[0]\n if ninp.dtype != log_arg.type.dtype:\n ninp = ninp.astype(node.outputs[0].dtype)\n return fill_chain(log1p(ninp), scalar_inputs)\n\n elif log_arg.owner and log_arg.owner.op == sub:\n one = extract_constant(log_arg.owner.inputs[0], only_process_constants=True)\n if one != 1:\n return\n other = log_arg.owner.inputs[1]\n if other.dtype != log_arg.dtype:\n other = other.astype(log_arg.dtype)\n return [log1p(neg(other))]\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log_add_exp(fgraph, node):\n \"\"\"\n ``log(exp(x)+exp(y)+exp(z)) = max + log(x-max, y-max, z-max)``\n\n TODO: in canonicalize, change log10 and log2 -> log\n \"\"\"\n\n if node.op == log:\n z = node.inputs[0]\n if z.owner and z.owner.op == add:\n zi = 
z.owner.inputs\n pre_exp = [x.owner.inputs[0] for x in zi if x.owner and x.owner.op == exp]\n # all arguments to add are exp(<something>)\n if len(pre_exp) == len(zi):\n # Do not offset when max_pre = -np.inf, to avoid nan in the output\n # Switch statement is placed directly inside add to break the self-symmetry\n # of the returned output (otherwise the optimization would not stabilize)\n max_pre = reduce(maximum, pre_exp)\n ret = max_pre + log(\n add(\n *[\n switch(isinf(max_pre), exp(max_pre), exp(p - max_pre))\n for p in pre_exp\n ]\n )\n )\n return [ret]\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log_sum_exp(fgraph, node):\n # log(sum_i(exp(x_i))) = x_max + log(sum_i(exp(x_i - x_max)))\n\n if node.op != log:\n return\n\n sum_node = node.inputs[0].owner\n # If the sum has keepdims=True, there might be a dimshuffle\n if sum_node and isinstance(sum_node.op, DimShuffle):\n dimshuffle_op = sum_node.op\n sum_node = sum_node.inputs[0].owner\n else:\n dimshuffle_op = None\n\n if not sum_node or not isinstance(sum_node.op, Sum):\n return\n\n exp_node, axis = sum_node.inputs[0].owner, sum_node.op.axis\n if not exp_node or not (\n isinstance(exp_node.op, Elemwise) and isinstance(exp_node.op.scalar_op, aes.Exp)\n ):\n return\n\n pre_exp = exp_node.inputs[0]\n max_pre_exp = at_max(pre_exp, axis=axis)\n max_pre_exp_keepdims = makeKeepDims(pre_exp, max_pre_exp, axis)\n\n # Do not offset when max_pre = -np.inf, to avoid nan in the output\n # Switch statement is placed directly inside sum to break the self-symmetry\n # of the returned output (otherwise the optimization would not stabilize)\n ret = max_pre_exp + log(\n at_sum(\n switch(\n isinf(max_pre_exp_keepdims),\n exp(max_pre_exp_keepdims),\n exp(pre_exp - max_pre_exp_keepdims),\n ),\n axis=axis,\n ),\n )\n\n # Restore the dimshuffle op, if any.\n if dimshuffle_op:\n ret = dimshuffle_op(ret)\n\n return [ret]\n\n\ndef add_calculate(num, denum, aslist=False, out_type=None):\n # TODO: make sure that this function and mul_calculate are similar\n if out_type is None:\n zero = 0.0\n else:\n zero = _asarray(0, dtype=out_type.dtype)\n # zero = 0.0 if out_type is None else _asarray(0,\n # dtype=out_type.dtype)\n if out_type and out_type.dtype == \"bool\":\n if len(denum) == 0:\n # NumPy 1.14 do not accept to do \"bool - bool\"\n v = reduce(np.add, num, zero)\n else:\n raise Exception(\n \"bool subtraction not supported. This should not happen as\"\n \" an earlier error should have been raised\"\n )\n else:\n v = reduce(np.add, num, zero) - reduce(np.add, denum, zero)\n if aslist:\n if np.all(v == 0):\n return []\n else:\n return [v]\n return v\n\n\nlocal_add_canonizer = AlgebraicCanonizer(add, sub, neg, add_calculate)\nadd_canonizer = in2out(\n LocalOptGroup(local_add_canonizer, local_fill_sink, apply_all_opts=True),\n name=\"add_canonizer_group\",\n)\n\n\nregister_canonicalize(local_add_canonizer, name=\"local_add_canonizer\")\n\n\ndef distribute_greedy(pos_pairs, neg_pairs, num, denum, out_type, minscore=0):\n # each pair in pos_pairs and neg_pairs is a num/denum pair. 
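Each pair comes from\n # local_mul_canonizer.get_num_denum, so ([a], [x]) stands for a / x.\n # 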
this\n # function attempts to add num and denum to the corresponding parts\n # of each pair, and counts how many multiplications/divisions can\n # be saved in that way.\n\n # each division is counted like div_cost multiplications\n # (typically, division costs more so we are willing to multiply more\n # in order to divide less)\n # 1.5 was obtained through an informal test and may very well be\n # platform dependent\n div_cost = 1.5\n\n # score is number of operations saved, higher is better\n score = len(num) + div_cost * len(denum)\n new_pos_pairs = list(\n itertools.starmap(\n local_mul_canonizer.simplify,\n [(n + num, d + denum, out_type) for (n, d) in pos_pairs],\n )\n )\n new_neg_pairs = list(\n itertools.starmap(\n local_mul_canonizer.simplify,\n [(n + num, d + denum, out_type) for (n, d) in neg_pairs],\n )\n )\n for (n, d), (nn, dd) in zip(pos_pairs + neg_pairs, new_pos_pairs + new_neg_pairs):\n # We calculate how many operations we are saving with the new\n # num and denum\n score += len(n) + div_cost * len(d) - len(nn) - div_cost * len(dd)\n if score <= minscore:\n # the change is not applied because it adds too many operations\n return False, pos_pairs, neg_pairs\n return True, new_pos_pairs, new_neg_pairs\n\n\ndef attempt_distribution(factor, num, denum, out_type):\n \"\"\"Try to insert each `num` and each `denum` in the factor?\n\n Returns\n -------\n changes?, new_factor, new_num, new_denum\n If there are changes, `new_num` and `new_denum` contain all the\n numerators and denominators that could not be distributed in the factor\n\n \"\"\"\n pos_terms, neg_terms = local_add_canonizer.get_num_denum(factor)\n if len(pos_terms) == 1 and not neg_terms:\n return False, factor, num, denum\n pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos_terms))\n neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg_terms))\n change = False\n for n in list(num):\n success, pos_pairs, neg_pairs = distribute_greedy(\n pos_pairs, neg_pairs, [n], [], out_type\n )\n if success:\n change = True\n num.remove(n)\n for d in list(denum):\n success, pos_pairs, neg_pairs = distribute_greedy(\n pos_pairs, neg_pairs, [], [d], out_type\n )\n if success:\n change = True\n denum.remove(d)\n if not change:\n return change, factor, num, denum\n else:\n return (\n change,\n local_add_canonizer.merge_num_denum(\n list(itertools.starmap(local_mul_canonizer.merge_num_denum, pos_pairs)),\n list(itertools.starmap(local_mul_canonizer.merge_num_denum, neg_pairs)),\n ),\n num,\n denum,\n )\n\n\n@register_canonicalize\n@register_stabilize\n@local_optimizer([mul, true_div, reciprocal])\ndef local_greedy_distributor(fgraph, node):\n \"\"\"\n Optimize by reducing the number of multiplications and/or divisions.\n\n This optimization tries to apply distributivity of multiplication\n to addition in order to reduce the number of multiplications\n and/or divisions that must be done. The algorithm weighs division\n more than multiplication to account for the former's slightly\n greater computational cost.\n\n The following expressions are simplified:\n 1. ((a/x + b/y) * x * y) --> a*y + b*x\n 2. ((a/x + b) * x) --> a + b*x\n 3. There are other forms too where node is a true_div.\n\n The following expressions are not simplified:\n 4. ((a + b) * x) -/-> a*x + b*x\n\n This optimization aims to reduce computational cost. It may also\n increase numerical stability, e.g. 
when x and/or y tend to 0 in\n example 1.\n\n \"\"\"\n\n out = node.outputs[0]\n num, denum = local_mul_canonizer.get_num_denum(out)\n if len(num) == 1 and not denum:\n return False\n\n new_num, new_denum = [], []\n\n change = False\n\n out_type = out.type\n for candidate in list(num):\n if candidate not in num:\n continue\n num.remove(candidate)\n _change, candidate, num, denum = attempt_distribution(\n candidate,\n num,\n denum,\n out_type,\n )\n\n change |= _change\n new_num.append(candidate)\n\n for candidate in list(denum):\n if candidate not in denum:\n continue\n denum.remove(candidate)\n _change, candidate, denum, num = attempt_distribution(\n candidate, denum, num, out_type\n )\n change |= _change\n new_denum.append(candidate)\n if not change:\n return False\n\n new_num += num\n new_denum += denum\n\n rval = local_mul_canonizer.merge_num_denum(new_num, new_denum)\n\n if rval.type != out.type:\n # WHY DOES THIS HAPPEN?\n return False\n\n return [rval]\n\n\nget_clients_at_depth1 = partial(get_clients_at_depth, depth=1)\nget_clients_at_depth2 = partial(get_clients_at_depth, depth=2)\n\n# 1+erf(x)=>erfc(-x)\nlocal_one_plus_erf = PatternSub(\n (add, 1, (erf, \"x\")),\n (erfc, (neg, \"x\")),\n allow_multiple_clients=True,\n name=\"local_one_plus_erf\",\n tracks=[erf],\n get_nodes=get_clients_at_depth1,\n)\nregister_canonicalize(local_one_plus_erf)\nregister_stabilize(local_one_plus_erf)\nregister_specialize(local_one_plus_erf)\n\n# Only one of the two rewrites below is needed if a canonicalization is added\n# for sub(x, y) -> add(x, -y) or a specialization for add(x, -y) -> sub(x, y)\n# 1-erf(x)=>erfc(x)\nlocal_one_minus_erf = PatternSub(\n (sub, 1, (erf, \"x\")),\n (erfc, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erf\",\n tracks=[erf],\n get_nodes=get_clients_at_depth1,\n)\nregister_canonicalize(local_one_minus_erf)\nregister_stabilize(local_one_minus_erf)\nregister_specialize(local_one_minus_erf)\n\nlocal_one_minus_erf2 = PatternSub(\n (add, 1, (neg, (erf, \"x\"))),\n (erfc, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erf2\",\n tracks=[erf],\n get_nodes=get_clients_at_depth2,\n)\nregister_canonicalize(local_one_minus_erf2)\nregister_stabilize(local_one_minus_erf2)\nregister_specialize(local_one_minus_erf2)\n\n# (-1)+erf(x) => -erfc(x)\n# There is no need for erf(x)+(-1) nor erf(x) - 1, as the canonicalize will\n# convert those to the matched pattern\nlocal_erf_minus_one = PatternSub(\n (add, -1, (erf, \"x\")),\n (neg, (erfc, \"x\")),\n allow_multiple_clients=True,\n name=\"local_erf_minus_one\",\n tracks=[erf],\n get_nodes=get_clients_at_depth1,\n)\nregister_canonicalize(local_erf_minus_one)\nregister_stabilize(local_erf_minus_one)\nregister_specialize(local_erf_minus_one)\n\n# Only one of the two rewrites below is needed if a canonicalization is added\n# for sub(x, y) -> add(x, -y) or a specialization for add(x, -y) -> sub(x, y)\n# 1-erfc(x) => erf(x)\nlocal_one_minus_erfc = PatternSub(\n (sub, 1, (erfc, \"x\")),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erfc\",\n tracks=[erfc],\n get_nodes=get_clients_at_depth1,\n)\nregister_canonicalize(local_one_minus_erfc)\nregister_stabilize(local_one_minus_erfc)\nregister_specialize(local_one_minus_erfc)\n\nlocal_one_minus_erfc2 = PatternSub(\n (add, 1, (neg, (erfc, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_one_minus_erfc2\",\n tracks=[erfc],\n 
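# the tracked erfc sits two levels below the matched add (add -> neg -> erfc),\n # hence candidate nodes are looked up among the erfc's clients at depth 2.\n 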
get_nodes=get_clients_at_depth2,\n)\nregister_canonicalize(local_one_minus_erfc2)\nregister_stabilize(local_one_minus_erfc2)\nregister_specialize(local_one_minus_erfc2)\n\n# (-1)+erfc(-x)=>erf(x)\nlocal_erf_neg_minus_one = PatternSub(\n (add, -1, (erfc, (neg, \"x\"))),\n (erf, \"x\"),\n allow_multiple_clients=True,\n name=\"local_erf_neg_minus_one\",\n tracks=[erfc],\n get_nodes=get_clients_at_depth1,\n)\nregister_canonicalize(local_erf_neg_minus_one)\nregister_stabilize(local_erf_neg_minus_one)\nregister_specialize(local_erf_neg_minus_one)\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([log])\ndef local_log_erfc(fgraph, node):\n \"\"\"Stability optimization for `log(erfc(x))`.\n\n log(erfc(x)) => when x>threshold,\n -x**2-log(x)-.5*log(pi)+log(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6))\n for float64: threshold=26.641747557 was chosen with:\n [(i,numpy.log(scipy.special.erfc(numpy.asarray([i],dtype='float64'))))\n for i in numpy.arange(26.641747557,26.6417475571,.00000000001)]\n for float32: threshold=10.0541949, [(i,numpy.log(scipy.special.erfc(\n numpy.asarray([i],dtype='float32')))) for i in numpy.arange(\n 10.0541948,10.0541951,.0000001)]\n \"\"\"\n if node.op != log:\n return False\n if not node.inputs[0].owner or node.inputs[0].owner.op != erfc:\n return False\n\n if hasattr(node.tag, \"local_log_erfc_applied\"):\n # We use that flag to don't apply the optimization recursively\n return False\n node.tag.local_log_erfc_applied = True\n\n x = node.inputs[0].owner.inputs[0]\n stab_value = (\n -(x ** 2)\n - log(x)\n - 0.5 * log(np.pi)\n + log(1 - 1 / (2 * x ** 2) + 3 / (4 * x ** 4) - 15 / (8 * x ** 6))\n )\n\n if node.outputs[0].dtype == \"float32\" or node.outputs[0].dtype == \"float16\":\n threshold = 10.0541949\n elif node.outputs[0].dtype == \"float64\":\n threshold = 26.641747557\n\n ret = switch(x < threshold, node.outputs[0], stab_value)\n ret.tag.values_eq_approx = values_eq_approx_remove_inf\n return [ret]\n\n\n@register_stabilize\n@register_specialize\n@local_optimizer([true_div])\ndef local_grad_log_erfc_neg(fgraph, node):\n \"\"\"Stability optimization for the grad of `log(erfc(x))`.\n\n ([y*]exp(-(x**2)))/erfc(x) # The y* is optional\n ([y*]exp(x**2))/erfc(-x) => [y*](when x > threshold,\n sqrt(pi)*-x/(1-1/(2*x**2)+3/(4*x**4)-15/(8*x**6)))\n\n for float64: threshold=26.63 see at the end of the fct for the explanation\n for float32: threshold=9.3 see at the end of the fct for the explanation\n\n TODO: remove the constraint that there are only 2 inputs to exp(x**2)\n is the second.\n TODO: at the test point 10 in float32, there is instability in the original\n value. 
The original gives -30.0, the stab -20.1 and in float64 -18.1.\n Make it so that the test does not generate an error in that case!\n\n \"\"\"\n if node.op != true_div:\n return False\n if not node.inputs[1].owner or node.inputs[1].owner.op != erfc:\n return False\n\n erfc_in = node.inputs[1]\n erfc_x = erfc_in.owner.inputs[0]\n\n if not node.inputs[0].owner:\n return False\n\n # TODO: All of this should be replaced with a single, simple unification\n # The mul is optional.\n if node.inputs[0].owner.op != mul:\n mul_in = None\n y = []\n if not node.inputs[0].owner or node.inputs[0].owner.op != exp:\n return False\n exp_in = node.inputs[0]\n else:\n mul_in = node.inputs[0]\n exp_in = None\n for idx, inp in enumerate(mul_in.owner.inputs):\n if inp.owner and inp.owner.op == exp:\n exp_in = inp\n break\n else:\n return False\n\n if len(mul_in.owner.inputs) == 2:\n y = [mul_in.owner.inputs[1 - idx]]\n else:\n y = mul_in.owner.inputs[:]\n del y[idx]\n\n if not exp_in.owner.inputs[0].owner:\n return False\n\n if exp_in.owner.inputs[0].owner.op == neg:\n neg_in = exp_in.owner.inputs[0]\n if not neg_in.owner.inputs[0].owner or neg_in.owner.inputs[0].owner.op != sqr:\n return False\n sqr_in = neg_in.owner.inputs[0]\n x = sqr_in.owner.inputs[0]\n elif exp_in.owner.inputs[0].owner.op == mul:\n # We should compare that -(erfc_x**2) is equivalent to mul_neg.\n # There is currently no easy way to do this in the general case,\n # so we implement some common case for now.\n\n # In many cases the neg are replaced by mul in the graph.\n # This also allows to stabilize log(erfc(cst*x)).\n mul_neg = exp_in.owner.inputs[0]\n\n # In case that multiple mul are not fused together, we do it here.\n def check_input(inputs):\n new_inputs = []\n for i in inputs:\n if i.owner and i.owner.op == mul:\n new_inputs.extend(check_input(i.owner.inputs))\n else:\n new_inputs.append(i)\n return new_inputs\n\n mul_inputs = check_input(mul_neg.owner.inputs)\n\n # Put the constant first.\n for i in range(len(mul_inputs)):\n if isinstance(i, Constant):\n if i == 0:\n break\n else:\n tmp = mul_inputs[0]\n mul_inputs[0] = mul_inputs[i]\n mul_inputs[i] = tmp\n break\n mul_neg = mul(*mul_inputs)\n\n try:\n cst2 = get_scalar_constant_value(\n mul_neg.owner.inputs[0], only_process_constants=True\n )\n except NotScalarConstantError:\n return False\n\n if len(mul_neg.owner.inputs) == 2:\n if (\n not mul_neg.owner.inputs[1].owner\n or mul_neg.owner.inputs[1].owner.op != sqr\n ):\n return False\n sqr_in = mul_neg.owner.inputs[1]\n x = sqr_in.owner.inputs[0]\n elif len(mul_neg.owner.inputs) == 3:\n if mul_neg.owner.inputs[1] is not mul_neg.owner.inputs[2]:\n return False\n x = mul_neg.owner.inputs[1]\n else:\n return False\n\n if cst2 != -1:\n if (\n not erfc_x.owner\n or erfc_x.owner.op != mul\n or len(erfc_x.owner.inputs) != 2\n ):\n # todo implement that case\n return False\n if erfc_x.owner.inputs[1] is not mul_neg.owner.inputs[1]:\n return False\n\n x = erfc_x\n try:\n cst = get_scalar_constant_value(\n erfc_x.owner.inputs[0], only_process_constants=True\n )\n except NotScalarConstantError:\n return False\n if cst2 != -cst * 2:\n return False\n\n # The constant is valid. 
Must check that the\n elif erfc_x is not x:\n return False\n\n else:\n return False\n\n if hasattr(node.tag, \"local_grad_log_erfc_neg\"):\n # We use that flag to don't apply the optimization recursively\n return False\n\n if erfc_x is not x:\n return None\n\n # we move the y outside the div.\n true_div_no_mul = true_div(exp_in, erfc_in)\n true_div_no_mul.owner.tag.local_grad_log_erfc_neg = True\n\n # aaron value\n stab_value = (\n x\n * at_pow(1 - 1 / (2 * (x ** 2)) + 3 / (4 * (x ** 4)) - 15 / (8 * (x ** 6)), -1)\n * cast(sqrt(np.pi), dtype=x.dtype)\n )\n\n if x.dtype == \"float32\" or x.dtype == \"float16\":\n threshold = 9.3\n # threshold = 10.1\n elif x.dtype == \"float64\":\n threshold = 26.641747557\n\n ret = switch(x < threshold, true_div_no_mul, stab_value)\n\n if y:\n ret = mul(ret, *y)\n\n ret.tag.values_eq_approx = values_eq_approx_remove_inf_nan\n\n return [ret]\n\n\ndef local_add_mul_fusion(fgraph, node):\n \"\"\"Fuse consecutive add or mul in one such node with more inputs.\n\n It is better to fuse add/mul that way then in a Composite node as\n this make the inner graph of the Composite smaller. This allow to\n put more computation in a Composite before hitting the max\n recursion limit when pickling Composite.\n\n \"\"\"\n if not isinstance(node.op, Elemwise) or not isinstance(\n node.op.scalar_op, (aes.Add, aes.Mul)\n ):\n return False\n\n s_op = node.op.scalar_op.__class__\n new_inp = []\n fused = False\n nb_inputs = len(node.inputs)\n max_inputs = float(\"inf\")\n if hasattr(node.op, \"max_inputs\"):\n max_inputs = node.op.max_inputs(node)\n for inp in node.inputs:\n if (\n inp.owner\n and isinstance(inp.owner.op, Elemwise)\n and isinstance(inp.owner.op.scalar_op, s_op)\n and\n # Do not duplicate the operation.\n len(fgraph.clients[inp]) == 1\n and (nb_inputs + len(inp.owner.inputs) - 1) <= max_inputs\n ):\n new_inp.extend(inp.owner.inputs)\n fused = True\n else:\n new_inp.append(inp)\n\n # We can not compare the number of inputs as Mul and Add could have\n # 0 or 1 inputs in some corner cases.\n if fused:\n output = node.op(*new_inp)\n copy_stack_trace(node.outputs[0], output)\n\n # Do the recursion here to help lower the number of\n # FusionOptimizer iteration.\n if output.owner:\n output2 = local_add_mul_fusion(fgraph, output.owner)\n if output2:\n return output2\n return [output]\n\n\nfuse_seqopt.register(\n \"local_add_mul_fusion\",\n FusionOptimizer(local_add_mul_fusion),\n 0,\n \"fast_run\",\n \"fusion\",\n)\n\n\ndef _skip_mul_1(r):\n if r.owner and r.owner.op == mul:\n not_is_1 = [i for i in r.owner.inputs if not _is_1(i)]\n if len(not_is_1) == 1:\n return not_is_1[0]\n\n\ndef _is_1(expr):\n \"\"\"\n\n Returns\n -------\n bool\n True iff expr is a constant close to 1.\n\n \"\"\"\n try:\n v = get_scalar_constant_value(expr)\n return np.allclose(v, 1)\n except NotScalarConstantError:\n return False\n\n\nlogsigm_to_softplus = PatternSub(\n (log, (sigmoid, \"x\")),\n (neg, (softplus, (neg, \"x\"))),\n allow_multiple_clients=True,\n values_eq_approx=values_eq_approx_remove_inf,\n skip_identities_fn=_skip_mul_1,\n tracks=[sigmoid],\n get_nodes=get_clients_at_depth1,\n)\nlog1msigm_to_softplus = PatternSub(\n (log, (sub, dict(pattern=\"y\", constraint=_is_1), (sigmoid, \"x\"))),\n (neg, (softplus, \"x\")),\n allow_multiple_clients=True,\n values_eq_approx=values_eq_approx_remove_inf,\n skip_identities_fn=_skip_mul_1,\n tracks=[sigmoid],\n get_nodes=get_clients_at_depth2,\n)\nlog1pexp_to_softplus = PatternSub(\n (log1p, (exp, \"x\")),\n (softplus, \"x\"),\n 
values_eq_approx=values_eq_approx_remove_inf,\n allow_multiple_clients=True,\n)\nlog1p_neg_sigmoid = PatternSub(\n (log1p, (neg, (sigmoid, \"x\"))),\n (neg, (softplus, \"x\")),\n values_eq_approx=values_eq_approx_remove_inf,\n allow_multiple_clients=True,\n tracks=[sigmoid],\n get_nodes=get_clients_at_depth2,\n)\n\nregister_stabilize(logsigm_to_softplus, name=\"logsigm_to_softplus\")\nregister_stabilize(log1msigm_to_softplus, name=\"log1msigm_to_softplus\")\nregister_stabilize(log1pexp_to_softplus, name=\"log1pexp_to_softplus\")\nregister_stabilize(log1p_neg_sigmoid, name=\"log1p_neg_sigmoid\")\nregister_specialize(log1p_neg_sigmoid, name=\"log1p_neg_sigmoid\")\n\n\ndef is_1pexp(t, only_process_constants=True):\n \"\"\"\n\n Returns\n -------\n object\n If 't' is of the form (1+exp(x)), return (False, x).\n Else return None.\n\n \"\"\"\n if t.owner and t.owner.op == add:\n scalars, scalar_inputs, nonconsts = scalarconsts_rest(\n t.owner.inputs, only_process_constants=only_process_constants\n )\n # scalar_inputs are potentially dimshuffled and filled with scalars\n if len(nonconsts) == 1:\n maybe_exp = nonconsts[0]\n if maybe_exp.owner and maybe_exp.owner.op == exp:\n # Verify that the constant terms sum to 1.\n if scalars:\n scal_sum = scalars[0]\n for s in scalars[1:]:\n scal_sum = scal_sum + s\n if np.allclose(scal_sum, 1):\n return False, maybe_exp.owner.inputs[0]\n return None\n\n\ndef is_exp(var):\n \"\"\"\n Match a variable with either of the `exp(x)` or `-exp(x)` patterns.\n\n Parameters\n ----------\n var\n The Variable to analyze.\n\n Returns\n -------\n tuple\n A pair (b, x) with `b` a boolean set to True if `var` is of the\n form `-exp(x)` and False if `var` is of the form `exp(x)`. If `var`\n cannot be cast into either form, then return `None`.\n\n \"\"\"\n _neg = False\n neg_info = is_neg(var)\n if neg_info is not None:\n _neg = True\n var = neg_info\n if var.owner and var.owner.op == exp:\n return _neg, var.owner.inputs[0]\n\n\ndef is_mul(var):\n \"\"\"\n Match a variable with `x * y * z * ...`.\n\n Parameters\n ----------\n var\n The Variable to analyze.\n\n Returns\n -------\n object\n A list [x, y, z, ...] 
if `var` is of the form `x * y * z * ...`,\n or None if `var` cannot be cast into this form.\n\n \"\"\"\n if var.owner and var.owner.op == mul:\n return var.owner.inputs\n else:\n return None\n\n\ndef partition_num_or_denom(r, f):\n if r.owner and r.owner.op == mul:\n a = r.owner.inputs\n else:\n a = [r]\n\n # ugly 2.4-compatible thing\n f_terms = []\n _neg = False\n rest = []\n for t in a:\n f_t = f(t)\n if f_t is None:\n rest.append(t)\n else:\n neg_t, f_t = f_t\n f_terms.append(f_t)\n _neg ^= neg_t # bit flip if neg_t is true\n return f_terms, rest, _neg\n\n\ndef is_neg(var):\n \"\"\"\n Match a variable with the `-x` pattern.\n\n Parameters\n ----------\n var\n The Variable to analyze.\n\n Returns\n -------\n object\n `x` if `var` is of the form `-x`, or None otherwise.\n\n \"\"\"\n var_node = var.owner\n if not var_node:\n return None\n # First match against `neg`.\n if var_node.op == neg:\n return var_node.inputs[0]\n # Then match against a multiplication by -1.\n if var_node.op == mul and len(var_node.inputs) >= 2:\n for idx, mul_input in enumerate(var_node.inputs):\n try:\n constant = get_scalar_constant_value(mul_input)\n is_minus_1 = np.allclose(constant, -1)\n except NotScalarConstantError:\n is_minus_1 = False\n if is_minus_1:\n # Found a multiplication by -1.\n if len(var_node.inputs) == 2:\n # Only return the other input.\n return var_node.inputs[1 - idx]\n else:\n # Return the multiplication of all other inputs.\n return mul(*(var_node.inputs[0:idx] + var_node.inputs[idx + 1 :]))\n # No match.\n return None\n\n\n@register_stabilize\n@local_optimizer([true_div])\ndef local_exp_over_1_plus_exp(fgraph, node):\n \"\"\"\n exp(x)/(1+exp(x)) -> sigm(x)\n c/(1+exp(x)) -> c*sigm(-x)\n\n \"\"\"\n # this optimization should be done for numerical stability\n # so we don't care to check client counts\n if node.op == true_div:\n\n # find all the exp() terms in the numerator\n num, denom = node.inputs\n num_exp_x, num_rest, num_neg = partition_num_or_denom(num, is_exp)\n denom_1pexp, denom_rest, denom_neg = partition_num_or_denom(denom, is_1pexp)\n\n sigmoids = []\n for t in denom_1pexp:\n if t in num_exp_x:\n # case: exp(x) /(1+exp(x))\n sigmoids.append(sigmoid(t))\n del num_exp_x[num_exp_x.index(t)]\n else:\n # case: 1/(1+exp(x))\n sigmoids.append(sigmoid(-t))\n copy_stack_trace(node.outputs[0], sigmoids[-1])\n\n if not sigmoids: # we didn't find any. abort\n return\n # put the new numerator together\n new_num = sigmoids + [exp(t) for t in num_exp_x] + num_rest\n if len(new_num) == 1:\n new_num = new_num[0]\n else:\n new_num = mul(*new_num)\n\n if num_neg ^ denom_neg:\n new_num = -new_num\n\n copy_stack_trace(num, new_num)\n\n if len(denom_rest) == 0:\n return [new_num]\n elif len(denom_rest) == 1:\n out = new_num / denom_rest[0]\n else:\n out = new_num / mul(*denom_rest)\n\n copy_stack_trace(node.outputs[0], out)\n return [out]\n\n\ndef parse_mul_tree(root):\n \"\"\"\n Parse a tree of multiplications starting at the given root.\n\n Parameters\n ----------\n root\n The variable at the root of the tree.\n\n Returns\n -------\n object\n A tree where each non-leaf node corresponds to a multiplication\n in the computation of `root`, represented by the list of its inputs.\n Each input is a pair [n, x] with `n` a boolean value indicating whether\n sub-tree `x` should be negated.\n\n Examples\n --------\n\n .. 
code-block:: python\n\n x * y -> [False, [[False, x], [False, y]]]\n -(x * y) -> [True, [[False, x], [False, y]]]\n -x * y -> [False, [[True, x], [False, y]]]\n -x -> [True, x]\n (x * y) * -z -> [False, [[False, [[False, x], [False, y]]],\n [True, z]]]\n\n \"\"\"\n # Is it a multiplication?\n mul_info = is_mul(root)\n if mul_info is None:\n # Is it a negation?\n neg_info = is_neg(root)\n if neg_info is None:\n # Keep the root \"as is\".\n return [False, root]\n else:\n # Recurse, inverting the negation.\n neg, sub_tree = parse_mul_tree(neg_info)\n return [not neg, sub_tree]\n else:\n # Recurse into inputs.\n return [False, list(map(parse_mul_tree, mul_info))]\n\n\ndef replace_leaf(arg, leaves, new_leaves, op, neg):\n \"\"\"\n Attempt to replace a leaf of a multiplication tree.\n\n We search for a leaf in `leaves` whose argument is `arg`, and if we find\n one, we remove it from `leaves` and add to `new_leaves` a leaf with\n argument `arg` and variable `op(arg)`.\n\n Parameters\n ----------\n arg\n The argument of the leaf we are looking for.\n leaves\n List of leaves to look into. Each leaf should be a pair\n (x, l) with `x` the argument of the Op found in the leaf, and `l` the\n actual leaf as found in a multiplication tree output by `parse_mul_tree`\n (i.e. a pair [boolean, variable]).\n new_leaves\n If a replacement occurred, then the leaf is removed from `leaves`\n and added to the list `new_leaves` (after being modified by `op`).\n op\n A function that, when applied to `arg`, returns the Variable\n we want to replace the original leaf variable with.\n neg : bool\n If True, then the boolean value associated to the leaf should\n be swapped. If False, then this value should remain unchanged.\n\n Returns\n -------\n bool\n True if a replacement occurred, or False otherwise.\n\n \"\"\"\n for idx, x in enumerate(leaves):\n if x[0] == arg:\n x[1][0] ^= neg\n x[1][1] = op(arg)\n leaves.pop(idx)\n new_leaves.append(x)\n return True\n return False\n\n\ndef simplify_mul(tree):\n \"\"\"\n Simplify a multiplication tree.\n\n Parameters\n ----------\n tree\n A multiplication tree (as output by `parse_mul_tree`).\n\n Returns\n -------\n object\n A multiplication tree computing the same output as `tree` but without\n useless multiplications by 1 nor -1 (identified by leaves of the form\n [False, None] or [True, None] respectively). 
Useless multiplications\n (with less than two inputs) are also removed from the tree.\n\n \"\"\"\n neg, inputs = tree\n if isinstance(inputs, list):\n # Recurse through inputs.\n s_inputs = []\n for s_i in map(simplify_mul, inputs):\n if s_i[1] is None:\n # Multiplication by +/-1.\n neg ^= s_i[0]\n else:\n s_inputs.append(s_i)\n if not s_inputs:\n # The multiplication is empty.\n rval = [neg, None]\n elif len(s_inputs) == 1:\n # The multiplication has a single input.\n s_inputs[0][0] ^= neg\n rval = s_inputs[0]\n else:\n rval = [neg, s_inputs]\n else:\n rval = tree\n # print 'simplify_mul: %s -> %s' % (tree, rval)\n return rval\n\n\ndef compute_mul(tree):\n \"\"\"\n Compute the Variable that is the output of a multiplication tree.\n\n This is the inverse of the operation performed by `parse_mul_tree`, i.e.\n compute_mul(parse_mul_tree(tree)) == tree.\n\n Parameters\n ----------\n tree\n A multiplication tree (as output by `parse_mul_tree`).\n\n Returns\n -------\n object\n A Variable that computes the multiplication represented by the tree.\n\n \"\"\"\n neg, inputs = tree\n if inputs is None:\n raise AssertionError(\n \"Function `compute_mul` found a missing leaf, did you forget to \"\n \"call `simplify_mul` on the tree first?\"\n )\n elif isinstance(inputs, list):\n # Recurse through inputs.\n rval = mul(*list(map(compute_mul, inputs)))\n else:\n rval = inputs\n if neg:\n rval = -rval\n return rval\n\n\ndef perform_sigm_times_exp(\n tree,\n exp_x=None,\n exp_minus_x=None,\n sigm_x=None,\n sigm_minus_x=None,\n parent=None,\n child_idx=None,\n full_tree=None,\n):\n \"\"\"\n Core processing of the `local_sigm_times_exp` optimization.\n\n This recursive function operates on a multiplication tree as output by\n `parse_mul_tree`. It walks through the tree and modifies it in-place\n by replacing matching pairs (exp, sigmoid) with the desired optimized\n version.\n\n Parameters\n ----------\n tree\n The sub-tree to operate on.\n exp_x\n List of arguments x so that `exp(x)` exists somewhere in the whole\n multiplication tree. Each argument is a pair (x, leaf) with `x` the\n argument of the exponential, and `leaf` the corresponding leaf in the\n multiplication tree (of the form [n, exp(x)] -- see `parse_mul_tree`).\n If None, this argument is initialized to an empty list.\n exp_minus_x\n Similar to `exp_x`, but for `exp(-x)`.\n sigm_x\n Similar to `exp_x`, but for `sigmoid(x)`.\n sigm_minus_x\n Similar to `exp_x`, but for `sigmoid(-x)`.\n parent\n Parent of `tree` (None if `tree` is the global root).\n child_idx\n Index of `tree` in its parent's inputs (None if `tree` is the global\n root).\n full_tree\n The global multiplication tree (should not be set except by recursive\n calls to this function). 
Used for debugging only.\n\n Returns\n -------\n bool\n True if a modification was performed somewhere in the whole multiplication\n tree, or False otherwise.\n\n \"\"\"\n if exp_x is None:\n exp_x = []\n if exp_minus_x is None:\n exp_minus_x = []\n if sigm_x is None:\n sigm_x = []\n if sigm_minus_x is None:\n sigm_minus_x = []\n if full_tree is None:\n full_tree = tree\n if False: # Debug code.\n print(\"<perform_sigm_times_exp>\")\n print(f\" full_tree = {full_tree}\")\n print(f\" tree = {tree}\")\n print(f\" exp_x = {exp_x}\")\n print(f\" exp_minus_x = {exp_minus_x}\")\n print(f\" sigm_x = {sigm_x}\")\n print(f\" sigm_minus_x= {sigm_minus_x}\")\n neg, inputs = tree\n if isinstance(inputs, list):\n # Recurse through inputs of the multiplication.\n rval = False\n for sub_idx, sub_tree in enumerate(inputs):\n rval |= perform_sigm_times_exp(\n tree=sub_tree,\n parent=tree,\n child_idx=sub_idx,\n exp_x=exp_x,\n exp_minus_x=exp_minus_x,\n sigm_x=sigm_x,\n sigm_minus_x=sigm_minus_x,\n full_tree=full_tree,\n )\n return rval\n else:\n # Reached a leaf: if it is an exponential or a sigmoid, then we\n # first attempt to find a match in leaves already visited.\n # If there is such a match, we modify the already-visited leaf\n # accordingly: for instance if we visited a leaf sigmoid(x), then\n # find later a -exp(-x), we replace the previous leaf by\n # -sigmoid(-x) and remove the -exp(-x) from the tree.\n # If no match is found, then we register this leaf so that it can\n # be found later while walking the tree.\n var = inputs\n keep_it = False\n exp_info = is_exp(var)\n if exp_info is not None:\n exp_neg, exp_arg = exp_info\n neg ^= exp_neg\n neg_arg = is_neg(exp_arg)\n if neg_arg is None:\n if not replace_leaf(exp_arg, sigm_minus_x, sigm_x, sigmoid, neg):\n exp_x.append((exp_arg, tree))\n keep_it = True\n else:\n if not replace_leaf(\n neg_arg, sigm_x, sigm_minus_x, lambda x: sigmoid(-x), neg\n ):\n exp_minus_x.append((neg_arg, tree))\n keep_it = True\n elif var.owner and var.owner.op == sigmoid:\n sigm_arg = var.owner.inputs[0]\n neg_arg = is_neg(sigm_arg)\n if neg_arg is None:\n if not replace_leaf(\n sigm_arg, exp_minus_x, sigm_minus_x, lambda x: sigmoid(-x), neg\n ):\n sigm_x.append((sigm_arg, tree))\n keep_it = True\n else:\n if not replace_leaf(neg_arg, exp_x, sigm_x, sigmoid, neg):\n sigm_minus_x.append((neg_arg, tree))\n keep_it = True\n else:\n # It is not an exponential nor a sigmoid.\n keep_it = True\n if not keep_it:\n # Delete this leaf, i.e. 
replace it by [False, None] (corresponding\n # to a multiplication by 1).\n assert parent is not None\n parent[1][child_idx] = [False, None]\n return not keep_it\n\n\n@register_stabilize\n@local_optimizer([mul])\ndef local_sigm_times_exp(fgraph, node):\n \"\"\"\n exp(x) * sigm(-x) -> sigm(x)\n exp(-x) * sigm(x) -> sigm(-x)\n\n todo: add stack traces to the intermediate variables\n \"\"\"\n # Bail early if it is not a multiplication.\n if node.op != mul:\n return None\n # Obtain tree of multiplications starting at this node.\n mul_tree = parse_mul_tree(node.outputs[0])\n # Perform core optimization.\n did_something = perform_sigm_times_exp(mul_tree)\n if not did_something:\n # No change.\n return None\n # The optimization may have introduced multiplications by 1 in the tree:\n # get rid of them.\n mul_tree = simplify_mul(mul_tree)\n # Recompute final output based on the updated tree.\n out = compute_mul(mul_tree)\n # keep the stack trace\n copy_stack_trace(node.outputs[0], out)\n return [out]\n\n\n@register_stabilize\n@local_optimizer([reciprocal])\ndef local_reciprocal_1_plus_exp(fgraph, node):\n \"\"\"``reciprocal(1+exp(x)) -> sigm(-x)``\n\n TODO: This is redundant; we can just decided on *one* canonical form\n for division (e.g. either `true_div` or `reciprocal`) and have this\n taken care of with existing rewrites.\n \"\"\"\n # this optimization should be done for numerical stability\n # so we don't care to check client counts\n if node.op == reciprocal:\n reciprocal_arg = node.inputs[0]\n if reciprocal_arg.owner and reciprocal_arg.owner.op == add:\n scalars_, scalar_inputs, nonconsts = scalarconsts_rest(\n reciprocal_arg.owner.inputs, only_process_constants=True\n )\n # scalar_inputs are potentially dimshuffled and fill'd scalars\n if len(nonconsts) == 1:\n if nonconsts[0].owner and nonconsts[0].owner.op == exp:\n if scalars_ and np.allclose(np.sum(scalars_), 1):\n out = fill_chain(\n sigmoid(neg(nonconsts[0].owner.inputs[0])),\n scalar_inputs,\n )\n # keep combined stack traces of\n # exp(x): nonconsts[0],\n # 1 + exp(x): reciprocal_arg,\n # 1 / (1 + exp(x)): node.outputs[0]\n copy_stack_trace(\n [nonconsts[0], reciprocal_arg, node.outputs[0]], out\n )\n return out\n\n\n# 1 - sigmoid(x) -> sigmoid(-x)\nlocal_1msigmoid = PatternSub(\n (sub, dict(pattern=\"y\", constraint=_is_1), (sigmoid, \"x\")),\n (sigmoid, (neg, \"x\")),\n tracks=[sigmoid],\n get_nodes=get_clients_at_depth1,\n name=\"local_1msigmoid\",\n)\nregister_stabilize(local_1msigmoid)\nregister_specialize(local_1msigmoid)\n\n\nlog1pmexp_to_log1mexp = PatternSub(\n (log1p, (neg, (exp, \"x\"))),\n (log1mexp, \"x\"),\n allow_multiple_clients=True,\n)\nregister_stabilize(log1pmexp_to_log1mexp, name=\"log1pmexp_to_log1mexp\")\n\n\n# log(sigmoid(x) / (1 - sigmoid(x))) -> x\n# i.e logit(sigmoid(x)) -> x\nlocal_logit_sigmoid = PatternSub(\n (log, (true_div, (sigmoid, \"x\"), (sub, 1, (sigmoid, \"x\")))),\n \"x\",\n tracks=[sigmoid],\n get_nodes=get_clients_at_depth2,\n allow_multiple_clients=True,\n name=\"local_logit_sigmoid\",\n)\nregister_canonicalize(local_logit_sigmoid)\nregister_specialize(local_logit_sigmoid)\n\n\n# sigmoid(log(x / (1-x)) -> x\n# i.e., sigmoid(logit(x)) -> x\nlocal_sigmoid_logit = PatternSub(\n (sigmoid, (log, (true_div, \"x\", (sub, 1, \"x\")))),\n \"x\",\n allow_multiple_clients=True,\n name=\"local_sigmoid_logit\",\n)\nregister_canonicalize(local_sigmoid_logit)\nregister_specialize(local_sigmoid_logit)\n"
] | [
[
"numpy.allclose",
"numpy.asarray",
"numpy.ones",
"numpy.all",
"numpy.float32",
"numpy.array",
"numpy.zeros"
],
[
"numpy.log2",
"numpy.allclose",
"numpy.asarray",
"numpy.int8",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vbautista379/astr-119-session-3 | [
"60c6cdca3f89a02f823a44b4d92bbeec52af783d"
] | [
"functions.py"
] | [
"import numpy as np\nimport sys\n\n#define a function that returns a value\ndef expo(x):\n\treturn np.exp(x)\t#return the np e^x function\n\t\n#define a subroutine that does not return a value\ndef show_expo(n):\n\tfor i in range(n):\n\t\tprint(expo(float(i)))\t#call the expo funtion\n\t\t\n#define a main function\ndef main():\n\tn = 10 #provide a default function for n\n\t\n\t#check if there is a command line argument provided\n\tif(len(sys.argv)>1):\n\t\tn = int(sys.argv[1])\t#if an argument was provided, use it for n\n\t\t\n\tshow_expo(n)\t\t#call the show_expo subroutine\n\t\n\t\n#run the main function\nif __name__ == \"__main__\":\n\tmain()\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jalayrupera/opencvBasics | [
"64be4ba03d4aa331c59c38cdd9f6f61621ccf922"
] | [
"7. Color Filtering/face- 7.py"
] | [
"import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile(1):\r\n _, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n lower_red = np.array([90,150,50])\r\n upper_red = np.array([255,255,180])\r\n \r\n mask = cv2.inRange(hsv, lower_red, upper_red)\r\n res = cv2.bitwise_and(frame,frame, mask= mask)\r\n\r\n median = cv2.medianBlur(res, 15)\r\n bilateral = cv2.bilateralFilter(res,15,75,75)\r\n\r\n \r\n kernel = np.ones((5,5),np.uint8)\r\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\r\n\r\n cv2.imshow('frame',frame)\r\n #cv2.imshow('mask',mask)\r\n cv2.imshow('res',res)\r\n #cv2.imshow('median',bilateral)\r\n cv2.imshow('opening',opening)\r\n cv2.imshow('closing',closing)\r\n \r\n k = cv2.waitKey(5) & 0xFF\r\n if k==27:\r\n break\r\n\r\n\r\ncv2.destroyAllWindows()\r\ncap.release()\r\n"
] | [
[
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
motionlife/tensorflow | [
"25847c4fb449269c4641ff123645917ad6c80acd",
"2eea17699f8417e0c8c5fa4834dbcde80325a704"
] | [
"tensorflow/python/compat/compat.py",
"tensorflow/python/keras/losses.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 12, 15)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n if date < _FORWARD_COMPATIBILITY_HORIZON:\n logging.warning(\"Trying to set the forward compatibility date to the past\"\n \" date %s. This will be ignored by TensorFlow.\" % (date))\n return\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibility, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Built-in loss functions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.python.autograph.core import ag_ctx\nfrom tensorflow.python.autograph.impl import api as autograph\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import smart_cond\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.utils import losses_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.keras.utils.generic_utils import deserialize_keras_object\nfrom tensorflow.python.keras.utils.generic_utils import serialize_keras_object\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops.losses import losses_impl\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.tools.docs import doc_controls\n\n\n@keras_export('keras.losses.Loss')\nclass Loss(object):\n \"\"\"Loss base class.\n\n To be implemented by subclasses:\n * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.\n\n Example subclass implementation:\n\n ```python\n class MeanSquaredError(Loss):\n\n def call(self, y_true, y_pred):\n y_pred = tf.convert_to_tensor_v2(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n return tf.reduce_mean(math_ops.square(y_pred - y_true), axis=-1)\n ```\n\n When used with `tf.distribute.Strategy`, outside of built-in training loops\n such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction\n types, and reduce losses explicitly in your training loop. Using 'AUTO' or\n 'SUM_OVER_BATCH_SIZE' will raise an error.\n\n Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for more\n details on this.\n\n You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:\n ```python\n with strategy.scope():\n loss_obj = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n ....\n loss = (tf.reduce_sum(loss_obj(labels, predictions)) *\n (1. / global_batch_size))\n ```\n \"\"\"\n\n def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):\n \"\"\"Initializes `Loss` class.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. 
When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op.\n \"\"\"\n losses_utils.ReductionV2.validate(reduction)\n self.reduction = reduction\n self.name = name\n # SUM_OVER_BATCH is only allowed in losses managed by `fit` or\n # CannedEstimators.\n self._allow_sum_over_batch_size = False\n self._set_name_scope()\n\n def _set_name_scope(self):\n \"\"\"Creates a valid `name_scope` name.\"\"\"\n if self.name is None:\n self._name_scope = self.__class__.__name__\n elif self.name == '<lambda>':\n self._name_scope = 'lambda'\n else:\n # E.g. '_my_loss' => 'my_loss'\n self._name_scope = self.name.strip('_')\n\n def __call__(self, y_true, y_pred, sample_weight=None):\n \"\"\"Invokes the `Loss` instance.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except\n sparse loss functions such as sparse categorical crossentropy where\n shape = `[batch_size, d0, .. dN-1]`\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`\n sample_weight: Optional `sample_weight` acts as a coefficient for the\n loss. If a scalar is provided, then the loss is simply scaled by the\n given value. If `sample_weight` is a tensor of size `[batch_size]`, then\n the total loss for each sample of the batch is rescaled by the\n corresponding element in the `sample_weight` vector. If the shape of\n `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to\n this shape), then each loss element of `y_pred` is scaled\n by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss\n functions reduce by 1 dimension, usually axis=-1.)\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has\n shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`\n because all loss functions reduce by 1 dimension, usually axis=-1.)\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid.\n \"\"\"\n # If we are wrapping a lambda function strip '<>' from the name as it is not\n # accepted in scope name.\n graph_ctx = tf_utils.graph_context_for_symbolic_tensors(\n y_true, y_pred, sample_weight)\n with K.name_scope(self._name_scope), graph_ctx:\n if context.executing_eagerly():\n call_fn = self.call\n else:\n call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())\n losses = call_fn(y_true, y_pred)\n return losses_utils.compute_weighted_loss(\n losses, sample_weight, reduction=self._get_reduction())\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Instantiates a `Loss` from its config (output of `get_config()`).\n\n Args:\n config: Output of `get_config()`.\n\n Returns:\n A `Loss` instance.\n \"\"\"\n return cls(**config)\n\n def get_config(self):\n \"\"\"Returns the config dictionary for a `Loss` instance.\"\"\"\n return {'reduction': self.reduction, 'name': self.name}\n\n @abc.abstractmethod\n @doc_controls.for_subclass_implementers\n def call(self, y_true, y_pred):\n \"\"\"Invokes the `Loss` instance.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except\n sparse loss functions such as sparse categorical crossentropy where\n shape = `[batch_size, d0, .. dN-1]`\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`\n\n Returns:\n Loss values with the shape `[batch_size, d0, .. 
dN-1]`.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n def _get_reduction(self):\n \"\"\"Handles `AUTO` reduction cases and returns the reduction value.\"\"\"\n if (not self._allow_sum_over_batch_size and\n distribution_strategy_context.has_strategy() and\n (self.reduction == losses_utils.ReductionV2.AUTO or\n self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):\n raise ValueError(\n 'Please use `tf.keras.losses.Reduction.SUM` or '\n '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '\n 'used with `tf.distribute.Strategy` outside of the built-in training '\n 'loops. You can implement '\n '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '\n 'size like:\\n```\\nwith strategy.scope():\\n'\n ' loss_obj = tf.keras.losses.CategoricalCrossentropy('\n 'reduction=tf.keras.losses.Reduction.NONE)\\n....\\n'\n ' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '\n '(1. / global_batch_size)\\n```\\nPlease see '\n 'https://www.tensorflow.org/tutorials/distribute/custom_training'\n ' for more details.')\n\n if self.reduction == losses_utils.ReductionV2.AUTO:\n return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n return self.reduction\n\n\nclass LossFunctionWrapper(Loss):\n \"\"\"Wraps a loss function in the `Loss` class.\"\"\"\n\n def __init__(self,\n fn,\n reduction=losses_utils.ReductionV2.AUTO,\n name=None,\n **kwargs):\n \"\"\"Initializes `LossFunctionWrapper` class.\n\n Args:\n fn: The loss function to wrap, with signature `fn(y_true, y_pred,\n **kwargs)`.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. 
Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: (Optional) name for the loss.\n **kwargs: The keyword arguments that are passed on to `fn`.\n \"\"\"\n super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)\n self.fn = fn\n self._fn_kwargs = kwargs\n\n def call(self, y_true, y_pred):\n \"\"\"Invokes the `LossFunctionWrapper` instance.\n\n Args:\n y_true: Ground truth values.\n y_pred: The predicted values.\n\n Returns:\n Loss values per sample.\n \"\"\"\n if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())\n return ag_fn(y_true, y_pred, **self._fn_kwargs)\n\n def get_config(self):\n config = {}\n for k, v in six.iteritems(self._fn_kwargs):\n config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v\n base_config = super(LossFunctionWrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.losses.MeanSquaredError')\nclass MeanSquaredError(LossFunctionWrapper):\n \"\"\"Computes the mean of squares of errors between labels and predictions.\n\n `loss = square(y_true - y_pred)`\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[1., 1.], [1., 0.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> mse = tf.keras.losses.MeanSquaredError()\n >>> mse(y_true, y_pred).numpy()\n 0.5\n\n >>> # Calling with 'sample_weight'.\n >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()\n 0.25\n\n >>> # Using 'sum' reduction type.\n >>> mse = tf.keras.losses.MeanSquaredError(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> mse(y_true, y_pred).numpy()\n 1.0\n\n >>> # Using 'none' reduction type.\n >>> mse = tf.keras.losses.MeanSquaredError(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> mse(y_true, y_pred).numpy()\n array([0.5, 0.5], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='mean_squared_error'):\n \"\"\"Initializes `MeanSquaredError` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. 
Defaults to 'mean_squared_error'.\n \"\"\"\n super(MeanSquaredError, self).__init__(\n mean_squared_error, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.MeanAbsoluteError')\nclass MeanAbsoluteError(LossFunctionWrapper):\n \"\"\"Computes the mean of absolute difference between labels and predictions.\n\n `loss = abs(y_true - y_pred)`\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[1., 1.], [1., 0.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> mae = tf.keras.losses.MeanAbsoluteError()\n >>> mae(y_true, y_pred).numpy()\n 0.5\n\n >>> # Calling with 'sample_weight'.\n >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()\n 0.25\n\n >>> # Using 'sum' reduction type.\n >>> mae = tf.keras.losses.MeanAbsoluteError(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> mae(y_true, y_pred).numpy()\n 1.0\n\n >>> # Using 'none' reduction type.\n >>> mae = tf.keras.losses.MeanAbsoluteError(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> mae(y_true, y_pred).numpy()\n array([0.5, 0.5], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='mean_absolute_error'):\n \"\"\"Initializes `MeanAbsoluteError` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'mean_absolute_error'.\n \"\"\"\n super(MeanAbsoluteError, self).__init__(\n mean_absolute_error, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.MeanAbsolutePercentageError')\nclass MeanAbsolutePercentageError(LossFunctionWrapper):\n \"\"\"Computes the mean absolute percentage error between `y_true` and `y_pred`.\n\n `loss = 100 * abs(y_true - y_pred) / y_true`\n\n Standalone usage:\n\n >>> y_true = [[2., 1.], [2., 3.]]\n >>> y_pred = [[1., 1.], [1., 0.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> mape = tf.keras.losses.MeanAbsolutePercentageError()\n >>> mape(y_true, y_pred).numpy()\n 50.\n\n >>> # Calling with 'sample_weight'.\n >>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()\n 20.\n\n >>> # Using 'sum' reduction type.\n >>> mape = tf.keras.losses.MeanAbsolutePercentageError(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> mape(y_true, y_pred).numpy()\n 100.\n\n >>> # Using 'none' reduction type.\n >>> mape = tf.keras.losses.MeanAbsolutePercentageError(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> mape(y_true, y_pred).numpy()\n array([25., 75.], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd',\n loss=tf.keras.losses.MeanAbsolutePercentageError())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='mean_absolute_percentage_error'):\n \"\"\"Initializes `MeanAbsolutePercentageError` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. 
Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to\n 'mean_absolute_percentage_error'.\n \"\"\"\n super(MeanAbsolutePercentageError, self).__init__(\n mean_absolute_percentage_error, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.MeanSquaredLogarithmicError')\nclass MeanSquaredLogarithmicError(LossFunctionWrapper):\n \"\"\"Computes the mean squared logarithmic error between `y_true` and `y_pred`.\n\n `loss = square(log(y_true + 1.) - log(y_pred + 1.))`\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[1., 1.], [1., 0.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> msle = tf.keras.losses.MeanSquaredLogarithmicError()\n >>> msle(y_true, y_pred).numpy()\n 0.240\n\n >>> # Calling with 'sample_weight'.\n >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()\n 0.120\n\n >>> # Using 'sum' reduction type.\n >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> msle(y_true, y_pred).numpy()\n 0.480\n\n >>> # Using 'none' reduction type.\n >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> msle(y_true, y_pred).numpy()\n array([0.240, 0.240], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd',\n loss=tf.keras.losses.MeanSquaredLogarithmicError())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='mean_squared_logarithmic_error'):\n \"\"\"Initializes `MeanSquaredLogarithmicError` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to\n 'mean_squared_logarithmic_error'.\n \"\"\"\n super(MeanSquaredLogarithmicError, self).__init__(\n mean_squared_logarithmic_error, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.BinaryCrossentropy')\nclass BinaryCrossentropy(LossFunctionWrapper):\n \"\"\"Computes the cross-entropy loss between true labels and predicted labels.\n\n Use this cross-entropy loss for binary (0 or 1) classification applications.\n The loss function requires the following inputs:\n\n - `y_true` (true label): This is either 0 or 1.\n - `y_pred` (predicted value): This is the model's prediction, i.e, a single\n floating-point value which either represents a\n [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]\n when `from_logits=True`) or a probability (i.e, value in [0., 1.] 
when\n `from_logits=False`).\n\n **Recommended Usage:** (set `from_logits=True`)\n\n With `tf.keras` API:\n\n ```python\n model.compile(\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n ....\n )\n ```\n\n As a standalone function:\n\n >>> # Example 1: (batch_size = 1, number of samples = 4)\n >>> y_true = [0, 1, 0, 0]\n >>> y_pred = [-18.6, 0.51, 2.94, -12.8]\n >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n >>> bce(y_true, y_pred).numpy()\n 0.865\n\n >>> # Example 2: (batch_size = 2, number of samples = 4)\n >>> y_true = [[0, 1], [0, 0]]\n >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]\n >>> # Using default 'auto'/'sum_over_batch_size' reduction type.\n >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n >>> bce(y_true, y_pred).numpy()\n 0.865\n >>> # Using 'sample_weight' attribute\n >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()\n 0.243\n >>> # Using 'sum' reduction` type.\n >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> bce(y_true, y_pred).numpy()\n 1.730\n >>> # Using 'none' reduction type.\n >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> bce(y_true, y_pred).numpy()\n array([0.235, 1.496], dtype=float32)\n\n **Default Usage:** (set `from_logits=False`)\n\n >>> # Make the following updates to the above \"Recommended Usage\" section\n >>> # 1. Set `from_logits=False`\n >>> tf.keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')\n >>> # 2. Update `y_pred` to use probabilities instead of logits\n >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]\n \"\"\"\n\n def __init__(self,\n from_logits=False,\n label_smoothing=0,\n reduction=losses_utils.ReductionV2.AUTO,\n name='binary_crossentropy'):\n \"\"\"Initializes `BinaryCrossentropy` instance.\n\n Args:\n from_logits: Whether to interpret `y_pred` as a tensor of\n [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we\n assume that `y_pred` contains probabilities (i.e., values in [0, 1]).\n **Note - Using from_logits=True may be more numerically stable.\n label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,\n we compute the loss between the predicted labels and a smoothed version\n of the true labels, where the smoothing squeezes the labels towards 0.5.\n Larger values of `label_smoothing` correspond to heavier smoothing.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: (Optional) Name for the op. 
Defaults to 'binary_crossentropy'.\n \"\"\"\n super(BinaryCrossentropy, self).__init__(\n binary_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n label_smoothing=label_smoothing)\n self.from_logits = from_logits\n\n\n@keras_export('keras.losses.CategoricalCrossentropy')\nclass CategoricalCrossentropy(LossFunctionWrapper):\n \"\"\"Computes the crossentropy loss between the labels and predictions.\n\n Use this crossentropy loss function when there are two or more label classes.\n We expect labels to be provided in a `one_hot` representation. If you want to\n provide labels as integers, please use `SparseCategoricalCrossentropy` loss.\n There should be `# classes` floating point values per feature.\n\n In the snippet below, there is `# classes` floating pointing values per\n example. The shape of both `y_pred` and `y_true` are\n `[batch_size, num_classes]`.\n\n Standalone usage:\n\n >>> y_true = [[0, 1, 0], [0, 0, 1]]\n >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> cce = tf.keras.losses.CategoricalCrossentropy()\n >>> cce(y_true, y_pred).numpy()\n 1.177\n\n >>> # Calling with 'sample_weight'.\n >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()\n 0.814\n\n >>> # Using 'sum' reduction type.\n >>> cce = tf.keras.losses.CategoricalCrossentropy(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> cce(y_true, y_pred).numpy()\n 2.354\n\n >>> # Using 'none' reduction type.\n >>> cce = tf.keras.losses.CategoricalCrossentropy(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> cce(y_true, y_pred).numpy()\n array([0.0513, 2.303], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy())\n ```\n \"\"\"\n\n def __init__(self,\n from_logits=False,\n label_smoothing=0,\n reduction=losses_utils.ReductionV2.AUTO,\n name='categorical_crossentropy'):\n \"\"\"Initializes `CategoricalCrossentropy` instance.\n\n Args:\n from_logits: Whether `y_pred` is expected to be a logits tensor. By\n default, we assume that `y_pred` encodes a probability distribution.\n **Note - Using from_logits=True is more numerically stable.**\n label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,\n meaning the confidence on label values are relaxed. For example, if\n `0.1`, use `0.1 / num_classes` for non-target labels and \n `0.9 + 0.1 / num_classes` for target labels.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. 
Defaults to 'categorical_crossentropy'.\n \"\"\"\n super(CategoricalCrossentropy, self).__init__(\n categorical_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n label_smoothing=label_smoothing)\n\n\n@keras_export('keras.losses.SparseCategoricalCrossentropy')\nclass SparseCategoricalCrossentropy(LossFunctionWrapper):\n \"\"\"Computes the crossentropy loss between the labels and predictions.\n\n Use this crossentropy loss function when there are two or more label classes.\n We expect labels to be provided as integers. If you want to provide labels\n using `one-hot` representation, please use `CategoricalCrossentropy` loss.\n There should be `# classes` floating point values per feature for `y_pred`\n and a single floating point value per feature for `y_true`.\n\n In the snippet below, there is a single floating point value per example for\n `y_true` and `# classes` floating pointing values per example for `y_pred`.\n The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is\n `[batch_size, num_classes]`.\n\n Standalone usage:\n\n >>> y_true = [1, 2]\n >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> scce = tf.keras.losses.SparseCategoricalCrossentropy()\n >>> scce(y_true, y_pred).numpy()\n 1.177\n\n >>> # Calling with 'sample_weight'.\n >>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()\n 0.814\n\n >>> # Using 'sum' reduction type.\n >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> scce(y_true, y_pred).numpy()\n 2.354\n\n >>> # Using 'none' reduction type.\n >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> scce(y_true, y_pred).numpy()\n array([0.0513, 2.303], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd',\n loss=tf.keras.losses.SparseCategoricalCrossentropy())\n ```\n \"\"\"\n\n def __init__(self,\n from_logits=False,\n reduction=losses_utils.ReductionV2.AUTO,\n name='sparse_categorical_crossentropy'):\n \"\"\"Initializes `SparseCategoricalCrossentropy` instance.\n\n Args:\n from_logits: Whether `y_pred` is expected to be a logits tensor. By\n default, we assume that `y_pred` encodes a probability distribution.\n **Note - Using from_logits=True may be more numerically stable.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to\n 'sparse_categorical_crossentropy'.\n \"\"\"\n super(SparseCategoricalCrossentropy, self).__init__(\n sparse_categorical_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits)\n\n\n@keras_export('keras.losses.Hinge')\nclass Hinge(LossFunctionWrapper):\n \"\"\"Computes the hinge loss between `y_true` and `y_pred`.\n\n `loss = maximum(1 - y_true * y_pred, 0)`\n\n `y_true` values are expected to be -1 or 1. 
If binary (0 or 1) labels are\n provided we will convert them to -1 or 1.\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> h = tf.keras.losses.Hinge()\n >>> h(y_true, y_pred).numpy()\n 1.3\n\n >>> # Calling with 'sample_weight'.\n >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()\n 0.55\n\n >>> # Using 'sum' reduction type.\n >>> h = tf.keras.losses.Hinge(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> h(y_true, y_pred).numpy()\n 2.6\n\n >>> # Using 'none' reduction type.\n >>> h = tf.keras.losses.Hinge(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> h(y_true, y_pred).numpy()\n array([1.1, 1.5], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge())\n ```\n \"\"\"\n\n def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):\n \"\"\"Initializes `Hinge` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'hinge'.\n \"\"\"\n super(Hinge, self).__init__(hinge, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.SquaredHinge')\nclass SquaredHinge(LossFunctionWrapper):\n \"\"\"Computes the squared hinge loss between `y_true` and `y_pred`.\n\n `loss = square(maximum(1 - y_true * y_pred, 0))`\n\n `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are\n provided we will convert them to -1 or 1.\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> h = tf.keras.losses.SquaredHinge()\n >>> h(y_true, y_pred).numpy()\n 1.86\n\n >>> # Calling with 'sample_weight'.\n >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()\n 0.73\n\n >>> # Using 'sum' reduction type.\n >>> h = tf.keras.losses.SquaredHinge(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> h(y_true, y_pred).numpy()\n 3.72\n\n >>> # Using 'none' reduction type.\n >>> h = tf.keras.losses.SquaredHinge(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> h(y_true, y_pred).numpy()\n array([1.46, 2.26], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='squared_hinge'):\n \"\"\"Initializes `SquaredHinge` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. 
Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'squared_hinge'.\n \"\"\"\n super(SquaredHinge, self).__init__(\n squared_hinge, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.CategoricalHinge')\nclass CategoricalHinge(LossFunctionWrapper):\n \"\"\"Computes the categorical hinge loss between `y_true` and `y_pred`.\n\n `loss = maximum(neg - pos + 1, 0)`\n where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`\n\n Standalone usage:\n\n >>> y_true = [[0, 1], [0, 0]]\n >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> h = tf.keras.losses.CategoricalHinge()\n >>> h(y_true, y_pred).numpy()\n 1.4\n\n >>> # Calling with 'sample_weight'.\n >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()\n 0.6\n\n >>> # Using 'sum' reduction type.\n >>> h = tf.keras.losses.CategoricalHinge(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> h(y_true, y_pred).numpy()\n 2.8\n\n >>> # Using 'none' reduction type.\n >>> h = tf.keras.losses.CategoricalHinge(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> h(y_true, y_pred).numpy()\n array([1.2, 1.6], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='categorical_hinge'):\n \"\"\"Initializes `CategoricalHinge` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'categorical_hinge'.\n \"\"\"\n super(CategoricalHinge, self).__init__(\n categorical_hinge, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.Poisson')\nclass Poisson(LossFunctionWrapper):\n \"\"\"Computes the Poisson loss between `y_true` and `y_pred`.\n\n `loss = y_pred - y_true * log(y_pred)`\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[1., 1.], [0., 0.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> p = tf.keras.losses.Poisson()\n >>> p(y_true, y_pred).numpy()\n 0.5\n\n >>> # Calling with 'sample_weight'.\n >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()\n 0.4\n\n >>> # Using 'sum' reduction type.\n >>> p = tf.keras.losses.Poisson(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> p(y_true, y_pred).numpy()\n 0.999\n\n >>> # Using 'none' reduction type.\n >>> p = tf.keras.losses.Poisson(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> p(y_true, y_pred).numpy()\n array([0.999, 0.], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson())\n ```\n \"\"\"\n\n def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):\n \"\"\"Initializes `Poisson` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. 
`AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'poisson'.\n \"\"\"\n super(Poisson, self).__init__(poisson, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.LogCosh')\nclass LogCosh(LossFunctionWrapper):\n \"\"\"Computes the logarithm of the hyperbolic cosine of the prediction error.\n\n `logcosh = log((exp(x) + exp(-x))/2)`,\n where x is the error `y_pred - y_true`.\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [0., 0.]]\n >>> y_pred = [[1., 1.], [0., 0.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> l = tf.keras.losses.LogCosh()\n >>> l(y_true, y_pred).numpy()\n 0.108\n\n >>> # Calling with 'sample_weight'.\n >>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()\n 0.087\n\n >>> # Using 'sum' reduction type.\n >>> l = tf.keras.losses.LogCosh(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> l(y_true, y_pred).numpy()\n 0.217\n\n >>> # Using 'none' reduction type.\n >>> l = tf.keras.losses.LogCosh(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> l(y_true, y_pred).numpy()\n array([0.217, 0.], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh())\n ```\n \"\"\"\n\n def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='log_cosh'):\n \"\"\"Initializes `LogCosh` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'log_cosh'.\n \"\"\"\n super(LogCosh, self).__init__(log_cosh, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.KLDivergence')\nclass KLDivergence(LossFunctionWrapper):\n \"\"\"Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.\n\n `loss = y_true * log(y_true / y_pred)`\n\n See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence\n\n Standalone usage:\n\n >>> y_true = [[0, 1], [0, 0]]\n >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> kl = tf.keras.losses.KLDivergence()\n >>> kl(y_true, y_pred).numpy()\n 0.458\n\n >>> # Calling with 'sample_weight'.\n >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()\n 0.366\n\n >>> # Using 'sum' reduction type.\n >>> kl = tf.keras.losses.KLDivergence(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> kl(y_true, y_pred).numpy()\n 0.916\n\n >>> # Using 'none' reduction type.\n >>> kl = tf.keras.losses.KLDivergence(\n ... 
reduction=tf.keras.losses.Reduction.NONE)\n >>> kl(y_true, y_pred).numpy()\n array([0.916, -3.08e-06], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence())\n ```\n \"\"\"\n\n def __init__(self,\n reduction=losses_utils.ReductionV2.AUTO,\n name='kl_divergence'):\n \"\"\"Initializes `KLDivergence` instance.\n\n Args:\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. Defaults to 'kl_divergence'.\n \"\"\"\n super(KLDivergence, self).__init__(\n kl_divergence, name=name, reduction=reduction)\n\n\n@keras_export('keras.losses.Huber')\nclass Huber(LossFunctionWrapper):\n \"\"\"Computes the Huber loss between `y_true` and `y_pred`.\n\n For each value x in `error = y_true - y_pred`:\n\n ```\n loss = 0.5 * x^2 if |x| <= d\n loss = 0.5 * d^2 + d * (|x| - d) if |x| > d\n ```\n where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss\n\n Standalone usage:\n\n >>> y_true = [[0, 1], [0, 0]]\n >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> h = tf.keras.losses.Huber()\n >>> h(y_true, y_pred).numpy()\n 0.155\n\n >>> # Calling with 'sample_weight'.\n >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()\n 0.09\n\n >>> # Using 'sum' reduction type.\n >>> h = tf.keras.losses.Huber(\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> h(y_true, y_pred).numpy()\n 0.31\n\n >>> # Using 'none' reduction type.\n >>> h = tf.keras.losses.Huber(\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> h(y_true, y_pred).numpy()\n array([0.18, 0.13], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.Huber())\n ```\n \"\"\"\n\n def __init__(self,\n delta=1.0,\n reduction=losses_utils.ReductionV2.AUTO,\n name='huber_loss'):\n \"\"\"Initializes `Huber` instance.\n\n Args:\n delta: A float, the point where the Huber loss function changes from a\n quadratic to linear.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to\n loss. Default value is `AUTO`. `AUTO` indicates that the reduction\n option will be determined by the usage context. For almost all cases\n this defaults to `SUM_OVER_BATCH_SIZE`. When used with\n `tf.distribute.Strategy`, outside of built-in training loops such as\n `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`\n will raise an error. Please see this custom training [tutorial](\n https://www.tensorflow.org/tutorials/distribute/custom_training) for\n more details.\n name: Optional name for the op. 
Defaults to 'huber_loss'.\n \"\"\"\n super(Huber, self).__init__(\n huber, name=name, reduction=reduction, delta=delta)\n\n\n@keras_export('keras.metrics.mean_squared_error', 'keras.metrics.mse',\n 'keras.metrics.MSE', 'keras.losses.mean_squared_error',\n 'keras.losses.mse', 'keras.losses.MSE')\[email protected]_dispatch_support\ndef mean_squared_error(y_true, y_pred):\n \"\"\"Computes the mean squared error between labels and predictions.\n\n After computing the squared distance between the inputs, the mean value over\n the last dimension is returned.\n\n `loss = mean(square(y_true - y_pred), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 2, size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)\n\n\n@keras_export('keras.metrics.mean_absolute_error', 'keras.metrics.mae',\n 'keras.metrics.MAE', 'keras.losses.mean_absolute_error',\n 'keras.losses.mae', 'keras.losses.MAE')\[email protected]_dispatch_support\ndef mean_absolute_error(y_true, y_pred):\n \"\"\"Computes the mean absolute error between labels and predictions.\n\n `loss = mean(abs(y_true - y_pred), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 2, size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return K.mean(math_ops.abs(y_pred - y_true), axis=-1)\n\n\n@keras_export('keras.metrics.mean_absolute_percentage_error',\n 'keras.metrics.mape', 'keras.metrics.MAPE',\n 'keras.losses.mean_absolute_percentage_error',\n 'keras.losses.mape', 'keras.losses.MAPE')\[email protected]_dispatch_support\ndef mean_absolute_percentage_error(y_true, y_pred):\n \"\"\"Computes the mean absolute percentage error between `y_true` and `y_pred`.\n\n `loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.random(size=(2, 3))\n >>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(),\n ... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Mean absolute percentage error values. shape = `[batch_size, d0, .. 
dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n diff = math_ops.abs(\n (y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon()))\n return 100. * K.mean(diff, axis=-1)\n\n\n@keras_export('keras.metrics.mean_squared_logarithmic_error',\n 'keras.metrics.msle', 'keras.metrics.MSLE',\n 'keras.losses.mean_squared_logarithmic_error',\n 'keras.losses.msle', 'keras.losses.MSLE')\[email protected]_dispatch_support\ndef mean_squared_logarithmic_error(y_true, y_pred):\n \"\"\"Computes the mean squared logarithmic error between `y_true` and `y_pred`.\n\n `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 2, size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> y_true = np.maximum(y_true, 1e-7)\n >>> y_pred = np.maximum(y_pred, 1e-7)\n >>> assert np.allclose(\n ... loss.numpy(),\n ... np.mean(\n ... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)\n second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)\n return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)\n\n\ndef _maybe_convert_labels(y_true):\n \"\"\"Converts binary labels into -1/1.\"\"\"\n are_zeros = math_ops.equal(y_true, 0)\n are_ones = math_ops.equal(y_true, 1)\n is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))\n\n def _convert_binary_labels():\n # Convert the binary labels to -1 or 1.\n return 2. * y_true - 1.\n\n updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels,\n lambda: y_true)\n return updated_y_true\n\n\n@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')\[email protected]_dispatch_support\ndef squared_hinge(y_true, y_pred):\n \"\"\"Computes the squared hinge loss between `y_true` and `y_pred`.\n\n `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(),\n ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))\n\n Args:\n y_true: The ground truth values. `y_true` values are expected to be -1 or 1.\n If binary (0 or 1) labels are provided we will convert them to -1 or 1.\n shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = _maybe_convert_labels(y_true)\n return K.mean(\n math_ops.square(math_ops.maximum(1. 
- y_true * y_pred, 0.)), axis=-1)\n\n\n@keras_export('keras.metrics.hinge', 'keras.losses.hinge')\[email protected]_dispatch_support\ndef hinge(y_true, y_pred):\n \"\"\"Computes the hinge loss between `y_true` and `y_pred`.\n\n `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.hinge(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(),\n ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))\n\n Args:\n y_true: The ground truth values. `y_true` values are expected to be -1 or 1.\n If binary (0 or 1) labels are provided they will be converted to -1 or 1.\n shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = _maybe_convert_labels(y_true)\n return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)\n\n\n@keras_export('keras.losses.categorical_hinge')\[email protected]_dispatch_support\ndef categorical_hinge(y_true, y_pred):\n \"\"\"Computes the categorical hinge loss between `y_true` and `y_pred`.\n\n `loss = maximum(neg - pos + 1, 0)`\n where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 3, size=(2,))\n >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> pos = np.sum(y_true * y_pred, axis=-1)\n >>> neg = np.amax((1. - y_true) * y_pred, axis=-1)\n >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))\n\n Args:\n y_true: The ground truth values. `y_true` values are expected to be 0 or 1.\n y_pred: The predicted values.\n\n Returns:\n Categorical hinge loss values.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)\n neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)\n zero = math_ops.cast(0., y_pred.dtype)\n return math_ops.maximum(neg - pos + 1., zero)\n\n\n@keras_export('keras.losses.huber', v1=[])\[email protected]_dispatch_support\ndef huber(y_true, y_pred, delta=1.0):\n \"\"\"Computes Huber loss value.\n\n For each value x in `error = y_true - y_pred`:\n\n ```\n loss = 0.5 * x^2 if |x| <= d\n loss = 0.5 * d^2 + d * (|x| - d) if |x| > d\n ```\n where d is `delta`. 
See: https://en.wikipedia.org/wiki/Huber_loss\n\n Args:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n delta: A float, the point where the Huber loss function changes from a\n quadratic to linear.\n\n Returns:\n Tensor with one scalar loss entry per sample.\n \"\"\"\n y_pred = math_ops.cast(y_pred, dtype=K.floatx())\n y_true = math_ops.cast(y_true, dtype=K.floatx())\n delta = math_ops.cast(delta, dtype=K.floatx())\n error = math_ops.subtract(y_pred, y_true)\n abs_error = math_ops.abs(error)\n half = ops.convert_to_tensor_v2_with_dispatch(0.5, dtype=abs_error.dtype)\n return K.mean(\n array_ops.where_v2(\n abs_error <= delta, half * math_ops.pow(error, 2),\n half * math_ops.pow(delta, 2) + delta * (abs_error - delta)),\n axis=-1)\n\n\n@keras_export('keras.losses.log_cosh', 'keras.losses.logcosh',\n 'keras.metrics.log_cosh', 'keras.metrics.logcosh')\[email protected]_dispatch_support\ndef log_cosh(y_true, y_pred):\n \"\"\"Logarithm of the hyperbolic cosine of the prediction error.\n\n `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and\n to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly\n like the mean squared error, but will not be so strongly affected by the\n occasional wildly incorrect prediction.\n\n Standalone usage:\n\n >>> y_true = np.random.random(size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.logcosh(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> x = y_pred - y_true\n >>> assert np.allclose(\n ... loss.numpy(),\n ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - math_ops.log(2.), axis=-1),\n ... atol=1e-5)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n\n def _logcosh(x):\n return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)\n\n return K.mean(_logcosh(y_pred - y_true), axis=-1)\n\n\n@keras_export('keras.metrics.categorical_crossentropy',\n 'keras.losses.categorical_crossentropy')\[email protected]_dispatch_support\ndef categorical_crossentropy(y_true,\n y_pred,\n from_logits=False,\n label_smoothing=0):\n \"\"\"Computes the categorical crossentropy loss.\n\n Standalone usage:\n\n >>> y_true = [[0, 1, 0], [0, 0, 1]]\n >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> loss.numpy()\n array([0.0513, 2.303], dtype=float32)\n\n Args:\n y_true: Tensor of one-hot true targets.\n y_pred: Tensor of predicted targets.\n from_logits: Whether `y_pred` is expected to be a logits tensor. By default,\n we assume that `y_pred` encodes a probability distribution.\n label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. 
For\n example, if `0.1`, use `0.1 / num_classes` for non-target labels\n and `0.9 + 0.1 / num_classes` for target labels.\n\n Returns:\n Categorical crossentropy loss value.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n label_smoothing = ops.convert_to_tensor_v2_with_dispatch(\n label_smoothing, dtype=K.floatx())\n\n def _smooth_labels():\n num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)\n return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)\n\n y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,\n lambda: y_true)\n return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n\n@keras_export('keras.metrics.sparse_categorical_crossentropy',\n 'keras.losses.sparse_categorical_crossentropy')\[email protected]_dispatch_support\ndef sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):\n \"\"\"Computes the sparse categorical crossentropy loss.\n\n Standalone usage:\n\n >>> y_true = [1, 2]\n >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> loss.numpy()\n array([0.0513, 2.303], dtype=float32)\n\n Args:\n y_true: Ground truth values.\n y_pred: The predicted values.\n from_logits: Whether `y_pred` is expected to be a logits tensor. By default,\n we assume that `y_pred` encodes a probability distribution.\n axis: (Optional) Defaults to -1. The dimension along which the entropy is\n computed.\n\n Returns:\n Sparse categorical crossentropy loss value.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return K.sparse_categorical_crossentropy(\n y_true, y_pred, from_logits=from_logits, axis=axis)\n\n\n@keras_export('keras.metrics.binary_crossentropy',\n 'keras.losses.binary_crossentropy')\[email protected]_dispatch_support\ndef binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):\n \"\"\"Computes the binary crossentropy loss.\n\n Standalone usage:\n\n >>> y_true = [[0, 1], [0, 0]]\n >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]\n >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> loss.numpy()\n array([0.916 , 0.714], dtype=float32)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n from_logits: Whether `y_pred` is expected to be a logits tensor. By default,\n we assume that `y_pred` encodes a probability distribution.\n label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by \n squeezing them towards 0.5 That is, using `1. - 0.5 * label_smoothing`\n for the target class and `0.5 * label_smoothing` for the non-target class.\n\n Returns:\n Binary crossentropy loss value. shape = `[batch_size, d0, .. 
dN-1]`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n label_smoothing = ops.convert_to_tensor_v2_with_dispatch(\n label_smoothing, dtype=K.floatx())\n\n def _smooth_labels():\n return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing\n\n y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,\n lambda: y_true)\n return K.mean(\n K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)\n\n\n@keras_export('keras.metrics.kl_divergence',\n 'keras.metrics.kullback_leibler_divergence', 'keras.metrics.kld',\n 'keras.metrics.KLD', 'keras.losses.kl_divergence',\n 'keras.losses.kullback_leibler_divergence', 'keras.losses.kld',\n 'keras.losses.KLD')\[email protected]_dispatch_support\ndef kl_divergence(y_true, y_pred):\n \"\"\"Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.\n\n `loss = y_true * log(y_true / y_pred)`\n\n See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)\n >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)\n >>> assert np.array_equal(\n ... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))\n\n Args:\n y_true: Tensor of true targets.\n y_pred: Tensor of predicted targets.\n\n Returns:\n A `Tensor` with loss.\n\n Raises:\n TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = K.clip(y_true, K.epsilon(), 1)\n y_pred = K.clip(y_pred, K.epsilon(), 1)\n return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)\n\n\n@keras_export('keras.metrics.poisson', 'keras.losses.poisson')\[email protected]_dispatch_support\ndef poisson(y_true, y_pred):\n \"\"\"Computes the Poisson loss between y_true and y_pred.\n\n The Poisson loss is the mean of the elements of the `Tensor`\n `y_pred - y_true * log(y_pred)`.\n\n Standalone usage:\n\n >>> y_true = np.random.randint(0, 2, size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.poisson(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> y_pred = y_pred + 1e-7\n >>> assert np.allclose(\n ... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),\n ... atol=1e-5)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.\n\n Raises:\n InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.\n \"\"\"\n y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)\n\n\n@keras_export(\n 'keras.losses.cosine_similarity',\n v1=[\n 'keras.metrics.cosine_proximity',\n 'keras.metrics.cosine',\n 'keras.losses.cosine_proximity',\n 'keras.losses.cosine',\n 'keras.losses.cosine_similarity',\n ])\[email protected]_dispatch_support\ndef cosine_similarity(y_true, y_pred, axis=-1):\n \"\"\"Computes the cosine similarity between labels and predictions.\n\n Note that it is a number between -1 and 1. 
When it is a negative number\n between -1 and 0, 0 indicates orthogonality and values closer to -1\n indicate greater similarity. The values closer to 1 indicate greater\n dissimilarity. This makes it usable as a loss function in a setting\n where you try to maximize the proximity between predictions and\n targets. If either `y_true` or `y_pred` is a zero vector, cosine\n similarity will be 0 regardless of the proximity between predictions\n and targets.\n\n `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]\n >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]\n >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)\n >>> loss.numpy()\n array([-0., -0.999, 0.999], dtype=float32)\n\n Args:\n y_true: Tensor of true targets.\n y_pred: Tensor of predicted targets.\n axis: Axis along which to determine similarity.\n\n Returns:\n Cosine similarity tensor.\n \"\"\"\n y_true = nn.l2_normalize(y_true, axis=axis)\n y_pred = nn.l2_normalize(y_pred, axis=axis)\n return -math_ops.reduce_sum(y_true * y_pred, axis=axis)\n\n\n@keras_export('keras.losses.CosineSimilarity')\nclass CosineSimilarity(LossFunctionWrapper):\n \"\"\"Computes the cosine similarity between labels and predictions.\n\n Note that it is a number between -1 and 1. When it is a negative number\n between -1 and 0, 0 indicates orthogonality and values closer to -1\n indicate greater similarity. The values closer to 1 indicate greater\n dissimilarity. This makes it usable as a loss function in a setting\n where you try to maximize the proximity between predictions and targets.\n If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0\n regardless of the proximity between predictions and targets.\n\n `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`\n\n Standalone usage:\n\n >>> y_true = [[0., 1.], [1., 1.]]\n >>> y_pred = [[1., 0.], [1., 1.]]\n >>> # Using 'auto'/'sum_over_batch_size' reduction type.\n >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)\n >>> # l2_norm(y_true) = [[0., 1.], [1./1.414], 1./1.414]]]\n >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414], 1./1.414]]]\n >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]\n >>> # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))\n >>> # = -((0. + 0.) + (0.5 + 0.5)) / 2\n >>> cosine_loss(y_true, y_pred).numpy()\n -0.5\n\n >>> # Calling with 'sample_weight'.\n >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()\n -0.0999\n\n >>> # Using 'sum' reduction type.\n >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,\n ... reduction=tf.keras.losses.Reduction.SUM)\n >>> cosine_loss(y_true, y_pred).numpy()\n -0.999\n\n >>> # Using 'none' reduction type.\n >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,\n ... reduction=tf.keras.losses.Reduction.NONE)\n >>> cosine_loss(y_true, y_pred).numpy()\n array([-0., -0.999], dtype=float32)\n\n Usage with the `compile()` API:\n\n ```python\n model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))\n ```\n\n Args:\n axis: (Optional) Defaults to -1. The dimension along which the cosine\n similarity is computed.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.\n Default value is `AUTO`. `AUTO` indicates that the reduction option will\n be determined by the usage context. For almost all cases this defaults to\n `SUM_OVER_BATCH_SIZE`. 
When used with `tf.distribute.Strategy`, outside of\n built-in training loops such as `tf.keras` `compile` and `fit`, using\n `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this\n custom training [tutorial]\n (https://www.tensorflow.org/tutorials/distribute/custom_training) for more\n details.\n name: Optional name for the op.\n \"\"\"\n\n def __init__(self,\n axis=-1,\n reduction=losses_utils.ReductionV2.AUTO,\n name='cosine_similarity'):\n super(CosineSimilarity, self).__init__(\n cosine_similarity, reduction=reduction, name=name, axis=axis)\n\n\n# Aliases.\n\nbce = BCE = binary_crossentropy\nmse = MSE = mean_squared_error\nmae = MAE = mean_absolute_error\nmape = MAPE = mean_absolute_percentage_error\nmsle = MSLE = mean_squared_logarithmic_error\nkld = KLD = kullback_leibler_divergence = kl_divergence\nlogcosh = log_cosh\nhuber_loss = huber\n\n\ndef is_categorical_crossentropy(loss):\n result = ((isinstance(loss, CategoricalCrossentropy) or\n (isinstance(loss, LossFunctionWrapper) and\n loss.fn == categorical_crossentropy) or\n (hasattr(loss, '__name__') and\n loss.__name__ == 'categorical_crossentropy') or\n (loss == 'categorical_crossentropy')))\n return result\n\n\n@keras_export('keras.losses.serialize')\ndef serialize(loss):\n \"\"\"Serializes loss function or `Loss` instance.\n\n Arguments:\n loss: A Keras `Loss` instance or a loss function.\n\n Returns:\n Loss configuration dictionary.\n \"\"\"\n return serialize_keras_object(loss)\n\n\n@keras_export('keras.losses.deserialize')\ndef deserialize(name, custom_objects=None):\n \"\"\"Deserializes a serialized loss class/function instance.\n\n Arguments:\n name: Loss configuration.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras `Loss` instance or a loss function.\n \"\"\"\n return deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name='loss function')\n\n\n@keras_export('keras.losses.get')\ndef get(identifier):\n \"\"\"Retrieves a Keras loss as a `function`/`Loss` class instance.\n\n The `identifier` may be the string name of a loss function or `Loss` class.\n\n >>> loss = tf.keras.losses.get(\"categorical_crossentropy\")\n >>> type(loss)\n <class 'function'>\n >>> loss = tf.keras.losses.get(\"CategoricalCrossentropy\")\n >>> type(loss)\n <class '...tensorflow.python.keras.losses.CategoricalCrossentropy'>\n\n You can also specify `config` of the loss to this function by passing dict\n containing `class_name` and `config` as an identifier. Also note that the\n `class_name` must map to a `Loss` class\n\n >>> identifier = {\"class_name\": \"CategoricalCrossentropy\",\n ... \"config\": {\"from_logits\": True}}\n >>> loss = tf.keras.losses.get(identifier)\n >>> type(loss)\n <class '...tensorflow.python.keras.losses.CategoricalCrossentropy'>\n\n Arguments:\n identifier: A loss identifier. 
One of None or string name of a loss\n function/class or loss configuration dictionary or a loss function or a\n loss class instance\n\n Returns:\n A Keras loss as a `function`/ `Loss` class instance.\n\n Raises:\n ValueError: If `identifier` cannot be interpreted.\n \"\"\"\n if identifier is None:\n return None\n if isinstance(identifier, six.string_types):\n identifier = str(identifier)\n return deserialize(identifier)\n if isinstance(identifier, dict):\n return deserialize(identifier)\n elif callable(identifier):\n return identifier\n else:\n raise ValueError(\n 'Could not interpret loss function identifier: {}'.format(identifier))\n\n\nLABEL_DTYPES_FOR_LOSSES = {\n losses_impl.sparse_softmax_cross_entropy: 'int32',\n sparse_categorical_crossentropy: 'int32'\n}\n"
] | [
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.util.tf_export.tf_export"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.keras.backend.epsilon",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.backend.categorical_crossentropy",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.framework.ops.convert_to_tensor_v2_with_dispatch",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.keras.backend.binary_crossentropy",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.utils.tf_utils.is_tensor_or_variable",
"tensorflow.python.keras.utils.losses_utils.ReductionV2.validate",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.squared_difference",
"tensorflow.python.framework.smart_cond.smart_cond",
"tensorflow.python.keras.utils.losses_utils.squeeze_or_expand_dimensions",
"tensorflow.python.keras.backend.eval",
"tensorflow.python.ops.nn.softplus",
"tensorflow.python.keras.utils.tf_utils.graph_context_for_symbolic_tensors",
"tensorflow.python.keras.backend.sparse_categorical_crossentropy",
"tensorflow.python.keras.backend.mean",
"tensorflow.python.autograph.core.ag_ctx.control_status_ctx",
"tensorflow.python.ops.nn.l2_normalize",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.keras.utils.generic_utils.serialize_keras_object"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.9",
"2.5",
"2.6",
"2.10"
]
}
] |
CaimdotAIAccount/youtube8mchallenge | [
"44a07314d6e89df5755b137716f2b4a87b632ce6"
] | [
"video_level_models.py"
] | [
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains model definitions.\"\"\"\nimport math\n\nimport models\nimport tensorflow as tf\nimport utils\n\nfrom tensorflow import flags\nimport tensorflow.contrib.slim as slim\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer(\n \"moe_num_mixtures\", 2,\n \"The number of mixtures (excluding the dummy 'expert') used for MoeModel.\")\nflags.DEFINE_float(\n \"moe_l2\", 1e-8,\n \"L2 penalty for MoeModel.\")\nflags.DEFINE_integer(\n \"moe_low_rank_gating\", -1,\n \"Low rank gating for MoeModel.\")\nflags.DEFINE_bool(\n \"moe_prob_gating\", False,\n \"Prob gating for MoeModel.\")\nflags.DEFINE_string(\n \"moe_prob_gating_input\", \"prob\",\n \"input Prob gating for MoeModel.\")\n\n\nclass LogisticModel(models.BaseModel):\n \"\"\"Logistic model with L2 regularization.\"\"\"\n\n def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):\n \"\"\"Creates a logistic model.\n\n Args:\n model_input: 'batch' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. The dimensions of the tensor are\n batch_size x num_classes.\"\"\"\n output = slim.fully_connected(\n model_input, vocab_size, activation_fn=tf.nn.sigmoid,\n weights_regularizer=slim.l2_regularizer(l2_penalty))\n return {\"predictions\": output}\n\nclass MoeModel(models.BaseModel):\n \"\"\"A softmax over a mixture of logistic models (with L2 regularization).\"\"\"\n\n def create_model(self,\n model_input,\n vocab_size,\n is_training,\n num_mixtures=None,\n l2_penalty=1e-8,\n **unused_params):\n \"\"\"Creates a Mixture of (Logistic) Experts model.\n\n The model consists of a per-class softmax distribution over a\n configurable number of logistic classifiers. One of the classifiers in the\n mixture is not trained, and always predicts 0.\n\n Args:\n model_input: 'batch_size' x 'num_features' matrix of input features.\n vocab_size: The number of classes in the dataset.\n num_mixtures: The number of mixtures (excluding a dummy 'expert' that\n always predicts the non-existence of an entity).\n l2_penalty: How much to penalize the squared magnitudes of parameter\n values.\n Returns:\n A dictionary with a tensor containing the probability predictions of the\n model in the 'predictions' key. 
The dimensions of the tensor are\n batch_size x num_classes.\n \"\"\"\n num_mixtures = num_mixtures or FLAGS.moe_num_mixtures\n low_rank_gating = FLAGS.moe_low_rank_gating\n l2_penalty = FLAGS.moe_l2;\n gating_probabilities = FLAGS.moe_prob_gating\n gating_input = FLAGS.moe_prob_gating_input\n\n input_size = model_input.get_shape().as_list()[1]\n remove_diag = FLAGS.gating_remove_diag\n\n if low_rank_gating == -1:\n gate_activations = slim.fully_connected(\n model_input,\n vocab_size * (num_mixtures + 1),\n activation_fn=None,\n biases_initializer=None,\n weights_regularizer=slim.l2_regularizer(l2_penalty),\n scope=\"gates\")\n else:\n gate_activations1 = slim.fully_connected(\n model_input,\n low_rank_gating,\n activation_fn=None,\n biases_initializer=None,\n weights_regularizer=slim.l2_regularizer(l2_penalty),\n scope=\"gates1\")\n gate_activations = slim.fully_connected(\n gate_activations1,\n vocab_size * (num_mixtures + 1),\n activation_fn=None,\n biases_initializer=None,\n weights_regularizer=slim.l2_regularizer(l2_penalty),\n scope=\"gates2\")\n\n expert_activations = slim.fully_connected(\n model_input,\n vocab_size * num_mixtures,\n activation_fn=None,\n weights_regularizer=slim.l2_regularizer(l2_penalty),\n scope=\"experts\")\n\n gating_distribution = tf.nn.softmax(tf.reshape(\n gate_activations,\n [-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)\n expert_distribution = tf.nn.sigmoid(tf.reshape(\n expert_activations,\n [-1, num_mixtures])) # (Batch * #Labels) x num_mixtures\n\n probabilities_by_class_and_batch = tf.reduce_sum(\n gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n probabilities = tf.reshape(probabilities_by_class_and_batch,\n [-1, vocab_size])\n\n if gating_probabilities:\n if gating_input == 'prob':\n gating_weights = tf.get_variable(\"gating_prob_weights\",\n [vocab_size, vocab_size],\n initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(vocab_size)))\n gates = tf.matmul(probabilities, gating_weights)\n else:\n gating_weights = tf.get_variable(\"gating_prob_weights\",\n [input_size, vocab_size],\n initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(vocab_size)))\n\n gates = tf.matmul(model_input, gating_weights)\n\n if remove_diag:\n # removes diagonals coefficients\n diagonals = tf.matrix_diag_part(gating_weights)\n gates = gates - tf.multiply(diagonals, probabilities)\n\n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=is_training,\n scope=\"gating_prob_bn\")\n\n gates = tf.sigmoid(gates)\n\n probabilities = tf.multiply(probabilities, gates)\n\n return {\"predictions\": probabilities}\n"
] | [
[
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.matrix_diag_part",
"tensorflow.flags.DEFINE_string",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.flags.DEFINE_float",
"tensorflow.flags.DEFINE_bool",
"tensorflow.contrib.slim.batch_norm",
"tensorflow.flags.DEFINE_integer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samlobel/simple_rl_mbrl | [
"ed868916d06dbf68f4af23bea83b0e852e88df6e",
"ed868916d06dbf68f4af23bea83b0e852e88df6e"
] | [
"simple_rl/tasks/lunar_lander/wrappers.py",
"simple_rl/agents/func_approx/sam_stuff/main.py"
] | [
"\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym.spaces import Box\nfrom gym import ObservationWrapper\nimport pdb\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self, env):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n render_kwargs = {'mode': 'rgb_array'}\n\n # Extend observation space with pixels.\n pixels = self.env.render(**render_kwargs)\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n self.observation_space = spaces.Box(shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n\n self._env = env\n self._render_kwargs = render_kwargs\n\n def observation(self, observation):\n return self.env.render(**self._render_kwargs)\n\n\nclass ResizeObservation(ObservationWrapper):\n r\"\"\"Downsample the image observation to a square image. \"\"\"\n def __init__(self, env, shape):\n super(ResizeObservation, self).__init__(env)\n if isinstance(shape, int):\n shape = (shape, shape)\n assert all(x > 0 for x in shape), shape\n self.shape = tuple(shape)\n\n obs_shape = self.shape + self.observation_space.shape[2:]\n self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)\n\n def observation(self, observation):\n import cv2\n observation = cv2.resize(observation, self.shape[::-1], interpolation=cv2.INTER_AREA)\n if observation.ndim == 2:\n observation = np.expand_dims(observation, -1)\n return observation\n\n\nclass GrayScaleObservation(ObservationWrapper):\n r\"\"\"Convert the image observation from RGB to gray scale. \"\"\"\n def __init__(self, env, keep_dim=False):\n super(GrayScaleObservation, self).__init__(env)\n self.keep_dim = keep_dim\n\n assert len(env.observation_space.shape) == 3 and env.observation_space.shape[-1] == 3, env.observation_space.shape\n obs_shape = self.observation_space.shape[:2]\n if self.keep_dim:\n self.observation_space = Box(low=0, high=255, shape=(obs_shape[0], obs_shape[1], 1), dtype=np.uint8)\n else:\n self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)\n\n def observation(self, observation):\n import cv2\n observation = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY)\n if self.keep_dim:\n observation = np.expand_dims(observation, -1)\n return observation",
"import numpy as np\nimport random\nfrom collections import namedtuple, deque, defaultdict\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport pdb\nfrom copy import deepcopy\nimport shutil\nimport os\nimport time\nimport argparse\nimport pickle\n\nimport torch.optim as optim\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\n\n\nfrom simple_rl.agents.AgentClass import Agent\nfrom simple_rl.agents.func_approx.ddpg.utils import compute_gradient_norm\n# from simple_rl.agents.func_approx.sam_stuff.replay_buffer import ReplayBuffer\nfrom simple_rl.agents.func_approx.sam_stuff.model import ConvQNetwork, DenseQNetwork\nfrom simple_rl.agents.func_approx.sam_stuff.epsilon_schedule import *\nfrom simple_rl.tasks.gym.GymMDPClass import GymMDP\nfrom simple_rl.tasks.lunar_lander.LunarLanderMDPClass import LunarLanderMDP\n# from simple_rl.agents.func_approx.sam_stuff.RandomNetworkDistillationClass import RNDModel, RunningMeanStd\nfrom simple_rl.agents.func_approx.sam_stuff.RandomNetworkDistillationClass import RunningMeanStd\n\nfrom simple_rl.agents.func_approx.sam_stuff.DQNAgentClass import DQNAgent\nfrom simple_rl.agents.func_approx.sam_stuff.DQNAgentClass import WorldModel\nfrom simple_rl.agents.func_approx.sam_stuff.DQNAgentClass import OnlineComposer\nfrom simple_rl.agents.func_approx.sam_stuff.ModelQNetworkComposer import Composer\n\n\nNUM_EPISODES = 3500\nNUM_STEPS = 10000\n\n\n\n# def test_forward_pass(dqn_agent, mdp):\n# # load the weights from file\n# mdp.reset()\n# state = deepcopy(mdp.init_state)\n# overall_reward = 0.\n# mdp.render = True\n\n# while not state.is_terminal():\n# action = dqn_agent.act(state.features(), train_mode=False)\n# reward, next_state = mdp.execute_agent_action(action)\n# overall_reward += reward\n# state = next_state\n\n# mdp.render = False\n# return overall_reward\n\n\ndef show_video(dqn_agent, mdp):\n # load the weights from file\n mdp.reset()\n state = deepcopy(mdp.init_state)\n overall_reward = 0.\n mdp.render = True\n\n while not state.is_terminal():\n action = dqn_agent.act(state.features(), train_mode=False)\n reward, next_state = mdp.execute_agent_action(action)\n overall_reward += reward\n state = next_state\n\n mdp.render = False\n return overall_reward\n\n\ndef save_all_scores(experiment_name, log_dir, seed, scores):\n print(\"\\rSaving training and validation scores..\")\n training_scores_file_name = \"{}_{}_training_scores.pkl\".format(experiment_name, seed)\n\n if log_dir:\n training_scores_file_name = os.path.join(log_dir, training_scores_file_name)\n\n with open(training_scores_file_name, \"wb+\") as _f:\n pickle.dump(scores, _f)\n\ndef create_log_dir(experiment_name):\n path = os.path.join(os.getcwd(), \"logs\", experiment_name)\n try:\n os.mkdir(path)\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n return path\n\ndef test_render(agent, mdp):\n while True:\n print(\"Press ctrl-C to quit\")\n mdp.set_render(True)\n mdp.reset()\n state = mdp.init_state\n while True:\n # action = agent.act(state.features(), train_mode=False)\n action = agent.get_best_action(state.features())\n reward, next_state = mdp.execute_agent_action(action)\n state = next_state\n\n game_over = mdp.game_over if hasattr(mdp, 'game_over') else False\n if state.is_terminal() or game_over:\n print('bye bye')\n break\n\n\ndef collect_data_for_bias_variance_calculation(mdp, q_agent, num_runs):\n \"\"\"\n Runs 
on-policy, and just makes the data that we'll pass to the composer.\n \"\"\"\n exp = namedtuple(\"Experience\", field_names=[\"state\",\"action\",\"reward\",\"next_state\", \"done\", \"time_limit_truncated\"])\n experiences = []\n\n states = []\n actions = []\n rewards = []\n next_states = []\n dones = []\n time_limit_truncateds = []\n \n for _ in range(num_runs):\n mdp.reset()\n state = deepcopy(mdp.init_state)\n state = np.asarray(state.features())\n\n true_finish = False\n while True:\n # action = agent.act(state.features(), train_mode=True)\n # reward, next_state = mdp.execute_agent_action(action)\n\n\n action = composer.q_agent.get_best_action(state)\n reward, next_state = mdp.execute_agent_action(action)\n # is_terminal = next_state.is_terminal()\n # time_limit_truncated = next_state.is_time_limit_truncated()\n\n\n experiences.append(\n exp(state=state,\n action=action,\n reward=reward,\n next_state=np.asarray(next_state.features()),\n done=next_state.is_terminal(),\n time_limit_truncated=next_state.is_time_limit_truncated()\n ))\n\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n next_states.append(np.asarray(next_state.features()))\n dones.append(next_state.is_terminal())\n time_limit_truncateds.append(next_state.is_time_limit_truncated())\n\n game_over = mdp.game_over if hasattr(mdp, 'game_over') else False\n\n if game_over and not next_state.is_terminal():\n print('howza')\n # import ipdb; ipdb.set_trace()\n raise Exception(\"Honestly, we're just not dealing with this well here.\")\n\n if next_state.is_terminal():\n break\n\n state = np.asarray(next_state.features())\n\n\n return experiences\n\n # return dict(\n # states=states,\n # actions=actions,\n # rewards=rewards,\n # next_states=next_states,\n # dones=dones,\n # time_limit_truncateds=time_limit_truncateds,\n # )\n \n\n\n pass\n\n\nclass Evaluator:\n\n def __init__(self, mdp, composer, num_runs_each=1, rollout_depth=5, lambdas_to_test=None, logdir=\".\"):\n self.mdp = mdp\n self.composer = composer\n self.num_runs_each = num_runs_each\n self.rollout_depth = rollout_depth\n self.logdir = logdir\n\n self._bias = None\n self._variance = None\n\n if lambdas_to_test is None:\n self.lambdas_to_test = [0.0, 0.5, 1.0]\n else:\n self.lambdas_to_test = lambdas_to_test\n\n\n self.results = defaultdict(list)\n\n def _set_bias_variance(self, num_runs_to_collect_over):\n data = collect_data_for_bias_variance_calculation(self.mdp, self.composer.q_agent, num_runs_to_collect_over)\n # bias, variance = self.composer.create_bias_variance_from_data(data, self.rollout_depth)\n bias, variance, covariance = self.composer.create_bias_variance_covariance_from_data(data, self.rollout_depth)\n\n # print(\"This is about to be mega self-defeating...\")\n # self._bias = np.zeros((self.rollout_depth,), dtype=np.float32)\n # self._variance = np.ones((self.rollout_depth,), dtype=np.float32)\n # self._variance[0] -= 0.999\n # self._variance *= 1000\n # print(\"self, defeated\")\n\n self._bias = bias\n self._variance = variance\n self._covariance = covariance\n print(f\"Bias: {bias}\\nVariance: {variance}\")\n print(f\"Covariance: {covariance}\")\n\n def evaluate_different_models(self, *, training_steps):\n \"\"\"\n This does the evaluation, prints out results, but then importantly\n populates some storage list, which we can then use to make plots.\n \"\"\"\n assert self._bias is not None\n assert self._variance is not None\n\n lambdas_to_test = self.lambdas_to_test\n # print(self.lambdas_to_test)\n mdp = self.mdp\n composer = 
self.composer\n num_runs_each = self.num_runs_each\n rollout_depth = self.rollout_depth\n\n # lambdas_to_test.reverse()\n # funcs = []\n\n print(\"TODO: I know that it's a scoping and reference problem. Maybe use partials?\")\n\n # There's a really annoying referencing problem here. Let's see how it goes.\n funcs = [(lam, (lambda l: lambda s: composer.get_best_action_td_lambda(s, rollout_depth, gamma=0.99, lam=l))(lam))\n for lam in lambdas_to_test]\n\n # print(funcs)\n\n funcs.append((\"OptimalVariance\",\n lambda s: composer.get_best_action_for_bias_variance(s, rollout_depth, self._bias, self._variance, gamma=0.99)))\n\n funcs.append((\"OptimalCovariance\",\n lambda s: composer.get_best_action_for_bias_covariance(s, rollout_depth, self._bias, self._covariance, gamma=0.99)))\n\n\n # for lam in lambdas_to_test:\n for key, func in funcs:\n all_rewards = []\n for _ in range(num_runs_each):\n mdp.reset()\n state = deepcopy(mdp.init_state)\n state = np.asarray(state.features())\n reward_so_far = 0.0\n while True:\n # state = torch.from_numpy(state).float().unsqueeze(0).to(\"cuda\")\n # action = composer.get_best_action_td_lambda(state, rollout_depth, gamma=0.99, lam=lam)\n action = func(state)\n # print(action)\n reward, next_state = mdp.execute_agent_action(action)\n reward_so_far += reward\n game_over = mdp.game_over if hasattr(mdp, 'game_over') else False\n if next_state.is_terminal() or game_over:\n break\n\n state = np.asarray(next_state.features())\n self.results[key].append((training_steps, reward_so_far))\n all_rewards.append(reward_so_far)\n all_rewards = np.asarray(all_rewards)\n print(f\"{num_runs_each} runs: Key={key}, Reward={np.mean(all_rewards)} ({np.std(all_rewards)})\")\n print(all_rewards)\n\n def write_graphs(self):\n plt.figure()\n for lam, vals in self.results.items():\n xs, ys = zip(*vals)\n ax = sns.lineplot(x=xs, y=ys, label=f\"Lam={lam}\")\n\n plt.savefig(os.path.join(self.logdir, \"results.png\"))\n # plt.show()\n plt.clf()\n\n\n# def evaluate_different_models(mdp, composer, num_runs_each=1, training_steps=None):\n# # Somehow I want to also graph this... How should I do that?\n# # I could make this a class, and keep track of past things. But that does\n# # seem heavy-handed. 
How about I start by just printing them out...\n# lambdas_to_test = [0.0, 0.5, 1.0]\n# rollout_depth = 5\n\n# for lam in lambdas_to_test:\n# all_rewards = []\n# for _ in range(num_runs_each):\n# mdp.reset()\n# state = deepcopy(mdp.init_state)\n# state = np.asarray(state.features())\n# reward_so_far = 0.0\n# while True:\n# # state = torch.from_numpy(state).float().unsqueeze(0).to(\"cuda\")\n# action = composer.get_best_action_td_lambda(state, rollout_depth, gamma=0.99, lam=lam)\n# reward, next_state = mdp.execute_agent_action(action)\n# reward_so_far += reward\n# game_over = mdp.game_over if hasattr(mdp, 'game_over') else False\n# if next_state.is_terminal() or game_over:\n# break\n\n# state = np.asarray(next_state.features())\n# all_rewards.append(reward_so_far)\n# all_rewards = np.asarray(all_rewards)\n# print(f\"{num_runs_each} runs: Lam={lam}, Reward={np.mean(all_rewards)} ({np.std(all_rewards)})\")\n# print(all_rewards)\n\ndef test_optimal(agent, mdp, num_episodes=1):\n # Going to return a total reward...\n scores = []\n\n for _ in range(num_episodes):\n score = 0\n\n mdp.reset()\n state = deepcopy(mdp.init_state)\n\n while True:\n action = agent.get_best_action(state.features())\n qvalues = agent.get_qvalues(state.features())\n # print(action)\n # print(qvalues)\n # print(state.features())\n reward, next_state = mdp.execute_agent_action(action)\n\n score += reward\n state = next_state\n\n game_over = mdp.game_over if hasattr(mdp, 'game_over') else False\n if state.is_terminal() or game_over:\n break\n scores.append(score)\n\n average_score = np.mean(scores)\n\n print(f\"score is {average_score}\")\n\n return average_score\n\ndef train(agent, mdp, episodes, steps, init_episodes=10, evaluate_every=25, *, save_every, logdir, world_model, composer):\n model_save_loc = os.path.join(logdir, 'model.tar')\n per_episode_scores = []\n last_10_scores = deque(maxlen=100)\n iteration_counter = 0\n state_ri_buffer = []\n\n # Observation and reward normalization\n reward_rms = RunningMeanStd()\n obs_rms = RunningMeanStd(shape=(1, 84, 84))\n\n last_save = time.time()\n\n ## Commenting this out for now while we switch to something more reasonable.\n if composer:\n evaluator = Evaluator(mdp, composer, num_runs_each=5, rollout_depth=5, logdir=logdir)\n\n for episode in range(episodes):\n\n if evaluate_every > 0 and episode % evaluate_every == 0 and episode != 0:\n print(f\"Evaluating on episode {episode}\")\n test_optimal(agent, mdp)\n # test_optimal(composer.q_agent, mdp)\n # test_optimal(agent, mdp)\n # print(\"just kidding\")\n # evaluator._set_bias_variance(10)\n\n\n # if composer:\n # print(\"Shouldn't be here?\")\n # evaluator._set_bias_variance(10)\n # evaluator.evaluate_different_models(training_steps=episode)\n # print(\"At some point definitely make this a CL-Arg\")\n # evaluator.write_graphs()\n\n if time.time() - last_save > save_every:\n print(\"Saving Model\")\n last_save = time.time()\n torch.save(agent.state_dict(), model_save_loc)\n\n mdp.reset()\n state = deepcopy(mdp.init_state)\n\n observation_buffer = []\n\n init_features = np.asarray(mdp.init_state.features())\n if len(init_features.shape) == 3:\n init_observation = init_features[-1, :, :]\n assert init_observation.shape == (84, 84), init_observation.shape\n else:\n init_observation = init_features\n\n #### FROM AKHIL\n # init_observation = np.array(mdp.init_state.features())[-1, :, :]\n # assert init_observation.shape == (84, 84), init_observation.shape\n observation_buffer.append(init_observation)\n\n score = 0.\n while True:\n 
iteration_counter += 1\n action = agent.act(state.features(), train_mode=True)\n reward, next_state = mdp.execute_agent_action(action)\n\n agent.step(state.features(), action, reward, next_state.features(), next_state.is_terminal(),\n num_steps=1, time_limit_truncated=next_state.is_time_limit_truncated())\n agent.update_epsilon()\n\n if world_model:\n world_model.step(state.features(), action, reward, next_state.features(), next_state.is_terminal(),\n num_steps=1, time_limit_truncated=next_state.is_time_limit_truncated())\n\n state = next_state\n score += reward\n\n game_over = mdp.game_over if hasattr(mdp, 'game_over') else False\n if state.is_terminal() or game_over:\n if agent.tensor_log:\n print(\"Is this happening too?\")\n agent.writer.add_scalar(\"Score\", score, episode)\n break\n\n last_10_scores.append(score)\n per_episode_scores.append(score)\n\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tEpsilon: {:.2f}'.format(episode, np.mean(last_10_scores), agent.epsilon), end=\"\")\n if episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tEpsilon: {:.2f}'.format(episode, np.mean(last_10_scores), agent.epsilon))\n\n return per_episode_scores, state_ri_buffer\n\n\ndef bayes_functional(*, mdp, args):\n \"\"\"\n This will like do the setup and stuff, and then return a singular number at the end.\n We would like this to return a function that has all the constants filled in.\n Because bayes_opt doesn't seem to have a good way of passing the same thing to\n everyone...\n \"\"\"\n def functional(lr_exp, tau_exp):\n print(f\"Running for {lr_exp} {tau_exp}\")\n state_dim = overall_mdp.env.observation_space.shape if args.pixel_observation else overall_mdp.env.observation_space.shape[0]\n action_dim = len(overall_mdp.actions)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # device = torch.device(\"cpu\")\n\n # We're going to pass in something like lr=4, and it'll translate it to 10^-4\n # And we'll bound at 0 and 5 or something.\n\n lr = 10**-lr_exp\n tau = 10**-tau_exp\n\n print(f\"Running for lr_exp={lr_exp} tau_exp={tau_exp}\")\n print(f\"AKA lr={lr} tau={tau}\")\n\n ddqn_agent = DQNAgent(state_size=state_dim, action_size=action_dim,\n seed=args.seed, device=device,\n name=\"GlobalDDQN\", lr=lr, tau=tau, tensor_log=args.tensor_log, use_double_dqn=True,\n exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,\n evaluation_epsilon=args.eval_eps,\n epsilon_linear_decay=args.epsilon_linear_decay,\n use_softmax_target=args.use_softmax_target)\n\n world_model = WorldModel(state_size=state_dim, action_size=action_dim,\n seed=args.seed, device=device,\n name=\"WorldModel\", lr=lr, tensor_log=args.tensor_log,# use_double_dqn=True,\n writer = ddqn_agent.writer, # Because I'm concerned it's over-writing...\n #exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,\n #evaluation_epsilon=args.eval_eps,\n #epsilon_linear_decay=args.epsilon_linear_decay\n )\n\n\n composer = Composer(\n q_agent=ddqn_agent,\n world_model=world_model,\n action_size=action_dim,\n device=device)\n\n train(\n ddqn_agent, overall_mdp, args.episodes, args.steps,\n save_every=args.save_every, logdir=logdir, world_model=world_model,\n composer=composer,\n evaluate_every=0)\n\n print(\"Boom, training complete. 
Now testing optimal!\")\n val = test_optimal(ddqn_agent, mdp, num_episodes=25) \n return val\n\n return functional\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--experiment_name\", type=str, help=\"Experiment Name\")\n parser.add_argument(\"--seed\", type=int, help=\"Random seed for this run (default=0)\", default=0)\n parser.add_argument(\"--episodes\", type=int, help=\"# episodes\", default=NUM_EPISODES)\n parser.add_argument(\"--steps\", type=int, help=\"# steps\", default=NUM_STEPS)\n parser.add_argument(\"--render\", type=bool, help=\"Render the mdp env\", default=False)\n parser.add_argument(\"--pixel_observation\", action='store_true', help=\"Images / Dense input\", default=False)\n parser.add_argument(\"--exploration_method\", type=str, default=\"eps-greedy\")\n parser.add_argument(\"--eval_eps\", type=float, default=0.05)\n parser.add_argument(\"--tensor_log\", default=False, action='store_true', help=\"Include this option if you want logging.\")\n parser.add_argument(\"--env\", type=str, default=\"Acrobot-v1\")\n parser.add_argument(\"--save_every\", type=int, help=\"Save every n seconds\", default=60)\n parser.add_argument(\"--mode\", type=str, help=\"'train' or 'view'\", default='train')\n parser.add_argument(\"--epsilon_linear_decay\", type=int, help=\"'train' or 'view'\", default=100000)\n parser.add_argument(\"--use_softmax_target\", default=False, action='store_true', help='When calculating backups, do you use the max or the softmax?')\n parser.add_argument(\"--learning_rate\", default=1e-3, type=float, help='What do you think!')\n parser.add_argument(\"--tau\", default=1e-3, type=float, help='Target copying rate')\n parser.add_argument(\"--evaluate_every\", default=25, type=int, help='Expensive evaluation step for tracking')\n parser.add_argument(\"--use_online_composer\", default=False, action=\"store_true\", help='If you include this option, the model is used to make more accurate Q updates')\n parser.add_argument(\"--num_rollouts\", default=5, type=int, help='Only used if use_online_composer')\n # parser.add_argument(\"--use_world_model\", default=False, action='store_true', help=\"Include this option if you want to see how a world model trains.\")\n args = parser.parse_args()\n\n logdir = create_log_dir(args.experiment_name)\n model_save_loc = os.path.join(logdir, 'model.tar')\n # learning_rate = 1e-3 # 0.00025 for pong\n\n overall_mdp = GymMDP(env_name=args.env, pixel_observation=args.pixel_observation, render=args.render,\n clip_rewards=False, term_func=None, seed=args.seed)\n ### THIS ONE WORKS FINE SO LONG AS YOU HAVE PIXEL OBSERVATIONS ####\n # overall_mdp = GymMDP(env_name=\"MontezumaRevengeNoFrameskip-v0\", pixel_observation=args.pixel_observation, render=args.render,\n # clip_rewards=False, term_func=None, seed=args.seed)\n ### END ###\n # overall_mdp = GymMDP(env_name=\"MontezumaRevengeNoFrameskip-v4\", pixel_observation=args.pixel_observation, render=args.render,\n # clip_rewards=False, term_func=None, seed=args.seed)\n # overall_mdp = GymMDP(env_name=\"CartPole-v0\", pixel_observation=args.pixel_observation, render=args.render,\n # clip_rewards=False, term_func=None, seed=args.seed)\n\n # overall_mdp = LunarLanderMDP(render=args.render, seed=args.seed)\n\n state_dim = overall_mdp.env.observation_space.shape if args.pixel_observation else overall_mdp.env.observation_space.shape[0]\n action_dim = len(overall_mdp.actions)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # device = 
torch.device(\"cpu\")\n\n\n if args.use_online_composer:\n world_model = WorldModel(state_size=state_dim, action_size=action_dim,\n seed=args.seed, device=device,\n name=\"WorldModel\", lr=args.learning_rate, tensor_log=args.tensor_log,# use_double_dqn=True,\n # writer = agent.writer, # Because I'm concerned it's over-writing...\n #exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,\n #evaluation_epsilon=args.eval_eps,\n #epsilon_linear_decay=args.epsilon_linear_decay\n )\n\n agent = OnlineComposer(\n world_model=world_model, num_rollouts=args.num_rollouts,\n state_size=state_dim, action_size=action_dim,\n seed=args.seed, device=device,\n name=\"OnlineComposer\",\n mixing_speed=0.9999,\n lr=args.learning_rate, tau=args.tau,\n tensor_log=args.tensor_log, use_double_dqn=True,\n writer = world_model.writer, # Because I'm concerned it's oevr-writing.\n exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,\n evaluation_epsilon=args.eval_eps,\n epsilon_linear_decay=args.epsilon_linear_decay,\n use_softmax_target=args.use_softmax_target)\n\n world_model = None\n composer = None\n \n else:\n agent = DQNAgent(state_size=state_dim, action_size=action_dim,\n seed=args.seed, device=device,\n name=\"GlobalDDQN\",\n lr=args.learning_rate, tau=args.tau,\n tensor_log=args.tensor_log, use_double_dqn=True,\n exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,\n evaluation_epsilon=args.eval_eps,\n epsilon_linear_decay=args.epsilon_linear_decay,\n use_softmax_target=args.use_softmax_target)\n\n world_model = WorldModel(state_size=state_dim, action_size=action_dim,\n seed=args.seed, device=device,\n name=\"WorldModel\", lr=args.learning_rate, tensor_log=args.tensor_log,# use_double_dqn=True,\n writer = agent.writer, # Because I'm concerned it's over-writing...\n #exploration_method=args.exploration_method, pixel_observation=args.pixel_observation,\n #evaluation_epsilon=args.eval_eps,\n #epsilon_linear_decay=args.epsilon_linear_decay\n )\n\n\n composer = Composer(\n q_agent=agent,\n world_model=world_model,\n action_size=action_dim,\n device=device)\n\n # data = collect_data_for_bias_variance_calculation(overall_mdp, ddqn_agent, 1)\n # bias, variance = composer.create_bias_variance_from_data(data, 5)\n\n\n if args.mode == 'train':\n ddqn_episode_scores, s_ri_buffer = train(\n agent, overall_mdp, args.episodes, args.steps, save_every=args.save_every, logdir=logdir, world_model=world_model,\n composer=composer, evaluate_every=args.evaluate_every)\n save_all_scores(args.experiment_name, logdir, args.seed, ddqn_episode_scores)\n elif args.mode == 'view':\n print('waow')\n print(model_save_loc)\n agent.load_state_dict(torch.load(model_save_loc))\n test_render(agent, overall_mdp)\n pass\n elif args.mode == 'hyper':\n from bayes_opt import BayesianOptimization\n f = bayes_functional(mdp=overall_mdp, args=args)\n pbounds = {'lr_exp': (1, 5), 'tau_exp': (1,5)}\n optimizer = BayesianOptimization(\n f=f,\n pbounds=pbounds,\n random_state=1,\n )\n\n optimizer.maximize(\n init_points=5,\n n_iter=10,\n )\n print(optimizer.max)\n for i, res in enumerate(optimizer.res):\n print(\"Iteration {}: \\n\\t{}\".format(i, res))\n import pdb; pdb.set_trace()\n print('bingester')\n\n else:\n raise Exception(\"HEELLOOO\")\n"
] | [
[
"numpy.issubdtype",
"numpy.expand_dims"
],
[
"torch.load",
"numpy.asarray",
"numpy.std",
"matplotlib.pyplot.clf",
"numpy.mean",
"torch.cuda.is_available",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
djsegal/metis | [
"54b84108a58d3e95679b519eb361e6916a693709"
] | [
"X_airbnb_revisited/airbnb_pricer/utils/get_dist_to_clusters.py"
] | [
"import numpy as np\nimport pandas as pd\n\nfrom time import sleep\nfrom airbnb_pricer.utils.async_run import async_run\n\nmile_per_degree__latitude = 111.32 * 0.621371\nmile_per_degree__longitude = 84.35 * 0.621371\n\ndef get_dist_to_clusters(location_data, cluster_data):\n location_data = location_data.copy()\n cluster_data = cluster_data.copy()\n\n location_data[\"x\"] = location_data[\"lon\"] * mile_per_degree__longitude\n location_data[\"y\"] = location_data[\"lat\"] * mile_per_degree__latitude\n\n cluster_data[\"x\"] = cluster_data[\"lon\"] * mile_per_degree__longitude\n cluster_data[\"y\"] = cluster_data[\"lat\"] * mile_per_degree__latitude\n\n def _get_cluster_dists(cur_input):\n cur_x, cur_y, cur_is_bkn = cur_input\n\n data_dict = {\n \"center\": cluster_data[cluster_data.is_com],\n \"hub\": cluster_data[\n ~cluster_data.is_com & (cluster_data.is_bkn == cur_is_bkn)\n ]\n }\n\n sleep(0.01)\n\n cur_dists = {}\n\n for cur_key, sub_data in data_dict.items():\n cur_dist = ( sub_data.x - cur_x ) ** 2\n cur_dist += ( sub_data.y - cur_y ) ** 2\n cur_dist = np.min(np.sqrt(cur_dist))\n\n cur_dists[cur_key] = cur_dist\n\n return cur_dists\n\n cluster_dists_iter = list(\n zip(location_data[\"x\"], location_data[\"y\"], location_data[\"is_brooklyn\"])\n )\n\n dist_list = async_run(\n _get_cluster_dists, cluster_dists_iter\n )\n\n return pd.DataFrame(dist_list)\n"
] | [
[
"numpy.sqrt",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
techierishi/flask-vue-sig-verifier | [
"40ed04422e3cbd02c2df64a6af2c4f8101a21cad"
] | [
"server/sigrecogtf.py"
] | [
"import cv2\nimport os\nimport tensorflow as tf\nimport preprocessor\n\n\ndef main(author):\n print('OpenCV version {} '.format(cv2.__version__))\n\n current_dir = os.path.dirname(__file__)\n\n training_folder = os.path.join(current_dir, 'data/training/', author)\n test_folder = os.path.join(current_dir, 'data/test/', author)\n\n training_data = []\n training_labels = []\n for filename in os.listdir(training_folder):\n img = cv2.imread(os.path.join(training_folder, filename), 0)\n if img is not None:\n data = preprocessor.prepare(img)\n training_data.append(data)\n training_labels.append([0, 1] if \"genuine\" in filename else [1, 0])\n\n test_data = []\n test_labels = []\n for filename in os.listdir(test_folder):\n img = cv2.imread(os.path.join(test_folder, filename), 0)\n if img is not None:\n data = preprocessor.prepare(img)\n test_data.append(data)\n test_labels.append([0, 1] if \"genuine\" in filename else [1, 0])\n\n return sgd(training_data, training_labels, test_data, test_labels)\n\n\n# Softmax Regression Model\ndef regression(x):\n W = tf.Variable(tf.zeros([901, 2]), name=\"W\")\n b = tf.Variable(tf.zeros([2]), name=\"b\")\n y = tf.nn.softmax(tf.matmul(x, W) + b)\n return y, [W, b]\n\n\ndef sgd(training_data, training_labels, test_data, test_labels):\n tf.compat.v1.disable_eager_execution()\n # model\n with tf.compat.v1.variable_scope(\"regression\"):\n x = tf.compat.v1.placeholder(tf.float32, [None, 901])\n y, variables = regression(x)\n\n # train\n y_ = tf.compat.v1.placeholder(\"float\", [None, 2])\n cross_entropy = -tf.reduce_sum(y_ * tf.compat.v1.log(y))\n train_step = tf.compat.v1.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n correct_prediction = tf.compat.v1.equal(tf.compat.v1.argmax(y, 1), tf.compat.v1.argmax(y_, 1))\n accuracy = tf.compat.v1.reduce_mean(tf.compat.v1.cast(correct_prediction, tf.compat.v1.float32))\n\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n sess.run(train_step, feed_dict={x: training_data, y_: training_labels})\n testresult = sess.run(accuracy, feed_dict={x: test_data, y_: test_labels})\n print('testresult',testresult)\n return testresult\n\nif __name__ == '__main__':\n main('029')"
] | [
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.compat.v1.argmax",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.train.GradientDescentOptimizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
huang-ziyuan/EssentialMC2 | [
"87141df94c1ac8e426ceec071720b97f5b9d3b88",
"87141df94c1ac8e426ceec071720b97f5b9d3b88",
"87141df94c1ac8e426ceec071720b97f5b9d3b88"
] | [
"essmc2/transforms/tensor.py",
"papers/CVPR2021-MOSI/impls/transforms/video.py",
"essmc2/hooks/checkpoint.py"
] | [
"# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.\n\nimport numpy as np\nimport torch\n\nfrom .registry import TRANSFORMS\n\n\ndef to_tensor(data):\n if isinstance(data, torch.Tensor):\n return data\n elif isinstance(data, np.ndarray):\n return torch.from_numpy(data)\n elif isinstance(data, list):\n return torch.tensor(data)\n elif isinstance(data, int):\n return torch.LongTensor([data])\n elif isinstance(data, float):\n return torch.FloatTensor([data])\n else:\n raise TypeError(f\"Unsupported type {type(data)}\")\n\n\[email protected]_class()\nclass ToTensor(object):\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, item):\n for key in self.keys:\n item[key] = to_tensor(item[key])\n return item\n\n\[email protected]_class()\nclass Select(object):\n def __init__(self, keys, meta_keys=()):\n self.keys = keys\n if not isinstance(meta_keys, (list, tuple)):\n raise TypeError(f\"Expected meta_keys to be list or tuple, got {type(meta_keys)}\")\n self.meta_keys = meta_keys\n\n def __call__(self, item):\n data = {}\n for key in self.keys:\n data[key] = item[key]\n if \"meta\" in item and len(self.meta_keys) > 0:\n data[\"meta\"] = {}\n for key in self.meta_keys:\n data[\"meta\"][key] = item['meta'][key]\n return data\n\n\[email protected]_class()\nclass TensorToGPU(object):\n def __init__(self, keys, device_id=None):\n self.keys = keys\n self.device_id = device_id\n\n def __call__(self, item):\n ret = {}\n for key, value in item.items():\n if key in self.keys and isinstance(value, torch.Tensor) and torch.cuda.is_available():\n ret[key] = value.cuda(self.device_id, non_blocking=True)\n else:\n ret[key] = value\n return ret\n",
"# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.\n\n# ColorJitterVideo class is modified from\n# https://github.com/TengdaHan/DPC/blob/master/utils/augmentation.py\n# MIT License\n#\n# Copyright (c) 2019 Tengda Han\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numbers\nimport random\n\nimport torch\nfrom torchvision.transforms import Lambda, Compose\n\nfrom essmc2.transforms import VideoTransform, TRANSFORMS\n\n\[email protected]_class()\nclass ColorJitterVideo(VideoTransform):\n \"\"\" Modified from https://github.com/TengdaHan/DPC/blob/master/utils/augmentation.py.\n Randomly change the brightness, contrast and saturation of an image.\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. 
Should be non negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n grayscale (float): possibility to transform the video to grayscale.\n Should have a value range of [0, 1]\n consistent (bool): indicates whether or not to keep all the color transformations consistent for all the frames.\n shuffle (bool): indicates whether or not to shuffle the sequence of the augmentations.\n gray_first (bool): indicates whether or not to put grayscale transform first.\n \"\"\"\n\n def __init__(\n self,\n brightness=0, contrast=0, saturation=0, hue=0, grayscale=0,\n consistent=False, shuffle=True, gray_first=True,\n **kwargs\n ):\n super(ColorJitterVideo, self).__init__(**kwargs)\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n self.grayscale = grayscale\n self.consistent = consistent\n self.shuffle = shuffle\n self.gray_first = gray_first\n\n @staticmethod\n def _check_input(value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - float(value), center + float(value)]\n if clip_first_on_zero:\n value[0] = max(value[0], 0.0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with lenght 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n def _get_transform(self, T, device):\n \"\"\"Get a randomized transform to be applied on image.\n Arguments are same as that of __init__.\n Arg:\n T (int): number of frames. 
Used when consistent = False.\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n transforms = []\n if self.brightness is not None:\n if self.consistent:\n brightness_factor = random.uniform(self.brightness[0], self.brightness[1])\n else:\n brightness_factor = torch.empty([1, T, 1, 1], device=device).uniform_(self.brightness[0],\n self.brightness[1])\n transforms.append(Lambda(lambda frame: adjust_brightness(frame, brightness_factor)))\n\n if self.contrast is not None:\n if self.consistent:\n contrast_factor = random.uniform(self.contrast[0], self.contrast[1])\n else:\n contrast_factor = torch.empty([1, T, 1, 1], device=device).uniform_(self.contrast[0], self.contrast[1])\n transforms.append(Lambda(lambda frame: adjust_contrast(frame, contrast_factor)))\n\n if self.saturation is not None:\n if self.consistent:\n saturation_factor = random.uniform(self.saturation[0], self.saturation[1])\n else:\n saturation_factor = torch.empty([1, T, 1, 1], device=device).uniform_(self.saturation[0],\n self.saturation[1])\n transforms.append(Lambda(lambda frame: adjust_saturation(frame, saturation_factor)))\n\n if self.hue is not None:\n if self.consistent:\n hue_factor = random.uniform(self.hue[0], self.hue[1])\n else:\n hue_factor = torch.empty([T, 1, 1], device=device).uniform_(self.hue[0], self.hue[1])\n transforms.append(Lambda(lambda frame: adjust_hue(frame, hue_factor)))\n\n if self.shuffle:\n random.shuffle(transforms)\n\n if random.uniform(0, 1) < self.grayscale:\n gray_transform = Lambda(lambda frame: rgb_to_grayscale(frame))\n if self.gray_first:\n transforms.insert(0, gray_transform)\n else:\n transforms.append(gray_transform)\n\n transform = Compose(transforms)\n\n return transform\n\n def __call__(self, item):\n clip = item[self.input_key]\n raw_shape = clip.shape # (C, T, H, W)\n device = clip.device\n T = raw_shape[1]\n transform = self._get_transform(T, device)\n clip = transform(clip)\n assert clip.shape == raw_shape\n item[self.output_key] = clip\n return item\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n format_string += ', grayscale={0})'.format(self.grayscale)\n return format_string\n\n\ndef _is_tensor_a_torch_image(input):\n return input.ndim >= 2\n\n\ndef _blend(img1, img2, ratio):\n # type: (Tensor, Tensor, float) -> Tensor\n bound = 1 if img1.dtype in [torch.half, torch.float32, torch.float64] else 255\n return (ratio * img1 + (1 - ratio) * img2).clamp(0, bound).to(img1.dtype)\n\n\ndef rgb_to_grayscale(img):\n # type: (Tensor) -> Tensor\n \"\"\"Convert the given RGB Image Tensor to Grayscale.\n For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which\n is L = R * 0.2989 + G * 0.5870 + B * 0.1140\n Args:\n img (Tensor): Image to be converted to Grayscale in the form [C, H, W].\n Returns:\n Tensor: Grayscale image.\n Args:\n clip (torch.tensor): Size is (T, H, W, C)\n Return:\n clip (torch.tensor): Size is (T, H, W, C)\n \"\"\"\n orig_dtype = img.dtype\n rgb_convert = torch.tensor([0.299, 0.587, 0.114])\n\n assert img.shape[0] == 3, \"First dimension need to be 3 Channels\"\n if img.is_cuda:\n rgb_convert = rgb_convert.to(img.device)\n\n img = img.float().permute(1, 2, 3, 0).matmul(rgb_convert).to(orig_dtype)\n return torch.stack([img, img, img], 
0)\n\n\ndef _rgb2hsv(img):\n r, g, b = img.unbind(0)\n\n maxc, _ = torch.max(img, dim=0)\n minc, _ = torch.min(img, dim=0)\n\n eqc = maxc == minc\n cr = maxc - minc\n s = cr / torch.where(eqc, maxc.new_ones(()), maxc)\n cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)\n rc = (maxc - r) / cr_divisor\n gc = (maxc - g) / cr_divisor\n bc = (maxc - b) / cr_divisor\n\n hr = (maxc == r) * (bc - gc)\n hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)\n hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)\n h = (hr + hg + hb)\n h = torch.fmod((h / 6.0 + 1.0), 1.0)\n return torch.stack((h, s, maxc))\n\n\ndef _hsv2rgb(img):\n l = len(img.shape)\n h, s, v = img.unbind(0)\n i = torch.floor(h * 6.0)\n f = (h * 6.0) - i\n i = i.to(dtype=torch.int32)\n\n p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)\n q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)\n t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)\n i = i % 6\n\n if l == 3:\n tmp = torch.arange(6)[:, None, None]\n elif l == 4:\n tmp = torch.arange(6)[:, None, None, None]\n\n if img.is_cuda:\n tmp = tmp.to(img.device)\n\n mask = i == tmp # (H, W) == (6, H, W)\n\n a1 = torch.stack((v, q, p, p, t, v))\n a2 = torch.stack((t, v, v, q, p, p))\n a3 = torch.stack((p, p, t, v, v, q))\n a4 = torch.stack((a1, a2, a3)) # (3, 6, H, W)\n\n if l == 3:\n return torch.einsum(\"ijk, xijk -> xjk\", mask.to(dtype=img.dtype), a4) # (C, H, W)\n elif l == 4:\n return torch.einsum(\"itjk, xitjk -> xtjk\", mask.to(dtype=img.dtype), a4) # (C, T, H, W)\n\n\ndef adjust_brightness(img, brightness_factor):\n # type: (Tensor, float) -> Tensor\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return _blend(img, torch.zeros_like(img), brightness_factor)\n\n\ndef adjust_contrast(img, contrast_factor):\n # type: (Tensor, float) -> Tensor\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n mean = torch.mean(rgb_to_grayscale(img).to(torch.float), dim=(-4, -2, -1), keepdim=True)\n\n return _blend(img, mean, contrast_factor)\n\n\ndef adjust_saturation(img, saturation_factor):\n # type: (Tensor, float) -> Tensor\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n return _blend(img, rgb_to_grayscale(img), saturation_factor)\n\n\ndef adjust_hue(img, hue_factor):\n \"\"\"Adjust hue of an image.\n The image hue is adjusted by converting the image to HSV and\n cyclically shifting the intensities in the hue channel (H).\n The image is then converted back to original image mode.\n `hue_factor` is the amount of shift in H channel and must be in the\n interval `[-0.5, 0.5]`.\n See `Hue`_ for more details.\n .. _Hue: https://en.wikipedia.org/wiki/Hue\n Args:\n img (Tensor): Image to be adjusted. Image type is either uint8 or float.\n hue_factor (float): How much to shift the hue channel. Should be in\n [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in\n HSV space in positive and negative direction respectively.\n 0 means no shift. 
Therefore, both -0.5 and 0.5 will give an image\n with complementary colors while 0 gives the original image.\n Returns:\n Tensor: Hue adjusted image.\n \"\"\"\n if isinstance(hue_factor, float) and not (-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))\n elif isinstance(hue_factor, torch.Tensor) and not (\n (-0.5 <= hue_factor).sum() == hue_factor.shape[0] and (hue_factor <= 0.5).sum() == hue_factor.shape[0]):\n raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_tensor_a_torch_image(img):\n raise TypeError('tensor is not a torch image.')\n\n orig_dtype = img.dtype\n if img.dtype == torch.uint8:\n img = img.to(dtype=torch.float32) / 255.0\n\n img = _rgb2hsv(img)\n h, s, v = img.unbind(0)\n h += hue_factor\n h = h % 1.0\n img = torch.stack((h, s, v))\n img_hue_adj = _hsv2rgb(img)\n\n if orig_dtype == torch.uint8:\n img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)\n\n return img_hue_adj\n",
"# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.\nimport os.path as osp\nimport sys\nimport warnings\n\nimport torch\nimport torch.distributed as du\n\nfrom essmc2.utils.file_systems import FS\nfrom .hook import Hook\nfrom .registry import HOOKS\n\n_DEFAULT_CHECKPOINT_PRIORITY = 300\n\n\[email protected]_class()\nclass CheckpointHook(Hook):\n \"\"\" Checkpoint resume or save hook.\n\n Args:\n interval (int): Save interval, by epoch.\n save_best (bool): Save the best checkpoint by a metric key, default is False.\n save_best_by (str): How to get the best the checkpoint by the metric key, default is ''.\n + means the higher the best (default).\n - means the lower the best.\n E.g. +acc@1, -err@1, acc@5(same as +acc@5)\n \"\"\"\n\n def __init__(self,\n interval=1,\n save_best=False,\n save_best_by=\"\",\n **kwargs):\n priority = kwargs.pop(\"priority\") if \"priority\" in kwargs else _DEFAULT_CHECKPOINT_PRIORITY\n super(CheckpointHook, self).__init__(priority=priority)\n self.interval = interval\n self.save_best = save_best\n self.save_best_by = save_best_by\n if self.save_best and not self.save_best_by:\n warnings.warn(\"CheckpointHook: Parameter 'save_best_by' is not set, turn off save_best function.\")\n self.save_best = False\n self.higher_the_best = True\n if self.save_best:\n if self.save_best_by.startswith(\"+\"):\n self.save_best_by = self.save_best_by[1:]\n elif self.save_best_by.startswith(\"-\"):\n self.save_best_by = self.save_best_by[1:]\n self.higher_the_best = False\n if self.save_best and not self.save_best_by:\n warnings.warn(\"CheckpointHook: Parameter 'save_best_by' is not valid, turn off save_best function.\")\n self.save_best = False\n self._last_best = None if not self.save_best else (\n sys.float_info.min if self.higher_the_best else sys.float_info.max\n )\n\n def before_solve(self, solver):\n if solver.resume_from is None:\n return\n with FS.get_fs_client(solver.resume_from) as client:\n if not client.exists(solver.resume_from):\n solver.logger.error(f\"File not exists {solver.resume_from}\")\n return\n solver.logger.info(f\"Loading checkpoint from {solver.resume_from}\")\n local_file = client.get_object_to_local_file(solver.resume_from)\n checkpoint = torch.load(local_file)\n solver.load_checkpoint(checkpoint)\n if self.save_best and \"_CheckpointHook_best\" in checkpoint:\n self._last_best = checkpoint[\"_CheckpointHook_best\"]\n\n def after_epoch(self, solver):\n if du.is_available() and du.is_initialized() and du.get_rank() != 0:\n return\n if (solver.epoch + 1) % self.interval == 0:\n solver.logger.info(f'Saving checkpoint after {solver.epoch + solver.num_folds} epochs')\n checkpoint = solver.save_checkpoint()\n if checkpoint is None or len(checkpoint) == 0:\n return\n cur_is_best = False\n if self.save_best:\n # Try to get current state from epoch_outputs[\"eval\"]\n cur_state = None \\\n if self.save_best_by not in solver.epoch_outputs[\"eval\"] \\\n else solver.epoch_outputs[\"eval\"][self.save_best_by]\n # Try to get current state from agg_iter_outputs[\"eval\"] if do_final_eval is False\n if cur_state is None:\n cur_state = None \\\n if self.save_best_by not in solver.agg_iter_outputs[\"eval\"] \\\n else solver.agg_iter_outputs[\"eval\"][self.save_best_by]\n # Try to get current state from agg_iter_outputs[\"train\"] if no evaluation\n if cur_state is None:\n cur_state = None \\\n if self.save_best_by not in solver.agg_iter_outputs[\"train\"] \\\n else solver.agg_iter_outputs[\"train\"][self.save_best_by]\n if cur_state is not None:\n if 
self.higher_the_best and cur_state > self._last_best:\n self._last_best = cur_state\n cur_is_best = True\n elif not self.higher_the_best and cur_state < self._last_best:\n self._last_best = cur_state\n cur_is_best = True\n checkpoint[\"_CheckpointHook_best\"] = self._last_best\n # minus 1, means index\n save_path = osp.join(solver.work_dir, \"epoch-{:05d}.pth\".format(solver.epoch + solver.num_folds))\n\n with FS.get_fs_client(save_path) as client:\n local_file = client.convert_to_local_path(save_path)\n with open(local_file, \"wb\") as f:\n torch.save(checkpoint, f)\n client.put_object_from_local_file(local_file, save_path)\n\n if cur_is_best:\n best_path = osp.join(solver.work_dir, f\"best.pth\")\n client.make_link(best_path, save_path)\n"
] | [
[
"torch.LongTensor",
"torch.from_numpy",
"torch.tensor",
"torch.FloatTensor",
"torch.cuda.is_available"
],
[
"torch.fmod",
"torch.max",
"torch.floor",
"torch.empty",
"torch.min",
"torch.zeros_like",
"torch.tensor",
"torch.arange",
"torch.stack",
"torch.clamp"
],
[
"torch.load",
"torch.distributed.is_initialized",
"torch.distributed.is_available",
"torch.distributed.get_rank",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AppleHolic/fairseq | [
"ba340f2f4c2c541ceb126f87b219864058565505"
] | [
"fairseq/progress_bar.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nWrapper around various loggers and progress bars (e.g., tqdm).\n\"\"\"\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport json\nimport logging\nfrom numbers import Number\nimport os\nimport sys\n\nimport torch\n\nfrom fairseq import distributed_utils\nfrom fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):\n if args.log_format is None:\n args.log_format = no_progress_bar if args.no_progress_bar else default\n\n if args.log_format == 'tqdm' and not sys.stderr.isatty():\n args.log_format = 'simple'\n\n if args.log_format == 'json':\n bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)\n elif args.log_format == 'none':\n bar = noop_progress_bar(iterator, epoch, prefix)\n elif args.log_format == 'simple':\n bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)\n elif args.log_format == 'tqdm':\n bar = tqdm_progress_bar(iterator, epoch, prefix)\n else:\n raise ValueError('Unknown log format: {}'.format(args.log_format))\n\n if args.tensorboard_logdir and distributed_utils.is_master(args):\n try:\n # [FB only] custom wrapper for TensorBoard\n import palaas # noqa\n from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper\n bar = fb_tbmf_wrapper(bar, args, args.log_interval)\n except ImportError:\n bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)\n\n return bar\n\n\ndef format_stat(stat):\n if isinstance(stat, Number):\n stat = '{:g}'.format(stat)\n elif isinstance(stat, AverageMeter):\n stat = '{:.3f}'.format(stat.avg)\n elif isinstance(stat, TimeMeter):\n stat = '{:g}'.format(round(stat.avg))\n elif isinstance(stat, StopwatchMeter):\n stat = '{:g}'.format(round(stat.sum))\n elif torch.is_tensor(stat):\n stat = stat.tolist()\n return stat\n\n\nclass progress_bar(object):\n \"\"\"Abstract class for progress bars.\"\"\"\n def __init__(self, iterable, epoch=None, prefix=None):\n self.iterable = iterable\n self.offset = getattr(iterable, 'offset', 0)\n self.epoch = epoch\n self.prefix = ''\n if epoch is not None:\n self.prefix += 'epoch {:03d}'.format(epoch)\n if prefix is not None:\n self.prefix += ' | {}'.format(prefix)\n\n def __len__(self):\n return len(self.iterable)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n return False\n\n def __iter__(self):\n raise NotImplementedError\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n raise NotImplementedError\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n raise NotImplementedError\n\n def _str_commas(self, stats):\n return ', '.join(key + '=' + stats[key].strip()\n for key in stats.keys())\n\n def _str_pipes(self, stats):\n return ' | '.join(key + ' ' + stats[key].strip()\n for key in stats.keys())\n\n def _format_stats(self, stats):\n postfix = OrderedDict(stats)\n # Preprocess stats according to datatype\n for key in postfix.keys():\n postfix[key] = str(format_stat(postfix[key]))\n return postfix\n\n\n@contextmanager\ndef rename_logger(logger, new_name):\n old_name = logger.name\n if new_name is not None:\n logger.name = new_name\n yield logger\n logger.name = old_name\n\n\nclass 
json_progress_bar(progress_bar):\n \"\"\"Log output in JSON format.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.stats = None\n self.tag = None\n\n def __iter__(self):\n size = float(len(self.iterable))\n for i, obj in enumerate(self.iterable, start=self.offset):\n yield obj\n if (\n self.stats is not None\n and i > 0\n and self.log_interval is not None\n and (i + 1) % self.log_interval == 0\n ):\n update = (\n self.epoch - 1 + float(i / size)\n if self.epoch is not None\n else None\n )\n stats = self._format_stats(self.stats, epoch=self.epoch, update=update)\n with rename_logger(logger, self.tag):\n logger.info(json.dumps(stats))\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.stats = stats\n self.tag = tag\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n self.stats = stats\n if tag is not None:\n self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])\n stats = self._format_stats(self.stats, epoch=self.epoch)\n with rename_logger(logger, tag):\n logger.info(json.dumps(stats))\n\n def _format_stats(self, stats, epoch=None, update=None):\n postfix = OrderedDict()\n if epoch is not None:\n postfix['epoch'] = epoch\n if update is not None:\n postfix['update'] = round(update, 3)\n # Preprocess stats according to datatype\n for key in stats.keys():\n postfix[key] = format_stat(stats[key])\n return postfix\n\n\nclass noop_progress_bar(progress_bar):\n \"\"\"No logging.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None):\n super().__init__(iterable, epoch, prefix)\n\n def __iter__(self):\n for obj in self.iterable:\n yield obj\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n pass\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n pass\n\n\nclass simple_progress_bar(progress_bar):\n \"\"\"A minimal logger for non-TTY environments.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.stats = None\n self.tag = None\n\n def __iter__(self):\n size = len(self.iterable)\n for i, obj in enumerate(self.iterable, start=self.offset):\n yield obj\n if (\n self.stats is not None\n and i > 0\n and self.log_interval is not None\n and (i + 1) % self.log_interval == 0\n ):\n postfix = self._str_commas(self.stats)\n with rename_logger(logger, self.tag):\n logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix))\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.stats = self._format_stats(stats)\n self.tag = tag\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n with rename_logger(logger, tag):\n logger.info('{} | {}'.format(self.prefix, postfix))\n\n\nclass tqdm_progress_bar(progress_bar):\n \"\"\"Log to tqdm.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None):\n super().__init__(iterable, epoch, prefix)\n from tqdm import tqdm\n self.tqdm = tqdm(iterable, self.prefix, leave=False)\n\n def __iter__(self):\n return iter(self.tqdm)\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n 
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))\n\n\nclass tensorboard_log_wrapper(progress_bar):\n \"\"\"Log to tensorboard.\"\"\"\n\n def __init__(self, wrapped_bar, tensorboard_logdir, args):\n self.wrapped_bar = wrapped_bar\n self.tensorboard_logdir = tensorboard_logdir\n self.args = args\n\n try:\n from tensorboardX import SummaryWriter\n self.SummaryWriter = SummaryWriter\n self._writers = {}\n except ImportError:\n logger.warning(\n \"tensorboard or required dependencies not found, \"\n \"please see README for using tensorboard. (e.g. pip install tensorboardX)\"\n )\n self.SummaryWriter = None\n\n def _writer(self, key):\n if self.SummaryWriter is None:\n return None\n if key not in self._writers:\n self._writers[key] = self.SummaryWriter(\n os.path.join(self.tensorboard_logdir, key),\n )\n self._writers[key].add_text('args', str(vars(self.args)))\n self._writers[key].add_text('sys.argv', \" \".join(sys.argv))\n return self._writers[key]\n\n def __iter__(self):\n return iter(self.wrapped_bar)\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats to tensorboard.\"\"\"\n self._log_to_tensorboard(stats, tag, step)\n self.wrapped_bar.log(stats, tag=tag, step=step)\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n self._log_to_tensorboard(stats, tag, step)\n self.wrapped_bar.print(stats, tag=tag, step=step)\n\n def __exit__(self, *exc):\n for writer in getattr(self, '_writers', {}).values():\n writer.close()\n return False\n\n def _log_to_tensorboard(self, stats, tag=None, step=None):\n writer = self._writer(tag or '')\n if writer is None:\n return\n if step is None:\n step = stats['num_updates']\n for key in stats.keys() - {'num_updates'}:\n if isinstance(stats[key], AverageMeter):\n writer.add_scalar(key, stats[key].val, step)\n elif isinstance(stats[key], Number):\n writer.add_scalar(key, stats[key], step)\n"
] | [
[
"torch.is_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
prs-eth/graph-super-resolution | [
"bd5af4d9b1682c309262e4153f6529c9b0584bcb",
"bd5af4d9b1682c309262e4153f6529c9b0584bcb"
] | [
"data/diml.py",
"model/graph_sr_net.py"
] | [
"from pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import InterpolationMode, Resize\n\nfrom .utils import downsample, bicubic_with_mask, random_crop, random_rotate, random_horizontal_flip\n\nDIML_BASE_SIZE = (756, 1344)\n\n\nclass DIMLDataset(Dataset):\n\n def __init__(\n self,\n data_dir: str,\n resolution='HR',\n scale=1.0,\n crop_size=(128, 128),\n do_horizontal_flip=True,\n max_rotation_angle: int = 15,\n scale_interpolation=InterpolationMode.BILINEAR,\n rotation_interpolation=InterpolationMode.BILINEAR,\n image_transform=None,\n depth_transform=None,\n in_memory=True,\n split='train',\n crop_valid=False,\n crop_deterministic=False,\n scaling=8\n ):\n self.scale = scale\n self.crop_size = crop_size\n self.do_horizontal_flip = do_horizontal_flip\n self.max_rotation_angle = max_rotation_angle\n self.scale_interpolation = scale_interpolation\n self.rotation_interpolation = rotation_interpolation\n self.image_transform = image_transform\n self.depth_transform = depth_transform\n self.crop_valid = crop_valid\n self.crop_deterministic = crop_deterministic\n self.scaling = scaling\n data_dir = Path(data_dir)\n\n if max_rotation_angle > 0 and crop_deterministic:\n raise ValueError('Max rotation angle has to be zero when cropping deterministically')\n\n if split not in ('train', 'val', 'test'):\n raise ValueError(split)\n\n mmap_mode = None if in_memory else 'c'\n\n self.images = np.load(str(data_dir / f'npy/images_{split}_{resolution}.npy'), mmap_mode)\n self.depth_maps = np.load(str(data_dir / f'npy/depth_{split}_{resolution}.npy'), mmap_mode)\n assert len(self.images) == len(self.depth_maps)\n\n self.H, self.W = int(DIML_BASE_SIZE[0] * self.scale), int(DIML_BASE_SIZE[1] * self.scale)\n\n if self.crop_valid:\n if self.max_rotation_angle > 45:\n raise ValueError('When crop_valid=True, only rotation angles up to 45° are supported for now')\n\n # make sure that max rotation angle is valid, else decrease\n max_angle = np.floor(min(\n 2 * np.arctan\n ((np.sqrt(-(crop_size[0] ** 2) + self.H ** 2 + self.W ** 2) - self.W) / (crop_size[0] + self.H)),\n 2 * np.arctan\n ((np.sqrt(-(crop_size[1] ** 2) + self.W ** 2 + self.H ** 2) - self.H) / (crop_size[1] + self.W))\n ) * (180. / np.pi))\n\n if self.max_rotation_angle > max_angle:\n print(f'max rotation angle too large for given image size and crop size, decreased to {max_angle}')\n self.max_rotation_angle = max_angle\n\n def __getitem__(self, index):\n if self.crop_deterministic:\n num_crops_h, num_crops_w = self.H // self.crop_size[0], self.W // self.crop_size[1]\n im_index = index // (num_crops_h * num_crops_w)\n else:\n im_index = index\n\n image = torch.from_numpy(self.images[im_index].astype('float32')) / 255.\n depth_map = torch.from_numpy(self.depth_maps[im_index].astype('float32')).unsqueeze(0)\n resize = Resize((self.H, self.W), self.scale_interpolation)\n image, depth_map = resize(image), resize(depth_map)\n\n if self.do_horizontal_flip and not self.crop_deterministic:\n image, depth_map = random_horizontal_flip((image, depth_map))\n\n if self.max_rotation_angle > 0 and not self.crop_deterministic:\n image, depth_map = random_rotate((image, depth_map), self.max_rotation_angle, self.rotation_interpolation,\n crop_valid=self.crop_valid)\n # passing fill=np.nan to rotate sets all pixels to nan, so set it here explicitly\n depth_map[depth_map == 0.] 
= np.nan\n\n if self.crop_deterministic:\n crop_index = index % (num_crops_h * num_crops_w)\n crop_index_h, crop_index_w = crop_index // num_crops_w, crop_index % num_crops_w\n slice_h = slice(crop_index_h * self.crop_size[0], (crop_index_h + 1) * self.crop_size[0])\n slice_w = slice(crop_index_w * self.crop_size[1], (crop_index_w + 1) * self.crop_size[1])\n image, depth_map = image[:, slice_h, slice_w], depth_map[:, slice_h, slice_w]\n else:\n image, depth_map = random_crop((image, depth_map), self.crop_size)\n\n # apply user transforms\n if self.image_transform is not None:\n image = self.image_transform(image)\n if self.depth_transform is not None:\n depth_map = self.depth_transform(depth_map)\n\n source = downsample(depth_map.unsqueeze(0), self.scaling).squeeze().unsqueeze(0)\n\n mask_hr = (~torch.isnan(depth_map)).float()\n mask_lr = (~torch.isnan(source)).float()\n\n depth_map[mask_hr == 0.] = 0.\n source[mask_lr == 0.] = 0.\n\n y_bicubic = torch.from_numpy(\n bicubic_with_mask(source.squeeze().numpy(), mask_lr.squeeze().numpy(), self.scaling)).float()\n y_bicubic = y_bicubic.reshape((1, self.crop_size[0], self.crop_size[1]))\n\n return {'guide': image, 'y': depth_map, 'source': source, 'mask_hr': mask_hr, 'mask_lr': mask_lr,\n 'y_bicubic': y_bicubic}\n\n def __len__(self):\n if self.crop_deterministic:\n return len(self.depth_maps) * (self.H // self.crop_size[0]) * (self.W // self.crop_size[1])\n return len(self.depth_maps)\n",
"from math import log\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport segmentation_models_pytorch as smp\n\nfrom .functional import create_fixed_cupy_sparse_matrices, GraphQuadraticSolver\nfrom losses import l1_loss_func, mse_loss_func\n\nINPUT_DIM = 4\nFEATURE_DIM = 64\n\n\ndef get_neighbor_affinity_no_border(feature_map, mu, lambda_):\n B, M, H, W = feature_map.shape\n\n feature_map_padded = F.pad(feature_map, (1, 1, 1, 1), 'constant', 0)\n\n top = torch.mean((feature_map_padded[:, :, 0:-2, 1:-1] - feature_map)**2, dim=1, keepdim=True)\n bottom = torch.mean((feature_map_padded[:, :, 2:, 1:-1] - feature_map)**2, dim=1, keepdim=True)\n left = torch.mean((feature_map_padded[:, :, 1:-1, 0:-2] - feature_map)**2, dim=1, keepdim=True)\n right = torch.mean((feature_map_padded[:, :, 1:-1, 2:] - feature_map)**2, dim=1, keepdim=True)\n\n affinity = torch.cat([top, bottom, left, right], dim=1) / (1e-6 + mu**2)\n affinity = torch.exp(-affinity)\n\n border_remover = torch.ones((1, 4, H, W), device=feature_map.device)\n border_remover[0, 0, 0, :] = 0 # top\n border_remover[0, 1, -1, :] = 0 # bottom\n border_remover[0, 2, :, 0] = 0 # left\n border_remover[0, 3, :, -1] = 0 # right\n\n affinity = affinity * border_remover\n center = torch.sum(affinity, dim=1, keepdim=True)\n affinity = torch.cat([affinity, center], dim=1)\n affinity = affinity * lambda_\n\n return affinity\n\n\nclass GraphSuperResolutionNet(nn.Module):\n \n def __init__(\n self,\n scaling: int,\n crop_size=256,\n feature_extractor='UResNet',\n pretrained=False,\n lambda_init=1.0,\n mu_init=0.1\n ):\n super().__init__()\n\n if crop_size not in [64, 128, 256]:\n raise ValueError('Crop size should be in {64, 128, 256}, got ' + str(crop_size))\n \n if feature_extractor == 'Color':\n self.feature_extractor = None\n # so the optimizer does not complain in case we have no other parameters\n self.dummy_param = nn.Parameter(torch.zeros(1))\n elif feature_extractor == 'UResNet':\n self.feature_extractor = smp.Unet('resnet50', classes=FEATURE_DIM, in_channels=INPUT_DIM,\n encoder_weights='imagenet' if pretrained else None)\n elif feature_extractor == 'UResNet18':\n self.feature_extractor = smp.Unet('resnet18', classes=FEATURE_DIM, in_channels=INPUT_DIM,\n encoder_weights='imagenet' if pretrained else None)\n elif feature_extractor == 'UEffNet2':\n self.feature_extractor = smp.Unet('efficientnet-b2', classes=FEATURE_DIM, in_channels=INPUT_DIM,\n encoder_weights='imagenet' if pretrained else None)\n else:\n raise NotImplementedError(f'Feature extractor {feature_extractor}')\n\n self.log_lambda = nn.Parameter(torch.tensor([log(lambda_init)]))\n self.log_mu = nn.Parameter(torch.tensor([log(mu_init)]))\n self.mx_dict = create_fixed_cupy_sparse_matrices(crop_size, crop_size, scaling)\n\n def forward(self, sample):\n guide, source, mask_lr = sample['guide'], sample['source'], sample['mask_lr']\n\n if self.feature_extractor is None:\n pixel_features = torch.cat([guide, sample['y_bicubic']], dim=1)\n else:\n pixel_features = self.feature_extractor(torch.cat([guide, sample['y_bicubic']], dim=1))\n\n mu, lambda_ = torch.exp(self.log_mu), torch.exp(self.log_lambda)\n neighbor_affinity = get_neighbor_affinity_no_border(pixel_features, mu, lambda_)\n\n y_pred = GraphQuadraticSolver.apply(neighbor_affinity, source, self.mx_dict, mask_lr)\n\n return {'y_pred': y_pred, 'neighbor_affinity': neighbor_affinity}\n\n def get_loss(self, output, sample, kind='l1'):\n y_pred = output['y_pred']\n y, mask_hr, mask_lr = (sample[k] for k in ('y', 
'mask_hr', 'mask_lr'))\n\n l1_loss = l1_loss_func(y_pred, y, mask_hr)\n mse_loss = mse_loss_func(y_pred, y, mask_hr)\n loss = l1_loss if kind == 'l1' else mse_loss\n\n return loss, {\n 'l1_loss': l1_loss.detach().item(),\n 'mse_loss': mse_loss.detach().item(),\n 'mu': torch.exp(self.log_mu).detach().item(),\n 'lambda': torch.exp(self.log_lambda).detach().item(),\n 'optimization_loss': loss.detach().item(),\n 'average_link': torch.mean(output['neighbor_affinity'][:, 0:4].detach()).item()\n }\n"
] | [
[
"numpy.sqrt",
"torch.isnan"
],
[
"torch.mean",
"torch.ones",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.exp",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Len-Li/pytorch3d | [
"95029e7a1211904814b3919cf12fe9d1952fb415",
"95029e7a1211904814b3919cf12fe9d1952fb415"
] | [
"tests/common_testing.py",
"tests/test_marching_cubes.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport os\nimport unittest\nfrom pathlib import Path\nfrom typing import Callable, Optional, Union\n\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\ndef get_tests_dir() -> Path:\n \"\"\"\n Returns Path for the directory containing this file.\n \"\"\"\n return Path(__file__).resolve().parent\n\n\ndef get_pytorch3d_dir() -> Path:\n \"\"\"\n Returns Path for the root PyTorch3D directory.\n\n Facebook internal systems need a special case here.\n \"\"\"\n if os.environ.get(\"INSIDE_RE_WORKER\") is not None:\n return Path(__file__).resolve().parent\n else:\n return Path(__file__).resolve().parent.parent\n\n\ndef load_rgb_image(filename: str, data_dir: Union[str, Path]):\n filepath = data_dir / filename\n with Image.open(filepath) as raw_image:\n image = torch.from_numpy(np.array(raw_image) / 255.0)\n image = image.to(dtype=torch.float32)\n return image[..., :3]\n\n\nTensorOrArray = Union[torch.Tensor, np.ndarray]\n\n\ndef get_random_cuda_device() -> str:\n \"\"\"\n Function to get a random GPU device from the\n available devices. This is useful for testing\n that custom cuda kernels can support inputs on\n any device without having to set the device explicitly.\n \"\"\"\n num_devices = torch.cuda.device_count()\n device_id = (\n torch.randint(high=num_devices, size=(1,)).item() if num_devices > 1 else 0\n )\n return \"cuda:%d\" % device_id\n\n\nclass TestCaseMixin(unittest.TestCase):\n def assertSeparate(self, tensor1, tensor2) -> None:\n \"\"\"\n Verify that tensor1 and tensor2 have their data in distinct locations.\n \"\"\"\n self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())\n\n def assertNotSeparate(self, tensor1, tensor2) -> None:\n \"\"\"\n Verify that tensor1 and tensor2 have their data in the same locations.\n \"\"\"\n self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())\n\n def assertAllSeparate(self, tensor_list) -> None:\n \"\"\"\n Verify that all tensors in tensor_list have their data in\n distinct locations.\n \"\"\"\n ptrs = [i.storage().data_ptr() for i in tensor_list]\n self.assertCountEqual(ptrs, set(ptrs))\n\n def assertNormsClose(\n self,\n input: TensorOrArray,\n other: TensorOrArray,\n norm_fn: Callable[[TensorOrArray], TensorOrArray],\n *,\n rtol: float = 1e-05,\n atol: float = 1e-08,\n equal_nan: bool = False,\n msg: Optional[str] = None,\n ) -> None:\n \"\"\"\n Verifies that two tensors or arrays have the same shape and are close\n given absolute and relative tolerance; raises AssertionError otherwise.\n A custom norm function is computed before comparison. 
If no such pre-\n processing needed, pass `torch.abs` or, equivalently, call `assertClose`.\n Args:\n input, other: two tensors or two arrays.\n norm_fn: The function evaluates\n `all(norm_fn(input - other) <= atol + rtol * norm_fn(other))`.\n norm_fn is a tensor -> tensor function; the output has:\n * all entries non-negative,\n * shape defined by the input shape only.\n rtol, atol, equal_nan: as for torch.allclose.\n msg: message in case the assertion is violated.\n Note:\n Optional arguments here are all keyword-only, to avoid confusion\n with msg arguments on other assert functions.\n \"\"\"\n\n self.assertEqual(np.shape(input), np.shape(other))\n\n diff = norm_fn(input - other)\n other_ = norm_fn(other)\n\n # We want to generalise allclose(input, output), which is essentially\n # all(diff <= atol + rtol * other)\n # but with a sophisticated handling non-finite values.\n # We work that around by calling allclose() with the following arguments:\n # allclose(diff + other_, other_). This computes what we want because\n # all(|diff + other_ - other_| <= atol + rtol * |other_|) ==\n # all(|norm_fn(input - other)| <= atol + rtol * |norm_fn(other)|) ==\n # all(norm_fn(input - other) <= atol + rtol * norm_fn(other)).\n\n self.assertClose(\n diff + other_, other_, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n\n def assertClose(\n self,\n input: TensorOrArray,\n other: TensorOrArray,\n *,\n rtol: float = 1e-05,\n atol: float = 1e-08,\n equal_nan: bool = False,\n msg: Optional[str] = None,\n ) -> None:\n \"\"\"\n Verifies that two tensors or arrays have the same shape and are close\n given absolute and relative tolerance, i.e. checks\n `all(|input - other| <= atol + rtol * |other|)`;\n raises AssertionError otherwise.\n Args:\n input, other: two tensors or two arrays.\n rtol, atol, equal_nan: as for torch.allclose.\n msg: message in case the assertion is violated.\n Note:\n Optional arguments here are all keyword-only, to avoid confusion\n with msg arguments on other assert functions.\n \"\"\"\n\n self.assertEqual(np.shape(input), np.shape(other))\n\n backend = torch if torch.is_tensor(input) else np\n close = backend.allclose(\n input, other, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n\n if not close and msg is None:\n diff = backend.abs(input - other) + 0.0\n ratio = diff / backend.abs(other)\n try_relative = (diff <= atol) | (backend.isfinite(ratio) & (ratio > 0))\n if try_relative.all():\n if backend == np:\n # Avoid a weirdness with zero dimensional arrays.\n ratio = np.array(ratio)\n ratio[diff <= atol] = 0\n extra = f\" Max relative diff {ratio.max()}\"\n else:\n extra = \"\"\n shape = tuple(input.shape)\n max_diff = diff.max()\n self.fail(f\"Not close. Max diff {max_diff}.{extra} Shape {shape}.\")\n\n self.assertTrue(close, msg)\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\nimport os\nimport pickle\nimport unittest\n\nimport torch\nfrom common_testing import TestCaseMixin, get_tests_dir\nfrom pytorch3d.ops.marching_cubes import marching_cubes_naive\n\n\nUSE_SCIKIT = False\nDATA_DIR = get_tests_dir() / \"data\"\n\n\ndef convert_to_local(verts, volume_dim):\n return (2 * verts) / (volume_dim - 1) - 1\n\n\nclass TestCubeConfiguration(TestCaseMixin, unittest.TestCase):\n\n # Test single cubes. Each case corresponds to the corresponding\n # cube vertex configuration in each case here (0-indexed):\n # https://en.wikipedia.org/wiki/Marching_cubes#/media/File:MarchingCubes.svg\n\n def test_empty_volume(self): # case 0\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor([])\n expected_faces = torch.tensor([], dtype=torch.int64)\n self.assertClose(verts, expected_verts)\n self.assertClose(faces, expected_faces)\n\n def test_case1(self): # case 1\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5, 0, 0],\n [0, 0, 0.5],\n [0, 0.5, 0],\n ]\n )\n\n expected_faces = torch.tensor([[1, 2, 0]])\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case2(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0:2, 0, 0] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [1.0000, 0.0000, 0.5000],\n [0.0000, 0.0000, 0.5000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n expected_faces = torch.tensor([[1, 2, 0], [3, 2, 1]])\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case3(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 1, 1, 0] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [1.0000, 1.0000, 0.5000],\n [0.5000, 1.0000, 0.0000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n expected_faces = torch.tensor([[0, 1, 5], [4, 3, 2]])\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case4(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 1, 0, 0] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 0, 
0, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [0.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n ]\n )\n expected_faces = torch.tensor([[0, 2, 1], [0, 4, 2], [4, 3, 2]])\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case5(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0:2, 0, 0:2] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n\n expected_faces = torch.tensor([[1, 0, 2], [2, 0, 3]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case6(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 1, 0, 0] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 0, 0, 1] = 0\n volume_data[0, 0, 1, 0] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [0.5000, 1.0000, 0.0000],\n [0.0000, 1.0000, 0.5000],\n [0.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n expected_faces = torch.tensor([[2, 7, 3], [0, 6, 1], [6, 4, 1], [6, 5, 4]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case7(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 1, 1, 0] = 0\n volume_data[0, 0, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 1.0000],\n [1.0000, 0.0000, 0.5000],\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [0.5000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.5000],\n [0.5000, 1.0000, 0.0000],\n [0.0000, 1.0000, 0.5000],\n [0.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n\n expected_faces = torch.tensor([[0, 1, 9], [4, 7, 8], [2, 3, 11], [5, 10, 6]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n 
self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case8(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 0, 0, 1] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 0, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [1.0000, 0.0000, 0.5000],\n [0.5000, 0.0000, 0.0000],\n [0.5000, 1.0000, 1.0000],\n [0.0000, 1.0000, 0.5000],\n [1.0000, 0.5000, 1.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n expected_faces = torch.tensor([[2, 3, 5], [4, 2, 5], [4, 5, 1], [4, 1, 0]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case9(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 1, 0, 0] = 0\n volume_data[0, 0, 0, 1] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 0, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [0.5000, 1.0000, 1.0000],\n [0.0000, 1.0000, 0.5000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n ]\n )\n expected_faces = torch.tensor([[0, 5, 4], [0, 4, 3], [0, 3, 1], [3, 4, 2]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case10(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 1, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [0.5000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.5000],\n [1.0000, 0.5000, 1.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n\n expected_faces = torch.tensor([[4, 3, 2], [0, 1, 5]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case11(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 1, 0, 0] = 0\n volume_data[0, 1, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [1.0000, 0.0000, 0.5000],\n [0.0000, 0.0000, 0.5000],\n [0.5000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.5000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n\n expected_faces = torch.tensor([[5, 1, 6], [5, 0, 1], [4, 3, 2]])\n\n self.assertClose(verts[0], expected_verts)\n 
self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case12(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 1, 0, 0] = 0\n volume_data[0, 0, 1, 0] = 0\n volume_data[0, 1, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [1.0000, 0.0000, 0.5000],\n [0.5000, 0.0000, 0.0000],\n [0.5000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.5000],\n [0.5000, 1.0000, 0.0000],\n [0.0000, 1.0000, 0.5000],\n [1.0000, 0.5000, 1.0000],\n [1.0000, 0.5000, 0.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n\n expected_faces = torch.tensor([[6, 3, 2], [7, 0, 1], [5, 4, 8]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case13(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 0, 1, 0] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 1, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.5000, 0.0000, 1.0000],\n [1.0000, 0.0000, 0.5000],\n [0.5000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.5000],\n [0.5000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.5000],\n [0.5000, 1.0000, 0.0000],\n [0.0000, 1.0000, 0.5000],\n ]\n )\n\n expected_faces = torch.tensor([[3, 6, 2], [3, 7, 6], [1, 5, 0], [5, 4, 0]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n def test_case14(self):\n volume_data = torch.ones(1, 2, 2, 2) # (B, W, H, D)\n volume_data[0, 0, 0, 0] = 0\n volume_data[0, 0, 0, 1] = 0\n volume_data[0, 1, 0, 1] = 0\n volume_data[0, 1, 1, 1] = 0\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [1.0000, 0.0000, 0.5000],\n [0.5000, 0.0000, 0.0000],\n [0.5000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.5000],\n [0.0000, 0.5000, 1.0000],\n [0.0000, 0.5000, 0.0000],\n ]\n )\n\n expected_faces = torch.tensor([[1, 0, 3], [1, 3, 4], [1, 4, 5], [2, 4, 3]])\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 2)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n\nclass TestMarchingCubes(TestCaseMixin, unittest.TestCase):\n def test_single_point(self):\n volume_data = torch.zeros(1, 3, 3, 3) # (B, W, H, D)\n volume_data[0, 1, 1, 1] = 1\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n\n 
expected_verts = torch.tensor(\n [\n [0.5, 1, 1],\n [1, 1, 0.5],\n [1, 0.5, 1],\n [1, 1, 1.5],\n [1, 1.5, 1],\n [1.5, 1, 1],\n ]\n )\n expected_faces = torch.tensor(\n [\n [2, 0, 1],\n [2, 3, 0],\n [0, 4, 1],\n [3, 4, 0],\n [5, 2, 1],\n [3, 2, 5],\n [5, 1, 4],\n [3, 5, 4],\n ]\n )\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 3)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n self.assertTrue(verts[0].ge(-1).all() and verts[0].le(1).all())\n\n def test_cube(self):\n volume_data = torch.zeros(1, 5, 5, 5) # (B, W, H, D)\n volume_data[0, 1, 1, 1] = 1\n volume_data[0, 1, 1, 2] = 1\n volume_data[0, 2, 1, 1] = 1\n volume_data[0, 2, 1, 2] = 1\n volume_data[0, 1, 2, 1] = 1\n volume_data[0, 1, 2, 2] = 1\n volume_data[0, 2, 2, 1] = 1\n volume_data[0, 2, 2, 2] = 1\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, 0.9, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [0.9000, 1.0000, 1.0000],\n [1.0000, 1.0000, 0.9000],\n [1.0000, 0.9000, 1.0000],\n [0.9000, 1.0000, 2.0000],\n [1.0000, 0.9000, 2.0000],\n [1.0000, 1.0000, 2.1000],\n [0.9000, 2.0000, 1.0000],\n [1.0000, 2.0000, 0.9000],\n [0.9000, 2.0000, 2.0000],\n [1.0000, 2.0000, 2.1000],\n [1.0000, 2.1000, 1.0000],\n [1.0000, 2.1000, 2.0000],\n [2.0000, 1.0000, 0.9000],\n [2.0000, 0.9000, 1.0000],\n [2.0000, 0.9000, 2.0000],\n [2.0000, 1.0000, 2.1000],\n [2.0000, 2.0000, 0.9000],\n [2.0000, 2.0000, 2.1000],\n [2.0000, 2.1000, 1.0000],\n [2.0000, 2.1000, 2.0000],\n [2.1000, 1.0000, 1.0000],\n [2.1000, 1.0000, 2.0000],\n [2.1000, 2.0000, 1.0000],\n [2.1000, 2.0000, 2.0000],\n ]\n )\n\n expected_faces = torch.tensor(\n [\n [2, 0, 1],\n [2, 4, 3],\n [0, 2, 3],\n [4, 5, 3],\n [0, 6, 7],\n [1, 0, 7],\n [3, 8, 0],\n [8, 6, 0],\n [5, 9, 8],\n [3, 5, 8],\n [6, 10, 7],\n [11, 10, 6],\n [8, 11, 6],\n [9, 11, 8],\n [13, 2, 1],\n [12, 13, 1],\n [14, 4, 13],\n [13, 4, 2],\n [4, 14, 15],\n [5, 4, 15],\n [12, 1, 16],\n [1, 7, 16],\n [15, 17, 5],\n [5, 17, 9],\n [16, 7, 10],\n [18, 16, 10],\n [19, 18, 11],\n [18, 10, 11],\n [9, 17, 19],\n [11, 9, 19],\n [20, 13, 12],\n [20, 21, 14],\n [13, 20, 14],\n [15, 14, 21],\n [22, 20, 12],\n [16, 22, 12],\n [21, 20, 23],\n [23, 20, 22],\n [17, 15, 21],\n [23, 17, 21],\n [22, 16, 18],\n [23, 22, 18],\n [19, 23, 18],\n [17, 23, 19],\n ]\n )\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n verts, faces = marching_cubes_naive(volume_data, 0.9, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 5)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n # Check all values are in the range [-1, 1]\n self.assertTrue(verts[0].ge(-1).all() and verts[0].le(1).all())\n\n def test_cube_no_duplicate_verts(self):\n volume_data = torch.zeros(1, 5, 5, 5) # (B, W, H, D)\n volume_data[0, 1, 1, 1] = 1\n volume_data[0, 1, 1, 2] = 1\n volume_data[0, 2, 1, 1] = 1\n volume_data[0, 2, 1, 2] = 1\n volume_data[0, 1, 2, 1] = 1\n volume_data[0, 1, 2, 2] = 1\n volume_data[0, 2, 2, 1] = 1\n volume_data[0, 2, 2, 2] = 1\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, 1, return_local_coords=False)\n\n expected_verts = torch.tensor(\n [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 2.0],\n [1.0, 2.0, 
1.0],\n [1.0, 2.0, 2.0],\n [2.0, 1.0, 1.0],\n [2.0, 1.0, 2.0],\n [2.0, 2.0, 1.0],\n [2.0, 2.0, 2.0],\n ]\n )\n\n expected_faces = torch.tensor(\n [\n [1, 3, 0],\n [3, 2, 0],\n [5, 1, 4],\n [4, 1, 0],\n [4, 0, 6],\n [0, 2, 6],\n [5, 7, 1],\n [1, 7, 3],\n [7, 6, 3],\n [6, 2, 3],\n [5, 4, 7],\n [7, 4, 6],\n ]\n )\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(volume_data, 1, return_local_coords=True)\n expected_verts = convert_to_local(expected_verts, 5)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n # Check all values are in the range [-1, 1]\n self.assertTrue(verts[0].ge(-1).all() and verts[0].le(1).all())\n\n def test_sphere(self):\n # (B, W, H, D)\n volume = torch.Tensor(\n [\n [\n [(x - 10) ** 2 + (y - 10) ** 2 + (z - 10) ** 2 for z in range(20)]\n for y in range(20)\n ]\n for x in range(20)\n ]\n ).unsqueeze(0)\n volume = volume.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(\n volume, isolevel=64, return_local_coords=False\n )\n\n data_filename = \"test_marching_cubes_data/sphere_level64.pickle\"\n filename = os.path.join(DATA_DIR, data_filename)\n with open(filename, \"rb\") as file:\n verts_and_faces = pickle.load(file)\n expected_verts = verts_and_faces[\"verts\"].squeeze()\n expected_faces = verts_and_faces[\"faces\"].squeeze()\n\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n verts, faces = marching_cubes_naive(\n volume, isolevel=64, return_local_coords=True\n )\n\n expected_verts = convert_to_local(expected_verts, 20)\n self.assertClose(verts[0], expected_verts)\n self.assertClose(faces[0], expected_faces)\n\n # Check all values are in the range [-1, 1]\n self.assertTrue(verts[0].ge(-1).all() and verts[0].le(1).all())\n\n # Uses skimage.draw.ellipsoid\n def test_double_ellipsoid(self):\n if USE_SCIKIT:\n import numpy as np\n from skimage.draw import ellipsoid\n\n ellip_base = ellipsoid(6, 10, 16, levelset=True)\n ellip_double = np.concatenate(\n (ellip_base[:-1, ...], ellip_base[2:, ...]), axis=0\n )\n volume = torch.Tensor(ellip_double).unsqueeze(0)\n volume = volume.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume, isolevel=0.001)\n\n data_filename = \"test_marching_cubes_data/double_ellipsoid.pickle\"\n filename = os.path.join(DATA_DIR, data_filename)\n with open(filename, \"rb\") as file:\n verts_and_faces = pickle.load(file)\n expected_verts = verts_and_faces[\"verts\"]\n expected_faces = verts_and_faces[\"faces\"]\n\n self.assertClose(verts[0], expected_verts[0])\n self.assertClose(faces[0], expected_faces[0])\n\n def test_cube_surface_area(self):\n if USE_SCIKIT:\n from skimage.measure import marching_cubes_classic, mesh_surface_area\n\n volume_data = torch.zeros(1, 5, 5, 5)\n volume_data[0, 1, 1, 1] = 1\n volume_data[0, 1, 1, 2] = 1\n volume_data[0, 2, 1, 1] = 1\n volume_data[0, 2, 1, 2] = 1\n volume_data[0, 1, 2, 1] = 1\n volume_data[0, 1, 2, 2] = 1\n volume_data[0, 2, 2, 1] = 1\n volume_data[0, 2, 2, 2] = 1\n volume_data = volume_data.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume_data, return_local_coords=False)\n verts_sci, faces_sci = marching_cubes_classic(volume_data[0])\n\n surf = mesh_surface_area(verts[0], faces[0])\n surf_sci = mesh_surface_area(verts_sci, faces_sci)\n\n self.assertClose(surf, surf_sci)\n\n def test_sphere_surface_area(self):\n if USE_SCIKIT:\n from skimage.measure import 
marching_cubes_classic, mesh_surface_area\n\n # (B, W, H, D)\n volume = torch.Tensor(\n [\n [\n [\n (x - 10) ** 2 + (y - 10) ** 2 + (z - 10) ** 2\n for z in range(20)\n ]\n for y in range(20)\n ]\n for x in range(20)\n ]\n ).unsqueeze(0)\n volume = volume.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume, isolevel=64)\n verts_sci, faces_sci = marching_cubes_classic(volume[0], level=64)\n\n surf = mesh_surface_area(verts[0], faces[0])\n surf_sci = mesh_surface_area(verts_sci, faces_sci)\n\n self.assertClose(surf, surf_sci)\n\n def test_double_ellipsoid_surface_area(self):\n if USE_SCIKIT:\n import numpy as np\n from skimage.draw import ellipsoid\n from skimage.measure import marching_cubes_classic, mesh_surface_area\n\n ellip_base = ellipsoid(6, 10, 16, levelset=True)\n ellip_double = np.concatenate(\n (ellip_base[:-1, ...], ellip_base[2:, ...]), axis=0\n )\n volume = torch.Tensor(ellip_double).unsqueeze(0)\n volume = volume.permute(0, 3, 2, 1) # (B, D, H, W)\n verts, faces = marching_cubes_naive(volume, isolevel=0)\n verts_sci, faces_sci = marching_cubes_classic(volume[0], level=0)\n\n surf = mesh_surface_area(verts[0], faces[0])\n surf_sci = mesh_surface_area(verts_sci, faces_sci)\n\n self.assertClose(surf, surf_sci)\n\n @staticmethod\n def marching_cubes_with_init(batch_size: int, V: int):\n device = torch.device(\"cuda:0\")\n volume_data = torch.rand(\n (batch_size, V, V, V), dtype=torch.float32, device=device\n )\n torch.cuda.synchronize()\n\n def convert():\n marching_cubes_naive(volume_data, return_local_coords=False)\n torch.cuda.synchronize()\n\n return convert\n"
] | [
[
"torch.randint",
"torch.is_tensor",
"numpy.shape",
"torch.cuda.device_count",
"numpy.array"
],
[
"torch.cuda.synchronize",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"torch.tensor",
"numpy.concatenate",
"torch.rand",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nikhase/statsmodels | [
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32",
"e1822d4513f442002816bb898ca5794785f35c32"
] | [
"examples/python/quantile_regression.py",
"statsmodels/tsa/base/tests/test_base.py",
"statsmodels/examples/example_functional_plots.py",
"statsmodels/examples/tsa/compare_arma.py",
"examples/python/tsa_filters.py",
"statsmodels/iolib/tests/test_pickle.py",
"statsmodels/distributions/mixture_rvs.py",
"statsmodels/examples/tsa/arma_plots.py",
"statsmodels/distributions/tests/test_edgeworth.py",
"statsmodels/sandbox/tests/test_pca.py",
"statsmodels/sandbox/examples/thirdparty/findow_0.py",
"statsmodels/duration/tests/test_phreg.py",
"statsmodels/tsa/vector_ar/tests/test_coint.py",
"statsmodels/tools/tests/test_decorators.py",
"statsmodels/tsa/statespace/tests/test_kalman.py",
"statsmodels/imputation/tests/test_bayes_mi.py",
"statsmodels/genmod/tests/gee_poisson_simulation_check.py",
"statsmodels/sandbox/contrast_old.py",
"statsmodels/iolib/tests/test_summary_old.py",
"statsmodels/examples/tsa/ex_arma_all.py"
] | [
"\n## Quantile regression\n\n# \n# This example page shows how to use ``statsmodels``' ``QuantReg`` class to replicate parts of the analysis published in \n# \n# * Koenker, Roger and Kevin F. Hallock. \"Quantile Regressioin\". Journal of Economic Perspectives, Volume 15, Number 4, Fall 2001, Pages 143–156\n# \n# We are interested in the relationship between income and expenditures on food for a sample of working class Belgian households in 1857 (the Engel data). \n# \n# ## Setup\n# \n# We first need to load some modules and to retrieve the data. Conveniently, the Engel dataset is shipped with ``statsmodels``.\n\nfrom __future__ import print_function\nimport patsy\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport matplotlib.pyplot as plt\nfrom statsmodels.regression.quantile_regression import QuantReg\n\ndata = sm.datasets.engel.load_pandas().data\ndata.head()\n\n\n# ## Least Absolute Deviation\n# \n# The LAD model is a special case of quantile regression where q=0.5\n\nmod = smf.quantreg('foodexp ~ income', data)\nres = mod.fit(q=.5)\nprint(res.summary())\n\n\n# ## Visualizing the results\n# \n# We estimate the quantile regression model for many quantiles between .05 and .95, and compare best fit line from each of these models to Ordinary Least Squares results. \n\n# ### Prepare data for plotting\n# \n# For convenience, we place the quantile regression results in a Pandas DataFrame, and the OLS results in a dictionary.\n\nquantiles = np.arange(.05, .96, .1)\n\n\ndef fit_model(q):\n res = mod.fit(q=q)\n return [q, res.params['Intercept'], res.params['income']] + res.conf_int().loc['income'].tolist()\n\n\nmodels = [fit_model(x) for x in quantiles]\nmodels = pd.DataFrame(models, columns=['q', 'a', 'b','lb','ub'])\n\nols = smf.ols('foodexp ~ income', data).fit()\nols_ci = ols.conf_int().loc['income'].tolist()\nols = dict(a = ols.params['Intercept'],\n b = ols.params['income'],\n lb = ols_ci[0],\n ub = ols_ci[1])\n\nprint(models)\nprint(ols)\n\n\n# ### First plot\n# \n# This plot compares best fit lines for 10 quantile regression models to the least squares fit. As Koenker and Hallock (2001) point out, we see that:\n# \n# 1. Food expenditure increases with income\n# 2. The *dispersion* of food expenditure increases with income\n# 3. The least squares estimates fit low income observations quite poorly (i.e. the OLS line passes over most low income households)\n\nx = np.arange(data.income.min(), data.income.max(), 50)\nget_y = lambda a, b: a + b * x\n\nfor i in range(models.shape[0]):\n y = get_y(models.a[i], models.b[i])\n plt.plot(x, y, linestyle='dotted', color='grey')\n \ny = get_y(ols['a'], ols['b'])\nplt.plot(x, y, color='red', label='OLS')\n\nplt.scatter(data.income, data.foodexp, alpha=.2)\nplt.xlim((240, 3000))\nplt.ylim((240, 2000))\nplt.legend()\nplt.xlabel('Income')\nplt.ylabel('Food expenditure')\nplt.show()\n\n\n# ### Second plot\n# \n# The dotted black lines form 95% point-wise confidence band around 10 quantile regression estimates (solid black line). 
The red lines represent OLS regression results along with their 95% confidence interval.\n# \n# In most cases, the quantile regression point estimates lie outside the OLS confidence interval, which suggests that the effect of income on food expenditure may not be constant across the distribution.\n\nfrom matplotlib import rc\nrc('text', usetex=True)\nn = models.shape[0]\np1 = plt.plot(models.q, models.b, color='black', label='Quantile Reg.')\np2 = plt.plot(models.q, models.ub, linestyle='dotted', color='black')\np3 = plt.plot(models.q, models.lb, linestyle='dotted', color='black')\np4 = plt.plot(models.q, [ols['b']] * n, color='red', label='OLS')\np5 = plt.plot(models.q, [ols['lb']] * n, linestyle='dotted', color='red')\np6 = plt.plot(models.q, [ols['ub']] * n, linestyle='dotted', color='red')\nplt.ylabel(r'\\beta_\\mbox{income}')\nplt.xlabel('Quantiles of the conditional food expenditure distribution')\nplt.legend()\nplt.show()\n",
"import numpy as np\nimport numpy.testing as npt\nimport pandas as pd\nfrom statsmodels.tsa.base.tsa_model import TimeSeriesModel\nfrom statsmodels.tools.testing import assert_equal\nfrom datetime import datetime\n\n\ndef test_pandas_nodates_index():\n\n data = [988, 819, 964]\n dates = ['a', 'b', 'c']\n s = pd.Series(data, index=dates)\n\n # TODO: Remove this, this is now valid\n # npt.assert_raises(ValueError, TimeSeriesModel, s)\n\n # Test with a non-date index that doesn't raise an exception because it\n # can be coerced into a nanosecond DatetimeIndex\n data = [988, 819, 964]\n # index=pd.date_range('1970-01-01', periods=3, freq='QS')\n index = pd.to_datetime([100, 101, 102])\n s = pd.Series(data, index=index)\n\n actual_str = (index[0].strftime('%Y-%m-%d %H:%M:%S.%f') +\n str(index[0].value))\n assert_equal(actual_str, '1970-01-01 00:00:00.000000100')\n mod = TimeSeriesModel(s)\n start, end, out_of_sample, _ = mod._get_prediction_index(0, 4)\n assert_equal(len(mod.data.predict_dates), 5)\n\n\ndef test_predict_freq():\n # test that predicted dates have same frequency\n x = np.arange(1,36.)\n\n # there's a bug in pandas up to 0.10.2 for YearBegin\n #dates = date_range(\"1972-4-1\", \"2007-4-1\", freq=\"AS-APR\")\n dates = pd.date_range(\"1972-4-30\", \"2006-4-30\", freq=\"A-APR\")\n series = pd.Series(x, index=dates)\n model = TimeSeriesModel(series)\n #npt.assert_(model.data.freq == \"AS-APR\")\n assert_equal(model._index.freqstr, \"A-APR\")\n\n start, end, out_of_sample, _ = (\n model._get_prediction_index(\"2006-4-30\", \"2016-4-30\"))\n\n predict_dates = model.data.predict_dates\n\n #expected_dates = date_range(\"2006-12-31\", \"2016-12-31\",\n # freq=\"AS-APR\")\n expected_dates = pd.date_range(\"2006-4-30\", \"2016-4-30\", freq=\"A-APR\")\n assert_equal(predict_dates, expected_dates)\n #ptesting.assert_series_equal(predict_dates, expected_dates)\n\n\ndef test_keyerror_start_date():\n x = np.arange(1,36.)\n\n # dates = date_range(\"1972-4-1\", \"2007-4-1\", freq=\"AS-APR\")\n dates = pd.date_range(\"1972-4-30\", \"2006-4-30\", freq=\"A-APR\")\n series = pd.Series(x, index=dates)\n model = TimeSeriesModel(series)\n\n npt.assert_raises(KeyError, model._get_prediction_index, \"1970-4-30\", None)\n\n\ndef test_period_index():\n # test 1285\n\n dates = pd.PeriodIndex(start=\"1/1/1990\", periods=20, freq=\"M\")\n x = np.arange(1, 21.)\n\n model = TimeSeriesModel(pd.Series(x, index=dates))\n assert_equal(model._index.freqstr, \"M\")\n model = TimeSeriesModel(pd.Series(x, index=dates))\n npt.assert_(model.data.freq == \"M\")\n\n\ndef test_pandas_dates():\n\n data = [988, 819, 964]\n dates = ['2016-01-01 12:00:00', '2016-02-01 12:00:00', '2016-03-01 12:00:00']\n\n datetime_dates = pd.to_datetime(dates)\n\n result = pd.Series(data=data, index=datetime_dates, name='price')\n df = pd.DataFrame(data={'price': data}, index=pd.DatetimeIndex(dates, freq='MS'))\n\n model = TimeSeriesModel(df['price'])\n\n assert_equal(model.data.dates, result.index)\n\n\ndef test_get_predict_start_end():\n index = pd.DatetimeIndex(start='1970-01-01', end='1990-01-01', freq='AS')\n endog = pd.Series(np.zeros(10), index[:10])\n model = TimeSeriesModel(endog)\n\n predict_starts = [1, '1971-01-01', datetime(1971, 1, 1), index[1]]\n predict_ends = [20, '1990-01-01', datetime(1990, 1, 1), index[-1]]\n\n desired = (1, 9, 11)\n for start in predict_starts:\n for end in predict_ends:\n assert_equal(model._get_prediction_index(start, end)[:3], desired)\n",
"'''Functional boxplots and rainbow plots\n\nsee docstrings for an explanation\n\n\nAuthor: Ralf Gommers\n\n'''\n\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\n\n#Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea\n#surface temperature data.\n\ndata = sm.datasets.elnino.load(as_pandas=False)\n\n#Create a functional boxplot:\n\n#We see that the years 1982-83 and 1997-98 are outliers; these are\n#the years where El Nino (a climate pattern characterized by warming\n#up of the sea surface and higher air pressures) occurred with unusual\n#intensity.\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nres = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,\n labels=data.raw_data[:, 0].astype(int),\n ax=ax)\n\nax.set_xlabel(\"Month of the year\")\nax.set_ylabel(\"Sea surface temperature (C)\")\nax.set_xticks(np.arange(13, step=3) - 1)\nax.set_xticklabels([\"\", \"Mar\", \"Jun\", \"Sep\", \"Dec\"])\nax.set_xlim([-0.2, 11.2])\n\n\n\n#Create a rainbow plot:\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nres = sm.graphics.rainbowplot(data.raw_data[:, 1:], ax=ax)\n\nax.set_xlabel(\"Month of the year\")\nax.set_ylabel(\"Sea surface temperature (C)\")\nax.set_xticks(np.arange(13, step=3) - 1)\nax.set_xticklabels([\"\", \"Mar\", \"Jun\", \"Sep\", \"Dec\"])\nax.set_xlim([-0.2, 11.2])\nplt.show()\n",
"from __future__ import print_function\nfrom time import time\nfrom statsmodels.tsa.arma_mle import Arma\nfrom statsmodels.tsa.api import ARMA\nimport numpy as np\n\nprint(\"Battle of the dueling ARMAs\")\n\ny_arma22 = np.loadtxt(r'C:\\Josef\\eclipsegworkspace\\statsmodels-josef-experimental-gsoc\\scikits\\statsmodels\\tsa\\y_arma22.txt')\n\narma1 = Arma(y_arma22)\narma2 = ARMA(y_arma22)\n\nprint(\"The actual results from gretl exact mle are\")\nparams_mle = np.array([.826990, -.333986, .0362419, -.792825])\nsigma_mle = 1.094011\nllf_mle = -1510.233\nprint(\"params: \", params_mle)\nprint(\"sigma: \", sigma_mle)\nprint(\"llf: \", llf_mle)\nprint(\"The actual results from gretl css are\")\nparams_css = np.array([.824810, -.337077, .0407222, -.789792])\nsigma_css = 1.095688\nllf_css = -1507.301\n\nresults = []\nresults += [\"gretl exact mle\", params_mle, sigma_mle, llf_mle]\nresults += [\"gretl css\", params_css, sigma_css, llf_css]\n\nt0 = time()\nprint(\"Exact MLE - Kalman filter version using l_bfgs_b\")\narma2.fit(order=(2,2), trend='nc')\nt1 = time()\nprint(\"params: \", arma2.params)\nprint(\"sigma: \", arma2.sigma2**.5)\narma2.llf = arma2.loglike(arma2._invtransparams(arma2.params))\nresults += [\"exact mle kalmanf\", arma2.params, arma2.sigma2**.5, arma2.llf]\nprint('time used:', t1-t0)\n\nt1=time()\nprint(\"CSS MLE - ARMA Class\")\narma2.fit(order=(2,2), trend='nc', method=\"css\")\nt2=time()\narma2.llf = arma2.loglike_css(arma2._invtransparams(arma2.params))\nprint(\"params: \", arma2.params)\nprint(\"sigma: \", arma2.sigma2**.5)\nresults += [\"css kalmanf\", arma2.params, arma2.sigma2**.5, arma2.llf]\nprint('time used:', t2-t1)\n\nprint(\"Arma.fit_mle results\")\n# have to set nar and nma manually\narma1.nar = 2\narma1.nma = 2\nt2=time()\nret = arma1.fit_mle()\nt3=time()\nprint(\"params, first 4, sigma, last 1 \", ret.params)\nresults += [\"Arma.fit_mle \", ret.params[:4], ret.params[-1], ret.llf]\nprint('time used:', t3-t2)\n\nprint(\"Arma.fit method = \\\"ls\\\"\")\nt3=time()\nret2 = arma1.fit(order=(2,0,2), method=\"ls\")\nt4=time()\nprint(ret2[0])\nresults += [\"Arma.fit ls\", ret2[0]]\nprint('time used:', t4-t3)\n\nprint(\"Arma.fit method = \\\"CLS\\\"\")\nt4=time()\nret3 = arma1.fit(order=(2,0,2), method=\"None\")\nt5=time()\nprint(ret3)\nresults += [\"Arma.fit other\", ret3[0]]\nprint('time used:', t5-t4)\n\nfor i in results: print(i)\n",
"\n## Time Series Filters\n\nfrom __future__ import print_function\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport statsmodels.api as sm\n\n\ndta = sm.datasets.macrodata.load_pandas().data\n\n\nindex = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))\nprint(index)\n\n\ndta.index = index\ndel dta['year']\ndel dta['quarter']\n\n\nprint(sm.datasets.macrodata.NOTE)\n\n\nprint(dta.head(10))\n\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\ndta.realgdp.plot(ax=ax);\nlegend = ax.legend(loc = 'upper left');\nlegend.prop.set_size(20);\n\n\n#### Hodrick-Prescott Filter\n\n# The Hodrick-Prescott filter separates a time-series $y_t$ into a trend $\\tau_t$ and a cyclical component $\\zeta_t$\n#\n# $$y_t = \\tau_t + \\zeta_t$$\n#\n# The components are determined by minimizing the following quadratic loss function\n#\n# $$\\min_{\\\\{ \\tau_{t}\\\\} }\\sum_{t}^{T}\\zeta_{t}^{2}+\\lambda\\sum_{t=1}^{T}\\left[\\left(\\tau_{t}-\\tau_{t-1}\\right)-\\left(\\tau_{t-1}-\\tau_{t-2}\\right)\\right]^{2}$$\n\ngdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(dta.realgdp)\n\n\ngdp_decomp = dta[['realgdp']]\ngdp_decomp[\"cycle\"] = gdp_cycle\ngdp_decomp[\"trend\"] = gdp_trend\n\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\ngdp_decomp[[\"realgdp\", \"trend\"]][\"2000-03-31\":].plot(ax=ax, fontsize=16);\nlegend = ax.get_legend()\nlegend.prop.set_size(20);\n\n\n#### Baxter-King approximate band-pass filter: Inflation and Unemployment\n\n##### Explore the hypothesis that inflation and unemployment are counter-cyclical.\n\n# The Baxter-King filter is intended to explictly deal with the periodicty of the business cycle. By applying their band-pass filter to a series, they produce a new series that does not contain fluctuations at higher or lower than those of the business cycle. Specifically, the BK filter takes the form of a symmetric moving average\n#\n# $$y_{t}^{*}=\\sum_{k=-K}^{k=K}a_ky_{t-k}$$\n#\n# where $a_{-k}=a_k$ and $\\sum_{k=-k}^{K}a_k=0$ to eliminate any trend in the series and render it stationary if the series is I(1) or I(2).\n#\n# For completeness, the filter weights are determined as follows\n#\n# $$a_{j} = B_{j}+\\theta\\text{ for }j=0,\\pm1,\\pm2,\\dots,\\pm K$$\n#\n# $$B_{0} = \\frac{\\left(\\omega_{2}-\\omega_{1}\\right)}{\\pi}$$\n# $$B_{j} = \\frac{1}{\\pi j}\\left(\\sin\\left(\\omega_{2}j\\right)-\\sin\\left(\\omega_{1}j\\right)\\right)\\text{ for }j=0,\\pm1,\\pm2,\\dots,\\pm K$$\n#\n# where $\\theta$ is a normalizing constant such that the weights sum to zero.\n#\n# $$\\theta=\\frac{-\\sum_{j=-K^{K}b_{j}}}{2K+1}$$\n#\n# $$\\omega_{1}=\\frac{2\\pi}{P_{H}}$$\n#\n# $$\\omega_{2}=\\frac{2\\pi}{P_{L}}$$\n#\n# $P_L$ and $P_H$ are the periodicity of the low and high cut-off frequencies. Following Burns and Mitchell's work on US business cycles which suggests cycles last from 1.5 to 8 years, we use $P_L=6$ and $P_H=32$ by default.\n\nbk_cycles = sm.tsa.filters.bkfilter(dta[[\"infl\",\"unemp\"]])\n\n\n# * We lose K observations on both ends. It is suggested to use K=12 for quarterly data.\n\nfig = plt.figure(figsize=(14,10))\nax = fig.add_subplot(111)\nbk_cycles.plot(ax=ax, style=['r--', 'b-']);\n\n\n#### Christiano-Fitzgerald approximate band-pass filter: Inflation and Unemployment\n\n# The Christiano-Fitzgerald filter is a generalization of BK and can thus also be seen as weighted moving average. However, the CF filter is asymmetric about $t$ as well as using the entire series. 
The implementation of their filter involves the\n# calculations of the weights in\n#\n# $$y_{t}^{*}=B_{0}y_{t}+B_{1}y_{t+1}+\\dots+B_{T-1-t}y_{T-1}+\\tilde B_{T-t}y_{T}+B_{1}y_{t-1}+\\dots+B_{t-2}y_{2}+\\tilde B_{t-1}y_{1}$$\n#\n# for $t=3,4,...,T-2$, where\n#\n# $$B_{j} = \\frac{\\sin(jb)-\\sin(ja)}{\\pi j},j\\geq1$$\n#\n# $$B_{0} = \\frac{b-a}{\\pi},a=\\frac{2\\pi}{P_{u}},b=\\frac{2\\pi}{P_{L}}$$\n#\n# $\\tilde B_{T-t}$ and $\\tilde B_{t-1}$ are linear functions of the $B_{j}$'s, and the values for $t=1,2,T-1,$ and $T$ are also calculated in much the same way. $P_{U}$ and $P_{L}$ are as described above with the same interpretation.\n\n# The CF filter is appropriate for series that may follow a random walk.\n\nprint(sm.tsa.stattools.adfuller(dta['unemp'])[:3])\n\n\nprint(sm.tsa.stattools.adfuller(dta['infl'])[:3])\n\n\ncf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[[\"infl\",\"unemp\"]])\nprint(cf_cycles.head(10))\n\n\nfig = plt.figure(figsize=(14,10))\nax = fig.add_subplot(111)\ncf_cycles.plot(ax=ax, style=['r--','b-']);\n\n\n# Filtering assumes *a priori* that business cycles exist. Due to this assumption, many macroeconomic models seek to create models that match the shape of impulse response functions rather than replicating properties of filtered series. See VAR notebook.\n",
"import tempfile\n\nfrom numpy.testing import assert_equal\n\nfrom statsmodels.compat.python import lrange, BytesIO\nfrom statsmodels.iolib.smpickle import save_pickle, load_pickle\n\n\ndef test_pickle():\n tmpdir = tempfile.mkdtemp(prefix='pickle')\n a = lrange(10)\n save_pickle(a, tmpdir+'/res.pkl')\n b = load_pickle(tmpdir+'/res.pkl')\n assert_equal(a, b)\n\n # cleanup, tested on Windows\n try:\n import os\n os.remove(tmpdir+'/res.pkl')\n os.rmdir(tmpdir)\n except (OSError, IOError):\n pass\n assert not os.path.exists(tmpdir)\n\n # test with file handle\n fh = BytesIO()\n save_pickle(a, fh)\n fh.seek(0, 0)\n c = load_pickle(fh)\n fh.close()\n assert_equal(a, c)\n",
"from statsmodels.compat.python import range\nimport numpy as np\n\ndef _make_index(prob,size):\n \"\"\"\n Returns a boolean index for given probabilities.\n\n Notes\n ---------\n prob = [.75,.25] means that there is a 75% chance of the first column\n being True and a 25% chance of the second column being True. The\n columns are mutually exclusive.\n \"\"\"\n rv = np.random.uniform(size=(size,1))\n cumprob = np.cumsum(prob)\n return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)\n\ndef mixture_rvs(prob, size, dist, kwargs=None):\n \"\"\"\n Sample from a mixture of distributions.\n\n Parameters\n ----------\n prob : array-like\n Probability of sampling from each distribution in dist\n size : int\n The length of the returned sample.\n dist : array-like\n An iterable of distributions objects from scipy.stats.\n kwargs : tuple of dicts, optional\n A tuple of dicts. Each dict in kwargs can have keys loc, scale, and\n args to be passed to the respective distribution in dist. If not\n provided, the distribution defaults are used.\n\n Examples\n --------\n Say we want 5000 random variables from mixture of normals with two\n distributions norm(-1,.5) and norm(1,.5) and we want to sample from the\n first with probability .75 and the second with probability .25.\n\n >>> from scipy import stats\n >>> prob = [.75,.25]\n >>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm],\n ... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))\n \"\"\"\n if len(prob) != len(dist):\n raise ValueError(\"You must provide as many probabilities as distributions\")\n if not np.allclose(np.sum(prob), 1):\n raise ValueError(\"prob does not sum to 1\")\n\n if kwargs is None:\n kwargs = ({},)*len(prob)\n\n idx = _make_index(prob,size)\n sample = np.empty(size)\n for i in range(len(prob)):\n sample_idx = idx[...,i]\n sample_size = sample_idx.sum()\n loc = kwargs[i].get('loc',0)\n scale = kwargs[i].get('scale',1)\n args = kwargs[i].get('args',())\n sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,\n size=sample_size))\n return sample\n\n\nclass MixtureDistribution(object):\n '''univariate mixture distribution\n\n for simple case for now (unbound support)\n does not yet inherit from scipy.stats.distributions\n\n adding pdf to mixture_rvs, some restrictions on broadcasting\n Currently it does not hold any state, all arguments included in each method.\n '''\n\n #def __init__(self, prob, size, dist, kwargs=None):\n\n def rvs(self, prob, size, dist, kwargs=None):\n return mixture_rvs(prob, size, dist, kwargs=kwargs)\n\n\n def pdf(self, x, prob, dist, kwargs=None):\n \"\"\"\n pdf a mixture of distributions.\n\n Parameters\n ----------\n x : array-like\n Array containing locations where the PDF should be evaluated\n prob : array-like\n Probability of sampling from each distribution in dist\n dist : array-like\n An iterable of distributions objects from scipy.stats.\n kwargs : tuple of dicts, optional\n A tuple of dicts. Each dict in kwargs can have keys loc, scale, and\n args to be passed to the respective distribution in dist. 
If not\n provided, the distribution defaults are used.\n\n Examples\n --------\n Say we want 5000 random variables from mixture of normals with two\n distributions norm(-1,.5) and norm(1,.5) and we want to sample from the\n first with probability .75 and the second with probability .25.\n\n >>> import numpy as np\n >>> from scipy import stats\n >>> from statsmodels.distributions.mixture_rvs import MixtureDistribution\n >>> x = np.arange(-4.0, 4.0, 0.01)\n >>> prob = [.75,.25]\n >>> mixture = MixtureDistribution()\n >>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],\n ... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))\n \"\"\"\n if len(prob) != len(dist):\n raise ValueError(\"You must provide as many probabilities as distributions\")\n if not np.allclose(np.sum(prob), 1):\n raise ValueError(\"prob does not sum to 1\")\n\n if kwargs is None:\n kwargs = ({},)*len(prob)\n\n for i in range(len(prob)):\n loc = kwargs[i].get('loc',0)\n scale = kwargs[i].get('scale',1)\n args = kwargs[i].get('args',())\n if i == 0: #assume all broadcast the same as the first dist\n pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)\n else:\n pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)\n return pdf_\n\n def cdf(self, x, prob, dist, kwargs=None):\n \"\"\"\n cdf of a mixture of distributions.\n\n Parameters\n ----------\n x : array-like\n Array containing locations where the CDF should be evaluated\n prob : array-like\n Probability of sampling from each distribution in dist\n size : int\n The length of the returned sample.\n dist : array-like\n An iterable of distributions objects from scipy.stats.\n kwargs : tuple of dicts, optional\n A tuple of dicts. Each dict in kwargs can have keys loc, scale, and\n args to be passed to the respective distribution in dist. If not\n provided, the distribution defaults are used.\n\n Examples\n --------\n Say we want 5000 random variables from mixture of normals with two\n distributions norm(-1,.5) and norm(1,.5) and we want to sample from the\n first with probability .75 and the second with probability .25.\n\n >>> import numpy as np\n >>> from scipy import stats\n >>> from statsmodels.distributions.mixture_rvs import MixtureDistribution\n >>> x = np.arange(-4.0, 4.0, 0.01)\n >>> prob = [.75,.25]\n >>> mixture = MixtureDistribution()\n >>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],\n ... 
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))\n \"\"\"\n if len(prob) != len(dist):\n raise ValueError(\"You must provide as many probabilities as distributions\")\n if not np.allclose(np.sum(prob), 1):\n raise ValueError(\"prob does not sum to 1\")\n\n if kwargs is None:\n kwargs = ({},)*len(prob)\n\n for i in range(len(prob)):\n loc = kwargs[i].get('loc',0)\n scale = kwargs[i].get('scale',1)\n args = kwargs[i].get('args',())\n if i == 0: #assume all broadcast the same as the first dist\n cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)\n else:\n cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)\n return cdf_\n\n\ndef mv_mixture_rvs(prob, size, dist, nvars, **kwargs):\n \"\"\"\n Sample from a mixture of multivariate distributions.\n\n Parameters\n ----------\n prob : array-like\n Probability of sampling from each distribution in dist\n size : int\n The length of the returned sample.\n dist : array-like\n An iterable of distributions instances with callable method rvs.\n nvargs : int\n dimension of the multivariate distribution, could be inferred instead\n kwargs : tuple of dicts, optional\n ignored\n\n Examples\n --------\n Say we want 2000 random variables from mixture of normals with two\n multivariate normal distributions, and we want to sample from the\n first with probability .4 and the second with probability .6.\n\n import statsmodels.sandbox.distributions.mv_normal as mvd\n\n cov3 = np.array([[ 1. , 0.5 , 0.75],\n [ 0.5 , 1.5 , 0.6 ],\n [ 0.75, 0.6 , 2. ]])\n\n mu = np.array([-1, 0.0, 2.0])\n mu2 = np.array([4, 2.0, 2.0])\n mvn3 = mvd.MVNormal(mu, cov3)\n mvn32 = mvd.MVNormal(mu2, cov3/2., 4)\n rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)\n\n \"\"\"\n if len(prob) != len(dist):\n raise ValueError(\"You must provide as many probabilities as distributions\")\n if not np.allclose(np.sum(prob), 1):\n raise ValueError(\"prob does not sum to 1\")\n\n if kwargs is None:\n kwargs = ({},)*len(prob)\n\n idx = _make_index(prob,size)\n sample = np.empty((size, nvars))\n for i in range(len(prob)):\n sample_idx = idx[...,i]\n sample_size = sample_idx.sum()\n #loc = kwargs[i].get('loc',0)\n #scale = kwargs[i].get('scale',1)\n #args = kwargs[i].get('args',())\n # use int to avoid numpy bug with np.random.multivariate_normal\n sample[sample_idx] = dist[i].rvs(size=int(sample_size))\n return sample\n\n\n\nif __name__ == '__main__':\n\n from scipy import stats\n\n obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],\n kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))\n\n\n\n nobs = 10000\n mix = MixtureDistribution()\n## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],\n## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))\n\n mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))\n mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],\n kwargs=mix_kwds)\n\n grid = np.linspace(-4,4, 100)\n mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],\n kwargs=mix_kwds)\n mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],\n kwargs=mix_kwds)\n\n doplot = 1\n if doplot:\n import matplotlib.pyplot as plt\n plt.figure()\n plt.hist(mrvs, bins=50, normed=True, color='red')\n plt.title('histogram of sample and pdf')\n plt.plot(grid, mpdf, lw=2, color='black')\n\n plt.figure()\n plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')\n plt.title('histogram of sample and pdf')\n plt.plot(grid, mcdf, lw=2, color='black')\n\n plt.show()\n",
"'''Plot acf and pacf for some ARMA(1,1)\n\n'''\n\n\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.tsa.arima_process as tsp\nfrom statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess\nimport statsmodels.tsa.stattools as tss\nfrom statsmodels.graphics.tsaplots import plotacf\n\nnp.set_printoptions(precision=2)\n\n\narcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]\nmacoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]\nnsample = 1000\nnburnin = 1000\nsig = 1\n\nfig = plt.figure(figsize=(8, 13))\nfig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')\nsubplotcount = 1\nnrows = 4\nfor arcoef in arcoefs[:-1]:\n for macoef in macoefs[:-1]:\n ar = np.r_[1., -arcoef]\n ma = np.r_[1., macoef]\n\n #y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)\n #armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional\n #armaprocess.plot4()\n armaprocess = tsp.ArmaProcess(ar, ma)\n acf = armaprocess.acf(20)[:20]\n pacf = armaprocess.pacf(20)[:20]\n ax = fig.add_subplot(nrows, 2, subplotcount)\n plotacf(acf, ax=ax)\n## ax.set_title('Autocorrelation \\nar=%s, ma=%rs' % (ar, ma),\n## size='xx-small')\n ax.text(0.7, 0.6, 'ar =%s \\nma=%s' % (ar, ma),\n transform=ax.transAxes,\n horizontalalignment='left', #'right',\n size='xx-small')\n ax.set_xlim(-1,20)\n subplotcount +=1\n ax = fig.add_subplot(nrows, 2, subplotcount)\n plotacf(pacf, ax=ax)\n## ax.set_title('Partial Autocorrelation \\nar=%s, ma=%rs' % (ar, ma),\n## size='xx-small')\n ax.text(0.7, 0.6, 'ar =%s \\nma=%s' % (ar, ma),\n transform=ax.transAxes,\n horizontalalignment='left', #'right',\n size='xx-small')\n ax.set_xlim(-1,20)\n subplotcount +=1\n\naxs = fig.axes\n### turn of the 2nd column y tick labels\n##for ax in axs[1::2]:#[:,1].flat:\n## for label in ax.get_yticklabels(): label.set_visible(False)\n\n# turn off all but the bottom xtick labels\nfor ax in axs[:-2]:#[:-1,:].flat:\n for label in ax.get_xticklabels(): label.set_visible(False)\n\n\n# use a MaxNLocator on the first column y axis if you have a bunch of\n# rows to avoid bunching; example below uses at most 3 ticks\nimport matplotlib.ticker as mticker\nfor ax in axs: #[::2]:#[:,1].flat:\n ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))\n\n\n\nplt.show()\n",
"from __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nfrom numpy.polynomial.hermite_e import HermiteE\nfrom numpy.testing import (assert_equal, assert_raises,\n assert_allclose)\nimport numpy.testing as npt\n\nfrom statsmodels.compat.scipy import factorial, factorial2\nfrom scipy.special import gamma\nimport scipy.stats as stats\n\nfrom statsmodels.distributions.edgeworth import (_faa_di_bruno_partitions,\n cumulant_from_moments, ExpandedNormal)\n\nclass TestFaaDiBruno(object):\n def test_neg_arg(self):\n assert_raises(ValueError, _faa_di_bruno_partitions, -1)\n assert_raises(ValueError, _faa_di_bruno_partitions, 0)\n\n def test_small_vals(self):\n for n in range(1, 5):\n for ks in _faa_di_bruno_partitions(n):\n lhs = sum(m * k for (m, k) in ks)\n assert_equal(lhs, n)\n\n\ndef _norm_moment(n):\n # moments of N(0, 1)\n return (1 - n % 2) * factorial2(n - 1)\n\ndef _norm_cumulant(n):\n # cumulants of N(0, 1)\n try:\n return {1: 0, 2: 1}[n]\n except KeyError:\n return 0\n\ndef _chi2_moment(n, df):\n # (raw) moments of \\chi^2(df)\n return (2**n) * gamma(n + df/2.) / gamma(df/2.)\n\ndef _chi2_cumulant(n, df):\n assert n > 0\n return 2**(n-1) * factorial(n - 1) * df\n\n\nclass TestCumulants(object):\n def test_badvalues(self):\n assert_raises(ValueError, cumulant_from_moments, [1, 2, 3], 0)\n assert_raises(ValueError, cumulant_from_moments, [1, 2, 3], 4)\n\n def test_norm(self):\n N = 4\n momt = [_norm_moment(j+1) for j in range(N)]\n for n in range(1, N+1):\n kappa = cumulant_from_moments(momt, n)\n assert_allclose(kappa, _norm_cumulant(n),\n atol=1e-12)\n\n def test_chi2(self):\n N = 4\n df = 8\n momt = [_chi2_moment(j+1, df) for j in range(N)]\n for n in range(1, N+1):\n kappa = cumulant_from_moments(momt, n)\n assert_allclose(kappa, _chi2_cumulant(n, df))\n\n\nclass TestExpandedNormal(object):\n def test_too_few_cumulants(self):\n assert_raises(ValueError, ExpandedNormal, [1])\n\n def test_coefficients(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n # 3rd order in n**(1/2)\n ne3 = ExpandedNormal([0., 1., 1.])\n assert_allclose(ne3._coef, [1., 0., 0., 1./6])\n\n # 4th order in n**(1/2)\n ne4 = ExpandedNormal([0., 1., 1., 1.])\n assert_allclose(ne4._coef, [1., 0., 0., 1./6, 1./24, 0., 1./72])\n\n # 5th order\n ne5 = ExpandedNormal([0., 1., 1., 1., 1.])\n assert_allclose(ne5._coef, [1., 0., 0., 1./6, 1./24, 1./120,\n 1./72, 1./144, 0., 1./1296])\n\n # adding trailing zeroes increases the order\n ne33 = ExpandedNormal([0., 1., 1., 0.])\n assert_allclose(ne33._coef, [1., 0., 0., 1./6, 0., 0., 1./72])\n\n def test_normal(self):\n # with two cumulants, it's just a gaussian\n ne2 = ExpandedNormal([3, 4])\n x = np.linspace(-2., 2., 100)\n assert_allclose(ne2.pdf(x), stats.norm.pdf(x, loc=3, scale=2))\n\n def test_chi2_moments(self):\n # construct the expansion for \\chi^2\n N, df = 6, 15\n cum = [_chi2_cumulant(n+1, df) for n in range(N)]\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n ne = ExpandedNormal(cum, name='edgw_chi2')\n\n # compare the moments\n assert_allclose([_chi2_moment(n, df) for n in range(N)],\n [ne.moment(n) for n in range(N)])\n\n # compare the pdf [fragile!]\n # this one is actually not a very good test: there is, strictly\n # speaking, no guarantee that the pdfs match point-by-point\n # m, s = df, np.sqrt(df)\n # x = np.linspace(m - s, m + s, 10)\n # assert_allclose(ne.pdf(x), stats.chi2.pdf(x, df),\n # atol=1e-4, rtol=1e-5)\n\n # pdf-cdf 
roundtrip\n check_pdf(ne, arg=(), msg='')\n\n # cdf-ppf roundtrip\n check_cdf_ppf(ne, arg=(), msg='')\n\n # cdf + sf == 1\n check_cdf_sf(ne, arg=(), msg='')\n\n # generate rvs & run a KS test\n np.random.seed(765456)\n rvs = ne.rvs(size=500)\n check_distribution_rvs(ne, args=(), alpha=0.01, rvs=rvs)\n\n def test_pdf_no_roots(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n ne = ExpandedNormal([0, 1])\n ne = ExpandedNormal([0, 1, 0.1, 0.1])\n\n def test_pdf_has_roots(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", RuntimeWarning)\n assert_raises(RuntimeWarning, ExpandedNormal, [0, 1, 101])\n\n\n## stolen verbatim from scipy/stats/tests/test_continuous_extra.py\nDECIMAL = 8\n\ndef check_pdf(distfn, arg, msg):\n # compares pdf at median with numerical derivative of cdf\n median = distfn.ppf(0.5, *arg)\n eps = 1e-6\n pdfv = distfn.pdf(median, *arg)\n if (pdfv < 1e-4) or (pdfv > 1e4):\n # avoid checking a case where pdf is close to zero\n # or huge (singularity)\n median = median + 0.1\n pdfv = distfn.pdf(median, *arg)\n cdfdiff = (distfn.cdf(median + eps, *arg) -\n distfn.cdf(median - eps, *arg))/eps/2.0\n # replace with better diff and better test (more points),\n # actually, this works pretty well\n npt.assert_almost_equal(pdfv, cdfdiff,\n decimal=DECIMAL, err_msg=msg + ' - cdf-pdf relationship')\n\n\ndef check_cdf_ppf(distfn, arg, msg):\n values = [0.001, 0.5, 0.999]\n npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),\n values, decimal=DECIMAL, err_msg=msg + ' - cdf-ppf roundtrip')\n\n\ndef check_cdf_sf(distfn, arg, msg):\n values = [0.001, 0.5, 0.999]\n npt.assert_almost_equal(distfn.cdf(values, *arg),\n 1. - distfn.sf(values, *arg),\n decimal=DECIMAL, err_msg=msg +' - sf+cdf == 1')\n\n\ndef check_distribution_rvs(distfn, args, alpha, rvs):\n ## signature changed to avoid calling a distribution by name\n # test from scipy.stats.tests\n # this version reuses existing random variables\n D,pval = stats.kstest(rvs, distfn.cdf, args=args, N=1000)\n if (pval < alpha):\n D,pval = stats.kstest(distfn.rvs, distfn.cdf, args=args, N=1000)\n npt.assert_(pval > alpha, \"D = \" + str(D) + \"; pval = \" + str(pval) +\n \"; alpha = \" + str(alpha) + \"\\nargs = \" + str(args))\n",
"'''tests for pca and arma to ar and ma representation\n\ncompared with matlab princomp, and garchar, garchma\n\nTODO:\n* convert to generators with yield to have individual tests\n* incomplete: test relationship of pca-evecs and pinv (adding constant)\n'''\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom statsmodels.sandbox import tools\nfrom statsmodels.sandbox.tools import pca, pcasvd\nfrom statsmodels.tsa.arima_process import arma_impulse_response\n\nfrom .datamlw import princomp1, princomp2, princomp3, data\n\n\ndef check_pca_princomp(pcares, princomp):\n factors, evals, evecs = pcares[1:]\n #res_princomp.coef, res_princomp.factors, res_princomp.values\n msign = (evecs/princomp.coef)[0]\n assert_array_almost_equal(msign*evecs, princomp.coef, 13)\n assert_array_almost_equal(msign*factors, princomp.factors, 13)\n assert_array_almost_equal(evals, princomp.values.ravel(), 13)\n\ndef check_pca_svd(pcares, pcasvdres):\n xreduced, factors, evals, evecs = pcares\n xred_svd, factors_svd, evals_svd, evecs_svd = pcasvdres\n assert_array_almost_equal(evals_svd, evals, 14)\n msign = (evecs/evecs_svd)[0]\n assert_array_almost_equal(msign*evecs_svd, evecs, 13)\n assert_array_almost_equal(msign*factors_svd, factors, 13)\n assert_array_almost_equal(xred_svd, xreduced, 13)\n\n\nxf = data.xo/1000.\n\ndef test_pca_princomp():\n pcares = pca(xf)\n check_pca_princomp(pcares, princomp1)\n pcares = pca(xf[:20,:])\n check_pca_princomp(pcares, princomp2)\n pcares = pca(xf[:20,:]-xf[:20,:].mean(0))\n check_pca_princomp(pcares, princomp3)\n pcares = pca(xf[:20,:]-xf[:20,:].mean(0), demean=0)\n check_pca_princomp(pcares, princomp3)\n\n\ndef test_pca_svd():\n xreduced, factors, evals, evecs = pca(xf)\n factors_wconst = np.c_[factors, np.ones((factors.shape[0],1))]\n beta = np.dot(np.linalg.pinv(factors_wconst), xf)\n #np.dot(np.linalg.pinv(factors_wconst),x2/1000.).T[:,:4] - evecs\n assert_array_almost_equal(beta.T[:,:4], evecs, 14)\n\n xred_svd, factors_svd, evals_svd, evecs_svd = pcasvd(xf, keepdim=0)\n assert_array_almost_equal(evals_svd, evals, 14)\n msign = (evecs/evecs_svd)[0]\n assert_array_almost_equal(msign*evecs_svd, evecs, 13)\n assert_array_almost_equal(msign*factors_svd, factors, 12)\n assert_array_almost_equal(xred_svd, xreduced, 13)\n\n pcares = pca(xf, keepdim=2)\n pcasvdres = pcasvd(xf, keepdim=2)\n check_pca_svd(pcares, pcasvdres)\n\n#print np.dot(factors[:,:3], evecs.T[:3,:])[:5]\n\n\nif __name__ == '__main__':\n test_pca_svd()\n",
"# -*- coding: utf-8 -*-\n\"\"\"A quick look at volatility of stock returns for 2009\n\nJust an exercise to find my way around the pandas methods.\nShows the daily rate of return, the square of it (volatility) and\na 5 day moving average of the volatility.\nNo guarantee for correctness.\nAssumes no missing values.\ncolors of lines in graphs are not great\n\nuses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.\nI haven't figured out storage, so the download happens at each run\nof the script.\n\ngetquotes is from pandas\\examples\\finance.py\n\nCreated on Sat Jan 30 16:30:18 2010\nAuthor: josef-pktd\n\"\"\"\nfrom statsmodels.compat.python import lzip\nimport numpy as np\nimport matplotlib.finance as fin\nimport matplotlib.pyplot as plt\nimport datetime as dt\n\nimport pandas as pa\n\n\ndef getquotes(symbol, start, end):\n quotes = fin.quotes_historical_yahoo(symbol, start, end)\n dates, open, close, high, low, volume = lzip(*quotes)\n\n data = {\n 'open' : open,\n 'close' : close,\n 'high' : high,\n 'low' : low,\n 'volume' : volume\n }\n\n dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])\n return pa.DataFrame(data, index=dates)\n\n\nstart_date = dt.datetime(2009, 1, 1)\nend_date = dt.datetime(2010, 1, 1)\n\nmysym = ['msft', 'ibm', 'goog']\nindexsym = ['gspc', 'dji']\n\n\n# download data\ndmall = {}\nfor sy in mysym:\n dmall[sy] = getquotes(sy, start_date, end_date)\n\n# combine into WidePanel\npawp = pa.WidePanel.fromDict(dmall)\nprint(pawp.values.shape)\n\n# select closing prices\npaclose = pawp.getMinorXS('close')\n\n# take log and first difference over time\npaclose_ratereturn = paclose.apply(np.log).diff()\nplt.figure()\npaclose_ratereturn.plot()\nplt.title('daily rate of return')\n\n# square the returns\npaclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))\nplt.figure()\nplt.title('volatility (with 5 day moving average')\npaclose_ratereturn_vol.plot()\n\n# use convolution to get moving average\npaclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(\n lambda x:np.convolve(x,np.ones(5)/5.,'same'))\npaclose_ratereturn_vol_mov.plot()\n\n#plt.show()\n",
"import itertools\nimport os\n\nimport numpy as np\nfrom statsmodels.duration.hazard_regression import PHReg\nfrom numpy.testing import (assert_allclose,\n assert_equal, assert_)\nimport pandas as pd\nimport pytest\n\n# TODO: Include some corner cases: data sets with empty strata, strata\n# with no events, entry times after censoring times, etc.\n\n# All the R results\nfrom .results import survival_r_results\nfrom .results import survival_enet_r_results\n\n\"\"\"\nTests of PHReg against R coxph.\n\nTests include entry times and stratification.\n\nphreg_gentests.py generates the test data sets and puts them into the\nresults folder.\n\nsurvival.R runs R on all the test data sets and constructs the\nsurvival_r_results module.\n\"\"\"\n\n# Arguments passed to the PHReg fit method.\nargs = {\"method\": \"bfgs\", \"disp\": 0}\n\n\ndef get_results(n, p, ext, ties):\n if ext is None:\n coef_name = \"coef_%d_%d_%s\" % (n, p, ties)\n se_name = \"se_%d_%d_%s\" % (n, p, ties)\n time_name = \"time_%d_%d_%s\" % (n, p, ties)\n hazard_name = \"hazard_%d_%d_%s\" % (n, p, ties)\n else:\n coef_name = \"coef_%d_%d_%s_%s\" % (n, p, ext, ties)\n se_name = \"se_%d_%d_%s_%s\" % (n, p, ext, ties)\n time_name = \"time_%d_%d_%s_%s\" % (n, p, ext, ties)\n hazard_name = \"hazard_%d_%d_%s_%s\" % (n, p, ext, ties)\n coef = getattr(survival_r_results, coef_name)\n se = getattr(survival_r_results, se_name)\n time = getattr(survival_r_results, time_name)\n hazard = getattr(survival_r_results, hazard_name)\n return coef, se, time, hazard\n\nclass TestPHReg(object):\n\n # Load a data file from the results directory\n @staticmethod\n def load_file(fname):\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n data = np.genfromtxt(os.path.join(cur_dir, 'results', fname),\n delimiter=\" \")\n time = data[:,0]\n status = data[:,1]\n entry = data[:,2]\n exog = data[:,3:]\n\n return time, status, entry, exog\n\n # Run a single test against R output\n @staticmethod\n def do1(fname, ties, entry_f, strata_f):\n\n # Read the test data.\n time, status, entry, exog = TestPHReg.load_file(fname)\n n = len(time)\n\n vs = fname.split(\"_\")\n n = int(vs[2])\n p = int(vs[3].split(\".\")[0])\n ties1 = ties[0:3]\n\n # Needs to match the kronecker statement in survival.R\n strata = np.kron(range(5), np.ones(n // 5))\n\n # No stratification or entry times\n mod = PHReg(time, exog, status, ties=ties)\n phrb = mod.fit(**args)\n coef_r, se_r, time_r, hazard_r = get_results(n, p, None, ties1)\n assert_allclose(phrb.params, coef_r, rtol=1e-3)\n assert_allclose(phrb.bse, se_r, rtol=1e-4)\n time_h, cumhaz, surv = phrb.baseline_cumulative_hazard[0]\n\n # Entry times but no stratification\n phrb = PHReg(time, exog, status, entry=entry,\n ties=ties).fit(**args)\n coef, se, time_r, hazard_r = get_results(n, p, \"et\", ties1)\n assert_allclose(phrb.params, coef, rtol=1e-3)\n assert_allclose(phrb.bse, se, rtol=1e-3)\n\n # Stratification but no entry times\n phrb = PHReg(time, exog, status, strata=strata,\n ties=ties).fit(**args)\n coef, se, time_r, hazard_r = get_results(n, p, \"st\", ties1)\n assert_allclose(phrb.params, coef, rtol=1e-4)\n assert_allclose(phrb.bse, se, rtol=1e-4)\n\n # Stratification and entry times\n phrb = PHReg(time, exog, status, entry=entry,\n strata=strata, ties=ties).fit(**args)\n coef, se, time_r, hazard_r = get_results(n, p, \"et_st\", ties1)\n assert_allclose(phrb.params, coef, rtol=1e-3)\n assert_allclose(phrb.bse, se, rtol=1e-4)\n\n #smoke test\n time_h, cumhaz, surv = phrb.baseline_cumulative_hazard[0]\n\n def 
test_missing(self):\n\n np.random.seed(34234)\n time = 50 * np.random.uniform(size=200)\n status = np.random.randint(0, 2, 200).astype(np.float64)\n exog = np.random.normal(size=(200,4))\n\n time[0:5] = np.nan\n status[5:10] = np.nan\n exog[10:15,:] = np.nan\n\n md = PHReg(time, exog, status, missing='drop')\n assert_allclose(len(md.endog), 185)\n assert_allclose(len(md.status), 185)\n assert_allclose(md.exog.shape, np.r_[185,4])\n\n def test_formula(self):\n\n np.random.seed(34234)\n time = 50 * np.random.uniform(size=200)\n status = np.random.randint(0, 2, 200).astype(np.float64)\n exog = np.random.normal(size=(200,4))\n entry = np.zeros_like(time)\n entry[0:10] = time[0:10] / 2\n\n df = pd.DataFrame({\"time\": time, \"status\": status,\n \"exog1\": exog[:, 0], \"exog2\": exog[:, 1],\n \"exog3\": exog[:, 2], \"exog4\": exog[:, 3],\n \"entry\": entry})\n\n mod1 = PHReg(time, exog, status, entry=entry)\n rslt1 = mod1.fit()\n\n # works with \"0 +\" on RHS but issues warning\n fml = \"time ~ exog1 + exog2 + exog3 + exog4\"\n mod2 = PHReg.from_formula(fml, df, status=status,\n entry=entry)\n rslt2 = mod2.fit()\n\n mod3 = PHReg.from_formula(fml, df, status=\"status\",\n entry=\"entry\")\n rslt3 = mod3.fit()\n\n assert_allclose(rslt1.params, rslt2.params)\n assert_allclose(rslt1.params, rslt3.params)\n assert_allclose(rslt1.bse, rslt2.bse)\n assert_allclose(rslt1.bse, rslt3.bse)\n\n def test_formula_cat_interactions(self):\n\n time = np.r_[1, 2, 3, 4, 5, 6, 7, 8, 9]\n status = np.r_[1, 1, 0, 0, 1, 0, 1, 1, 1]\n x1 = np.r_[1, 1, 1, 2, 2, 2, 3, 3, 3]\n x2 = np.r_[1, 2, 3, 1, 2, 3, 1, 2, 3]\n df = pd.DataFrame({\"time\": time, \"status\": status,\n \"x1\": x1, \"x2\": x2})\n\n model1 = PHReg.from_formula(\"time ~ C(x1) + C(x2) + C(x1)*C(x2)\", status=\"status\",\n data=df)\n assert_equal(model1.exog.shape, [9, 8])\n\n def test_predict_formula(self):\n\n n = 100\n np.random.seed(34234)\n time = 50 * np.random.uniform(size=n)\n status = np.random.randint(0, 2, n).astype(np.float64)\n exog = np.random.uniform(1, 2, size=(n, 2))\n\n df = pd.DataFrame({\"time\": time, \"status\": status,\n \"exog1\": exog[:, 0], \"exog2\": exog[:, 1]})\n\n # Works with \"0 +\" on RHS but issues warning\n fml = \"time ~ exog1 + np.log(exog2) + exog1*exog2\"\n model1 = PHReg.from_formula(fml, df, status=status)\n result1 = model1.fit()\n\n from patsy import dmatrix\n dfp = dmatrix(model1.data.design_info, df)\n\n pr1 = result1.predict()\n pr2 = result1.predict(exog=df)\n pr3 = model1.predict(result1.params, exog=dfp) # No standard errors\n pr4 = model1.predict(result1.params, cov_params=result1.cov_params(), exog=dfp)\n\n prl = (pr1, pr2, pr3, pr4)\n for i in range(4):\n for j in range(i):\n assert_allclose(prl[i].predicted_values, prl[j].predicted_values)\n\n prl = (pr1, pr2, pr4)\n for i in range(3):\n for j in range(i):\n assert_allclose(prl[i].standard_errors, prl[j].standard_errors)\n\n def test_formula_args(self):\n\n np.random.seed(34234)\n n = 200\n time = 50 * np.random.uniform(size=n)\n status = np.random.randint(0, 2, size=n).astype(np.float64)\n exog = np.random.normal(size=(200, 2))\n offset = np.random.uniform(size=n)\n entry = np.random.uniform(0, 1, size=n) * time\n\n df = pd.DataFrame({\"time\": time, \"status\": status, \"x1\": exog[:, 0],\n \"x2\": exog[:, 1], \"offset\": offset, \"entry\": entry})\n model1 = PHReg.from_formula(\"time ~ x1 + x2\", status=\"status\", offset=\"offset\",\n entry=\"entry\", data=df)\n result1 = model1.fit()\n model2 = PHReg.from_formula(\"time ~ x1 + x2\", 
status=df.status, offset=df.offset,\n entry=df.entry, data=df)\n result2 = model2.fit()\n assert_allclose(result1.params, result2.params)\n assert_allclose(result1.bse, result2.bse)\n\n def test_offset(self):\n\n np.random.seed(34234)\n time = 50 * np.random.uniform(size=200)\n status = np.random.randint(0, 2, 200).astype(np.float64)\n exog = np.random.normal(size=(200,4))\n\n for ties in \"breslow\", \"efron\":\n mod1 = PHReg(time, exog, status)\n rslt1 = mod1.fit()\n offset = exog[:,0] * rslt1.params[0]\n exog = exog[:, 1:]\n\n mod2 = PHReg(time, exog, status, offset=offset, ties=ties)\n rslt2 = mod2.fit()\n\n assert_allclose(rslt2.params, rslt1.params[1:])\n\n def test_post_estimation(self):\n # All regression tests\n np.random.seed(34234)\n time = 50 * np.random.uniform(size=200)\n status = np.random.randint(0, 2, 200).astype(np.float64)\n exog = np.random.normal(size=(200,4))\n\n mod = PHReg(time, exog, status)\n rslt = mod.fit()\n mart_resid = rslt.martingale_residuals\n assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)\n\n w_avg = rslt.weighted_covariate_averages\n assert_allclose(np.abs(w_avg[0]).sum(0),\n np.r_[7.31008415, 9.77608674,10.89515885, 13.1106801])\n\n bc_haz = rslt.baseline_cumulative_hazard\n v = [np.mean(np.abs(x)) for x in bc_haz[0]]\n w = np.r_[23.482841556421608, 0.44149255358417017,\n 0.68660114081275281]\n assert_allclose(v, w)\n\n score_resid = rslt.score_residuals\n v = np.r_[ 0.50924792, 0.4533952, 0.4876718, 0.5441128]\n w = np.abs(score_resid).mean(0)\n assert_allclose(v, w)\n\n groups = np.random.randint(0, 3, 200)\n mod = PHReg(time, exog, status)\n rslt = mod.fit(groups=groups)\n robust_cov = rslt.cov_params()\n v = [0.00513432, 0.01278423, 0.00810427, 0.00293147]\n w = np.abs(robust_cov).mean(0)\n assert_allclose(v, w, rtol=1e-6)\n\n s_resid = rslt.schoenfeld_residuals\n ii = np.flatnonzero(np.isfinite(s_resid).all(1))\n s_resid = s_resid[ii, :]\n v = np.r_[0.85154336, 0.72993748, 0.73758071, 0.78599333]\n assert_allclose(np.abs(s_resid).mean(0), v)\n\n def test_summary(self):\n # smoke test\n np.random.seed(34234)\n time = 50 * np.random.uniform(size=200)\n status = np.random.randint(0, 2, 200).astype(np.float64)\n exog = np.random.normal(size=(200,4))\n\n mod = PHReg(time, exog, status)\n rslt = mod.fit()\n smry = rslt.summary()\n\n strata = np.kron(np.arange(50), np.ones(4))\n mod = PHReg(time, exog, status, strata=strata)\n rslt = mod.fit()\n smry = rslt.summary()\n msg = \"3 strata dropped for having no events\"\n assert_(msg in str(smry))\n\n groups = np.kron(np.arange(25), np.ones(8))\n mod = PHReg(time, exog, status)\n rslt = mod.fit(groups=groups)\n smry = rslt.summary()\n\n entry = np.random.uniform(0.1, 0.8, 200) * time\n mod = PHReg(time, exog, status, entry=entry)\n rslt = mod.fit()\n smry = rslt.summary()\n msg = \"200 observations have positive entry times\"\n assert_(msg in str(smry))\n\n def test_predict(self):\n # All smoke tests. We should be able to convert the lhr and hr\n # tests into real tests against R. There are many options to\n # this function that may interact in complicated ways. 
Only a\n # few key combinations are tested here.\n np.random.seed(34234)\n endog = 50 * np.random.uniform(size=200)\n status = np.random.randint(0, 2, 200).astype(np.float64)\n exog = np.random.normal(size=(200,4))\n\n mod = PHReg(endog, exog, status)\n rslt = mod.fit()\n rslt.predict()\n for pred_type in 'lhr', 'hr', 'cumhaz', 'surv':\n rslt.predict(pred_type=pred_type)\n rslt.predict(endog=endog[0:10], pred_type=pred_type)\n rslt.predict(endog=endog[0:10], exog=exog[0:10,:],\n pred_type=pred_type)\n\n def test_get_distribution(self):\n # Smoke test\n np.random.seed(34234)\n n = 200\n exog = np.random.normal(size=(n, 2))\n lin_pred = exog.sum(1)\n elin_pred = np.exp(-lin_pred)\n time = -elin_pred * np.log(np.random.uniform(size=n))\n status = np.ones(n)\n status[0:20] = 0\n strata = np.kron(range(5), np.ones(n // 5))\n\n mod = PHReg(time, exog, status=status, strata=strata)\n rslt = mod.fit()\n\n dist = rslt.get_distribution()\n\n fitted_means = dist.mean()\n true_means = elin_pred\n fitted_var = dist.var()\n fitted_sd = dist.std()\n sample = dist.rvs()\n\n\n def test_fit_regularized(self):\n\n # Data set sizes\n for n,p in (50,2),(100,5):\n\n # Penalty weights\n for js,s in enumerate([0,0.1]):\n\n coef_name = \"coef_%d_%d_%d\" % (n, p, js)\n params = getattr(survival_enet_r_results, coef_name)\n\n fname = \"survival_data_%d_%d.csv\" % (n, p)\n time, status, entry, exog = self.load_file(fname)\n\n exog -= exog.mean(0)\n exog /= exog.std(0, ddof=1)\n\n model = PHReg(time, exog, status=status, ties='breslow')\n sm_result = model.fit_regularized(alpha=s)\n\n # The agreement isn't very high, the issue may be on\n # the R side. See below for further checks.\n assert_allclose(sm_result.params, params, rtol=0.3)\n\n # Smoke test for summary\n smry = sm_result.summary()\n\n # The penalized log-likelihood that we are maximizing.\n def plf(params):\n llf = model.loglike(params) / len(time)\n L1_wt = 1\n llf = llf - s * ((1 - L1_wt)*np.sum(params**2) / 2 + L1_wt*np.sum(np.abs(params)))\n return llf\n\n # Confirm that we are doing better than glmnet.\n llf_r = plf(params)\n llf_sm = plf(sm_result.params)\n assert_equal(np.sign(llf_sm - llf_r), 1)\n\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nrdir = os.path.join(cur_dir, 'results')\nfnames = os.listdir(rdir)\nfnames = [x for x in fnames if x.startswith(\"survival\")\n and x.endswith(\".csv\")]\n\nties = (\"breslow\", \"efron\")\nentry_f = (False, True)\nstrata_f = (False, True)\n\n\[email protected]('fname,ties,entry_f,strata_f',\n list(itertools.product(fnames, ties, entry_f, strata_f)))\ndef test_r(fname, ties, entry_f, strata_f):\n TestPHReg.do1(fname, ties, entry_f, strata_f)\n",
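The tests above fit `PHReg` both from arrays and through `from_formula`. A minimal sketch of the same fitting pattern on simulated data (the seed, sample size and the "breslow" tie-handling choice are arbitrary here):

```python
import numpy as np
import pandas as pd
from statsmodels.duration.hazard_regression import PHReg

np.random.seed(34234)                       # arbitrary seed
n = 200
exog = np.random.normal(size=(n, 2))
# exponential-style survival times driven by the linear predictor
time = -np.exp(-exog.sum(1)) * np.log(np.random.uniform(size=n))
status = np.random.randint(0, 2, n).astype(np.float64)   # 1 = event observed

rslt = PHReg(time, exog, status, ties="breslow").fit()
print(rslt.params, rslt.bse)

# Equivalent formula interface, as exercised in test_formula above
df = pd.DataFrame({"time": time, "status": status,
                   "x1": exog[:, 0], "x2": exog[:, 1]})
rslt2 = PHReg.from_formula("time ~ x1 + x2", df, status="status").fit()
print(rslt2.summary())
```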
"# -*- coding: utf-8 -*-\n\"\"\"Test Johansen's Cointegration test against jplv, Spatial Econometrics Toolbox\n\nCreated on Thu Aug 30 21:51:08 2012\nAuthor: Josef Perktold\n\n\"\"\"\nimport os\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal\n\nfrom statsmodels.tsa.vector_ar.vecm import coint_johansen\nfrom statsmodels.tools.sm_exceptions import HypothesisTestWarning\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\ndta_path = os.path.join(current_path, \"Matlab_results\", \"test_coint.csv\")\nwith open(dta_path, \"rb\") as fd:\n dta = np.genfromtxt(fd)\n\n\nclass CheckCointJoh(object):\n\n def test_basic(self):\n assert_equal(self.res.ind, np.arange(len(self.res.ind), dtype=int))\n assert_equal(self.res.r0t.shape, (self.nobs_r, 8))\n\n def test_table_trace(self):\n table1 = np.column_stack((self.res.lr1, self.res.cvt))\n assert_almost_equal(table1,\n self.res1_m.reshape(table1.shape, order='F'))\n\n def test_table_maxeval(self):\n table2 = np.column_stack((self.res.lr2, self.res.cvm))\n assert_almost_equal(table2,\n self.res2_m.reshape(table2.shape, order='F'))\n\n\nclass TestCointJoh12(CheckCointJoh):\n\n @classmethod\n def setup_class(cls):\n cls.res = coint_johansen(dta, 1, 2)\n cls.nobs_r = 173 - 1 - 2\n\n cls.res1_m = np.array([241.985452556075, 166.4781461662553, 110.3298006342814, 70.79801574443575, 44.90887371527634, 27.22385073668511, 11.74205493173769, 3.295435325623445, 169.0618, 133.7852, 102.4674, 75.1027, 51.6492, 32.0645, 16.1619, 2.7055, 175.1584, 139.278, 107.3429, 79.34220000000001, 55.2459, 35.0116, 18.3985, 3.8415, 187.1891, 150.0778, 116.9829, 87.7748, 62.5202, 41.0815, 23.1485, 6.6349])\n cls.res2_m = np.array([75.50730638981975, 56.14834553197396, 39.5317848898456, 25.8891420291594, 17.68502297859124, 15.48179580494741, 8.446619606114249, 3.295435325623445, 52.5858, 46.5583, 40.5244, 34.4202, 28.2398, 21.8731, 15.0006, 2.7055, 55.7302, 49.5875, 43.4183, 37.1646, 30.8151, 24.2522, 17.1481, 3.8415, 62.1741, 55.8171, 49.4095, 42.8612, 36.193, 29.2631, 21.7465, 6.6349,])\n\n evec = np.array([\n 0.01102517075074406, -0.2185481584930077, 0.04565819524210763, -0.06556394587400775,\n 0.04711496306104131, -0.1500111976629196, 0.03775327003706507, 0.03479475877437702,\n \n 0.007517888890275335, -0.2014629352546497, 0.01526001455616041, 0.0707900418057458,\n -0.002388919695513273, 0.04486516694838273, -0.02936314422571188, 0.009900554050392113,\n \n 0.02846074144367176, 0.02021385478834498, -0.04276914888645468, 0.1738024290422287,\n 0.07821155002012749, -0.1066523077111768, -0.3011042488399306, 0.04965189679477353,\n \n 0.07141291326159237, -0.01406702689857725, -0.07842109866080313, -0.04773566072362181,\n -0.04768640728128824, -0.04428737926285261, 0.4143225656833862, 0.04512787132114879,\n \n -0.06817130121837202, 0.2246249779872569, -0.009356548567565763, 0.006685350535849125,\n -0.02040894506833539, 0.008131690308487425, -0.2503209797396666, 0.01560186979508953,\n \n 0.03327070126502506, -0.263036624535624, -0.04669882107497259, 0.0146457545413255,\n 0.01408691619062709, 0.1004753600191269, -0.02239205763487946, -0.02169291468272568,\n \n 0.08782313160608619, -0.07696508791577318, 0.008925177304198475, -0.06230900392092828,\n -0.01548907461158638, 0.04574831652028973, -0.2972228156126774, 0.003469819004961912,\n \n -0.001868995544352928, 0.05993345996347871, 0.01213394328069316, 0.02096614212178651,\n -0.08624395993789938, 0.02108183181049973, -0.08470307289295617, -5.135072530480897e-005])\n cls.evec_m = 
evec.reshape(cls.res.evec.shape, order='F')\n\n cls.eig_m = np.array([0.3586376068088151, 0.2812806889719111, 0.2074818815675726, 0.141259991767926, 0.09880133062878599, 0.08704563854307619, 0.048471840356709, 0.01919823444066367])\n\n def test_evec(self):\n for col in range(self.evec_m.shape[1]):\n try:\n assert_almost_equal(self.res.evec[:, col],\n self.evec_m[:, col])\n except AssertionError:\n assert_almost_equal(self.res.evec[:, col],\n -self.evec_m[:, col])\n\n def test_evals(self):\n assert_almost_equal(self.res.eig, self.eig_m)\n\n\nclass TestCointJoh09(CheckCointJoh):\n\n @classmethod\n def setup_class(cls):\n cls.res = coint_johansen(dta, 0, 9)\n cls.nobs_r = 173 - 1 - 9\n #fprintf(1, '%18.16g, ', r1)\n cls.res1_m = np.array([307.6888935095814, 205.3839229398245, 129.1330243009336, 83.3101865760208, 52.51955460357912, 30.20027050520502, 13.84158157562689, 0.4117390188204866, 153.6341, 120.3673, 91.109, 65.8202, 44.4929, 27.0669, 13.4294, 2.7055, 159.529, 125.6185, 95.7542, 69.8189, 47.8545, 29.7961, 15.4943, 3.8415, 171.0905, 135.9825, 104.9637, 77.8202, 54.6815, 35.4628, 19.9349, 6.6349])\n #r2 = [res.lr2 res.cvm]\n cls.res2_m = np.array([102.3049705697569, 76.25089863889085, 45.82283772491284, 30.7906319724417, 22.31928409837409, 16.35868892957814, 13.4298425568064, 0.4117390188204866, 49.2855, 43.2947, 37.2786, 31.2379, 25.1236, 18.8928, 12.2971, 2.7055, 52.3622, 46.2299, 40.0763, 33.8777, 27.5858, 21.1314, 14.2639, 3.8415, 58.6634, 52.3069, 45.8662, 39.3693, 32.7172, 25.865, 18.52, 6.6349])\n\n\nclass TestCointJohMin18(CheckCointJoh):\n\n @classmethod\n def setup_class(cls):\n cls.res = coint_johansen(dta, -1, 8)\n cls.nobs_r = 173 - 1 - 8\n\n cls.res1_m = np.array([260.6786029744658, 162.7966072512681, 105.8253545950566, 71.16133060790817, 47.68490211260372, 28.11843682526138, 13.03968537077271, 2.25398078597622, 137.9954, 106.7351, 79.5329, 56.2839, 37.0339, 21.7781, 10.4741, 2.9762, 143.6691, 111.7797, 83.9383, 60.0627, 40.1749, 24.2761, 12.3212, 4.1296, 154.7977, 121.7375, 92.7136, 67.63670000000001, 46.5716, 29.5147, 16.364, 6.9406])\n cls.res2_m = np.array([97.88199572319769, 56.97125265621156, 34.66402398714837, 23.47642849530445, 19.56646528734234, 15.07875145448866, 10.7857045847965, 2.25398078597622, 45.893, 39.9085, 33.9271, 27.916, 21.837, 15.7175, 9.4748, 2.9762, 48.8795, 42.7679, 36.6301, 30.4428, 24.1592, 17.7961, 11.2246, 4.1296, 55.0335, 48.6606, 42.2333, 35.7359, 29.0609, 22.2519, 15.0923, 6.9406])\n\n\nclass TestCointJoh25(CheckCointJoh):\n\n @classmethod\n def setup_class(cls):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=HypothesisTestWarning)\n cls.res = coint_johansen(dta, 2, 5)\n cls.nobs_r = 173 - 1 - 5\n\n #Note: critical values not available if trend>1\n cls.res1_m = np.array([270.1887263915158, 171.6870096307863, 107.8613367358704, 70.82424032233558, 44.62551818267534, 25.74352073857572, 14.17882426926978, 4.288656185006764, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n cls.res1_m[cls.res1_m == 0] = np.nan\n cls.res2_m = np.array([98.50171676072955, 63.82567289491584, 37.03709641353485, 26.19872213966024, 18.88199744409963, 11.56469646930594, 9.890168084263012, 4.288656185006764, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n cls.res2_m[cls.res2_m == 0] = np.nan\n",
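The Johansen tests above read `lr1`/`cvt` (trace) and `lr2`/`cvm` (maximum-eigenvalue) statistics off the result object. Purely as an illustration, on synthetic series sharing one stochastic trend (the deterministic-term order and lag choice below are arbitrary, not the Matlab reference settings):

```python
import numpy as np
from statsmodels.tsa.vector_ar.vecm import coint_johansen

rng = np.random.RandomState(12345)          # arbitrary seed
n = 250
trend = np.cumsum(rng.standard_normal(n))   # shared I(1) component
y = np.column_stack([trend + rng.standard_normal(n),
                     0.5 * trend + rng.standard_normal(n),
                     np.cumsum(rng.standard_normal(n))])

res = coint_johansen(y, 0, 2)   # det_order=0, k_ar_diff=2 (arbitrary choices)
print(np.column_stack((res.lr1, res.cvt)))   # trace stats next to critical values
print(np.column_stack((res.lr2, res.cvm)))   # max-eigenvalue stats and critical values
print(res.eig)                               # eigenvalues
```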
"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom numpy.testing import assert_equal\n\nfrom statsmodels.tools.decorators import (\n resettable_cache, cache_readonly, cache_writable, CacheWriteWarning)\n\n\ndef test_resettable_cache():\n # This test was taken from the old __main__ section of decorators.py\n\n reset = dict(a=('b',), b=('c',))\n cache = resettable_cache(a=0, b=1, c=2, reset=reset)\n assert_equal(cache, dict(a=0, b=1, c=2))\n\n # Try resetting a\n cache['a'] = 1\n assert_equal(cache, dict(a=1, b=None, c=None))\n cache['c'] = 2\n assert_equal(cache, dict(a=1, b=None, c=2))\n cache['b'] = 0\n assert_equal(cache, dict(a=1, b=0, c=None))\n\n # Try deleting b\n del cache['a']\n assert_equal(cache, {})\n\n\ndef test_cache_readonly():\n\n class Example(object):\n def __init__(self):\n self._cache = resettable_cache()\n self.a = 0\n\n @cache_readonly\n def b(self):\n return 1\n\n @cache_writable(resetlist='d')\n def c(self):\n return 2\n\n @cache_writable(resetlist=('e', 'f'))\n def d(self):\n return self.c + 1\n\n @cache_readonly\n def e(self):\n return 4\n\n @cache_readonly\n def f(self):\n return self.e + 1\n\n ex = Example()\n\n # Try accessing/setting a readonly attribute\n assert_equal(ex.__dict__, dict(a=0, _cache={}))\n\n b = ex.b\n assert_equal(b, 1)\n assert_equal(ex.__dict__, dict(a=0, _cache=dict(b=1,)))\n # assert_equal(ex.__dict__, dict(a=0, b=1, _cache=dict(b=1)))\n\n with pytest.warns(CacheWriteWarning):\n ex.b = -1\n\n assert_equal(ex._cache, dict(b=1,))\n\n # Try accessing/resetting a cachewritable attribute\n c = ex.c\n assert_equal(c, 2)\n assert_equal(ex._cache, dict(b=1, c=2))\n d = ex.d\n assert_equal(d, 3)\n assert_equal(ex._cache, dict(b=1, c=2, d=3))\n ex.c = 0\n assert_equal(ex._cache, dict(b=1, c=0, d=None, e=None, f=None))\n d = ex.d\n assert_equal(ex._cache, dict(b=1, c=0, d=1, e=None, f=None))\n ex.d = 5\n assert_equal(ex._cache, dict(b=1, c=0, d=5, e=None, f=None))\n",
"\"\"\"\nTests for _representation and _kalman_filter modules\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\nReferences\n----------\n\nKim, Chang-Jin, and Charles R. Nelson. 1999.\n\"State-Space Models with Regime Switching:\nClassical and Gibbs-Sampling Approaches with Applications\".\nMIT Press Books. The MIT Press.\n\nHamilton, James D. 1994.\nTime Series Analysis.\nPrinceton, N.J.: Princeton University Press.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\nfrom statsmodels.compat import cPickle\n\nfrom distutils.version import LooseVersion\nimport copy\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport pytest\n\nfrom scipy.linalg.blas import find_best_blas_type\nfrom scipy.linalg import solve_discrete_lyapunov\nfrom statsmodels.tsa.statespace.mlemodel import MLEModel\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.statespace import _representation, _kalman_filter\nfrom .results import results_kalman_filter\nfrom numpy.testing import assert_almost_equal, assert_allclose\n\nprefix_statespace_map = {\n 's': _representation.sStatespace, 'd': _representation.dStatespace,\n 'c': _representation.cStatespace, 'z': _representation.zStatespace\n}\nprefix_kalman_filter_map = {\n 's': _kalman_filter.sKalmanFilter, 'd': _kalman_filter.dKalmanFilter,\n 'c': _kalman_filter.cKalmanFilter, 'z': _kalman_filter.zKalmanFilter\n}\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\n\n\nclass Clark1987(object):\n \"\"\"\n Clark's (1987) univariate unobserved components model of real GDP (as\n presented in Kim and Nelson, 1999)\n\n Test data produced using GAUSS code described in Kim and Nelson (1999) and\n found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm\n\n See `results.results_kalman_filter` for more information.\n \"\"\"\n @classmethod\n def setup_class(cls, dtype=float, conserve_memory=0, loglikelihood_burn=0):\n cls.true = results_kalman_filter.uc_uni\n cls.true_states = pd.DataFrame(cls.true['states'])\n\n # GDP, Quarterly, 1947.1 - 1995.3\n data = pd.DataFrame(\n cls.true['data'],\n index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),\n columns=['GDP']\n )\n data['lgdp'] = np.log(data['GDP'])\n\n # Parameters\n cls.conserve_memory = conserve_memory\n cls.loglikelihood_burn = loglikelihood_burn\n\n # Observed data\n cls.obs = np.array(data['lgdp'], ndmin=2, dtype=dtype, order=\"F\")\n\n # Measurement equation\n cls.k_endog = k_endog = 1 # dimension of observed data\n # design matrix\n cls.design = np.zeros((k_endog, 4, 1), dtype=dtype, order=\"F\")\n cls.design[:, :, 0] = [1, 1, 0, 0]\n # observation intercept\n cls.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order=\"F\")\n # observation covariance matrix\n cls.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order=\"F\")\n\n # Transition equation\n cls.k_states = k_states = 4 # dimension of state space\n # transition matrix\n cls.transition = np.zeros((k_states, k_states, 1),\n dtype=dtype, order=\"F\")\n cls.transition[([0, 0, 1, 1, 2, 3],\n [0, 3, 1, 2, 1, 3],\n [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]\n # state intercept\n cls.state_intercept = np.zeros((k_states, 1), dtype=dtype, order=\"F\")\n # selection matrix\n cls.selection = np.asfortranarray(np.eye(k_states)[:, :, None],\n dtype=dtype)\n # state covariance matrix\n cls.state_cov = np.zeros((k_states, k_states, 1),\n dtype=dtype, order=\"F\")\n\n # Initialization: Diffuse priors\n cls.initial_state = np.zeros((k_states,), dtype=dtype, order=\"F\")\n cls.initial_state_cov = 
np.asfortranarray(np.eye(k_states)*100,\n dtype=dtype)\n\n # Update matrices with given parameters\n (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(\n cls.true['parameters'], dtype=dtype\n )\n cls.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]\n cls.state_cov[\n np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [\n sigma_v**2, sigma_e**2, 0, sigma_w**2\n ]\n\n # Initialization: modification\n # Due to the difference in the way Kim and Nelson (1999) and Durbin\n # and Koopman (2012) define the order of the Kalman filter routines,\n # we need to modify the initial state covariance matrix to match\n # Kim and Nelson's results, since the *Statespace models follow Durbin\n # and Koopman.\n cls.initial_state_cov = np.asfortranarray(\n np.dot(\n np.dot(cls.transition[:, :, 0], cls.initial_state_cov),\n cls.transition[:, :, 0].T\n )\n )\n\n @classmethod\n def init_filter(cls):\n # Use the appropriate Statespace model\n prefix = find_best_blas_type((cls.obs,))\n klass = prefix_statespace_map[prefix[0]]\n\n # Instantiate the statespace model\n model = klass(\n cls.obs, cls.design, cls.obs_intercept, cls.obs_cov,\n cls.transition, cls.state_intercept, cls.selection,\n cls.state_cov\n )\n model.initialize_known(cls.initial_state, cls.initial_state_cov)\n\n # Initialize the appropriate Kalman filter\n klass = prefix_kalman_filter_map[prefix[0]]\n kfilter = klass(model, conserve_memory=cls.conserve_memory,\n loglikelihood_burn=cls.loglikelihood_burn)\n\n return model, kfilter\n\n @classmethod\n def run_filter(cls):\n # Filter the data\n cls.filter()\n\n # Get results\n return {\n 'loglike': lambda burn: np.sum(cls.filter.loglikelihood[burn:]),\n 'state': np.array(cls.filter.filtered_state),\n }\n\n def test_loglike(self):\n assert_almost_equal(\n self.result['loglike'](self.true['start']), self.true['loglike'], 5\n )\n\n def test_filtered_state(self):\n assert_almost_equal(\n self.result['state'][0][self.true['start']:],\n self.true_states.iloc[:, 0], 4\n )\n assert_almost_equal(\n self.result['state'][1][self.true['start']:],\n self.true_states.iloc[:, 1], 4\n )\n assert_almost_equal(\n self.result['state'][3][self.true['start']:],\n self.true_states.iloc[:, 2], 4\n )\n\n def test_pickled_filter(self):\n pickled = cPickle.loads(cPickle.dumps(self.filter))\n # Run the filters\n self.filter()\n pickled()\n\n assert id(filter) != id(pickled)\n assert_allclose(np.array(self.filter.filtered_state),\n np.array(pickled.filtered_state))\n assert_allclose(np.array(self.filter.loglikelihood),\n np.array(pickled.loglikelihood))\n\n def test_copied_filter(self):\n copied = copy.deepcopy(self.filter)\n # Run the filters\n self.filter()\n copied()\n\n assert id(filter) != id(copied)\n assert_allclose(np.array(self.filter.filtered_state),\n np.array(copied.filtered_state))\n\n assert_allclose(np.array(self.filter.loglikelihood),\n np.array(copied.loglikelihood))\n\n\nclass TestClark1987Single(Clark1987):\n \"\"\"\n Basic single precision test for the loglikelihood and filtered states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n pytest.skip('Not implemented')\n super(TestClark1987Single, cls).setup_class(\n dtype=np.float32, conserve_memory=0\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n def test_loglike(self):\n assert_allclose(\n self.result['loglike'](self.true['start']), self.true['loglike'],\n rtol=1e-3\n )\n\n def test_filtered_state(self):\n assert_allclose(\n self.result['state'][0][self.true['start']:],\n self.true_states.iloc[:, 0],\n 
atol=1e-2\n )\n assert_allclose(\n self.result['state'][1][self.true['start']:],\n self.true_states.iloc[:, 1],\n atol=1e-2\n )\n assert_allclose(\n self.result['state'][3][self.true['start']:],\n self.true_states.iloc[:, 2],\n atol=1e-2\n )\n\n\nclass TestClark1987Double(Clark1987):\n \"\"\"\n Basic double precision test for the loglikelihood and filtered states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987Double, cls).setup_class(\n dtype=float, conserve_memory=0\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1987SingleComplex(Clark1987):\n \"\"\"\n Basic single precision complex test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n pytest.skip('Not implemented')\n super(TestClark1987SingleComplex, cls).setup_class(\n dtype=np.complex64, conserve_memory=0\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n def test_loglike(self):\n assert_allclose(\n self.result['loglike'](self.true['start']), self.true['loglike'],\n rtol=1e-3\n )\n\n def test_filtered_state(self):\n assert_allclose(\n self.result['state'][0][self.true['start']:],\n self.true_states.iloc[:, 0],\n atol=1e-2\n )\n assert_allclose(\n self.result['state'][1][self.true['start']:],\n self.true_states.iloc[:, 1],\n atol=1e-2\n )\n assert_allclose(\n self.result['state'][3][self.true['start']:],\n self.true_states.iloc[:, 2],\n atol=1e-2\n )\n\n\nclass TestClark1987DoubleComplex(Clark1987):\n \"\"\"\n Basic double precision complex test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987DoubleComplex, cls).setup_class(\n dtype=complex, conserve_memory=0\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1987Conserve(Clark1987):\n \"\"\"\n Memory conservation test for the loglikelihood and filtered states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987Conserve, cls).setup_class(\n dtype=float, conserve_memory=0x01 | 0x02\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass Clark1987Forecast(Clark1987):\n \"\"\"\n Forecasting test for the loglikelihood and filtered states.\n \"\"\"\n @classmethod\n def setup_class(cls, dtype=float, nforecast=100, conserve_memory=0):\n super(Clark1987Forecast, cls).setup_class(\n dtype, conserve_memory\n )\n cls.nforecast = nforecast\n\n # Add missing observations to the end (to forecast)\n cls._obs = cls.obs\n cls.obs = np.array(np.r_[cls.obs[0, :], [np.nan]*nforecast],\n ndmin=2, dtype=dtype, order=\"F\")\n\n def test_filtered_state(self):\n assert_almost_equal(\n self.result['state'][0][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 0], 4\n )\n assert_almost_equal(\n self.result['state'][1][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 1], 4\n )\n assert_almost_equal(\n self.result['state'][3][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 2], 4\n )\n\n\nclass TestClark1987ForecastDouble(Clark1987Forecast):\n \"\"\"\n Basic double forecasting test for the loglikelihood and filtered states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987ForecastDouble, cls).setup_class()\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1987ForecastDoubleComplex(Clark1987Forecast):\n \"\"\"\n Basic double complex forecasting test for the loglikelihood and filtered\n 
states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987ForecastDoubleComplex, cls).setup_class(\n dtype=complex\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1987ForecastConserve(Clark1987Forecast):\n \"\"\"\n Memory conservation forecasting test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987ForecastConserve, cls).setup_class(\n dtype=float, conserve_memory=0x01 | 0x02\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1987ConserveAll(Clark1987):\n \"\"\"\n Memory conservation forecasting test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1987ConserveAll, cls).setup_class(\n dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08\n )\n cls.loglikelihood_burn = cls.true['start']\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n def test_loglike(self):\n assert_almost_equal(\n self.result['loglike'](0), self.true['loglike'], 5\n )\n\n def test_filtered_state(self):\n end = self.true_states.shape[0]\n assert_almost_equal(\n self.result['state'][0][-1],\n self.true_states.iloc[end-1, 0], 4\n )\n assert_almost_equal(\n self.result['state'][1][-1],\n self.true_states.iloc[end-1, 1], 4\n )\n\n\nclass Clark1989(object):\n \"\"\"\n Clark's (1989) bivariate unobserved components model of real GDP (as\n presented in Kim and Nelson, 1999)\n\n Tests two-dimensional observation data.\n\n Test data produced using GAUSS code described in Kim and Nelson (1999) and\n found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm\n\n See `results.results_kalman_filter` for more information.\n \"\"\"\n @classmethod\n def setup_class(cls, dtype=float, conserve_memory=0, loglikelihood_burn=0):\n cls.true = results_kalman_filter.uc_bi\n cls.true_states = pd.DataFrame(cls.true['states'])\n\n # GDP and Unemployment, Quarterly, 1948.1 - 1995.3\n data = pd.DataFrame(\n cls.true['data'],\n index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),\n columns=['GDP', 'UNEMP']\n )[4:]\n data['GDP'] = np.log(data['GDP'])\n data['UNEMP'] = (data['UNEMP']/100)\n\n # Observed data\n cls.obs = np.array(data, ndmin=2, dtype=dtype, order=\"C\").T\n\n # Parameters\n cls.k_endog = k_endog = 2 # dimension of observed data\n cls.k_states = k_states = 6 # dimension of state space\n cls.conserve_memory = conserve_memory\n cls.loglikelihood_burn = loglikelihood_burn\n\n # Measurement equation\n\n # design matrix\n cls.design = np.zeros((k_endog, k_states, 1), dtype=dtype, order=\"F\")\n cls.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]\n # observation intercept\n cls.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order=\"F\")\n # observation covariance matrix\n cls.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order=\"F\")\n\n # Transition equation\n\n # transition matrix\n cls.transition = np.zeros((k_states, k_states, 1),\n dtype=dtype, order=\"F\")\n cls.transition[([0, 0, 1, 1, 2, 3, 4, 5],\n [0, 4, 1, 2, 1, 2, 4, 5],\n [0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1]\n # state intercept\n cls.state_intercept = np.zeros((k_states, 1), dtype=dtype, order=\"F\")\n # selection matrix\n cls.selection = np.asfortranarray(np.eye(k_states)[:, :, None],\n dtype=dtype)\n # state covariance matrix\n cls.state_cov = np.zeros((k_states, k_states, 1),\n dtype=dtype, order=\"F\")\n\n # Initialization: Diffuse priors\n cls.initial_state = 
np.zeros((k_states,), dtype=dtype)\n cls.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,\n dtype=dtype)\n\n # Update matrices with given parameters\n (sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,\n phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(\n cls.true['parameters'], dtype=dtype\n )\n cls.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [\n alpha_1, alpha_2, alpha_3\n ]\n cls.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]\n cls.obs_cov[1, 1, 0] = sigma_ec**2\n cls.state_cov[\n np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [\n sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2\n ]\n\n # Initialization: modification\n # Due to the difference in the way Kim and Nelson (1999) and Drubin\n # and Koopman (2012) define the order of the Kalman filter routines,\n # we need to modify the initial state covariance matrix to match\n # Kim and Nelson's results, since the *Statespace models follow Durbin\n # and Koopman.\n cls.initial_state_cov = np.asfortranarray(\n np.dot(\n np.dot(cls.transition[:, :, 0], cls.initial_state_cov),\n cls.transition[:, :, 0].T\n )\n )\n\n @classmethod\n def init_filter(cls):\n # Use the appropriate Statespace model\n prefix = find_best_blas_type((cls.obs,))\n klass = prefix_statespace_map[prefix[0]]\n\n # Instantiate the statespace model\n model = klass(\n cls.obs, cls.design, cls.obs_intercept, cls.obs_cov,\n cls.transition, cls.state_intercept, cls.selection,\n cls.state_cov\n )\n model.initialize_known(cls.initial_state, cls.initial_state_cov)\n\n # Initialize the appropriate Kalman filter\n klass = prefix_kalman_filter_map[prefix[0]]\n kfilter = klass(model, conserve_memory=cls.conserve_memory,\n loglikelihood_burn=cls.loglikelihood_burn)\n\n return model, kfilter\n\n @classmethod\n def run_filter(cls):\n # Filter the data\n cls.filter()\n\n # Get results\n return {\n 'loglike': lambda burn: np.sum(cls.filter.loglikelihood[burn:]),\n 'state': np.array(cls.filter.filtered_state),\n }\n\n def test_loglike(self):\n assert_almost_equal(\n # self.result['loglike'](self.true['start']),\n self.result['loglike'](0),\n self.true['loglike'], 2\n )\n\n def test_filtered_state(self):\n assert_almost_equal(\n self.result['state'][0][self.true['start']:],\n self.true_states.iloc[:, 0], 4\n )\n assert_almost_equal(\n self.result['state'][1][self.true['start']:],\n self.true_states.iloc[:, 1], 4\n )\n assert_almost_equal(\n self.result['state'][4][self.true['start']:],\n self.true_states.iloc[:, 2], 4\n )\n assert_almost_equal(\n self.result['state'][5][self.true['start']:],\n self.true_states.iloc[:, 3], 4\n )\n\n\nclass TestClark1989(Clark1989):\n \"\"\"\n Basic double precision test for the loglikelihood and filtered\n states with two-dimensional observation vector.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1989, cls).setup_class(dtype=float, conserve_memory=0)\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1989Conserve(Clark1989):\n \"\"\"\n Memory conservation test for the loglikelihood and filtered states with\n two-dimensional observation vector.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1989Conserve, cls).setup_class(\n dtype=float, conserve_memory=0x01 | 0x02\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass Clark1989Forecast(Clark1989):\n \"\"\"\n Memory conservation test for the loglikelihood and filtered states with\n two-dimensional observation vector.\n \"\"\"\n @classmethod\n def 
setup_class(cls, dtype=float, nforecast=100, conserve_memory=0):\n super(Clark1989Forecast, cls).setup_class(dtype, conserve_memory)\n cls.nforecast = nforecast\n\n # Add missing observations to the end (to forecast)\n cls._obs = cls.obs\n cls.obs = np.array(\n np.c_[\n cls._obs,\n np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)\n ],\n ndmin=2, dtype=dtype, order=\"F\"\n )\n\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n def test_filtered_state(self):\n assert_almost_equal(\n self.result['state'][0][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 0], 4\n )\n assert_almost_equal(\n self.result['state'][1][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 1], 4\n )\n assert_almost_equal(\n self.result['state'][4][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 2], 4\n )\n assert_almost_equal(\n self.result['state'][5][self.true['start']:-self.nforecast],\n self.true_states.iloc[:, 3], 4\n )\n\n\nclass TestClark1989ForecastDouble(Clark1989Forecast):\n \"\"\"\n Basic double forecasting test for the loglikelihood and filtered states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1989ForecastDouble, cls).setup_class()\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1989ForecastDoubleComplex(Clark1989Forecast):\n \"\"\"\n Basic double complex forecasting test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1989ForecastDoubleComplex, cls).setup_class(\n dtype=complex\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1989ForecastConserve(Clark1989Forecast):\n \"\"\"\n Memory conservation forecasting test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1989ForecastConserve, cls).setup_class(\n dtype=float, conserve_memory=0x01 | 0x02\n )\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n\nclass TestClark1989ConserveAll(Clark1989):\n \"\"\"\n Memory conservation forecasting test for the loglikelihood and filtered\n states.\n \"\"\"\n @classmethod\n def setup_class(cls):\n super(TestClark1989ConserveAll, cls).setup_class(\n dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08,\n )\n # cls.loglikelihood_burn = cls.true['start']\n cls.loglikelihood_burn = 0\n cls.model, cls.filter = cls.init_filter()\n cls.result = cls.run_filter()\n\n def test_loglike(self):\n assert_almost_equal(\n self.result['loglike'](0), self.true['loglike'], 2\n )\n\n def test_filtered_state(self):\n end = self.true_states.shape[0]\n assert_almost_equal(\n self.result['state'][0][-1],\n self.true_states.iloc[end-1, 0], 4\n )\n assert_almost_equal(\n self.result['state'][1][-1],\n self.true_states.iloc[end-1, 1], 4\n )\n assert_almost_equal(\n self.result['state'][4][-1],\n self.true_states.iloc[end-1, 2], 4\n )\n assert_almost_equal(\n self.result['state'][5][-1],\n self.true_states.iloc[end-1, 3], 4\n )\n\n\ndef check_stationary_initialization_1dim(dtype=float):\n endog = np.zeros(10, dtype=dtype)\n\n # 1-dimensional example\n mod = MLEModel(endog, k_states=1, k_posdef=1)\n mod.ssm.initialize_stationary()\n intercept = np.array([2.3], dtype=dtype)\n phi = np.diag([0.9]).astype(dtype)\n sigma2 = np.diag([1.3]).astype(dtype)\n\n mod['state_intercept'] = intercept\n mod['transition'] = phi\n mod['selection'] = np.eye(1).astype(dtype)\n mod['state_cov'] = sigma2\n\n 
mod.ssm._initialize_filter()\n mod.ssm._initialize_state()\n\n _statespace = mod.ssm._statespace\n initial_state = np.array(_statespace.initial_state)\n initial_state_cov = np.array(_statespace.initial_state_cov)\n # precision reductions only required for float complex case\n\n # mean = intercept + phi * mean\n # intercept = (1 - phi) * mean\n # mean = intercept / (1 - phi)\n assert_allclose(initial_state, intercept / (1 - phi[0, 0]))\n desired = np.linalg.inv(np.eye(1) - phi).dot(intercept)\n assert_allclose(initial_state, desired)\n # var = phi**2 var + sigma2\n # var = sigma2 / (1 - phi**2)\n assert_allclose(initial_state_cov, sigma2 / (1 - phi**2))\n assert_allclose(initial_state_cov, solve_discrete_lyapunov(phi, sigma2))\n\n\ndef check_stationary_initialization_2dim(dtype=float):\n endog = np.zeros(10, dtype=dtype)\n # 2-dimensional example\n mod = MLEModel(endog, k_states=2, k_posdef=2)\n mod.ssm.initialize_stationary()\n intercept = np.array([2.3, -10.2], dtype=dtype)\n phi = np.array([[0.8, 0.1],\n [-0.2, 0.7]], dtype=dtype)\n sigma2 = np.array([[1.4, -0.2],\n [-0.2, 4.5]], dtype=dtype)\n\n mod['state_intercept'] = intercept\n mod['transition'] = phi\n mod['selection'] = np.eye(2).astype(dtype)\n mod['state_cov'] = sigma2\n\n mod.ssm._initialize_filter()\n mod.ssm._initialize_state()\n\n _statespace = mod.ssm._statespace\n initial_state = np.array(_statespace.initial_state)\n initial_state_cov = np.array(_statespace.initial_state_cov)\n\n desired = np.linalg.solve(np.eye(2).astype(dtype) - phi, intercept)\n assert_allclose(initial_state, desired)\n desired = solve_discrete_lyapunov(phi, sigma2)\n # precision reductions only required for single precision float / complex\n assert_allclose(initial_state_cov, desired, atol=1e-5)\n\n\ndef test_stationary_initialization():\n check_stationary_initialization_1dim(np.float32)\n check_stationary_initialization_1dim(np.float64)\n check_stationary_initialization_1dim(np.complex64)\n check_stationary_initialization_1dim(np.complex128)\n\n check_stationary_initialization_2dim(np.float32)\n check_stationary_initialization_2dim(np.float64)\n check_stationary_initialization_2dim(np.complex64)\n check_stationary_initialization_2dim(np.complex128)\n",
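The stationary-initialization checks at the end of the entry above reduce to two closed-form identities for a stationary state equation x_t = c + T x_{t-1} + eta_t: the unconditional mean solves mu = (I - T)^{-1} c and the unconditional covariance solves the discrete Lyapunov equation P = T P T' + Q. A standalone sketch using the same 2x2 numbers as the check:

```python
import numpy as np
from scipy.linalg import solve_discrete_lyapunov

# Same transition, intercept and state covariance as the 2-dim check above
T = np.array([[0.8, 0.1],
              [-0.2, 0.7]])
c = np.array([2.3, -10.2])
Q = np.array([[1.4, -0.2],
              [-0.2, 4.5]])

mu = np.linalg.solve(np.eye(2) - T, c)      # unconditional state mean
P = solve_discrete_lyapunov(T, Q)           # unconditional state covariance

print(mu)
print(np.allclose(mu, c + T @ mu))          # mu is a fixed point of the recursion
print(np.allclose(P, T @ P @ T.T + Q))      # P satisfies the Lyapunov equation
```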
"import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom statsmodels.imputation.bayes_mi import BayesGaussMI, MI\nfrom numpy.testing import assert_allclose, assert_equal\n\n\ndef test_pat():\n\n x = np.asarray([[1, np.nan, 3], [np.nan, 2, np.nan], [3, np.nan, 0],\n [np.nan, 1, np.nan], [3, 2, 1]])\n bm = BayesGaussMI(x)\n assert_allclose(bm.patterns[0], np.r_[0, 2])\n assert_allclose(bm.patterns[1], np.r_[1, 3])\n\n\ndef test_2x2():\n\n # Generate correlated data with mean and variance\n np.random.seed(3434)\n x = np.random.normal(size=(1000, 2))\n r = 0.5\n x[:, 1] = r*x[:, 0] + np.sqrt(1-r**2)*x[:, 1]\n x[:, 0] *= 2\n x[:, 1] *= 3\n x[:, 0] += 1\n x[:, 1] -= 2\n\n # Introduce some missing values\n u = np.random.normal(size=x.shape[0])\n x[u > 1, 0] = np.nan\n u = np.random.normal(size=x.shape[0])\n x[u > 1, 1] = np.nan\n\n bm = BayesGaussMI(x)\n\n # Burn-in\n for k in range(500):\n bm.update()\n\n # Estimate the posterior mean\n mean = 0\n cov = 0\n dmean = 0\n dcov = 0\n for k in range(500):\n bm.update()\n mean += bm.mean\n cov += bm.cov\n dmean += bm.data.mean(0)\n dcov += np.cov(bm.data.T)\n mean /= 500\n cov /= 500\n dmean /= 500\n dcov /= 500\n\n assert_allclose(mean, np.r_[1, -2], 0.1)\n assert_allclose(dmean, np.r_[1, -2], 0.1)\n assert_allclose(cov, np.asarray([[4, 6*r], [6*r, 9]]), 0.1)\n assert_allclose(dcov, np.asarray([[4, 6*r], [6*r, 9]]), 0.1)\n\n\ndef test_MI():\n\n np.random.seed(414)\n x = np.random.normal(size=(200, 4))\n x[[1, 3, 9], 0] = np.nan\n x[[1, 4, 3], 1] = np.nan\n x[[2, 11, 21], 2] = np.nan\n x[[11, 22, 99], 3] = np.nan\n\n def model_args_fn(x):\n # Return endog, exog\n # Regress x0 on x1 and x2\n if type(x) is np.ndarray:\n return (x[:, 0], x[:, 1:])\n else:\n return (x.iloc[:, 0].values, x.iloc[:, 1:].values)\n\n for j in (0, 1):\n np.random.seed(2342)\n imp = BayesGaussMI(x.copy())\n mi = MI(imp, sm.OLS, model_args_fn, burn=0)\n r = mi.fit()\n r.summary() # smoke test\n # TODO: why does the test tolerance need to be so slack?\n # There is unexpected variation across versions on travis.\n assert_allclose(r.params, np.r_[\n -0.05347919, -0.02479701, 0.10075517], 0.25, 0)\n\n c = np.asarray([[0.00418232, 0.00029746, -0.00035057],\n [0.00029746, 0.00407264, 0.00019496],\n [-0.00035057, 0.00019496, 0.00509413]])\n assert_allclose(r.cov_params(), c, 0.3, 0)\n\n # Test with ndarray and pandas input\n x = pd.DataFrame(x)\n\n\ndef test_MI_stat():\n # Test for MI where we know statistically what should happen. The\n # analysis model is x0 ~ x1 with standard error 1/sqrt(n) for the\n # slope parameter. The nominal n is 1000, but half of the cases\n # have missing x1. Then we introduce x2 that is either\n # independent of x1, or almost perfectly correlated with x1. 
In\n # the first case the SE is 1/sqrt(500), in the second case the SE\n # is 1/sqrt(1000).\n\n np.random.seed(414)\n z = np.random.normal(size=(1000, 3))\n z[:, 0] += 0.5*z[:, 1]\n\n # Control the degree to which x2 proxies for x1\n exp = [1/np.sqrt(500), 1/np.sqrt(1000)]\n fmi = [0.5, 0]\n for j, r in enumerate((0, 0.9999)):\n\n x = z.copy()\n x[:, 2] = r*x[:, 1] + np.sqrt(1 - r**2)*x[:, 2]\n x[0:500, 1] = np.nan\n\n def model_args(x):\n # Return endog, exog\n # Regress x1 on x2\n return (x[:, 0], x[:, 1])\n\n np.random.seed(2342)\n imp = BayesGaussMI(x.copy())\n mi = MI(imp, sm.OLS, model_args, nrep=100, skip=10)\n r = mi.fit()\n\n # Check the SE\n d = np.abs(r.bse[0] - exp[j]) / exp[j]\n assert(d < 0.03)\n\n # Check the FMI\n d = np.abs(r.fmi[0] - fmi[j])\n assert(d < 0.05)\n\n\ndef test_mi_formula():\n\n np.random.seed(414)\n x = np.random.normal(size=(200, 4))\n x[[1, 3, 9], 0] = np.nan\n x[[1, 4, 3], 1] = np.nan\n x[[2, 11, 21], 2] = np.nan\n x[[11, 22, 99], 3] = np.nan\n df = pd.DataFrame({\"y\": x[:, 0], \"x1\": x[:, 1],\n \"x2\": x[:, 2], \"x3\": x[:, 3]})\n fml = \"y ~ 0 + x1 + x2 + x3\"\n\n def model_kwds_fn(x):\n return {\"data\": x}\n\n np.random.seed(2342)\n imp = BayesGaussMI(df.copy())\n mi = MI(imp, sm.OLS, formula=fml, burn=0,\n model_kwds_fn=model_kwds_fn)\n\n results_cb = lambda x: x\n\n r = mi.fit(results_cb=results_cb)\n r.summary() # smoke test\n # TODO: why does the test tolerance need to be so slack?\n # There is unexpected variation across versions on travis.\n assert_allclose(r.params, np.r_[\n -0.05347919, -0.02479701, 0.10075517], 0.25, 0)\n\n c = np.asarray([[0.00418232, 0.00029746, -0.00035057],\n [0.00029746, 0.00407264, 0.00019496],\n [-0.00035057, 0.00019496, 0.00509413]])\n assert_allclose(r.cov_params(), c, 0.3, 0)\n\n assert_equal(len(r.results), 20)",
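The entry above drives `BayesGaussMI` directly (Gibbs updates via `update()`) and through the `MI` wrapper with an OLS analysis model. A condensed sketch of that second pattern (the data, seed and number of imputations are arbitrary):

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.imputation.bayes_mi import BayesGaussMI, MI

np.random.seed(414)                          # arbitrary seed
x = np.random.normal(size=(200, 3))
x[::7, 1] = np.nan                           # introduce some missing values

def model_args_fn(data):
    # endog is column 0, exog are the remaining columns (as in the tests above)
    return data[:, 0], data[:, 1:]

imp = BayesGaussMI(x)
mi = MI(imp, sm.OLS, model_args_fn, nrep=10, burn=10)
result = mi.fit()
print(result.params)
print(result.summary())
```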
"\"\"\"\nAssesment of Generalized Estimating Equations using simulation.\n\nThis script checks Poisson models.\n\nSee the generated file \"gee_poisson_simulation_check.txt\" for results.\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nfrom statsmodels.genmod.families import Poisson\nfrom .gee_gaussian_simulation_check import GEE_simulator\nfrom statsmodels.genmod.generalized_estimating_equations import GEE\nfrom statsmodels.genmod.cov_struct import Exchangeable,Independence\n\n\nclass Exchangeable_simulator(GEE_simulator):\n \"\"\"\n Simulate exchangeable Poisson data.\n\n The data within a cluster are simulated as y_i = z_c + z_i. The\n z_c, and {z_i} are independent Poisson random variables with\n expected values e_c and {e_i}, respectively. In order for the\n pairwise correlation to be equal to `f` for all pairs, we need\n\n e_c / sqrt((e_c + e_i) * (e_c + e_j)) = f for all i, j.\n\n By setting all e_i = e within a cluster, these equations can be\n satisfied. We thus need\n\n e_c * (1 - f) = f * e,\n\n which can be solved (non-uniquely) for e and e_c.\n \"\"\"\n\n scale_inv = 1.\n\n def print_dparams(self, dparams_est):\n OUT.write(\"Estimated common pairwise correlation: %8.4f\\n\" %\n dparams_est[0])\n OUT.write(\"True common pairwise correlation: %8.4f\\n\" %\n self.dparams[0])\n OUT.write(\"Estimated inverse scale parameter: %8.4f\\n\" %\n dparams_est[1])\n OUT.write(\"True inverse scale parameter: %8.4f\\n\" %\n self.scale_inv)\n OUT.write(\"\\n\")\n\n\n def simulate(self):\n\n endog, exog, group, time = [], [], [], []\n\n # Get a basis for the orthogonal complement to params.\n f = np.sum(self.params**2)\n u,s,vt = np.linalg.svd(np.eye(len(self.params)) -\n np.outer(self.params, self.params) / f)\n params0 = u[:,np.flatnonzero(s > 1e-6)]\n\n for i in range(self.ngroups):\n\n gsize = np.random.randint(self.group_size_range[0],\n self.group_size_range[1])\n\n group.append([i,] * gsize)\n\n time1 = np.random.normal(size=(gsize, 2))\n time.append(time1)\n\n e_c = np.random.uniform(low=1, high=10)\n e = e_c * (1 - self.dparams[0]) / self.dparams[0]\n\n common = np.random.poisson(e_c)\n unique = np.random.poisson(e, gsize)\n endog1 = common + unique\n endog.append(endog1)\n\n lpr = np.log(e_c + e) * np.ones(gsize)\n\n # Create an exog matrix so that E[Y] = log(dot(exog1, params))\n exog1 = np.outer(lpr, self.params) / np.sum(self.params**2)\n emat = np.random.normal(size=(len(lpr), params0.shape[1]))\n exog1 += np.dot(emat, params0.T)\n\n exog.append(exog1)\n\n self.exog = np.concatenate(exog, axis=0)\n self.endog = np.concatenate(endog)\n self.time = np.concatenate(time, axis=0)\n self.group = np.concatenate(group)\n\n\nclass Overdispersed_simulator(GEE_simulator):\n \"\"\"\n Use the negative binomial distribution to check GEE estimation\n using the overdispered Poisson model with independent dependence.\n\n Simulating\n X = np.random.negative_binomial(n, p, size)\n then EX = (1 - p) * n / p\n Var(X) = (1 - p) * n / p**2\n\n These equations can be inverted as follows:\n\n p = E / V\n n = E * p / (1 - p)\n\n dparams[0] is the common correlation coefficient\n \"\"\"\n\n\n def print_dparams(self, dparams_est):\n OUT.write(\"Estimated inverse scale parameter: %8.4f\\n\" %\n dparams_est[0])\n OUT.write(\"True inverse scale parameter: %8.4f\\n\" %\n self.scale_inv)\n OUT.write(\"\\n\")\n\n\n def simulate(self):\n\n endog, exog, group, time = [], [], [], []\n\n # Get a basis for the orthogonal complement to params.\n f = np.sum(self.params**2)\n u,s,vt = 
np.linalg.svd(np.eye(len(self.params)) -\n np.outer(self.params, self.params) / f)\n params0 = u[:,np.flatnonzero(s > 1e-6)]\n\n for i in range(self.ngroups):\n\n gsize = np.random.randint(self.group_size_range[0],\n self.group_size_range[1])\n\n group.append([i,] * gsize)\n\n time1 = np.random.normal(size=(gsize, 2))\n time.append(time1)\n\n exog1 = np.random.normal(size=(gsize, len(self.params)))\n exog.append(exog1)\n\n E = np.exp(np.dot(exog1, self.params))\n V = E * self.scale_inv\n\n p = E / V\n n = E * p / (1 - p)\n\n endog1 = np.random.negative_binomial(n, p, gsize)\n endog.append(endog1)\n\n self.exog = np.concatenate(exog, axis=0)\n self.endog = np.concatenate(endog)\n self.time = np.concatenate(time, axis=0)\n self.group = np.concatenate(group)\n\n\n\ndef gendat_exchangeable():\n exs = Exchangeable_simulator()\n exs.params = np.r_[2., 0.2, 0.2, -0.1, -0.2]\n exs.ngroups = 200\n exs.dparams = [0.3,]\n exs.simulate()\n return exs, Exchangeable()\n\ndef gendat_overdispersed():\n exs = Overdispersed_simulator()\n exs.params = np.r_[2., 0.2, 0.2, -0.1, -0.2]\n exs.ngroups = 200\n exs.scale_inv = 2.\n exs.dparams = []\n exs.simulate()\n return exs, Independence()\n\n\nif __name__ == \"__main__\":\n\n np.set_printoptions(formatter={'all': lambda x: \"%8.3f\" % x},\n suppress=True)\n\n OUT = open(\"gee_poisson_simulation_check.txt\", \"w\")\n\n nrep = 100\n\n gendats = [gendat_exchangeable, gendat_overdispersed]\n\n lhs = np.array([[0., 1, -1, 0, 0],])\n rhs = np.r_[0.0,]\n\n # Loop over data generating models\n for gendat in gendats:\n\n pvalues = []\n params = []\n std_errors = []\n dparams = []\n\n for j in range(nrep):\n\n da, va = gendat()\n ga = Poisson()\n\n # Poisson seems to be more sensitive to starting values,\n # so we run the independence model first.\n md = GEE(da.endog, da.exog, da.group, da.time, ga,\n Independence())\n mdf = md.fit()\n\n md = GEE(da.endog, da.exog, da.group, da.time, ga, va)\n mdf = md.fit(start_params = mdf.params)\n if mdf is None or (not mdf.converged):\n print(\"Failed to converge\")\n continue\n\n scale_inv = 1. 
/ md.estimate_scale()\n dparams.append(np.r_[va.dparams, scale_inv])\n params.append(np.asarray(mdf.params))\n std_errors.append(np.asarray(mdf.standard_errors))\n\n da,va = gendat()\n ga = Poisson()\n\n md = GEE(da.endog, da.exog, da.group, da.time, ga, va,\n constraint=(lhs, rhs))\n mdf = md.fit()\n if mdf is None or (not mdf.converged):\n print(\"Failed to converge\")\n continue\n\n score = md.score_test_results\n pvalue = score[\"p-value\"]\n pvalues.append(pvalue)\n\n dparams_mean = np.array(sum(dparams) / len(dparams))\n OUT.write(\"Results based on %d successful fits out of %d data sets.\\n\\n\"\n % (len(dparams), nrep))\n OUT.write(\"Checking dependence parameters:\\n\")\n da.print_dparams(dparams_mean)\n\n params = np.array(params)\n eparams = params.mean(0)\n sdparams = params.std(0)\n std_errors = np.array(std_errors)\n std_errors = std_errors.mean(0)\n\n OUT.write(\"Checking parameter values:\\n\")\n OUT.write(\"Observed: \")\n OUT.write(np.array_str(eparams) + \"\\n\")\n OUT.write(\"Expected: \")\n OUT.write(np.array_str(da.params) + \"\\n\")\n OUT.write(\"Absolute difference: \")\n OUT.write(np.array_str(eparams - da.params) + \"\\n\")\n OUT.write(\"Relative difference: \")\n OUT.write(np.array_str((eparams - da.params) / da.params)\n + \"\\n\")\n OUT.write(\"\\n\")\n\n OUT.write(\"Checking standard errors\\n\")\n OUT.write(\"Observed: \")\n OUT.write(np.array_str(sdparams) + \"\\n\")\n OUT.write(\"Expected: \")\n OUT.write(np.array_str(std_errors) + \"\\n\")\n OUT.write(\"Absolute difference: \")\n OUT.write(np.array_str(sdparams - std_errors) + \"\\n\")\n OUT.write(\"Relative difference: \")\n OUT.write(np.array_str((sdparams - std_errors) / std_errors)\n + \"\\n\")\n OUT.write(\"\\n\")\n\n pvalues.sort()\n OUT.write(\"Checking constrained estimation:\\n\")\n OUT.write(\"Left hand side:\\n\")\n OUT.write(np.array_str(lhs) + \"\\n\")\n OUT.write(\"Right hand side:\\n\")\n OUT.write(np.array_str(rhs) + \"\\n\")\n OUT.write(\"Observed p-values Expected Null p-values\\n\")\n for q in np.arange(0.1, 0.91, 0.1):\n OUT.write(\"%20.3f %20.3f\\n\" %\n (pvalues[int(q*len(pvalues))], q))\n\n OUT.write(\"=\" * 80 + \"\\n\\n\")\n\n OUT.close()\n",
"import copy\n\nimport numpy as np\nfrom numpy.linalg import pinv\nfrom statsmodels.sandbox import utils_old as utils\n\nclass ContrastResults(object):\n \"\"\"\n Results from looking at a particular contrast of coefficients in\n a parametric model. The class does nothing, it is a container\n for the results from T and F contrasts.\n \"\"\"\n\n def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,\n df_num=None):\n if F is not None:\n self.F = F\n self.df_denom = df_denom\n self.df_num = df_num\n else:\n self.t = t\n self.sd = sd\n self.effect = effect\n self.df_denom = df_denom\n\n def __array__(self):\n if hasattr(self, \"F\"):\n return self.F\n else:\n return self.t\n\n def __str__(self):\n if hasattr(self, 'F'):\n return '<F contrast: F=%s, df_denom=%d, df_num=%d>' % \\\n (repr(self.F), self.df_denom, self.df_num)\n else:\n return '<T contrast: effect=%s, sd=%s, t=%s, df_denom=%d>' % \\\n (repr(self.effect), repr(self.sd), repr(self.t), self.df_denom)\n\n\nclass Contrast(object):\n \"\"\"\n This class is used to construct contrast matrices in regression models.\n They are specified by a (term, formula) pair.\n\n The term, T, is a linear combination of columns of the design\n matrix D=formula(). The matrix attribute is\n a contrast matrix C so that\n\n colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))\n\n where pinv(D) is the generalized inverse of D. Further, the matrix\n\n Tnew = dot(C, D)\n\n is full rank. The rank attribute is the rank of\n\n dot(D, dot(pinv(D), T))\n\n In a regression model, the contrast tests that E(dot(Tnew, Y)) = 0\n for each column of Tnew.\n\n \"\"\"\n\n def __init__(self, term, formula, name=''):\n self.term = term\n self.formula = formula\n if name is '':\n self.name = str(term)\n else:\n self.name = name\n\n def __str__(self):\n return '<contrast:%s>' % \\\n repr({'term':str(self.term), 'formula':str(self.formula)})\n\n def compute_matrix(self, *args, **kw):\n \"\"\"\n Construct a contrast matrix C so that\n\n colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))\n\n where pinv(D) is the generalized inverse of D=self.D=self.formula().\n\n If the design, self.D is already set,\n then evaldesign can be set to False.\n \"\"\"\n\n t = copy.copy(self.term)\n t.namespace = self.formula.namespace\n T = np.transpose(np.array(t(*args, **kw)))\n\n if T.ndim == 1:\n T.shape = (T.shape[0], 1)\n\n self.T = utils.clean0(T)\n\n self.D = self.formula.design(*args, **kw)\n\n self._matrix = contrastfromcols(self.T, self.D)\n try:\n self.rank = self.matrix.shape[1]\n except:\n self.rank = 1\n\n def _get_matrix(self):\n \"\"\"\n This will fail if the formula needs arguments to construct\n the design.\n \"\"\"\n if not hasattr(self, \"_matrix\"):\n self.compute_matrix()\n return self._matrix\n matrix = property(_get_matrix)\n\ndef contrastfromcols(L, D, pseudo=None):\n \"\"\"\n From an n x p design matrix D and a matrix L, tries\n to determine a p x q contrast matrix C which\n determines a contrast of full rank, i.e. the\n n x q matrix\n\n dot(transpose(C), pinv(D))\n\n is full rank.\n\n L must satisfy either L.shape[0] == n or L.shape[1] == p.\n\n If L.shape[0] == n, then L is thought of as representing\n columns in the column space of D.\n\n If L.shape[1] == p, then L is thought of as what is known\n as a contrast matrix. 
In this case, this function returns an estimable\n contrast corresponding to the dot(D, L.T)\n\n Note that this always produces a meaningful contrast, not always\n with the intended properties because q is always non-zero unless\n L is identically 0. That is, it produces a contrast that spans\n the column space of L (after projection onto the column space of D).\n\n \"\"\"\n\n L = np.asarray(L)\n D = np.asarray(D)\n\n n, p = D.shape\n\n if L.shape[0] != n and L.shape[1] != p:\n raise ValueError('shape of L and D mismatched')\n\n if pseudo is None:\n pseudo = pinv(D)\n\n if L.shape[0] == n:\n C = np.dot(pseudo, L).T\n else:\n C = L\n C = np.dot(pseudo, np.dot(D, C.T)).T\n\n Lp = np.dot(D, C.T)\n\n if len(Lp.shape) == 1:\n Lp.shape = (n, 1)\n\n if utils.rank(Lp) != Lp.shape[1]:\n Lp = utils.fullrank(Lp)\n C = np.dot(pseudo, Lp).T\n\n return np.squeeze(C)\n",
"from statsmodels.compat.python import zip\nimport warnings\n\ndef _est_regression_summary():\n #little luck getting this test to pass (It should?), can be used for\n #visual testing of the regression.summary table\n #fixed, might fail at minute changes\n from statsmodels.regression.tests.test_regression import TestOLS\n #from test_regression import TestOLS\n import time\n from string import Template\n t = time.localtime()\n desired = Template(\n''' Summary of Regression Results\n=======================================\n| Dependent Variable: y|\n| Model: OLS|\n| Method: Least Squares|\n| Date: $XXcurrentXdateXX|\n| Time: $XXtimeXXX|\n| # obs: 16.0|\n| Df residuals: 9.0|\n| Df model: 6.0|\n==============================================================================\n| coefficient std. error t-statistic prob. |\n------------------------------------------------------------------------------\n| x1 15.06 84.91 0.1774 0.8631 |\n| x2 -0.03582 0.03349 -1.0695 0.3127 |\n| x3 -2.020 0.4884 -4.1364 0.0025 |\n| x4 -1.033 0.2143 -4.8220 0.0009 |\n| x5 -0.05110 0.2261 -0.2261 0.8262 |\n| x6 1829. 455.5 4.0159 0.0030 |\n| const -3.482e+06 8.904e+05 -3.9108 0.0036 |\n==============================================================================\n| Models stats Residual stats |\n------------------------------------------------------------------------------\n| R-squared: 0.9955 Durbin-Watson: 2.559 |\n| Adjusted R-squared: 0.9925 Omnibus: 0.7486 |\n| F-statistic: 330.3 Prob(Omnibus): 0.6878 |\n| Prob (F-statistic): 4.984e-10 JB: 0.6841 |\n| Log likelihood: -109.6 Prob(JB): 0.7103 |\n| AIC criterion: 233.2 Skew: 0.4200 |\n| BIC criterion: 238.6 Kurtosis: 2.434 |\n------------------------------------------------------------------------------'''\n).substitute(XXcurrentXdateXX = str(time.strftime(\"%a, %d %b %Y\",t)),\n XXtimeXXX = str(time.strftime(\"%H:%M:%S\",t)))\n desired = str(desired)\n aregression = TestOLS()\n TestOLS.setup_class()\n results = aregression.res1\n # be quiet!\n original_filters = warnings.filters[:] # copy original\n warnings.simplefilter(\"ignore\")\n try:\n r_summary = str(results.summary_old())\n finally:\n warnings.filters = original_filters # restore filters\n\n## print('###')\n## print(r_summary)\n## print('###')\n## print(desired)\n## print('###')\n actual = r_summary\n import numpy as np\n actual = '\\n'.join((line.rstrip() for line in actual.split('\\n')))\n# print len(actual), len(desired)\n# print repr(actual)\n# print repr(desired)\n# counter = 0\n# for c1,c2 in zip(actual, desired):\n# if not c1==c2 and counter<20:\n# print c1,c2\n# counter += 1\n np.testing.assert_(actual == desired)\n",
"\n\nfrom __future__ import print_function\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport matplotlib.pyplot as plt\nimport statsmodels.sandbox.tsa.fftarma as fa\nfrom statsmodels.tsa.descriptivestats import TsaDescriptive\nfrom statsmodels.tsa.arma_mle import Arma\n\nx = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)\nd = TsaDescriptive(x)\nd.plot4()\n\n#d.fit(order=(1,1))\nd.fit((1,1), trend='nc')\nprint(d.res.params)\n\nmodc = Arma(x)\nresls = modc.fit(order=(1,1))\nprint(resls[0])\nrescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])\nprint(rescm.params)\n\n#decimal 1 corresponds to threshold of 5% difference\nassert_almost_equal(resls[0] / d.res.params, 1, decimal=1)\nassert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)\n#copied to tsa.tests\n\nplt.figure()\nplt.plot(x, 'b-o')\nplt.plot(modc.predicted(), 'r-')\nplt.figure()\nplt.plot(modc.error_estimate)\n#plt.show()\n\nfrom statsmodels.miscmodels.tmodel import TArma\n\nmodct = TArma(x)\nreslst = modc.fit(order=(1,1))\nprint(reslst[0])\nrescmt = modct.fit_mle(order=(1,1), start_params=[-0.4,0.4, 10, 1.],maxiter=500,\n maxfun=500)\nprint(rescmt.params)\n\n\nfrom statsmodels.tsa.arima_model import ARMA\nmkf = ARMA(x)\n##rkf = mkf.fit((1,1))\n##rkf.params\nrkf = mkf.fit((1,1), trend='nc')\nprint(rkf.params)\n\nfrom statsmodels.tsa.arima_process import arma_generate_sample\nnp.random.seed(12345)\ny_arma22 = arma_generate_sample([1.,-.85,.35, -0.1],[1,.25,-.7], nsample=1000)\n##arma22 = ARMA(y_arma22)\n##res22 = arma22.fit(trend = 'nc', order=(2,2))\n##print 'kf ',res22.params\n##res22css = arma22.fit(method='css',trend = 'nc', order=(2,2))\n##print 'css', res22css.params\nmod22 = Arma(y_arma22)\nresls22 = mod22.fit(order=(2,2))\nprint('ls ', resls22[0])\nresmle22 = mod22.fit_mle(order=(2,2), maxfun=2000)\nprint('mle', resmle22.params)\n\nf = mod22.forecast()\nf3 = mod22.forecast3(start=900)[-20:]\n\nprint(y_arma22[-10:])\nprint(f[-20:])\nprint(f3[-109:-90])\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"numpy.arange",
"matplotlib.pyplot.ylim",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.rc",
"matplotlib.pyplot.ylabel"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.PeriodIndex",
"numpy.arange",
"pandas.DatetimeIndex",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"pandas.date_range",
"numpy.zeros"
],
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.array",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.figure"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.cumsum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.uniform",
"numpy.logical_and",
"matplotlib.pyplot.hist",
"numpy.empty",
"matplotlib.pyplot.figure"
],
[
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.figure"
],
[
"scipy.stats.kstest",
"numpy.testing.assert_equal",
"scipy.special.gamma",
"numpy.linspace",
"numpy.random.seed",
"scipy.stats.norm.pdf",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_raises",
"numpy.testing.assert_allclose"
],
[
"numpy.linalg.pinv",
"numpy.ones",
"numpy.testing.assert_array_almost_equal"
],
[
"pandas.WidePanel.fromDict",
"matplotlib.pyplot.title",
"numpy.power",
"pandas.DataFrame",
"numpy.ones",
"matplotlib.finance.quotes_historical_yahoo",
"matplotlib.pyplot.figure"
],
[
"numpy.testing.assert_equal",
"numpy.abs",
"numpy.random.seed",
"numpy.isfinite",
"numpy.arange",
"pandas.DataFrame",
"numpy.ones",
"numpy.sign",
"numpy.random.normal",
"numpy.zeros_like",
"numpy.testing.assert_allclose",
"numpy.random.uniform",
"numpy.exp",
"numpy.sum",
"numpy.random.randint"
],
[
"numpy.testing.assert_equal",
"numpy.genfromtxt",
"numpy.testing.assert_almost_equal",
"numpy.column_stack",
"numpy.array"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.diag",
"numpy.dot",
"numpy.log",
"scipy.linalg.blas.find_best_blas_type",
"pandas.date_range",
"numpy.eye",
"pandas.DataFrame",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_allclose",
"numpy.diag_indices",
"numpy.array",
"scipy.linalg.solve_discrete_lyapunov",
"numpy.sum",
"numpy.zeros"
],
[
"numpy.abs",
"numpy.random.seed",
"numpy.sqrt",
"numpy.asarray",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.cov",
"numpy.testing.assert_allclose"
],
[
"numpy.dot",
"numpy.log",
"numpy.outer",
"numpy.asarray",
"numpy.arange",
"numpy.set_printoptions",
"numpy.random.negative_binomial",
"numpy.ones",
"numpy.concatenate",
"numpy.random.poisson",
"numpy.random.normal",
"numpy.array_str",
"numpy.flatnonzero",
"numpy.random.uniform",
"numpy.array",
"numpy.sum",
"numpy.random.randint"
],
[
"numpy.asarray",
"numpy.dot",
"numpy.squeeze",
"numpy.linalg.pinv"
],
[
"numpy.testing.assert_"
],
[
"numpy.random.seed",
"matplotlib.pyplot.plot",
"numpy.testing.assert_almost_equal",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.20",
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
happog/object_rect_attention | [
"31aaf0bc0631050f82a4754caa60e92bb24e13b0"
] | [
"tools/utils.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport collections\n\nclass strLabelConverterForAttention(object):\n \"\"\"Convert between str and label.\n\n NOTE:\n Insert `EOS` to the alphabet for attention.\n\n Args:\n alphabet (str): set of the possible characters.\n ignore_case (bool, default=True): whether or not to ignore all of the case.\n \"\"\"\n\n def __init__(self, alphabet, sep):\n self._scanned_list = False\n self._out_of_list = ''\n self._ignore_case = True\n self.sep = sep\n self.alphabet = alphabet.split(sep)\n\n self.dict = {}\n for i, item in enumerate(self.alphabet):\n # NOTE: 0 is reserved for 'blank' required by wrap_ctc\n self.dict[item] = i\n\n def scan(self, text):\n # print text\n text_tmp = text\n text = []\n for i in range(len(text_tmp)):\n text_result = ''\n for j in range(len(text_tmp[i])):\n chara = text_tmp[i][j].lower() if self._ignore_case else text_tmp[i][j]\n if chara not in self.alphabet:\n if chara in self._out_of_list:\n continue\n else:\n self._out_of_list += chara\n file_out_of_list = open(\"out_of_list.txt\", \"a+\")\n file_out_of_list.write(chara + \"\\n\")\n file_out_of_list.close()\n print(\"\\\" %s \\\" is not in alphabet...\" % chara)\n continue\n else:\n text_result += chara\n text.append(text_result)\n text_result = tuple(text)\n self._scanned_list = True\n return text_result\n\n def encode(self, text, scanned=True):\n \"\"\"Support batch or single str.\n\n Args:\n text (str or list of str): texts to convert.\n\n Returns:\n torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.\n torch.IntTensor [n]: length of each text.\n \"\"\"\n self._scanned_list = scanned\n if not self._scanned_list:\n text = self.scan(text)\n\n if isinstance(text, str):\n text = [\n self.dict[char.lower() if self._ignore_case else char]\n for char in text\n ]\n length = [len(text)]\n elif isinstance(text, collections.Iterable):\n length = [len(s) for s in text]\n text = ''.join(text)\n text, _ = self.encode(text)\n return (torch.LongTensor(text), torch.LongTensor(length))\n\n def decode(self, t, length):\n \"\"\"Decode encoded texts back into strs.\n\n Args:\n torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.\n torch.IntTensor [n]: length of each text.\n\n Raises:\n AssertionError: when the texts and its length does not match.\n\n Returns:\n text (str or list of str): texts to convert.\n \"\"\"\n if length.numel() == 1:\n length = length[0]\n assert t.numel() == length, \"text with length: {} does not match declared length: {}\".format(t.numel(), length)\n return ''.join([self.alphabet[i] for i in t])\n else:\n # batch mode\n assert t.numel() == length.sum(), \"texts with length: {} does not match declared length: {}\".format(t.numel(), length.sum())\n texts = []\n index = 0\n for i in range(length.numel()):\n l = length[i]\n texts.append(\n self.decode(\n t[index:index + l], torch.LongTensor([l])))\n index += l\n return texts\n\nclass averager(object):\n \"\"\"Compute average for `torch.Variable` and `torch.Tensor`. \"\"\"\n\n def __init__(self):\n self.reset()\n\n def add(self, v):\n if isinstance(v, Variable):\n count = v.data.numel()\n v = v.data.sum()\n elif isinstance(v, torch.Tensor):\n count = v.numel()\n v = v.sum()\n\n self.n_count += count\n self.sum += v\n\n def reset(self):\n self.n_count = 0\n self.sum = 0\n\n def val(self):\n res = 0\n if self.n_count != 0:\n res = self.sum / float(self.n_count)\n return res\n\ndef loadData(v, data):\n v.data.resize_(data.size()).copy_(data)"
] | [
[
"torch.LongTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ravali27/Keras-FasterRCNN | [
"e0847e18a85c988ae8baaba50ea40a2a7c05c855"
] | [
"keras_frcnn/simple_parser.py"
] | [
"from __future__ import division\nimport numpy as np\nfrom scipy.ndimage import interpolation\n\n\ndef resize_n(old, new_shape):\n new_f, new_t = new_shape\n old_f, old_t = old.shape\n scale_f = new_f/old_f\n scale_t = new_t/old_t\n #print(scale_f, 'scale_f-------------------')\n #print(scale_t, 'scale_t-------------------')\n new = interpolation.zoom(old, (scale_f, scale_t))\n #print(new.shape)\n return new \n\ndef get_data(input_path):\n found_bg = False\n all_imgs = {}\n\n classes_count = {}\n\n class_mapping = {}\n\n visualise = True\n\n with open(input_path,'r') as f:\n\n print('Parsing annotation files')\n\n for line in f:\n line_split = line.strip().split(',')\n (filename,x1,y1,x2,y2,class_name) = line_split\n filename = '/home/LORIEN+ravali.nalla/Txt_data/' + filename#.strip()\n #filename = filename.strip()\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n\n if class_name not in class_mapping:\n if class_name == 'bg' and found_bg == False:\n print('Found class name with special name bg. Will be treated as a background region (this is usually for hard negative mining).')\n found_bg = True\n class_mapping[class_name] = len(class_mapping)\n\n if filename not in all_imgs:\n all_imgs[filename] = {}\n #print(filename)\n img_o = np.loadtxt(filename)\n sd = 2126.5\n img = img_o/sd\n #print(img.shape, 'old')\n img = resize_n(img, (224, 224))\n img = np.stack((img, img, img), axis=2)\n (rows,cols) = img.shape[:2]\n #print(img.shape)\n all_imgs[filename]['filepath'] = filename\n all_imgs[filename]['width'] = cols\n all_imgs[filename]['height'] = rows\n all_imgs[filename]['bboxes'] = []\n set_n = filename.split('/')[5]\n #print(set_n)\n if set_n == \"Train\":\n all_imgs[filename]['imageset'] = 'train'\n elif set_n == \"Validate\":\n all_imgs[filename]['imageset'] = 'val'\n else:\n all_imgs[filename]['imageset'] = 'test'\n\n all_imgs[filename]['bboxes'].append({'class': class_name, 'x1': int(x1), 'x2': int(x2), 'y1': int(y1), 'y2': int(y2)})\n \n print('Done parsing.')\n all_data = []\n for key in all_imgs:\n all_data.append(all_imgs[key])\n\n # make sure the bg class is last in the list\n if found_bg:\n if class_mapping['bg'] != len(class_mapping) - 1:\n key_to_switch = [key for key in class_mapping.keys() if class_mapping[key] == len(class_mapping)-1][0]\n val_to_switch = class_mapping['bg']\n class_mapping['bg'] = len(class_mapping) - 1\n class_mapping[key_to_switch] = val_to_switch\n\n return all_data, classes_count, class_mapping\n\n\n"
] | [
[
"scipy.ndimage.interpolation.zoom",
"numpy.loadtxt",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
zuhorski/EPL_Project | [
"2d2417652879cfbe33c44c003ad77b7222590849",
"2d2417652879cfbe33c44c003ad77b7222590849"
] | [
"PythonVirtEnv/Lib/site-packages/win32com/test/testPippo.py",
"PythonVirtEnv/Lib/site-packages/pandas/io/formats/style.py"
] | [
"import sys\nimport unittest\nimport pythoncom\nfrom win32com.client import Dispatch\nfrom win32com.client.gencache import EnsureDispatch\n\nclass PippoTester(unittest.TestCase):\n def setUp(self):\n from win32com.test.util import RegisterPythonServer\n from win32com.test import pippo_server\n RegisterPythonServer(pippo_server.__file__, \"Python.Test.Pippo\")\n # create it.\n self.object = Dispatch(\"Python.Test.Pippo\")\n\n def testLeaks(self):\n try:\n gtrc = sys.gettotalrefcount\n except AttributeError:\n print(\"Please run this with python_d for leak tests\")\n gtrc = lambda: 0\n # note creating self.object() should have consumed our \"one time\" leaks\n self.object.Method1()\n start = gtrc()\n for i in range(1000):\n object = Dispatch(\"Python.Test.Pippo\")\n object.Method1()\n object = None\n end = gtrc()\n if end-start > 5:\n self.fail(\"We lost %d references!\" % (end-start,))\n\n def testResults(self):\n rc, out1 = self.object.Method2(123, 111)\n self.assertEqual(rc, 123)\n self.assertEqual(out1, 222)\n\n def testPythonArrays(self):\n self._testArray([-3, -2, -1, 0, 1, 2, 3])\n self._testArray([-3.14, -2, -.1, 0., 1.1, 2.5, 3])\n\n def testNumpyArrays(self):\n try:\n import numpy\n except:\n print(\"Numpy test not possible because numpy module failed to import\")\n return\n self._testArray(numpy.array([-3, -2, -1, 0, 1, 2, 3]))\n self._testArray(numpy.array([-3.14, -2, -.1, 0., 1.1, 2.5, 3]))\n\n def testByteArrays(self):\n if 'bytes' in dir(__builtins__):\n # Use eval to avoid compilation error in Python 2.\n self._testArray(eval(\"b'abcdef'\"))\n self._testArray(eval(\"bytearray(b'abcdef')\"))\n\n def _testArray(self, inArray):\n outArray = self.object.Method3(inArray)\n self.assertEqual(list(outArray), list(inArray))\n\n def testLeaksGencache(self):\n try:\n gtrc = sys.gettotalrefcount\n except AttributeError:\n print(\"Please run this with python_d for leak tests\")\n gtrc = lambda: 0\n # note creating self.object() should have consumed our \"one time\" leaks\n object = EnsureDispatch(\"Python.Test.Pippo\")\n start = gtrc()\n for i in range(1000):\n object = EnsureDispatch(\"Python.Test.Pippo\")\n object.Method1()\n object = None\n end = gtrc()\n if end-start > 10:\n self.fail(\"We lost %d references!\" % (end-start,))\n\nif __name__=='__main__':\n unittest.main()\n",
"\"\"\"\nModule for applying conditional formatting to DataFrames and Series.\n\"\"\"\nfrom __future__ import annotations\n\nfrom contextlib import contextmanager\nimport copy\nfrom functools import partial\nimport operator\nfrom typing import (\n Any,\n Callable,\n Hashable,\n Sequence,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._typing import (\n Axis,\n FilePathOrBuffer,\n FrameOrSeries,\n FrameOrSeriesUnion,\n IndexLabel,\n Scalar,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nimport pandas as pd\nfrom pandas import (\n IndexSlice,\n RangeIndex,\n)\nfrom pandas.api.types import is_list_like\nfrom pandas.core import generic\nimport pandas.core.common as com\nfrom pandas.core.frame import (\n DataFrame,\n Series,\n)\nfrom pandas.core.generic import NDFrame\n\nfrom pandas.io.formats.format import save_to_buffer\n\njinja2 = import_optional_dependency(\"jinja2\", extra=\"DataFrame.style requires jinja2.\")\n\nfrom pandas.io.formats.style_render import (\n CSSProperties,\n CSSStyles,\n StylerRenderer,\n Subset,\n Tooltips,\n maybe_convert_css_to_tuples,\n non_reducing_slice,\n)\n\ntry:\n from matplotlib import colors\n import matplotlib.pyplot as plt\n\n has_mpl = True\nexcept ImportError:\n has_mpl = False\n no_mpl_message = \"{0} requires matplotlib.\"\n\n\n@contextmanager\ndef _mpl(func: Callable):\n if has_mpl:\n yield plt, colors\n else:\n raise ImportError(no_mpl_message.format(func.__name__))\n\n\nclass Styler(StylerRenderer):\n r\"\"\"\n Helps style a DataFrame or Series according to the data with HTML and CSS.\n\n Parameters\n ----------\n data : Series or DataFrame\n Data to be styled - either a Series or DataFrame.\n precision : int\n Precision to round floats to, defaults to pd.options.display.precision.\n table_styles : list-like, default None\n List of {selector: (attr, value)} dicts; see Notes.\n uuid : str, default None\n A unique identifier to avoid CSS collisions; generated automatically.\n caption : str, tuple, default None\n String caption to attach to the table. Tuple only used for LaTeX dual captions.\n table_attributes : str, default None\n Items that show up in the opening ``<table>`` tag\n in addition to automatic (by default) id.\n cell_ids : bool, default True\n If True, each cell will have an ``id`` attribute in their HTML tag.\n The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``\n where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row\n number and ``<num_col>`` is the column number.\n na_rep : str, optional\n Representation for missing values.\n If ``na_rep`` is None, no special formatting is applied.\n\n .. versionadded:: 1.0.0\n\n uuid_len : int, default 5\n If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate\n expressed in hex characters, in range [0, 32].\n\n .. versionadded:: 1.2.0\n\n decimal : str, default \".\"\n Character used as decimal separator for floats, complex and integers\n\n .. versionadded:: 1.3.0\n\n thousands : str, optional, default None\n Character used as thousands separator for floats, complex and integers\n\n .. versionadded:: 1.3.0\n\n escape : str, optional\n Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"``\n in cell display string with HTML-safe sequences.\n Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,\n ``{``, ``}``, ``~``, ``^``, and ``\\`` in the cell display string with\n LaTeX-safe sequences.\n\n .. 
versionadded:: 1.3.0\n\n Attributes\n ----------\n env : Jinja2 jinja2.Environment\n template : Jinja2 Template\n loader : Jinja2 Loader\n\n See Also\n --------\n DataFrame.style : Return a Styler object containing methods for building\n a styled HTML representation for the DataFrame.\n\n Notes\n -----\n Most styling will be done by passing style functions into\n ``Styler.apply`` or ``Styler.applymap``. Style functions should\n return values with strings containing CSS ``'attr: value'`` that will\n be applied to the indicated cells.\n\n If using in the Jupyter notebook, Styler has defined a ``_repr_html_``\n to automatically render itself. Otherwise call Styler.render to get\n the generated HTML.\n\n CSS classes are attached to the generated HTML\n\n * Index and Column names include ``index_name`` and ``level<k>``\n where `k` is its level in a MultiIndex\n * Index label cells include\n\n * ``row_heading``\n * ``row<n>`` where `n` is the numeric position of the row\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Column label cells include\n * ``col_heading``\n * ``col<n>`` where `n` is the numeric position of the column\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Blank cells include ``blank``\n * Data cells include ``data``\n \"\"\"\n\n def __init__(\n self,\n data: FrameOrSeriesUnion,\n precision: int | None = None,\n table_styles: CSSStyles | None = None,\n uuid: str | None = None,\n caption: str | tuple | None = None,\n table_attributes: str | None = None,\n cell_ids: bool = True,\n na_rep: str | None = None,\n uuid_len: int = 5,\n decimal: str = \".\",\n thousands: str | None = None,\n escape: str | None = None,\n ):\n super().__init__(\n data=data,\n uuid=uuid,\n uuid_len=uuid_len,\n table_styles=table_styles,\n table_attributes=table_attributes,\n caption=caption,\n cell_ids=cell_ids,\n )\n\n # validate ordered args\n self.precision = precision # can be removed on set_precision depr cycle\n self.na_rep = na_rep # can be removed on set_na_rep depr cycle\n self.format(\n formatter=None,\n precision=precision,\n na_rep=na_rep,\n escape=escape,\n decimal=decimal,\n thousands=thousands,\n )\n\n def _repr_html_(self) -> str:\n \"\"\"\n Hooks into Jupyter notebook rich display system.\n \"\"\"\n return self.render()\n\n def render(\n self,\n sparse_index: bool | None = None,\n sparse_columns: bool | None = None,\n **kwargs,\n ) -> str:\n \"\"\"\n Render the ``Styler`` including all applied styles to HTML.\n\n Parameters\n ----------\n sparse_index : bool, optional\n Whether to sparsify the display of a hierarchical index. Setting to False\n will display each explicit level element in a hierarchical key for each row.\n Defaults to ``pandas.options.styler.sparse.index`` value.\n sparse_columns : bool, optional\n Whether to sparsify the display of a hierarchical index. Setting to False\n will display each explicit level element in a hierarchical key for each row.\n Defaults to ``pandas.options.styler.sparse.columns`` value.\n **kwargs\n Any additional keyword arguments are passed\n through to ``self.template.render``.\n This is useful when you need to provide\n additional variables for a custom template.\n\n Returns\n -------\n rendered : str\n The rendered HTML.\n\n Notes\n -----\n Styler objects have defined the ``_repr_html_`` method\n which automatically calls ``self.render()`` when it's the\n last item in a Notebook cell. 
When calling ``Styler.render()``\n directly, wrap the result in ``IPython.display.HTML`` to view\n the rendered HTML in the notebook.\n\n Pandas uses the following keys in render. Arguments passed\n in ``**kwargs`` take precedence, so think carefully if you want\n to override them:\n\n * head\n * cellstyle\n * body\n * uuid\n * table_styles\n * caption\n * table_attributes\n \"\"\"\n if sparse_index is None:\n sparse_index = get_option(\"styler.sparse.index\")\n if sparse_columns is None:\n sparse_columns = get_option(\"styler.sparse.columns\")\n return self._render_html(sparse_index, sparse_columns, **kwargs)\n\n def set_tooltips(\n self,\n ttips: DataFrame,\n props: CSSProperties | None = None,\n css_class: str | None = None,\n ) -> Styler:\n \"\"\"\n Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips.\n\n These string based tooltips are only applicable to ``<td>`` HTML elements,\n and cannot be used for column or index headers.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n ttips : DataFrame\n DataFrame containing strings that will be translated to tooltips, mapped\n by identical column and index values that must exist on the underlying\n Styler data. None, NaN values, and empty strings will be ignored and\n not affect the rendered HTML.\n props : list-like or str, optional\n List of (attr, value) tuples or a valid CSS string. If ``None`` adopts\n the internal default values described in notes.\n css_class : str, optional\n Name of the tooltip class used in CSS, should conform to HTML standards.\n Only useful if integrating tooltips with external CSS. If ``None`` uses the\n internal default value 'pd-t'.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n Tooltips are created by adding `<span class=\"pd-t\"></span>` to each data cell\n and then manipulating the table level CSS to attach pseudo hover and pseudo\n after selectors to produce the required the results.\n\n The default properties for the tooltip CSS class are:\n\n - visibility: hidden\n - position: absolute\n - z-index: 1\n - background-color: black\n - color: white\n - transform: translate(-20px, -20px)\n\n The property 'visibility: hidden;' is a key prerequisite to the hover\n functionality, and should always be included in any manual properties\n specification, using the ``props`` argument.\n\n Tooltips are not designed to be efficient, and can add large amounts of\n additional HTML for larger tables, since they also require that ``cell_ids``\n is forced to `True`.\n\n Examples\n --------\n Basic application\n\n >>> df = pd.DataFrame(data=[[0, 1], [2, 3]])\n >>> ttips = pd.DataFrame(\n ... data=[[\"Min\", \"\"], [np.nan, \"Max\"]], columns=df.columns, index=df.index\n ... )\n >>> s = df.style.set_tooltips(ttips).render()\n\n Optionally controlling the tooltip visual display\n\n >>> df.style.set_tooltips(ttips, css_class='tt-add', props=[\n ... ('visibility', 'hidden'),\n ... ('position', 'absolute'),\n ... ('z-index', 1)])\n >>> df.style.set_tooltips(ttips, css_class='tt-add',\n ... props='visibility:hidden; position:absolute; z-index:1;')\n \"\"\"\n if not self.cell_ids:\n # tooltips not optimised for individual cell check. 
requires reasonable\n # redesign and more extensive code for a feature that might be rarely used.\n raise NotImplementedError(\n \"Tooltips can only render with 'cell_ids' is True.\"\n )\n if not ttips.index.is_unique or not ttips.columns.is_unique:\n raise KeyError(\n \"Tooltips render only if `ttips` has unique index and columns.\"\n )\n if self.tooltips is None: # create a default instance if necessary\n self.tooltips = Tooltips()\n self.tooltips.tt_data = ttips\n if props:\n self.tooltips.class_properties = props\n if css_class:\n self.tooltips.class_name = css_class\n\n return self\n\n @doc(\n NDFrame.to_excel,\n klass=\"Styler\",\n storage_options=generic._shared_docs[\"storage_options\"],\n )\n def to_excel(\n self,\n excel_writer,\n sheet_name: str = \"Sheet1\",\n na_rep: str = \"\",\n float_format: str | None = None,\n columns: Sequence[Hashable] | None = None,\n header: Sequence[Hashable] | bool = True,\n index: bool = True,\n index_label: IndexLabel | None = None,\n startrow: int = 0,\n startcol: int = 0,\n engine: str | None = None,\n merge_cells: bool = True,\n encoding: str | None = None,\n inf_rep: str = \"inf\",\n verbose: bool = True,\n freeze_panes: tuple[int, int] | None = None,\n ) -> None:\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n self,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def to_latex(\n self,\n buf: FilePathOrBuffer[str] | None = None,\n *,\n column_format: str | None = None,\n position: str | None = None,\n position_float: str | None = None,\n hrules: bool = False,\n label: str | None = None,\n caption: str | tuple | None = None,\n sparse_index: bool | None = None,\n sparse_columns: bool | None = None,\n multirow_align: str = \"c\",\n multicol_align: str = \"r\",\n siunitx: bool = False,\n encoding: str | None = None,\n convert_css: bool = False,\n ):\n r\"\"\"\n Write Styler to a file, buffer or string in LaTeX format.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n buf : str, Path, or StringIO-like, optional, default None\n Buffer to write to. If ``None``, the output is returned as a string.\n column_format : str, optional\n The LaTeX column specification placed in location:\n\n \\\\begin{tabular}{<column_format>}\n\n Defaults to 'l' for index and\n non-numeric data columns, and, for numeric data columns,\n to 'r' by default, or 'S' if ``siunitx`` is ``True``.\n position : str, optional\n The LaTeX positional argument (e.g. 
'h!') for tables, placed in location:\n\n \\\\begin{table}[<position>]\n position_float : {\"centering\", \"raggedleft\", \"raggedright\"}, optional\n The LaTeX float command placed in location:\n\n \\\\begin{table}[<position>]\n\n \\\\<position_float>\n hrules : bool, default False\n Set to `True` to add \\\\toprule, \\\\midrule and \\\\bottomrule from the\n {booktabs} LaTeX package.\n label : str, optional\n The LaTeX label included as: \\\\label{<label>}.\n This is used with \\\\ref{<label>} in the main .tex file.\n caption : str, tuple, optional\n If string, the LaTeX table caption included as: \\\\caption{<caption>}.\n If tuple, i.e (\"full caption\", \"short caption\"), the caption included\n as: \\\\caption[<caption[1]>]{<caption[0]>}.\n sparse_index : bool, optional\n Whether to sparsify the display of a hierarchical index. Setting to False\n will display each explicit level element in a hierarchical key for each row.\n Defaults to ``pandas.options.styler.sparse.index`` value.\n sparse_columns : bool, optional\n Whether to sparsify the display of a hierarchical index. Setting to False\n will display each explicit level element in a hierarchical key for each row.\n Defaults to ``pandas.options.styler.sparse.columns`` value.\n multirow_align : {\"c\", \"t\", \"b\"}\n If sparsifying hierarchical MultiIndexes whether to align text centrally,\n at the top or bottom.\n multicol_align : {\"r\", \"c\", \"l\"}\n If sparsifying hierarchical MultiIndex columns whether to align text at\n the left, centrally, or at the right.\n siunitx : bool, default False\n Set to ``True`` to structure LaTeX compatible with the {siunitx} package.\n encoding : str, default \"utf-8\"\n Character encoding setting.\n convert_css : bool, default False\n Convert simple cell-styles from CSS to LaTeX format. Any CSS not found in\n conversion table is dropped. A style can be forced by adding option\n `--latex`. See notes.\n\n Returns\n -------\n str or None\n If `buf` is None, returns the result as a string. Otherwise returns `None`.\n\n See Also\n --------\n Styler.format: Format the text display value of cells.\n\n Notes\n -----\n **Latex Packages**\n\n For the following features we recommend the following LaTeX inclusions:\n\n ===================== ==========================================================\n Feature Inclusion\n ===================== ==========================================================\n sparse columns none: included within default {tabular} environment\n sparse rows \\\\usepackage{multirow}\n hrules \\\\usepackage{booktabs}\n colors \\\\usepackage[table]{xcolor}\n siunitx \\\\usepackage{siunitx}\n bold (with siunitx) | \\\\usepackage{etoolbox}\n | \\\\robustify\\\\bfseries\n | \\\\sisetup{detect-all = true} *(within {document})*\n italic (with siunitx) | \\\\usepackage{etoolbox}\n | \\\\robustify\\\\itshape\n | \\\\sisetup{detect-all = true} *(within {document})*\n ===================== ==========================================================\n\n **Cell Styles**\n\n LaTeX styling can only be rendered if the accompanying styling functions have\n been constructed with appropriate LaTeX commands. All styling\n functionality is built around the concept of a CSS ``(<attribute>, <value>)``\n pair (see `Table Visualization <../../user_guide/style.ipynb>`_), and this\n should be replaced by a LaTeX\n ``(<command>, <options>)`` approach. 
Each cell will be styled individually\n using nested LaTeX commands with their accompanied options.\n\n For example the following code will highlight and bold a cell in HTML-CSS:\n\n >>> df = pd.DataFrame([[1,2], [3,4]])\n >>> s = df.style.highlight_max(axis=None,\n ... props='background-color:red; font-weight:bold;')\n >>> s.render()\n\n The equivalent using LaTeX only commands is the following:\n\n >>> s = df.style.highlight_max(axis=None,\n ... props='cellcolor:{red}; bfseries: ;')\n >>> s.to_latex()\n\n Internally these structured LaTeX ``(<command>, <options>)`` pairs\n are translated to the\n ``display_value`` with the default structure:\n ``\\<command><options> <display_value>``.\n Where there are multiple commands the latter is nested recursively, so that\n the above example highlighed cell is rendered as\n ``\\cellcolor{red} \\bfseries 4``.\n\n Occasionally this format does not suit the applied command, or\n combination of LaTeX packages that is in use, so additional flags can be\n added to the ``<options>``, within the tuple, to result in different\n positions of required braces (the **default** being the same as ``--nowrap``):\n\n =================================== ============================================\n Tuple Format Output Structure\n =================================== ============================================\n (<command>,<options>) \\\\<command><options> <display_value>\n (<command>,<options> ``--nowrap``) \\\\<command><options> <display_value>\n (<command>,<options> ``--rwrap``) \\\\<command><options>{<display_value>}\n (<command>,<options> ``--wrap``) {\\\\<command><options> <display_value>}\n (<command>,<options> ``--lwrap``) {\\\\<command><options>} <display_value>\n (<command>,<options> ``--dwrap``) {\\\\<command><options>}{<display_value>}\n =================================== ============================================\n\n For example the `textbf` command for font-weight\n should always be used with `--rwrap` so ``('textbf', '--rwrap')`` will render a\n working cell, wrapped with braces, as ``\\textbf{<display_value>}``.\n\n A more comprehensive example is as follows:\n\n >>> df = pd.DataFrame([[1, 2.2, \"dogs\"], [3, 4.4, \"cats\"], [2, 6.6, \"cows\"]],\n ... index=[\"ix1\", \"ix2\", \"ix3\"],\n ... columns=[\"Integers\", \"Floats\", \"Strings\"])\n >>> s = df.style.highlight_max(\n ... props='cellcolor:[HTML]{FFFF00}; color:{red};'\n ... 'textit:--rwrap; textbf:--rwrap;'\n ... )\n >>> s.to_latex()\n\n .. figure:: ../../_static/style/latex_1.png\n\n **Table Styles**\n\n Internally Styler uses its ``table_styles`` object to parse the\n ``column_format``, ``position``, ``position_float``, and ``label``\n input arguments. These arguments are added to table styles in the format:\n\n .. code-block:: python\n\n set_table_styles([\n {\"selector\": \"column_format\", \"props\": f\":{column_format};\"},\n {\"selector\": \"position\", \"props\": f\":{position};\"},\n {\"selector\": \"position_float\", \"props\": f\":{position_float};\"},\n {\"selector\": \"label\", \"props\": f\":{{{label.replace(':','§')}}};\"}\n ], overwrite=False)\n\n Exception is made for the ``hrules`` argument which, in fact, controls all three\n commands: ``toprule``, ``bottomrule`` and ``midrule`` simultaneously. Instead of\n setting ``hrules`` to ``True``, it is also possible to set each\n individual rule definition, by manually setting the ``table_styles``,\n for example below we set a regular ``toprule``, set an ``hline`` for\n ``bottomrule`` and exclude the ``midrule``:\n\n .. 
code-block:: python\n\n set_table_styles([\n {'selector': 'toprule', 'props': ':toprule;'},\n {'selector': 'bottomrule', 'props': ':hline;'},\n ], overwrite=False)\n\n If other ``commands`` are added to table styles they will be detected, and\n positioned immediately above the '\\\\begin{tabular}' command. For example to\n add odd and even row coloring, from the {colortbl} package, in format\n ``\\rowcolors{1}{pink}{red}``, use:\n\n .. code-block:: python\n\n set_table_styles([\n {'selector': 'rowcolors', 'props': ':{1}{pink}{red};'}\n ], overwrite=False)\n\n A more comprehensive example using these arguments is as follows:\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... (\"Numeric\", \"Integers\"),\n ... (\"Numeric\", \"Floats\"),\n ... (\"Non-Numeric\", \"Strings\")\n ... ])\n >>> df.index = pd.MultiIndex.from_tuples([\n ... (\"L0\", \"ix1\"), (\"L0\", \"ix2\"), (\"L1\", \"ix3\")\n ... ])\n >>> s = df.style.highlight_max(\n ... props='cellcolor:[HTML]{FFFF00}; color:{red}; itshape:; bfseries:;'\n ... )\n >>> s.to_latex(\n ... column_format=\"rrrrr\", position=\"h\", position_float=\"centering\",\n ... hrules=True, label=\"table:5\", caption=\"Styled LaTeX Table\",\n ... multirow_align=\"t\", multicol_align=\"r\"\n ... )\n\n .. figure:: ../../_static/style/latex_2.png\n\n **Formatting**\n\n To format values :meth:`Styler.format` should be used prior to calling\n `Styler.to_latex`, as well as other methods such as :meth:`Styler.hide_index`\n or :meth:`Styler.hide_columns`, for example:\n\n >>> s.clear()\n >>> s.table_styles = []\n >>> s.caption = None\n >>> s.format({\n ... (\"Numeric\", \"Integers\"): '\\${}',\n ... (\"Numeric\", \"Floats\"): '{:.3f}',\n ... (\"Non-Numeric\", \"Strings\"): str.upper\n ... })\n >>> s.to_latex()\n \\begin{tabular}{llrrl}\n {} & {} & \\multicolumn{2}{r}{Numeric} & {Non-Numeric} \\\\\n {} & {} & {Integers} & {Floats} & {Strings} \\\\\n \\multirow[c]{2}{*}{L0} & ix1 & \\\\$1 & 2.200 & DOGS \\\\\n & ix2 & \\$3 & 4.400 & CATS \\\\\n L1 & ix3 & \\$2 & 6.600 & COWS \\\\\n \\end{tabular}\n\n **CSS Conversion**\n\n This method can convert a Styler constructured with HTML-CSS to LaTeX using\n the following limited conversions.\n\n ================== ==================== ============= ==========================\n CSS Attribute CSS value LaTeX Command LaTeX Options\n ================== ==================== ============= ==========================\n font-weight | bold | bfseries\n | bolder | bfseries\n font-style | italic | itshape\n | oblique | slshape\n background-color | red cellcolor | {red}--lwrap\n | #fe01ea | [HTML]{FE01EA}--lwrap\n | #f0e | [HTML]{FF00EE}--lwrap\n | rgb(128,255,0) | [rgb]{0.5,1,0}--lwrap\n | rgba(128,0,0,0.5) | [rgb]{0.5,0,0}--lwrap\n | rgb(25%,255,50%) | [rgb]{0.25,1,0.5}--lwrap\n color | red color | {red}\n | #fe01ea | [HTML]{FE01EA}\n | #f0e | [HTML]{FF00EE}\n | rgb(128,255,0) | [rgb]{0.5,1,0}\n | rgba(128,0,0,0.5) | [rgb]{0.5,0,0}\n | rgb(25%,255,50%) | [rgb]{0.25,1,0.5}\n ================== ==================== ============= ==========================\n\n It is also possible to add user-defined LaTeX only styles to a HTML-CSS Styler\n using the ``--latex`` flag, and to add LaTeX parsing options that the\n converter will detect within a CSS-comment.\n\n >>> df = pd.DataFrame([[1]])\n >>> df.style.set_properties(\n ... **{\"font-weight\": \"bold /* --dwrap */\", \"Huge\": \"--latex--rwrap\"}\n ... 
).to_latex(css_convert=True)\n \\begin{tabular}{lr}\n {} & {0} \\\\\n 0 & {\\bfseries}{\\Huge{1}} \\\\\n \\end{tabular}\n \"\"\"\n obj = self._copy(deepcopy=True) # manipulate table_styles on obj, not self\n\n table_selectors = (\n [style[\"selector\"] for style in self.table_styles]\n if self.table_styles is not None\n else []\n )\n\n if column_format is not None:\n # add more recent setting to table_styles\n obj.set_table_styles(\n [{\"selector\": \"column_format\", \"props\": f\":{column_format}\"}],\n overwrite=False,\n )\n elif \"column_format\" in table_selectors:\n pass # adopt what has been previously set in table_styles\n else:\n # create a default: set float, complex, int cols to 'r' ('S'), index to 'l'\n _original_columns = self.data.columns\n self.data.columns = RangeIndex(stop=len(self.data.columns))\n numeric_cols = self.data._get_numeric_data().columns.to_list()\n self.data.columns = _original_columns\n column_format = \"\" if self.hide_index_ else \"l\" * self.data.index.nlevels\n for ci, _ in enumerate(self.data.columns):\n if ci not in self.hidden_columns:\n column_format += (\n (\"r\" if not siunitx else \"S\") if ci in numeric_cols else \"l\"\n )\n obj.set_table_styles(\n [{\"selector\": \"column_format\", \"props\": f\":{column_format}\"}],\n overwrite=False,\n )\n\n if position:\n obj.set_table_styles(\n [{\"selector\": \"position\", \"props\": f\":{position}\"}],\n overwrite=False,\n )\n\n if position_float:\n if position_float not in [\"raggedright\", \"raggedleft\", \"centering\"]:\n raise ValueError(\n f\"`position_float` should be one of \"\n f\"'raggedright', 'raggedleft', 'centering', \"\n f\"got: '{position_float}'\"\n )\n obj.set_table_styles(\n [{\"selector\": \"position_float\", \"props\": f\":{position_float}\"}],\n overwrite=False,\n )\n\n if hrules:\n obj.set_table_styles(\n [\n {\"selector\": \"toprule\", \"props\": \":toprule\"},\n {\"selector\": \"midrule\", \"props\": \":midrule\"},\n {\"selector\": \"bottomrule\", \"props\": \":bottomrule\"},\n ],\n overwrite=False,\n )\n\n if label:\n obj.set_table_styles(\n [{\"selector\": \"label\", \"props\": f\":{{{label.replace(':', '§')}}}\"}],\n overwrite=False,\n )\n\n if caption:\n obj.set_caption(caption)\n\n if sparse_index is None:\n sparse_index = get_option(\"styler.sparse.index\")\n if sparse_columns is None:\n sparse_columns = get_option(\"styler.sparse.columns\")\n\n latex = obj._render_latex(\n sparse_index=sparse_index,\n sparse_columns=sparse_columns,\n multirow_align=multirow_align,\n multicol_align=multicol_align,\n convert_css=convert_css,\n )\n\n return save_to_buffer(latex, buf=buf, encoding=encoding)\n\n def to_html(\n self,\n buf: FilePathOrBuffer[str] | None = None,\n *,\n table_uuid: str | None = None,\n table_attributes: str | None = None,\n encoding: str | None = None,\n doctype_html: bool = False,\n exclude_styles: bool = False,\n ):\n \"\"\"\n Write Styler to a file, buffer or string in HTML-CSS format.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n buf : str, Path, or StringIO-like, optional, default None\n Buffer to write to. If ``None``, the output is returned as a string.\n table_uuid : str, optional\n Id attribute assigned to the <table> HTML element in the format:\n\n ``<table id=\"T_<table_uuid>\" ..>``\n\n If not given uses Styler's initially assigned value.\n table_attributes : str, optional\n Attributes to assign within the `<table>` HTML element in the format:\n\n ``<table .. 
<table_attributes> >``\n\n If not given defaults to Styler's preexisting value.\n encoding : str, optional\n Character encoding setting for file output, and HTML meta tags,\n defaults to \"utf-8\" if None.\n doctype_html : bool, default False\n Whether to output a fully structured HTML file including all\n HTML elements, or just the core ``<style>`` and ``<table>`` elements.\n exclude_styles : bool, default False\n Whether to include the ``<style>`` element and all associated element\n ``class`` and ``id`` identifiers, or solely the ``<table>`` element without\n styling identifiers.\n\n Returns\n -------\n str or None\n If `buf` is None, returns the result as a string. Otherwise returns `None`.\n\n See Also\n --------\n DataFrame.to_html: Write a DataFrame to a file, buffer or string in HTML format.\n \"\"\"\n if table_uuid:\n self.set_uuid(table_uuid)\n\n if table_attributes:\n self.set_table_attributes(table_attributes)\n\n # Build HTML string..\n html = self.render(\n exclude_styles=exclude_styles,\n encoding=encoding if encoding else \"utf-8\",\n doctype_html=doctype_html,\n )\n\n return save_to_buffer(\n html, buf=buf, encoding=(encoding if buf is not None else None)\n )\n\n def set_td_classes(self, classes: DataFrame) -> Styler:\n \"\"\"\n Set the DataFrame of strings added to the ``class`` attribute of ``<td>``\n HTML elements.\n\n Parameters\n ----------\n classes : DataFrame\n DataFrame containing strings that will be translated to CSS classes,\n mapped by identical column and index key values that must exist on the\n underlying Styler data. None, NaN values, and empty strings will\n be ignored and not affect the rendered HTML.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.set_table_styles: Set the table styles included within the ``<style>``\n HTML element.\n Styler.set_table_attributes: Set the table attributes added to the ``<table>``\n HTML element.\n\n Notes\n -----\n Can be used in combination with ``Styler.set_table_styles`` to define an\n internal CSS solution without reference to external CSS files.\n\n Examples\n --------\n >>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=[\"A\", \"B\", \"C\"])\n >>> classes = pd.DataFrame([\n ... [\"min-val red\", \"\", \"blue\"],\n ... [\"red\", None, \"blue max-val\"]\n ... ], index=df.index, columns=df.columns)\n >>> df.style.set_td_classes(classes)\n\n Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the\n underlying,\n\n >>> df = pd.DataFrame([[1,2],[3,4]], index=[\"a\", \"b\"],\n ... columns=[[\"level0\", \"level0\"], [\"level1a\", \"level1b\"]])\n >>> classes = pd.DataFrame([\"min-val\"], index=[\"a\"],\n ... 
columns=[[\"level0\"],[\"level1a\"]])\n >>> df.style.set_td_classes(classes)\n\n Form of the output with new additional css classes,\n\n >>> df = pd.DataFrame([[1]])\n >>> css = pd.DataFrame([[\"other-class\"]])\n >>> s = Styler(df, uuid=\"_\", cell_ids=False).set_td_classes(css)\n >>> s.hide_index().render()\n '<style type=\"text/css\"></style>'\n '<table id=\"T__\">'\n ' <thead>'\n ' <tr><th class=\"col_heading level0 col0\" >0</th></tr>'\n ' </thead>'\n ' <tbody>'\n ' <tr><td class=\"data row0 col0 other-class\" >1</td></tr>'\n ' </tbody>'\n '</table>'\n \"\"\"\n if not classes.index.is_unique or not classes.columns.is_unique:\n raise KeyError(\n \"Classes render only if `classes` has unique index and columns.\"\n )\n classes = classes.reindex_like(self.data)\n\n for r, row_tup in enumerate(classes.itertuples()):\n for c, value in enumerate(row_tup[1:]):\n if not (pd.isna(value) or value == \"\"):\n self.cell_context[(r, c)] = str(value)\n\n return self\n\n def _update_ctx(self, attrs: DataFrame) -> None:\n \"\"\"\n Update the state of the ``Styler`` for data cells.\n\n Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.\n\n Parameters\n ----------\n attrs : DataFrame\n should contain strings of '<property>: <value>;<prop2>: <val2>'\n Whitespace shouldn't matter and the final trailing ';' shouldn't\n matter.\n \"\"\"\n if not self.index.is_unique or not self.columns.is_unique:\n raise KeyError(\n \"`Styler.apply` and `.applymap` are not compatible \"\n \"with non-unique index or columns.\"\n )\n\n for cn in attrs.columns:\n for rn, c in attrs[[cn]].itertuples():\n if not c:\n continue\n css_list = maybe_convert_css_to_tuples(c)\n i, j = self.index.get_loc(rn), self.columns.get_loc(cn)\n self.ctx[(i, j)].extend(css_list)\n\n def _copy(self, deepcopy: bool = False) -> Styler:\n \"\"\"\n Copies a Styler, allowing for deepcopy or shallow copy\n\n Copying a Styler aims to recreate a new Styler object which contains the same\n data and styles as the original.\n\n Data dependent attributes [copied and NOT exported]:\n - formatting (._display_funcs)\n - hidden index values or column values (.hidden_rows, .hidden_columns)\n - tooltips\n - cell_context (cell css classes)\n - ctx (cell css styles)\n - caption\n\n Non-data dependent attributes [copied and exported]:\n - hidden index state and hidden columns state (.hide_index_, .hide_columns_)\n - table_attributes\n - table_styles\n - applied styles (_todo)\n\n \"\"\"\n # GH 40675\n styler = Styler(\n self.data, # populates attributes 'data', 'columns', 'index' as shallow\n uuid_len=self.uuid_len,\n )\n shallow = [ # simple string or boolean immutables\n \"hide_index_\",\n \"hide_columns_\",\n \"table_attributes\",\n \"cell_ids\",\n \"caption\",\n ]\n deep = [ # nested lists or dicts\n \"_display_funcs\",\n \"hidden_rows\",\n \"hidden_columns\",\n \"ctx\",\n \"cell_context\",\n \"_todo\",\n \"table_styles\",\n \"tooltips\",\n ]\n\n for attr in shallow:\n setattr(styler, attr, getattr(self, attr))\n\n for attr in deep:\n val = getattr(self, attr)\n setattr(styler, attr, copy.deepcopy(val) if deepcopy else val)\n\n return styler\n\n def __copy__(self) -> Styler:\n return self._copy(deepcopy=False)\n\n def __deepcopy__(self, memo) -> Styler:\n return self._copy(deepcopy=True)\n\n def clear(self) -> None:\n \"\"\"\n Reset the ``Styler``, removing any previously applied styles.\n\n Returns None.\n \"\"\"\n self.ctx.clear()\n self.tooltips = None\n self.cell_context.clear()\n self._todo.clear()\n\n self.hide_index_ = False\n 
self.hidden_columns = []\n # self.format and self.table_styles may be dependent on user\n # input in self.__init__()\n\n def _apply(\n self,\n func: Callable[..., Styler],\n axis: Axis | None = 0,\n subset: Subset | None = None,\n **kwargs,\n ) -> Styler:\n subset = slice(None) if subset is None else subset\n subset = non_reducing_slice(subset)\n data = self.data.loc[subset]\n if axis is not None:\n result = data.apply(func, axis=axis, result_type=\"expand\", **kwargs)\n result.columns = data.columns\n else:\n result = func(data, **kwargs)\n if not isinstance(result, DataFrame):\n if not isinstance(result, np.ndarray):\n raise TypeError(\n f\"Function {repr(func)} must return a DataFrame or ndarray \"\n f\"when passed to `Styler.apply` with axis=None\"\n )\n if not (data.shape == result.shape):\n raise ValueError(\n f\"Function {repr(func)} returned ndarray with wrong shape.\\n\"\n f\"Result has shape: {result.shape}\\n\"\n f\"Expected shape: {data.shape}\"\n )\n result = DataFrame(result, index=data.index, columns=data.columns)\n elif not (\n result.index.equals(data.index) and result.columns.equals(data.columns)\n ):\n raise ValueError(\n f\"Result of {repr(func)} must have identical \"\n f\"index and columns as the input\"\n )\n\n if result.shape != data.shape:\n raise ValueError(\n f\"Function {repr(func)} returned the wrong shape.\\n\"\n f\"Result has shape: {result.shape}\\n\"\n f\"Expected shape: {data.shape}\"\n )\n self._update_ctx(result)\n return self\n\n def apply(\n self,\n func: Callable[..., Styler],\n axis: Axis | None = 0,\n subset: Subset | None = None,\n **kwargs,\n ) -> Styler:\n \"\"\"\n Apply a CSS-styling function column-wise, row-wise, or table-wise.\n\n Updates the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a Series if ``axis`` in [0,1] and return an object\n of same length, also with identical index if the object is a Series.\n ``func`` should take a DataFrame if ``axis`` is ``None`` and return either\n an ndarray with the same shape or a DataFrame with identical columns and\n index.\n\n .. versionchanged:: 1.3.0\n\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n **kwargs : dict\n Pass along to ``func``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap: Apply a CSS-styling function elementwise.\n\n Notes\n -----\n The elements of the output of ``func`` should be CSS styles as strings, in the\n format 'attribute: value; attribute2: value2; ...' or,\n if nothing is to be applied to that element, an empty string or ``None``.\n\n This is similar to ``DataFrame.apply``, except that ``axis=None``\n applies the function to the entire DataFrame at once,\n rather than column-wise or row-wise.\n\n Examples\n --------\n >>> def highlight_max(x, color):\n ... 
return np.where(x == np.nanmax(x.to_numpy()), f\"color: {color};\", None)\n >>> df = pd.DataFrame(np.random.randn(5, 2), columns=[\"A\", \"B\"])\n >>> df.style.apply(highlight_max, color='red')\n >>> df.style.apply(highlight_max, color='blue', axis=1)\n >>> df.style.apply(highlight_max, color='green', axis=None)\n\n Using ``subset`` to restrict application to a single column or multiple columns\n\n >>> df.style.apply(highlight_max, color='red', subset=\"A\")\n >>> df.style.apply(highlight_max, color='red', subset=[\"A\", \"B\"])\n\n Using a 2d input to ``subset`` to select rows in addition to columns\n\n >>> df.style.apply(highlight_max, color='red', subset=([0,1,2], slice(None))\n >>> df.style.apply(highlight_max, color='red', subset=(slice(0,5,2), \"A\")\n \"\"\"\n self._todo.append(\n (lambda instance: getattr(instance, \"_apply\"), (func, axis, subset), kwargs)\n )\n return self\n\n def _applymap(\n self, func: Callable, subset: Subset | None = None, **kwargs\n ) -> Styler:\n func = partial(func, **kwargs) # applymap doesn't take kwargs?\n if subset is None:\n subset = IndexSlice[:]\n subset = non_reducing_slice(subset)\n result = self.data.loc[subset].applymap(func)\n self._update_ctx(result)\n return self\n\n def applymap(\n self, func: Callable, subset: Subset | None = None, **kwargs\n ) -> Styler:\n \"\"\"\n Apply a CSS-styling function elementwise.\n\n Updates the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a scalar and return a scalar.\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n **kwargs : dict\n Pass along to ``func``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.\n\n Notes\n -----\n The elements of the output of ``func`` should be CSS styles as strings, in the\n format 'attribute: value; attribute2: value2; ...' or,\n if nothing is to be applied to that element, an empty string or ``None``.\n\n Examples\n --------\n >>> def color_negative(v, color):\n ... return f\"color: {color};\" if v < 0 else None\n >>> df = pd.DataFrame(np.random.randn(5, 2), columns=[\"A\", \"B\"])\n >>> df.style.applymap(color_negative, color='red')\n\n Using ``subset`` to restrict application to a single column or multiple columns\n\n >>> df.style.applymap(color_negative, color='red', subset=\"A\")\n >>> df.style.applymap(color_negative, color='red', subset=[\"A\", \"B\"])\n\n Using a 2d input to ``subset`` to select rows in addition to columns\n\n >>> df.style.applymap(color_negative, color='red', subset=([0,1,2], slice(None))\n >>> df.style.applymap(color_negative, color='red', subset=(slice(0,5,2), \"A\")\n \"\"\"\n self._todo.append(\n (lambda instance: getattr(instance, \"_applymap\"), (func, subset), kwargs)\n )\n return self\n\n def where(\n self,\n cond: Callable,\n value: str,\n other: str | None = None,\n subset: Subset | None = None,\n **kwargs,\n ) -> Styler:\n \"\"\"\n Apply CSS-styles based on a conditional function elementwise.\n\n .. 
deprecated:: 1.3.0\n\n Updates the HTML representation with a style which is\n selected in accordance with the return value of a function.\n\n Parameters\n ----------\n cond : callable\n ``cond`` should take a scalar, and optional keyword arguments, and return\n a boolean.\n value : str\n Applied when ``cond`` returns true.\n other : str\n Applied when ``cond`` returns false.\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n **kwargs : dict\n Pass along to ``cond``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap: Apply a CSS-styling function elementwise.\n Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.\n\n Notes\n -----\n This method is deprecated.\n\n This method is a convenience wrapper for :meth:`Styler.applymap`, which we\n recommend using instead.\n\n The example:\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]])\n >>> def cond(v, limit=4):\n ... return v > 1 and v != limit\n >>> df.style.where(cond, value='color:green;', other='color:red;')\n\n should be refactored to:\n\n >>> def style_func(v, value, other, limit=4):\n ... cond = v > 1 and v != limit\n ... return value if cond else other\n >>> df.style.applymap(style_func, value='color:green;', other='color:red;')\n \"\"\"\n warnings.warn(\n \"this method is deprecated in favour of `Styler.applymap()`\",\n FutureWarning,\n stacklevel=2,\n )\n\n if other is None:\n other = \"\"\n\n return self.applymap(\n lambda val: value if cond(val, **kwargs) else other,\n subset=subset,\n )\n\n def set_precision(self, precision: int) -> StylerRenderer:\n \"\"\"\n Set the precision used to display values.\n\n .. deprecated:: 1.3.0\n\n Parameters\n ----------\n precision : int\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n This method is deprecated see `Styler.format`.\n \"\"\"\n warnings.warn(\n \"this method is deprecated in favour of `Styler.format(precision=..)`\",\n FutureWarning,\n stacklevel=2,\n )\n self.precision = precision\n return self.format(precision=precision, na_rep=self.na_rep)\n\n def set_table_attributes(self, attributes: str) -> Styler:\n \"\"\"\n Set the table attributes added to the ``<table>`` HTML element.\n\n These are items in addition to automatic (by default) ``id`` attribute.\n\n Parameters\n ----------\n attributes : str\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.set_table_styles: Set the table styles included within the ``<style>``\n HTML element.\n Styler.set_td_classes: Set the DataFrame of strings added to the ``class``\n attribute of ``<td>`` HTML elements.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_attributes('class=\"pure-table\"')\n # ... 
<table class=\"pure-table\"> ...\n \"\"\"\n self.table_attributes = attributes\n return self\n\n def export(self) -> list[tuple[Callable, tuple, dict]]:\n \"\"\"\n Export the styles applied to the current ``Styler``.\n\n Can be applied to a second Styler with ``Styler.use``.\n\n Returns\n -------\n styles : list\n\n See Also\n --------\n Styler.use: Set the styles on the current ``Styler``.\n \"\"\"\n return self._todo\n\n def use(self, styles: list[tuple[Callable, tuple, dict]]) -> Styler:\n \"\"\"\n Set the styles on the current ``Styler``.\n\n Possibly uses styles from ``Styler.export``.\n\n Parameters\n ----------\n styles : list\n List of style functions.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.export : Export the styles to applied to the current ``Styler``.\n \"\"\"\n self._todo.extend(styles)\n return self\n\n def set_uuid(self, uuid: str) -> Styler:\n \"\"\"\n Set the uuid applied to ``id`` attributes of HTML elements.\n\n Parameters\n ----------\n uuid : str\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n Almost all HTML elements within the table, and including the ``<table>`` element\n are assigned ``id`` attributes. The format is ``T_uuid_<extra>`` where\n ``<extra>`` is typically a more specific identifier, such as ``row1_col2``.\n \"\"\"\n self.uuid = uuid\n return self\n\n def set_caption(self, caption: str | tuple) -> Styler:\n \"\"\"\n Set the text added to a ``<caption>`` HTML element.\n\n Parameters\n ----------\n caption : str, tuple\n For HTML output either the string input is used or the first element of the\n tuple. For LaTeX the string input provides a caption and the additional\n tuple input allows for full captions and short captions, in that order.\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.caption = caption\n return self\n\n def set_sticky(\n self,\n axis: Axis = 0,\n pixel_size: int | None = None,\n levels: list[int] | None = None,\n ) -> Styler:\n \"\"\"\n Add CSS to permanently display the index or column headers in a scrolling frame.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Whether to make the index or column headers sticky.\n pixel_size : int, optional\n Required to configure the width of index cells or the height of column\n header cells when sticking a MultiIndex. Defaults to 75 and 25 respectively.\n levels : list of int\n If ``axis`` is a MultiIndex the specific levels to stick. 
If ``None`` will\n stick all levels.\n\n Returns\n -------\n self : Styler\n \"\"\"\n if axis in [0, \"index\"]:\n axis, obj, tag, pos = 0, self.data.index, \"tbody\", \"left\"\n pixel_size = 75 if not pixel_size else pixel_size\n elif axis in [1, \"columns\"]:\n axis, obj, tag, pos = 1, self.data.columns, \"thead\", \"top\"\n pixel_size = 25 if not pixel_size else pixel_size\n else:\n raise ValueError(\"`axis` must be one of {0, 1, 'index', 'columns'}\")\n\n if not isinstance(obj, pd.MultiIndex):\n return self.set_table_styles(\n [\n {\n \"selector\": f\"{tag} th\",\n \"props\": f\"position:sticky; {pos}:0px; background-color:white;\",\n }\n ],\n overwrite=False,\n )\n else:\n range_idx = list(range(obj.nlevels))\n\n levels = sorted(levels) if levels else range_idx\n for i, level in enumerate(levels):\n self.set_table_styles(\n [\n {\n \"selector\": f\"{tag} th.level{level}\",\n \"props\": f\"position: sticky; \"\n f\"{pos}: {i * pixel_size}px; \"\n f\"{f'height: {pixel_size}px; ' if axis == 1 else ''}\"\n f\"{f'min-width: {pixel_size}px; ' if axis == 0 else ''}\"\n f\"{f'max-width: {pixel_size}px; ' if axis == 0 else ''}\"\n f\"background-color: white;\",\n }\n ],\n overwrite=False,\n )\n\n return self\n\n def set_table_styles(\n self,\n table_styles: dict[Any, CSSStyles] | CSSStyles,\n axis: int = 0,\n overwrite: bool = True,\n ) -> Styler:\n \"\"\"\n Set the table styles included within the ``<style>`` HTML element.\n\n This function can be used to style the entire table, columns, rows or\n specific HTML selectors.\n\n Parameters\n ----------\n table_styles : list or dict\n If supplying a list, each individual table_style should be a\n dictionary with ``selector`` and ``props`` keys. ``selector``\n should be a CSS selector that the style will be applied to\n (automatically prefixed by the table's UUID) and ``props``\n should be a list of tuples with ``(attribute, value)``.\n If supplying a dict, the dict keys should correspond to\n column names or index values, depending upon the specified\n `axis` argument. These will be mapped to row or col CSS\n selectors. MultiIndex values as dict keys should be\n in their respective tuple form. The dict values should be\n a list as specified in the form with CSS selectors and\n props that will be applied to the specified row or column.\n\n .. versionchanged:: 1.2.0\n\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``). Only used if `table_styles` is\n dict.\n\n .. versionadded:: 1.2.0\n\n overwrite : bool, default True\n Styles are replaced if `True`, or extended if `False`. CSS\n rules are preserved so most recent styles set will dominate\n if selectors intersect.\n\n .. versionadded:: 1.2.0\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.set_td_classes: Set the DataFrame of strings added to the ``class``\n attribute of ``<td>`` HTML elements.\n Styler.set_table_attributes: Set the table attributes added to the ``<table>``\n HTML element.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 'props': [('background-color', 'yellow')]}]\n ... )\n\n Or with CSS strings\n\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 'props': 'background-color: yellow; font-size: 1em;']}]\n ... )\n\n Adding column styling by name\n\n >>> df.style.set_table_styles({\n ... 
'A': [{'selector': '',\n ... 'props': [('color', 'red')]}],\n ... 'B': [{'selector': 'td',\n ... 'props': 'color: blue;']}]\n ... }, overwrite=False)\n\n Adding row styling\n\n >>> df.style.set_table_styles({\n ... 0: [{'selector': 'td:hover',\n ... 'props': [('font-size', '25px')]}]\n ... }, axis=1, overwrite=False)\n \"\"\"\n if isinstance(table_styles, dict):\n if axis in [0, \"index\"]:\n obj, idf = self.data.columns, \".col\"\n else:\n obj, idf = self.data.index, \".row\"\n\n table_styles = [\n {\n \"selector\": str(s[\"selector\"]) + idf + str(idx),\n \"props\": maybe_convert_css_to_tuples(s[\"props\"]),\n }\n for key, styles in table_styles.items()\n for idx in obj.get_indexer_for([key])\n for s in styles\n ]\n else:\n table_styles = [\n {\n \"selector\": s[\"selector\"],\n \"props\": maybe_convert_css_to_tuples(s[\"props\"]),\n }\n for s in table_styles\n ]\n\n if not overwrite and self.table_styles is not None:\n self.table_styles.extend(table_styles)\n else:\n self.table_styles = table_styles\n return self\n\n def set_na_rep(self, na_rep: str) -> StylerRenderer:\n \"\"\"\n Set the missing data representation on a ``Styler``.\n\n .. versionadded:: 1.0.0\n\n .. deprecated:: 1.3.0\n\n Parameters\n ----------\n na_rep : str\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n This method is deprecated. See `Styler.format()`\n \"\"\"\n warnings.warn(\n \"this method is deprecated in favour of `Styler.format(na_rep=..)`\",\n FutureWarning,\n stacklevel=2,\n )\n self.na_rep = na_rep\n return self.format(na_rep=na_rep, precision=self.precision)\n\n def hide_index(self, subset: Subset | None = None) -> Styler:\n \"\"\"\n Hide the entire index, or specific keys in the index from rendering.\n\n This method has dual functionality:\n\n - if ``subset`` is ``None`` then the entire index will be hidden whilst\n displaying all data-rows.\n - if a ``subset`` is given then those specific rows will be hidden whilst the\n index itself remains visible.\n\n .. versionchanged:: 1.3.0\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 1d input or single key along the index axis within\n `DataFrame.loc[<subset>, :]`, to limit ``data`` to *before* applying\n the function.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.hide_columns: Hide the entire column headers row, or specific columns.\n\n Examples\n --------\n Simple application hiding specific rows:\n\n >>> df = pd.DataFrame([[1,2], [3,4], [5,6]], index=[\"a\", \"b\", \"c\"])\n >>> df.style.hide_index([\"a\", \"b\"])\n 0 1\n c 5 6\n\n Hide the index and retain the data values:\n\n >>> midx = pd.MultiIndex.from_product([[\"x\", \"y\"], [\"a\", \"b\", \"c\"]])\n >>> df = pd.DataFrame(np.random.randn(6,6), index=midx, columns=midx)\n >>> df.style.format(\"{:.1f}\").hide_index()\n x y\n a b c a b c\n 0.1 0.0 0.4 1.3 0.6 -1.4\n 0.7 1.0 1.3 1.5 -0.0 -0.2\n 1.4 -0.8 1.6 -0.2 -0.4 -0.3\n 0.4 1.0 -0.2 -0.8 -1.2 1.1\n -0.6 1.2 1.8 1.9 0.3 0.3\n 0.8 0.5 -0.3 1.2 2.2 -0.8\n\n Hide specific rows but retain the index:\n\n >>> df.style.format(\"{:.1f}\").hide_index(subset=(slice(None), [\"a\", \"c\"]))\n x y\n a b c a b c\n x b 0.7 1.0 1.3 1.5 -0.0 -0.2\n y b -0.6 1.2 1.8 1.9 0.3 0.3\n\n Hide specific rows and the index:\n\n >>> df.style.format(\"{:.1f}\").hide_index(subset=(slice(None), [\"a\", \"c\"]))\n ... 
.hide_index()\n x y\n a b c a b c\n 0.7 1.0 1.3 1.5 -0.0 -0.2\n -0.6 1.2 1.8 1.9 0.3 0.3\n \"\"\"\n if subset is None:\n self.hide_index_ = True\n else:\n subset_ = IndexSlice[subset, :] # new var so mypy reads not Optional\n subset = non_reducing_slice(subset_)\n hide = self.data.loc[subset]\n hrows = self.index.get_indexer_for(hide.index)\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Sequence[int]\")\n self.hidden_rows = hrows # type: ignore[assignment]\n return self\n\n def hide_columns(self, subset: Subset | None = None) -> Styler:\n \"\"\"\n Hide the column headers or specific keys in the columns from rendering.\n\n This method has dual functionality:\n\n - if ``subset`` is ``None`` then the entire column headers row will be hidden\n whilst the data-values remain visible.\n - if a ``subset`` is given then those specific columns, including the\n data-values will be hidden, whilst the column headers row remains visible.\n\n .. versionchanged:: 1.3.0\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 1d input or single key along the columns axis within\n `DataFrame.loc[:, <subset>]`, to limit ``data`` to *before* applying\n the function.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.hide_index: Hide the entire index, or specific keys in the index.\n\n Examples\n --------\n Simple application hiding specific columns:\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=[\"a\", \"b\", \"c\"])\n >>> df.style.hide_columns([\"a\", \"b\"])\n c\n 0 3\n 1 6\n\n Hide column headers and retain the data values:\n\n >>> midx = pd.MultiIndex.from_product([[\"x\", \"y\"], [\"a\", \"b\", \"c\"]])\n >>> df = pd.DataFrame(np.random.randn(6,6), index=midx, columns=midx)\n >>> df.style.format(\"{:.1f}\").hide_columns()\n x d 0.1 0.0 0.4 1.3 0.6 -1.4\n e 0.7 1.0 1.3 1.5 -0.0 -0.2\n f 1.4 -0.8 1.6 -0.2 -0.4 -0.3\n y d 0.4 1.0 -0.2 -0.8 -1.2 1.1\n e -0.6 1.2 1.8 1.9 0.3 0.3\n f 0.8 0.5 -0.3 1.2 2.2 -0.8\n\n Hide specific columns but retain the column headers:\n\n >>> df.style.format(\"{:.1f}\").hide_columns(subset=(slice(None), [\"a\", \"c\"]))\n x y\n b b\n x a 0.0 0.6\n b 1.0 -0.0\n c -0.8 -0.4\n y a 1.0 -1.2\n b 1.2 0.3\n c 0.5 2.2\n\n Hide specific columns and the column headers:\n\n >>> df.style.format(\"{:.1f}\").hide_columns(subset=(slice(None), [\"a\", \"c\"]))\n ... 
.hide_columns()\n x a 0.0 0.6\n b 1.0 -0.0\n c -0.8 -0.4\n y a 1.0 -1.2\n b 1.2 0.3\n c 0.5 2.2\n \"\"\"\n if subset is None:\n self.hide_columns_ = True\n else:\n subset_ = IndexSlice[:, subset] # new var so mypy reads not Optional\n subset = non_reducing_slice(subset_)\n hide = self.data.loc[subset]\n hcols = self.columns.get_indexer_for(hide.columns)\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Sequence[int]\")\n self.hidden_columns = hcols # type: ignore[assignment]\n return self\n\n # -----------------------------------------------------------------------\n # A collection of \"builtin\" styles\n # -----------------------------------------------------------------------\n\n @doc(\n name=\"background\",\n alt=\"text\",\n image_prefix=\"bg\",\n axis=\"{0 or 'index', 1 or 'columns', None}\",\n text_threshold=\"\",\n )\n def background_gradient(\n self,\n cmap=\"PuBu\",\n low: float = 0,\n high: float = 0,\n axis: Axis | None = 0,\n subset: Subset | None = None,\n text_color_threshold: float = 0.408,\n vmin: float | None = None,\n vmax: float | None = None,\n gmap: Sequence | None = None,\n ) -> Styler:\n \"\"\"\n Color the {name} in a gradient style.\n\n The {name} color is determined according\n to the data in each column, row or frame, or by a given\n gradient map. Requires matplotlib.\n\n Parameters\n ----------\n cmap : str or colormap\n Matplotlib colormap.\n low : float\n Compress the color range at the low end. This is a multiple of the data\n range to extend below the minimum; good values usually in [0, 1],\n defaults to 0.\n high : float\n Compress the color range at the high end. This is a multiple of the data\n range to extend above the maximum; good values usually in [0, 1],\n defaults to 0.\n axis : {axis}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n text_color_threshold : float or int\n {text_threshold}\n Luminance threshold for determining text color in [0, 1]. Facilitates text\n visibility across varying background colors. All text is dark if 0, and\n light if 1, defaults to 0.408.\n vmin : float, optional\n Minimum data value that corresponds to colormap minimum value.\n If not specified the minimum value of the data (or gmap) will be used.\n\n .. versionadded:: 1.0.0\n\n vmax : float, optional\n Maximum data value that corresponds to colormap maximum value.\n If not specified the maximum value of the data (or gmap) will be used.\n\n .. versionadded:: 1.0.0\n\n gmap : array-like, optional\n Gradient map for determining the {name} colors. If not supplied\n will use the underlying data from rows, columns or frame. If given as an\n ndarray or list-like must be an identical shape to the underlying data\n considering ``axis`` and ``subset``. If given as DataFrame or Series must\n have same index and column labels considering ``axis`` and ``subset``.\n If supplied, ``vmin`` and ``vmax`` should be given relative to this\n gradient map.\n\n .. 
versionadded:: 1.3.0\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.{alt}_gradient: Color the {alt} in a gradient style.\n\n Notes\n -----\n When using ``low`` and ``high`` the range\n of the gradient, given by the data if ``gmap`` is not given or by ``gmap``,\n is extended at the low end effectively by\n `map.min - low * map.range` and at the high end by\n `map.max + high * map.range` before the colors are normalized and determined.\n\n If combining with ``vmin`` and ``vmax`` the `map.min`, `map.max` and\n `map.range` are replaced by values according to the values derived from\n ``vmin`` and ``vmax``.\n\n This method will preselect numeric columns and ignore non-numeric columns\n unless a ``gmap`` is supplied in which case no preselection occurs.\n\n Examples\n --------\n >>> df = pd.DataFrame(columns=[\"City\", \"Temp (c)\", \"Rain (mm)\", \"Wind (m/s)\"],\n ... data=[[\"Stockholm\", 21.6, 5.0, 3.2],\n ... [\"Oslo\", 22.4, 13.3, 3.1],\n ... [\"Copenhagen\", 24.5, 0.0, 6.7]])\n\n Shading the values column-wise, with ``axis=0``, preselecting numeric columns\n\n >>> df.style.{name}_gradient(axis=0)\n\n .. figure:: ../../_static/style/{image_prefix}_ax0.png\n\n Shading all values collectively using ``axis=None``\n\n >>> df.style.{name}_gradient(axis=None)\n\n .. figure:: ../../_static/style/{image_prefix}_axNone.png\n\n Compress the color map from the both ``low`` and ``high`` ends\n\n >>> df.style.{name}_gradient(axis=None, low=0.75, high=1.0)\n\n .. figure:: ../../_static/style/{image_prefix}_axNone_lowhigh.png\n\n Manually setting ``vmin`` and ``vmax`` gradient thresholds\n\n >>> df.style.{name}_gradient(axis=None, vmin=6.7, vmax=21.6)\n\n .. figure:: ../../_static/style/{image_prefix}_axNone_vminvmax.png\n\n Setting a ``gmap`` and applying to all columns with another ``cmap``\n\n >>> df.style.{name}_gradient(axis=0, gmap=df['Temp (c)'], cmap='YlOrRd')\n\n .. figure:: ../../_static/style/{image_prefix}_gmap.png\n\n Setting the gradient map for a dataframe (i.e. ``axis=None``), we need to\n explicitly state ``subset`` to match the ``gmap`` shape\n\n >>> gmap = np.array([[1,2,3], [2,3,4], [3,4,5]])\n >>> df.style.{name}_gradient(axis=None, gmap=gmap,\n ... cmap='YlOrRd', subset=['Temp (c)', 'Rain (mm)', 'Wind (m/s)']\n ... )\n\n .. 
figure:: ../../_static/style/{image_prefix}_axNone_gmap.png\n \"\"\"\n if subset is None and gmap is None:\n subset = self.data.select_dtypes(include=np.number).columns\n\n self.apply(\n _background_gradient,\n cmap=cmap,\n subset=subset,\n axis=axis,\n low=low,\n high=high,\n text_color_threshold=text_color_threshold,\n vmin=vmin,\n vmax=vmax,\n gmap=gmap,\n )\n return self\n\n @doc(\n background_gradient,\n name=\"text\",\n alt=\"background\",\n image_prefix=\"tg\",\n axis=\"{0 or 'index', 1 or 'columns', None}\",\n text_threshold=\"This argument is ignored (only used in `background_gradient`).\",\n )\n def text_gradient(\n self,\n cmap=\"PuBu\",\n low: float = 0,\n high: float = 0,\n axis: Axis | None = 0,\n subset: Subset | None = None,\n vmin: float | None = None,\n vmax: float | None = None,\n gmap: Sequence | None = None,\n ) -> Styler:\n if subset is None and gmap is None:\n subset = self.data.select_dtypes(include=np.number).columns\n\n return self.apply(\n _background_gradient,\n cmap=cmap,\n subset=subset,\n axis=axis,\n low=low,\n high=high,\n vmin=vmin,\n vmax=vmax,\n gmap=gmap,\n text_only=True,\n )\n\n def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:\n \"\"\"\n Set defined CSS-properties to each ``<td>`` HTML element within the given\n subset.\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n **kwargs : dict\n A dictionary of property, value pairs to be set for each cell.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n This is a convenience methods which wraps the :meth:`Styler.applymap` calling a\n function returning the CSS-properties independently of the data.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_properties(color=\"white\", align=\"right\")\n >>> df.style.set_properties(**{'background-color': 'yellow'})\n \"\"\"\n values = \"\".join(f\"{p}: {v};\" for p, v in kwargs.items())\n return self.applymap(lambda x: values, subset=subset)\n\n @staticmethod\n def _bar(\n s,\n align: str,\n colors: list[str],\n width: float = 100,\n vmin: float | None = None,\n vmax: float | None = None,\n ):\n \"\"\"\n Draw bar chart in dataframe cells.\n \"\"\"\n # Get input value range.\n smin = np.nanmin(s.to_numpy()) if vmin is None else vmin\n smax = np.nanmax(s.to_numpy()) if vmax is None else vmax\n if align == \"mid\":\n smin = min(0, smin)\n smax = max(0, smax)\n elif align == \"zero\":\n # For \"zero\" mode, we want the range to be symmetrical around zero.\n smax = max(abs(smin), abs(smax))\n smin = -smax\n # Transform to percent-range of linear-gradient\n normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)\n zero = -width * smin / (smax - smin + 1e-12)\n\n def css_bar(start: float, end: float, color: str) -> str:\n \"\"\"\n Generate CSS code to draw a bar from start to end.\n \"\"\"\n css = \"width: 10em; height: 80%;\"\n if end > start:\n css += \"background: linear-gradient(90deg,\"\n if start > 0:\n css += f\" transparent {start:.1f}%, {color} {start:.1f}%, \"\n e = min(end, width)\n css += f\"{color} {e:.1f}%, transparent {e:.1f}%)\"\n return css\n\n def css(x):\n if pd.isna(x):\n return \"\"\n\n # avoid deprecated indexing `colors[x > zero]`\n color = colors[1] if x > zero else colors[0]\n\n if align == \"left\":\n return 
css_bar(0, x, color)\n else:\n return css_bar(min(x, zero), max(x, zero), color)\n\n if s.ndim == 1:\n return [css(x) for x in normed]\n else:\n return DataFrame(\n [[css(x) for x in row] for row in normed],\n index=s.index,\n columns=s.columns,\n )\n\n def bar(\n self,\n subset: Subset | None = None,\n axis: Axis | None = 0,\n color=\"#d65f5f\",\n width: float = 100,\n align: str = \"left\",\n vmin: float | None = None,\n vmax: float | None = None,\n ) -> Styler:\n \"\"\"\n Draw bar chart in the cell backgrounds.\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n color : str or 2-tuple/list\n If a str is passed, the color is the same for both\n negative and positive numbers. If 2-tuple/list is used, the\n first element is the color_negative and the second is the\n color_positive (eg: ['#d65f5f', '#5fba7d']).\n width : float, default 100\n A number between 0 or 100. The largest value will cover `width`\n percent of the cell's width.\n align : {'left', 'zero',' mid'}, default 'left'\n How to align the bars with the cells.\n\n - 'left' : the min value starts at the left of the cell.\n - 'zero' : a value of zero is located at the center of the cell.\n - 'mid' : the center of the cell is at (max-min)/2, or\n if values are all negative (positive) the zero is aligned\n at the right (left) of the cell.\n vmin : float, optional\n Minimum bar value, defining the left hand limit\n of the bar drawing range, lower values are clipped to `vmin`.\n When None (default): the minimum value of the data will be used.\n vmax : float, optional\n Maximum bar value, defining the right hand limit\n of the bar drawing range, higher values are clipped to `vmax`.\n When None (default): the maximum value of the data will be used.\n\n Returns\n -------\n self : Styler\n \"\"\"\n if align not in (\"left\", \"zero\", \"mid\"):\n raise ValueError(\"`align` must be one of {'left', 'zero',' mid'}\")\n\n if not (is_list_like(color)):\n color = [color, color]\n elif len(color) == 1:\n color = [color[0], color[0]]\n elif len(color) > 2:\n raise ValueError(\n \"`color` must be string or a list-like \"\n \"of length 2: [`color_neg`, `color_pos`] \"\n \"(eg: color=['#d65f5f', '#5fba7d'])\"\n )\n\n if subset is None:\n subset = self.data.select_dtypes(include=np.number).columns\n\n self.apply(\n self._bar,\n subset=subset,\n axis=axis,\n align=align,\n colors=color,\n width=width,\n vmin=vmin,\n vmax=vmax,\n )\n\n return self\n\n def highlight_null(\n self,\n null_color: str = \"red\",\n subset: Subset | None = None,\n props: str | None = None,\n ) -> Styler:\n \"\"\"\n Highlight missing values with a style.\n\n Parameters\n ----------\n null_color : str, default 'red'\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n\n .. versionadded:: 1.1.0\n\n props : str, default None\n CSS properties to use for highlighting. 
If ``props`` is given, ``color``\n is not used.\n\n .. versionadded:: 1.3.0\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.highlight_max: Highlight the maximum with a style.\n Styler.highlight_min: Highlight the minimum with a style.\n Styler.highlight_between: Highlight a defined range with a style.\n Styler.highlight_quantile: Highlight values defined by a quantile with a style.\n \"\"\"\n\n def f(data: DataFrame, props: str) -> np.ndarray:\n return np.where(pd.isna(data).to_numpy(), props, \"\")\n\n if props is None:\n props = f\"background-color: {null_color};\"\n # error: Argument 1 to \"apply\" of \"Styler\" has incompatible type\n # \"Callable[[DataFrame, str], ndarray]\"; expected \"Callable[..., Styler]\"\n return self.apply(\n f, axis=None, subset=subset, props=props # type: ignore[arg-type]\n )\n\n def highlight_max(\n self,\n subset: Subset | None = None,\n color: str = \"yellow\",\n axis: Axis | None = 0,\n props: str | None = None,\n ) -> Styler:\n \"\"\"\n Highlight the maximum with a style.\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n color : str, default 'yellow'\n Background color to use for highlighting.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n props : str, default None\n CSS properties to use for highlighting. If ``props`` is given, ``color``\n is not used.\n\n .. versionadded:: 1.3.0\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.highlight_null: Highlight missing values with a style.\n Styler.highlight_min: Highlight the minimum with a style.\n Styler.highlight_between: Highlight a defined range with a style.\n Styler.highlight_quantile: Highlight values defined by a quantile with a style.\n \"\"\"\n\n def f(data: FrameOrSeries, props: str) -> np.ndarray:\n return np.where(data == np.nanmax(data.to_numpy()), props, \"\")\n\n if props is None:\n props = f\"background-color: {color};\"\n # error: Argument 1 to \"apply\" of \"Styler\" has incompatible type\n # \"Callable[[FrameOrSeries, str], ndarray]\"; expected \"Callable[..., Styler]\"\n return self.apply(\n f, axis=axis, subset=subset, props=props # type: ignore[arg-type]\n )\n\n def highlight_min(\n self,\n subset: Subset | None = None,\n color: str = \"yellow\",\n axis: Axis | None = 0,\n props: str | None = None,\n ) -> Styler:\n \"\"\"\n Highlight the minimum with a style.\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n color : str, default 'yellow'\n Background color to use for highlighting.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n props : str, default None\n CSS properties to use for highlighting. If ``props`` is given, ``color``\n is not used.\n\n .. 
versionadded:: 1.3.0\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.highlight_null: Highlight missing values with a style.\n Styler.highlight_max: Highlight the maximum with a style.\n Styler.highlight_between: Highlight a defined range with a style.\n Styler.highlight_quantile: Highlight values defined by a quantile with a style.\n \"\"\"\n\n def f(data: FrameOrSeries, props: str) -> np.ndarray:\n return np.where(data == np.nanmin(data.to_numpy()), props, \"\")\n\n if props is None:\n props = f\"background-color: {color};\"\n # error: Argument 1 to \"apply\" of \"Styler\" has incompatible type\n # \"Callable[[FrameOrSeries, str], ndarray]\"; expected \"Callable[..., Styler]\"\n return self.apply(\n f, axis=axis, subset=subset, props=props # type: ignore[arg-type]\n )\n\n def highlight_between(\n self,\n subset: Subset | None = None,\n color: str = \"yellow\",\n axis: Axis | None = 0,\n left: Scalar | Sequence | None = None,\n right: Scalar | Sequence | None = None,\n inclusive: str = \"both\",\n props: str | None = None,\n ) -> Styler:\n \"\"\"\n Highlight a defined range with a style.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n color : str, default 'yellow'\n Background color to use for highlighting.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n If ``left`` or ``right`` given as sequence, axis along which to apply those\n boundaries. See examples.\n left : scalar or datetime-like, or sequence or array-like, default None\n Left bound for defining the range.\n right : scalar or datetime-like, or sequence or array-like, default None\n Right bound for defining the range.\n inclusive : {'both', 'neither', 'left', 'right'}\n Identify whether bounds are closed or open.\n props : str, default None\n CSS properties to use for highlighting. If ``props`` is given, ``color``\n is not used.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.highlight_null: Highlight missing values with a style.\n Styler.highlight_max: Highlight the maximum with a style.\n Styler.highlight_min: Highlight the minimum with a style.\n Styler.highlight_quantile: Highlight values defined by a quantile with a style.\n\n Notes\n -----\n If ``left`` is ``None`` only the right bound is applied.\n If ``right`` is ``None`` only the left bound is applied. If both are ``None``\n all values are highlighted.\n\n ``axis`` is only needed if ``left`` or ``right`` are provided as a sequence or\n an array-like object for aligning the shapes. If ``left`` and ``right`` are\n both scalars then all ``axis`` inputs will give the same result.\n\n This function only works with compatible ``dtypes``. For example a datetime-like\n region can only use equivalent datetime-like ``left`` and ``right`` arguments.\n Use ``subset`` to control regions which have multiple ``dtypes``.\n\n Examples\n --------\n Basic usage\n\n >>> df = pd.DataFrame({\n ... 'One': [1.2, 1.6, 1.5],\n ... 'Two': [2.9, 2.1, 2.5],\n ... 'Three': [3.1, 3.2, 3.8],\n ... })\n >>> df.style.highlight_between(left=2.1, right=2.9)\n\n .. 
figure:: ../../_static/style/hbetw_basic.png\n\n Using a range input sequnce along an ``axis``, in this case setting a ``left``\n and ``right`` for each column individually\n\n >>> df.style.highlight_between(left=[1.4, 2.4, 3.4], right=[1.6, 2.6, 3.6],\n ... axis=1, color=\"#fffd75\")\n\n .. figure:: ../../_static/style/hbetw_seq.png\n\n Using ``axis=None`` and providing the ``left`` argument as an array that\n matches the input DataFrame, with a constant ``right``\n\n >>> df.style.highlight_between(left=[[2,2,3],[2,2,3],[3,3,3]], right=3.5,\n ... axis=None, color=\"#fffd75\")\n\n .. figure:: ../../_static/style/hbetw_axNone.png\n\n Using ``props`` instead of default background coloring\n\n >>> df.style.highlight_between(left=1.5, right=3.5,\n ... props='font-weight:bold;color:#e83e8c')\n\n .. figure:: ../../_static/style/hbetw_props.png\n \"\"\"\n if props is None:\n props = f\"background-color: {color};\"\n return self.apply(\n _highlight_between, # type: ignore[arg-type]\n axis=axis,\n subset=subset,\n props=props,\n left=left,\n right=right,\n inclusive=inclusive,\n )\n\n def highlight_quantile(\n self,\n subset: Subset | None = None,\n color: str = \"yellow\",\n axis: Axis | None = 0,\n q_left: float = 0.0,\n q_right: float = 1.0,\n interpolation: str = \"linear\",\n inclusive: str = \"both\",\n props: str | None = None,\n ) -> Styler:\n \"\"\"\n Highlight values defined by a quantile with a style.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n color : str, default 'yellow'\n Background color to use for highlighting\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Axis along which to determine and highlight quantiles. If ``None`` quantiles\n are measured over the entire DataFrame. See examples.\n q_left : float, default 0\n Left bound, in [0, q_right), for the target quantile range.\n q_right : float, default 1\n Right bound, in (q_left, 1], for the target quantile range.\n interpolation : {‘linear’, ‘lower’, ‘higher’, ‘midpoint’, ‘nearest’}\n Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for\n quantile estimation.\n inclusive : {'both', 'neither', 'left', 'right'}\n Identify whether quantile bounds are closed or open.\n props : str, default None\n CSS properties to use for highlighting. If ``props`` is given, ``color``\n is not used.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.highlight_null: Highlight missing values with a style.\n Styler.highlight_max: Highlight the maximum with a style.\n Styler.highlight_min: Highlight the minimum with a style.\n Styler.highlight_between: Highlight a defined range with a style.\n\n Notes\n -----\n This function does not work with ``str`` dtypes.\n\n Examples\n --------\n Using ``axis=None`` and apply a quantile to all collective data\n\n >>> df = pd.DataFrame(np.arange(10).reshape(2,5) + 1)\n >>> df.style.highlight_quantile(axis=None, q_left=0.8, color=\"#fffd75\")\n\n .. figure:: ../../_static/style/hq_axNone.png\n\n Or highlight quantiles row-wise or column-wise, in this case by row-wise\n\n >>> df.style.highlight_quantile(axis=1, q_left=0.8, color=\"#fffd75\")\n\n .. 
figure:: ../../_static/style/hq_ax1.png\n\n Use ``props`` instead of default background coloring\n\n >>> df.style.highlight_quantile(axis=None, q_left=0.2, q_right=0.8,\n ... props='font-weight:bold;color:#e83e8c')\n\n .. figure:: ../../_static/style/hq_props.png\n \"\"\"\n subset_ = slice(None) if subset is None else subset\n subset_ = non_reducing_slice(subset_)\n data = self.data.loc[subset_]\n\n # after quantile is found along axis, e.g. along rows,\n # applying the calculated quantile to alternate axis, e.g. to each column\n kwargs = {\"q\": [q_left, q_right], \"interpolation\": interpolation}\n if axis in [0, \"index\"]:\n q = data.quantile(axis=axis, numeric_only=False, **kwargs)\n axis_apply: int | None = 1\n elif axis in [1, \"columns\"]:\n q = data.quantile(axis=axis, numeric_only=False, **kwargs)\n axis_apply = 0\n else: # axis is None\n q = Series(data.to_numpy().ravel()).quantile(**kwargs)\n axis_apply = None\n\n if props is None:\n props = f\"background-color: {color};\"\n return self.apply(\n _highlight_between, # type: ignore[arg-type]\n axis=axis_apply,\n subset=subset,\n props=props,\n left=q.iloc[0],\n right=q.iloc[1],\n inclusive=inclusive,\n )\n\n @classmethod\n def from_custom_template(\n cls, searchpath, html_table: str | None = None, html_style: str | None = None\n ):\n \"\"\"\n Factory function for creating a subclass of ``Styler``.\n\n Uses custom templates and Jinja environment.\n\n .. versionchanged:: 1.3.0\n\n Parameters\n ----------\n searchpath : str or list\n Path or paths of directories containing the templates.\n html_table : str\n Name of your custom template to replace the html_table template.\n\n .. versionadded:: 1.3.0\n\n html_style : str\n Name of your custom template to replace the html_style template.\n\n .. versionadded:: 1.3.0\n\n Returns\n -------\n MyStyler : subclass of Styler\n Has the correct ``env``,``template_html``, ``template_html_table`` and\n ``template_html_style`` class attributes set.\n \"\"\"\n loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])\n\n # mypy doesn't like dynamically-defined classes\n # error: Variable \"cls\" is not valid as a type\n # error: Invalid base class \"cls\"\n class MyStyler(cls): # type:ignore[valid-type,misc]\n env = jinja2.Environment(loader=loader)\n if html_table:\n template_html_table = env.get_template(html_table)\n if html_style:\n template_html_style = env.get_template(html_style)\n\n return MyStyler\n\n def pipe(self, func: Callable, *args, **kwargs):\n \"\"\"\n Apply ``func(self, *args, **kwargs)``, and return the result.\n\n Parameters\n ----------\n func : function\n Function to apply to the Styler. Alternatively, a\n ``(callable, keyword)`` tuple where ``keyword`` is a string\n indicating the keyword of ``callable`` that expects the Styler.\n *args : optional\n Arguments passed to `func`.\n **kwargs : optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object :\n The value returned by ``func``.\n\n See Also\n --------\n DataFrame.pipe : Analogous method for DataFrame.\n Styler.apply : Apply a CSS-styling function column-wise, row-wise, or\n table-wise.\n\n Notes\n -----\n Like :meth:`DataFrame.pipe`, this method can simplify the\n application of several user-defined functions to a styler. Instead\n of writing:\n\n .. code-block:: python\n\n f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)\n\n users can write:\n\n .. 
code-block:: python\n\n (df.style.set_precision(3)\n .pipe(g, arg1=a)\n .pipe(f, arg2=b, arg3=c))\n\n In particular, this allows users to define functions that take a\n styler object, along with other parameters, and return the styler after\n making styling changes (such as calling :meth:`Styler.apply` or\n :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined\n style \"transformations\" can be interleaved with calls to the built-in\n Styler interface.\n\n Examples\n --------\n >>> def format_conversion(styler):\n ... return (styler.set_properties(**{'text-align': 'right'})\n ... .format({'conversion': '{:.1%}'}))\n\n The user-defined ``format_conversion`` function above can be called\n within a sequence of other style modifications:\n\n >>> df = pd.DataFrame({'trial': list(range(5)),\n ... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})\n >>> (df.style\n ... .highlight_min(subset=['conversion'], color='yellow')\n ... .pipe(format_conversion)\n ... .set_caption(\"Results with minimum conversion highlighted.\"))\n \"\"\"\n return com.pipe(self, func, *args, **kwargs)\n\n\ndef _validate_apply_axis_arg(\n arg: FrameOrSeries | Sequence | np.ndarray,\n arg_name: str,\n dtype: Any | None,\n data: FrameOrSeries,\n) -> np.ndarray:\n \"\"\"\n For the apply-type methods, ``axis=None`` creates ``data`` as DataFrame, and for\n ``axis=[1,0]`` it creates a Series. Where ``arg`` is expected as an element\n of some operator with ``data`` we must make sure that the two are compatible shapes,\n or raise.\n\n Parameters\n ----------\n arg : sequence, Series or DataFrame\n the user input arg\n arg_name : string\n name of the arg for use in error messages\n dtype : numpy dtype, optional\n forced numpy dtype if given\n data : Series or DataFrame\n underling subset of Styler data on which operations are performed\n\n Returns\n -------\n ndarray\n \"\"\"\n dtype = {\"dtype\": dtype} if dtype else {}\n # raise if input is wrong for axis:\n if isinstance(arg, Series) and isinstance(data, DataFrame):\n raise ValueError(\n f\"'{arg_name}' is a Series but underlying data for operations \"\n f\"is a DataFrame since 'axis=None'\"\n )\n elif isinstance(arg, DataFrame) and isinstance(data, Series):\n raise ValueError(\n f\"'{arg_name}' is a DataFrame but underlying data for \"\n f\"operations is a Series with 'axis in [0,1]'\"\n )\n elif isinstance(arg, (Series, DataFrame)): # align indx / cols to data\n arg = arg.reindex_like(data, method=None).to_numpy(**dtype)\n else:\n arg = np.asarray(arg, **dtype)\n assert isinstance(arg, np.ndarray) # mypy requirement\n if arg.shape != data.shape: # check valid input\n raise ValueError(\n f\"supplied '{arg_name}' is not correct shape for data over \"\n f\"selected 'axis': got {arg.shape}, \"\n f\"expected {data.shape}\"\n )\n return arg\n\n\ndef _background_gradient(\n data,\n cmap=\"PuBu\",\n low: float = 0,\n high: float = 0,\n text_color_threshold: float = 0.408,\n vmin: float | None = None,\n vmax: float | None = None,\n gmap: Sequence | np.ndarray | FrameOrSeries | None = None,\n text_only: bool = False,\n):\n \"\"\"\n Color background in a range according to the data or a gradient map\n \"\"\"\n if gmap is None: # the data is used the gmap\n gmap = data.to_numpy(dtype=float)\n else: # else validate gmap against the underlying data\n gmap = _validate_apply_axis_arg(gmap, \"gmap\", float, data)\n\n with _mpl(Styler.background_gradient) as (plt, colors):\n smin = np.nanmin(gmap) if vmin is None else vmin\n smax = np.nanmax(gmap) if vmax is None else vmax\n rng = 
smax - smin\n # extend lower / upper bounds, compresses color range\n norm = colors.Normalize(smin - (rng * low), smax + (rng * high))\n rgbas = plt.cm.get_cmap(cmap)(norm(gmap))\n\n def relative_luminance(rgba) -> float:\n \"\"\"\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n \"\"\"\n r, g, b = (\n x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4\n for x in rgba[:3]\n )\n return 0.2126 * r + 0.7152 * g + 0.0722 * b\n\n def css(rgba, text_only) -> str:\n if not text_only:\n dark = relative_luminance(rgba) < text_color_threshold\n text_color = \"#f1f1f1\" if dark else \"#000000\"\n return f\"background-color: {colors.rgb2hex(rgba)};color: {text_color};\"\n else:\n return f\"color: {colors.rgb2hex(rgba)};\"\n\n if data.ndim == 1:\n return [css(rgba, text_only) for rgba in rgbas]\n else:\n return DataFrame(\n [[css(rgba, text_only) for rgba in row] for row in rgbas],\n index=data.index,\n columns=data.columns,\n )\n\n\ndef _highlight_between(\n data: FrameOrSeries,\n props: str,\n left: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,\n right: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,\n inclusive: bool | str = True,\n) -> np.ndarray:\n \"\"\"\n Return an array of css props based on condition of data values within given range.\n \"\"\"\n if np.iterable(left) and not isinstance(left, str):\n left = _validate_apply_axis_arg(\n left, \"left\", None, data # type: ignore[arg-type]\n )\n\n if np.iterable(right) and not isinstance(right, str):\n right = _validate_apply_axis_arg(\n right, \"right\", None, data # type: ignore[arg-type]\n )\n\n # get ops with correct boundary attribution\n if inclusive == \"both\":\n ops = (operator.ge, operator.le)\n elif inclusive == \"neither\":\n ops = (operator.gt, operator.lt)\n elif inclusive == \"left\":\n ops = (operator.ge, operator.lt)\n elif inclusive == \"right\":\n ops = (operator.gt, operator.le)\n else:\n raise ValueError(\n f\"'inclusive' values can be 'both', 'left', 'right', or 'neither' \"\n f\"got {inclusive}\"\n )\n\n g_left = (\n ops[0](data, left)\n if left is not None\n else np.full(data.shape, True, dtype=bool)\n )\n l_right = (\n ops[1](data, right)\n if right is not None\n else np.full(data.shape, True, dtype=bool)\n )\n return np.where(g_left & l_right, props, \"\")\n"
] | [
[
"numpy.array"
],
[
"numpy.nanmax",
"numpy.asarray",
"numpy.nanmin",
"pandas._config.get_option",
"pandas.core.common.pipe",
"pandas.io.formats.style_render.non_reducing_slice",
"pandas.isna",
"pandas.core.frame.DataFrame",
"numpy.where",
"pandas.io.formats.style_render.maybe_convert_css_to_tuples",
"pandas.compat._optional.import_optional_dependency",
"numpy.full",
"pandas.io.formats.style_render.Tooltips",
"matplotlib.pyplot.cm.get_cmap",
"pandas.api.types.is_list_like",
"numpy.iterable",
"pandas.io.formats.format.save_to_buffer",
"matplotlib.colors.Normalize",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.util._decorators.doc",
"matplotlib.colors.rgb2hex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fclubb/fault-swath | [
"687b1af26a969b1ed4e94cc597b23762fd1bb3da",
"687b1af26a969b1ed4e94cc597b23762fd1bb3da"
] | [
"data_processing/get_basin_medians.py",
"plotting/LSDPlottingTools/inundation.py"
] | [
"# get_basin_medians.py\n# This script creates a shapefile of the basins along the SAF and gets the median channel gradient,\n# hillslope gradient and hilltop curvature in each basin\n# FJC 14/06/21\n\n# import modules\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nfrom shapely.geometry import Point\n\ndef percentile(n):\n def percentile_(x):\n return np.percentile(x, n)\n percentile_.__name__ = 'percentile_%s' % n\n return percentile_\n\n\n# read in the river profile CSV\ndata_dir = '/raid/fclubb/san_andreas/USGS_NED_10m/SAF_combined/'\nfname = 'SAF_combined_10m'\nso = 5\ndf = pd.read_csv(data_dir+fname+'_profiles_SO{}.csv'.format(so))\ndf = df[df['slope'] > 0]\ndf.columns\n\n# read in the hillslope metrics CSV\nhs_df = pd.read_csv(data_dir+fname+'_hillslopes_SO{}.csv'.format(so))\n\n# read in the hilltop metrics CSV\nht_df = pd.read_csv(data_dir+fname+'_RidgeData_SO{}.csv'.format(so))\n\n\n# convert the river csv to a geodataframe. Remove the non-unique ID labels - these will be replaced by unique basin IDs\ngeometry = [Point(xy) for xy in zip(df.longitude, df.latitude)]\ncrs = 'epsg:4326' #http://www.spatialreference.org/ref/epsg/2263/\nriver_gdf = gpd.GeoDataFrame(df.drop(['latitude','longitude','basin_id','id','new_id','node'], axis=1), crs=crs, geometry=geometry)\nriver_gdf_clean = river_gdf[river_gdf.geometry.type == 'Point']\n\n# convert the hillslope csv to a geodataframe. Remove the non-unique ID labels\ngeometry = [Point(xy) for xy in zip(hs_df.longitude_outlet, hs_df.latitude_outlet)]\nhs_gdf = gpd.GeoDataFrame(hs_df, crs=crs, geometry=geometry)\n\n# convert the hilltop csv to a geodataframe. Remove the non-unique ID labels\ngeometry = [Point(xy) for xy in zip(ht_df.longitude, ht_df.latitude)]\nht_gdf = gpd.GeoDataFrame(ht_df.drop(['latitude','longitude','basin_id','new_id'], axis=1), crs=crs, geometry=geometry)\n\n# add a unique id to the basin\nbasin_gdf = gpd.read_file(data_dir+fname+'_basins_SO{}.shp'.format(so))\n# convert the basin GDF to WGS84\nbasin_gdf = basin_gdf.to_crs('epsg:4326')\n#basin_gdf = basin_gdf.drop(['basin_id'], axis=1)\nbasin_gdf['unique_id'] = basin_gdf.index\nbasin_gdf = basin_gdf[basin_gdf.geometry.type == 'Polygon']\n\n# merge the river and basins gdf and calculate the median channel slope in each basin\njoin = gpd.sjoin(river_gdf, basin_gdf, how='left', op='intersects')\ngr = join.groupby(['unique_id'])['slope'].agg(['median', 'std', percentile(16), percentile(84)]).rename(columns={'median': 'channel_slope_median', 'std': 'channel_slope_std', 'percentile_16': 'channel_slope_16th', 'percentile_84': 'channel_slope_84th'}).reset_index()\nbasin_gdf = basin_gdf.merge(gr, on='unique_id')\n\n# now join the hillslope data\njoin = gpd.sjoin(basin_gdf, hs_gdf, how='left', op='contains')\n\n# now join the hilltop data - find points within the basin and get the median curvature in each basin\njoin = join.drop(['index_right'], axis=1)\nht_join = gpd.sjoin(ht_gdf, join, how='left', op='within')\ngr = ht_join.groupby(['unique_id'])['curvature'].agg(['median', 'std', percentile(16), percentile(84)]).rename(columns={'median': 'ht_curv_median', 'std': 'ht_curv_std', 'percentile_16': 'ht_curv_16th', 'percentile_84': 'ht_curv_84th'}).reset_index()\njoin = join.merge(gr, on='unique_id')\nprint(len(join.unique_id.unique()))\n\n# write to shapefile\njoin.to_file(data_dir+fname+'_channels_plus_hilltops_by_basin_SO{}.shp'.format(so))\n\n\n\n\n",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\ninundation.py\r\n\r\nA script that calculates the area of the model/DEM domain that is inundated by\r\nwater; either across the entire domain, the floodplain (using Fiona's floodplain\r\nID algorithm, or in the Channel, using the channel network and measuring along \r\nthis line. The mean, max, and total area wil be able to be calculated.)\r\n\"\"\"\r\n\r\nimport LSDMap_GDALIO as lsdgdal\r\nimport numpy as _np\r\nimport matplotlib.pyplot as plt\r\nimport glob\r\nimport os\r\nimport re\r\n\r\n\r\ndef calculate_mean_waterdepth(raster):\r\n print(\"Mean water depth (whole raster): \", _np.mean(raster))\r\n return _np.mean(raster)\r\n \r\ndef calcualte_max_waterdepth(raster):\r\n #print(_np.max(raster))\r\n return _np.max(raster)\r\n \r\ndef calculate_waterinundation_area(raster, cellsize, threshold):\r\n \"\"\"Note: this will probably need some sort of threshold as Caesar maps \r\n out very small water depths and so could give huge 'inundation' areas.\"\"\"\r\n total_cells = _np.count_nonzero(raster > threshold)\r\n area = DX * DX * total_cells # metres\r\n \r\n print(\"Inundation area is: \", area, \" metres square\")\r\n return area\r\n \r\ndef floodplain_mean_depth(water_raster, floodplain_mask, threshold=0.0):\r\n \"\"\"Calculates the mean waterdepth on the floodplain\"\"\"\r\n \r\n # I.e. mask where we are NOT in a floodplain (flagged==1)\r\n floodplain_waters = _np.ma.masked_where(floodplain_mask != 1, water_raster)\r\n #plt.imshow(floodplain_waters)\r\n \r\n mean_water_depth_on_floodplain = _np.mean(floodplain_waters)\r\n print(\"Mean water depth in floodplain: \", mean_water_depth_on_floodplain)\r\n #print(floodplain_waters)\r\n return mean_water_depth_on_floodplain\r\n \r\ndef main_channel_mean_depth(water_raster, floodplain_mask, stream_mask, threshold=0.0):\r\n \"\"\"Calculates the mean water depth in the floodplain channel.\r\n I.e. 
does not include channel headwaters outside the floodplain.\"\"\"\r\n #floodplain_channel_waters = _np.ma.masked_where(_np.logical_and(floodplain_mask !=1, stream_mask >= 0), water_raster) \r\n #floodplain_channel_waters = water_raster[_np.logical_and(floodplain_mask==1 , (~_np.isnan(stream_mask)))]\r\n #floodplain_channel_waters = water_raster[(floodplain_mask==1) & (~_np.isnan(stream_mask))]\r\n \r\n # Floodplain minus channel...\r\n #floodplain_channel_waters = _np.ma.masked_where( (floodplain_mask !=1 & _np.isnan(stream_mask) ), water_raster)\r\n \r\n # Channels within the flood plain\r\n #floodplain_channel_waters = _np.ma.masked_where( (floodplain_mask != 1 & ~_np.isnan(stream_mask) ), water_raster)\r\n \r\n # Get fourth order streams\r\n floodplain_channel_waters = _np.ma.masked_where( stream_mask!=5 , water_raster)\r\n \r\n mean_channel_depth = _np.mean(floodplain_channel_waters)\r\n print(\"Mean main channel depth: \", mean_channel_depth)\r\n\r\n #plt.imshow(floodplain_mask)\r\n #plt.imshow(stream_mask)\r\n #plt.imshow(floodplain_channel_waters)\r\n \r\n return mean_channel_depth\r\n\r\ndef split_letternumber_string(string):\r\n \"\"\"\r\n Splits strings of the form \"Alphabet123\" into a tuple of (\"Alpha\", \"123\")\r\n \"\"\"\r\n match = re.match(r\"([a-z]+)([0-9]+)\", string, re.I)\r\n if match:\r\n items = match.groups()\r\n return items #tuple\r\n \r\ndef timestep_string_from_filename(filename):\r\n \"\"\"Extracts the timestep string from the file\"\"\"\r\n base_name = os.path.splitext(os.path.basename(filename))[0]\r\n print(base_name)\r\n timestep_string = split_letternumber_string(base_name)\r\n \r\n return timestep_string[1]\r\n\r\n\r\ndef natural_key(string_):\r\n \"\"\"Sorts strings in a 'natural sort' way, ie.. if you have numbers in the strings,\r\n it treats the digits as a single number. \r\n See http://www.codinghorror.com/blog/archives/001018.html\r\n \"\"\"\r\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_)]\r\n\r\ndef simulation_inundation_timeseries(glob_wildcard, floodplain_mask, stream_mask,\r\n threshold=0,\r\n savefilename=\"inundation_metrics.txt\"):\r\n \"\"\"Creates a timeseries of a given inundation metric. 
\r\n \r\n Options should be:\r\n Inundation Area (Entire catchment)\r\n Mean Water Depth (Entire catchment)\r\n Mean Water Depth (Floodplain only)\r\n Mean Water Depth (Channel)\r\n \"\"\"\r\n # Create an empty array with the correct number of columns\r\n data_array = _np.empty((0,5), dtype=_np.float32)\r\n print(\"Data array shape: \", data_array.shape)\r\n\r\n for water_raster_file in sorted(glob.glob(glob_wildcard), key=natural_key):\r\n print(water_raster_file)\r\n water_raster = lsdgdal.ReadRasterArrayBlocks(water_raster_file)\r\n \r\n timestep_row = [] # Empty list to store elements of the row\r\n cur_timestep = timestep_string_from_filename(water_raster_file) # get the current timestep by parsing the filename\r\n\r\n # Inundation area\r\n this_inundation_area = calculate_waterinundation_area(water_raster, DX, 0.02)\r\n this_mean_catchment_waterdepth = calculate_mean_waterdepth(water_raster)\r\n this_mean_floodplain_waterdepth = floodplain_mean_depth(water_raster, floodplain_mask)\r\n this_mean_mainchannel_waterdepth = main_channel_mean_depth(water_raster, floodplain_mask, stream_mask)\r\n \r\n # Append each value to the current row list object\r\n timestep_row.append([cur_timestep,\r\n this_inundation_area,\r\n this_mean_catchment_waterdepth,\r\n this_mean_floodplain_waterdepth,\r\n this_mean_mainchannel_waterdepth])\r\n # Convert the list into a numpy array\r\n timestep_row = _np.asarray(timestep_row, dtype=_np.float32)\r\n \r\n # Now append (stack) that row onto the bottom of the array (axis=0)\r\n data_array = _np.append(data_array, timestep_row, axis=0)\r\n \r\n print(data_array)\r\n print(data_array.shape)\r\n \r\n with open(savefilename,'wb') as f:\r\n _np.savetxt(f, data_array, fmt='%i %f %f %f %f')\r\n\r\n\r\n\r\n \r\n\"\"\"Get your rasters into arrays\"\"\" \r\nwater_raster_wildcard = \"/run/media/dav/SHETLAND/ModelRuns/Ryedale_storms/Gridded/Hydro/WaterDepths*.asc\"\r\nwater_raster_file = \"/mnt/SCRATCH/Analyses/HydrogeomorphPaper/peak_flood_maps/ryedale/WaterDepths2880_GRID_TLIM.asc\"\r\n#raster_file = \"/run/media/dav/SHETLAND/Analyses/HydrogeomorphPaper/peak_flood_maps/boscastle/peak_flood/WaterDepths2400_GRID_HYDRO.asc\"\r\nfloodplain_file = \"/mnt/SCRATCH/Analyses/ChannelMaskAnalysis/floodplain_ryedale/RyedaleElevations_FP.bil\"\r\nstream_raster_file = \"/mnt/SCRATCH/Analyses/ChannelMaskAnalysis/floodplain_ryedale/RyedaleElevations_SO.bil\"\r\n\r\nwater_raster = lsdgdal.ReadRasterArrayBlocks(water_raster_file)\r\n\r\nfloodplain_mask = lsdgdal.ReadRasterArrayBlocks(floodplain_file)\r\nstream_mask = lsdgdal.ReadRasterArrayBlocks(stream_raster_file)\r\n#print(stream_mask)\r\n\r\nDX = lsdgdal.GetUTMMaxMin(water_raster_file)[0] # I never realised you could do this!\r\nprint(DX)\r\n\r\n\"\"\"Calculate the depths and areas\"\"\"\r\n#calculate_mean_waterdepth(water_raster)\r\n#calcualte_max_waterdepth(water_raster)\r\n#calculate_waterinundation_area(water_raster, DX, 0.02)\r\n#floodplain_mean_depth(water_raster, floodplain_mask)\r\n#main_channel_mean_depth(water_raster, floodplain_mask, stream_mask)\r\n\r\n\"\"\"Make the timeseries file\"\"\"\r\nsimulation_inundation_timeseries(water_raster_wildcard, floodplain_mask,\r\n stream_mask,\r\n savefilename=\"ryedale_inundation_GRIDDED_HYDRO.txt\")\r\n"
] | [
[
"numpy.percentile"
],
[
"numpy.asarray",
"numpy.max",
"numpy.append",
"numpy.mean",
"numpy.count_nonzero",
"numpy.savetxt",
"numpy.ma.masked_where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ooyamatakehisa/mirdata | [
"12303e4c9941d96cafc178a8f542abf6cdf7cb1b",
"12303e4c9941d96cafc178a8f542abf6cdf7cb1b"
] | [
"mirdata/datasets/medleydb_pitch.py",
"mirdata/datasets/cante100.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"MedleyDB pitch Dataset Loader\n\n.. admonition:: Dataset Info\n :class: dropdown\n\n MedleyDB Pitch is a pitch-tracking subset of the MedleyDB dataset \n containing only f0-annotated, monophonic stems. \n\n MedleyDB is a dataset of annotated, royalty-free multitrack recordings.\n MedleyDB was curated primarily to support research on melody extraction,\n addressing important shortcomings of existing collections. For each song\n we provide melody f0 annotations as well as instrument activations for\n evaluating automatic instrument recognition.\n\n For more details, please visit: https://medleydb.weebly.com\n\n\"\"\"\n\nimport csv\nimport json\nimport librosa\nimport logging\nimport numpy as np\nimport os\n\nfrom mirdata import download_utils\nfrom mirdata import jams_utils\nfrom mirdata import core\nfrom mirdata import annotations\n\n\nBIBTEX = \"\"\"@inproceedings{bittner2014medleydb,\n Author = {Bittner, Rachel M and Salamon, Justin and Tierney, Mike and Mauch, Matthias and Cannam, Chris and Bello, Juan P},\n Booktitle = {International Society of Music Information Retrieval (ISMIR)},\n Month = {October},\n Title = {Medley{DB}: A Multitrack Dataset for Annotation-Intensive {MIR} Research},\n Year = {2014}\n}\"\"\"\nDOWNLOAD_INFO = \"\"\"\n To download this dataset, visit:\n https://zenodo.org/record/2620624#.XKZc7hNKh24\n and request access.\n \n Once downloaded, unzip the file MedleyDB-Pitch.zip\n and copy the result to:\n {}\n\"\"\"\n\nLICENSE_INFO = (\n \"Creative Commons Attribution Non-Commercial Share-Alike 4.0 (CC BY-NC-SA 4.0).\"\n)\n\n\ndef _load_metadata(data_home):\n metadata_path = os.path.join(data_home, \"medleydb_pitch_metadata.json\")\n\n if not os.path.exists(metadata_path):\n logging.info(\"Metadata file {} not found.\".format(metadata_path))\n return None\n\n with open(metadata_path, \"r\") as fhandle:\n metadata = json.load(fhandle)\n\n metadata[\"data_home\"] = data_home\n return metadata\n\n\nDATA = core.LargeData(\"medleydb_pitch_index.json\", _load_metadata)\n\n\nclass Track(core.Track):\n \"\"\"medleydb_pitch Track class\n\n Args:\n track_id (str): track id of the track\n\n Attributes:\n artist (str): artist\n audio_path (str): path to the audio file\n genre (str): genre\n instrument (str): instrument of the track\n pitch_path (str): path to the pitch annotation file\n title (str): title\n track_id (str): track id\n\n Cached Properties:\n pitch (F0Data): human annotated pitch\n\n \"\"\"\n\n def __init__(self, track_id, data_home):\n if track_id not in DATA.index[\"tracks\"]:\n raise ValueError(\n \"{} is not a valid track ID in MedleyDB-Pitch\".format(track_id)\n )\n\n self.track_id = track_id\n\n self._data_home = data_home\n self._track_paths = DATA.index[\"tracks\"][track_id]\n self.pitch_path = os.path.join(self._data_home, self._track_paths[\"pitch\"][0])\n\n metadata = DATA.metadata(data_home)\n if metadata is not None and track_id in metadata:\n self._track_metadata = metadata[track_id]\n else:\n self._track_metadata = {\n \"instrument\": None,\n \"artist\": None,\n \"title\": None,\n \"genre\": None,\n }\n\n self.audio_path = os.path.join(self._data_home, self._track_paths[\"audio\"][0])\n self.instrument = self._track_metadata[\"instrument\"]\n self.artist = self._track_metadata[\"artist\"]\n self.title = self._track_metadata[\"title\"]\n self.genre = self._track_metadata[\"genre\"]\n\n @core.cached_property\n def pitch(self):\n return load_pitch(self.pitch_path)\n\n @property\n def audio(self):\n \"\"\"The track's audio\n\n 
Returns:\n * np.ndarray - audio signal\n * float - sample rate\n\n \"\"\"\n return load_audio(self.audio_path)\n\n def to_jams(self):\n \"\"\"Get the track's data in jams format\n\n Returns:\n jams.JAMS: the track's data in jams format\n\n \"\"\"\n return jams_utils.jams_converter(\n audio_path=self.audio_path,\n f0_data=[(self.pitch, \"annotated pitch\")],\n metadata=self._track_metadata,\n )\n\n\ndef load_audio(audio_path):\n \"\"\"Load a MedleyDB audio file.\n\n Args:\n audio_path (str): path to audio file\n\n Returns:\n * np.ndarray - the mono audio signal\n * float - The sample rate of the audio file\n\n \"\"\"\n if not os.path.exists(audio_path):\n raise IOError(\"audio_path {} does not exist\".format(audio_path))\n\n return librosa.load(audio_path, sr=None, mono=True)\n\n\ndef load_pitch(pitch_path):\n \"\"\"load a MedleyDB pitch annotation file\n\n Args:\n pitch_path (str): path to pitch annotation file\n\n Raises:\n IOError: if pitch_path doesn't exist\n\n Returns:\n F0Data: pitch annotation\n\n \"\"\"\n if not os.path.exists(pitch_path):\n raise IOError(\"pitch_path {} does not exist\".format(pitch_path))\n\n times = []\n freqs = []\n with open(pitch_path, \"r\") as fhandle:\n reader = csv.reader(fhandle, delimiter=\",\")\n for line in reader:\n times.append(float(line[0]))\n freqs.append(float(line[1]))\n\n times = np.array(times)\n freqs = np.array(freqs)\n confidence = (freqs > 0).astype(float)\n pitch_data = annotations.F0Data(times, freqs, confidence)\n return pitch_data\n\n\[email protected]_inherit(core.Dataset)\nclass Dataset(core.Dataset):\n \"\"\"\n The medleydb_pitch dataset\n \"\"\"\n\n def __init__(self, data_home=None):\n super().__init__(\n data_home,\n index=DATA.index,\n name=\"medleydb_pitch\",\n track_object=Track,\n bibtex=BIBTEX,\n download_info=DOWNLOAD_INFO,\n license_info=LICENSE_INFO,\n )\n\n @core.copy_docs(load_audio)\n def load_audio(self, *args, **kwargs):\n return load_audio(*args, **kwargs)\n\n @core.copy_docs(load_pitch)\n def load_pitch(self, *args, **kwargs):\n return load_pitch(*args, **kwargs)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\ncante100 Loader\n\n.. admonition:: Dataset Info\n :class: dropdown\n\n The cante100 dataset contains 100 tracks taken from the COFLA corpus. We defined 10 style\n families of which 10 tracks each are included. Apart from the style family, we manually\n annotated the sections of the track in which the vocals are present. In addition, we\n provide a number of low-level descriptors and the fundamental frequency corresponding to\n the predominant melody for each track. The meta-information includes editoral meta-data\n and the musicBrainz ID.\n\n Total tracks: 100\n\n cante100 audio is only available upon request. To download the audio request access in\n this link: https://zenodo.org/record/1324183. Then\n unzip the audio into the cante100 general dataset folder for the rest of annotations\n and files.\n\n Audio specifications:\n\n - Sampling frequency: 44.1 kHz\n - Bit-depth: 16 bit\n - Audio format: .mp3\n\n cante100 dataset has spectrogram available, in csv format. spectrogram is available to download\n without request needed, so at first instance, cante100 loader uses the spectrogram of the tracks.\n\n The available annotations are:\n\n - F0 (predominant melody)\n - Automatic transcription of notes (of singing voice)\n\n CANTE100 LICENSE (COPIED FROM ZENODO PAGE)\n\n .. code-block:: latex\n\n The provided datasets are offered free of charge for internal non-commercial use.\n We do not grant any rights for redistribution or modification. All data collections were gathered\n by the COFLA team.\n © COFLA 2015. All rights reserved.\n\n For more details, please visit: http://www.cofla-project.com/?page_id=134\n\n\"\"\"\nimport csv\nimport os\nimport logging\nimport xml.etree.ElementTree as ET\n\nimport librosa\nimport numpy as np\n\nfrom mirdata import download_utils\nfrom mirdata import jams_utils\nfrom mirdata import core\nfrom mirdata import annotations\n\n\nBIBTEX = \"\"\"@dataset{nadine_kroher_2018_1322542,\n author = {Nadine Kroher and\n José Miguel Díaz-Báñez and\n Joaquin Mora and\n Emilia Gómez},\n title = {cante100 Metadata},\n month = jul,\n year = 2018,\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.1322542},\n url = {https://doi.org/10.5281/zenodo.1322542}\n},\n@dataset{nadine_kroher_2018_1324183,\n author = {Nadine Kroher and\n José Miguel Díaz-Báñez and\n Joaquin Mora and\n Emilia Gómez},\n title = {cante100 Audio},\n month = jul,\n year = 2018,\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.1324183},\n url = {https://doi.org/10.5281/zenodo.1324183}\n}\n\"\"\"\n\n\nREMOTES = {\n \"spectrogram\": download_utils.RemoteFileMetadata(\n filename=\"cante100_spectrum.zip\",\n url=\"https://zenodo.org/record/1322542/files/cante100_spectrum.zip?download=1\",\n checksum=\"0b81fe0fd7ab2c1adc1ad789edb12981\", # the md5 checksum\n destination_dir=\"cante100_spectrum\", # relative path for where to unzip the data, or None\n ),\n \"melody\": download_utils.RemoteFileMetadata(\n filename=\"cante100midi_f0.zip\",\n url=\"https://zenodo.org/record/1322542/files/cante100midi_f0.zip?download=1\",\n checksum=\"cce543b5125eda5a984347b55fdcd5e8\", # the md5 checksum\n destination_dir=\"cante100midi_f0\", # relative path for where to unzip the data, or None\n ),\n \"notes\": download_utils.RemoteFileMetadata(\n filename=\"cante100_automaticTranscription.zip\",\n url=\"https://zenodo.org/record/1322542/files/cante100_automaticTranscription.zip?download=1\",\n checksum=\"47fea64c744f9fe678ae5642a8f0ee8e\", # the md5 checksum\n 
destination_dir=\"cante100_automaticTranscription\", # relative path for where to unzip the data, or None\n ),\n \"metadata\": download_utils.RemoteFileMetadata(\n filename=\"cante100Meta.xml\",\n url=\"https://zenodo.org/record/1322542/files/cante100Meta.xml?download=1\",\n checksum=\"6cce186ce77a06541cdb9f0a671afb46\", # the md5 checksum\n destination_dir=None, # relative path for where to unzip the data, or None\n ),\n \"README\": download_utils.RemoteFileMetadata(\n filename=\"cante100_README.txt\",\n url=\"https://zenodo.org/record/1322542/files/cante100_README.txt?download=1\",\n checksum=\"184209b7e7d816fa603f0c7f481c0aae\", # the md5 checksum\n destination_dir=None, # relative path for where to unzip the data, or None\n ),\n}\n\n\nDOWNLOAD_INFO = \"\"\"\n This loader is designed to load the spectrum, as it is available for download.\n However, the loader supports audio as well. Unfortunately the audio files of the \n cante100 dataset are not available for free download, but upon request. However, \n you can request de audio in both links here: \n ==> http://www.cofla-project.com/?page_id=208\n ==> https://zenodo.org/record/1324183\n Then, locate the downloaded the cante100audio folder like this:\n > cante100/\n > cante100_spectrum/\n ... (rest of the annotation folders)\n > cante100audio/\n Remember to locate the cante100 folder to {}\n\"\"\"\n\nLICENSE_INFO = \"\"\"\nThe provided datasets are offered free of charge for internal non-commercial use.\nWe do not grant any rights for redistribution or modification. All data collections\nwere gathered by the COFLA team. COFLA 2015. All rights reserved.\n\"\"\"\n\n\ndef _load_metadata(data_home):\n metadata_path = os.path.join(data_home, \"cante100Meta.xml\")\n if not os.path.exists(metadata_path):\n logging.info(\n \"Metadata file {} not found.\".format(metadata_path)\n + \"You can download the metadata file for cante100 \"\n + \"by running cante100.download()\"\n )\n return None\n\n tree = ET.parse(metadata_path)\n root = tree.getroot()\n\n # ids\n indexes = []\n for child in root:\n index = child.attrib.get(\"id\")\n if len(index) == 1:\n index = \"00\" + index\n indexes.append(index)\n continue\n if len(index) == 2:\n index = \"0\" + index\n indexes.append(index)\n continue\n else:\n indexes.append(index)\n\n # musicBrainzID\n identifiers = []\n for ident in root.iter(\"musicBrainzID\"):\n identifiers.append(ident.text)\n\n # artist\n artists = []\n for artist in root.iter(\"artist\"):\n artists.append(artist.text)\n\n # titles\n titles = []\n for title in root.iter(\"title\"):\n titles.append(title.text)\n\n # releases\n releases = []\n for release in root.iter(\"anthology\"):\n releases.append(release.text)\n\n # duration\n durations = []\n minutes = []\n for minute in root.iter(\"duration_m\"):\n minutes.append(float(minute.text) * 60)\n seconds = []\n for second in root.iter(\"duration_s\"):\n seconds.append(float(second.text))\n for i in np.arange(len(minutes)):\n durations.append(minutes[i] + seconds[i])\n\n metadata = dict()\n metadata[\"data_home\"] = data_home\n for i, j in zip(indexes, range(len(artists))):\n metadata[i] = {\n \"musicBrainzID\": identifiers[j],\n \"artist\": artists[j],\n \"title\": titles[j],\n \"release\": releases[j],\n \"duration\": durations[j],\n }\n\n return metadata\n\n\nDATA = core.LargeData(\"cante100_index.json\", _load_metadata)\n\n\nclass Track(core.Track):\n \"\"\"cante100 track class\n\n Args:\n track_id (str): track id of the track\n data_home (str): Local path where the dataset is stored.\n 
If `None`, looks for the data in the default directory, `~/mir_datasets/cante100`\n\n Attributes:\n track_id (str): track id\n identifier (str): musicbrainz id of the track\n artist (str): performing artists\n title (str): title of the track song\n release (str): release where the track can be found\n duration (str): duration in seconds of the track\n\n Cached Properties:\n melody (F0Data): annotated melody\n notes (NoteData): annotated notes\n\n \"\"\"\n\n def __init__(self, track_id, data_home):\n if track_id not in DATA.index[\"tracks\"]:\n raise ValueError(\"{} is not a valid track ID in Example\".format(track_id))\n\n self.track_id = track_id\n\n self._data_home = data_home\n\n self._track_paths = DATA.index[\"tracks\"][track_id]\n self.audio_path = os.path.join(self._data_home, self._track_paths[\"audio\"][0])\n self.spectrogram_path = os.path.join(\n self._data_home, self._track_paths[\"spectrum\"][0]\n )\n self.f0_path = os.path.join(self._data_home, self._track_paths[\"f0\"][0])\n self.notes_path = os.path.join(self._data_home, self._track_paths[\"notes\"][0])\n\n metadata = DATA.metadata(data_home=data_home)\n if metadata is not None and track_id in metadata:\n self._track_metadata = metadata[track_id]\n else:\n self._track_metadata = {\n \"musicBrainzID\": None,\n \"artist\": None,\n \"title\": None,\n \"release\": None,\n \"duration\": None,\n }\n\n self.identifier = self._track_metadata[\"musicBrainzID\"]\n self.artist = self._track_metadata[\"artist\"]\n self.title = self._track_metadata[\"title\"]\n self.release = self._track_metadata[\"release\"]\n self.duration = self._track_metadata[\"duration\"]\n\n @property\n def audio(self):\n \"\"\"The track's audio\n\n Returns:\n * np.ndarray - audio signal\n * float - sample rate\n\n \"\"\"\n return load_audio(self.audio_path)\n\n @property\n def spectrogram(self):\n \"\"\"spectrogram of The track's audio\n\n Returns:\n (np.ndarray): spectrogram\n \"\"\"\n return load_spectrogram(self.spectrogram_path)\n\n @core.cached_property\n def melody(self):\n return load_melody(self.f0_path)\n\n @core.cached_property\n def notes(self):\n return load_notes(self.notes_path)\n\n def to_jams(self):\n \"\"\"Get the track's data in jams format\n\n Returns:\n jams.JAMS: the track's data in jams format\n\n \"\"\"\n return jams_utils.jams_converter(\n audio_path=self.audio_path,\n spectrogram_path=self.spectrogram_path,\n f0_data=[(self.melody, \"pitch_contour\")],\n note_data=[(self.notes, \"note_hz\")],\n metadata=self._track_metadata,\n )\n\n\ndef load_spectrogram(spectrogram_path):\n \"\"\"Load a cante100 dataset spectrogram file.\n\n Args:\n spectrogram_path (str): path to audio file\n\n Returns:\n np.ndarray: spectrogram\n\n \"\"\"\n if not os.path.exists(spectrogram_path):\n raise IOError(\"spectrogram_path {} does not exist\".format(spectrogram_path))\n parsed_spectrogram = np.genfromtxt(spectrogram_path, delimiter=\" \")\n spectrogram = parsed_spectrogram.astype(np.float)\n\n return spectrogram\n\n\ndef load_audio(audio_path):\n \"\"\"Load a cante100 audio file.\n\n Args:\n audio_path (str): path to audio file\n\n Returns:\n * np.ndarray - the mono audio signal\n * float - The sample rate of the audio file\n\n \"\"\"\n if not os.path.exists(audio_path):\n raise IOError(\"audio_path {} does not exist\".format(audio_path))\n audio, sr = librosa.load(audio_path, sr=22050, mono=False)\n return audio, sr\n\n\ndef load_melody(f0_path):\n \"\"\"Load cante100 f0 annotations\n\n Args:\n f0_path (str): path to audio file\n\n Returns:\n F0Data: 
predominant melody\n\n \"\"\"\n if not os.path.exists(f0_path):\n raise IOError(\"f0_path {} does not exist\".format(f0_path))\n\n times = []\n freqs = []\n with open(f0_path, \"r\") as fhandle:\n reader = csv.reader(fhandle, delimiter=\",\")\n for line in reader:\n times.append(float(line[0]))\n freqs.append(float(line[1]))\n\n times = np.array(times)\n freqs = np.array(freqs)\n confidence = (freqs > 0).astype(float)\n\n return annotations.F0Data(times, freqs, confidence)\n\n\ndef load_notes(notes_path):\n \"\"\"Load note data from the annotation files\n\n Args:\n notes_path (str): path to notes file\n\n Returns:\n NoteData: note annotations\n\n \"\"\"\n if not os.path.exists(notes_path):\n raise IOError(\"notes_path {} does not exist\".format(notes_path))\n\n intervals = []\n pitches = []\n confidence = []\n with open(notes_path, \"r\") as fhandle:\n reader = csv.reader(fhandle, delimiter=\",\")\n for line in reader:\n intervals.append([line[0], float(line[0]) + float(line[1])])\n # Convert midi value to frequency\n pitches.append((440 / 32) * (2 ** ((int(line[2]) - 9) / 12)))\n confidence.append(1.0)\n\n return annotations.NoteData(\n np.array(intervals, dtype=\"float\"),\n np.array(pitches, dtype=\"float\"),\n np.array(confidence, dtype=\"float\"),\n )\n\n\[email protected]_inherit(core.Dataset)\nclass Dataset(core.Dataset):\n \"\"\"\n The cante100 dataset\n \"\"\"\n\n def __init__(self, data_home=None):\n super().__init__(\n data_home,\n index=DATA.index,\n name=\"cante100\",\n track_object=Track,\n bibtex=BIBTEX,\n remotes=REMOTES,\n download_info=DOWNLOAD_INFO,\n license_info=LICENSE_INFO,\n )\n\n @core.copy_docs(load_audio)\n def load_audio(self, *args, **kwargs):\n return load_audio(*args, **kwargs)\n\n @core.copy_docs(load_spectrogram)\n def load_spectrogram(self, *args, **kwargs):\n return load_spectrogram(*args, **kwargs)\n\n @core.copy_docs(load_melody)\n def load_melody(self, *args, **kwargs):\n return load_melody(*args, **kwargs)\n\n @core.copy_docs(load_notes)\n def load_notes(self, *args, **kwargs):\n return load_notes(*args, **kwargs)\n"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samhippie/shallow-red | [
"5690cdf380c6e138e25d88e85093738951438298"
] | [
"deepcfr.py"
] | [
"#!/usr/bin/env python3\n\nimport asyncio\nimport collections\nimport copy\nimport io\nimport math\nimport numpy as np\nimport random\nimport sys\nimport torch\nimport torch.distributed as dist\nimport os.path\n\nimport config\nimport model\nimport dataStorage\n\n#Deep MCCFR\n\n#based on this paper\n#https://arxiv.org/pdf/1811.00164.pdf\n\n#this agent breaks some compatibility with the other agents\n#so it needs a separate runner function\n#(not that the other agents even exist any more)\n\nclass DeepCfrAgent:\n #each player gets one of each model\n #advModels calculates advantages\n #stratModels calculates average strategy\n\n #branch limit is the maximum number of actions taken\n #actions that aren't taken are probed (i.e. rolled out)\n\n #depth limit is the maximum number of turns taken from the root\n #after the limit it hit, all games are evaluated via rollout\n #this agent is only applied at the root, so depth limit might\n #significantly affect the quality of late-game strategies\n #(although we could use RM to find new late-game strategies,\n #but that's outside the scope of this agent)\n\n #resumeIter is the iteration to resume from\n #this really should be a parameter to the search function,\n #but we need to know about it when we initialize the models\n #this is a problem with our agent being made for a one-shot training cycle\n #instead of multiple training cycles like the others\n def __init__(\n self,\n writeLock,\n sharedDict,\n advModels=None, stratModels=None,\n singleDeep=False,\n verbose=False):\n\n self.pid = -1\n\n\n self.writeLock = writeLock\n #self.trainingBarrier = trainingBarrier\n self.sharedDict = sharedDict\n\n #TODO REFACTOR agents no longer need to manage models\n #if the adv models are passed in, assume we aren't responsible for sharing them\n if advModels:\n self.advModels = advModels\n self.manageSharedModels = False\n else:\n self.advModels = [full.model.DeepCfrModel(name='adv' + str(i), softmax=False, writeLock=writeLock, sharedDict=sharedDict) for i in range(2)]\n self.manageSharedModels = True\n\n #if stratModels:\n #self.stratModels = stratModels\n #else:\n #self.stratModels = [full.model.DeepCfrModel(name='strat' + str(i), softmax=True, writeLock=writeLock, sharedDict=sharedDict) for i in range(2)]\n\n #TODO REFACTOR everything is single deep\n #whether to save old models for single deep cfr\n self.singleDeep = singleDeep\n if(singleDeep):\n self.oldModels = [[],[]]\n self.oldModelWeights = [[],[]]\n\n #TODO REFACTOR we should always train\n #flag so if we never search, we don't bother training\n self.needsTraining = False\n\n self.verbose = verbose\n\n async def search(self, context, distGroup, pid=0, limit=100, innerLoops=1, seed=None, history=[[],[]]):\n self.pid = pid\n\n start = config.resumeIter if config.resumeIter else 0\n\n if self.pid == 0:\n print(end='', file=sys.stderr)\n for i in range(start, limit):\n\n #this is mainly used for setting a condition breakpoint\n #there's probably a better way \n #(there is, it's 'break deepcfr.py:110, i == 3' in pdb)\n #if i == 3:\n #print('ready for debugging')\n\n #for small games, this is necessary to get a decent number of samples\n print(self.pid, 'starting search')\n for j in range(innerLoops):\n if self.pid == 0:\n print('\\rTurn Progress: ' + str(i) + '/' + str(limit) + ' inner ' + str(j) + '/' + str(innerLoops), end='', file=sys.stderr)\n self.needsTraining = True\n #we want each game tree traversal to use the same seed\n if seed:\n curSeed = seed\n else:\n curSeed = config.game.getSeed()\n game 
= config.game.Game(context=context, seed=curSeed, history=history, verbose=self.verbose)\n await game.startGame()\n await self.cfrRecur(context, game, curSeed, history, i)\n print(self.pid, 'done with search')\n\n\n #save our adv data after each iteration\n #so the non-zero pid workers don't have data cached\n self.advModels[i % 2].clearSampleCache()\n #go ahead and clear our strat caches as well\n #just in case the program is exited\n #for j in range(2):\n #self.stratModels[j].clearSampleCache()\n\n dist.barrier(distGroup)\n\n if self.pid == 0:\n if self.needsTraining:\n print('sending train message')\n self.advTrain(i % 2, iter=i // 2 + 1)\n\n distGroup.barrier()\n\n if os.path.isfile('stopEarly'):\n #cant' rename the file here\n if self.pid == 0:\n print('stopping early')\n break\n\n self.needsTraining = False\n\n if self.pid == 0:\n print('\\nplaying games', file=sys.stderr)\n \n distGroup.barrier()\n\n if self.pid == 0:\n #have to wait to rename this until all processes have had a chance to see it\n if os.path.isfile('stopEarly'):\n os.rename('stopEarly', 'XstopEarly')\n out = torch.zeros(3)\n dist.send(out, dst=0)\n print('playtime is over', file=sys.stderr)\n print(file=sys.stderr)\n\n def advTrain(self, player, iter=1):\n #send message to net process to train network\n dist.send(torch.tensor([1, player, 0]), dst=0)\n #just block until it's done\n out = torch.zeros(1, dtype=torch.long)\n dist.recv(out, src=0)\n\n #model = self.advModels[player]\n #model.train(epochs=config.advEpochs)\n #if(self.singleDeep):\n #model.net.cpu()\n #self.oldModels[player].append(model.net)\n #model.net.cuda()\n #self.oldModelWeights[player].append(iter)\n #self.sharedDict['oldModels'] = self.oldModels\n #self.sharedDict['oldModelWeights'] = self.oldModelWeights\n\n\n #TODO REFACTOR we don't train a strategy network anymore\n def stratTrain(self):\n if(self.singleDeep):\n self.oldModels = self.sharedDict['oldModels']\n self.oldModelWeights = self.sharedDict['oldModelWeights']\n print('skipping strategy', file=sys.stderr)\n return\n if self.pid == 0:\n print('training strategy', file=sys.stderr)\n #we train both strat models at once\n for i, model in enumerate(self.stratModels):\n #let's try copying the embedding from the current adv net\n model.net.embeddings = self.advModels[i].net.embeddings\n model.train(epochs=config.stratEpochs)\n\n def getPredict(self, player, infoset):\n inputTensor = model.infosetToTensor(infoset)\n #if self.pid == 1:\n #print('sending', inputTensor)\n #print(self.pid, 'sending request')\n #print(self.pid, 'sending', inputTensor)\n dist.send(torch.tensor([2, player, inputTensor.shape[0]]), dst=0)\n #print(self.pid, 'sending input with shape', inputTensor.shape, 'dtype', inputTensor.dtype, inputTensor)\n dist.send(inputTensor, dst=0)\n out = torch.zeros(config.game.numActions + 1)\n #print(self.pid, 'getting output')\n dist.recv(out, src=0)\n #print(self.pid, 'got output')\n out = out.detach().numpy()\n #if self.pid == 1:\n #print('got', out)\n return out[0:-1], out[-1]\n\n #getting probability for a given model to follow a given trajectory\n #where a trajectory is a list of infoset-action pairs\n def getReachProb(self, model, traj):\n reachProb = 1\n for infoset, actionIndex, actions in traj:\n numActions = len(actions)\n probs, _ = model.predict(infoset)\n probs = probs[0:numActions]\n #actionNum = config.game.enumAction(action)\n for i, p in enumerate(probs):\n if probs[i] < 0:\n probs[i] = 0\n pSum = sum(probs)\n if pSum > 0:\n reachProb *= probs[actionIndex] / pSum\n else:\n 
#if pSum is 0, assume we played randomly\n reachProb *= 1 / numActions\n #if we have an action that never accumulates any regret, then the models might all spit out 0\n #which would give a reach probability of 0\n return max(reachProb, 0.01)\n\n #getting final probabilities for executing a strategy\n def getProbs(self, player, infoset, actions, prevTrajectory=None, file=sys.stdout):\n print('infoset', infoset, file=file)\n #TODO REFACTOR we're always using single deep\n if(self.singleDeep):\n stratProbs = None\n expVal = 0\n weights = []\n model = self.advModels[player]\n totalWeight = 0\n for i in range(len(self.oldModels[player])):\n model.net = self.oldModels[player][i]\n weight = self.oldModelWeights[player][i]\n reachProb = self.getReachProb(model, prevTrajectory)\n weight *= reachProb\n totalWeight += weight\n probs, ev = model.predict(infoset, trace=False)\n print('raw probs', probs, file=file)\n probs = probs[0:len(actions)]\n _, bestIndex = max([(p, i) for (i, p) in enumerate(probs)])\n for j, p in enumerate(probs):\n if p < 0:\n probs[j] = 0\n pSum = sum(probs)\n if pSum > 0:\n probs /= pSum\n else:\n probs = np.zeros(len(probs))\n probs[bestIndex] = 1\n #probs = np.array([1 / len(probs) for p in probs])\n print(self.oldModelWeights[player][i], 'weight', weight, 'probs', probs, file=file)\n #probs, ev = self.getPredict(player, infoset)\n expVal += ev * weight\n if(stratProbs is not None):\n stratProbs += weight * probs\n else:\n stratProbs = weight * probs\n\n if totalWeight > 0:#shouldn't be the case, but who knows what you get from untrained networks\n expVal /= totalWeight\n else:\n sm = self.stratModels[player]\n stratProbs, expVal = sm.predict(infoset, trace=False)\n #stratProbs, expVal = self.getPredict(sm, infoset)\n print('strat probs', stratProbs, file=file)\n print('expVal', expVal, file=file)\n #actionNums = [config.game.enumAction(a) for a in actions]\n actionNums = list(range(len(actions)))\n probs = []\n for n in actionNums:\n probs.append(stratProbs[n])\n probs = np.array(probs)\n\n pSum = np.sum(probs)\n if pSum > 0:\n return probs / np.sum(probs)\n else:\n #play randomly\n return np.array([1 / len(actions) for a in actions])\n\n #recursive implementation of cfr\n #history is a list of (seed, player, action) tuples\n #assumes the game has already had the history applied\n async def cfrRecur(self, context, game, startSeed, history, iter, depth=0, q=1, rollout=False):\n if config.depthLimit and depth > config.depthLimit:\n rollout = True\n\n onPlayer = iter % 2\n offPlayer = (iter + 1) % 2\n\n player, req, actions = await game.getTurn()\n\n if 'win' in req:\n if player == onPlayer:\n #return (req['win'] + 2) / 4\n return req['win']\n else:\n return -1 * req['win']\n\n #game uses append, so we have to make a copy to keep everything consistent when we get advantages later\n infoset = copy.copy(game.getInfoset(player))\n\n if player == offPlayer:\n #get probs so we can sample a single action\n probs, _ = self.regretMatch(offPlayer, infoset, actions, -1)\n exploreProbs = probs * (1 - config.offExploreRate) + config.offExploreRate / len(actions)\n actionIndex = np.random.choice(len(actions), p=exploreProbs)\n\n #if depth == 1 and self.pid == 0:\n #print('offplayer ' + str(player) + ' hand ' + str(game.hands[player]) + ' probs', list(zip(actions, probs)), file=sys.stderr)\n await game.takeAction(player, actionIndex)\n\n if player == 0:\n newHistory = [history[0] + [(None, actionIndex)], history[1]]\n else:\n newHistory = [history[0], history[1] + [(None, 
actionIndex)]]\n\n onExpValue = await self.cfrRecur(context, game, startSeed, newHistory, iter, depth=depth, rollout=rollout, q=q)\n\n #save sample for final average strategy\n \"\"\"\n if not rollout:\n sm = self.stratModels[offPlayer]\n sm.addSample(infoset, zip(actions, probs), iter // 2 + 1, -1 * onExpValue)\n \"\"\"\n return onExpValue\n\n elif player == onPlayer:\n #get probs, which action we take depends on the configuration\n probs, regrets = self.regretMatch(onPlayer, infoset, actions, depth)\n #I don't think I'm using sampleProbs for anything\n if rollout:\n #we pick one action according to the current strategy\n #like this paper, except we also do it when we hit a depth limit\n #https://poker.cs.ualberta.ca/publications/AAAI12-generalmccfr.pdf\n actionIndices = [np.random.choice(len(actions), p=probs)]\n #sampleProbs don't matter, but this is accurate\n sampleProbs = probs\n elif config.branchingLimit:\n #select a set of actions to pick\n #chance to play randomly instead of picking the best actions\n #this paper suggests playing according the currect strategy with some exploration factor for outcome\n #sampling (i.e. branchLimit = 1), so I assume that that's a good method for other branch limits\n #http://mlanctot.info/files/papers/nips09mccfr.pdf\n #the double neural CFR paper suggests using uniform random distribution, which is probably fine too\n exploreProbs = probs * (1 - config.onExploreRate) + config.onExploreRate / len(probs)\n actionIndices = np.random.choice(len(actions), min(len(actions), config.branchingLimit), \n replace=False, p=exploreProbs)\n #this is only true for branchingLimit=1, but I don't feel like coding a general solution right now\n sampleProbs = exploreProbs\n else:\n #we're picking every action\n actionIndices = list(range(len(actions)))\n #100% chance of sampling each action\n sampleProbs = np.ones(probs.shape)\n\n #get expected reward for each action\n rewards = []\n gameUsed = False\n\n for i in range(len(actions)):\n action = actions[i]\n\n #use rollout for non-sampled actions\n if not i in actionIndices and not rollout:\n if not config.enableProbingRollout:\n rewards.append(0)\n continue\n #rollout non-sampled actions\n curRollout = True\n elif not i in actionIndices:\n #if we're rolling out, just pretend the other actions don't exist\n continue\n else:\n curRollout = rollout\n\n #don't have to re-init game for the first action\n if gameUsed:\n game = config.game.Game(context, seed=startSeed, history=history, verbose=self.verbose)\n await game.startGame()\n await game.getTurn()\n else:\n gameUsed = True\n\n #I want to see if we get good results by keeping the RNG the same\n #this is closer to normal external sampling\n #seed = await game.resetSeed()\n await game.takeAction(player, i)\n #historyEntry = (None, player, action)\n\n if player == 0:\n newHistory = [history[0] + [(None, i)], history[1]]\n else:\n newHistory = [history[0], history[1] + [(None, i)]]\n\n r = await self.cfrRecur(context, game, startSeed, newHistory, iter, depth=depth+1, rollout=curRollout, q=q*sampleProbs[i])\n rewards.append(r)\n\n if not rollout:\n #save sample of advantages\n #the normal implementations of outcome sampling just divide things by q, but that makes the numbers blow up\n #I think this is because we're not really calculating regret, but advantages\n #probs / sampleProbs won't blow up, but I don't know if it's correct\n #the nice thing about external sampling is that you don't run into these kinds of issues, the math makes everything work out\n\n #if we probe, 
then we end up with a decent set of expected values and probabilities, which should be enough\n #but probing is expensive. We could instead cheat and get the calculated expected value and return that a fraction of the time\n #but then we'd have to terminate the game early\n\n \"\"\"\n if config.branchingLimit == 1 and not config.enableProbingRollout:\n ind = actionIndices[0]\n stateExpValue = rewards[ind]\n #if q is 0, then us being here is a 0 probability event\n #but I don't want to die because of some weird rounding\n advantages = [stateExpValue / max(q, 0.001) * ((1 if i == ind else 0) - p) for i, p in enumerate(probs)]\n else:\n \"\"\"\n #actually, if the rewards for non-sampled actions are 0, then outcome sampling here only differs\n #from standard outcome sampling by not dividing by q, which messes up the scaling\n #I think that we're really just approximating something proportional to regret, so scaling everything\n #by q isn't needed\n #I did some math and doing OS like this scales the regret we would accumulate\n #by 1/(overall reach prob), while ES scales by 1/(off-player and chance reach prob)\n stateExpValue = 0\n for p,r in zip(probs, rewards):\n stateExpValue += p * r\n advantages = [r - stateExpValue for r in rewards]\n #if self.pid == 0:\n #print('infoset', infoset)\n #print('q', q, 'exp val', stateExpValue)\n #print('actions, prob, reward, advantage', *list(zip(actions, probs, rewards, advantages)))\n #CFR+, anyone?\n #also using the sqrt(t) equation from that double neural cfr paper\n #advantages = [max(0, math.sqrt(iter // 2) * g / math.sqrt(iter // 2 + 1) + (r - stateExpValue) / math.sqrt(iter // 2 + 1)) for r, g in zip(rewards, regrets)]\n #if depth == 1 and self.pid == 0:\n #print('onplayer', player, 'hand', game.hands[player], 'new advs', list(zip(actions, advantages)), 'exp value', stateExpValue, file=sys.stderr)\n #print('advantages', advantages)\n\n am = self.advModels[onPlayer]\n am.addSample(infoset, advantages, iter // 2 + 1, stateExpValue)\n\n #if depth == 0 and self.pid == 0:\n #print('player', str(onPlayer), file=sys.stderr)\n #print('stateExpValue', stateExpValue, 'from', list(zip(probs, rewards)), file=sys.stderr)\n #print('advantages', list(zip(actions, advantages)), file=sys.stderr)\n\n return stateExpValue\n else:\n #we can't calculate advantage, so we can't update anything\n #we only have one reward, so just return it\n return rewards[0]\n\n \n #generates probabilities for each action\n #based on modeled advantages\n def regretMatch(self, player, infoset, actions, depth):\n #am = self.advModels[player]\n #advs, expVal = am.predict(infoset)\n advs, expVal = self.getPredict(player, infoset)\n #illegal actions should be 0\n flatAdvs = np.zeros(len(advs))\n #actionNums = [config.game.enumAction(a) for a in actions]\n actionNums = list(range(len(actions)))\n probs = []\n for n in actionNums:\n probs.append(max(0, advs[n]))\n flatAdvs[n] = advs[n]\n #if depth == 0 and self.pid == 0:\n #print('predicted advantages', [(action, advs[n]) for action, n in zip(actions, actionNums)], file=sys.stderr)\n probs = np.array(probs)\n pSum = np.sum(probs)\n if pSum > 0:\n return probs / pSum, flatAdvs\n else:\n #pick the best action with probability 1\n best = None\n for i in range(len(actionNums)):\n n = actionNums[i]\n if best == None or advs[n] > advs[actionNums[best]]:\n best = i\n probs = [0 for a in actions]\n probs[best] = 1\n return np.array(probs), flatAdvs\n #actually, play randomly\n #return np.array([1 / len(actions) for a in actions]), flatAdvs\n"
] | [
[
"torch.distributed.send",
"torch.zeros",
"torch.distributed.recv",
"torch.distributed.barrier",
"torch.tensor",
"numpy.ones",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Hekstra-Lab/reciprocalspaceship | [
"48b72ad70608fdbdfef31f6b38aac5873abc0dba"
] | [
"tests/dtypes/test_inference.py"
] | [
"import pytest\nfrom pandas.testing import assert_series_equal\n\nimport reciprocalspaceship as rs\n\n\[email protected](\n \"dataseries\",\n [\n (rs.DataSeries(range(10), dtype=rs.PhaseDtype()), \"P\"),\n (rs.DataSeries(range(10), dtype=rs.HKLIndexDtype()), \"H\"),\n (rs.DataSeries(range(10), name=\"Phi\", dtype=rs.PhaseDtype()), \"P\"),\n (rs.DataSeries(range(10), name=None), \"I\"),\n (rs.DataSeries(range(10), name=None, dtype=float), \"R\"),\n (rs.DataSeries(range(10), name=\"blah\", dtype=float), \"R\"),\n (rs.DataSeries(range(10), name=None), \"I\"),\n (rs.DataSeries(range(10), name=None, dtype=float), \"R\"),\n (rs.DataSeries([\"h\"] * 3, name=None, dtype=object), object),\n (rs.DataSeries([\"h\"] * 3, name=\"blah\", dtype=object), object),\n (rs.DataSeries(range(10), name=\"H\"), \"H\"),\n (rs.DataSeries(range(10), name=\"K\"), \"H\"),\n (rs.DataSeries(range(10), name=\"L\"), \"H\"),\n (rs.DataSeries(range(10), name=\"I\"), \"J\"),\n (rs.DataSeries(range(10), name=\"IMEAN\"), \"J\"),\n (rs.DataSeries(range(10), name=\"SIGIMEAN\"), \"Q\"),\n (rs.DataSeries(range(10), name=\"SIGI\"), \"Q\"),\n (rs.DataSeries(range(10), name=\"SigI\"), \"Q\"),\n (rs.DataSeries(range(10), name=\"SigF\"), \"Q\"),\n (rs.DataSeries(range(10), name=\"SIGF\"), \"Q\"),\n (rs.DataSeries(range(10), name=\"F\"), \"F\"),\n (rs.DataSeries(range(10), name=\"F-obs\"), \"F\"),\n (rs.DataSeries(range(10), name=\"ANOM\"), \"F\"),\n (rs.DataSeries(range(10), name=\"PHANOM\"), \"P\"),\n (rs.DataSeries(range(10), name=\"PHI\"), \"P\"),\n (rs.DataSeries(range(10), name=\"PHIFMODEL\"), \"P\"),\n (rs.DataSeries(range(10), name=\"F(+)\"), \"G\"),\n (rs.DataSeries(range(10), name=\"F(-)\"), \"G\"),\n (rs.DataSeries(range(10), name=\"SigF(+)\"), \"L\"),\n (rs.DataSeries(range(10), name=\"SigF(-)\"), \"L\"),\n (rs.DataSeries(range(10), name=\"I(+)\"), \"K\"),\n (rs.DataSeries(range(10), name=\"I(-)\"), \"K\"),\n (rs.DataSeries(range(10), name=\"SigI(+)\"), \"M\"),\n (rs.DataSeries(range(10), name=\"SigI(-)\"), \"M\"),\n (rs.DataSeries(range(10), name=\"HLA\"), \"A\"),\n (rs.DataSeries(range(10), name=\"HLB\"), \"A\"),\n (rs.DataSeries(range(10), name=\"HLC\"), \"A\"),\n (rs.DataSeries(range(10), name=\"HLD\"), \"A\"),\n (rs.DataSeries(range(10), name=\"M/ISYM\"), \"Y\"),\n (rs.DataSeries(range(10), name=\"E\"), \"E\"),\n (rs.DataSeries(range(10), name=\"batch\"), \"B\"),\n (rs.DataSeries(range(10), name=\"image\"), \"B\"),\n (rs.DataSeries(range(10), name=\"weight\"), \"W\"),\n (rs.DataSeries(range(10), name=\"weights\"), \"W\"),\n (rs.DataSeries(range(10), name=\"W\"), \"W\"),\n (rs.DataSeries(range(10), name=\"FreeR_flag\"), \"I\"),\n ],\n)\ndef test_inference(dataseries):\n \"\"\"Test DataSeries.infer_mtz_dtype()\"\"\"\n result = dataseries[0].infer_mtz_dtype()\n expected = dataseries[0].astype(dataseries[1])\n assert_series_equal(result, expected)\n"
] | [
[
"pandas.testing.assert_series_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mebusy/cs234_RL_2019_stanford | [
"6ca051294f8af5257a051d2933fcc6a39177f24d"
] | [
"assignments/assignment3/starter_code_torch/code/policy_gradient.py"
] | [
"import numpy as np\nimport torch\nimport gym\nimport os\nfrom general import get_logger, Progbar, export_plot\nfrom baseline_network import BaselineNetwork\nfrom network_utils import build_mlp, device, np2torch\nfrom policy import CategoricalPolicy, GaussianPolicy\n\n\nclass PolicyGradient(object):\n \"\"\"\n Class for implementing a policy gradient algorithm\n \"\"\"\n def __init__(self, env, config, seed, logger=None):\n \"\"\"\n Initialize Policy Gradient Class\n\n Args:\n env: an OpenAI Gym environment\n config: class with hyperparameters\n logger: logger instance from the logging module\n\n You do not need to implement anything in this function. However,\n you will need to use self.discrete, self.observation_dim,\n self.action_dim, and self.lr in other methods.\n \"\"\"\n # directory for training outputs\n if not os.path.exists(config.output_path):\n os.makedirs(config.output_path)\n\n # store hyperparameters\n self.config = config\n self.seed = seed\n\n self.logger = logger\n if logger is None:\n self.logger = get_logger(config.log_path)\n self.env = env\n self.env.seed(self.seed)\n\n # discrete vs continuous action space\n self.discrete = isinstance(env.action_space, gym.spaces.Discrete)\n self.observation_dim = self.env.observation_space.shape[0]\n self.action_dim = self.env.action_space.n if self.discrete else self.env.action_space.shape[0]\n\n self.lr = self.config.learning_rate\n\n self.init_policy()\n\n if config.use_baseline:\n self.baseline_network = BaselineNetwork(env, config)\n\n def init_policy(self):\n \"\"\"\n Please do the following:\n 1. Create a network using build_mlp. It should map vectors of size\n self.observation_dim to vectors of size self.action_dim, and use\n the number of layers and layer size from self.config\n 2. If self.discrete = True (meaning that the actions are discrete, i.e.\n from the set {0, 1, ..., N-1} where N is the number of actions),\n instantiate a CategoricalPolicy.\n If self.discrete = False (meaning that the actions are continuous,\n i.e. elements of R^d where d is the dimension), instantiate a\n GaussianPolicy. Either way, assign the policy to self.policy\n 3. Create an optimizer for the policy, with learning rate self.lr\n Note that the policy is an instance of (a subclass of) nn.Module, so\n you can call the parameters() method to get its parameters.\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 8-12 lines. ############\n\n #######################################################\n ######### END YOUR CODE. ############\n\n def init_averages(self):\n \"\"\"\n You don't have to change or use anything here.\n \"\"\"\n self.avg_reward = 0.\n self.max_reward = 0.\n self.std_reward = 0.\n self.eval_reward = 0.\n\n def update_averages(self, rewards, scores_eval):\n \"\"\"\n Update the averages.\n You don't have to change or use anything here.\n\n Args:\n rewards: deque\n scores_eval: list\n \"\"\"\n self.avg_reward = np.mean(rewards)\n self.max_reward = np.max(rewards)\n self.std_reward = np.sqrt(np.var(rewards) / len(rewards))\n\n if len(scores_eval) > 0:\n self.eval_reward = scores_eval[-1]\n\n def record_summary(self, t):\n pass\n\n def sample_path(self, env, num_episodes = None):\n \"\"\"\n Sample paths (trajectories) from the environment.\n\n Args:\n num_episodes: the number of episodes to be sampled\n if none, sample one batch (size indicated by config file)\n env: open AI Gym envinronment\n\n Returns:\n paths: a list of paths. 
Each path in paths is a dictionary with\n path[\"observation\"] a numpy array of ordered observations in the path\n path[\"actions\"] a numpy array of the corresponding actions in the path\n path[\"reward\"] a numpy array of the corresponding rewards in the path\n total_rewards: the sum of all rewards encountered during this \"path\"\n\n You do not have to implement anything in this function, but you will need to\n understand what it returns, and it is worthwhile to look over the code\n just so you understand how we are taking actions in the environment\n and generating batches to train on.\n \"\"\"\n episode = 0\n episode_rewards = []\n paths = []\n t = 0\n\n while (num_episodes or t < self.config.batch_size):\n state = env.reset()\n states, actions, rewards = [], [], []\n episode_reward = 0\n\n for step in range(self.config.max_ep_len):\n states.append(state)\n action = self.policy.act(states[-1][None])[0]\n state, reward, done, info = env.step(action)\n actions.append(action)\n rewards.append(reward)\n episode_reward += reward\n t += 1\n if (done or step == self.config.max_ep_len-1):\n episode_rewards.append(episode_reward)\n break\n if (not num_episodes) and t == self.config.batch_size:\n break\n\n path = {\"observation\" : np.array(states),\n \"reward\" : np.array(rewards),\n \"action\" : np.array(actions)}\n paths.append(path)\n episode += 1\n if num_episodes and episode >= num_episodes:\n break\n\n return paths, episode_rewards\n\n def get_returns(self, paths):\n \"\"\"\n Calculate the returns G_t for each timestep\n\n Args:\n paths: recorded sample paths. See sample_path() for details.\n\n Return:\n returns: return G_t for each timestep\n\n After acting in the environment, we record the observations, actions, and\n rewards. To get the advantages that we need for the policy update, we have\n to convert the rewards into returns, G_t, which are themselves an estimate\n of Q^π (s_t, a_t):\n\n G_t = r_t + γ r_{t+1} + γ^2 r_{t+2} + ... + γ^{T-t} r_T\n\n where T is the last timestep of the episode.\n\n Note that here we are creating a list of returns for each path\n\n TODO: compute and return G_t for each timestep. Use self.config.gamma.\n \"\"\"\n\n all_returns = []\n for path in paths:\n rewards = path[\"reward\"]\n #######################################################\n ######### YOUR CODE HERE - 5-10 lines. ############\n\n #######################################################\n ######### END YOUR CODE. ############\n all_returns.append(returns)\n returns = np.concatenate(all_returns)\n\n return returns\n\n def normalize_advantage(self, advantages):\n \"\"\"\n Args:\n advantages: np.array of shape [batch size]\n Returns:\n normalized_advantages: np.array of shape [batch size]\n\n TODO:\n Normalize the advantages so that they have a mean of 0 and standard\n deviation of 1. Put the result in a variable called\n normalized_advantages (which will be returned).\n\n Note:\n This function is called only if self.config.normalize_advantage is True.\n \"\"\"\n #######################################################\n ######### YOUR CODE HERE - 1-2 lines. ############\n\n #######################################################\n ######### END YOUR CODE. 
############\n return normalized_advantages\n\n def calculate_advantage(self, returns, observations):\n \"\"\"\n Calculates the advantage for each of the observations\n Args:\n returns: np.array of shape [batch size]\n observations: np.array of shape [batch size, dim(observation space)]\n Returns:\n advantages: np.array of shape [batch size]\n \"\"\"\n if self.config.use_baseline:\n # override the behavior of advantage by subtracting baseline\n advantages = self.baseline_network.calculate_advantage(returns, observations)\n else:\n advantages = returns\n\n if self.config.normalize_advantage:\n advantages = self.normalize_advantage(advantages)\n\n return advantages\n\n def update_policy(self, observations, actions, advantages):\n \"\"\"\n Args:\n observations: np.array of shape [batch size, dim(observation space)]\n actions: np.array of shape\n [batch size, dim(action space)] if continuous\n [batch size] (and integer type) if discrete\n advantages: np.array of shape [batch size]\n\n Perform one update on the policy using the provided data.\n To compute the loss, you will need the log probabilities of the actions\n given the observations. Note that the policy's action_distribution\n method returns an instance of a subclass of\n torch.distributions.Distribution, and that object can be used to\n compute log probabilities.\n See https://pytorch.org/docs/stable/distributions.html#distribution\n\n Note:\n PyTorch optimizers will try to minimize the loss you compute, but you\n want to maximize the policy's performance.\n \"\"\"\n observations = np2torch(observations)\n actions = np2torch(actions)\n advantages = np2torch(advantages)\n #######################################################\n ######### YOUR CODE HERE - 5-7 lines. ############\n\n #######################################################\n ######### END YOUR CODE. 
############\n\n def train(self):\n \"\"\"\n Performs training\n\n You do not have to change or use anything here, but take a look\n to see how all the code you've written fits together!\n \"\"\"\n last_record = 0\n\n self.init_averages()\n all_total_rewards = [] # the returns of all episodes samples for training purposes\n averaged_total_rewards = [] # the returns for each iteration\n\n for t in range(self.config.num_batches):\n\n # collect a minibatch of samples\n paths, total_rewards = self.sample_path(self.env)\n all_total_rewards.extend(total_rewards)\n observations = np.concatenate([path[\"observation\"] for path in paths])\n actions = np.concatenate([path[\"action\"] for path in paths])\n rewards = np.concatenate([path[\"reward\"] for path in paths])\n # compute Q-val estimates (discounted future returns) for each time step\n returns = self.get_returns(paths)\n\n # advantage will depend on the baseline implementation\n advantages = self.calculate_advantage(returns, observations)\n\n # run training operations\n if self.config.use_baseline:\n self.baseline_network.update_baseline(returns, observations)\n self.update_policy(observations, actions, advantages)\n\n # logging\n if (t % self.config.summary_freq == 0):\n self.update_averages(total_rewards, all_total_rewards)\n self.record_summary(t)\n\n # compute reward statistics for this batch and log\n avg_reward = np.mean(total_rewards)\n sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards))\n msg = \"Average reward: {:04.2f} +/- {:04.2f}\".format(avg_reward, sigma_reward)\n averaged_total_rewards.append(avg_reward)\n self.logger.info(msg)\n\n if self.config.record and (last_record > self.config.record_freq):\n self.logger.info(\"Recording...\")\n last_record = 0\n self.record()\n\n self.logger.info(\"- Training done.\")\n np.save(self.config.scores_output, averaged_total_rewards)\n export_plot(averaged_total_rewards, \"Score\", self.config.env_name, self.config.plot_output)\n\n def evaluate(self, env=None, num_episodes=1):\n \"\"\"\n Evaluates the return for num_episodes episodes.\n Not used right now, all evaluation statistics are computed during training\n episodes.\n \"\"\"\n if env==None: env = self.env\n paths, rewards = self.sample_path(env, num_episodes)\n avg_reward = np.mean(rewards)\n sigma_reward = np.sqrt(np.var(rewards) / len(rewards))\n msg = \"Average reward: {:04.2f} +/- {:04.2f}\".format(avg_reward, sigma_reward)\n self.logger.info(msg)\n return avg_reward\n\n def record(self):\n \"\"\"\n Recreate an env and record a video for one episode\n \"\"\"\n env = gym.make(self.config.env_name)\n env.seed(self.seed)\n env = gym.wrappers.Monitor(env, self.config.record_path, video_callable=lambda x: True, resume=True)\n self.evaluate(env, 1)\n\n def run(self):\n \"\"\"\n Apply procedures of training for a PG.\n \"\"\"\n # record one game at the beginning\n if self.config.record:\n self.record()\n # model\n self.train()\n # record one game at the end\n if self.config.record:\n self.record()\n"
] | [
[
"numpy.save",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.var",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
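The policy-gradient scaffold in the row above leaves `get_returns` as a TODO while its docstring fully specifies the target quantity G_t = r_t + γ r_{t+1} + ... + γ^{T-t} r_T. A minimal sketch of that per-episode computation, assuming only NumPy and a `gamma` float standing in for `self.config.gamma`; the function name and signature are illustrative, not the assignment's reference solution:

```python
import numpy as np

def discounted_returns(rewards, gamma):
    """Return G_t for every step of one episode, given a path["reward"] array."""
    returns = np.zeros(len(rewards))
    running = 0.0
    # Sweep backwards so each G_t reuses the already-computed tail sum.
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

# Three unit rewards with gamma = 0.9 -> [2.71, 1.9, 1.0]
print(discounted_returns(np.array([1.0, 1.0, 1.0]), 0.9))
```

Inside `get_returns`, an equivalent loop body would produce one such array per path before the final `np.concatenate`.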
bozliu/E2E-Keyword-Spotting | [
"64fc6fe414370a12a22fdf8ca5c8379d2c60b64e",
"64fc6fe414370a12a22fdf8ca5c8379d2c60b64e"
] | [
"kws/utils/mixup.py",
"kws/transforms/transforms_stft.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nSimple implementation for mixup. The loss and onehot functions origin from: https://github.com/moskomule/mixup.pytorch\n\nHongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz: mixup: Beyond Empirical Risk Minimization\nhttps://arxiv.org/abs/1710.09412\n\"\"\"\n\n\n__all__ = [ 'mixup_cross_entropy_loss', 'mixup' ]\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\ndef mixup_cross_entropy_loss(input, target, size_average=True):\n \"\"\"Origin: https://github.com/moskomule/mixup.pytorch\n in PyTorch's cross entropy, targets are expected to be labels\n so to predict probabilities this loss is needed\n suppose q is the target and p is the input\n loss(p, q) = -\\sum_i q_i \\log p_i\n \"\"\"\n assert input.size() == target.size()\n assert isinstance(input, Variable) and isinstance(target, Variable)\n input = torch.log(torch.nn.functional.softmax(input, dim=1).clamp(1e-5, 1))\n # input = input - torch.log(torch.sum(torch.exp(input), dim=1)).view(-1, 1)\n loss = - torch.sum(input * target)\n return loss / input.size()[0] if size_average else loss\n\ndef onehot(targets, num_classes):\n \"\"\"Origin: https://github.com/moskomule/mixup.pytorch\n convert index tensor into onehot tensor\n :param targets: index tensor\n :param num_classes: number of classes\n \"\"\"\n assert isinstance(targets, torch.LongTensor)\n return torch.zeros(targets.size()[0], num_classes).scatter_(1, targets.view(-1, 1), 1)\n\ndef mixup(inputs, targets, num_classes, alpha=2):\n \"\"\"Mixup on 1x32x32 mel-spectrograms.\n \"\"\"\n s = inputs.size()[0]\n weight = torch.Tensor(np.random.beta(alpha, alpha, s))\n index = np.random.permutation(s)\n x1, x2 = inputs, inputs[index, :, :, :]\n y1, y2 = onehot(targets, num_classes), onehot(targets[index,], num_classes)\n weight = weight.view(s, 1, 1, 1)\n inputs = weight*x1 + (1-weight)*x2\n weight = weight.view(s, 1)\n targets = weight*y1 + (1-weight)*y2\n return inputs, targets\n",
"\"\"\"Transforms on the short time fourier transforms of wav samples.\"\"\"\n\nimport random\n\nimport numpy as np\nimport librosa\nimport torch\nimport torchaudio\nfrom torch.utils.data import Dataset\n\nfrom .transforms_wav import should_apply_transform\n\nclass ToSTFT(object):\n \"\"\"Applies on an audio the short time fourier transform.\"\"\"\n\n def __init__(self, n_fft=2048, hop_length=512):\n self.n_fft = n_fft\n self.hop_length = hop_length\n\n def __call__(self, data):\n samples = data['samples']\n sample_rate = data['sample_rate']\n data['n_fft'] = self.n_fft\n data['hop_length'] = self.hop_length\n data['stft'] = librosa.stft(samples, n_fft=self.n_fft, hop_length=self.hop_length)\n data['stft_shape'] = data['stft'].shape\n return data\n\nclass StretchAudioOnSTFT(object):\n \"\"\"Stretches an audio on the frequency domain.\"\"\"\n\n def __init__(self, max_scale=0.2):\n self.max_scale = max_scale\n\n def __call__(self, data):\n if not should_apply_transform():\n return data\n\n stft = data['stft']\n sample_rate = data['sample_rate']\n hop_length = data['hop_length']\n scale = random.uniform(-self.max_scale, self.max_scale)\n stft_stretch = librosa.core.phase_vocoder(stft, 1+scale, hop_length=hop_length)\n data['stft'] = stft_stretch\n return data\n\nclass TimeshiftAudioOnSTFT(object):\n \"\"\"A simple timeshift on the frequency domain without multiplying with exp.\"\"\"\n\n def __init__(self, max_shift=8):\n self.max_shift = max_shift\n\n def __call__(self, data):\n if not should_apply_transform():\n return data\n\n stft = data['stft']\n shift = random.randint(-self.max_shift, self.max_shift)\n a = -min(0, shift)\n b = max(0, shift)\n stft = np.pad(stft, ((0, 0), (a, b)), \"constant\")\n if a == 0:\n stft = stft[:,b:]\n else:\n stft = stft[:,0:-a]\n data['stft'] = stft\n return data\n\nclass AddBackgroundNoiseOnSTFT(Dataset):\n \"\"\"Adds a random background noise on the frequency domain.\"\"\"\n\n def __init__(self, bg_dataset, max_percentage=0.45):\n self.bg_dataset = bg_dataset\n self.max_percentage = max_percentage\n\n def __call__(self, data):\n if not should_apply_transform():\n return data\n\n noise = random.choice(self.bg_dataset)['stft']\n percentage = random.uniform(0, self.max_percentage)\n data['stft'] = data['stft'] * (1 - percentage) + noise * percentage\n return data\n\nclass FixSTFTDimension(object):\n \"\"\"Either pads or truncates in the time axis on the frequency domain, applied after stretching, time shifting etc.\"\"\"\n\n def __call__(self, data):\n stft = data['stft']\n t_len = stft.shape[1]\n orig_t_len = data['stft_shape'][1]\n if t_len > orig_t_len:\n stft = stft[:,0:orig_t_len]\n elif t_len < orig_t_len:\n stft = np.pad(stft, ((0, 0), (0, orig_t_len-t_len)), \"constant\")\n\n data['stft'] = stft\n return data\n\nclass ToMelSpectrogramFromSTFT(object):\n \"\"\"Creates the mel spectrogram from the short time fourier transform of a file. 
The result is a 32x32 matrix.\"\"\"\n\n def __init__(self, n_mels=32):\n self.n_mels = n_mels\n\n def __call__(self, data):\n stft = data['stft']\n sample_rate = data['sample_rate']\n n_fft = data['n_fft']\n mel_basis = librosa.filters.mel(sample_rate, n_fft, self.n_mels)\n s = np.dot(mel_basis, np.abs(stft)**2.0)\n data['mel_spectrogram'] = librosa.power_to_db(s, ref=np.max)\n return data\n\nclass DeleteSTFT(object):\n \"\"\"Pytorch doesn't like complex numbers, use this transform to remove STFT after computing the mel spectrogram.\"\"\"\n\n def __call__(self, data):\n del data['stft']\n return data\n\nclass AudioFromSTFT(object):\n \"\"\"Inverse short time fourier transform.\"\"\"\n\n def __call__(self, data):\n stft = data['stft']\n data['istft_samples'] = librosa.core.istft(stft, dtype=data['samples'].dtype)\n return data\n\n\n\nclass RandomPitchShift(object):\n def __init__(self, sample_rate=22050, pitch_shift=(-1.0, 1.0)):\n if isinstance(pitch_shift, (tuple, list)):\n self.min_pitch_shift = pitch_shift[0]\n self.max_pitch_shift = pitch_shift[1]\n else:\n self.min_pitch_shift = -pitch_shift\n self.max_pitch_shift = pitch_shift\n self.sample_rate=sample_rate\n\n def __call__(self, waveform):\n waveform = waveform.numpy()\n pitch_shift = random.uniform(self.min_pitch_shift, self.max_pitch_shift)\n waveform = librosa.effects.pitch_shift(waveform, sr=self.sample_rate,\n n_steps=pitch_shift)\n return torch.from_numpy(waveform)\n\n\nclass RandomVolume(object):\n def __init__(self, gain_db=(-50.0, 50.0)):\n self.gain = gain_db\n\n def __call__(self, waveform):\n rand_gain = random.uniform(self.gain[0], self.gain[1])\n return torch.clamp(torchaudio.functional.gain(waveform, rand_gain), -1.0, 1.0)\n\n\nclass AudioNoise(object):\n def __init__(self, scale=0.25, sample_rate=22050, examples=None):\n self.scale = scale\n self.sample_rate = sample_rate\n if examples is None:\n examples = ['brahms', 'choice', 'fishin', 'nutcracker', 'trumpet', 'vibeace']\n self.examples = []\n\n for example in examples:\n waveform, sample_rate = librosa.load(librosa.example(example))\n if sample_rate != self.sample_rate:\n waveform = librosa.core.resample(waveform, sample_rate, self.sample_rate)\n self.examples.append(torch.from_numpy(waveform))\n else:\n self.examples = examples\n\n def __call__(self, waveform):\n noise = random.choice(self.examples)\n if noise.shape[0] < waveform.shape[0]:\n noise = noise.repeat(waveform.shape[0] // noise.shape[0] + 1)\n\n rand_pos = random.randrange(noise.shape[0] - waveform.shape[0] + 1)\n noise = noise[rand_pos:rand_pos + waveform.shape[0]]\n return waveform + self.scale * noise\n\n\nclass GaussianNoise(object):\n def __init__(self, scale=0.01):\n self.scale = scale\n\n def __call__(self, data):\n return data + self.scale * torch.randn(data.shape)\n\n\nclass SpectogramNormalize(object):\n def __init__(self, mean=-7.0, std=6.0, eps=1e-8):\n self.mean = mean\n self.std = std\n self.eps = 1e-8\n\n def __call__(self, spec):\n spec = torch.log(spec + self.eps)\n spec = (spec - self.mean) / self.std\n return spec"
] | [
[
"numpy.random.beta",
"numpy.random.permutation",
"torch.sum",
"torch.nn.functional.softmax"
],
[
"numpy.abs",
"numpy.pad",
"torch.randn",
"torch.from_numpy",
"torch.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
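The `mixup` helper in the row above expects a 4-D batch (its docstring mentions 1x32x32 mel-spectrograms), integer class labels as a `torch.LongTensor` (required by the assert in `onehot`), and returns mixed inputs plus soft targets that pair with `mixup_cross_entropy_loss`. A usage sketch, assuming the module is importable as `kws.utils.mixup` per the file path above and using random tensors in place of a real keyword-spotting batch and model output:

```python
import torch
from kws.utils.mixup import mixup, mixup_cross_entropy_loss

# Hypothetical batch: eight 1x32x32 mel-spectrograms, 12 keyword classes.
inputs = torch.randn(8, 1, 32, 32)
targets = torch.LongTensor([0, 3, 7, 1, 5, 2, 11, 4])  # onehot() asserts a LongTensor

mixed_inputs, soft_targets = mixup(inputs, targets, num_classes=12, alpha=2)

# Stand-in for network logits of shape (batch, num_classes).
logits = torch.randn(8, 12, requires_grad=True)
loss = mixup_cross_entropy_loss(logits, soft_targets)
loss.backward()
print(mixed_inputs.shape, soft_targets.shape, float(loss))
```

The soft targets are convex combinations of one-hot rows, which is why the custom loss (a log-softmax dotted with the target distribution) is used instead of `torch.nn.CrossEntropyLoss`, which expects integer labels.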
pscedu/DPF-Core | [
"05816d1d4940aacc2b51055ab67b8f4c31fc1ca3"
] | [
"tests/test_scoping.py"
] | [
"import pytest\nimport numpy as np\n\nfrom ansys import dpf\nfrom ansys.dpf.core import Scoping\nimport numpy as np\nfrom ansys.dpf.core import errors as dpf_errors\nfrom ansys.dpf.core.check_version import meets_version, get_server_version\n\nserv = dpf.core.start_local_server('127.0.0.1', 50075)\nSERVER_VERSION_HIGHER_THAN_2_0 = meets_version(get_server_version(serv), \"2.1\")\n# serv.shutdown()\n\ndef test_create_scoping():\n scop = Scoping()\n assert scop._message.id\n\n\ndef test_createbycopy_scoping():\n scop = Scoping()\n scop2 = Scoping(scoping=scop._message)\n assert scop._message.id == scop2._message.id\n\n\ndef test_create_scoping_with_ids_location():\n scop = Scoping(ids=[1,2,3,5,8,9,10], location=dpf.core.locations.elemental)\n assert scop._message.id\n assert scop.ids == [1,2,3,5,8,9,10]\n assert scop.location == dpf.core.locations.elemental\n\n\ndef test_set_get_ids_scoping():\n scop = Scoping()\n ids=[1,2,3,5,8,9,10]\n scop.ids = ids\n assert scop.ids == ids\n\[email protected](not SERVER_VERSION_HIGHER_THAN_2_0, reason='Requires server version higher than 2.0')\ndef test_set_get_ids_long_scoping():\n scop = Scoping()\n ids=range(1,1000000)\n scop.ids = ids\n assert np.allclose(scop.ids,ids)\n\n\ndef test_get_location_scoping():\n scop = Scoping()\n scop._set_location(\"Nodal\")\n assert scop._get_location() == \"Nodal\"\n scop = Scoping()\n scop._set_location(dpf.core.locations.nodal)\n assert scop._get_location() == \"Nodal\"\n\n\ndef test_get_location_property_scoping():\n scop = Scoping()\n scop.location = \"Nodal\"\n assert scop.location == \"Nodal\"\n scop = Scoping()\n scop.location = dpf.core.locations.nodal\n assert scop.location == \"Nodal\"\n\n\ndef test_count_scoping():\n scop = Scoping()\n ids=[1,2,3,5,8,9,10]\n scop.ids = ids\n assert scop._count() == len(ids)\n\n\ndef test_set_get_entity_data_scoping():\n scop = Scoping()\n ids=[1,2,3,5,8,9,10]\n scop.ids= ids \n scop.set_id(0,11)\n assert scop._get_id(0)==11\n assert scop._get_index(11)==0\n scop.set_id(1,12)\n assert scop._get_id(1)==12\n assert scop._get_index(12)==1\n \n\ndef test_print_scoping():\n scop = Scoping()\n ids=[1,2,3,5,8,9,10]\n scop.ids= ids \n print(scop)\n\ndef test_iter_scoping():\n scop = Scoping()\n ids=[1,2,3,5,8,9,10]\n scop.ids =ids\n for i,id in enumerate(scop):\n assert id == ids[i]\n\ndef test_delete_scoping():\n scop = Scoping()\n scop.__del__()\n with pytest.raises(Exception):\n scop.ids\n\n\ndef test_delete_auto_scoping():\n scop = Scoping()\n scop2 = Scoping(scoping=scop)\n scop.__del__()\n with pytest.raises(Exception):\n scop2.ids\n\[email protected](SERVER_VERSION_HIGHER_THAN_2_0, reason='Requires server version below (or equal) than 2.0')\ndef test_throw_if_unsufficient_version():\n scop = Scoping()\n ids = range(1, int(2e6))\n with pytest.raises(dpf_errors.DpfVersionNotSupported):\n scop.ids = ids\n ids = range(1, int(3e6))\n with pytest.raises(dpf_errors.DpfVersionNotSupported):\n scop.ids = ids\n ids = range(1, 2000)\n scop.ids = ids\n ids_check = scop.ids\n assert np.allclose(ids, ids_check)\n\[email protected](not SERVER_VERSION_HIGHER_THAN_2_0, reason='Requires server version higher than 2.0')\ndef test_field_with_scoping_many_ids(allkindofcomplexity):\n # set scoping ids with a scoping created from a model\n model = dpf.core.Model(allkindofcomplexity)\n mesh = model.metadata.meshed_region\n nnodes = mesh.nodes.n_nodes\n assert nnodes == 15129\n nod_ids = mesh.nodes.scoping.ids\n mesh.nodes.scoping.ids = nod_ids\n new_nod_ids = mesh.nodes.scoping.ids\n assert 
np.allclose(nod_ids, new_nod_ids)\n modif_nod_ids = nod_ids\n modif_nod_ids[245] = 45\n modif_nod_ids[1129] = 69\n modif_nod_ids[1999] = 2086\n modif_nod_ids[9046] = 12\n modif_nod_ids[12907] = 7894\n modif_nod_ids[15128] = 2789\n mesh.nodes.scoping.ids = modif_nod_ids\n new_modif_nod_ids = mesh.nodes.scoping.ids\n assert np.allclose(new_modif_nod_ids, modif_nod_ids)\n \n # set scoping ids with a scoping created from scratch\n scop = dpf.core.Scoping()\n ids=range(1,1000000)\n scop.ids = ids\n ids_check = scop.ids\n assert np.allclose(ids_check, ids)\n modif_ids = ids_check\n modif_ids[245] = 45\n modif_ids[10046] = 69\n modif_ids[1999] = 2086\n modif_ids[50067] = 12\n modif_ids[999345] = 7894\n modif_ids[506734] = 2789\n # np.ndarray\n scop.ids = np.array(modif_ids)\n new_modif_ids = scop.ids\n assert np.allclose(new_modif_ids, modif_ids)\n # list\n modif_ids = modif_ids\n scop.ids = modif_ids\n new_modif_ids = scop.ids\n assert np.allclose(new_modif_ids, modif_ids)\n \n \ndef test_largest_set_ids_one_shot():\n scop = dpf.core.Scoping()\n scop.ids = range(1, int(8e6/28))\n assert np.allclose( scop.ids,range(1, int(8e6/28)))\n try :\n scop.ids = range(1, int(8.2e6/28))\n except :\n return #check that either more than 8MB works or it throws\n \n assert np.allclose( scop.ids,range(1, int(8.2e6/28)))\n "
] | [
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pcraster/lue | [
"e64c18f78a8b6d8a602b7578a2572e9740969202",
"e64c18f78a8b6d8a602b7578a2572e9740969202"
] | [
"source/framework/python/test/algorithm/d8_flow_direction_test.py",
"source/framework/python/test/algorithm/sin_test.py"
] | [
"import lue.framework as lfr\nimport lue_test\nimport numpy as np\n\n\ndef setUpModule():\n lue_test.start_hpx_runtime()\n\n\ndef tearDownModule():\n return\n lue_test.stop_hpx_runtime()\n\n\nclass D8FlowDirectionTest(lue_test.TestCase):\n\n @lue_test.framework_test_case\n def test_overloads(self):\n\n array_shape = (60, 40)\n partition_shape = (10, 10)\n direction = 6\n\n for type_ in [np.float32, np.float64]:\n elevation = lfr.create_array(array_shape, partition_shape, np.dtype(type_), direction)\n flow_direction = lfr.d8_flow_direction(elevation)\n\n\n @lue_test.framework_test_case\n def test_directions(self):\n\n array_shape = (60, 40)\n partition_shape = (10, 10)\n\n for direction in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n elevation = lfr.create_array(array_shape, partition_shape, np.dtype(np.float32), direction)\n flow_direction = lfr.d8_flow_direction(elevation)\n",
"import lue.framework as lfr\nimport lue_test\nimport numpy as np\n\n\ndef setUpModule():\n lue_test.start_hpx_runtime()\n\n\ndef tearDownModule():\n return\n lue_test.stop_hpx_runtime()\n\n\nclass SinTest(lue_test.TestCase):\n\n @lue_test.framework_test_case\n def test_overloads(self):\n\n array_shape = (60, 40)\n partition_shape = (10, 10)\n fill_value = 5.5\n\n for dtype in [np.float32, np.float64]:\n array = lfr.create_array(array_shape, partition_shape, np.dtype(dtype), fill_value)\n lfr.sin(array)\n"
] | [
[
"numpy.dtype"
],
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
matthew-brett/nibabel | [
"b61161a8bc42c37c6c8f24ec43736ce4d3d3cb6b"
] | [
"nibabel/tests/test_nifti1.py"
] | [
"# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n''' Tests for nifti reading package '''\nfrom __future__ import with_statement\nimport os\n\nfrom ..py3k import BytesIO, ZEROB, asbytes\n\nimport numpy as np\n\nfrom ..casting import type_info, have_binary128\nfrom ..tmpdirs import InTemporaryDirectory\nfrom ..spatialimages import HeaderDataError\nfrom ..affines import from_matvec\nfrom .. import nifti1 as nifti1\nfrom ..nifti1 import (load, Nifti1Header, Nifti1PairHeader, Nifti1Image,\n Nifti1Pair, Nifti1Extension, Nifti1Extensions,\n data_type_codes, extension_codes, slice_order_codes)\n\nfrom .test_arraywriters import rt_err_estimate, IUINT_TYPES\n\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nfrom nose.tools import (assert_true, assert_false, assert_equal,\n assert_raises)\nfrom nose import SkipTest\n\nfrom ..testing import data_path\n\nfrom . import test_analyze as tana\n\nheader_file = os.path.join(data_path, 'nifti1.hdr')\nimage_file = os.path.join(data_path, 'example4d.nii.gz')\n\n\n# Example transformation matrix\nR = [[0, -1, 0], [1, 0, 0], [0, 0, 1]] # rotation matrix\nZ = [2.0, 3.0, 4.0] # zooms\nT = [20, 30, 40] # translations\nA = np.eye(4)\nA[:3,:3] = np.array(R) * Z # broadcasting does the job\nA[:3,3] = T\n\n\nclass TestNifti1PairHeader(tana.TestAnalyzeHeader):\n header_class = Nifti1PairHeader\n example_file = header_file\n\n def test_empty(self):\n tana.TestAnalyzeHeader.test_empty(self)\n hdr = self.header_class()\n assert_equal(hdr['magic'], asbytes('ni1'))\n assert_equal(hdr['scl_slope'], 1)\n assert_equal(hdr['vox_offset'], 0)\n\n def test_from_eg_file(self):\n hdr = Nifti1Header.from_fileobj(open(self.example_file, 'rb'))\n assert_equal(hdr.endianness, '<')\n assert_equal(hdr['magic'], asbytes('ni1'))\n assert_equal(hdr['sizeof_hdr'], 348)\n\n def test_big_scaling(self):\n # Test that upcasting works for huge scalefactors\n # See tests for apply_read_scaling in test_utils\n hdr = self.header_class()\n hdr.set_data_shape((2,1,1))\n hdr.set_data_dtype(np.int16)\n sio = BytesIO()\n dtt = np.float32\n # This will generate a huge scalefactor\n finf = type_info(dtt)\n data = np.array([finf['min'], finf['max']], dtype=dtt)[:,None, None]\n hdr.data_to_fileobj(data, sio)\n data_back = hdr.data_from_fileobj(sio)\n assert_true(np.allclose(data, data_back))\n\n def test_nifti_log_checks(self):\n # in addition to analyze header checks\n HC = self.header_class\n # intercept and slope\n hdr = HC()\n # Slope of 0 is OK\n hdr['scl_slope'] = 0\n fhdr, message, raiser = self.log_chk(hdr, 0)\n assert_equal((fhdr, message), (hdr, ''))\n # But not with non-zero intercept\n hdr['scl_inter'] = 3\n fhdr, message, raiser = self.log_chk(hdr, 20)\n assert_equal(fhdr['scl_inter'], 0)\n assert_equal(message,\n 'Unused \"scl_inter\" is 3.0; should be 0; '\n 'setting \"scl_inter\" to 0')\n # Or not-finite intercept\n hdr['scl_inter'] = np.nan\n # NaN string representation can be odd on windows\n nan_str = '%s' % np.nan\n fhdr, message, raiser = self.log_chk(hdr, 20)\n assert_equal(fhdr['scl_inter'], 0)\n assert_equal(message,\n 'Unused \"scl_inter\" is %s; should be 0; '\n 'setting \"scl_inter\" to 0' % nan_str)\n # Reset to usable 
scale\n hdr['scl_slope'] = 1\n # not finite inter is more of a problem\n hdr['scl_inter'] = np.nan # severity 30\n fhdr, message, raiser = self.log_chk(hdr, 40)\n assert_equal(fhdr['scl_inter'], 0)\n assert_equal(message,\n '\"scl_slope\" is 1.0; but \"scl_inter\" is %s; '\n '\"scl_inter\" should be finite; setting '\n '\"scl_inter\" to 0' % nan_str)\n assert_raises(*raiser)\n # Not finite scale also bad, generates message for scale and offset\n hdr['scl_slope'] = np.nan\n fhdr, message, raiser = self.log_chk(hdr, 30)\n assert_equal(fhdr['scl_slope'], 0)\n assert_equal(fhdr['scl_inter'], 0)\n assert_equal(message,\n '\"scl_slope\" is nan; should be finite; '\n 'Unused \"scl_inter\" is nan; should be 0; '\n 'setting \"scl_slope\" to 0 (no scaling); '\n 'setting \"scl_inter\" to 0')\n assert_raises(*raiser)\n # Or just scale if inter is already 0\n hdr['scl_inter'] = 0\n fhdr, message, raiser = self.log_chk(hdr, 30)\n assert_equal(fhdr['scl_slope'], 0)\n assert_equal(fhdr['scl_inter'], 0)\n assert_equal(message,\n '\"scl_slope\" is nan; should be finite; '\n 'setting \"scl_slope\" to 0 (no scaling)')\n assert_raises(*raiser)\n # qfac\n hdr = HC()\n hdr['pixdim'][0] = 0\n fhdr, message, raiser = self.log_chk(hdr, 20)\n assert_equal(fhdr['pixdim'][0], 1)\n assert_equal(message, 'pixdim[0] (qfac) should be 1 '\n '(default) or -1; setting qfac to 1')\n # magic and offset\n hdr = HC()\n hdr['magic'] = 'ooh'\n fhdr, message, raiser = self.log_chk(hdr, 45)\n assert_equal(fhdr['magic'], asbytes('ooh'))\n assert_equal(message, 'magic string \"ooh\" is not valid; '\n 'leaving as is, but future errors are likely')\n hdr['magic'] = 'n+1' # single file needs suitable offset\n hdr['vox_offset'] = 0\n fhdr, message, raiser = self.log_chk(hdr, 40)\n assert_equal(fhdr['vox_offset'], 352)\n assert_equal(message, 'vox offset 0 too low for single '\n 'file nifti1; setting to minimum value '\n 'of 352')\n # qform, sform\n hdr = HC()\n hdr['qform_code'] = -1\n fhdr, message, raiser = self.log_chk(hdr, 30)\n assert_equal(fhdr['qform_code'], 0)\n assert_equal(message, 'qform_code -1 not valid; '\n 'setting to 0')\n hdr = HC()\n hdr['sform_code'] = -1\n fhdr, message, raiser = self.log_chk(hdr, 30)\n assert_equal(fhdr['sform_code'], 0)\n assert_equal(message, 'sform_code -1 not valid; '\n 'setting to 0')\n\n def test_freesurfer_hack(self):\n # For large vector images, Freesurfer appears to set dim[1] to -1 and\n # then use glmin for the vector length (an i4)\n HC = self.header_class\n # The standard case\n hdr = HC()\n hdr.set_data_shape((2, 3, 4))\n assert_equal(hdr.get_data_shape(), (2, 3, 4))\n assert_equal(hdr['glmin'], 0)\n # Just left of the freesurfer case\n dim_type = hdr.template_dtype['dim'].base\n glmin = hdr.template_dtype['glmin'].base\n too_big = int(np.iinfo(dim_type).max) + 1\n hdr.set_data_shape((too_big-1, 1, 1))\n assert_equal(hdr.get_data_shape(), (too_big-1, 1, 1))\n # The freesurfer case\n hdr.set_data_shape((too_big, 1, 1))\n assert_equal(hdr.get_data_shape(), (too_big, 1, 1))\n assert_array_equal(hdr['dim'][:4], [3, -1, 1, 1])\n assert_equal(hdr['glmin'], too_big)\n # This only works for the case of a 3D with -1, 1, 1\n assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,))\n assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,1))\n assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,1,2))\n assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,2,1))\n assert_raises(HeaderDataError, hdr.set_data_shape, (1, too_big))\n assert_raises(HeaderDataError, 
hdr.set_data_shape, (1, too_big, 1))\n assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, too_big))\n # Outside range of glmin raises error\n far_too_big = int(np.iinfo(glmin).max) + 1\n hdr.set_data_shape((far_too_big-1, 1, 1))\n assert_equal(hdr.get_data_shape(), (far_too_big-1, 1, 1))\n assert_raises(HeaderDataError, hdr.set_data_shape, (far_too_big,1,1))\n # glmin of zero raises error (implausible vector length)\n hdr.set_data_shape((-1,1,1))\n hdr['glmin'] = 0\n assert_raises(HeaderDataError, hdr.get_data_shape)\n # Lists or tuples or arrays will work for setting shape\n for shape in ((too_big-1, 1, 1), (too_big, 1, 1)):\n for constructor in (list, tuple, np.array):\n hdr.set_data_shape(constructor(shape))\n assert_equal(hdr.get_data_shape(), shape)\n\n\n def test_qform_sform(self):\n HC = self.header_class\n hdr = HC()\n assert_array_equal(hdr.get_qform(), np.eye(4))\n empty_sform = np.zeros((4,4))\n empty_sform[-1,-1] = 1\n assert_array_equal(hdr.get_sform(), empty_sform)\n assert_equal(hdr.get_qform(coded=True), (None, 0))\n assert_equal(hdr.get_sform(coded=True), (None, 0))\n # Affine with no shears\n nice_aff = np.diag([2, 3, 4, 1])\n # Affine with shears\n nasty_aff = from_matvec(np.arange(9).reshape((3,3)), [9, 10, 11])\n fixed_aff = unshear_44(nasty_aff)\n for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform),\n (hdr.set_sform, hdr.get_sform)):\n in_meth(nice_aff, 2)\n aff, code = out_meth(coded=True)\n assert_array_equal(aff, nice_aff)\n assert_equal(code, 2)\n assert_array_equal(out_meth(), nice_aff) # non coded\n # Affine can also be passed if code == 0, affine will be suitably set\n in_meth(nice_aff, 0)\n assert_equal(out_meth(coded=True), (None, 0))\n assert_array_almost_equal(out_meth(), nice_aff)\n # Default qform code when previous == 0 is 2\n in_meth(nice_aff)\n aff, code = out_meth(coded=True)\n assert_equal(code, 2)\n # Unless code was non-zero before\n in_meth(nice_aff, 1)\n in_meth(nice_aff)\n aff, code = out_meth(coded=True)\n assert_equal(code, 1)\n # Can set code without modifying affine, by passing affine=None\n assert_array_equal(aff, nice_aff) # affine same as before\n in_meth(None, 3)\n aff, code = out_meth(coded=True)\n assert_array_equal(aff, nice_aff) # affine same as before\n assert_equal(code, 3)\n # affine is None on its own, or with code==0, resets code to 0\n in_meth(None, 0)\n assert_equal(out_meth(coded=True), (None, 0))\n in_meth(None)\n assert_equal(out_meth(coded=True), (None, 0))\n # List works as input\n in_meth(nice_aff.tolist())\n assert_array_equal(out_meth(), nice_aff)\n # Qform specifics\n # inexact set (with shears) is OK\n hdr.set_qform(nasty_aff, 1)\n assert_array_almost_equal(hdr.get_qform(), fixed_aff)\n # Unless allow_shears is False\n assert_raises(HeaderDataError, hdr.set_qform, nasty_aff, 1, False)\n # Reset sform, give qform a code, to test sform\n hdr.set_sform(None)\n hdr.set_qform(nice_aff, 1)\n # Check sform unchanged by setting qform\n assert_equal(hdr.get_sform(coded=True), (None, 0))\n # Setting does change the sform ouput\n hdr.set_sform(nasty_aff, 1)\n aff, code = hdr.get_sform(coded=True)\n assert_array_equal(aff, nasty_aff)\n assert_equal(code, 1)\n\n\ndef unshear_44(affine):\n RZS = affine[:3, :3]\n zooms = np.sqrt(np.sum(RZS * RZS, axis=0))\n R = RZS / zooms\n P, S, Qs = np.linalg.svd(R)\n PR = np.dot(P, Qs)\n return from_matvec(PR * zooms, affine[:3,3])\n\n\nclass TestNifti1SingleHeader(TestNifti1PairHeader):\n\n header_class = Nifti1Header\n\n def test_empty(self):\n 
tana.TestAnalyzeHeader.test_empty(self)\n hdr = self.header_class()\n assert_equal(hdr['magic'], asbytes('n+1'))\n assert_equal(hdr['scl_slope'], 1)\n assert_equal(hdr['vox_offset'], 352)\n\n def test_binblock_is_file(self):\n # Override test that binary string is the same as the file on disk; in\n # the case of the single file version of the header, we need to append\n # the extension string (4 0s)\n hdr = self.header_class()\n str_io = BytesIO()\n hdr.write_to(str_io)\n assert_equal(str_io.getvalue(), hdr.binaryblock + ZEROB * 4)\n\n def test_float128(self):\n hdr = self.header_class()\n if have_binary128():\n hdr.set_data_dtype(np.longdouble)\n assert_equal(hdr.get_data_dtype().type, np.longdouble)\n else:\n assert_raises(HeaderDataError, hdr.set_data_dtype, np.longdouble)\n\n\nclass TestNifti1Image(tana.TestAnalyzeImage):\n # Run analyze-flavor spatialimage tests\n image_class = Nifti1Image\n\n def _qform_rt(self, img):\n # Round trip image after setting qform, sform codes\n hdr = img.get_header()\n hdr['qform_code'] = 3\n hdr['sform_code'] = 4\n # Save / reload using bytes IO objects\n for key, value in img.file_map.items():\n value.fileobj = BytesIO()\n img.to_file_map()\n return img.from_file_map(img.file_map)\n\n def test_qform_cycle(self):\n # Qform load save cycle\n img_klass = self.image_class\n # None affine\n img = img_klass(np.zeros((2,3,4)), None)\n hdr_back = self._qform_rt(img).get_header()\n assert_equal(hdr_back['qform_code'], 3)\n assert_equal(hdr_back['sform_code'], 4)\n # Try non-None affine\n img = img_klass(np.zeros((2,3,4)), np.eye(4))\n hdr_back = self._qform_rt(img).get_header()\n assert_equal(hdr_back['qform_code'], 3)\n assert_equal(hdr_back['sform_code'], 4)\n # Modify affine in-place - does it hold?\n img.get_affine()[0,0] = 9\n img.to_file_map()\n img_back = img.from_file_map(img.file_map)\n exp_aff = np.diag([9,1,1,1])\n assert_array_equal(img_back.get_affine(), exp_aff)\n hdr_back = img.get_header()\n assert_array_equal(hdr_back.get_sform(), exp_aff)\n assert_array_equal(hdr_back.get_qform(), exp_aff)\n\n def test_header_update_affine(self):\n # Test that updating occurs only if affine is not allclose\n img = self.image_class(np.zeros((2,3,4)), np.eye(4))\n hdr = img.get_header()\n aff = img.get_affine()\n aff[:] = np.diag([1.1, 1.1, 1.1, 1]) # inexact floats\n hdr.set_qform(aff, 2)\n hdr.set_sform(aff, 2)\n img.update_header()\n assert_equal(hdr['sform_code'], 2)\n assert_equal(hdr['qform_code'], 2)\n\n def test_set_qform(self):\n img = self.image_class(np.zeros((2,3,4)), np.diag([2.2, 3.3, 4.3, 1]))\n hdr = img.get_header()\n new_affine = np.diag([1.1, 1.1, 1.1, 1])\n # Affine is same as sform (best affine)\n assert_array_almost_equal(img.get_affine(), hdr.get_best_affine())\n # Reset affine to something different again\n aff_affine = np.diag([3.3, 4.5, 6.6, 1])\n img.get_affine()[:] = aff_affine\n assert_array_almost_equal(img.get_affine(), aff_affine)\n # Set qform using new_affine\n img.set_qform(new_affine, 1)\n assert_array_almost_equal(img.get_qform(), new_affine)\n assert_equal(hdr['qform_code'], 1)\n # Image get is same as header get\n assert_array_almost_equal(img.get_qform(), new_affine)\n # Coded version of get gets same information\n qaff, code = img.get_qform(coded=True)\n assert_equal(code, 1)\n assert_array_almost_equal(qaff, new_affine)\n # Image affine now reset to best affine (which is sform)\n assert_array_almost_equal(img.get_affine(), hdr.get_best_affine())\n # Reset image affine and try update_affine == False\n img.get_affine()[:] = 
aff_affine\n img.set_qform(new_affine, 1, update_affine=False)\n assert_array_almost_equal(img.get_affine(), aff_affine)\n # Clear qform using None, zooms unchanged\n assert_array_almost_equal(hdr.get_zooms(), [1.1, 1.1, 1.1])\n img.set_qform(None)\n qaff, code = img.get_qform(coded=True)\n assert_equal((qaff, code), (None, 0))\n assert_array_almost_equal(hdr.get_zooms(), [1.1, 1.1, 1.1])\n # Best affine similarly\n assert_array_almost_equal(img.get_affine(), hdr.get_best_affine())\n # If sform is not set, qform should update affine\n img.set_sform(None)\n img.set_qform(new_affine, 1)\n qaff, code = img.get_qform(coded=True)\n assert_equal(code, 1)\n assert_array_almost_equal(img.get_affine(), new_affine)\n new_affine[0, 1] = 2\n # If affine has has shear, should raise Error if strip_shears=False\n img.set_qform(new_affine, 2)\n assert_raises(HeaderDataError, img.set_qform, new_affine, 2, False)\n # Unexpected keyword raises error\n assert_raises(TypeError, img.get_qform, strange=True)\n\n def test_set_sform(self):\n orig_aff = np.diag([2.2, 3.3, 4.3, 1])\n img = self.image_class(np.zeros((2,3,4)), orig_aff)\n hdr = img.get_header()\n new_affine = np.diag([1.1, 1.1, 1.1, 1])\n qform_affine = np.diag([1.2, 1.2, 1.2, 1])\n # Reset image affine to something different again\n aff_affine = np.diag([3.3, 4.5, 6.6, 1])\n img.get_affine()[:] = aff_affine\n assert_array_almost_equal(img.get_affine(), aff_affine)\n # Sform, Qform codes are 'aligned', 'unknown' by default\n assert_equal((hdr['sform_code'], hdr['qform_code']), (2, 0))\n # Set sform using new_affine when qform is 0\n img.set_sform(new_affine, 1)\n assert_equal(hdr['sform_code'], 1)\n assert_array_almost_equal(hdr.get_sform(), new_affine)\n # Image get is same as header get\n assert_array_almost_equal(img.get_sform(), new_affine)\n # Coded version gives same result\n saff, code = img.get_sform(coded=True)\n assert_equal(code, 1)\n assert_array_almost_equal(saff, new_affine)\n # Because we've reset the sform with update_affine, the affine changes\n assert_array_almost_equal(img.get_affine(), hdr.get_best_affine())\n # Reset image affine and try update_affine == False\n img.get_affine()[:] = aff_affine\n img.set_sform(new_affine, 1, update_affine=False)\n assert_array_almost_equal(img.get_affine(), aff_affine)\n # zooms do not get updated when qform is 0\n assert_array_almost_equal(img.get_qform(), orig_aff)\n assert_array_almost_equal(hdr.get_zooms(), [2.2, 3.3, 4.3])\n img.set_qform(None)\n assert_array_almost_equal(hdr.get_zooms(), [2.2, 3.3, 4.3])\n # Set sform using new_affine when qform is set\n img.set_qform(qform_affine, 1)\n img.set_sform(new_affine, 1)\n saff, code = img.get_sform(coded=True)\n assert_equal(code, 1)\n assert_array_almost_equal(saff, new_affine)\n assert_array_almost_equal(img.get_affine(), new_affine)\n # zooms follow qform\n assert_array_almost_equal(hdr.get_zooms(), [1.2, 1.2, 1.2])\n # Clear sform using None, best_affine should fall back on qform\n img.set_sform(None)\n assert_equal(hdr['sform_code'], 0)\n assert_equal(hdr['qform_code'], 1)\n # Sform holds previous affine from last set\n assert_array_almost_equal(hdr.get_sform(), saff)\n # Image affine follows qform\n assert_array_almost_equal(img.get_affine(), qform_affine)\n assert_array_almost_equal(hdr.get_best_affine(), img.get_affine())\n # Unexpected keyword raises error\n assert_raises(TypeError, img.get_sform, strange=True)\n\n def test_hdr_diff(self):\n # Check an offset beyond data does not raise an error\n img = self.image_class(np.zeros((2,3,4)), 
np.eye(4))\n ext = dict(img.files_types)['image']\n hdr = img.get_header()\n hdr['vox_offset'] = 400\n with InTemporaryDirectory():\n img.to_filename('another_file' + ext)\n\n\nclass TestNifti1Pair(TestNifti1Image):\n # Run analyze-flavor spatialimage tests\n image_class = Nifti1Pair\n\n\ndef test_datatypes():\n hdr = Nifti1Header()\n for code in data_type_codes.value_set():\n dt = data_type_codes.type[code]\n if dt == np.void:\n continue\n hdr.set_data_dtype(code)\n (assert_equal,\n hdr.get_data_dtype(),\n data_type_codes.dtype[code])\n # Check that checks also see new datatypes\n hdr.set_data_dtype(np.complex128)\n hdr.check_fix()\n\n\ndef test_quaternion():\n hdr = Nifti1Header()\n hdr['quatern_b'] = 0\n hdr['quatern_c'] = 0\n hdr['quatern_d'] = 0\n assert_true(np.allclose(hdr.get_qform_quaternion(), [1.0, 0, 0, 0]))\n hdr['quatern_b'] = 1\n hdr['quatern_c'] = 0\n hdr['quatern_d'] = 0\n assert_true(np.allclose(hdr.get_qform_quaternion(), [0, 1, 0, 0]))\n # Check threshold set correctly for float32\n hdr['quatern_b'] = 1+np.finfo(np.float32).eps\n assert_array_almost_equal(hdr.get_qform_quaternion(), [0, 1, 0, 0])\n\n\ndef test_qform():\n # Test roundtrip case\n ehdr = Nifti1Header()\n ehdr.set_qform(A)\n qA = ehdr.get_qform()\n assert_true, np.allclose(A, qA, atol=1e-5)\n assert_true, np.allclose(Z, ehdr['pixdim'][1:4])\n xfas = nifti1.xform_codes\n assert_true, ehdr['qform_code'] == xfas['aligned']\n ehdr.set_qform(A, 'scanner')\n assert_true, ehdr['qform_code'] == xfas['scanner']\n ehdr.set_qform(A, xfas['aligned'])\n assert_true, ehdr['qform_code'] == xfas['aligned']\n\n\ndef test_sform():\n # Test roundtrip case\n ehdr = Nifti1Header()\n ehdr.set_sform(A)\n sA = ehdr.get_sform()\n assert_true, np.allclose(A, sA, atol=1e-5)\n xfas = nifti1.xform_codes\n assert_true, ehdr['sform_code'] == xfas['aligned']\n ehdr.set_sform(A, 'scanner')\n assert_true, ehdr['sform_code'] == xfas['scanner']\n ehdr.set_sform(A, xfas['aligned'])\n assert_true, ehdr['sform_code'] == xfas['aligned']\n\n\ndef test_dim_info():\n ehdr = Nifti1Header()\n assert_true(ehdr.get_dim_info() == (None, None, None))\n for info in ((0,2,1),\n (None, None, None),\n (0,2,None),\n (0,None,None),\n (None,2,1),\n (None, None,1),\n ):\n ehdr.set_dim_info(*info)\n assert_true(ehdr.get_dim_info() == info)\n\n\ndef test_slice_times():\n hdr = Nifti1Header()\n # error if slice dimension not specified\n assert_raises(HeaderDataError, hdr.get_slice_times)\n hdr.set_dim_info(slice=2)\n # error if slice dimension outside shape\n assert_raises(HeaderDataError, hdr.get_slice_times)\n hdr.set_data_shape((1, 1, 7))\n # error if slice duration not set\n assert_raises(HeaderDataError, hdr.get_slice_times) \n hdr.set_slice_duration(0.1)\n # We need a function to print out the Nones and floating point\n # values in a predictable way, for the tests below.\n _stringer = lambda val: val is not None and '%2.1f' % val or None\n _print_me = lambda s: map(_stringer, s)\n #The following examples are from the nifti1.h documentation.\n hdr['slice_code'] = slice_order_codes['sequential increasing']\n assert_equal(_print_me(hdr.get_slice_times()), \n ['0.0', '0.1', '0.2', '0.3', '0.4',\n '0.5', '0.6'])\n hdr['slice_start'] = 1\n hdr['slice_end'] = 5\n assert_equal(_print_me(hdr.get_slice_times()),\n [None, '0.0', '0.1', '0.2', '0.3', '0.4', None])\n hdr['slice_code'] = slice_order_codes['sequential decreasing']\n assert_equal(_print_me(hdr.get_slice_times()),\n [None, '0.4', '0.3', '0.2', '0.1', '0.0', None])\n hdr['slice_code'] = 
slice_order_codes['alternating increasing']\n assert_equal(_print_me(hdr.get_slice_times()),\n [None, '0.0', '0.3', '0.1', '0.4', '0.2', None])\n hdr['slice_code'] = slice_order_codes['alternating decreasing']\n assert_equal(_print_me(hdr.get_slice_times()),\n [None, '0.2', '0.4', '0.1', '0.3', '0.0', None])\n hdr['slice_code'] = slice_order_codes['alternating increasing 2']\n assert_equal(_print_me(hdr.get_slice_times()),\n [None, '0.2', '0.0', '0.3', '0.1', '0.4', None])\n hdr['slice_code'] = slice_order_codes['alternating decreasing 2']\n assert_equal(_print_me(hdr.get_slice_times()),\n [None, '0.4', '0.1', '0.3', '0.0', '0.2', None])\n # test set\n hdr = Nifti1Header()\n hdr.set_dim_info(slice=2)\n # need slice dim to correspond with shape\n times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None]\n assert_raises(HeaderDataError, hdr.set_slice_times, times)\n hdr.set_data_shape([1, 1, 7])\n assert_raises(HeaderDataError,\n hdr.set_slice_times,\n times[:-1]) # wrong length\n assert_raises(HeaderDataError,\n hdr.set_slice_times,\n (None,) * len(times)) # all None\n n_mid_times = times[:]\n n_mid_times[3] = None\n assert_raises(HeaderDataError,\n hdr.set_slice_times,\n n_mid_times) # None in middle\n funny_times = times[:]\n funny_times[3] = 0.05\n assert_raises(HeaderDataError,\n hdr.set_slice_times,\n funny_times) # can't get single slice duration\n hdr.set_slice_times(times)\n assert_equal(hdr.get_value_label('slice_code'),\n 'alternating decreasing')\n assert_equal(hdr['slice_start'], 1)\n assert_equal(hdr['slice_end'], 5)\n assert_array_almost_equal(hdr['slice_duration'], 0.1)\n\n\ndef test_intents():\n ehdr = Nifti1Header()\n ehdr.set_intent('t test', (10,), name='some score')\n assert_equal(ehdr.get_intent(),\n ('t test', (10.0,), 'some score'))\n # invalid intent name\n assert_raises(KeyError,\n ehdr.set_intent, 'no intention')\n # too many parameters\n assert_raises(HeaderDataError,\n ehdr.set_intent,\n 't test', (10,10))\n # too few parameters\n assert_raises(HeaderDataError,\n ehdr.set_intent,\n 'f test', (10,))\n # check unset parameters are set to 0, and name to ''\n ehdr.set_intent('t test')\n assert_equal((ehdr['intent_p1'],\n ehdr['intent_p2'],\n ehdr['intent_p3']), (0,0,0))\n assert_equal(ehdr['intent_name'], asbytes(''))\n ehdr.set_intent('t test', (10,))\n assert_equal((ehdr['intent_p2'],\n ehdr['intent_p3']), (0,0))\n\n\ndef test_set_slice_times():\n hdr = Nifti1Header()\n hdr.set_dim_info(slice=2)\n hdr.set_data_shape([1, 1, 7])\n hdr.set_slice_duration(0.1)\n times = [0] * 6\n assert_raises(HeaderDataError, hdr.set_slice_times, times)\n times = [None] * 7\n assert_raises(HeaderDataError, hdr.set_slice_times, times)\n times = [None, 0, 1, None, 3, 4, None]\n assert_raises(HeaderDataError, hdr.set_slice_times, times)\n times = [None, 0, 1, 2.1, 3, 4, None]\n assert_raises(HeaderDataError, hdr.set_slice_times, times)\n times = [None, 0, 4, 3, 2, 1, None]\n assert_raises(HeaderDataError, hdr.set_slice_times, times)\n times = [0, 1, 2, 3, 4, 5, 6]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 1)\n assert_equal(hdr['slice_start'], 0)\n assert_equal(hdr['slice_end'], 6)\n assert_equal(hdr['slice_duration'], 1.0)\n times = [None, 0, 1, 2, 3, 4, None]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 1)\n assert_equal(hdr['slice_start'], 1)\n assert_equal(hdr['slice_end'], 5)\n assert_equal(hdr['slice_duration'], 1.0)\n times = [None, 0.4, 0.3, 0.2, 0.1, 0, None]\n hdr.set_slice_times(times)\n assert_true(np.allclose(hdr['slice_duration'], 0.1))\n times = 
[None, 4, 3, 2, 1, 0, None]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 2)\n times = [None, 0, 3, 1, 4, 2, None]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 3)\n times = [None, 2, 4, 1, 3, 0, None]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 4)\n times = [None, 2, 0, 3, 1, 4, None]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 5)\n times = [None, 4, 1, 3, 0, 2, None]\n hdr.set_slice_times(times)\n assert_equal(hdr['slice_code'], 6)\n\n\ndef test_nifti1_images():\n shape = (2, 4, 6)\n npt = np.float32\n data = np.arange(np.prod(shape), dtype=npt).reshape(shape)\n affine = np.diag([1, 2, 3, 1])\n img = Nifti1Image(data, affine)\n assert_equal(img.shape, shape)\n img.set_data_dtype(npt)\n stio = BytesIO()\n img.file_map['image'].fileobj = stio\n img.to_file_map()\n img2 = Nifti1Image.from_file_map(img.file_map)\n assert_array_equal(img2.get_data(), data)\n with InTemporaryDirectory() as tmpdir:\n for ext in ('.gz', '.bz2'):\n fname = os.path.join(tmpdir, 'test.nii' + ext)\n img.to_filename(fname)\n img3 = Nifti1Image.load(fname)\n assert_true(isinstance(img3, img.__class__))\n assert_array_equal(img3.get_data(), data)\n assert_equal(img3.get_header(), img.get_header())\n # del to avoid windows errors of form 'The process cannot\n # access the file because it is being used'\n del img3\n\n\ndef test_extension_basics():\n raw = '123'\n ext = Nifti1Extension('comment', raw)\n assert_true(ext.get_sizeondisk() == 16)\n assert_true(ext.get_content() == raw)\n assert_true(ext.get_code() == 6)\n\n\ndef test_ext_eq():\n ext = Nifti1Extension('comment', '123')\n assert_true(ext == ext)\n assert_false(ext != ext)\n ext2 = Nifti1Extension('comment', '124')\n assert_false(ext == ext2)\n assert_true(ext != ext2)\n\n\ndef test_extension_codes():\n for k in extension_codes.keys():\n ext = Nifti1Extension(k, 'somevalue')\n\n\ndef test_extension_list():\n ext_c0 = Nifti1Extensions()\n ext_c1 = Nifti1Extensions()\n assert_equal(ext_c0, ext_c1)\n ext = Nifti1Extension('comment', '123')\n ext_c1.append(ext)\n assert_false(ext_c0 == ext_c1)\n ext_c0.append(ext)\n assert_true(ext_c0 == ext_c1)\n\n\ndef test_nifti_extensions():\n nim = load(image_file)\n # basic checks of the available extensions\n hdr = nim.get_header()\n exts_container = hdr.extensions\n assert_equal(len(exts_container), 2)\n assert_equal(exts_container.count('comment'), 2)\n assert_equal(exts_container.count('afni'), 0)\n assert_equal(exts_container.get_codes(), [6, 6])\n assert_equal((exts_container.get_sizeondisk()) % 16, 0)\n # first extension should be short one\n assert_equal(exts_container[0].get_content(), asbytes('extcomment1'))\n # add one\n afniext = Nifti1Extension('afni', '<xml></xml>')\n exts_container.append(afniext)\n assert_true(exts_container.get_codes() == [6, 6, 4])\n assert_true(exts_container.count('comment') == 2)\n assert_true(exts_container.count('afni') == 1)\n assert_true((exts_container.get_sizeondisk()) % 16 == 0)\n # delete one\n del exts_container[1]\n assert_true(exts_container.get_codes() == [6, 4])\n assert_true(exts_container.count('comment') == 1)\n assert_true(exts_container.count('afni') == 1)\n\n\ndef test_loadsave_cycle():\n nim = load(image_file)\n # ensure we have extensions\n hdr = nim.get_header()\n exts_container = hdr.extensions\n assert_true(len(exts_container) > 0)\n # write into the air ;-)\n stio = BytesIO()\n nim.file_map['image'].fileobj = stio\n nim.to_file_map()\n stio.seek(0)\n # reload\n lnim = 
Nifti1Image.from_file_map(nim.file_map)\n hdr = lnim.get_header()\n lexts_container = hdr.extensions\n assert_equal(exts_container,\n lexts_container)\n # build int16 image\n data = np.ones((2,3,4,5), dtype='int16')\n img = Nifti1Image(data, np.eye(4))\n hdr = img.get_header()\n assert_equal(hdr.get_data_dtype(), np.int16)\n # default should have no scaling\n assert_equal(hdr.get_slope_inter(), (1.0, 0.0))\n # set scaling\n hdr.set_slope_inter(2, 8)\n assert_equal(hdr.get_slope_inter(), (2, 8))\n # now build new image with updated header\n wnim = Nifti1Image(data, np.eye(4), header=hdr)\n assert_equal(wnim.get_data_dtype(), np.int16)\n assert_equal(wnim.get_header().get_slope_inter(), (2, 8))\n # write into the air again ;-)\n stio = BytesIO()\n wnim.file_map['image'].fileobj = stio\n wnim.to_file_map()\n stio.seek(0)\n lnim = Nifti1Image.from_file_map(wnim.file_map)\n assert_equal(lnim.get_data_dtype(), np.int16)\n # the test below does not pass, because the slope and inter are\n # always reset from the data, by the image write\n raise SkipTest\n assert_equal(lnim.get_header().get_slope_inter(), (2, 8))\n\n\ndef test_slope_inter():\n hdr = Nifti1Header()\n assert_equal(hdr.get_slope_inter(), (1.0, 0.0))\n for intup, outup in (((2.0,), (2.0, 0.0)),\n ((None,), (None, None)),\n ((3.0, None), (3.0, 0.0)),\n ((0.0, None), (None, None)),\n ((None, 0.0), (None, None)),\n ((None, 3.0), (None, None)),\n ((2.0, 3.0), (2.0, 3.0))):\n hdr.set_slope_inter(*intup)\n assert_equal(hdr.get_slope_inter(), outup)\n # Check set survives through checking\n hdr = Nifti1Header.from_header(hdr, check=True)\n assert_equal(hdr.get_slope_inter(), outup)\n\n\ndef test_xyzt_units():\n hdr = Nifti1Header()\n assert_equal(hdr.get_xyzt_units(), ('unknown', 'unknown'))\n hdr.set_xyzt_units('mm', 'sec')\n assert_equal(hdr.get_xyzt_units(), ('mm', 'sec'))\n hdr.set_xyzt_units()\n assert_equal(hdr.get_xyzt_units(), ('unknown', 'unknown'))\n\n\ndef test_recoded_fields():\n hdr = Nifti1Header()\n assert_equal(hdr.get_value_label('qform_code'), 'unknown')\n hdr['qform_code'] = 3\n assert_equal(hdr.get_value_label('qform_code'), 'talairach')\n assert_equal(hdr.get_value_label('sform_code'), 'unknown')\n hdr['sform_code'] = 3\n assert_equal(hdr.get_value_label('sform_code'), 'talairach')\n assert_equal(hdr.get_value_label('intent_code'), 'none')\n hdr.set_intent('t test', (10,), name='some score')\n assert_equal(hdr.get_value_label('intent_code'), 't test')\n assert_equal(hdr.get_value_label('slice_code'), 'unknown')\n hdr['slice_code'] = 4 # alternating decreasing\n assert_equal(hdr.get_value_label('slice_code'),\n 'alternating decreasing')\n\n\ndef test_load():\n # test module level load. 
We try to load a nii and an .img and a .hdr and\n # expect to get a nifti back of single or pair type\n arr = np.arange(24).reshape((2,3,4))\n aff = np.diag([2, 3, 4, 1])\n simg = Nifti1Image(arr, aff)\n pimg = Nifti1Pair(arr, aff)\n with InTemporaryDirectory():\n nifti1.save(simg, 'test.nii')\n assert_array_equal(arr, nifti1.load('test.nii').get_data())\n nifti1.save(simg, 'test.img')\n assert_array_equal(arr, nifti1.load('test.img').get_data())\n nifti1.save(simg, 'test.hdr')\n assert_array_equal(arr, nifti1.load('test.hdr').get_data())\n\n\ndef test_load_pixdims():\n # Make sure load preserves separate qform, pixdims, sform\n arr = np.arange(24).reshape((2,3,4))\n qaff = np.diag([2, 3, 4, 1])\n saff = np.diag([5, 6, 7, 1])\n hdr = Nifti1Header()\n hdr.set_qform(qaff)\n assert_array_equal(hdr.get_qform(), qaff)\n hdr.set_sform(saff)\n assert_array_equal(hdr.get_sform(), saff)\n simg = Nifti1Image(arr, None, hdr)\n img_hdr = simg.get_header()\n # Check qform, sform, pixdims are the same\n assert_array_equal(img_hdr.get_qform(), qaff)\n assert_array_equal(img_hdr.get_sform(), saff)\n assert_array_equal(img_hdr.get_zooms(), [2,3,4])\n # Save to stringio\n fm = Nifti1Image.make_file_map()\n fm['image'].fileobj = BytesIO()\n simg.to_file_map(fm)\n # Load again\n re_simg = Nifti1Image.from_file_map(fm)\n assert_array_equal(re_simg.get_data(), arr)\n # Check qform, sform, pixdims are the same\n rimg_hdr = re_simg.get_header()\n assert_array_equal(rimg_hdr.get_qform(), qaff)\n assert_array_equal(rimg_hdr.get_sform(), saff)\n assert_array_equal(rimg_hdr.get_zooms(), [2,3,4])\n\n\ndef test_affines_init():\n # Test we are doing vaguely spec-related qform things. The 'spec' here is\n # some thoughts by Mark Jenkinson:\n # http://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/qsform_brief_usage\n arr = np.arange(24).reshape((2,3,4))\n aff = np.diag([2, 3, 4, 1])\n # Default is sform set, qform not set\n img = Nifti1Image(arr, aff)\n hdr = img.get_header()\n assert_equal(hdr['qform_code'], 0)\n assert_equal(hdr['sform_code'], 2)\n assert_array_equal(hdr.get_zooms(), [2, 3, 4])\n # This is also true for affines with header passed\n qaff = np.diag([3, 4, 5, 1])\n saff = np.diag([6, 7, 8, 1])\n hdr.set_qform(qaff, code='scanner')\n hdr.set_sform(saff, code='talairach')\n assert_array_equal(hdr.get_zooms(), [3, 4, 5])\n img = Nifti1Image(arr, aff, hdr)\n new_hdr = img.get_header()\n # Again affine is sort of anonymous space\n assert_equal(new_hdr['qform_code'], 0)\n assert_equal(new_hdr['sform_code'], 2)\n assert_array_equal(new_hdr.get_sform(), aff)\n assert_array_equal(new_hdr.get_zooms(), [2, 3, 4])\n # But if no affine passed, codes and matrices stay the same\n img = Nifti1Image(arr, None, hdr)\n new_hdr = img.get_header()\n assert_equal(new_hdr['qform_code'], 1) # scanner\n assert_array_equal(new_hdr.get_qform(), qaff)\n assert_equal(new_hdr['sform_code'], 3) # Still talairach\n assert_array_equal(new_hdr.get_sform(), saff)\n # Pixdims as in the original header\n assert_array_equal(new_hdr.get_zooms(), [3, 4, 5])\n\n\ndef round_trip(img):\n stio = BytesIO()\n img.file_map['image'].fileobj = stio\n img.to_file_map()\n return Nifti1Image.from_file_map(img.file_map)\n\n\ndef test_float_int_min_max():\n # Conversion between float and int\n # Parallel test to arraywriters\n aff = np.eye(4)\n for in_dt in (np.float32, np.float64):\n finf = type_info(in_dt)\n arr = np.array([finf['min'], finf['max']], dtype=in_dt)\n for out_dt in IUINT_TYPES:\n img = Nifti1Image(arr, aff)\n 
img_back = round_trip(img)\n arr_back_sc = img_back.get_data()\n assert_true(np.allclose(arr, arr_back_sc))\n\n\ndef test_float_int_spread():\n # Test rounding error for spread of values\n # Parallel test to arraywriters\n powers = np.arange(-10, 10, 0.5)\n arr = np.concatenate((-10**powers, 10**powers))\n aff = np.eye(4)\n for in_dt in (np.float32, np.float64):\n arr_t = arr.astype(in_dt)\n for out_dt in IUINT_TYPES:\n img = Nifti1Image(arr_t, aff)\n img_back = round_trip(img)\n arr_back_sc = img_back.get_data()\n slope, inter = img_back.get_header().get_slope_inter()\n # Get estimate for error\n max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter)\n # Simulate allclose test with large atol\n diff = np.abs(arr_t - arr_back_sc)\n rdiff = diff / np.abs(arr_t)\n assert_true(np.all((diff <= max_miss) | (rdiff <= 1e-5)))\n\n\ndef test_rt_bias():\n # Check for bias in round trip\n # Parallel test to arraywriters\n rng = np.random.RandomState(20111214)\n mu, std, count = 100, 10, 100\n arr = rng.normal(mu, std, size=(count,))\n eps = np.finfo(np.float32).eps\n aff = np.eye(4)\n for in_dt in (np.float32, np.float64):\n arr_t = arr.astype(in_dt)\n for out_dt in IUINT_TYPES:\n img = Nifti1Image(arr_t, aff)\n img_back = round_trip(img)\n arr_back_sc = img_back.get_data()\n slope, inter = img_back.get_header().get_slope_inter()\n bias = np.mean(arr_t - arr_back_sc)\n # Get estimate for error\n max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter)\n # Hokey use of max_miss as a std estimate\n bias_thresh = np.max([max_miss / np.sqrt(count), eps])\n assert_true(np.abs(bias) < bias_thresh)\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.concatenate",
"numpy.all",
"numpy.mean",
"numpy.iinfo",
"numpy.linalg.svd",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"numpy.finfo",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.abs",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
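The numpy calls listed above are the ones the NIfTI round-trip tests lean on: data is written as scaled integers and recovered via a slope/intercept, and the tests check the recovery error against an estimate. A minimal sketch of that scaling idea in plain numpy (the helper and the half-step bound here are illustrative, not nibabel's own `rt_err_estimate`):

```python
import numpy as np

def scale_round_trip(arr, out_dtype=np.int16):
    # Map floats onto the integer range with a slope/intercept, then recover them,
    # mimicking the scaled-integer storage the tests above exercise (illustrative only).
    info = np.iinfo(out_dtype)
    slope = (arr.max() - arr.min()) / (info.max - info.min)
    inter = arr.min() - info.min * slope
    stored = np.round((arr - inter) / slope).astype(out_dtype)  # what would go on disk
    recovered = stored * slope + inter                          # what a reader gets back
    return recovered, slope, inter

rng = np.random.RandomState(20111214)
arr = rng.normal(100, 10, size=(100,))
back, slope, inter = scale_round_trip(arr)
# Quantisation error is bounded by half a scaling step.
assert np.all(np.abs(arr - back) <= slope / 2 + 1e-12)
```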
mavrix93/LightCurvesClassifier | [
"a0a51f033cb8adf45296913f0de0aa2568e0530c",
"a0a51f033cb8adf45296913f0de0aa2568e0530c"
] | [
"lcc_web/web/interface/helpers.py",
"test/stars_processing/test_params_estim.py"
] | [
"import ast\nimport copy\nimport datetime\nimport json\nimport logging\nimport os\nimport shutil\nimport traceback\nimport warnings\nfrom io import StringIO\n\nimport numpy as np\nfrom astropy.io import fits\nfrom django.conf import settings\nfrom interface.models import StarsFilter\n\nfrom lcc.cli.input_parse import parse_query_ranges\nfrom lcc.cli.input_parse import parse_tun_query\nfrom lcc.db_tier.connectors.file_manager import FileManager\nfrom lcc.entities.exceptions import QueryInputError\nfrom lcc.entities.star import Star\nfrom lcc.stars_processing.systematic_search.stars_searcher import StarsSearcherRedis\nfrom lcc.stars_processing.tools.params_estim import ParamsEstimator\nfrom lcc.stars_processing.tools.stats_manager import StatsManager\nfrom lcc.stars_processing.tools.visualization import plotProbabSpace\nfrom lcc.utils.helpers import create_folder\nfrom lcc.utils.helpers import get_arguments\nfrom lcc.utils.helpers import get_combinations\nfrom lcc.utils.output_process_modules import saveIntoFile\nfrom lcc.utils.stars import get_stars_dict\n\nMAX_SAMPLES = 100\n\n\ndef get_queries_from_df(df):\n keys = df.columns.values.tolist()\n queries = []\n for row in df.values:\n queries.append(dict(zip(keys, row)))\n return queries\n\n\ndef make_data_file(estim, filt_id, data_path, N=None, xmax=None, xmin=None, ymax=None, ymin=None, max_samples=100):\n filt = estim.filters[filt_id]\n\n searched_test_coo = filt.getSpaceCoordinates(estim.searched_test)\n others_test_coo = filt.getSpaceCoordinates(estim.others_test)\n searched_train_coo = filt.getSpaceCoordinates(estim.searched_train)\n others_train_coo = filt.getSpaceCoordinates(estim.others_train)\n\n if len(searched_test_coo) > max_samples:\n searched_test_coo = searched_test_coo.sample(max_samples)\n\n if len(others_test_coo) > max_samples:\n others_test_coo = others_test_coo.sample(max_samples)\n\n if len(searched_train_coo) > max_samples:\n searched_train_coo = searched_train_coo.sample(max_samples)\n\n if len(others_train_coo) > max_samples:\n others_train_coo = others_train_coo.sample(max_samples)\n\n stars_coo = [searched_test_coo, searched_train_coo,\n others_test_coo, others_train_coo]\n\n searched_test_ind = searched_test_coo.index\n others_test_ind = others_test_coo.index\n searched_train_ind = searched_train_coo.index\n others_train_ind = others_train_coo.index\n stars_ind = [searched_test_ind, searched_train_ind,\n others_test_ind, others_train_ind]\n # TODO !!!\n coo_data = [np.transpose(st_coo.values).tolist()\n for st_coo in stars_coo]\n\n if coo_data:\n OVERLAY = 0.2\n dim = len(coo_data[0])\n plot_ranges = []\n for ll in range(dim):\n x_max = np.max([np.max(c[ll]) for c in coo_data])\n x_min = np.min([np.min(c[ll]) for c in coo_data])\n x_overlay = (x_max - x_min) * OVERLAY\n plot_ranges.append([x_min - x_overlay, x_max + x_overlay])\n if dim == 2:\n if xmax:\n try:\n plot_ranges[0][1] = float(xmax)\n except (ValueError, TypeError):\n pass\n if xmin:\n try:\n plot_ranges[0][0] = float(xmin)\n except (ValueError, TypeError):\n pass\n if ymax:\n try:\n plot_ranges[1][1] = float(ymax)\n except (ValueError, TypeError):\n pass\n if ymin:\n try:\n plot_ranges[1][0] = float(ymin)\n except (ValueError, TypeError):\n pass\n\n else:\n plot_ranges = None\n\n try:\n N = int(N)\n except (ValueError, TypeError):\n N = None\n\n if not N:\n N = 300\n\n _probab_space = plotProbabSpace(filt, plot_ranges=plot_ranges, opt=\"return\", N=N)\n if len(_probab_space) == 4:\n pca = _probab_space[-1]\n orig_coo_data = copy.deepcopy(coo_data)\n 
coo_data = [np.transpose(pca.transform(np.transpose(this_coo))).tolist() for this_coo in coo_data]\n r = -1\n else:\n orig_coo_data = None\n r = None\n\n probab_space = [q.tolist() for q in _probab_space[:r]]\n probab_plot_title = \"Probability plot (\" + \", \".join(\n [dec.__class__.__name__ for dec in filt.deciders]) + \")\"\n\n lcs = []\n star_labels = []\n id_labels = []\n\n stars_dict = get_stars_dict(estim.searched_train + estim.others_train + estim.searched_test + estim.others_test)\n for st_group in stars_ind:\n this_star_labels = []\n for st_ind in st_group:\n st = stars_dict.get(st_ind)\n if not st:\n break\n stkeys = list(st.more.keys())\n stval = list(st.more.values())\n\n l = st.name + \"<br>\"\n if len(stkeys) >= 3:\n l += \"\\t|\\t\".join(stkeys[:3]) + \"<br>\" + \\\n \"\\t|\\t\".join([str(x) for x in stval[:3]])\n\n l += \"<br>\" + \"<br>\" + \\\n \"\\t|\\t\".join(stkeys[3:]) + \"<br>\" + \\\n \"\\t|\\t\".join([str(x) for x in stval[3:]])\n\n elif len(stkeys) != 0:\n l += \"\\t|\\t\".join(stkeys) + \"<br>\" + \\\n \"\\t|\\t\".join([str(x) for x in stval])\n\n this_star_labels.append(str(l))\n\n id_labels.append(str(st.name))\n if st.lightCurve:\n lcs.append(\n [st.lightCurve.time.tolist(), st.lightCurve.mag.tolist(), st.lightCurve.err.tolist()])\n else:\n lcs.append([[], [], []])\n star_labels.append(this_star_labels)\n\n view_data = {\"probab_data\": probab_space,\n \"probab_plot_title\": probab_plot_title,\n \"coo_data\": coo_data,\n \"space_coords\" : orig_coo_data,\n \"zeroes\": [[0 for _ in coo_data[i][0]] for i in range(4)],\n \"lcs\": lcs,\n \"point_labels\": star_labels,\n \"labels\": id_labels}\n\n with open(data_path, 'w') as outfile:\n json.dump(view_data, outfile, default=json_numpy_default)\n\n\ndef create_filter(searched_stars, contamination_stars, descriptors,\n deciders, tuned_params, static_params, job, split_ratio, user_id):\n\n create_folder(\n os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\", str(job.id)))\n\n # TODO map to db, no status file\n try:\n info = {\"searched_stars\": \", \".join([st.name for st in searched_stars]),\n \"contamination_stars\": \", \".join([st.name for st in contamination_stars]),\n \"descriptors\": \", \".join([desc.__name__ for desc in descriptors]),\n \"deciders\": \", \".join([desc.__name__ for desc in deciders]),\n \"tuned_params_num\": len(tuned_params),\n \"start\": str(datetime.datetime.now())[:-5],\n \"finish\": \"\"}\n\n job.status = \"Running\"\n job.save()\n\n job_id = str(job.id)\n\n with open(os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\", job_id, 'info.json'), 'w') as outfile:\n json.dump(info, outfile, default=json_numpy_default)\n \n logging.debug(\"Params estimator params\")\n logging.debug(\"Searching sample: %s\" % searched_stars)\n logging.debug(\"Contamin sample: %s\" % contamination_stars)\n logging.debug(\"Descriptors: %s\" % descriptors)\n logging.debug(\"Deciders: %s\" % deciders)\n logging.debug(\"Tuned params: %s\" % tuned_params)\n logging.debug(\"Static params: %s\" % static_params)\n \n estim = ParamsEstimator(searched=searched_stars,\n others=contamination_stars,\n descriptors=descriptors,\n deciders=deciders,\n tuned_params=tuned_params,\n static_params=static_params,\n split_ratio=split_ratio,\n multiproc=False)\n\n filt, best_stats, best_params = estim.fit()\n\n job.status = \"Done\"\n job.finish_date = datetime.datetime.utcnow()\n job.save()\n\n stats = estim.stats\n roc = StatsManager(stats).getROC()\n x = [_x.tolist() for _x in roc[0]]\n y = [_x.tolist() for _x in 
roc[1]]\n roc = [x, y]\n\n axis = filt.searched_coords.columns.tolist()\n\n if len(axis) == 2 and axis[1] == \"\":\n axis = [axis[0].replace(\",\", \"<br>\"), \"\"]\n\n coo_plot_labels = [\"Searched test sample\", \"Searched train sample\",\n \"Contamination test sample\", \"Contamination train sample\"]\n coo_plot_axis = axis\n\n coo_plot_title = \"\"\n\n stat_table = [list(stats[0].keys())] + roundNumbers(stats)\n\n probab_plot_axis = axis\n\n filt_name = \"_\".join([desc.__name__ for desc in descriptors])\n saveIntoFile(\n filt, os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\", job_id), filt_name + \".filter\")\n\n saveIntoFile(\n estim, os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\", job_id), \"estimator\")\n\n view_data = {\n \"probab_plot_axis\": probab_plot_axis,\n \"coo_plot_labels\": coo_plot_labels,\n \"coo_plot_title\": coo_plot_title,\n \"coo_plot_axis\": coo_plot_axis,\n \"roc_data\": roc,\n \"rows\": stat_table,\n \"best_id\": int(estim.best_id),\n \"job_id\": int(job_id),\n \"filt_path\": \"filt\" + str(job_id)}\n\n with open(os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\", job_id, 'data.json'), 'w') as outfile:\n json.dump(view_data, outfile, default=json_numpy_default)\n\n make_data_file(estim, estim.best_id, os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\",\n job_id, 'data_%i.json' % estim.best_id), max_samples=MAX_SAMPLES)\n\n job = StarsFilter.objects.get(pk=job_id)\n job.status = \"Done\"\n job.finish_date = datetime.datetime.now()\n job.save()\n\n except Exception as e:\n filename = os.path.join(settings.MEDIA_ROOT, user_id, \"stars_filters\", job_id, \"error.txt\")\n\n with open(filename, \"w\") as fi:\n traceback.print_exc(file=fi)\n\n job = StarsFilter.objects.get(pk=job_id)\n job.status = \"Failed\"\n job.finish_date = datetime.datetime.now()\n job.save()\n\n\ndef query_dbs(all_queries, job_path, job, star_filters=[]):\n\n try:\n for db_name, queries in all_queries.items():\n lcs_path = os.path.join(job_path, db_name, \"lcs\")\n create_folder(job_path)\n create_folder(lcs_path)\n\n searcher = StarsSearcherRedis(star_filters, job_name=\"job:\" + job_path.split('/')[-1], save_path=lcs_path,\n db_connector=db_name, save_coords=True)\n searcher.queryStars(queries)\n searcher._wait_to_done()\n\n\n shutil.make_archive(\n os.path.join(job_path, \"..\", str(job.id)), 'zip', job_path)\n\n job.status = \"Done\"\n job.finish_date = datetime.datetime.now()\n \n except Exception as e:\n job.status = \"Failed\"\n\n err_txt = \"Error occurred:\\n\" + traceback.format_exc()\n\n with open(os.path.join(job_path, \"error.txt\"), \"w\") as f:\n f.write(err_txt)\n\n job.save()\n\n\ndef parse_conn_combs(field_names, all_fields):\n queries = {}\n for field_n, field_v in all_fields.items():\n if field_n in field_names:\n if field_v.startswith(\"`\") and field_v.endswith(\"`\"):\n pyth_expr = ast.literal_eval(field_v[1:-1])\n\n if isinstance(pyth_expr, list):\n queries[field_n] = pyth_expr\n\n elif isinstance(pyth_expr, dict):\n queries[field_n] = [pyth_expr]\n\n else:\n keys, params = _parse_conn_combs(field_v)\n queries[field_n] = get_combinations(keys, *params)\n return queries\n\n\ndef _parse_conn_combs(raw_query, keys_sep=\"\\n\", values_sep=\":\"):\n parts = raw_query.split(keys_sep)\n params = []\n keys = []\n for part in parts:\n part = part.strip()\n if part.startswith(\"`\") and part.endswith(\"`\"):\n this_comb = ast.literal_eval(part[1:-1])\n\n if isinstance(this_comb, dict):\n keys += list(this_comb.keys())\n params += [[v] 
for v in this_comb.values()]\n\n else:\n warnings.warn(\"Query not parsed! %s\" % part)\n\n else:\n ind = part.find(values_sep)\n keys.append(part[:ind])\n this_comb = parse_query_ranges([part[ind + 1:]])[0]\n params.append(this_comb)\n\n return keys, params\n\n\ndef makeDesc(descriptors, _params):\n ready_descriptors = []\n for i, des in enumerate(descriptors):\n try:\n params = _params.get(des.__name__, {})\n\n ready_descriptors.append(des(**params))\n\n except TypeError:\n raise QueryInputError(\"Not enough parameters to construct constructor {0}\\nGot: {1}\".format(\n des.__name__, params))\n\n return ready_descriptors\n\n\ndef roundNumbers(stats):\n x = []\n for row in stats:\n z = []\n for y in row.values():\n try:\n y = round(y, 3)\n except TypeError:\n pass\n z.append(y)\n x.append(z)\n return x\n\n\ndef parse_combinations(keys, raw_dict, split_by=\":\"):\n _header, _params = _parse_combinations(keys, raw_dict, split_by)\n all_params = parse_query_ranges(_params)\n\n static_params = {}\n params = []\n header = []\n for p, head in zip(all_params, _header):\n if len(p) == 1:\n static_params[head] = p[0]\n else:\n params.append(p)\n header.append(head)\n\n combinations = get_combinations(header, *params)\n return parse_tun_query(combinations), parse_tun_query([static_params])[0]\n\n\ndef parse_comp_stars(files_dict):\n params = {}\n for key in files_dict.keys():\n key = key.strip()\n if key.startswith(\"templ_file\"):\n parts = key.split(\":\")\n if len(parts) == 3:\n stars = parse_stars(files_dict.getlist(key))\n _, name, param_name = parts\n\n if name not in params:\n params[name] = {}\n\n params[name][param_name] = stars\n\n return params\n\n\ndef getFields(ident_obj):\n args = get_arguments(list(ident_obj.values()))\n fields = []\n for arg in args:\n text_inputs = []\n name = arg.get(\"name\")\n mand = arg[\"mandatory_params\"]\n defa = arg[\"default_params\"]\n\n text_inputs += zip(mand, [\"\" for _ in mand])\n text_inputs += zip(defa,\n [\"`\" + str(v) + \"`\" for v in arg[\"default_values\"]])\n fields.append([name, text_inputs])\n return fields\n\n\ndef parse_combinations_from_file(fi, delim=\";\"):\n if fi:\n data = []\n for i, chunk in enumerate(fi.read().strip().split(\"\\n\")):\n if chunk.startswith(\"#\"):\n header = chunk[1:].split(delim)\n # TODO: Various delimiters\n else:\n row = {}\n for head, dat in zip(header, chunk.split(delim)):\n row[head] = dat\n\n data.append(row)\n return data\n\n\ndef _parse_combinations(keys, raw_dict, split_by=\":\"):\n \"\"\"\n\n Parameters\n ----------\n keys : list\n Names of objects to tune\n\n raw_dict : dict\n Dictionary of keys to parse. 
For example: {'descriptor:AbbeValueDesc:bins' : 10, ..}\n\n\n split_by : str\n Symbol which divides keys\n\n Returns\n -------\n\n \"\"\"\n header = []\n params = []\n for k, val in raw_dict.items():\n parts = k.split(split_by)\n if len(parts) == 3:\n _, name, param_name = parts\n\n if name.strip() in keys:\n params.append(val)\n header.append(name + split_by + param_name)\n return header, params\n\n\ndef parse_stars(fi):\n stars = []\n for st_fi in fi:\n if st_fi.__dict__.get(\"_name\", \"\").endswith(\".dat\"):\n fi_io = StringIO(st_fi.read())\n lc = FileManager._loadLcFromDat(fi_io)\n st = Star(name=st_fi.__dict__[\"_name\"].split(\".\")[0])\n st.putLightCurve(lc)\n stars.append(st)\n\n else:\n stars.append(FileManager._createStarFromFITS(fits.open(st_fi)))\n return stars\n\n\ndef load_test_stars(path):\n return FileManager({\"path\": path}).getStars()\n\n\ndef json_numpy_default(x):\n\n if isinstance(x, np.int):\n return int(x)\n\n elif isinstance(x, np.float):\n return float(x)\n\n raise TypeError(\"Unserializable object {} of type {}\".format(x, type(x)))\n",
"\"\"\"\nCreated on Jan 25, 2017\n\n@author: Martin Vo\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom lcc.entities.star import Star\nfrom lcc.stars_processing.deciders.supervised_deciders import QDADec\nfrom lcc.stars_processing.descriptors.abbe_value_descr import AbbeValueDescr\nfrom lcc.stars_processing.descriptors.curves_shape_descr import CurvesShapeDescr\nfrom lcc.stars_processing.tools.params_estim import ParamsEstimator\n\n\n# TODO: Need to be fixed\nclass Test(unittest.TestCase):\n\n def setUp(self):\n N = 20\n\n x = np.linspace(0, 10, 100)\n\n self.template = []\n for ii in range(N):\n st = Star(name=\"TemplateStar%i\" % ii)\n st.putLightCurve([x, np.cos(x) + np.random.normal(x) * 0.1])\n self.template.append(st)\n\n self.variables = []\n for ii in range(N):\n st = Star(name=\"VariableStar%i\" % ii)\n st.putLightCurve([x, np.sin(x) + np.random.normal(x) * 0.1])\n self.variables.append(st)\n\n self.noisy = []\n for ii in range(N):\n st = Star(name=\"NonvariableStar%i\" % ii)\n st.putLightCurve([x, np.random.normal(x) * 2])\n self.noisy.append(st)\n\n def testName(self):\n deciders = [QDADec]\n descriptors = [AbbeValueDescr, CurvesShapeDescr]\n static_params = {\"AbbeValueDescr\": {\"bins\": 100},\n \"CurvesShapeDescr\": {\"comp_stars\": self.template}}\n tuned_params = [{\"CurvesShapeDescr\": {\"days_per_bin\": 3, \"alphabet_size\": 10}},\n {\"CurvesShapeDescr\": {\"days_per_bin\": 0.5, \"alphabet_size\": 12}}]\n \n est = ParamsEstimator(self.variables, self.noisy, descriptors, deciders,\n tuned_params, static_params=static_params)\n\n star_filter, stat, best_params = est.fit()\n assert best_params is not None\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.max",
"numpy.min",
"numpy.transpose"
],
[
"numpy.random.normal",
"numpy.cos",
"numpy.linspace",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
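`parse_combinations` above expands ranged inputs into one parameter dictionary per combination before handing them to `ParamsEstimator`. A generic stand-in for that expansion step, built on `itertools.product` (this is an assumption about what `lcc.utils.helpers.get_combinations` produces, not its actual implementation, and the key names are illustrative):

```python
from itertools import product

def expand_combinations(keys, *value_lists):
    # One dict per point on the cartesian grid of candidate values.
    return [dict(zip(keys, values)) for values in product(*value_lists)]

grid = expand_combinations(
    ["CurvesShapeDescr:days_per_bin", "CurvesShapeDescr:alphabet_size"],
    [0.5, 3],
    [10, 12],
)
# 4 combinations, e.g. {"CurvesShapeDescr:days_per_bin": 0.5, "CurvesShapeDescr:alphabet_size": 10}
```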
albert-dot-ai/allennlp | [
"580dc8b0e2c6491d4d75b54c3b15b34b462e0c67"
] | [
"allennlp/training/trainer.py"
] | [
"\"\"\"\nA :class:`~allennlp.training.trainer.Trainer` is responsible for training a\n:class:`~allennlp.models.model.Model`.\n\nTypically you might create a configuration file specifying the model and\ntraining parameters and then use :mod:`~allennlp.commands.train`\nrather than instantiating a ``Trainer`` yourself.\n\"\"\"\n# pylint: disable=too-many-lines\n\nimport logging\nimport os\nimport shutil\nimport time\nimport re\nimport datetime\nimport traceback\nfrom typing import Dict, Optional, List, Tuple, Union, Iterable, Any, Set\n\nimport torch\nimport torch.optim.lr_scheduler\nfrom torch.nn.parallel import replicate, parallel_apply\nfrom torch.nn.parallel.scatter_gather import scatter_kwargs, gather\nfrom tensorboardX import SummaryWriter\n\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.util import peak_memory_mb, gpu_memory_mb\nfrom allennlp.common.tqdm import Tqdm\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.iterators.data_iterator import DataIterator\nfrom allennlp.models.model import Model\nfrom allennlp.nn import util\nfrom allennlp.training.learning_rate_schedulers import LearningRateScheduler\nfrom allennlp.training.optimizers import Optimizer\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\ndef is_sparse(tensor):\n return tensor.is_sparse\n\n\ndef sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:\n \"\"\"Clips gradient norm of an iterable of parameters.\n\n The norm is computed over all gradients together, as if they were\n concatenated into a single vector. Gradients are modified in-place.\n Supports sparse gradients.\n\n Parameters\n ----------\n parameters : ``(Iterable[torch.Tensor])``\n An iterable of Tensors that will have gradients normalized.\n max_norm : ``float``\n The max norm of the gradients.\n norm_type : ``float``\n The type of the used p-norm. Can be ``'inf'`` for infinity norm.\n\n Returns\n -------\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n # pylint: disable=invalid-name,protected-access\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n if norm_type == float('inf'):\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n if is_sparse(p.grad):\n # need to coalesce the repeated indices before finding norm\n grad = p.grad.data.coalesce()\n param_norm = grad._values().norm(norm_type)\n else:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm ** norm_type\n total_norm = total_norm ** (1. 
/ norm_type)\n clip_coef = max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n for p in parameters:\n if is_sparse(p.grad):\n p.grad.data._values().mul_(clip_coef)\n else:\n p.grad.data.mul_(clip_coef)\n return total_norm\n\n\ndef move_optimizer_to_cuda(optimizer):\n \"\"\"\n Move the optimizer state to GPU, if necessary.\n After calling, any parameter specific state in the optimizer\n will be located on the same device as the parameter.\n \"\"\"\n for param_group in optimizer.param_groups:\n for param in param_group['params']:\n if param.is_cuda:\n param_state = optimizer.state[param]\n for k in param_state.keys():\n if isinstance(param_state[k], torch.Tensor):\n param_state[k] = param_state[k].cuda(device=param.get_device())\n\n\nclass TensorboardWriter:\n \"\"\"\n Wraps a pair of ``SummaryWriter`` instances but is a no-op if they're ``None``.\n Allows Tensorboard logging without always checking for Nones first.\n \"\"\"\n def __init__(self, train_log: SummaryWriter = None, validation_log: SummaryWriter = None) -> None:\n self._train_log = train_log\n self._validation_log = validation_log\n\n @staticmethod\n def _item(value: Any):\n if hasattr(value, 'item'):\n val = value.item()\n else:\n val = value\n return val\n\n def add_train_scalar(self, name: str, value: float, global_step: int) -> None:\n # get the scalar\n if self._train_log is not None:\n self._train_log.add_scalar(name, self._item(value), global_step)\n\n def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:\n if self._train_log is not None:\n if isinstance(values, torch.Tensor):\n values_to_write = values.cpu().data.numpy().flatten()\n self._train_log.add_histogram(name, values_to_write, global_step)\n\n def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:\n\n if self._validation_log is not None:\n self._validation_log.add_scalar(name, self._item(value), global_step)\n\n\ndef time_to_str(timestamp: int) -> str:\n \"\"\"\n Convert seconds past Epoch to human readable string.\n \"\"\"\n datetimestamp = datetime.datetime.fromtimestamp(timestamp)\n return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(\n datetimestamp.year, datetimestamp.month, datetimestamp.day,\n datetimestamp.hour, datetimestamp.minute, datetimestamp.second\n )\n\n\ndef str_to_time(time_str: str) -> datetime.datetime:\n \"\"\"\n Convert human readable string to datetime.datetime.\n \"\"\"\n pieces: Any = [int(piece) for piece in time_str.split('-')]\n return datetime.datetime(*pieces)\n\n\nclass Trainer:\n def __init__(self,\n model: Model,\n optimizer: torch.optim.Optimizer,\n iterator: DataIterator,\n train_dataset: Iterable[Instance],\n validation_dataset: Optional[Iterable[Instance]] = None,\n patience: Optional[int] = None,\n validation_metric: str = \"-loss\",\n validation_iterator: DataIterator = None,\n num_epochs: int = 20,\n serialization_dir: Optional[str] = None,\n num_serialized_models_to_keep: int = 20,\n keep_serialized_model_every_num_seconds: int = None,\n model_save_interval: float = None,\n cuda_device: Union[int, List] = -1,\n grad_norm: Optional[float] = None,\n grad_clipping: Optional[float] = None,\n learning_rate_scheduler: Optional[LearningRateScheduler] = None,\n summary_interval: int = 100,\n histogram_interval: int = None) -> None:\n \"\"\"\n Parameters\n ----------\n model : ``Model``, required.\n An AllenNLP model to be optimized. 
Pytorch Modules can also be optimized if\n their ``forward`` method returns a dictionary with a \"loss\" key, containing a\n scalar tensor representing the loss function to be optimized.\n optimizer : ``torch.nn.Optimizer``, required.\n An instance of a Pytorch Optimizer, instantiated with the parameters of the\n model to be optimized.\n iterator : ``DataIterator``, required.\n A method for iterating over a ``Dataset``, yielding padded indexed batches.\n train_dataset : ``Dataset``, required.\n A ``Dataset`` to train on. The dataset should have already been indexed.\n validation_dataset : ``Dataset``, optional, (default = None).\n A ``Dataset`` to evaluate on. The dataset should have already been indexed.\n patience : Optional[int] > 0, optional (default=None)\n Number of epochs to be patient before early stopping: the training is stopped\n after ``patience`` epochs with no improvement. If given, it must be ``> 0``.\n If None, early stopping is disabled.\n validation_metric : str, optional (default=\"loss\")\n Validation metric to measure for whether to stop training using patience\n and whether to serialize an ``is_best`` model each epoch. The metric name\n must be prepended with either \"+\" or \"-\", which specifies whether the metric\n is an increasing or decreasing function.\n validation_iterator : ``DataIterator``, optional (default=None)\n An iterator to use for the validation set. If ``None``, then\n use the training `iterator`.\n num_epochs : int, optional (default = 20)\n Number of training epochs.\n serialization_dir : str, optional (default=None)\n Path to directory for saving and loading model files. Models will not be saved if\n this parameter is not passed.\n num_serialized_models_to_keep : ``int``, optional (default=20)\n Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.\n A value of None or -1 means all checkpoints will be kept.\n keep_serialized_model_every_num_seconds : ``int``, optional (default=None)\n If num_serialized_models_to_keep is not None, then occasionally it's useful to\n save models at a given interval in addition to the last num_serialized_models_to_keep.\n To do so, specify keep_serialized_model_every_num_seconds as the number of seconds\n between permanently saved checkpoints. Note that this option is only used if\n num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.\n model_save_interval : ``float``, optional (default=None)\n If provided, then serialize models every ``model_save_interval``\n seconds within single epochs. In all cases, models are also saved\n at the end of every epoch if ``serialization_dir`` is provided.\n cuda_device : ``int``, optional (default = -1)\n An integer specifying the CUDA device to use. If -1, the CPU is used.\n grad_norm : ``float``, optional, (default = None).\n If provided, gradient norms will be rescaled to have a maximum of this value.\n grad_clipping : ``float``, optional (default = ``None``).\n If provided, gradients will be clipped `during the backward pass` to have an (absolute)\n maximum of this value. If you are getting ``NaNs`` in your gradients during training\n that are not solved by using ``grad_norm``, you may need this.\n learning_rate_scheduler : ``PytorchLRScheduler``, optional, (default = None)\n A Pytorch learning rate scheduler. The learning rate will be decayed with respect to\n this schedule at the end of each epoch. 
If you use\n :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`, this will use the ``validation_metric``\n provided to determine if learning has plateaued. To support updating the learning\n rate on every batch, this can optionally implement ``step_batch(batch_num_total)`` which\n updates the learning rate given the batch number.\n summary_interval: ``int``, optional, (default = 100)\n Number of batches between logging scalars to tensorboard\n histogram_interval : ``int``, optional, (default = ``None``)\n If not None, then log histograms to tensorboard every ``histogram_interval`` batches.\n When this parameter is specified, the following additional logging is enabled:\n * Histograms of model parameters\n * The ratio of parameter update norm to parameter norm\n * Histogram of layer activations\n We log histograms of the parameters returned by\n ``model.get_parameters_for_histogram_tensorboard_logging``.\n The layer activations are logged for any modules in the ``Model`` that have\n the attribute ``should_log_activations`` set to ``True``. Logging\n histograms requires a number of GPU-CPU copies during training and is typically\n slow, so we recommend logging histograms relatively infrequently.\n Note: only Modules that return tensors, tuples of tensors or dicts\n with tensors as values currently support activation logging.\n \"\"\"\n self._model = model\n self._iterator = iterator\n self._validation_iterator = validation_iterator\n self._optimizer = optimizer\n self._train_data = train_dataset\n self._validation_data = validation_dataset\n\n if patience is None: # no early stopping\n if validation_dataset:\n logger.warning('You provided a validation dataset but patience was set to None, '\n 'meaning that early stopping is disabled')\n elif (not isinstance(patience, int)) or patience <= 0:\n raise ConfigurationError('{} is an invalid value for \"patience\": it must be a positive integer '\n 'or None (if you want to disable early stopping)'.format(patience))\n self._patience = patience\n self._num_epochs = num_epochs\n\n self._serialization_dir = serialization_dir\n self._num_serialized_models_to_keep = num_serialized_models_to_keep\n self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds\n self._serialized_paths: List[Any] = []\n self._last_permanent_saved_checkpoint_time = time.time()\n self._model_save_interval = model_save_interval\n\n self._grad_norm = grad_norm\n self._grad_clipping = grad_clipping\n self._learning_rate_scheduler = learning_rate_scheduler\n\n increase_or_decrease = validation_metric[0]\n if increase_or_decrease not in [\"+\", \"-\"]:\n raise ConfigurationError(\"Validation metrics must specify whether they should increase \"\n \"or decrease by pre-pending the metric name with a +/-.\")\n self._validation_metric = validation_metric[1:]\n self._validation_metric_decreases = increase_or_decrease == \"-\"\n\n if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):\n raise ConfigurationError(\"Expected an int or list for cuda_device, got {}\".format(cuda_device))\n\n if isinstance(cuda_device, list):\n logger.info(f\"WARNING: Multiple GPU support is experimental not recommended for use. 
\"\n \"In some cases it may lead to incorrect results or undefined behavior.\")\n self._multiple_gpu = True\n self._cuda_devices = cuda_device\n # data_parallel will take care of transfering to cuda devices,\n # so the iterator keeps data on CPU.\n self._iterator_device = -1\n else:\n self._multiple_gpu = False\n self._cuda_devices = [cuda_device]\n self._iterator_device = cuda_device\n\n if self._cuda_devices[0] != -1:\n self._model = self._model.cuda(self._cuda_devices[0])\n\n self._log_interval = 10 # seconds\n self._summary_interval = summary_interval\n self._histogram_interval = histogram_interval\n self._log_histograms_this_batch = False\n # We keep the total batch number as a class variable because it\n # is used inside a closure for the hook which logs activations in\n # ``_enable_activation_logging``.\n self._batch_num_total = 0\n\n self._last_log = 0.0 # time of last logging\n\n if serialization_dir is not None:\n train_log = SummaryWriter(os.path.join(serialization_dir, \"log\", \"train\"))\n validation_log = SummaryWriter(os.path.join(serialization_dir, \"log\", \"validation\"))\n self._tensorboard = TensorboardWriter(train_log, validation_log)\n else:\n self._tensorboard = TensorboardWriter()\n self._warned_tqdm_ignores_underscores = False\n\n def _enable_gradient_clipping(self) -> None:\n if self._grad_clipping is not None:\n # Pylint is unable to tell that we're in the case that _grad_clipping is not None...\n # pylint: disable=invalid-unary-operand-type\n clip_function = lambda grad: grad.clamp(-self._grad_clipping, self._grad_clipping)\n for parameter in self._model.parameters():\n if parameter.requires_grad:\n parameter.register_hook(clip_function)\n\n def _enable_activation_logging(self) -> None:\n \"\"\"\n Log activations to tensorboard\n \"\"\"\n if self._histogram_interval is not None:\n # To log activation histograms to the forward pass, we register\n # a hook on forward to capture the output tensors.\n # This uses a closure on self._log_histograms_this_batch to\n # determine whether to send the activations to tensorboard,\n # since we don't want them on every call.\n for _, module in self._model.named_modules():\n if not getattr(module, 'should_log_activations', False):\n # skip it\n continue\n\n def hook(module_, inputs, outputs):\n # pylint: disable=unused-argument,cell-var-from-loop\n log_prefix = 'activation_histogram/{0}'.format(module_.__class__)\n if self._log_histograms_this_batch:\n if isinstance(outputs, torch.Tensor):\n log_name = log_prefix\n self._tensorboard.add_train_histogram(log_name,\n outputs,\n self._batch_num_total)\n elif isinstance(outputs, (list, tuple)):\n for i, output in enumerate(outputs):\n log_name = \"{0}_{1}\".format(log_prefix, i)\n self._tensorboard.add_train_histogram(log_name,\n output,\n self._batch_num_total)\n elif isinstance(outputs, dict):\n for k, tensor in outputs.items():\n log_name = \"{0}_{1}\".format(log_prefix, k)\n self._tensorboard.add_train_histogram(log_name,\n tensor,\n self._batch_num_total)\n else:\n # skip it\n pass\n\n module.register_forward_hook(hook)\n\n def _rescale_gradients(self) -> Optional[float]:\n \"\"\"\n Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.\n \"\"\"\n if self._grad_norm:\n parameters_to_clip = [p for p in self._model.parameters()\n if p.grad is not None]\n return sparse_clip_norm(parameters_to_clip, self._grad_norm)\n return None\n\n def _data_parallel(self, batch):\n \"\"\"\n Do the forward pass using multiple GPUs. 
This is a simplification\n of torch.nn.parallel.data_parallel to support the allennlp model\n interface.\n \"\"\"\n inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)\n used_device_ids = self._cuda_devices[:len(inputs)]\n replicas = replicate(self._model, used_device_ids)\n outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)\n\n # Only the 'loss' is needed.\n # a (num_gpu, ) tensor with loss on each GPU\n losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)\n return {'loss': losses.mean()}\n\n def _batch_loss(self, batch: torch.Tensor, for_training: bool) -> torch.Tensor:\n \"\"\"\n Does a forward pass on the given batch and returns the ``loss`` value in the result.\n If ``for_training`` is `True` also applies regularization penalty.\n \"\"\"\n if self._multiple_gpu:\n output_dict = self._data_parallel(batch)\n else:\n output_dict = self._model(**batch)\n\n try:\n loss = output_dict[\"loss\"]\n if for_training:\n loss += self._model.get_regularization_penalty()\n except KeyError:\n if for_training:\n raise RuntimeError(\"The model you are trying to optimize does not contain a\"\n \" 'loss' key in the output of model.forward(inputs).\")\n loss = None\n\n return loss\n\n def _get_metrics(self, total_loss: float, num_batches: int, reset: bool = False) -> Dict[str, float]:\n \"\"\"\n Gets the metrics but sets ``\"loss\"`` to\n the total loss divided by the ``num_batches`` so that\n the ``\"loss\"`` metric is \"average loss per batch\".\n \"\"\"\n metrics = self._model.get_metrics(reset=reset)\n metrics[\"loss\"] = float(total_loss / num_batches) if num_batches > 0 else 0.0\n return metrics\n\n def _train_epoch(self, epoch: int) -> Dict[str, float]:\n \"\"\"\n Trains one epoch and returns metrics.\n \"\"\"\n logger.info(\"Epoch %d/%d\", epoch, self._num_epochs - 1)\n logger.info(f\"Peak CPU memory usage MB: {peak_memory_mb()}\")\n for gpu, memory in gpu_memory_mb().items():\n logger.info(f\"GPU {gpu} memory usage MB: {memory}\")\n\n train_loss = 0.0\n # Set the model to \"train\" mode.\n self._model.train()\n\n # Get tqdm for the training batches\n train_generator = self._iterator(self._train_data,\n num_epochs=1,\n cuda_device=self._iterator_device)\n num_training_batches = self._iterator.get_num_batches(self._train_data)\n train_generator_tqdm = Tqdm.tqdm(train_generator,\n total=num_training_batches)\n self._last_log = time.time()\n last_save_time = time.time()\n\n batches_this_epoch = 0\n if self._batch_num_total is None:\n self._batch_num_total = 0\n\n if self._histogram_interval is not None:\n histogram_parameters = set(self._model.get_parameters_for_histogram_tensorboard_logging())\n\n logger.info(\"Training\")\n for batch in train_generator_tqdm:\n batches_this_epoch += 1\n self._batch_num_total += 1\n batch_num_total = self._batch_num_total\n\n self._log_histograms_this_batch = self._histogram_interval is not None and (\n batch_num_total % self._histogram_interval == 0)\n\n self._optimizer.zero_grad()\n\n loss = self._batch_loss(batch, for_training=True)\n loss.backward()\n\n train_loss += loss.item()\n\n batch_grad_norm = self._rescale_gradients()\n\n # This does nothing if batch_num_total is None or you are using an\n # LRScheduler which doesn't update per batch.\n if self._learning_rate_scheduler:\n self._learning_rate_scheduler.step_batch(batch_num_total)\n\n if self._log_histograms_this_batch:\n # get the magnitude of parameter updates for logging\n # We need a copy of current parameters to compute 
magnitude of updates,\n # and copy them to CPU so large models won't go OOM on the GPU.\n param_updates = {name: param.detach().cpu().clone()\n for name, param in self._model.named_parameters()}\n self._optimizer.step()\n for name, param in self._model.named_parameters():\n param_updates[name].sub_(param.detach().cpu())\n update_norm = torch.norm(param_updates[name].view(-1, ))\n param_norm = torch.norm(param.view(-1, ))\n self._tensorboard.add_train_scalar(\"gradient_update/\" + name,\n update_norm / (param_norm + 1e-7),\n batch_num_total)\n else:\n self._optimizer.step()\n\n # Update the description with the latest metrics\n metrics = self._get_metrics(train_loss, batches_this_epoch)\n description = self._description_from_metrics(metrics)\n\n train_generator_tqdm.set_description(description, refresh=False)\n\n # Log parameter values to Tensorboard\n if batch_num_total % self._summary_interval == 0:\n self._parameter_and_gradient_statistics_to_tensorboard(batch_num_total, batch_grad_norm)\n self._tensorboard.add_train_scalar(\"loss/loss_train\", metrics[\"loss\"], batch_num_total)\n self._metrics_to_tensorboard(batch_num_total,\n {\"epoch_metrics/\" + k: v for k, v in metrics.items()})\n\n if self._log_histograms_this_batch:\n self._histograms_to_tensorboard(batch_num_total, histogram_parameters)\n\n # Save model if needed.\n if self._model_save_interval is not None and (\n time.time() - last_save_time > self._model_save_interval\n ):\n last_save_time = time.time()\n self._save_checkpoint(\n '{0}.{1}'.format(epoch, time_to_str(int(last_save_time))), [], is_best=False\n )\n\n return self._get_metrics(train_loss, batches_this_epoch, reset=True)\n\n def _should_stop_early(self, metric_history: List[float]) -> bool:\n \"\"\"\n uses patience and the validation metric to determine if training should stop early\n \"\"\"\n if self._patience and self._patience < len(metric_history):\n # Pylint can't figure out that in this branch `self._patience` is an int.\n # pylint: disable=invalid-unary-operand-type\n\n # Is the best score in the past N epochs worse than or equal the best score overall?\n if self._validation_metric_decreases:\n return min(metric_history[-self._patience:]) >= min(metric_history[:-self._patience])\n else:\n return max(metric_history[-self._patience:]) <= max(metric_history[:-self._patience])\n\n return False\n\n def _parameter_and_gradient_statistics_to_tensorboard(self, # pylint: disable=invalid-name\n epoch: int,\n batch_grad_norm: float) -> None:\n \"\"\"\n Send the mean and std of all parameters and gradients to tensorboard, as well\n as logging the average gradient norm.\n \"\"\"\n # Log parameter values to Tensorboard\n for name, param in self._model.named_parameters():\n self._tensorboard.add_train_scalar(\"parameter_mean/\" + name,\n param.data.mean(),\n epoch)\n self._tensorboard.add_train_scalar(\"parameter_std/\" + name, param.data.std(), epoch)\n if param.grad is not None:\n if is_sparse(param.grad):\n # pylint: disable=protected-access\n grad_data = param.grad.data._values()\n else:\n grad_data = param.grad.data\n\n # skip empty gradients\n if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable\n self._tensorboard.add_train_scalar(\"gradient_mean/\" + name,\n grad_data.mean(),\n epoch)\n self._tensorboard.add_train_scalar(\"gradient_std/\" + name,\n grad_data.std(),\n epoch)\n else:\n # no gradient for a parameter with sparse gradients\n logger.info(\"No gradient for %s, skipping tensorboard logging.\", name)\n # norm of gradients\n 
if batch_grad_norm is not None:\n self._tensorboard.add_train_scalar(\"gradient_norm\",\n batch_grad_norm,\n epoch)\n\n def _histograms_to_tensorboard(self, epoch: int, histogram_parameters: Set[str]) -> None:\n \"\"\"\n Send histograms of parameters to tensorboard.\n \"\"\"\n for name, param in self._model.named_parameters():\n if name in histogram_parameters:\n self._tensorboard.add_train_histogram(\"parameter_histogram/\" + name,\n param,\n epoch)\n\n def _metrics_to_tensorboard(self,\n epoch: int,\n train_metrics: dict,\n val_metrics: dict = None) -> None:\n \"\"\"\n Sends all of the train metrics (and validation metrics, if provided) to tensorboard.\n \"\"\"\n metric_names = set(train_metrics.keys())\n if val_metrics is not None:\n metric_names.update(val_metrics.keys())\n val_metrics = val_metrics or {}\n\n for name in metric_names:\n train_metric = train_metrics.get(name)\n if train_metric is not None:\n self._tensorboard.add_train_scalar(name, train_metric, epoch)\n val_metric = val_metrics.get(name)\n if val_metric is not None:\n self._tensorboard.add_validation_scalar(name, val_metric, epoch)\n\n def _metrics_to_console(self, # pylint: disable=no-self-use\n train_metrics: dict,\n val_metrics: dict = None) -> None:\n \"\"\"\n Logs all of the train metrics (and validation metrics, if provided) to the console.\n \"\"\"\n val_metrics = val_metrics or {}\n dual_message_template = \"Training %s : %3f Validation %s : %3f \"\n message_template = \"%s %s : %3f \"\n\n metric_names = set(train_metrics.keys())\n if val_metrics:\n metric_names.update(val_metrics.keys())\n\n for name in metric_names:\n train_metric = train_metrics.get(name)\n val_metric = val_metrics.get(name)\n\n if val_metric is not None and train_metric is not None:\n logger.info(dual_message_template, name, train_metric, name, val_metric)\n elif val_metric is not None:\n logger.info(message_template, \"Validation\", name, val_metric)\n elif train_metric is not None:\n logger.info(message_template, \"Training\", name, train_metric)\n\n def _validation_loss(self) -> Tuple[float, int]:\n \"\"\"\n Computes the validation loss. Returns it and the number of batches.\n \"\"\"\n logger.info(\"Validating\")\n\n self._model.eval()\n\n if self._validation_iterator is not None:\n val_iterator = self._validation_iterator\n else:\n val_iterator = self._iterator\n\n val_generator = val_iterator(self._validation_data,\n num_epochs=1,\n cuda_device=self._iterator_device)\n num_validation_batches = val_iterator.get_num_batches(self._validation_data)\n val_generator_tqdm = Tqdm.tqdm(val_generator,\n total=num_validation_batches)\n batches_this_epoch = 0\n val_loss = 0\n for batch in val_generator_tqdm:\n\n loss = self._batch_loss(batch, for_training=False)\n if loss is not None:\n # You shouldn't necessarily have to compute a loss for validation, so we allow for\n # `loss` to be None. We need to be careful, though - `batches_this_epoch` is\n # currently only used as the divisor for the loss function, so we can safely only\n # count those batches for which we actually have a loss. 
If this variable ever\n # gets used for something else, we might need to change things around a bit.\n batches_this_epoch += 1\n val_loss += loss.detach().cpu().numpy()\n\n # Update the description with the latest metrics\n val_metrics = self._get_metrics(val_loss, batches_this_epoch)\n description = self._description_from_metrics(val_metrics)\n val_generator_tqdm.set_description(description, refresh=False)\n\n return val_loss, batches_this_epoch\n\n def train(self) -> Dict[str, Any]:\n \"\"\"\n Trains the supplied model with the supplied parameters.\n \"\"\"\n try:\n epoch_counter, validation_metric_per_epoch = self._restore_checkpoint()\n except RuntimeError:\n traceback.print_exc()\n raise ConfigurationError(\"Could not recover training from the checkpoint. Did you mean to output to \"\n \"a different serialization directory or delete the existing serialization \"\n \"directory?\")\n\n self._enable_gradient_clipping()\n self._enable_activation_logging()\n\n logger.info(\"Beginning training.\")\n\n train_metrics: Dict[str, float] = {}\n val_metrics: Dict[str, float] = {}\n epochs_trained = 0\n training_start_time = time.time()\n for epoch in range(epoch_counter, self._num_epochs):\n epoch_start_time = time.time()\n train_metrics = self._train_epoch(epoch)\n\n if self._validation_data is not None:\n with torch.no_grad():\n # We have a validation set, so compute all the metrics on it.\n val_loss, num_batches = self._validation_loss()\n val_metrics = self._get_metrics(val_loss, num_batches, reset=True)\n\n # Check validation metric for early stopping\n this_epoch_val_metric = val_metrics[self._validation_metric]\n\n # Check validation metric to see if it's the best so far\n is_best_so_far = self._is_best_so_far(this_epoch_val_metric, validation_metric_per_epoch)\n\n validation_metric_per_epoch.append(this_epoch_val_metric)\n if self._should_stop_early(validation_metric_per_epoch):\n logger.info(\"Ran out of patience. 
Stopping training.\")\n break\n\n else:\n # No validation set, so just assume it's the best so far.\n is_best_so_far = True\n val_metrics = {}\n this_epoch_val_metric = None\n\n self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)\n self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)\n self._metrics_to_console(train_metrics, val_metrics)\n\n if self._learning_rate_scheduler:\n # The LRScheduler API is agnostic to whether your schedule requires a validation metric -\n # if it doesn't, the validation metric passed here is ignored.\n self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)\n\n epoch_elapsed_time = time.time() - epoch_start_time\n logger.info(\"Epoch duration: %s\", time.strftime(\"%H:%M:%S\", time.gmtime(epoch_elapsed_time)))\n\n if epoch < self._num_epochs - 1:\n training_elapsed_time = time.time() - training_start_time\n estimated_time_remaining = training_elapsed_time * \\\n ((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)\n formatted_time = time.strftime(\"%H:%M:%S\", time.gmtime(estimated_time_remaining))\n logger.info(\"Estimated training time remaining: %s\", formatted_time)\n\n epochs_trained += 1\n\n training_elapsed_time = time.time() - training_start_time\n metrics = {\n \"training_duration\": time.strftime(\"%H:%M:%S\", time.gmtime(training_elapsed_time)),\n \"training_start_epoch\": epoch_counter,\n \"training_epochs\": epochs_trained\n }\n for key, value in train_metrics.items():\n metrics[\"training_\" + key] = value\n for key, value in val_metrics.items():\n metrics[\"validation_\" + key] = value\n\n if validation_metric_per_epoch:\n # We may not have had validation data, so we need to hide this behind an if.\n if self._validation_metric_decreases:\n best_validation_metric = min(validation_metric_per_epoch)\n else:\n best_validation_metric = max(validation_metric_per_epoch)\n metrics[f\"best_validation_{self._validation_metric}\"] = best_validation_metric\n metrics['best_epoch'] = [i for i, value in enumerate(validation_metric_per_epoch)\n if value == best_validation_metric][-1]\n return metrics\n\n def _is_best_so_far(self,\n this_epoch_val_metric: float,\n validation_metric_per_epoch: List[float]):\n if not validation_metric_per_epoch:\n return True\n elif self._validation_metric_decreases:\n return this_epoch_val_metric < min(validation_metric_per_epoch)\n else:\n return this_epoch_val_metric > max(validation_metric_per_epoch)\n\n def _description_from_metrics(self, metrics: Dict[str, float]) -> str:\n if (not self._warned_tqdm_ignores_underscores and\n any(metric_name.startswith(\"_\") for metric_name in metrics)):\n logger.warning(\"Metrics with names beginning with \\\"_\\\" will \"\n \"not be logged to the tqdm progress bar.\")\n self._warned_tqdm_ignores_underscores = True\n return ', '.join([\"%s: %.4f\" % (name, value) for name, value in\n metrics.items() if not name.startswith(\"_\")]) + \" ||\"\n\n def _save_checkpoint(self,\n epoch: Union[int, str],\n val_metric_per_epoch: List[float],\n is_best: Optional[bool] = None) -> None:\n \"\"\"\n Saves a checkpoint of the model to self._serialization_dir.\n Is a no-op if self._serialization_dir is None.\n\n Parameters\n ----------\n epoch : Union[int, str], required.\n The epoch of training. 
If the checkpoint is saved in the middle\n of an epoch, the parameter is a string with the epoch and timestamp.\n is_best: bool, optional (default = None)\n A flag which causes the model weights at the given epoch to\n be copied to a \"best.th\" file. The value of this flag should\n be based on some validation metric computed by your model.\n \"\"\"\n if self._serialization_dir is not None:\n model_path = os.path.join(self._serialization_dir, \"model_state_epoch_{}.th\".format(epoch))\n model_state = self._model.state_dict()\n torch.save(model_state, model_path)\n\n training_state = {'epoch': epoch,\n 'val_metric_per_epoch': val_metric_per_epoch,\n 'optimizer': self._optimizer.state_dict(),\n 'batch_num_total': self._batch_num_total}\n training_path = os.path.join(self._serialization_dir,\n \"training_state_epoch_{}.th\".format(epoch))\n torch.save(training_state, training_path)\n if is_best:\n logger.info(\"Best validation performance so far. \"\n \"Copying weights to '%s/best.th'.\", self._serialization_dir)\n shutil.copyfile(model_path, os.path.join(self._serialization_dir, \"best.th\"))\n\n if self._num_serialized_models_to_keep and self._num_serialized_models_to_keep >= 0:\n self._serialized_paths.append([time.time(), model_path, training_path])\n if len(self._serialized_paths) > self._num_serialized_models_to_keep:\n paths_to_remove = self._serialized_paths.pop(0)\n # Check to see if we should keep this checkpoint, if it has been longer\n # then self._keep_serialized_model_every_num_seconds since the last\n # kept checkpoint.\n remove_path = True\n if self._keep_serialized_model_every_num_seconds is not None:\n save_time = paths_to_remove[0]\n time_since_checkpoint_kept = save_time - self._last_permanent_saved_checkpoint_time\n if time_since_checkpoint_kept > self._keep_serialized_model_every_num_seconds:\n # We want to keep this checkpoint.\n remove_path = False\n self._last_permanent_saved_checkpoint_time = save_time\n if remove_path:\n for fname in paths_to_remove[1:]:\n os.remove(fname)\n\n def find_latest_checkpoint(self) -> Tuple[str, str]:\n \"\"\"\n Return the location of the latest model and training state files.\n If there isn't a valid checkpoint then return None.\n \"\"\"\n have_checkpoint = (self._serialization_dir is not None and\n any(\"model_state_epoch_\" in x for x in os.listdir(self._serialization_dir)))\n\n if not have_checkpoint:\n return None\n\n serialization_files = os.listdir(self._serialization_dir)\n model_checkpoints = [x for x in serialization_files if \"model_state_epoch\" in x]\n # Get the last checkpoint file. Epochs are specified as either an\n # int (for end of epoch files) or with epoch and timestamp for\n # within epoch checkpoints, e.g. 
5.2018-02-02-15-33-42\n found_epochs = [\n # pylint: disable=anomalous-backslash-in-string\n re.search(\"model_state_epoch_([0-9\\.\\-]+)\\.th\", x).group(1)\n for x in model_checkpoints\n ]\n int_epochs: Any = []\n for epoch in found_epochs:\n pieces = epoch.split('.')\n if len(pieces) == 1:\n # Just a single epoch without timestamp\n int_epochs.append([int(pieces[0]), 0])\n else:\n # has a timestamp\n int_epochs.append([int(pieces[0]), pieces[1]])\n last_epoch = sorted(int_epochs, reverse=True)[0]\n if last_epoch[1] == 0:\n epoch_to_load = str(last_epoch[0])\n else:\n epoch_to_load = '{0}.{1}'.format(last_epoch[0], last_epoch[1])\n\n model_path = os.path.join(self._serialization_dir,\n \"model_state_epoch_{}.th\".format(epoch_to_load))\n training_state_path = os.path.join(self._serialization_dir,\n \"training_state_epoch_{}.th\".format(epoch_to_load))\n\n return (model_path, training_state_path)\n\n def _restore_checkpoint(self) -> Tuple[int, List[float]]:\n \"\"\"\n Restores a model from a serialization_dir to the last saved checkpoint.\n This includes an epoch count and optimizer state, which is serialized separately\n from model parameters. This function should only be used to continue training -\n if you wish to load a model for inference/load parts of a model into a new\n computation graph, you should use the native Pytorch functions:\n `` model.load_state_dict(torch.load(\"/path/to/model/weights.th\"))``\n\n If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,\n this function will do nothing and return 0.\n\n Returns\n -------\n epoch: int\n The epoch at which to resume training, which should be one after the epoch\n in the saved training state.\n \"\"\"\n latest_checkpoint = self.find_latest_checkpoint()\n\n if latest_checkpoint is None:\n # No checkpoint to restore, start at 0\n return 0, []\n\n model_path, training_state_path = latest_checkpoint\n\n # Load the parameters onto CPU, then transfer to GPU.\n # This avoids potential OOM on GPU for large models that\n # load parameters onto GPU then make a new GPU copy into the parameter\n # buffer. The GPU transfer happens implicitly in load_state_dict.\n model_state = torch.load(model_path, map_location=util.device_mapping(-1))\n training_state = torch.load(training_state_path, map_location=util.device_mapping(-1))\n self._model.load_state_dict(model_state)\n self._optimizer.load_state_dict(training_state[\"optimizer\"])\n move_optimizer_to_cuda(self._optimizer)\n\n # We didn't used to save `validation_metric_per_epoch`, so we can't assume\n # that it's part of the trainer state. 
If it's not there, an empty list is all\n # we can do.\n if \"val_metric_per_epoch\" not in training_state:\n logger.warning(\"trainer state `val_metric_per_epoch` not found, using empty list\")\n val_metric_per_epoch: List[float] = []\n else:\n val_metric_per_epoch = training_state[\"val_metric_per_epoch\"]\n\n if isinstance(training_state[\"epoch\"], int):\n epoch_to_return = training_state[\"epoch\"] + 1\n else:\n epoch_to_return = int(training_state[\"epoch\"].split('.')[0]) + 1\n\n # For older checkpoints with batch_num_total missing, default to old behavior where\n # it is unchanged.\n batch_num_total = training_state.get('batch_num_total')\n if batch_num_total is not None:\n self._batch_num_total = batch_num_total\n\n return epoch_to_return, val_metric_per_epoch\n\n # Requires custom from_params.\n @classmethod\n def from_params(cls,\n model: Model,\n serialization_dir: str,\n iterator: DataIterator,\n train_data: Iterable[Instance],\n validation_data: Optional[Iterable[Instance]],\n params: Params,\n validation_iterator: DataIterator = None) -> 'Trainer':\n\n patience = params.pop_int(\"patience\", None)\n validation_metric = params.pop(\"validation_metric\", \"-loss\")\n num_epochs = params.pop_int(\"num_epochs\", 20)\n cuda_device = params.pop_int(\"cuda_device\", -1)\n grad_norm = params.pop_float(\"grad_norm\", None)\n grad_clipping = params.pop_float(\"grad_clipping\", None)\n lr_scheduler_params = params.pop(\"learning_rate_scheduler\", None)\n\n if cuda_device >= 0:\n model = model.cuda(cuda_device)\n parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]\n optimizer = Optimizer.from_params(parameters, params.pop(\"optimizer\"))\n\n if lr_scheduler_params:\n scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)\n else:\n scheduler = None\n\n num_serialized_models_to_keep = params.pop_int(\"num_serialized_models_to_keep\", 20)\n keep_serialized_model_every_num_seconds = params.pop_int(\n \"keep_serialized_model_every_num_seconds\", None)\n model_save_interval = params.pop_float(\"model_save_interval\", None)\n summary_interval = params.pop_int(\"summary_interval\", 100)\n histogram_interval = params.pop_int(\"histogram_interval\", None)\n\n params.assert_empty(cls.__name__)\n return Trainer(model, optimizer, iterator,\n train_data, validation_data,\n patience=patience,\n validation_metric=validation_metric,\n validation_iterator=validation_iterator,\n num_epochs=num_epochs,\n serialization_dir=serialization_dir,\n cuda_device=cuda_device,\n grad_norm=grad_norm,\n grad_clipping=grad_clipping,\n learning_rate_scheduler=scheduler,\n num_serialized_models_to_keep=num_serialized_models_to_keep,\n keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,\n model_save_interval=model_save_interval,\n summary_interval=summary_interval,\n histogram_interval=histogram_interval)\n"
] | [
[
"torch.nn.parallel.parallel_apply",
"torch.nn.parallel.replicate",
"torch.nn.parallel.scatter_gather.scatter_kwargs",
"torch.tensor",
"torch.no_grad",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pinedbean/similarIV | [
"ee31311749c07e4fc93bfdcf8a575f3f5477d7d5"
] | [
"similarIV/instant_function.py"
] | [
"import pandas.core.algorithms as algos\nfrom pandas import Series\nimport scipy.stats.stats as stats\nimport re\nimport traceback\nimport string\nimport numpy as np\nimport pandas as pd\n\n\nmax_bin = 20\nforce_bin = 3\n\n# define a binning function\ndef mono_bin(Y, X, n = max_bin):\n \n df1 = pd.DataFrame({\"X\": X, \"Y\": Y})\n justmiss = df1[['X','Y']][df1.X.isnull()]\n notmiss = df1[['X','Y']][df1.X.notnull()]\n r = 0\n while np.abs(r) < 1:\n try:\n d1 = pd.DataFrame({\"X\": notmiss.X, \"Y\": notmiss.Y, \"Bucket\": pd.qcut(notmiss.X, n)})\n d2 = d1.groupby('Bucket', as_index=True)\n r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)\n n = n - 1 \n except Exception as e:\n n = n - 1\n\n if len(d2) == 1:\n n = force_bin \n bins = algos.quantile(notmiss.X, np.linspace(0, 1, n))\n if len(np.unique(bins)) == 2:\n bins = np.insert(bins, 0, 1)\n bins[1] = bins[1]-(bins[1]/2)\n d1 = pd.DataFrame({\"X\": notmiss.X, \"Y\": notmiss.Y, \"Bucket\": pd.cut(notmiss.X, np.unique(bins),include_lowest=True)}) \n d2 = d1.groupby('Bucket', as_index=True)\n \n d3 = pd.DataFrame({},index=[])\n d3[\"MIN_VALUE\"] = d2.min().X\n d3[\"MAX_VALUE\"] = d2.max().X\n d3[\"COUNT\"] = d2.count().Y\n d3[\"EVENT\"] = d2.sum().Y\n d3[\"NONEVENT\"] = d2.count().Y - d2.sum().Y\n d3=d3.reset_index(drop=True)\n \n if len(justmiss.index) > 0:\n d4 = pd.DataFrame({'MIN_VALUE':np.nan},index=[0])\n d4[\"MAX_VALUE\"] = np.nan\n d4[\"COUNT\"] = justmiss.count().Y\n d4[\"EVENT\"] = justmiss.sum().Y\n d4[\"NONEVENT\"] = justmiss.count().Y - justmiss.sum().Y\n d3 = d3.append(d4,ignore_index=True)\n \n d3[\"EVENT_RATE\"] = d3.EVENT/d3.COUNT\n d3[\"NON_EVENT_RATE\"] = d3.NONEVENT/d3.COUNT\n d3[\"DIST_EVENT\"] = d3.EVENT/d3.sum().EVENT\n d3[\"DIST_NON_EVENT\"] = d3.NONEVENT/d3.sum().NONEVENT\n d3[\"WOE\"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)\n d3[\"IV\"] = (d3.DIST_EVENT-d3.DIST_NON_EVENT)*np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)\n d3[\"VAR_NAME\"] = \"VAR\"\n d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']] \n d3 = d3.replace([np.inf, -np.inf], 0)\n d3.IV = d3.IV.sum()\n \n return(d3)\n\ndef char_bin(Y, X):\n \n df1 = pd.DataFrame({\"X\": X, \"Y\": Y})\n justmiss = df1[['X','Y']][df1.X.isnull()]\n notmiss = df1[['X','Y']][df1.X.notnull()] \n df2 = notmiss.groupby('X',as_index=True)\n \n d3 = pd.DataFrame({},index=[])\n d3[\"COUNT\"] = df2.count().Y\n d3[\"MIN_VALUE\"] = df2.sum().Y.index\n d3[\"MAX_VALUE\"] = d3[\"MIN_VALUE\"]\n d3[\"EVENT\"] = df2.sum().Y\n d3[\"NONEVENT\"] = df2.count().Y - df2.sum().Y\n \n if len(justmiss.index) > 0:\n d4 = pd.DataFrame({'MIN_VALUE':np.nan},index=[0])\n d4[\"MAX_VALUE\"] = np.nan\n d4[\"COUNT\"] = justmiss.count().Y\n d4[\"EVENT\"] = justmiss.sum().Y\n d4[\"NONEVENT\"] = justmiss.count().Y - justmiss.sum().Y\n d3 = d3.append(d4,ignore_index=True)\n \n d3[\"EVENT_RATE\"] = d3.EVENT/d3.COUNT\n d3[\"NON_EVENT_RATE\"] = d3.NONEVENT/d3.COUNT\n d3[\"DIST_EVENT\"] = d3.EVENT/d3.sum().EVENT\n d3[\"DIST_NON_EVENT\"] = d3.NONEVENT/d3.sum().NONEVENT\n d3[\"WOE\"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)\n d3[\"IV\"] = (d3.DIST_EVENT-d3.DIST_NON_EVENT)*np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)\n d3[\"VAR_NAME\"] = \"VAR\"\n d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']] \n d3 = d3.replace([np.inf, -np.inf], 0)\n d3.IV = d3.IV.sum()\n d3 = d3.reset_index(drop=True)\n \n return(d3)\n\ndef data_vars(df1, 
target):\n \n stack = traceback.extract_stack()\n filename, lineno, function_name, code = stack[-2]\n vars_name = re.compile(r'\\((.*?)\\).*$').search(code).groups()[0]\n final = (re.findall(r\"[\\w']+\", vars_name))[-1]\n \n x = df1.dtypes.index\n count = -1\n \n for i in x:\n if i.upper() not in (final.upper()):\n if np.issubdtype(df1[i], np.number) and len(Series.unique(df1[i])) > 2:\n conv = mono_bin(target, df1[i])\n conv[\"VAR_NAME\"] = i\n count = count + 1\n else:\n conv = char_bin(target, df1[i])\n conv[\"VAR_NAME\"] = i \n count = count + 1\n \n if count == 0:\n iv_df = conv\n else:\n iv_df = iv_df.append(conv,ignore_index=True)\n \n iv = pd.DataFrame({'IV':iv_df.groupby('VAR_NAME').IV.max()})\n iv = iv.reset_index()\n return(iv_df,iv)"
] | [
[
"numpy.log",
"numpy.abs",
"numpy.linspace",
"numpy.unique",
"numpy.issubdtype",
"pandas.DataFrame",
"numpy.insert",
"pandas.Series.unique",
"pandas.qcut"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
DeqiTang/pymatflow | [
"bd8776feb40ecef0e6704ee898d9f42ded3b0186"
] | [
"pymatflow/elk/post/opt.py"
] | [
"import os\r\nimport sys\r\nimport datetime\r\nimport subprocess\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom pymatflow.vasp.base.xyz import Atom\r\n\r\nclass opt_out:\r\n \"\"\"\r\n Note:\r\n \"\"\"\r\n def __init__(self):\r\n \"\"\"\r\n output:\r\n \"\"\"\r\n self.outcar = None\r\n self.poscar = None\r\n self.run_params = {}\r\n self.run_info = {}\r\n self.job_done = None # whether calculation has finished\r\n self.relaxed = None # whether structure is relaxed successfully\r\n\r\n self.cell = None # optimized cell\r\n self.trajectory = None\r\n\r\n\r\n\r\n def get_info(self, outcar, poscar):\r\n \"\"\"\r\n get the general information of opt run from opt run output file\r\n which is now stored in self.lines\r\n \"\"\"\r\n self.outcar = outcar\r\n self.poscar = poscar\r\n with open(self.outcar, 'r') as fout:\r\n self.lines = fout.readlines()\r\n with open(self.poscar, 'r') as fin:\r\n self.lines_poscar = fin.readlines()\r\n # check whether calculation is finished\r\n if len(self.lines[-1].split()) == 4 and self.lines[-1].split()[0] == \"Voluntary\" and self.lines[-1].split()[1] == \"context\":\r\n self.job_done = True\r\n else:\r\n self.job_done = False\r\n # check whether successfully relaxed\r\n self.relaxed = False\r\n for line in self.lines:\r\n if line == \" reached required accuracy - stopping structural energy minimisation\\n\":\r\n self.relaxed = True\r\n break\r\n\r\n self.get_trajectory()\r\n self.get_opt_params_and_run_info()\r\n\r\n\r\n def get_trajectory(self):\r\n \"\"\"\r\n in OUTCAR we can get the atom positions but the element name is not available\r\n so we have to found it in POSCAR through self.lines_poscar\r\n\r\n Note: the cell of each trajectory is also obtained\r\n \"\"\"\r\n\r\n self.trajectory = []\r\n self.cells = []\r\n for i in range(len(self.lines)):\r\n if len(self.lines[i].split()) > 0 and self.lines[i].split()[0] == \"POSITION\" and self.lines[i].split()[1] == \"TOTAL-FORCE\":\r\n atm = []\r\n j = i + 2\r\n while len(self.lines[j].split()) == 6:\r\n atm.append(Atom(\"x\", float(self.lines[j].split()[0]), float(self.lines[j].split()[1]), float(self.lines[j].split()[2])))\r\n j = j + 1\r\n self.trajectory.append(atm)\r\n for i in range(len(self.lines)):\r\n if len(self.lines[i].split()) > 0 and self.lines[i].split()[0] == \"direct\" and self.lines[i].split()[1] == \"lattice\" and len(self.lines[i-4].split()) > 0 and self.lines[i-4].split()[0] == \"VOLUME\":\r\n # note in the begining vasp will output the direct lattice vector of the input structure so that must be excluded, so we check the previous 4 line by i-4 above\r\n cell = []\r\n for j in range(3):\r\n cell.append([float(self.lines[i+1].split()[0]), float(self.lines[i+1].split()[1]), float(self.lines[i+1].split()[2])])\r\n cell.append([float(self.lines[i+2].split()[0]), float(self.lines[i+2].split()[1]), float(self.lines[i+2].split()[2])])\r\n cell.append([float(self.lines[i+3].split()[0]), float(self.lines[i+3].split()[1]), float(self.lines[i+3].split()[2])])\r\n self.cells.append(cell)\r\n # len(self.cells) must equals to len(self.trajectory)\r\n if not len(self.cells) == len(self.trajectory):\r\n print(len(self.cells))\r\n print(len(self.trajectory))\r\n print(\"============================================================\\n\")\r\n print(\" WARNING!!!!!!!\\n\")\r\n print(\"------------------------------------------------------------\\n\")\r\n print(\"post.opt.opt_out.get_trajectory():\\n\")\r\n print(\"length of self.cells and self.trajectory should be equal!!!!\\n\")\r\n print('but they are not 
now! please check it manually\\n')\r\n sys.exit(1)\r\n # set labels for each atom in each image\r\n elements = self.lines_poscar[5].split() # like [\"S\", \"Mo\"]\r\n natom_each_element = self.lines_poscar[6].split() # like [\"2\", \"1\"]\r\n label_list = []\r\n for i in range(len(elements)):\r\n for j in range(int(natom_each_element[i])):\r\n label_list.append(elements[i])\r\n for i in range(len(self.trajectory)):\r\n for j in range(len(label_list)):\r\n self.trajectory[i][j].name = label_list[j]\r\n #\r\n\r\n\r\n def get_opt_params_and_run_info(self):\r\n \"\"\"\r\n run_info[\"iterations\"]: scf iterations per scf step\r\n run_info[\"total_energies\"]: total energies of every scf step\r\n run_info[\"fermi_energies\"]: fermi energies of every scf step\r\n run_info[\"total_forces_rms\"]: total RMS forces of every scf step\r\n \"\"\"\r\n self.run_info[\"iterations\"] = []\r\n self.run_info[\"total_energies\"] = []\r\n self.run_info[\"fermi_energies\"] = []\r\n self.run_info[\"total_forces_rms\"] = []\r\n\r\n for line in self.lines:\r\n # if it is an empty line continue to next line\r\n if len(line.split()) == 0:\r\n continue\r\n if line.split()[0] == \"executed\" and line.split()[1] == \"on\" and line.split()[3] == \"date\":\r\n self.run_info[\"start_time\"] = line.split(\"\\n\")[0]\r\n #if line.split()[0] == \"This\" and line.split()[1] == \"run\" and line.split()[3] == \"terminated\":\r\n # self.run_info[\"stop_time\"] = line.split(\"\\n\")[0]\r\n if line.split()[0] == \"Total\" and line.split()[1] == \"CPU\" and line.split()[2] == \"time\":\r\n self.run_info[\"total_cpu_time\"] = float(line.split()[5]) # in unit of second\r\n if line.split()[0] == \"Elapsed\" and line.split()[1] == \"time\":\r\n self.run_info[\"elapsed_time\"] = float(line.split()[3])\r\n if len(line.split()) == 4 and line.split()[1] == \"Iteration\":\r\n self.run_info[\"iterations\"].append(line)\r\n if line.split()[0] == \"energy\" and line.split()[1] == \"without\" and line.split()[2] == \"entropy=\":\r\n self.run_info[\"total_energies\"].append(float(line.split()[3]))\r\n if line.split()[0] == \"E-fermi\" and line.split()[1] == \":\":\r\n self.run_info[\"fermi_energies\"].append(float(line.split()[2]))\r\n if line.split()[0] == \"FORCES:\" and line.split()[1] == \"max\":\r\n self.run_info[\"total_forces_rms\"].append(float(line.split()[5]))\r\n if line.split()[0] == \"ENCUT\" and line.split()[1] == \"=\":\r\n self.run_params[\"ENCUT\"] = float(line.split()[2])\r\n if line.split()[0] == \"EDIFF\" and line.split()[1] == \"=\":\r\n self.run_params[\"EDIFF\"] = float(line.split()[2])\r\n if line.split()[0] == \"LREAL\" and line.split()[1] == \"=\":\r\n self.run_params[\"LREAL\"] = line.split()[2]\r\n if line.split()[0] == \"EDIFFG\" and line.split()[1] == \"=\":\r\n self.run_params[\"EDIFFG\"] = float(line.split()[2])\r\n if line.split()[0] == \"NSW\" and line.split()[1] == \"=\":\r\n self.run_params[\"NSW\"] = int(line.split()[2])\r\n if line.split()[0] == \"IBRION\" and line.split()[1] == \"=\":\r\n self.run_params[\"IBRION\"] = int(line.split()[2])\r\n if line.split()[0] == \"NFREE\" and line.split()[1] == \"=\":\r\n self.run_params[\"NFREE\"] = int(line.split()[2])\r\n if line.split()[0] == \"ISIF\" and line.split()[1] == \"=\":\r\n self.run_params[\"ISIF\"] = int(line.split()[2])\r\n if line.split()[0] == \"POTIM\" and line.split()[1] == \"=\":\r\n self.run_params[\"POTIM\"] = float(line.split()[2])\r\n if line.split()[0] == \"TEIN\" and line.split()[1] == \"=\":\r\n self.run_params[\"TEIN\"] = 
float(line.split()[2])\r\n if line.split()[0] == \"TEBEG\" and line.split()[1] == \"=\":\r\n self.run_params[\"TEBEG\"] = float(line.split()[2].split(\";\")[0])\r\n if line.split()[0] == \"SMASS\" and line.split()[1] == \"=\":\r\n self.run_params[\"SMASS\"] = float(line.split()[2])\r\n if line.split()[0] == \"PSTRESS=\":\r\n self.run_params[\"PSTRESS\"] = float(line.split()[1])\r\n\r\n #self.run_info[\"scf-cycles\"] = len(self.run_info[\"iterations\"])\r\n #if self.run_type == \"relax\":\r\n # self.run_info[\"ion-steps\"] = len(self.run_info[\"iterations\"]) - 1\r\n #elif self.run_type == \"vc-relax\":\r\n # self.run_info[\"ion-steps\"] = len(self.run_info[\"iterations\"]) - 2\r\n\r\n def print_final_structure(self, xyz=\"optimized.xyz\"):\r\n if self.relaxed == False:\r\n with open(\"final-structure(not-relaxed).xyz\", 'w') as fout:\r\n fout.write(\"%d\\n\" % len(self.trajectory[0]))\r\n fout.write(\"Warning: structure failed to be relaxed or vc-relaxed, this is the final structure(unrelaxed)\\n\")\r\n for atom in self.trajectory[-1]:\r\n fout.write(\"%s\\t%.9f\\t%.9f\\t%.9f\\n\" % (atom.name, atom.x, atom.y, atom.z))\r\n return\r\n # printout relaxed structure\r\n with open(xyz, 'w') as fout:\r\n fout.write(\"%d\\n\" % len(self.trajectory[-1]))\r\n fout.write(\"cell: %f %f %f | %f %f %f | %f %f %f\\n\" % (\r\n self.cells[-1][0][0], self.cells[-1][0][1], self.cells[-1][0][2],\r\n self.cells[-1][1][0], self.cells[-1][1][1], self.cells[-1][1][2],\r\n self.cells[-1][2][0], self.cells[-1][2][1], self.cells[-1][2][2]\r\n ))\r\n for atom in self.trajectory[-1]:\r\n fout.write(\"%s\\t%.9f\\t%.9f\\t%.9f\\n\" % (atom.name, atom.x, atom.y, atom.z))\r\n\r\n def print_trajectory(self, xyz=\"trajectory.xyz\"):\r\n with open(xyz, 'w') as fout:\r\n for i in range(len(self.trajectory)):\r\n fout.write(\"%d\\n\" % len(self.trajectory[i]))\r\n #fout.write(\"i = %d\\n\" % i)\r\n fout.write(\"cell(i=%d): %f %f %f | %f %f %f | %f %f %f\\n\" % (\r\n i,\r\n self.cells[i][0][0], self.cells[i][0][1], self.cells[i][0][2],\r\n self.cells[i][1][0], self.cells[i][1][1], self.cells[i][1][2],\r\n self.cells[i][2][0], self.cells[i][2][1], self.cells[i][2][2]\r\n ))\r\n for atom in self.trajectory[i]:\r\n fout.write(\"%s\\t%.9f\\t%.9f\\t%.9f\\n\" % (atom.name, atom.x, atom.y, atom.z))\r\n\r\n def export(self, directory=\"tmp-vasp-optimization\"):\r\n os.chdir(directory)\r\n os.system(\"mkdir -p post-processing\")\r\n os.chdir(\"post-processing\")\r\n # now we are in post-processing, generate the output and return\r\n self.print_trajectory()\r\n self.print_final_structure()\r\n\r\n #plt.plot(self.run_info[\"self-energies\"])\r\n #plt.title(\"Energy per scf step\")\r\n #plt.xlabel(\"Scf step\")\r\n #plt.ylabel(\"Total energy\")\r\n #plt.tight_layout()\r\n #plt.savefig(\"energy-per-scf-step.png\")\r\n #plt.close()\r\n\r\n with open(\"self-info.md\", 'w', encoding='utf-8') as fout:\r\n fout.write(\"# 几何优化实验统计\\n\")\r\n fout.write(\"几何优化类型: ISIF = %d\\n\" % self.run_params[\"ISIF\"])\r\n fout.write(\"几何优化任务是否结束:%s\\n\" % str(self.job_done))\r\n if self.job_done == True:\r\n fout.write(\"是否成功优化: %s\\n\" % str(self.relaxed))\r\n else:\r\n fout.write(\"是否成功优化: %s\\n\" % (\"运行未结束, 结果未知\"))\r\n fout.write(\"## 离子步参数\\n\")\r\n for item in self.run_params:\r\n fout.write(\"- %s: %s\\n\" % (item, str(self.run_params[item])))\r\n fout.write(\"## 电子步参数\\n\")\r\n for item in self.run_params:\r\n fout.write(\"- %s: %s\\n\" % (item, str(self.run_params[item])))\r\n fout.write(\"## 运行信息\\n\")\r\n # calculate the running time and print it 
out\r\n # Importante: the length of the time string might be different, depending\r\n # on the value of hours and minutes and seconds. if they are two digits\r\n # number, they will be divided like: '11: 6: 2', only when they all are\r\n # two digtis number, they will not be divided '11:16:12'\r\n # so we have to preprocess it to build the right time string to pass into\r\n # datetime.datetime.strptime()\r\n start_str = self.run_info[\"start_time\"].split()[4]+\"-\"+self.run_info[\"start_time\"].split()[5]\r\n if self.job_done == True:\r\n #stop_str = self.run_info[\"stop-time\"].split()[8]+\"-\"+self.run_info[\"stop-time\"].split()[5]+self.run_info[\"stop-time\"].split()[6]+self.run_info[\"stop-time\"].split()[7]\r\n pass\r\n\r\n start = datetime.datetime.strptime(start_str, \"%Y.%m.%d-%H:%M:%S\")\r\n #if self.job_done == True:\r\n # stop = datetime.datetime.strptime(stop_str, \"%d%b%Y-%H:%M:%S\")\r\n # delta_t = stop -start\r\n fout.write(\"- Time consuming:\\n\")\r\n fout.write(\" - job starts at %s\\n\" % start)\r\n fout.write(\" - Elapsed time: %.3f(sec) = %.3f(min) = %.3f(hour)\\n\" % (self.run_info[\"elapsed_time\"], self.run_info[\"elapsed_time\"]/60, self.run_info[\"elapsed_time\"]/3600))\r\n #if self.job_done == True:\r\n # fout.write(\" - totally %.1f seconds, or %.3f minutes or %.5f hours\\n\" % (delta_t.total_seconds(), delta_t.total_seconds()/60, delta_t.total_seconds()/3600))\r\n #else:\r\n # fout.write(\" - job is not finished yet, but it starts at %s\\n\" % start)\r\n # end the time information\r\n for item in self.run_info:\r\n fout.write(\"- %s: %s\\n\" % (item, str(self.run_info[item])))\r\n\r\n fout.write(\"## 运行信息图示\\n\")\r\n fout.write(\"Iterations per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n fout.write(\"Total energies per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n fout.write(\"Fermi energies per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n fout.write(\"Total forces per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n os.chdir(\"../\")\r\n os.chdir(\"../\")\r\n\r\n\r\nclass opt_post:\r\n \"\"\"\r\n Note:\r\n \"\"\"\r\n def __init__(self, output=\"OUTCAR\"):\r\n \"\"\"\r\n output:\r\n \"\"\"\r\n self.file = output\r\n self.electronic_params = {}\r\n self.ionic_params = {}\r\n self.run_info = {}\r\n self.job_done = None # whether calculation has finished\r\n self.relaxed = None # whether structure is relaxed successfully\r\n\r\n self.cell = None # optimized cell\r\n self.trajectory = None\r\n\r\n with open(self.file, 'r') as fout:\r\n self.lines = fout.readlines()\r\n self.get_info()\r\n\r\n def get_info(self):\r\n \"\"\"\r\n get the general information of opt run from opt run output file\r\n which is now stored in self.lines\r\n \"\"\"\r\n # check whether calculation is finished\r\n if len(self.lines[-1].split()) == 4 and self.lines[-1].split()[0] == \"Voluntary\" and self.lines[-1].split()[1] == \"context\":\r\n self.job_done = True\r\n else:\r\n self.job_done = False\r\n # check whether successfully relaxed\r\n self.relaxed = False\r\n for line in self.lines:\r\n if line == \" reached required accuracy - stopping structural energy minimisation\\n\":\r\n self.relaxed = True\r\n break\r\n\r\n self.get_trajectory()\r\n self.get_opt_params_and_run_info()\r\n\r\n\r\n def get_trajectory(self):\r\n self.trajectory = []\r\n for i in range(len(self.lines)):\r\n if len(self.lines[i].split()) > 0 and self.lines[i].split()[0] == \"POSITION\" and self.lines[i].split()[1] == \"TOTAL-FORCE\":\r\n atm = []\r\n j = i + 2\r\n while len(self.lines[j].split()) == 6:\r\n atm.append(Atom(\"x\", 
float(self.lines[j].split()[0]), float(self.lines[j].split()[1]), float(self.lines[j].split()[2])))\r\n j = j + 1\r\n self.trajectory.append(atm)\r\n #\r\n \"\"\"\r\n if self.relaxed == True and self.run_type == \"vc-relax\":\r\n # get the line number of the 'Begin final coordinates'\r\n # and 'End final coordinates'\r\n begin_final_coord_line = 0\r\n end_final_coord_line = 0\r\n while self.lines[begin_final_coord_line] != \"Begin final coordinates\\n\":\r\n begin_final_coord_line += 1\r\n while self.lines[end_final_coord_line] != \"End final coordinates\\n\":\r\n end_final_coord_line += 1\r\n # get the optimized cell\r\n self.cell = []\r\n for i in range(begin_final_coord_line+5, begin_final_coord_line+8):\r\n for j in range(3):\r\n self.cell.append(float(self.lines[i].split()[j]))\r\n #\r\n \"\"\"\r\n\r\n def get_opt_params_and_run_info(self):\r\n \"\"\"\r\n run_info[\"iterations\"]: scf iterations per scf step\r\n run_info[\"total-energies\"]: total energies of every scf step\r\n run_info[\"fermi-energies\"]: fermi energies of every scf step\r\n run_info[\"total-forces-rms\"]: total RMS forces of every scf step\r\n \"\"\"\r\n self.run_info[\"iterations\"] = []\r\n self.run_info[\"total-energies\"] = []\r\n self.run_info[\"fermi-energies\"] = []\r\n self.run_info[\"total-forces-rms\"] = []\r\n\r\n for line in self.lines:\r\n # if it is an empty line continue to next line\r\n if len(line.split()) == 0:\r\n continue\r\n if line.split()[0] == \"executed\" and line.split()[1] == \"on\" and line.split()[3] == \"date\":\r\n self.run_info[\"start-time\"] = line.split(\"\\n\")[0]\r\n #if line.split()[0] == \"This\" and line.split()[1] == \"run\" and line.split()[3] == \"terminated\":\r\n # self.run_info[\"stop-time\"] = line.split(\"\\n\")[0]\r\n if line.split()[0] == \"Total\" and line.split()[1] == \"CPU\" and line.split()[2] == \"time\":\r\n self.run_info[\"total-cpu-time\"] = float(line.split()[5]) # in unit of second\r\n if line.split()[0] == \"Elapsed\" and line.split()[1] == \"time\":\r\n self.run_info[\"elapsed-time\"] = float(line.split()[3])\r\n if len(line.split()) == 4 and line.split()[1] == \"Iteration\":\r\n self.run_info[\"iterations\"].append(line)\r\n if line.split()[0] == \"energy\" and line.split()[1] == \"without\" and line.split()[2] == \"entropy=\":\r\n self.run_info[\"total-energies\"].append(float(line.split()[3]))\r\n if line.split()[0] == \"E-fermi\" and line.split()[1] == \":\":\r\n self.run_info[\"fermi-energies\"].append(float(line.split()[2]))\r\n if line.split()[0] == \"FORCES:\" and line.split()[1] == \"max\":\r\n self.run_info[\"total-forces-rms\"].append(float(line.split()[5]))\r\n if line.split()[0] == \"ENCUT\" and line.split()[1] == \"=\":\r\n self.electronic_params[\"ENCUT\"] = float(line.split()[2])\r\n if line.split()[0] == \"EDIFF\" and line.split()[1] == \"=\":\r\n self.electronic_params[\"EDIFF\"] = float(line.split()[2])\r\n if line.split()[0] == \"LREAL\" and line.split()[1] == \"=\":\r\n self.electronic_params[\"LREAL\"] = line.split()[2]\r\n if line.split()[0] == \"EDIFFG\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"EDIFFG\"] = float(line.split()[2])\r\n if line.split()[0] == \"NSW\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"NSW\"] = int(line.split()[2])\r\n if line.split()[0] == \"IBRION\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"IBRION\"] = int(line.split()[2])\r\n if line.split()[0] == \"NFREE\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"NFREE\"] = int(line.split()[2])\r\n if line.split()[0] == 
\"ISIF\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"ISIF\"] = int(line.split()[2])\r\n if line.split()[0] == \"POTIM\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"POTIM\"] = float(line.split()[2])\r\n if line.split()[0] == \"TEIN\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"TEIN\"] = float(line.split()[2])\r\n if line.split()[0] == \"TEBEG\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"TEBEG\"] = float(line.split()[2].split(\";\")[0])\r\n if line.split()[0] == \"SMASS\" and line.split()[1] == \"=\":\r\n self.ionic_params[\"SMASS\"] = float(line.split()[2])\r\n if line.split()[0] == \"PSTRESS=\":\r\n self.ionic_params[\"PSTRESS\"] = float(line.split()[1])\r\n\r\n #self.run_info[\"scf-cycles\"] = len(self.run_info[\"iterations\"])\r\n #if self.run_type == \"relax\":\r\n # self.run_info[\"ion-steps\"] = len(self.run_info[\"iterations\"]) - 1\r\n #elif self.run_type == \"vc-relax\":\r\n # self.run_info[\"ion-steps\"] = len(self.run_info[\"iterations\"]) - 2\r\n\r\n def print_final_structure(self, xyz=\"optimized.xyz\"):\r\n if self.relaxed == False:\r\n with open(\"final-structure(not-relaxed).xyz\", 'w') as fout:\r\n fout.write(\"%d\\n\" % len(self.trajectory[0]))\r\n fout.write(\"Warning: structure failed to be relaxed or vc-relaxed, this is the final structure(unrelaxed)\\n\")\r\n for atom in self.trajectory[-1]:\r\n fout.write(\"%s\\t%.9f\\t%.9f\\t%.9f\\n\" % (atom.name, atom.x, atom.y, atom.z))\r\n return\r\n # printout relaxed structure\r\n with open(xyz, 'w') as fout:\r\n fout.write(\"%d\\n\" % len(self.trajectory[-1]))\r\n fout.write(\"cell:\\n\")\r\n for atom in self.trajectory[-1]:\r\n fout.write(\"%s\\t%.9f\\t%.9f\\t%.9f\\n\" % (atom.name, atom.x, atom.y, atom.z))\r\n\r\n def print_trajectory(self, xyz=\"trajectory.xyz\"):\r\n with open(xyz, 'w') as fout:\r\n for i in range(len(self.trajectory)):\r\n fout.write(\"%d\\n\" % len(self.trajectory[i]))\r\n fout.write(\"i = %d\\n\" % i)\r\n for atom in self.trajectory[i]:\r\n fout.write(\"%s\\t%.9f\\t%.9f\\t%.9f\\n\" % (atom.name, atom.x, atom.y, atom.z))\r\n\r\n def view_trajectory(self, trajfile=\"trajectory.xyz\"):\r\n #os.system(\"xcrysden --xyz %s\" % trajfile)\r\n subprocess.call([\"xcrysden\", \"--xyz\", trajfile])\r\n\r\n def plot_run_info(self):\r\n \"\"\"\r\n \"\"\"\r\n #plt.plot(self.run_info[\"iterations\"])\r\n #plt.title(\"Iterations per SCF\")\r\n #plt.xlabel(\"Scf cycles\")\r\n #plt.ylabel(\"iterations\")\r\n #plt.tight_layout()\r\n #plt.savefig(\"iterations-per-scf.png\")\r\n #plt.close()\r\n\r\n plt.plot(self.run_info[\"total-energies\"])\r\n plt.title(\"Total energies per SCF\")\r\n plt.xlabel(\"Scf cycles\")\r\n plt.ylabel(\"Total Energies (eV)\")\r\n plt.tight_layout()\r\n plt.savefig(\"total-energies-per-scf.png\")\r\n plt.close()\r\n\r\n plt.plot(self.run_info[\"fermi-energies\"])\r\n plt.title(\"Fermi energies per SCF\")\r\n plt.xlabel(\"Scf cycles\")\r\n plt.ylabel(\"Fermi energies (eV)\")\r\n plt.tight_layout()\r\n plt.savefig(\"fermi-energies-per-scf.png\")\r\n plt.close()\r\n\r\n plt.plot(self.run_info[\"total-forces-rms\"])\r\n plt.title(\"Total forces(RMS) per SCF\")\r\n plt.xlabel(\"Scf cycles\")\r\n plt.ylabel(\"Total forces (eV/Angst)\")\r\n plt.tight_layout()\r\n plt.savefig(\"total-forces-rms-per-scf.png\")\r\n plt.close()\r\n\r\n def markdown_report(self, md=\"OptimizationReport.md\"):\r\n \"\"\"\r\n when writing Chinese to a file you must specify\r\n encoding='utf-8' when open the file for writing\r\n \"\"\"\r\n with open(md, 'w', encoding='utf-8') as fout:\r\n 
fout.write(\"# 几何优化实验统计\\n\")\r\n fout.write(\"几何优化类型: ISIF = %d\\n\" % self.ionic_params[\"ISIF\"])\r\n fout.write(\"几何优化任务是否结束:%s\\n\" % str(self.job_done))\r\n if self.job_done == True:\r\n fout.write(\"是否成功优化: %s\\n\" % str(self.relaxed))\r\n else:\r\n fout.write(\"是否成功优化: %s\\n\" % (\"运行未结束, 结果未知\"))\r\n fout.write(\"## 离子步参数\\n\")\r\n for item in self.ionic_params:\r\n fout.write(\"- %s: %s\\n\" % (item, str(self.ionic_params[item])))\r\n fout.write(\"## 电子步参数\\n\")\r\n for item in self.electronic_params:\r\n fout.write(\"- %s: %s\\n\" % (item, str(self.electronic_params[item])))\r\n fout.write(\"## 运行信息\\n\")\r\n # calculate the running time and print it out\r\n # Importante: the length of the time string might be different, depending\r\n # on the value of hours and minutes and seconds. if they are two digits\r\n # number, they will be divided like: '11: 6: 2', only when they all are\r\n # two digtis number, they will not be divided '11:16:12'\r\n # so we have to preprocess it to build the right time string to pass into\r\n # datetime.datetime.strptime()\r\n start_str = self.run_info[\"start-time\"].split()[4]+\"-\"+self.run_info[\"start-time\"].split()[5]\r\n if self.job_done == True:\r\n #stop_str = self.run_info[\"stop-time\"].split()[8]+\"-\"+self.run_info[\"stop-time\"].split()[5]+self.run_info[\"stop-time\"].split()[6]+self.run_info[\"stop-time\"].split()[7]\r\n pass\r\n\r\n start = datetime.datetime.strptime(start_str, \"%Y.%m.%d-%H:%M:%S\")\r\n #if self.job_done == True:\r\n # stop = datetime.datetime.strptime(stop_str, \"%d%b%Y-%H:%M:%S\")\r\n # delta_t = stop -start\r\n fout.write(\"- Time consuming:\\n\")\r\n fout.write(\" - job starts at %s\\n\" % start)\r\n fout.write(\" - Elapsed time: %.3f(sec) = %.3f(min) = %.3f(hour)\\n\" % (self.run_info[\"elapsed-time\"], self.run_info[\"elapsed-time\"]/60, self.run_info[\"elapsed-time\"]/3600))\r\n #if self.job_done == True:\r\n # fout.write(\" - totally %.1f seconds, or %.3f minutes or %.5f hours\\n\" % (delta_t.total_seconds(), delta_t.total_seconds()/60, delta_t.total_seconds()/3600))\r\n #else:\r\n # fout.write(\" - job is not finished yet, but it starts at %s\\n\" % start)\r\n # end the time information\r\n for item in self.run_info:\r\n fout.write(\"- %s: %s\\n\" % (item, str(self.run_info[item])))\r\n\r\n fout.write(\"## 运行信息图示\\n\")\r\n fout.write(\"Iterations per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n fout.write(\"Total energies per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n fout.write(\"Fermi energies per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n fout.write(\"Total forces per SCF\\n\")\r\n fout.write(\"\\n\")\r\n\r\n\r\n def export(self):\r\n \"\"\"\r\n Note:\r\n * will only printout the final structure if the job is done\r\n \"\"\"\r\n if self.job_done == True:\r\n self.print_final_structure()\r\n self.print_trajectory()\r\n self.plot_run_info()\r\n self.markdown_report(\"OptimizationReport.md\")\r\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OverHall27/Gasyori100knock | [
"341c528eb4c0789034898ee1f7d0a4b2f8b23eff"
] | [
"Question_11_20/myans/myans_18.py"
] | [
"import cv2\nimport numpy as np\n\ndef BGRGRAY(_img):\n\n img = np.zeros((_img.shape[0], _img.shape[1]), dtype=np.float32)\n img = _img[:,:,2].copy() * 0.2126 + _img[:,:,1].copy() * 0.7152 + _img[:,:,0].copy() * 0.0722\n\n return img.astype(np.uint8)\n\ndef EmbossFilter(img, K_size=3):\n\n Hol, Ver = img.shape\n pad = K_size // 2\n\n result = np.zeros((Hol + pad * 2, Ver + pad * 2), dtype=np.float)\n tmp = result.copy()\n tmp[pad: pad + Hol, pad: pad + Ver] = img.copy().astype(np.float)\n\n ##create filter\n kernel = [[-2., -1., 0.], [-1, 1., 1.], [0., 1., 2.]]\n\n for x in range(Hol):\n for y in range(Ver):\n result[pad + x, pad + y] = np.sum(tmp[x: x + K_size, y: y + K_size] * kernel)\n\n result = np.clip(result, 0, 255)\n result = result[pad: pad + Hol, pad: pad + Ver].astype(np.uint8)\n\n return result\n\n\nimg = cv2.imread(\"imori.jpg\")\n\ngray_img = BGRGRAY(img)\nresult = EmbossFilter(gray_img, K_size=3)\ncv2.imwrite(\"myans_18.jpg\", result)\n"
] | [
[
"numpy.zeros",
"numpy.sum",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ParikhKadam/google-research | [
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f"
] | [
"non_decomp/models.py",
"vatt/utils/train/optimizers.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"CIFAR ResNet-32 model definition.\"\"\"\n\nimport tensorflow as tf\n\n_L2_WEIGHT_DECAY = 1e-4\n_BATCH_NORM_DECAY = 0.9\n_BATCH_NORM_EPSILON = 1e-5\n\nlayers = tf.keras.layers\n\n\ndef _gen_l2_regularizer(use_l2_regularizer=True):\n return tf.keras.regularizers.l2(\n _L2_WEIGHT_DECAY) if use_l2_regularizer else None\n\n\ndef _identity_block(input_tensor,\n kernel_size,\n filters,\n stage,\n block,\n use_l2_regularizer=True,\n batch_norm_decay=0.9,\n batch_norm_epsilon=1e-5):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n\n Forked from\n tensorflow_models.official.legacy.image_classification.resnet.resnet_model.\n\n Args:\n input_tensor: input tensor.\n kernel_size: default 3, the kernel size of middle conv layer at main path.\n filters: list of integers, the filters of 3 conv layer at main path.\n stage: integer, current stage label, used for generating layer names.\n block: 'a','b'..., current block label, used for generating layer names.\n use_l2_regularizer: whether to use L2 regularizer on Conv layer.\n batch_norm_decay: Moment of batch norm layers.\n batch_norm_epsilon: Epsilon of batch borm layers.\n\n Returns:\n Output tensor for the block.\n \"\"\"\n filters1, filters2, filters3 = filters\n if tf.keras.backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = layers.Conv2D(\n filters1, (1, 1),\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '2a')(\n input_tensor)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '2a')(\n x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(\n filters2,\n kernel_size,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '2b')(\n x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '2b')(\n x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(\n filters3, (1, 1),\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '2c')(\n x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '2c')(\n x)\n\n x = layers.add([x, input_tensor])\n x = layers.Activation('relu')(x)\n return x\n\n\ndef _conv_block(input_tensor,\n kernel_size,\n filters,\n stage,\n block,\n strides=(2, 2),\n use_l2_regularizer=True,\n batch_norm_decay=0.9,\n batch_norm_epsilon=1e-5):\n \"\"\"A block that has a conv layer at shortcut.\n\n Forked from\n 
tensorflow_models.official.legacy.image_classification.resnet.resnet_model.\n\n Note that from stage 3,\n the second conv layer at main path is with strides=(2, 2)\n And the shortcut should have strides=(2, 2) as well\n\n Args:\n input_tensor: input tensor.\n kernel_size: default 3, the kernel size of middle conv layer at main path.\n filters: list of integers, the filters of 3 conv layer at main path.\n stage: integer, current stage label, used for generating layer names.\n block: 'a','b'..., current block label, used for generating layer names.\n strides: Strides for the second conv layer in the block.\n use_l2_regularizer: whether to use L2 regularizer on Conv layer.\n batch_norm_decay: Moment of batch norm layers.\n batch_norm_epsilon: Epsilon of batch borm layers.\n\n Returns:\n Output tensor for the block.\n \"\"\"\n filters1, filters2, filters3 = filters\n if tf.keras.backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = layers.Conv2D(\n filters1, (1, 1),\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '2a')(\n input_tensor)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '2a')(\n x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(\n filters2,\n kernel_size,\n strides=strides,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '2b')(\n x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '2b')(\n x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(\n filters3, (1, 1),\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '2c')(\n x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '2c')(\n x)\n\n shortcut = layers.Conv2D(\n filters3, (1, 1),\n strides=strides,\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name=conv_name_base + '1')(\n input_tensor)\n shortcut = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name=bn_name_base + '1')(\n shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x\n\n\ndef _cifar_resnet(input_shape, num_classes, config, use_l2_regularizer=True):\n \"\"\"Instantiates the ResNet architecture.\n\n Args:\n input_shape: Tensor size of the image input.\n num_classes: Number of classes for image classification.\n config: Config of the network.\n use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.\n\n Returns:\n A Keras model instance.\n \"\"\"\n\n img_input = layers.Input(shape=input_shape)\n x = img_input\n\n # channels_last\n bn_axis = 3\n\n x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)\n x = layers.Conv2D(\n 16, (3, 3),\n strides=(1, 1),\n padding='valid',\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name='conv1')(\n x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=_BATCH_NORM_DECAY,\n 
epsilon=_BATCH_NORM_EPSILON,\n name='bn_conv1')(\n x)\n x = layers.Activation('relu')(x)\n\n for stage, (n_layers, n_filters, stride) in enumerate(config):\n x = _conv_block(\n x,\n 3, [n_filters, n_filters, n_filters],\n stage=stage + 2,\n block='a',\n strides=(stride, stride),\n use_l2_regularizer=use_l2_regularizer)\n for i in range(n_layers - 1):\n x = _identity_block(\n x,\n 3, [n_filters, n_filters, n_filters],\n stage=stage + 2,\n block='bcdefghijklm'[i],\n use_l2_regularizer=use_l2_regularizer)\n\n x = layers.BatchNormalization(\n axis=3, momentum=0.9, epsilon=1e-5, name='final_bn')(\n x)\n\n rm_axes = [1, 2]\n x = layers.Lambda(\n lambda x: tf.keras.backend.mean(x, rm_axes), name='reduce_mean')(\n x)\n x = layers.Dense(\n num_classes,\n activation=None,\n kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name='fc10')(\n x)\n\n return tf.keras.models.Model(img_input, x, name='resnet')\n\n\ndef cifar_resnet32(num_classes):\n \"\"\"Returns CIFAR ResNet-32 for the given number of classes.\"\"\"\n return _cifar_resnet(\n input_shape=(32, 32, 3),\n num_classes=num_classes,\n config=[(5, 16, 1), (5, 32, 2), (5, 64, 2)],\n use_l2_regularizer=True)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Define metric factory.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_addons import optimizers as tfa_optimizers\n\nfrom vatt.configs import experiment\n\n\ndef get_optimizer(learning_rate, config):\n \"\"\"Returns the optimizer of choice given the configurations.\"\"\"\n\n if isinstance(config, experiment.MomentumOptimizer):\n optimizer = tf.keras.optimizers.SGD(\n learning_rate=learning_rate,\n momentum=config.momentum,\n nesterov=config.nesterov\n )\n elif isinstance(config, experiment.MomentumWOptimizer):\n optimizer = tfa_optimizers.SGDW(\n weight_decay=config.weight_decay,\n learning_rate=learning_rate,\n momentum=config.momentum,\n nesterov=config.nesterov\n )\n elif isinstance(config, experiment.AdamOptimizer):\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=learning_rate,\n beta_1=config.beta_1,\n beta_2=config.beta_2,\n epsilon=config.epsilon,\n )\n elif isinstance(config, experiment.AdamWOptimizer):\n optimizer = tfa_optimizers.AdamW(\n weight_decay=config.weight_decay,\n learning_rate=learning_rate,\n beta_1=config.beta_1,\n beta_2=config.beta_2,\n epsilon=config.epsilon,\n )\n else:\n optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)\n return optimizer\n"
] | [
[
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.models.Model",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.backend.mean",
"tensorflow.keras.initializers.RandomNormal"
],
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.optimizers.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
jayvdb/astropy | [
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f",
"bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f"
] | [
"docs/wcs/examples/from_file.py",
"astropy/table/tests/test_groups.py",
"astropy/conftest.py",
"astropy/modeling/tests/test_quantities_rotations.py",
"astropy/io/tests/test_registry.py",
"astropy/timeseries/sampled.py",
"astropy/coordinates/tests/test_representation_arithmetic.py",
"astropy/nddata/tests/test_nduncertainty.py"
] | [
"# Load the WCS information from a fits header, and use it\n# to convert pixel coordinates to world coordinates.\n\nimport numpy as np\nfrom astropy import wcs\nfrom astropy.io import fits\nimport sys\n\n\ndef load_wcs_from_file(filename):\n # Load the FITS hdulist using astropy.io.fits\n hdulist = fits.open(filename)\n\n # Parse the WCS keywords in the primary HDU\n w = wcs.WCS(hdulist[0].header)\n\n # Print out the \"name\" of the WCS, as defined in the FITS header\n print(w.wcs.name)\n\n # Print out all of the settings that were parsed from the header\n w.wcs.print_contents()\n\n # Three pixel coordinates of interest.\n # Note we've silently assumed an NAXIS=2 image here.\n # The pixel coordinates are pairs of [X, Y].\n # The \"origin\" argument indicates whether the input coordinates\n # are 0-based (as in Numpy arrays) or\n # 1-based (as in the FITS convention, for example coordinates\n # coming from DS9).\n pixcrd = np.array([[0, 0], [24, 38], [45, 98]], dtype=np.float64)\n\n # Convert pixel coordinates to world coordinates\n # The second argument is \"origin\" -- in this case we're declaring we\n # have 0-based (Numpy-like) coordinates.\n world = w.wcs_pix2world(pixcrd, 0)\n print(world)\n\n # Convert the same coordinates back to pixel coordinates.\n pixcrd2 = w.wcs_world2pix(world, 0)\n print(pixcrd2)\n\n # These should be the same as the original pixel coordinates, modulo\n # some floating-point error.\n assert np.max(np.abs(pixcrd - pixcrd2)) < 1e-6\n\n # The example below illustrates the use of \"origin\" to convert between\n # 0- and 1- based coordinates when executing the forward and backward\n # WCS transform.\n x = 0\n y = 0\n origin = 0\n assert (w.wcs_pix2world(x, y, origin) ==\n w.wcs_pix2world(x + 1, y + 1, origin + 1))\n\n\nif __name__ == '__main__':\n load_wcs_from_file(sys.argv[-1])\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport pytest\nimport numpy as np\n\nfrom astropy.tests.helper import catch_warnings\nfrom astropy.table import Table, Column, QTable, table_helpers, NdarrayMixin, unique\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom astropy import time\nfrom astropy import units as u\nfrom astropy import coordinates\n\n\ndef sort_eq(list1, list2):\n return sorted(list1) == sorted(list2)\n\n\ndef test_column_group_by(T1):\n for masked in (False, True):\n t1 = Table(T1, masked=masked)\n t1a = t1['a'].copy()\n\n # Group by a Column (i.e. numpy array)\n t1ag = t1a.group_by(t1['a'])\n assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8]))\n\n # Group by a Table\n t1ag = t1a.group_by(t1['a', 'b'])\n assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))\n\n # Group by a numpy structured array\n t1ag = t1a.group_by(t1['a', 'b'].as_array())\n assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))\n\n\ndef test_table_group_by(T1):\n \"\"\"\n Test basic table group_by functionality for possible key types and for\n masked/unmasked tables.\n \"\"\"\n for masked in (False, True):\n t1 = Table(T1, masked=masked)\n # Group by a single column key specified by name\n tg = t1.group_by('a')\n assert np.all(tg.groups.indices == np.array([0, 1, 4, 8]))\n assert str(tg.groups) == \"<TableGroups indices=[0 1 4 8]>\"\n assert str(tg['a'].groups) == \"<ColumnGroups indices=[0 1 4 8]>\"\n\n # Sorted by 'a' and in original order for rest\n assert tg.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 1 b 3.0 5',\n ' 1 a 2.0 6',\n ' 1 a 1.0 7',\n ' 2 c 7.0 0',\n ' 2 b 5.0 1',\n ' 2 b 6.0 2',\n ' 2 a 4.0 3']\n assert tg.meta['ta'] == 1\n assert tg['c'].meta['a'] == 1\n assert tg['c'].description == 'column c'\n\n # Group by a table column\n tg2 = t1.group_by(t1['a'])\n assert tg.pformat() == tg2.pformat()\n\n # Group by two columns spec'd by name\n for keys in (['a', 'b'], ('a', 'b')):\n tg = t1.group_by(keys)\n assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8]))\n # Sorted by 'a', 'b' and in original order for rest\n assert tg.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 1 a 2.0 6',\n ' 1 a 1.0 7',\n ' 1 b 3.0 5',\n ' 2 a 4.0 3',\n ' 2 b 5.0 1',\n ' 2 b 6.0 2',\n ' 2 c 7.0 0']\n\n # Group by a Table\n tg2 = t1.group_by(t1['a', 'b'])\n assert tg.pformat() == tg2.pformat()\n\n # Group by a structured array\n tg2 = t1.group_by(t1['a', 'b'].as_array())\n assert tg.pformat() == tg2.pformat()\n\n # Group by a simple ndarray\n tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0]))\n assert np.all(tg.groups.indices == np.array([0, 4, 7, 8]))\n assert tg.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 2 c 7.0 0',\n ' 2 b 6.0 2',\n ' 1 a 2.0 6',\n ' 1 a 1.0 7',\n ' 2 b 5.0 1',\n ' 2 a 4.0 3',\n ' 1 b 3.0 5',\n ' 0 a 0.0 4']\n\n\ndef test_groups_keys(T1):\n tg = T1.group_by('a')\n keys = tg.groups.keys\n assert keys.dtype.names == ('a',)\n assert np.all(keys['a'] == np.array([0, 1, 2]))\n\n tg = T1.group_by(['a', 'b'])\n keys = tg.groups.keys\n assert keys.dtype.names == ('a', 'b')\n assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2]))\n assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c']))\n\n # Grouping by Column ignores column name\n tg = T1.group_by(T1['b'])\n keys = tg.groups.keys\n assert keys.dtype.names is None\n\n\ndef test_groups_iterator(T1):\n tg = T1.group_by('a')\n for ii, group in enumerate(tg.groups):\n assert group.pformat() == 
tg.groups[ii].pformat()\n assert group['a'][0] == tg['a'][tg.groups.indices[ii]]\n\n\ndef test_grouped_copy(T1):\n \"\"\"\n Test that copying a table or column copies the groups properly\n \"\"\"\n for masked in (False, True):\n t1 = Table(T1, masked=masked)\n tg = t1.group_by('a')\n tgc = tg.copy()\n assert np.all(tgc.groups.indices == tg.groups.indices)\n assert np.all(tgc.groups.keys == tg.groups.keys)\n\n tac = tg['a'].copy()\n assert np.all(tac.groups.indices == tg['a'].groups.indices)\n\n c1 = t1['a'].copy()\n gc1 = c1.group_by(t1['a'])\n gc1c = gc1.copy()\n assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))\n\n\ndef test_grouped_slicing(T1):\n \"\"\"\n Test that slicing a table removes previous grouping\n \"\"\"\n\n for masked in (False, True):\n t1 = Table(T1, masked=masked)\n\n # Regular slice of a table\n tg = t1.group_by('a')\n tg2 = tg[3:5]\n assert np.all(tg2.groups.indices == np.array([0, len(tg2)]))\n assert tg2.groups.keys is None\n\n\ndef test_group_column_from_table(T1):\n \"\"\"\n Group a column that is part of a table\n \"\"\"\n cg = T1['c'].group_by(np.array(T1['a']))\n assert np.all(cg.groups.keys == np.array([0, 1, 2]))\n assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))\n\n\ndef test_table_groups_mask_index(T1):\n \"\"\"\n Use boolean mask as item in __getitem__ for groups\n \"\"\"\n for masked in (False, True):\n t1 = Table(T1, masked=masked).group_by('a')\n\n t2 = t1.groups[np.array([True, False, True])]\n assert len(t2.groups) == 2\n assert t2.groups[0].pformat() == t1.groups[0].pformat()\n assert t2.groups[1].pformat() == t1.groups[2].pformat()\n assert np.all(t2.groups.keys['a'] == np.array([0, 2]))\n\n\ndef test_table_groups_array_index(T1):\n \"\"\"\n Use numpy array as item in __getitem__ for groups\n \"\"\"\n for masked in (False, True):\n t1 = Table(T1, masked=masked).group_by('a')\n\n t2 = t1.groups[np.array([0, 2])]\n assert len(t2.groups) == 2\n assert t2.groups[0].pformat() == t1.groups[0].pformat()\n assert t2.groups[1].pformat() == t1.groups[2].pformat()\n assert np.all(t2.groups.keys['a'] == np.array([0, 2]))\n\n\ndef test_table_groups_slicing(T1):\n \"\"\"\n Test that slicing table groups works\n \"\"\"\n\n for masked in (False, True):\n t1 = Table(T1, masked=masked).group_by('a')\n\n # slice(0, 2)\n t2 = t1.groups[0:2]\n assert len(t2.groups) == 2\n assert t2.groups[0].pformat() == t1.groups[0].pformat()\n assert t2.groups[1].pformat() == t1.groups[1].pformat()\n assert np.all(t2.groups.keys['a'] == np.array([0, 1]))\n\n # slice(1, 2)\n t2 = t1.groups[1:2]\n assert len(t2.groups) == 1\n assert t2.groups[0].pformat() == t1.groups[1].pformat()\n assert np.all(t2.groups.keys['a'] == np.array([1]))\n\n # slice(0, 3, 2)\n t2 = t1.groups[0:3:2]\n assert len(t2.groups) == 2\n assert t2.groups[0].pformat() == t1.groups[0].pformat()\n assert t2.groups[1].pformat() == t1.groups[2].pformat()\n assert np.all(t2.groups.keys['a'] == np.array([0, 2]))\n\n\ndef test_grouped_item_access(T1):\n \"\"\"\n Test that column slicing preserves grouping\n \"\"\"\n for masked in (False, True):\n t1 = Table(T1, masked=masked)\n\n # Regular slice of a table\n tg = t1.group_by('a')\n tgs = tg['a', 'c', 'd']\n assert np.all(tgs.groups.keys == tg.groups.keys)\n assert np.all(tgs.groups.indices == tg.groups.indices)\n tgsa = tgs.groups.aggregate(np.sum)\n assert tgsa.pformat() == [' a c d ',\n '--- ---- ---',\n ' 0 0.0 4',\n ' 1 6.0 18',\n ' 2 22.0 6']\n\n tgs = tg['c', 'd']\n assert np.all(tgs.groups.keys == tg.groups.keys)\n assert np.all(tgs.groups.indices 
== tg.groups.indices)\n tgsa = tgs.groups.aggregate(np.sum)\n assert tgsa.pformat() == [' c d ',\n '---- ---',\n ' 0.0 4',\n ' 6.0 18',\n '22.0 6']\n\n\ndef test_mutable_operations(T1):\n \"\"\"\n Operations like adding or deleting a row should removing grouping,\n but adding or removing or renaming a column should retain grouping.\n \"\"\"\n for masked in (False, True):\n t1 = Table(T1, masked=masked)\n\n # add row\n tg = t1.group_by('a')\n tg.add_row((0, 'a', 3.0, 4))\n assert np.all(tg.groups.indices == np.array([0, len(tg)]))\n assert tg.groups.keys is None\n\n # remove row\n tg = t1.group_by('a')\n tg.remove_row(4)\n assert np.all(tg.groups.indices == np.array([0, len(tg)]))\n assert tg.groups.keys is None\n\n # add column\n tg = t1.group_by('a')\n indices = tg.groups.indices.copy()\n tg.add_column(Column(name='e', data=np.arange(len(tg))))\n assert np.all(tg.groups.indices == indices)\n assert np.all(tg['e'].groups.indices == indices)\n assert np.all(tg['e'].groups.keys == tg.groups.keys)\n\n # remove column (not key column)\n tg = t1.group_by('a')\n tg.remove_column('b')\n assert np.all(tg.groups.indices == indices)\n # Still has original key col names\n assert tg.groups.keys.dtype.names == ('a',)\n assert np.all(tg['a'].groups.indices == indices)\n\n # remove key column\n tg = t1.group_by('a')\n tg.remove_column('a')\n assert np.all(tg.groups.indices == indices)\n assert tg.groups.keys.dtype.names == ('a',)\n assert np.all(tg['b'].groups.indices == indices)\n\n # rename key column\n tg = t1.group_by('a')\n tg.rename_column('a', 'aa')\n assert np.all(tg.groups.indices == indices)\n assert tg.groups.keys.dtype.names == ('a',)\n assert np.all(tg['aa'].groups.indices == indices)\n\n\ndef test_group_by_masked(T1):\n t1m = Table(T1, masked=True)\n t1m['c'].mask[4] = True\n t1m['d'].mask[5] = True\n assert t1m.group_by('a').pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 0 a -- 4',\n ' 1 b 3.0 --',\n ' 1 a 2.0 6',\n ' 1 a 1.0 7',\n ' 2 c 7.0 0',\n ' 2 b 5.0 1',\n ' 2 b 6.0 2',\n ' 2 a 4.0 3']\n\n\ndef test_group_by_errors(T1):\n \"\"\"\n Appropriate errors get raised.\n \"\"\"\n # Bad column name as string\n with pytest.raises(ValueError):\n T1.group_by('f')\n\n # Bad column names in list\n with pytest.raises(ValueError):\n T1.group_by(['f', 'g'])\n\n # Wrong length array\n with pytest.raises(ValueError):\n T1.group_by(np.array([1, 2]))\n\n # Wrong type\n with pytest.raises(TypeError):\n T1.group_by(None)\n\n # Masked key column\n t1 = Table(T1, masked=True)\n t1['a'].mask[4] = True\n with pytest.raises(ValueError):\n t1.group_by('a')\n\n\ndef test_groups_keys_meta(T1):\n \"\"\"\n Make sure the keys meta['grouped_by_table_cols'] is working.\n \"\"\"\n # Group by column in this table\n tg = T1.group_by('a')\n assert tg.groups.keys.meta['grouped_by_table_cols'] is True\n assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True\n assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True\n assert (tg['d'].groups[np.array([False, True, True])]\n .groups.keys.meta['grouped_by_table_cols'] is True)\n\n # Group by external Table\n tg = T1.group_by(T1['a', 'b'])\n assert tg.groups.keys.meta['grouped_by_table_cols'] is False\n assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False\n assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False\n\n # Group by external numpy array\n tg = T1.group_by(T1['a', 'b'].as_array())\n assert not hasattr(tg.groups.keys, 'meta')\n assert not hasattr(tg['c'].groups.keys, 'meta')\n\n # Group by Column\n tg = 
T1.group_by(T1['a'])\n assert 'grouped_by_table_cols' not in tg.groups.keys.meta\n assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta\n\n\ndef test_table_aggregate(T1):\n \"\"\"\n Aggregate a table\n \"\"\"\n # Table with only summable cols\n t1 = T1['a', 'c', 'd']\n tg = t1.group_by('a')\n tga = tg.groups.aggregate(np.sum)\n assert tga.pformat() == [' a c d ',\n '--- ---- ---',\n ' 0 0.0 4',\n ' 1 6.0 18',\n ' 2 22.0 6']\n # Reverts to default groups\n assert np.all(tga.groups.indices == np.array([0, 3]))\n assert tga.groups.keys is None\n\n # metadata survives\n assert tga.meta['ta'] == 1\n assert tga['c'].meta['a'] == 1\n assert tga['c'].description == 'column c'\n\n # Aggregate with np.sum with masked elements. This results\n # in one group with no elements, hence a nan result and conversion\n # to float for the 'd' column.\n t1m = Table(t1, masked=True)\n t1m['c'].mask[4:6] = True\n t1m['d'].mask[4:6] = True\n tg = t1m.group_by('a')\n with catch_warnings(Warning) as warning_lines:\n tga = tg.groups.aggregate(np.sum)\n assert warning_lines[0].category == UserWarning\n assert \"converting a masked element to nan\" in str(warning_lines[0].message)\n\n assert tga.pformat() == [' a c d ',\n '--- ---- ----',\n ' 0 nan nan',\n ' 1 3.0 13.0',\n ' 2 22.0 6.0']\n\n # Aggregrate with np.sum with masked elements, but where every\n # group has at least one remaining (unmasked) element. Then\n # the int column stays as an int.\n t1m = Table(t1, masked=True)\n t1m['c'].mask[5] = True\n t1m['d'].mask[5] = True\n tg = t1m.group_by('a')\n tga = tg.groups.aggregate(np.sum)\n assert tga.pformat() == [' a c d ',\n '--- ---- ---',\n ' 0 0.0 4',\n ' 1 3.0 13',\n ' 2 22.0 6']\n\n # Aggregate with a column type that cannot by supplied to the aggregating\n # function. 
This raises a warning but still works.\n tg = T1.group_by('a')\n with catch_warnings(Warning) as warning_lines:\n tga = tg.groups.aggregate(np.sum)\n assert warning_lines[0].category == AstropyUserWarning\n assert \"Cannot aggregate column\" in str(warning_lines[0].message)\n assert tga.pformat() == [' a c d ',\n '--- ---- ---',\n ' 0 0.0 4',\n ' 1 6.0 18',\n ' 2 22.0 6']\n\n\ndef test_table_aggregate_reduceat(T1):\n \"\"\"\n Aggregate table with functions which have a reduceat method\n \"\"\"\n # Comparison functions without reduceat\n def np_mean(x):\n return np.mean(x)\n\n def np_sum(x):\n return np.sum(x)\n\n def np_add(x):\n return np.add(x)\n\n # Table with only summable cols\n t1 = T1['a', 'c', 'd']\n tg = t1.group_by('a')\n # Comparison\n tga_r = tg.groups.aggregate(np.sum)\n tga_a = tg.groups.aggregate(np.add)\n tga_n = tg.groups.aggregate(np_sum)\n\n assert np.all(tga_r == tga_n)\n assert np.all(tga_a == tga_n)\n assert tga_n.pformat() == [' a c d ',\n '--- ---- ---',\n ' 0 0.0 4',\n ' 1 6.0 18',\n ' 2 22.0 6']\n\n tga_r = tg.groups.aggregate(np.mean)\n tga_n = tg.groups.aggregate(np_mean)\n assert np.all(tga_r == tga_n)\n assert tga_n.pformat() == [' a c d ',\n '--- --- ---',\n ' 0 0.0 4.0',\n ' 1 2.0 6.0',\n ' 2 5.5 1.5']\n\n # Binary ufunc np_add should raise warning without reduceat\n t2 = T1['a', 'c']\n tg = t2.group_by('a')\n\n with catch_warnings(Warning) as warning_lines:\n tga = tg.groups.aggregate(np_add)\n assert warning_lines[0].category == AstropyUserWarning\n assert \"Cannot aggregate column\" in str(warning_lines[0].message)\n assert tga.pformat() == [' a ',\n '---',\n ' 0',\n ' 1',\n ' 2']\n\n\ndef test_column_aggregate(T1):\n \"\"\"\n Aggregate a single table column\n \"\"\"\n for masked in (False, True):\n tg = Table(T1, masked=masked).group_by('a')\n tga = tg['c'].groups.aggregate(np.sum)\n assert tga.pformat() == [' c ',\n '----',\n ' 0.0',\n ' 6.0',\n '22.0']\n\n\ndef test_table_filter():\n \"\"\"\n Table groups filtering\n \"\"\"\n def all_positive(table, key_colnames):\n colnames = [name for name in table.colnames if name not in key_colnames]\n for colname in colnames:\n if np.any(table[colname] < 0):\n return False\n return True\n\n # Negative value in 'a' column should not filter because it is a key col\n t = Table.read([' a c d',\n ' -2 7.0 0',\n ' -2 5.0 1',\n ' 0 0.0 4',\n ' 1 3.0 5',\n ' 1 2.0 -6',\n ' 1 1.0 7',\n ' 3 3.0 5',\n ' 3 -2.0 6',\n ' 3 1.0 7',\n ], format='ascii')\n tg = t.group_by('a')\n t2 = tg.groups.filter(all_positive)\n assert t2.groups[0].pformat() == [' a c d ',\n '--- --- ---',\n ' -2 7.0 0',\n ' -2 5.0 1']\n assert t2.groups[1].pformat() == [' a c d ',\n '--- --- ---',\n ' 0 0.0 4']\n\n\ndef test_column_filter():\n \"\"\"\n Table groups filtering\n \"\"\"\n def all_positive(column):\n if np.any(column < 0):\n return False\n return True\n\n # Negative value in 'a' column should not filter because it is a key col\n t = Table.read([' a c d',\n ' -2 7.0 0',\n ' -2 5.0 1',\n ' 0 0.0 4',\n ' 1 3.0 5',\n ' 1 2.0 -6',\n ' 1 1.0 7',\n ' 3 3.0 5',\n ' 3 -2.0 6',\n ' 3 1.0 7',\n ], format='ascii')\n tg = t.group_by('a')\n c2 = tg['c'].groups.filter(all_positive)\n assert len(c2.groups) == 3\n assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0']\n assert c2.groups[1].pformat() == [' c ', '---', '0.0']\n assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0']\n\n\ndef test_group_mixins():\n \"\"\"\n Test grouping a table with mixin columns\n \"\"\"\n # Setup mixins\n idx = np.arange(4)\n x = np.array([3., 1., 2., 1.])\n q 
= x * u.m\n lon = coordinates.Longitude(x * u.deg)\n lat = coordinates.Latitude(x * u.deg)\n # For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)\n tm = time.Time(2000, format='jyear') + time.TimeDelta(x * 1e-10, format='sec')\n sc = coordinates.SkyCoord(ra=lon, dec=lat)\n aw = table_helpers.ArrayWrapper(x)\n nd = np.array([(3, 'c'), (1, 'a'), (2, 'b'), (1, 'a')],\n dtype='<i4,|S1').view(NdarrayMixin)\n\n qt = QTable([idx, x, q, lon, lat, tm, sc, aw, nd],\n names=['idx', 'x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd'])\n\n # Test group_by with each supported mixin type\n mixin_keys = ['x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd']\n for key in mixin_keys:\n qtg = qt.group_by(key)\n\n # Test that it got the sort order correct\n assert np.all(qtg['idx'] == [1, 3, 2, 0])\n\n # Test that the groups are right\n # Note: skip testing SkyCoord column because that doesn't have equality\n for name in ['x', 'q', 'lon', 'lat', 'tm', 'aw', 'nd']:\n assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])\n assert np.all(qt[name][[2]] == qtg.groups[1][name])\n assert np.all(qt[name][[0]] == qtg.groups[2][name])\n\n # Test that unique also works with mixins since most of the work is\n # done with group_by(). This is using *every* mixin as key.\n uqt = unique(qt, keys=mixin_keys)\n assert len(uqt) == 3\n assert np.all(uqt['idx'] == [1, 2, 0])\n assert np.all(uqt['x'] == [1., 2., 3.])\n\n # Column group_by() with mixins\n idxg = qt['idx'].group_by(qt[mixin_keys])\n assert np.all(idxg == [1, 3, 2, 0])\n",
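The entry above is astropy's table-grouping test suite. As a quick orientation, here is a minimal sketch of the pattern those tests exercise, `Table.group_by()` followed by `TableGroups.aggregate()`; the column names and values below are illustrative, not taken from the tests:

```python
import numpy as np
from astropy.table import Table

# Small table with a key column 'a' and a data column 'c'
t = Table({'a': [0, 1, 1, 2, 2, 2],
           'c': [0.0, 3.0, 3.0, 1.0, 10.0, 11.0]})

tg = t.group_by('a')                 # sort on the key and record group boundaries
sums = tg.groups.aggregate(np.sum)   # reduce each non-key column within every group

print(sums)                          # one row per unique value of 'a'
```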
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nThis file contains pytest configuration settings that are astropy-specific\n(i.e. those that would not necessarily be shared by affiliated packages\nmaking use of astropy's test runner).\n\"\"\"\nimport os\nimport builtins\nimport tempfile\n\ntry:\n from pytest_astropy_header.display import PYTEST_HEADER_MODULES\nexcept ImportError:\n PYTEST_HEADER_MODULES = {}\n\nfrom astropy.tests.helper import enable_deprecations_as_exceptions\n\ntry:\n import matplotlib\nexcept ImportError:\n HAS_MATPLOTLIB = False\nelse:\n HAS_MATPLOTLIB = True\n\nenable_deprecations_as_exceptions(\n include_astropy_deprecations=False,\n # This is a workaround for the OpenSSL deprecation warning that comes from\n # the `requests` module. It only appears when both asdf and sphinx are\n # installed. This can be removed once pyopenssl 1.7.20+ is released.\n modules_to_ignore_on_import=['requests'],\n warnings_to_ignore_by_pyver={\n # This warning shows up in mpl <3.1.2 on python 3.8,\n # remove once 3.1.2 is released\n (3, 8): set([(r\"In future, it will be an error for 'np.bool_' scalars \"\n \"to be interpreted as an index\", DeprecationWarning),])})\n\nif HAS_MATPLOTLIB:\n matplotlib.use('Agg')\n\nmatplotlibrc_cache = {}\n\n\ndef pytest_configure(config):\n builtins._pytest_running = True\n # do not assign to matplotlibrc_cache in function scope\n if HAS_MATPLOTLIB:\n matplotlibrc_cache.update(matplotlib.rcParams)\n matplotlib.rcdefaults()\n\n # Make sure we use temporary directories for the config and cache\n # so that the tests are insensitive to local configuration. Note that this\n # is also set in the test runner, but we need to also set it here for\n # things to work properly in parallel mode\n\n builtins._xdg_config_home_orig = os.environ.get('XDG_CONFIG_HOME')\n builtins._xdg_cache_home_orig = os.environ.get('XDG_CACHE_HOME')\n\n os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')\n os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')\n\n os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))\n os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))\n\n config.option.astropy_header = True\n\n PYTEST_HEADER_MODULES['Cython'] = 'cython'\n PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage'\n PYTEST_HEADER_MODULES['asdf'] = 'asdf'\n\n\ndef pytest_unconfigure(config):\n builtins._pytest_running = False\n # do not assign to matplotlibrc_cache in function scope\n if HAS_MATPLOTLIB:\n matplotlib.rcParams.update(matplotlibrc_cache)\n matplotlibrc_cache.clear()\n\n if builtins._xdg_config_home_orig is None:\n os.environ.pop('XDG_CONFIG_HOME')\n else:\n os.environ['XDG_CONFIG_HOME'] = builtins._xdg_config_home_orig\n\n if builtins._xdg_cache_home_orig is None:\n os.environ.pop('XDG_CACHE_HOME')\n else:\n os.environ['XDG_CACHE_HOME'] = builtins._xdg_cache_home_orig\n\n\ndef pytest_terminal_summary(terminalreporter):\n \"\"\"Output a warning to IPython users in case any tests failed.\"\"\"\n\n try:\n get_ipython()\n except NameError:\n return\n\n if not terminalreporter.stats.get('failed'):\n # Only issue the warning when there are actually failures\n return\n\n terminalreporter.ensure_newline()\n terminalreporter.write_line(\n 'Some tests are known to fail when run from the IPython prompt; '\n 'especially, but not limited to tests involving logging and warning '\n 'handling. Unless you are certain as to the cause of the failure, '\n 'please check that the failure occurs outside IPython as well. 
See '\n 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'\n 'tests-when-running-the-tests-in-ipython for more information.',\n yellow=True, bold=True)\n",
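The conftest entry above isolates the test run from the user's real configuration by pointing XDG_CONFIG_HOME and XDG_CACHE_HOME at temporary directories and by caching and restoring matplotlib's rcParams. Below is a hedged sketch of the same environment-isolation idea, written as a standalone pytest fixture rather than the pytest_configure/pytest_unconfigure hook pair the file itself uses; the fixture name is invented:

```python
import tempfile

import pytest


@pytest.fixture
def isolated_xdg_dirs(monkeypatch):
    """Point config/cache env vars at throwaway directories for one test."""
    config_dir = tempfile.mkdtemp('demo_config')
    cache_dir = tempfile.mkdtemp('demo_cache')
    monkeypatch.setenv('XDG_CONFIG_HOME', config_dir)
    monkeypatch.setenv('XDG_CACHE_HOME', cache_dir)
    yield config_dir, cache_dir
    # monkeypatch undoes the setenv calls automatically on teardown
```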
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom astropy.wcs import wcs\nfrom astropy.modeling import models\nfrom astropy import units as u\nfrom astropy.tests.helper import assert_quantity_allclose\n\n\[email protected](('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),\n (0, 90), (0, -90), (np.mgrid[:4, :6])])\ndef test_against_wcslib(inp):\n w = wcs.WCS()\n crval = [202.4823228, 47.17511893]\n w.wcs.crval = crval\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n\n lonpole = 180\n tan = models.Pix2Sky_TAN()\n n2c = models.RotateNative2Celestial(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)\n c2n = models.RotateCelestial2Native(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)\n m = tan | n2c\n minv = c2n | tan.inverse\n\n radec = w.wcs_pix2world(inp[0], inp[1], 1)\n xy = w.wcs_world2pix(radec[0], radec[1], 1)\n\n assert_allclose(m(*inp), radec, atol=1e-12)\n assert_allclose(minv(*radec), xy, atol=1e-12)\n\n\[email protected](('inp'), [(40 * u.deg, -0.057 * u.rad), (21.5 * u.arcsec, 45.9 * u.deg)])\ndef test_roundtrip_sky_rotation(inp):\n lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad)\n n2c = models.RotateNative2Celestial(lon, lat, lon_pole)\n c2n = models.RotateCelestial2Native(lon, lat, lon_pole)\n assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg)\n assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg)\n\n\ndef test_Rotation2D():\n model = models.Rotation2D(angle=90 * u.deg)\n a, b = 1 * u.deg, 0 * u.deg\n x, y = model(a, b)\n assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg], atol=1e-10 * u.deg)\n\n\ndef test_Rotation2D_inverse():\n model = models.Rotation2D(angle=234.23494 * u.deg)\n x, y = model.inverse(*model(1 * u.deg, 0 * u.deg))\n assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg], atol=1e-10 * u.deg)\n\n\ndef test_euler_angle_rotations():\n ydeg = (90 * u.deg, 0 * u.deg)\n y = (90, 0)\n z = (0, 90)\n\n # rotate y into minus z\n model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad, 0 * u.rad, 'zxz')\n assert_allclose(model(*z), y, atol=10**-12)\n model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg, 0 * u.deg, 'zxz')\n assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg)\n\n\[email protected](('params'), [(60, 10, 25),\n (60 * u.deg, 10 * u.deg, 25 * u.deg),\n ((60 * u.deg).to(u.rad),\n (10 * u.deg).to(u.rad),\n (25 * u.deg).to(u.rad))])\ndef test_euler_rotations_with_units(params):\n x = 1 * u.deg\n y = 1 * u.deg\n phi, theta, psi = params\n\n urot = models.EulerAngleRotation(phi, theta, psi, axes_order='xyz')\n a, b = urot(x.value, y.value)\n assert_allclose((a, b), (-23.614457631192547, 9.631254579686113))\n a, b = urot(x, y)\n assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))\n a, b = urot(x.to(u.rad), y.to(u.rad))\n assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))\n\n\ndef test_attributes():\n n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg, np.pi * u.rad)\n assert_allclose(n2c.lat.value, -72.3)\n assert_allclose(n2c.lat._raw_value, -1.2618730491919001)\n assert_allclose(n2c.lon.value, 20016)\n assert_allclose(n2c.lon._raw_value, 0.09704030641088472)\n assert_allclose(n2c.lon_pole.value, np.pi)\n assert_allclose(n2c.lon_pole._raw_value, np.pi)\n assert(n2c.lon.unit is u.Unit(\"arcsec\"))\n assert(n2c.lon.internal_unit is u.Unit(\"rad\"))\n 
assert(n2c.lat.unit is u.Unit(\"deg\"))\n assert(n2c.lat.internal_unit is u.Unit(\"rad\"))\n assert(n2c.lon_pole.unit is u.Unit(\"rad\"))\n assert(n2c.lon_pole.internal_unit is u.Unit(\"rad\"))\n",
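The rotation-model tests above verify that the modeling rotation classes accept and return Quantity angles. A condensed, self-contained version of the behaviour checked by test_Rotation2D and test_Rotation2D_inverse (the angle value is chosen only for demonstration):

```python
from astropy import units as u
from astropy.modeling import models

rot = models.Rotation2D(angle=90 * u.deg)   # rotate 2-D points by 90 degrees
x, y = rot(1 * u.deg, 0 * u.deg)            # Quantity in, Quantity out
print(x, y)                                 # approximately 0 deg, 1 deg

x0, y0 = rot.inverse(*rot(1 * u.deg, 0 * u.deg))   # the inverse undoes the rotation
```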
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os\nfrom copy import copy\nfrom io import StringIO\n\nimport pytest\nimport numpy as np\n\nfrom astropy.io.registry import _readers, _writers, _identifiers\nfrom astropy.io import registry as io_registry\nfrom astropy.table import Table\nfrom astropy import units as u\n\n# Since we reset the readers/writers below, we need to also import any\n# sub-package that defines readers/writers first\nfrom astropy import timeseries # noqa\n\n# Cache original _readers, _writers, _identifiers in setup_function\n# since tests modify them. Important to do caching in setup_function\n# and not globally (as was the original implementation) because e.g.\n# CCDData reader/writers get registered *after* this module gets imported\n# during test collection but *before* tests (and the final teardown) get run.\n# This leaves the registry corrupted.\nORIGINAL = {}\n\ntry:\n import yaml # pylint: disable=W0611\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\nclass TestData:\n read = classmethod(io_registry.read)\n write = io_registry.write\n\n\ndef setup_function(function):\n ORIGINAL['readers'] = copy(_readers)\n ORIGINAL['writers'] = copy(_writers)\n ORIGINAL['identifiers'] = copy(_identifiers)\n\n _readers.clear()\n _writers.clear()\n _identifiers.clear()\n\n\ndef empty_reader(*args, **kwargs):\n return TestData()\n\n\ndef empty_writer(table, *args, **kwargs):\n pass\n\n\ndef empty_identifier(*args, **kwargs):\n return True\n\n\ndef test_get_reader_invalid():\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n io_registry.get_reader('test', TestData)\n assert str(exc.value).startswith(\n \"No reader defined for format 'test' and class 'TestData'\")\n\n\ndef test_get_writer_invalid():\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n io_registry.get_writer('test', TestData)\n assert str(exc.value).startswith(\n \"No writer defined for format 'test' and class 'TestData'\")\n\n\ndef test_register_reader():\n\n io_registry.register_reader('test1', TestData, empty_reader)\n io_registry.register_reader('test2', TestData, empty_reader)\n\n assert io_registry.get_reader('test1', TestData) == empty_reader\n assert io_registry.get_reader('test2', TestData) == empty_reader\n\n io_registry.unregister_reader('test1', TestData)\n\n with pytest.raises(io_registry.IORegistryError):\n io_registry.get_reader('test1', TestData)\n assert io_registry.get_reader('test2', TestData) == empty_reader\n\n with pytest.warns(FutureWarning):\n io_registry.unregister_reader('test2', TestData)\n\n with pytest.raises(io_registry.IORegistryError):\n with pytest.warns(FutureWarning):\n io_registry.get_reader('test2', TestData)\n\n\ndef test_register_writer():\n\n io_registry.register_writer('test1', TestData, empty_writer)\n io_registry.register_writer('test2', TestData, empty_writer)\n\n assert io_registry.get_writer('test1', TestData) == empty_writer\n assert io_registry.get_writer('test2', TestData) == empty_writer\n\n io_registry.unregister_writer('test1', TestData)\n\n with pytest.raises(io_registry.IORegistryError):\n io_registry.get_writer('test1', TestData)\n assert io_registry.get_writer('test2', TestData) == empty_writer\n\n with pytest.warns(FutureWarning):\n io_registry.unregister_writer('test2', TestData)\n\n with pytest.raises(io_registry.IORegistryError):\n with pytest.warns(FutureWarning):\n io_registry.get_writer('test2', TestData)\n\n\ndef 
test_register_identifier():\n\n io_registry.register_identifier('test1', TestData, empty_identifier)\n io_registry.register_identifier('test2', TestData, empty_identifier)\n\n io_registry.unregister_identifier('test1', TestData)\n io_registry.unregister_identifier('test2', TestData)\n\n\ndef test_register_reader_invalid():\n io_registry.register_reader('test', TestData, empty_reader)\n with pytest.raises(io_registry.IORegistryError) as exc:\n io_registry.register_reader('test', TestData, empty_reader)\n assert (str(exc.value) == \"Reader for format 'test' and class 'TestData' \"\n \"is already defined\")\n\n\ndef test_register_writer_invalid():\n io_registry.register_writer('test', TestData, empty_writer)\n with pytest.raises(io_registry.IORegistryError) as exc:\n io_registry.register_writer('test', TestData, empty_writer)\n assert (str(exc.value) == \"Writer for format 'test' and class 'TestData' \"\n \"is already defined\")\n\n\ndef test_register_identifier_invalid():\n io_registry.register_identifier('test', TestData, empty_identifier)\n with pytest.raises(io_registry.IORegistryError) as exc:\n io_registry.register_identifier('test', TestData, empty_identifier)\n assert (str(exc.value) == \"Identifier for format 'test' and class \"\n \"'TestData' is already defined\")\n\n\ndef test_unregister_reader_invalid():\n with pytest.raises(io_registry.IORegistryError) as exc:\n io_registry.unregister_reader('test', TestData)\n assert str(exc.value) == \"No reader defined for format 'test' and class 'TestData'\"\n\n\ndef test_unregister_writer_invalid():\n with pytest.raises(io_registry.IORegistryError) as exc:\n io_registry.unregister_writer('test', TestData)\n assert str(exc.value) == \"No writer defined for format 'test' and class 'TestData'\"\n\n\ndef test_unregister_identifier_invalid():\n with pytest.raises(io_registry.IORegistryError) as exc:\n io_registry.unregister_identifier('test', TestData)\n assert str(exc.value) == \"No identifier defined for format 'test' and class 'TestData'\"\n\n\ndef test_register_reader_force():\n io_registry.register_reader('test', TestData, empty_reader)\n io_registry.register_reader('test', TestData, empty_reader, force=True)\n\n\ndef test_register_writer_force():\n io_registry.register_writer('test', TestData, empty_writer)\n io_registry.register_writer('test', TestData, empty_writer, force=True)\n\n\ndef test_register_identifier_force():\n io_registry.register_identifier('test', TestData, empty_identifier)\n io_registry.register_identifier('test', TestData, empty_identifier, force=True)\n\n\ndef test_read_noformat():\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData.read()\n assert str(exc.value).startswith(\"Format could not be identified based on the\"\n \" file name or contents, please provide a\"\n \" 'format' argument.\")\n\n\ndef test_write_noformat():\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData().write()\n assert str(exc.value).startswith(\"Format could not be identified based on the\"\n \" file name or contents, please provide a\"\n \" 'format' argument.\")\n\n\ndef test_read_noformat_arbitrary():\n \"\"\"Test that all identifier functions can accept arbitrary input\"\"\"\n _identifiers.update(ORIGINAL['identifiers'])\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData.read(object())\n assert str(exc.value).startswith(\"Format could not be identified based on the\"\n \" file name or 
contents, please provide a\"\n \" 'format' argument.\")\n\n\ndef test_read_noformat_arbitrary_file(tmpdir):\n \"\"\"Tests that all identifier functions can accept arbitrary files\"\"\"\n _readers.update(ORIGINAL['readers'])\n testfile = str(tmpdir.join('foo.example'))\n with open(testfile, 'w') as f:\n f.write(\"Hello world\")\n\n with pytest.raises(io_registry.IORegistryError) as exc:\n Table.read(testfile)\n assert str(exc.value).startswith(\"Format could not be identified based on the\"\n \" file name or contents, please provide a\"\n \" 'format' argument.\")\n\n\ndef test_write_noformat_arbitrary():\n \"\"\"Test that all identifier functions can accept arbitrary input\"\"\"\n _identifiers.update(ORIGINAL['identifiers'])\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData().write(object())\n assert str(exc.value).startswith(\"Format could not be identified based on the\"\n \" file name or contents, please provide a\"\n \" 'format' argument.\")\n\n\ndef test_write_noformat_arbitrary_file(tmpdir):\n \"\"\"Tests that all identifier functions can accept arbitrary files\"\"\"\n _writers.update(ORIGINAL['writers'])\n testfile = str(tmpdir.join('foo.example'))\n\n with pytest.raises(io_registry.IORegistryError) as exc:\n Table().write(testfile)\n assert str(exc.value).startswith(\"Format could not be identified based on the\"\n \" file name or contents, please provide a\"\n \" 'format' argument.\")\n\n\ndef test_read_toomanyformats():\n io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)\n io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)\n with pytest.raises(io_registry.IORegistryError) as exc:\n TestData.read()\n assert str(exc.value) == \"Format is ambiguous - options are: test1, test2\"\n\n\ndef test_write_toomanyformats():\n io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)\n io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)\n with pytest.raises(io_registry.IORegistryError) as exc:\n TestData().write()\n assert str(exc.value) == \"Format is ambiguous - options are: test1, test2\"\n\n\ndef test_read_format_noreader():\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData.read(format='test')\n assert str(exc.value).startswith(\n \"No reader defined for format 'test' and class 'TestData'\")\n\n\ndef test_write_format_nowriter():\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData().write(format='test')\n assert str(exc.value).startswith(\n \"No writer defined for format 'test' and class 'TestData'\")\n\n\ndef test_read_identifier(tmpdir):\n\n io_registry.register_identifier(\n 'test1', TestData,\n lambda o, path, fileobj, *x, **y: path.endswith('a'))\n io_registry.register_identifier(\n 'test2', TestData,\n lambda o, path, fileobj, *x, **y: path.endswith('b'))\n\n # Now check that we got past the identifier and are trying to get\n # the reader. 
The io_registry.get_reader will fail but the error message\n # will tell us if the identifier worked.\n\n filename = tmpdir.join(\"testfile.a\").strpath\n open(filename, 'w').close()\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData.read(filename)\n assert str(exc.value).startswith(\n \"No reader defined for format 'test1' and class 'TestData'\")\n\n filename = tmpdir.join(\"testfile.b\").strpath\n open(filename, 'w').close()\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData.read(filename)\n assert str(exc.value).startswith(\n \"No reader defined for format 'test2' and class 'TestData'\")\n\n\ndef test_write_identifier():\n\n io_registry.register_identifier('test1', TestData, lambda o, *x, **y: x[0].startswith('a'))\n io_registry.register_identifier('test2', TestData, lambda o, *x, **y: x[0].startswith('b'))\n\n # Now check that we got past the identifier and are trying to get\n # the reader. The io_registry.get_writer will fail but the error message\n # will tell us if the identifier worked.\n\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData().write('abc')\n assert str(exc.value).startswith(\n \"No writer defined for format 'test1' and class 'TestData'\")\n\n with pytest.raises(io_registry.IORegistryError) as exc:\n with pytest.warns(FutureWarning):\n TestData().write('bac')\n assert str(exc.value).startswith(\n \"No writer defined for format 'test2' and class 'TestData'\")\n\n\ndef test_identifier_origin():\n\n io_registry.register_identifier('test1', TestData, lambda o, *x, **y: o == 'read')\n io_registry.register_identifier('test2', TestData, lambda o, *x, **y: o == 'write')\n io_registry.register_reader('test1', TestData, empty_reader)\n io_registry.register_writer('test2', TestData, empty_writer)\n\n # There should not be too many formats defined\n TestData.read()\n TestData().write()\n\n with pytest.raises(io_registry.IORegistryError) as exc:\n TestData.read(format='test2')\n assert str(exc.value).startswith(\n \"No reader defined for format 'test2' and class 'TestData'\")\n\n with pytest.raises(io_registry.IORegistryError) as exc:\n TestData().write(format='test1')\n assert str(exc.value).startswith(\n \"No writer defined for format 'test1' and class 'TestData'\")\n\n\ndef test_read_valid_return():\n io_registry.register_reader('test', TestData, lambda: TestData())\n t = TestData.read(format='test')\n assert isinstance(t, TestData)\n\n\ndef test_non_existing_unknown_ext():\n \"\"\"Raise the correct error when attempting to read a non-existing\n file with an unknown extension.\"\"\"\n with pytest.raises(OSError):\n data = Table.read('non-existing-file-with-unknown.ext')\n\n\ndef test_read_basic_table():\n data = np.array(list(zip([1, 2, 3], ['a', 'b', 'c'])),\n dtype=[('A', int), ('B', '|U1')])\n io_registry.register_reader('test', Table, lambda x: Table(x))\n t = Table.read(data, format='test')\n assert t.keys() == ['A', 'B']\n for i in range(3):\n assert t['A'][i] == data['A'][i]\n assert t['B'][i] == data['B'][i]\n\n\ndef test_register_readers_with_same_name_on_different_classes():\n # No errors should be generated if the same name is registered for\n # different objects...but this failed under python3\n io_registry.register_reader('test', TestData, lambda: TestData())\n io_registry.register_reader('test', Table, lambda: Table())\n t = TestData.read(format='test')\n assert isinstance(t, TestData)\n tbl = 
Table.read(format='test')\n assert isinstance(tbl, Table)\n\n\ndef test_inherited_registration():\n # check that multi-generation inheritance works properly,\n # meaning that a child inherits from parents before\n # grandparents, see astropy/astropy#7156\n\n class Child1(Table):\n pass\n\n class Child2(Child1):\n pass\n\n def _read():\n return Table()\n\n def _read1():\n return Child1()\n\n # check that reader gets inherited\n io_registry.register_reader('test', Table, _read)\n assert io_registry.get_reader('test', Child2) is _read\n\n # check that nearest ancestor is identified\n # (i.e. that the reader for Child2 is the registered method\n # for Child1, and not Table)\n io_registry.register_reader('test', Child1, _read1)\n assert io_registry.get_reader('test', Child2) is _read1\n\n\ndef teardown_function(function):\n _readers.clear()\n _writers.clear()\n _identifiers.clear()\n _readers.update(ORIGINAL['readers'])\n _writers.update(ORIGINAL['writers'])\n _identifiers.update(ORIGINAL['identifiers'])\n\n\nclass TestSubclass:\n \"\"\"\n Test using registry with a Table sub-class\n \"\"\"\n\n def test_read_table_subclass(self):\n class MyTable(Table):\n pass\n data = ['a b', '1 2']\n mt = MyTable.read(data, format='ascii')\n t = Table.read(data, format='ascii')\n assert np.all(mt == t)\n assert mt.colnames == t.colnames\n assert type(mt) is MyTable\n\n def test_write_table_subclass(self):\n buffer = StringIO()\n\n class MyTable(Table):\n pass\n mt = MyTable([[1], [2]], names=['a', 'b'])\n mt.write(buffer, format='ascii')\n assert buffer.getvalue() == os.linesep.join(['a b', '1 2', ''])\n\n def test_read_table_subclass_with_columns_attributes(self, tmpdir):\n \"\"\"Regression test for https://github.com/astropy/astropy/issues/7181\n \"\"\"\n\n class MTable(Table):\n pass\n\n mt = MTable([[1, 2.5]], names=['a'])\n mt['a'].unit = u.m\n mt['a'].format = '.4f'\n mt['a'].description = 'hello'\n\n testfile = str(tmpdir.join('junk.fits'))\n mt.write(testfile, overwrite=True)\n\n t = MTable.read(testfile)\n assert np.all(mt == t)\n assert mt.colnames == t.colnames\n assert type(t) is MTable\n assert t['a'].unit == u.m\n assert t['a'].format == '{:13.4f}'\n if HAS_YAML:\n assert t['a'].description == 'hello'\n else:\n assert t['a'].description is None\n",
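The registry tests above revolve around register_reader / register_writer / register_identifier and the classmethod-wrapped io_registry.read. A condensed sketch of that workflow follows; the Spectrum class and the 'myfmt' format name are invented for illustration:

```python
from astropy.io import registry as io_registry


class Spectrum:
    """Toy container; it only exists so there is a class to register against."""
    read = classmethod(io_registry.read)


def read_myfmt(*args, **kwargs):
    # A real reader would parse the file; returning an instance is enough here.
    return Spectrum()


io_registry.register_reader('myfmt', Spectrum, read_myfmt)
io_registry.register_identifier(
    'myfmt', Spectrum,
    lambda origin, path, fileobj, *args, **kwargs: (
        path is not None and path.endswith('.myfmt')))

spec = Spectrum.read(format='myfmt')   # an explicit format bypasses identification
assert isinstance(spec, Spectrum)
```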
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom copy import deepcopy\n\nimport numpy as np\n\nfrom astropy.table import groups, QTable, Table\nfrom astropy.time import Time, TimeDelta\nfrom astropy import units as u\nfrom astropy.units import Quantity, UnitsError\nfrom astropy.utils.decorators import deprecated_renamed_argument\nfrom astropy.timeseries.core import BaseTimeSeries, autocheck_required_columns\n\n__all__ = ['TimeSeries']\n\n\n@autocheck_required_columns\nclass TimeSeries(BaseTimeSeries):\n \"\"\"\n A class to represent time series data in tabular form.\n\n `~astropy.timeseries.TimeSeries` provides a class for representing time\n series as a collection of values of different quantities measured at specific\n points in time (for time series with finite time bins, see the\n `~astropy.timeseries.BinnedTimeSeries` class).\n `~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`\n and thus provides all the standard table maniplation methods available to\n tables, but it also provides additional conveniences for dealing with time\n series, such as a flexible initializer for setting up the times, a method\n for folding time series, and a ``time`` attribute for easy access to the\n time values.\n\n See also: http://docs.astropy.org/en/stable/timeseries/\n\n Parameters\n ----------\n data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional\n Data to initialize time series. This does not need to contain the times,\n which can be provided separately, but if it does contain the times they\n should be in a column called ``'time'`` to be automatically recognized.\n time : `~astropy.time.Time` or iterable\n The times at which the values are sampled - this can be either given\n directly as a `~astropy.time.Time` array or as any iterable that\n initializes the `~astropy.time.Time` class. If this is given, then\n the remaining time-related arguments should not be used.\n time_start : `~astropy.time.Time` or str\n The time of the first sample in the time series. This is an alternative\n to providing ``time`` and requires that ``time_delta`` is also provided.\n time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity`\n The step size in time for the series. This can either be a scalar if\n the time series is evenly sampled, or an array of values if it is not.\n n_samples : int\n The number of time samples for the series. This is only used if both\n ``time_start`` and ``time_delta`` are provided and are scalar values.\n **kwargs : dict, optional\n Additional keyword arguments are passed to `~astropy.table.QTable`.\n \"\"\"\n\n _required_columns = ['time']\n\n def __init__(self, data=None, *, time=None, time_start=None,\n time_delta=None, n_samples=None, **kwargs):\n\n super().__init__(data=data, **kwargs)\n\n # For some operations, an empty time series needs to be created, then\n # columns added one by one. 
We should check that when columns are added\n # manually, time is added first and is of the right type.\n if data is None and time is None and time_start is None and time_delta is None:\n self._required_columns_relax = True\n return\n\n # First if time has been given in the table data, we should extract it\n # and treat it as if it had been passed as a keyword argument.\n\n if data is not None:\n if n_samples is not None:\n if n_samples != len(self):\n raise TypeError(\"'n_samples' has been given both and it is not the \"\n \"same length as the input data.\")\n else:\n n_samples = len(self)\n\n if 'time' in self.colnames:\n if time is None:\n time = self.columns['time']\n else:\n raise TypeError(\"'time' has been given both in the table and as a keyword argument\")\n\n if time is None and time_start is None:\n raise TypeError(\"Either 'time' or 'time_start' should be specified\")\n elif time is not None and time_start is not None:\n raise TypeError(\"Cannot specify both 'time' and 'time_start'\")\n\n if time is not None and not isinstance(time, Time):\n time = Time(time)\n\n if time_start is not None and not isinstance(time_start, Time):\n time_start = Time(time_start)\n\n if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):\n raise TypeError(\"'time_delta' should be a Quantity or a TimeDelta\")\n\n if isinstance(time_delta, TimeDelta):\n time_delta = time_delta.sec * u.s\n\n if time_start is not None:\n\n # We interpret this as meaning that time is that of the first\n # sample and that the interval is given by time_delta.\n\n if time_delta is None:\n raise TypeError(\"'time' is scalar, so 'time_delta' is required\")\n\n if time_delta.isscalar:\n time_delta = np.repeat(time_delta, n_samples)\n\n time_delta = np.cumsum(time_delta)\n time_delta = np.roll(time_delta, 1)\n time_delta[0] = 0. * u.s\n\n time = time_start + time_delta\n\n elif len(self.colnames) > 0 and len(time) != len(self):\n raise ValueError(\"Length of 'time' ({}) should match \"\n \"data length ({})\".format(len(time), n_samples))\n\n elif time_delta is not None:\n raise TypeError(\"'time_delta' should not be specified since \"\n \"'time' is an array\")\n\n with self._delay_required_column_checks():\n if 'time' in self.colnames:\n self.remove_column('time')\n self.add_column(time, index=0, name='time')\n\n @property\n def time(self):\n \"\"\"\n The time values.\n \"\"\"\n return self['time']\n\n @deprecated_renamed_argument('midpoint_epoch', 'epoch_time', '4.0')\n def fold(self, period=None, epoch_time=None, epoch_phase=0,\n wrap_phase=None, normalize_phase=False):\n \"\"\"\n Return a new `~astropy.timeseries.TimeSeries` folded with a period and\n epoch.\n\n Parameters\n ----------\n period : `~astropy.units.Quantity`\n The period to use for folding\n epoch_time : `~astropy.time.Time`\n The time to use as the reference epoch, at which the relative time\n offset / phase will be ``epoch_phase``. Defaults to the first time\n in the time series.\n epoch_phase : float or `~astropy.units.Quantity`\n Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this\n should be a dimensionless value, while if ``normalize_phase`` is\n ``False``, this should be a `~astropy.units.Quantity` with time\n units. Defaults to 0.\n wrap_phase : float or `~astropy.units.Quantity`\n The value of the phase above which values are wrapped back by one\n period. 
If ``normalize_phase`` is `True`, this should be a\n dimensionless value, while if ``normalize_phase`` is ``False``,\n this should be a `~astropy.units.Quantity` with time units.\n Defaults to half the period, so that the resulting time series goes\n from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is\n `False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).\n normalize_phase : bool\n If `False` phase is returned as `~astropy.time.TimeDelta`,\n otherwise as a dimensionless `~astropy.units.Quantity`.\n\n Returns\n -------\n folded_timeseries : `~astropy.timeseries.TimeSeries`\n The folded time series object with phase as the ``time`` column.\n \"\"\"\n\n if not isinstance(period, Quantity) or period.unit.physical_type != 'time':\n raise UnitsError('period should be a Quantity in units of time')\n\n folded = self.copy()\n\n if epoch_time is None:\n epoch_time = self.time[0]\n else:\n epoch_time = Time(epoch_time)\n\n period_sec = period.to_value(u.s)\n\n if normalize_phase:\n if isinstance(epoch_phase, Quantity) and epoch_phase.unit.physical_type != 'dimensionless':\n raise UnitsError('epoch_phase should be a dimensionless Quantity '\n 'or a float when normalize_phase=True')\n epoch_phase_sec = epoch_phase * period_sec\n else:\n if epoch_phase == 0:\n epoch_phase_sec = 0.\n else:\n if not isinstance(epoch_phase, Quantity) or epoch_phase.unit.physical_type != 'time':\n raise UnitsError('epoch_phase should be a Quantity in units '\n 'of time when normalize_phase=False')\n epoch_phase_sec = epoch_phase.to_value(u.s)\n\n if wrap_phase is None:\n wrap_phase = period_sec / 2\n else:\n if normalize_phase:\n if isinstance(wrap_phase, Quantity) and not wrap_phase.unit.is_equivalent(u.one):\n raise UnitsError('wrap_phase should be dimensionless when '\n 'normalize_phase=True')\n else:\n if wrap_phase < 0 or wrap_phase > 1:\n raise ValueError('wrap_phase should be between 0 and 1')\n else:\n wrap_phase = wrap_phase * period_sec\n else:\n if isinstance(wrap_phase, Quantity) and wrap_phase.unit.physical_type == 'time':\n if wrap_phase < 0 or wrap_phase > period:\n raise ValueError('wrap_phase should be between 0 and the period')\n else:\n wrap_phase = wrap_phase.to_value(u.s)\n else:\n raise UnitsError('wrap_phase should be a Quantity in units '\n 'of time when normalize_phase=False')\n\n relative_time_sec = (((self.time - epoch_time).sec\n + epoch_phase_sec\n + (period_sec - wrap_phase)) % period_sec\n - (period_sec - wrap_phase))\n\n folded_time = TimeDelta(relative_time_sec * u.s)\n\n if normalize_phase:\n folded_time = (folded_time / period).decompose()\n period = period_sec = 1\n\n with folded._delay_required_column_checks():\n folded.remove_column('time')\n folded.add_column(folded_time, name='time', index=0)\n\n return folded\n\n def __getitem__(self, item):\n if self._is_list_or_tuple_of_str(item):\n if 'time' not in item:\n out = QTable([self[x] for x in item],\n meta=deepcopy(self.meta),\n copy_indices=self._copy_indices)\n out._groups = groups.TableGroups(out, indices=self.groups._indices,\n keys=self.groups._keys)\n return out\n return super().__getitem__(item)\n\n def add_column(self, *args, **kwargs):\n \"\"\"\n See :meth:`~astropy.table.Table.add_column`.\n \"\"\"\n # Note that the docstring is inherited from QTable\n result = super().add_column(*args, **kwargs)\n if len(self.indices) == 0 and 'time' in self.colnames:\n self.add_index('time')\n return result\n\n def add_columns(self, *args, **kwargs):\n \"\"\"\n See :meth:`~astropy.table.Table.add_columns`.\n \"\"\"\n # 
Note that the docstring is inherited from QTable\n result = super().add_columns(*args, **kwargs)\n if len(self.indices) == 0 and 'time' in self.colnames:\n self.add_index('time')\n return result\n\n @classmethod\n def from_pandas(self, df, time_scale='utc'):\n \"\"\"\n Convert a :class:`~pandas.DataFrame` to a\n :class:`astropy.timeseries.TimeSeries`.\n\n Parameters\n ----------\n df : :class:`pandas.DataFrame`\n A pandas :class:`pandas.DataFrame` instance.\n time_scale : str\n The time scale to pass into `astropy.time.Time`.\n Defaults to ``UTC``.\n\n \"\"\"\n from pandas import DataFrame, DatetimeIndex\n\n if not isinstance(df, DataFrame):\n raise TypeError(\"Input should be a pandas DataFrame\")\n\n if not isinstance(df.index, DatetimeIndex):\n raise TypeError(\"DataFrame does not have a DatetimeIndex\")\n\n time = Time(df.index, scale=time_scale)\n table = Table.from_pandas(df)\n\n return TimeSeries(time=time, data=table)\n\n def to_pandas(self):\n \"\"\"\n Convert this :class:`~astropy.timeseries.TimeSeries` to a\n :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.\n\n Returns\n -------\n dataframe : :class:`pandas.DataFrame`\n A pandas :class:`pandas.DataFrame` instance\n \"\"\"\n return Table(self).to_pandas(index='time')\n\n @classmethod\n def read(self, filename, time_column=None, time_format=None, time_scale=None, format=None, *args, **kwargs):\n \"\"\"\n Read and parse a file and returns a `astropy.timeseries.TimeSeries`.\n\n This method uses the unified I/O infrastructure in Astropy which makes\n it easy to define readers/writers for various classes\n (http://docs.astropy.org/en/stable/io/unified.html). By default, this\n method will try and use readers defined specifically for the\n `astropy.timeseries.TimeSeries` class - however, it is also\n possible to use the ``format`` keyword to specify formats defined for\n the `astropy.table.Table` class - in this case, you will need to also\n provide the column names for column containing the start times for the\n bins, as well as other column names (see the Parameters section below\n for details)::\n\n >>> from astropy.timeseries import TimeSeries\n >>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',\n ... 
time_column='date') # doctest: +SKIP\n\n Parameters\n ----------\n filename : str\n File to parse.\n format : str\n File format specifier.\n time_column : str, optional\n The name of the time column.\n time_format : str, optional\n The time format for the time column.\n time_scale : str, optional\n The time scale for the time column.\n *args : tuple, optional\n Positional arguments passed through to the data reader.\n **kwargs : dict, optional\n Keyword arguments passed through to the data reader.\n\n Returns\n -------\n out : `astropy.timeseries.sampled.TimeSeries`\n TimeSeries corresponding to file contents.\n\n Notes\n -----\n \"\"\"\n try:\n\n # First we try the readers defined for the BinnedTimeSeries class\n return super().read(filename, format=format, *args, **kwargs)\n\n except TypeError:\n\n # Otherwise we fall back to the default Table readers\n\n if time_column is None:\n raise ValueError(\"``time_column`` should be provided since the default Table readers are being used.\")\n\n table = Table.read(filename, format=format, *args, **kwargs)\n\n if time_column in table.colnames:\n time = Time(table.columns[time_column], scale=time_scale, format=time_format)\n table.remove_column(time_column)\n else:\n raise ValueError(f\"Time column '{time_column}' not found in the input data.\")\n\n return TimeSeries(time=time, data=table)\n",
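The TimeSeries implementation above documents two construction routes (an explicit time column, or time_start plus time_delta) and a fold() method. A short usage sketch limited to keywords that appear in those docstrings; the sample values are arbitrary:

```python
from astropy import units as u
from astropy.timeseries import TimeSeries

# Evenly sampled series: times are built from time_start + cumulative time_delta
ts = TimeSeries(time_start='2019-01-01T00:00:00',
                time_delta=3 * u.s,
                data={'flux': [1., 4., 5., 3., 2.]})
print(ts.time.isot)

# Folding replaces the 'time' column with a phase relative to the epoch
folded = ts.fold(period=6 * u.s)
print(folded['time'])
```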
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport functools\n\nimport pytest\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates import (PhysicsSphericalRepresentation, CartesianRepresentation,\n CylindricalRepresentation, SphericalRepresentation,\n UnitSphericalRepresentation, SphericalDifferential,\n CartesianDifferential, UnitSphericalDifferential,\n SphericalCosLatDifferential, UnitSphericalCosLatDifferential,\n PhysicsSphericalDifferential, CylindricalDifferential,\n RadialRepresentation, RadialDifferential, Longitude, Latitude)\nfrom astropy.coordinates.representation import DIFFERENTIAL_CLASSES\nfrom astropy.coordinates.angle_utilities import angular_separation\nfrom astropy.tests.helper import assert_quantity_allclose, quantity_allclose\n\n\ndef assert_representation_allclose(actual, desired, rtol=1.e-7, atol=None,\n **kwargs):\n actual_xyz = actual.to_cartesian().get_xyz(xyz_axis=-1)\n desired_xyz = desired.to_cartesian().get_xyz(xyz_axis=-1)\n actual_xyz, desired_xyz = np.broadcast_arrays(actual_xyz, desired_xyz,\n subok=True)\n assert_quantity_allclose(actual_xyz, desired_xyz, rtol, atol, **kwargs)\n\n\ndef assert_differential_allclose(actual, desired, rtol=1.e-7, **kwargs):\n assert actual.components == desired.components\n for component in actual.components:\n actual_c = getattr(actual, component)\n atol = 1.e-10 * actual_c.unit\n assert_quantity_allclose(actual_c, getattr(desired, component),\n rtol, atol, **kwargs)\n\n\ndef representation_equal(first, second):\n return functools.reduce(np.logical_and,\n (getattr(first, component) ==\n getattr(second, component)\n for component in first.components))\n\n\nclass TestArithmetic():\n\n def setup(self):\n # Choose some specific coordinates, for which ``sum`` and ``dot``\n # works out nicely.\n self.lon = Longitude(np.arange(0, 12.1, 2), u.hourangle)\n self.lat = Latitude(np.arange(-90, 91, 30), u.deg)\n self.distance = [5., 12., 4., 2., 4., 12., 5.] * u.kpc\n self.spherical = SphericalRepresentation(self.lon, self.lat,\n self.distance)\n self.unit_spherical = self.spherical.represent_as(\n UnitSphericalRepresentation)\n self.cartesian = self.spherical.to_cartesian()\n\n def test_norm_spherical(self):\n norm_s = self.spherical.norm()\n assert isinstance(norm_s, u.Quantity)\n # Just to be sure, test against getting object arrays.\n assert norm_s.dtype.kind == 'f'\n assert np.all(norm_s == self.distance)\n\n @pytest.mark.parametrize('representation',\n (PhysicsSphericalRepresentation,\n CartesianRepresentation,\n CylindricalRepresentation))\n def test_norm(self, representation):\n in_rep = self.spherical.represent_as(representation)\n norm_rep = in_rep.norm()\n assert isinstance(norm_rep, u.Quantity)\n assert_quantity_allclose(norm_rep, self.distance)\n\n def test_norm_unitspherical(self):\n norm_rep = self.unit_spherical.norm()\n assert norm_rep.unit == u.dimensionless_unscaled\n assert np.all(norm_rep == 1. 
* u.dimensionless_unscaled)\n\n @pytest.mark.parametrize('representation',\n (SphericalRepresentation,\n PhysicsSphericalRepresentation,\n CartesianRepresentation,\n CylindricalRepresentation,\n UnitSphericalRepresentation))\n def test_neg_pos(self, representation):\n in_rep = self.cartesian.represent_as(representation)\n pos_rep = +in_rep\n assert type(pos_rep) is type(in_rep)\n assert pos_rep is not in_rep\n assert np.all(representation_equal(pos_rep, in_rep))\n neg_rep = -in_rep\n assert type(neg_rep) is type(in_rep)\n assert np.all(neg_rep.norm() == in_rep.norm())\n in_rep_xyz = in_rep.to_cartesian().xyz\n assert_quantity_allclose(neg_rep.to_cartesian().xyz,\n -in_rep_xyz, atol=1.e-10*in_rep_xyz.unit)\n\n def test_mul_div_spherical(self):\n s0 = self.spherical / (1. * u.Myr)\n assert isinstance(s0, SphericalRepresentation)\n assert s0.distance.dtype.kind == 'f'\n assert np.all(s0.lon == self.spherical.lon)\n assert np.all(s0.lat == self.spherical.lat)\n assert np.all(s0.distance == self.distance / (1. * u.Myr))\n s1 = (1./u.Myr) * self.spherical\n assert isinstance(s1, SphericalRepresentation)\n assert np.all(representation_equal(s1, s0))\n s2 = self.spherical * np.array([[1.], [2.]])\n assert isinstance(s2, SphericalRepresentation)\n assert s2.shape == (2, self.spherical.shape[0])\n assert np.all(s2.lon == self.spherical.lon)\n assert np.all(s2.lat == self.spherical.lat)\n assert np.all(s2.distance ==\n self.spherical.distance * np.array([[1.], [2.]]))\n s3 = np.array([[1.], [2.]]) * self.spherical\n assert isinstance(s3, SphericalRepresentation)\n assert np.all(representation_equal(s3, s2))\n s4 = -self.spherical\n assert isinstance(s4, SphericalRepresentation)\n assert quantity_allclose(s4.to_cartesian().xyz,\n -self.spherical.to_cartesian().xyz,\n atol=1e-15*self.spherical.distance.unit)\n assert np.all(s4.distance == self.spherical.distance)\n s5 = +self.spherical\n assert s5 is not self.spherical\n assert np.all(representation_equal(s5, self.spherical))\n\n @pytest.mark.parametrize('representation',\n (PhysicsSphericalRepresentation,\n CartesianRepresentation,\n CylindricalRepresentation))\n def test_mul_div(self, representation):\n in_rep = self.spherical.represent_as(representation)\n r1 = in_rep / (1. 
* u.Myr)\n assert isinstance(r1, representation)\n for component in in_rep.components:\n in_rep_comp = getattr(in_rep, component)\n r1_comp = getattr(r1, component)\n if in_rep_comp.unit == self.distance.unit:\n assert np.all(r1_comp == in_rep_comp / (1.*u.Myr))\n else:\n assert np.all(r1_comp == in_rep_comp)\n\n r2 = np.array([[1.], [2.]]) * in_rep\n assert isinstance(r2, representation)\n assert r2.shape == (2, in_rep.shape[0])\n assert_quantity_allclose(r2.norm(),\n self.distance * np.array([[1.], [2.]]))\n r3 = -in_rep\n assert np.all(representation_equal(r3, in_rep * -1.))\n with pytest.raises(TypeError):\n in_rep * in_rep\n with pytest.raises(TypeError):\n dict() * in_rep\n\n def test_mul_div_unit_spherical(self):\n s1 = self.unit_spherical * self.distance\n assert isinstance(s1, SphericalRepresentation)\n assert np.all(s1.lon == self.unit_spherical.lon)\n assert np.all(s1.lat == self.unit_spherical.lat)\n assert np.all(s1.distance == self.spherical.distance)\n s2 = self.unit_spherical / u.s\n assert isinstance(s2, SphericalRepresentation)\n assert np.all(s2.lon == self.unit_spherical.lon)\n assert np.all(s2.lat == self.unit_spherical.lat)\n assert np.all(s2.distance == 1./u.s)\n u3 = -self.unit_spherical\n assert isinstance(u3, UnitSphericalRepresentation)\n assert_quantity_allclose(u3.lon, self.unit_spherical.lon + 180.*u.deg)\n assert np.all(u3.lat == -self.unit_spherical.lat)\n assert_quantity_allclose(u3.to_cartesian().xyz,\n -self.unit_spherical.to_cartesian().xyz,\n atol=1.e-10*u.dimensionless_unscaled)\n u4 = +self.unit_spherical\n assert isinstance(u4, UnitSphericalRepresentation)\n assert u4 is not self.unit_spherical\n assert np.all(representation_equal(u4, self.unit_spherical))\n\n def test_add_sub_cartesian(self):\n c1 = self.cartesian + self.cartesian\n assert isinstance(c1, CartesianRepresentation)\n assert c1.x.dtype.kind == 'f'\n assert np.all(representation_equal(c1, 2. * self.cartesian))\n with pytest.raises(TypeError):\n self.cartesian + 10.*u.m\n with pytest.raises(u.UnitsError):\n self.cartesian + (self.cartesian / u.s)\n c2 = self.cartesian - self.cartesian\n assert isinstance(c2, CartesianRepresentation)\n assert np.all(representation_equal(\n c2, CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m)))\n c3 = self.cartesian - self.cartesian / 2.\n assert isinstance(c3, CartesianRepresentation)\n assert np.all(representation_equal(c3, self.cartesian / 2.))\n\n @pytest.mark.parametrize('representation',\n (PhysicsSphericalRepresentation,\n SphericalRepresentation,\n CylindricalRepresentation))\n def test_add_sub(self, representation):\n in_rep = self.cartesian.represent_as(representation)\n r1 = in_rep + in_rep\n assert isinstance(r1, representation)\n expected = 2. * in_rep\n for component in in_rep.components:\n assert_quantity_allclose(getattr(r1, component),\n getattr(expected, component))\n with pytest.raises(TypeError):\n 10.*u.m + in_rep\n with pytest.raises(u.UnitsError):\n in_rep + (in_rep / u.s)\n r2 = in_rep - in_rep\n assert isinstance(r2, representation)\n assert np.all(representation_equal(\n r2.to_cartesian(), CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m)))\n r3 = in_rep - in_rep / 2.\n assert isinstance(r3, representation)\n expected = in_rep / 2.\n assert_representation_allclose(r3, expected)\n\n def test_add_sub_unit_spherical(self):\n s1 = self.unit_spherical + self.unit_spherical\n assert isinstance(s1, SphericalRepresentation)\n expected = 2. 
* self.unit_spherical\n for component in s1.components:\n assert_quantity_allclose(getattr(s1, component),\n getattr(expected, component))\n with pytest.raises(TypeError):\n 10.*u.m - self.unit_spherical\n with pytest.raises(u.UnitsError):\n self.unit_spherical + (self.unit_spherical / u.s)\n s2 = self.unit_spherical - self.unit_spherical / 2.\n assert isinstance(s2, SphericalRepresentation)\n expected = self.unit_spherical / 2.\n for component in s2.components:\n assert_quantity_allclose(getattr(s2, component),\n getattr(expected, component))\n\n @pytest.mark.parametrize('representation',\n (CartesianRepresentation,\n PhysicsSphericalRepresentation,\n SphericalRepresentation,\n CylindricalRepresentation))\n def test_sum_mean(self, representation):\n in_rep = self.spherical.represent_as(representation)\n r_sum = in_rep.sum()\n assert isinstance(r_sum, representation)\n expected = SphericalRepresentation(\n 90. * u.deg, 0. * u.deg, 14. * u.kpc).represent_as(representation)\n for component in expected.components:\n exp_component = getattr(expected, component)\n assert_quantity_allclose(getattr(r_sum, component),\n exp_component,\n atol=1e-10*exp_component.unit)\n\n r_mean = in_rep.mean()\n assert isinstance(r_mean, representation)\n expected = expected / len(in_rep)\n for component in expected.components:\n exp_component = getattr(expected, component)\n assert_quantity_allclose(getattr(r_mean, component),\n exp_component,\n atol=1e-10*exp_component.unit)\n\n def test_sum_mean_unit_spherical(self):\n s_sum = self.unit_spherical.sum()\n assert isinstance(s_sum, SphericalRepresentation)\n expected = SphericalRepresentation(\n 90. * u.deg, 0. * u.deg, 3. * u.dimensionless_unscaled)\n for component in expected.components:\n exp_component = getattr(expected, component)\n assert_quantity_allclose(getattr(s_sum, component),\n exp_component,\n atol=1e-10*exp_component.unit)\n\n s_mean = self.unit_spherical.mean()\n assert isinstance(s_mean, SphericalRepresentation)\n expected = expected / len(self.unit_spherical)\n for component in expected.components:\n exp_component = getattr(expected, component)\n assert_quantity_allclose(getattr(s_mean, component),\n exp_component,\n atol=1e-10*exp_component.unit)\n\n @pytest.mark.parametrize('representation',\n (CartesianRepresentation,\n PhysicsSphericalRepresentation,\n SphericalRepresentation,\n CylindricalRepresentation))\n def test_dot(self, representation):\n in_rep = self.cartesian.represent_as(representation)\n r_dot_r = in_rep.dot(in_rep)\n assert isinstance(r_dot_r, u.Quantity)\n assert r_dot_r.shape == in_rep.shape\n assert_quantity_allclose(np.sqrt(r_dot_r), self.distance)\n r_dot_r_rev = in_rep.dot(in_rep[::-1])\n assert isinstance(r_dot_r_rev, u.Quantity)\n assert r_dot_r_rev.shape == in_rep.shape\n expected = [-25., -126., 2., 4., 2., -126., -25.] * u.kpc**2\n assert_quantity_allclose(r_dot_r_rev, expected)\n for axis in 'xyz':\n project = CartesianRepresentation(*(\n (1. if axis == _axis else 0.) 
* u.dimensionless_unscaled\n for _axis in 'xyz'))\n assert_quantity_allclose(in_rep.dot(project),\n getattr(self.cartesian, axis),\n atol=1.*u.upc)\n with pytest.raises(TypeError):\n in_rep.dot(self.cartesian.xyz)\n\n def test_dot_unit_spherical(self):\n u_dot_u = self.unit_spherical.dot(self.unit_spherical)\n assert isinstance(u_dot_u, u.Quantity)\n assert u_dot_u.shape == self.unit_spherical.shape\n assert_quantity_allclose(u_dot_u, 1.*u.dimensionless_unscaled)\n cartesian = self.unit_spherical.to_cartesian()\n for axis in 'xyz':\n project = CartesianRepresentation(*(\n (1. if axis == _axis else 0.) * u.dimensionless_unscaled\n for _axis in 'xyz'))\n assert_quantity_allclose(self.unit_spherical.dot(project),\n getattr(cartesian, axis), atol=1.e-10)\n\n @pytest.mark.parametrize('representation',\n (CartesianRepresentation,\n PhysicsSphericalRepresentation,\n SphericalRepresentation,\n CylindricalRepresentation))\n def test_cross(self, representation):\n in_rep = self.cartesian.represent_as(representation)\n r_cross_r = in_rep.cross(in_rep)\n assert isinstance(r_cross_r, representation)\n assert_quantity_allclose(r_cross_r.norm(), 0.*u.kpc**2,\n atol=1.*u.mpc**2)\n r_cross_r_rev = in_rep.cross(in_rep[::-1])\n sep = angular_separation(self.lon, self.lat,\n self.lon[::-1], self.lat[::-1])\n expected = self.distance * self.distance[::-1] * np.sin(sep)\n assert_quantity_allclose(r_cross_r_rev.norm(), expected,\n atol=1.*u.mpc**2)\n unit_vectors = CartesianRepresentation(\n [1., 0., 0.]*u.one,\n [0., 1., 0.]*u.one,\n [0., 0., 1.]*u.one)[:, np.newaxis]\n r_cross_uv = in_rep.cross(unit_vectors)\n assert r_cross_uv.shape == (3, 7)\n assert_quantity_allclose(r_cross_uv.dot(unit_vectors), 0.*u.kpc,\n atol=1.*u.upc)\n assert_quantity_allclose(r_cross_uv.dot(in_rep), 0.*u.kpc**2,\n atol=1.*u.mpc**2)\n zeros = np.zeros(len(in_rep)) * u.kpc\n expected = CartesianRepresentation(\n u.Quantity((zeros, -self.cartesian.z, self.cartesian.y)),\n u.Quantity((self.cartesian.z, zeros, -self.cartesian.x)),\n u.Quantity((-self.cartesian.y, self.cartesian.x, zeros)))\n # Comparison with spherical is hard since some distances are zero,\n # implying the angles are undefined.\n r_cross_uv_cartesian = r_cross_uv.to_cartesian()\n assert_representation_allclose(r_cross_uv_cartesian,\n expected, atol=1.*u.upc)\n # A final check, with the side benefit of ensuring __div__ and norm\n # work on multi-D representations.\n r_cross_uv_by_distance = r_cross_uv / self.distance\n uv_sph = unit_vectors.represent_as(UnitSphericalRepresentation)\n sep = angular_separation(self.lon, self.lat, uv_sph.lon, uv_sph.lat)\n assert_quantity_allclose(r_cross_uv_by_distance.norm(), np.sin(sep),\n atol=1e-9)\n\n with pytest.raises(TypeError):\n in_rep.cross(self.cartesian.xyz)\n\n def test_cross_unit_spherical(self):\n u_cross_u = self.unit_spherical.cross(self.unit_spherical)\n assert isinstance(u_cross_u, SphericalRepresentation)\n assert_quantity_allclose(u_cross_u.norm(), 0.*u.one, atol=1.e-10*u.one)\n u_cross_u_rev = self.unit_spherical.cross(self.unit_spherical[::-1])\n assert isinstance(u_cross_u_rev, SphericalRepresentation)\n sep = angular_separation(self.lon, self.lat,\n self.lon[::-1], self.lat[::-1])\n expected = np.sin(sep)\n assert_quantity_allclose(u_cross_u_rev.norm(), expected,\n atol=1.e-10*u.one)\n\n\nclass TestUnitVectorsAndScales():\n\n @staticmethod\n def check_unit_vectors(e):\n for v in e.values():\n assert type(v) is CartesianRepresentation\n assert_quantity_allclose(v.norm(), 1. 
* u.one)\n return e\n\n @staticmethod\n def check_scale_factors(sf, rep):\n unit = rep.norm().unit\n for c, f in sf.items():\n assert type(f) is u.Quantity\n assert (f.unit * getattr(rep, c).unit).is_equivalent(unit)\n\n def test_spherical(self):\n s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,\n lat=[0., -30., 85.] * u.deg,\n distance=[1, 2, 3] * u.kpc)\n e = s.unit_vectors()\n self.check_unit_vectors(e)\n sf = s.scale_factors()\n self.check_scale_factors(sf, s)\n\n s_lon = s + s.distance * 1e-5 * np.cos(s.lat) * e['lon']\n assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad,\n atol=1e-10*u.rad)\n assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad)\n assert_quantity_allclose(s_lon.distance, s.distance)\n s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon']\n assert_representation_allclose(s_lon2, s_lon)\n\n s_lat = s + s.distance * 1e-5 * e['lat']\n assert_quantity_allclose(s_lat.lon, s.lon)\n assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad,\n atol=1e-10*u.rad)\n assert_quantity_allclose(s_lon.distance, s.distance)\n s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat']\n assert_representation_allclose(s_lat2, s_lat)\n\n s_distance = s + 1. * u.pc * e['distance']\n assert_quantity_allclose(s_distance.lon, s.lon, atol=1e-10*u.rad)\n assert_quantity_allclose(s_distance.lat, s.lat, atol=1e-10*u.rad)\n assert_quantity_allclose(s_distance.distance, s.distance + 1.*u.pc)\n s_distance2 = s + 1. * u.pc * sf['distance'] * e['distance']\n assert_representation_allclose(s_distance2, s_distance)\n\n def test_unit_spherical(self):\n s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,\n lat=[0., -30., 85.] * u.deg)\n\n e = s.unit_vectors()\n self.check_unit_vectors(e)\n sf = s.scale_factors()\n self.check_scale_factors(sf, s)\n\n s_lon = s + 1e-5 * np.cos(s.lat) * e['lon']\n assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad,\n atol=1e-10*u.rad)\n assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad)\n s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon']\n assert_representation_allclose(s_lon2, s_lon)\n\n s_lat = s + 1e-5 * e['lat']\n assert_quantity_allclose(s_lat.lon, s.lon)\n assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad,\n atol=1e-10*u.rad)\n s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat']\n assert_representation_allclose(s_lat2, s_lat)\n\n def test_radial(self):\n r = RadialRepresentation(10.*u.kpc)\n with pytest.raises(NotImplementedError):\n r.unit_vectors()\n sf = r.scale_factors()\n assert np.all(sf['distance'] == 1.*u.one)\n assert np.all(r.norm() == r.distance)\n with pytest.raises(TypeError):\n r + r\n\n def test_physical_spherical(self):\n\n s = PhysicsSphericalRepresentation(phi=[0., 6., 21.] * u.hourangle,\n theta=[90., 120., 5.] 
* u.deg,\n r=[1, 2, 3] * u.kpc)\n\n e = s.unit_vectors()\n self.check_unit_vectors(e)\n sf = s.scale_factors()\n self.check_scale_factors(sf, s)\n\n s_phi = s + s.r * 1e-5 * np.sin(s.theta) * e['phi']\n assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad,\n atol=1e-10*u.rad)\n assert_quantity_allclose(s_phi.theta, s.theta, atol=1e-10*u.rad)\n assert_quantity_allclose(s_phi.r, s.r)\n s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi']\n assert_representation_allclose(s_phi2, s_phi)\n\n s_theta = s + s.r * 1e-5 * e['theta']\n assert_quantity_allclose(s_theta.phi, s.phi)\n assert_quantity_allclose(s_theta.theta, s.theta + 1e-5*u.rad,\n atol=1e-10*u.rad)\n assert_quantity_allclose(s_theta.r, s.r)\n s_theta2 = s + 1.e-5 * u.radian * sf['theta'] * e['theta']\n assert_representation_allclose(s_theta2, s_theta)\n\n s_r = s + 1. * u.pc * e['r']\n assert_quantity_allclose(s_r.phi, s.phi, atol=1e-10*u.rad)\n assert_quantity_allclose(s_r.theta, s.theta, atol=1e-10*u.rad)\n assert_quantity_allclose(s_r.r, s.r + 1.*u.pc)\n s_r2 = s + 1. * u.pc * sf['r'] * e['r']\n assert_representation_allclose(s_r2, s_r)\n\n def test_cartesian(self):\n\n s = CartesianRepresentation(x=[1, 2, 3] * u.pc,\n y=[2, 3, 4] * u.Mpc,\n z=[3, 4, 5] * u.kpc)\n\n e = s.unit_vectors()\n sf = s.scale_factors()\n for v, expected in zip(e.values(), ([1., 0., 0.] * u.one,\n [0., 1., 0.] * u.one,\n [0., 0., 1.] * u.one)):\n assert np.all(v.get_xyz(xyz_axis=-1) == expected)\n for f in sf.values():\n assert np.all(f == 1.*u.one)\n\n def test_cylindrical(self):\n\n s = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,\n phi=[0., 90., -45.] * u.deg,\n z=[3, 4, 5] * u.kpc)\n e = s.unit_vectors()\n self.check_unit_vectors(e)\n sf = s.scale_factors()\n self.check_scale_factors(sf, s)\n\n s_rho = s + 1. * u.pc * e['rho']\n assert_quantity_allclose(s_rho.rho, s.rho + 1.*u.pc)\n assert_quantity_allclose(s_rho.phi, s.phi)\n assert_quantity_allclose(s_rho.z, s.z)\n s_rho2 = s + 1. * u.pc * sf['rho'] * e['rho']\n assert_representation_allclose(s_rho2, s_rho)\n\n s_phi = s + s.rho * 1e-5 * e['phi']\n assert_quantity_allclose(s_phi.rho, s.rho)\n assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad)\n assert_quantity_allclose(s_phi.z, s.z)\n s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi']\n assert_representation_allclose(s_phi2, s_phi)\n\n s_z = s + 1. * u.pc * e['z']\n assert_quantity_allclose(s_z.rho, s.rho)\n assert_quantity_allclose(s_z.phi, s.phi, atol=1e-10*u.rad)\n assert_quantity_allclose(s_z.z, s.z + 1.*u.pc)\n s_z2 = s + 1. * u.pc * sf['z'] * e['z']\n assert_representation_allclose(s_z2, s_z)\n\n\[email protected]('omit_coslat', [False, True], scope='class')\nclass TestSphericalDifferential():\n # these test cases are subclassed for SphericalCosLatDifferential,\n # hence some tests depend on omit_coslat.\n\n def _setup(self, omit_coslat):\n if omit_coslat:\n self.SD_cls = SphericalCosLatDifferential\n else:\n self.SD_cls = SphericalDifferential\n\n s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,\n lat=[0., -30., 85.] 
* u.deg,\n distance=[1, 2, 3] * u.kpc)\n self.s = s\n self.e = s.unit_vectors()\n self.sf = s.scale_factors(omit_coslat=omit_coslat)\n\n def test_name_coslat(self, omit_coslat):\n self._setup(omit_coslat)\n if omit_coslat:\n assert self.SD_cls is SphericalCosLatDifferential\n assert self.SD_cls.get_name() == 'sphericalcoslat'\n else:\n assert self.SD_cls is SphericalDifferential\n assert self.SD_cls.get_name() == 'spherical'\n assert self.SD_cls.get_name() in DIFFERENTIAL_CLASSES\n\n def test_simple_differentials(self, omit_coslat):\n self._setup(omit_coslat)\n s, e, sf = self.s, self.e, self.sf\n\n o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc)\n o_lonc = o_lon.to_cartesian(base=s)\n o_lon2 = self.SD_cls.from_cartesian(o_lonc, base=s)\n assert_differential_allclose(o_lon, o_lon2)\n # simple check by hand for first element.\n # lat[0] is 0, so cos(lat) term doesn't matter.\n assert_quantity_allclose(o_lonc[0].xyz,\n [0., np.pi/180./3600., 0.]*u.kpc)\n # check all using unit vectors and scale factors.\n s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon']\n assert_representation_allclose(o_lonc, s_lon - s, atol=1*u.npc)\n s_lon2 = s + o_lon\n assert_representation_allclose(s_lon2, s_lon, atol=1*u.npc)\n\n o_lat = self.SD_cls(0.*u.arcsec, 1.*u.arcsec, 0.*u.kpc)\n o_latc = o_lat.to_cartesian(base=s)\n assert_quantity_allclose(o_latc[0].xyz,\n [0., 0., np.pi/180./3600.]*u.kpc,\n atol=1.*u.npc)\n s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat']\n assert_representation_allclose(o_latc, s_lat - s, atol=1*u.npc)\n s_lat2 = s + o_lat\n assert_representation_allclose(s_lat2, s_lat, atol=1*u.npc)\n\n o_distance = self.SD_cls(0.*u.arcsec, 0.*u.arcsec, 1.*u.mpc)\n o_distancec = o_distance.to_cartesian(base=s)\n assert_quantity_allclose(o_distancec[0].xyz,\n [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc)\n s_distance = s + 1.*u.mpc * sf['distance'] * e['distance']\n assert_representation_allclose(o_distancec, s_distance - s,\n atol=1*u.npc)\n s_distance2 = s + o_distance\n assert_representation_allclose(s_distance2, s_distance)\n\n def test_differential_arithmetic(self, omit_coslat):\n self._setup(omit_coslat)\n s = self.s\n\n o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc)\n o_lon_by_2 = o_lon / 2.\n assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2.,\n o_lon.to_cartesian(s), atol=1e-10*u.kpc)\n assert_representation_allclose(s + o_lon, s + 2 * o_lon_by_2,\n atol=1e-10*u.kpc)\n o_lon_rec = o_lon_by_2 + o_lon_by_2\n assert_representation_allclose(s + o_lon, s + o_lon_rec,\n atol=1e-10*u.kpc)\n o_lon_0 = o_lon - o_lon\n for c in o_lon_0.components:\n assert np.all(getattr(o_lon_0, c) == 0.)\n o_lon2 = self.SD_cls(1*u.mas/u.yr, 0*u.mas/u.yr, 0*u.km/u.s)\n assert_quantity_allclose(o_lon2.norm(s)[0], 4.74*u.km/u.s,\n atol=0.01*u.km/u.s)\n assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr,\n o_lon.to_cartesian(s), atol=1e-10*u.kpc)\n s_off = s + o_lon\n s_off2 = s + o_lon2 * 1000.*u.yr\n assert_representation_allclose(s_off, s_off2, atol=1e-10*u.kpc)\n\n factor = 1e5 * u.radian/u.arcsec\n if not omit_coslat:\n factor = factor / np.cos(s.lat)\n s_off_big = s + o_lon * factor\n\n assert_representation_allclose(\n s_off_big, SphericalRepresentation(s.lon + 90.*u.deg, 0.*u.deg,\n 1e5*s.distance),\n atol=5.*u.kpc)\n\n o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=u.km/u.s)\n o_lon3 = self.SD_cls.from_cartesian(o_lon3c, base=s)\n expected0 = self.SD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr, 0.*u.km/u.s)\n assert_differential_allclose(o_lon3[0], expected0)\n s_off_big2 = s + o_lon3 * 
1e5 * u.yr * u.radian/u.mas\n assert_representation_allclose(\n s_off_big2, SphericalRepresentation(90.*u.deg, 0.*u.deg,\n 1e5*u.kpc), atol=5.*u.kpc)\n\n with pytest.raises(TypeError):\n o_lon - s\n with pytest.raises(TypeError):\n s.to_cartesian() + o_lon\n\n def test_differential_init_errors(self, omit_coslat):\n self._setup(omit_coslat)\n s = self.s\n with pytest.raises(u.UnitsError):\n self.SD_cls(1.*u.arcsec, 0., 0.)\n with pytest.raises(TypeError):\n self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc,\n False, False)\n with pytest.raises(TypeError):\n self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc,\n copy=False, d_lat=0.*u.arcsec)\n with pytest.raises(TypeError):\n self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc,\n copy=False, flying='circus')\n with pytest.raises(ValueError):\n self.SD_cls(np.ones(2)*u.arcsec,\n np.zeros(3)*u.arcsec, np.zeros(2)*u.kpc)\n with pytest.raises(u.UnitsError):\n self.SD_cls(1.*u.arcsec, 1.*u.s, 0.*u.kpc)\n with pytest.raises(u.UnitsError):\n self.SD_cls(1.*u.kpc, 1.*u.arcsec, 0.*u.kpc)\n o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km/u.s)\n with pytest.raises(u.UnitsError):\n o.to_cartesian(s)\n with pytest.raises(AttributeError):\n o.d_lat = 0.*u.arcsec\n with pytest.raises(AttributeError):\n del o.d_lat\n\n o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km)\n with pytest.raises(TypeError):\n o.to_cartesian()\n c = CartesianRepresentation(10., 0., 0., unit=u.km)\n with pytest.raises(TypeError):\n self.SD_cls.to_cartesian(c)\n with pytest.raises(TypeError):\n self.SD_cls.from_cartesian(c)\n with pytest.raises(TypeError):\n self.SD_cls.from_cartesian(c, SphericalRepresentation)\n with pytest.raises(TypeError):\n self.SD_cls.from_cartesian(c, c)\n\n\[email protected]('omit_coslat', [False, True], scope='class')\nclass TestUnitSphericalDifferential():\n def _setup(self, omit_coslat):\n if omit_coslat:\n self.USD_cls = UnitSphericalCosLatDifferential\n else:\n self.USD_cls = UnitSphericalDifferential\n\n s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,\n lat=[0., -30., 85.] 
* u.deg)\n self.s = s\n self.e = s.unit_vectors()\n self.sf = s.scale_factors(omit_coslat=omit_coslat)\n\n def test_name_coslat(self, omit_coslat):\n self._setup(omit_coslat)\n if omit_coslat:\n assert self.USD_cls is UnitSphericalCosLatDifferential\n assert self.USD_cls.get_name() == 'unitsphericalcoslat'\n else:\n assert self.USD_cls is UnitSphericalDifferential\n assert self.USD_cls.get_name() == 'unitspherical'\n assert self.USD_cls.get_name() in DIFFERENTIAL_CLASSES\n\n def test_simple_differentials(self, omit_coslat):\n self._setup(omit_coslat)\n s, e, sf = self.s, self.e, self.sf\n\n o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec)\n o_lonc = o_lon.to_cartesian(base=s)\n o_lon2 = self.USD_cls.from_cartesian(o_lonc, base=s)\n assert_differential_allclose(o_lon, o_lon2)\n # simple check by hand for first element\n # (lat[0]=0, so works for both normal and CosLat differential)\n assert_quantity_allclose(o_lonc[0].xyz,\n [0., np.pi/180./3600., 0.]*u.one)\n # check all using unit vectors and scale factors.\n s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon']\n assert type(s_lon) is SphericalRepresentation\n assert_representation_allclose(o_lonc, s_lon - s, atol=1e-10*u.one)\n s_lon2 = s + o_lon\n assert_representation_allclose(s_lon2, s_lon, atol=1e-10*u.one)\n\n o_lat = self.USD_cls(0.*u.arcsec, 1.*u.arcsec)\n o_latc = o_lat.to_cartesian(base=s)\n assert_quantity_allclose(o_latc[0].xyz,\n [0., 0., np.pi/180./3600.]*u.one,\n atol=1e-10*u.one)\n s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat']\n assert type(s_lat) is SphericalRepresentation\n assert_representation_allclose(o_latc, s_lat - s, atol=1e-10*u.one)\n s_lat2 = s + o_lat\n assert_representation_allclose(s_lat2, s_lat, atol=1e-10*u.one)\n\n def test_differential_arithmetic(self, omit_coslat):\n self._setup(omit_coslat)\n s = self.s\n\n o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec)\n o_lon_by_2 = o_lon / 2.\n assert type(o_lon_by_2) is self.USD_cls\n assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2.,\n o_lon.to_cartesian(s), atol=1e-10*u.one)\n s_lon = s + o_lon\n s_lon2 = s + 2 * o_lon_by_2\n assert type(s_lon) is SphericalRepresentation\n assert_representation_allclose(s_lon, s_lon2, atol=1e-10*u.one)\n o_lon_rec = o_lon_by_2 + o_lon_by_2\n assert type(o_lon_rec) is self.USD_cls\n assert representation_equal(o_lon, o_lon_rec)\n assert_representation_allclose(s + o_lon, s + o_lon_rec,\n atol=1e-10*u.one)\n o_lon_0 = o_lon - o_lon\n assert type(o_lon_0) is self.USD_cls\n for c in o_lon_0.components:\n assert np.all(getattr(o_lon_0, c) == 0.)\n\n o_lon2 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr)\n kks = u.km/u.kpc/u.s\n assert_quantity_allclose(o_lon2.norm(s)[0], 4.74047*kks, atol=1e-4*kks)\n assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr,\n o_lon.to_cartesian(s), atol=1e-10*u.one)\n s_off = s + o_lon\n s_off2 = s + o_lon2 * 1000.*u.yr\n assert_representation_allclose(s_off, s_off2, atol=1e-10*u.one)\n\n factor = 1e5 * u.radian/u.arcsec\n if not omit_coslat:\n factor = factor / np.cos(s.lat)\n s_off_big = s + o_lon * factor\n\n assert_representation_allclose(\n s_off_big, SphericalRepresentation(s.lon + 90.*u.deg,\n 0.*u.deg, 1e5),\n atol=5.*u.one)\n\n o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=kks)\n # This looses information!!\n o_lon3 = self.USD_cls.from_cartesian(o_lon3c, base=s)\n expected0 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr)\n assert_differential_allclose(o_lon3[0], expected0)\n # Part of motion kept.\n part_kept = s.cross(CartesianRepresentation(0, 1, 0, unit=u.one)).norm()\n 
assert_quantity_allclose(o_lon3.norm(s), 4.74047*part_kept*kks,\n atol=1e-10*kks)\n # (lat[0]=0, so works for both normal and CosLat differential)\n s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas\n expected0 = SphericalRepresentation(90.*u.deg, 0.*u.deg,\n 1e5*u.one)\n assert_representation_allclose(s_off_big2[0], expected0, atol=5.*u.one)\n\n def test_differential_init_errors(self, omit_coslat):\n self._setup(omit_coslat)\n with pytest.raises(u.UnitsError):\n self.USD_cls(0.*u.deg, 10.*u.deg/u.yr)\n\n\nclass TestRadialDifferential():\n def setup(self):\n s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,\n lat=[0., -30., 85.] * u.deg,\n distance=[1, 2, 3] * u.kpc)\n self.s = s\n self.r = s.represent_as(RadialRepresentation)\n self.e = s.unit_vectors()\n self.sf = s.scale_factors()\n\n def test_name(self):\n assert RadialDifferential.get_name() == 'radial'\n assert RadialDifferential.get_name() in DIFFERENTIAL_CLASSES\n\n def test_simple_differentials(self):\n r, s, e, sf = self.r, self.s, self.e, self.sf\n\n o_distance = RadialDifferential(1.*u.mpc)\n # Can be applied to RadialRepresentation, though not most useful.\n r_distance = r + o_distance\n assert_quantity_allclose(r_distance.distance,\n r.distance + o_distance.d_distance)\n r_distance2 = o_distance + r\n assert_quantity_allclose(r_distance2.distance,\n r.distance + o_distance.d_distance)\n # More sense to apply it relative to spherical representation.\n o_distancec = o_distance.to_cartesian(base=s)\n assert_quantity_allclose(o_distancec[0].xyz,\n [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc)\n o_recover = RadialDifferential.from_cartesian(o_distancec, base=s)\n assert_quantity_allclose(o_recover.d_distance, o_distance.d_distance)\n\n s_distance = s + 1.*u.mpc * sf['distance'] * e['distance']\n assert_representation_allclose(o_distancec, s_distance - s,\n atol=1*u.npc)\n s_distance2 = s + o_distance\n assert_representation_allclose(s_distance2, s_distance)\n\n\nclass TestPhysicsSphericalDifferential():\n \"\"\"Test copied from SphericalDifferential, so less extensive.\"\"\"\n\n def setup(self):\n s = PhysicsSphericalRepresentation(phi=[0., 90., 315.] * u.deg,\n theta=[90., 120., 5.] 
* u.deg,\n r=[1, 2, 3] * u.kpc)\n self.s = s\n self.e = s.unit_vectors()\n self.sf = s.scale_factors()\n\n def test_name(self):\n assert PhysicsSphericalDifferential.get_name() == 'physicsspherical'\n assert PhysicsSphericalDifferential.get_name() in DIFFERENTIAL_CLASSES\n\n def test_simple_differentials(self):\n s, e, sf = self.s, self.e, self.sf\n\n o_phi = PhysicsSphericalDifferential(1*u.arcsec, 0*u.arcsec, 0*u.kpc)\n o_phic = o_phi.to_cartesian(base=s)\n o_phi2 = PhysicsSphericalDifferential.from_cartesian(o_phic, base=s)\n assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec)\n assert_quantity_allclose(o_phi.d_theta, o_phi2.d_theta,\n atol=1.*u.narcsec)\n assert_quantity_allclose(o_phi.d_r, o_phi2.d_r, atol=1.*u.npc)\n # simple check by hand for first element.\n assert_quantity_allclose(o_phic[0].xyz,\n [0., np.pi/180./3600., 0.]*u.kpc,\n atol=1.*u.npc)\n # check all using unit vectors and scale factors.\n s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi']\n assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc)\n\n o_theta = PhysicsSphericalDifferential(0*u.arcsec, 1*u.arcsec, 0*u.kpc)\n o_thetac = o_theta.to_cartesian(base=s)\n assert_quantity_allclose(o_thetac[0].xyz,\n [0., 0., -np.pi/180./3600.]*u.kpc,\n atol=1.*u.npc)\n s_theta = s + 1.*u.arcsec * sf['theta'] * e['theta']\n assert_representation_allclose(o_thetac, s_theta - s, atol=1e-10*u.kpc)\n s_theta2 = s + o_theta\n assert_representation_allclose(s_theta2, s_theta, atol=1e-10*u.kpc)\n\n o_r = PhysicsSphericalDifferential(0*u.arcsec, 0*u.arcsec, 1*u.mpc)\n o_rc = o_r.to_cartesian(base=s)\n assert_quantity_allclose(o_rc[0].xyz, [1e-6, 0., 0.]*u.kpc,\n atol=1.*u.npc)\n s_r = s + 1.*u.mpc * sf['r'] * e['r']\n assert_representation_allclose(o_rc, s_r - s, atol=1e-10*u.kpc)\n s_r2 = s + o_r\n assert_representation_allclose(s_r2, s_r)\n\n def test_differential_init_errors(self):\n with pytest.raises(u.UnitsError):\n PhysicsSphericalDifferential(1.*u.arcsec, 0., 0.)\n\n\nclass TestCylindricalDifferential():\n \"\"\"Test copied from SphericalDifferential, so less extensive.\"\"\"\n\n def setup(self):\n s = CylindricalRepresentation(rho=[1, 2, 3] * u.kpc,\n phi=[0., 90., 315.] 
* u.deg,\n z=[3, 2, 1] * u.kpc)\n self.s = s\n self.e = s.unit_vectors()\n self.sf = s.scale_factors()\n\n def test_name(self):\n assert CylindricalDifferential.get_name() == 'cylindrical'\n assert CylindricalDifferential.get_name() in DIFFERENTIAL_CLASSES\n\n def test_simple_differentials(self):\n s, e, sf = self.s, self.e, self.sf\n\n o_rho = CylindricalDifferential(1.*u.mpc, 0.*u.arcsec, 0.*u.kpc)\n o_rhoc = o_rho.to_cartesian(base=s)\n assert_quantity_allclose(o_rhoc[0].xyz, [1.e-6, 0., 0.]*u.kpc)\n s_rho = s + 1.*u.mpc * sf['rho'] * e['rho']\n assert_representation_allclose(o_rhoc, s_rho - s, atol=1e-10*u.kpc)\n s_rho2 = s + o_rho\n assert_representation_allclose(s_rho2, s_rho)\n\n o_phi = CylindricalDifferential(0.*u.kpc, 1.*u.arcsec, 0.*u.kpc)\n o_phic = o_phi.to_cartesian(base=s)\n o_phi2 = CylindricalDifferential.from_cartesian(o_phic, base=s)\n assert_quantity_allclose(o_phi.d_rho, o_phi2.d_rho, atol=1.*u.npc)\n assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec)\n assert_quantity_allclose(o_phi.d_z, o_phi2.d_z, atol=1.*u.npc)\n # simple check by hand for first element.\n assert_quantity_allclose(o_phic[0].xyz,\n [0., np.pi/180./3600., 0.]*u.kpc)\n # check all using unit vectors and scale factors.\n s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi']\n assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc)\n\n o_z = CylindricalDifferential(0.*u.kpc, 0.*u.arcsec, 1.*u.mpc)\n o_zc = o_z.to_cartesian(base=s)\n assert_quantity_allclose(o_zc[0].xyz, [0., 0., 1.e-6]*u.kpc)\n s_z = s + 1.*u.mpc * sf['z'] * e['z']\n assert_representation_allclose(o_zc, s_z - s, atol=1e-10*u.kpc)\n s_z2 = s + o_z\n assert_representation_allclose(s_z2, s_z)\n\n def test_differential_init_errors(self):\n with pytest.raises(u.UnitsError):\n CylindricalDifferential(1.*u.pc, 1.*u.arcsec, 3.*u.km/u.s)\n\n\nclass TestCartesianDifferential():\n \"\"\"Test copied from SphericalDifferential, so less extensive.\"\"\"\n\n def setup(self):\n s = CartesianRepresentation(x=[1, 2, 3] * u.kpc,\n y=[2, 3, 1] * u.kpc,\n z=[3, 1, 2] * u.kpc)\n self.s = s\n self.e = s.unit_vectors()\n self.sf = s.scale_factors()\n\n def test_name(self):\n assert CartesianDifferential.get_name() == 'cartesian'\n assert CartesianDifferential.get_name() in DIFFERENTIAL_CLASSES\n\n def test_simple_differentials(self):\n s, e, sf = self.s, self.e, self.sf\n\n for d, differential in ( # test different inits while we're at it.\n ('x', CartesianDifferential(1.*u.pc, 0.*u.pc, 0.*u.pc)),\n ('y', CartesianDifferential([0., 1., 0.], unit=u.pc)),\n ('z', CartesianDifferential(np.array([[0., 0., 1.]]) * u.pc,\n xyz_axis=1))):\n o_c = differential.to_cartesian(base=s)\n o_c2 = differential.to_cartesian()\n assert np.all(representation_equal(o_c, o_c2))\n assert all(np.all(getattr(differential, 'd_'+c) == getattr(o_c, c))\n for c in ('x', 'y', 'z'))\n differential2 = CartesianDifferential.from_cartesian(o_c)\n assert np.all(representation_equal(differential2, differential))\n differential3 = CartesianDifferential.from_cartesian(o_c, base=o_c)\n assert np.all(representation_equal(differential3, differential))\n\n s_off = s + 1.*u.pc * sf[d] * e[d]\n assert_representation_allclose(o_c, s_off - s, atol=1e-10*u.kpc)\n s_off2 = s + differential\n assert_representation_allclose(s_off2, s_off)\n\n def test_init_failures(self):\n with pytest.raises(ValueError):\n CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc)\n with pytest.raises(u.UnitsError):\n CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc, 3.*u.kpc)\n with pytest.raises(ValueError):\n 
CartesianDifferential(1.*u.kpc, 2.*u.kpc, 3.*u.kpc, xyz_axis=1)\n\n\nclass TestDifferentialConversion():\n def setup(self):\n self.s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,\n lat=[0., -30., 85.] * u.deg,\n distance=[1, 2, 3] * u.kpc)\n\n @pytest.mark.parametrize('sd_cls', [SphericalDifferential,\n SphericalCosLatDifferential])\n def test_represent_as_own_class(self, sd_cls):\n so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)\n so2 = so.represent_as(sd_cls)\n assert so2 is so\n\n def test_represent_other_coslat(self):\n s = self.s\n coslat = np.cos(s.lat)\n so = SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc)\n so_coslat = so.represent_as(SphericalCosLatDifferential, base=s)\n assert_quantity_allclose(so.d_lon * coslat,\n so_coslat.d_lon_coslat)\n so2 = so_coslat.represent_as(SphericalDifferential, base=s)\n assert np.all(representation_equal(so2, so))\n so3 = SphericalDifferential.from_representation(so_coslat, base=s)\n assert np.all(representation_equal(so3, so))\n so_coslat2 = SphericalCosLatDifferential.from_representation(so, base=s)\n assert np.all(representation_equal(so_coslat2, so_coslat))\n # Also test UnitSpherical\n us = s.represent_as(UnitSphericalRepresentation)\n uo = so.represent_as(UnitSphericalDifferential)\n uo_coslat = so.represent_as(UnitSphericalCosLatDifferential, base=s)\n assert_quantity_allclose(uo.d_lon * coslat,\n uo_coslat.d_lon_coslat)\n uo2 = uo_coslat.represent_as(UnitSphericalDifferential, base=us)\n assert np.all(representation_equal(uo2, uo))\n uo3 = UnitSphericalDifferential.from_representation(uo_coslat, base=us)\n assert np.all(representation_equal(uo3, uo))\n uo_coslat2 = UnitSphericalCosLatDifferential.from_representation(\n uo, base=us)\n assert np.all(representation_equal(uo_coslat2, uo_coslat))\n uo_coslat3 = uo.represent_as(UnitSphericalCosLatDifferential, base=us)\n assert np.all(representation_equal(uo_coslat3, uo_coslat))\n\n @pytest.mark.parametrize('sd_cls', [SphericalDifferential,\n SphericalCosLatDifferential])\n @pytest.mark.parametrize('r_cls', (SphericalRepresentation,\n UnitSphericalRepresentation,\n PhysicsSphericalRepresentation,\n CylindricalRepresentation))\n def test_represent_regular_class(self, sd_cls, r_cls):\n so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)\n r = so.represent_as(r_cls, base=self.s)\n c = so.to_cartesian(self.s)\n r_check = c.represent_as(r_cls)\n assert np.all(representation_equal(r, r_check))\n so2 = sd_cls.from_representation(r, base=self.s)\n so3 = sd_cls.from_cartesian(r.to_cartesian(), self.s)\n assert np.all(representation_equal(so2, so3))\n\n @pytest.mark.parametrize('sd_cls', [SphericalDifferential,\n SphericalCosLatDifferential])\n def test_convert_physics(self, sd_cls):\n # Conversion needs no base for SphericalDifferential, but does\n # need one (to get the latitude) for SphericalCosLatDifferential.\n if sd_cls is SphericalDifferential:\n usd_cls = UnitSphericalDifferential\n base_s = base_u = base_p = None\n else:\n usd_cls = UnitSphericalCosLatDifferential\n base_s = self.s[1]\n base_u = base_s.represent_as(UnitSphericalRepresentation)\n base_p = base_s.represent_as(PhysicsSphericalRepresentation)\n\n so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)\n po = so.represent_as(PhysicsSphericalDifferential, base=base_s)\n so2 = sd_cls.from_representation(po, base=base_s)\n assert_differential_allclose(so, so2)\n po2 = PhysicsSphericalDifferential.from_representation(so, base=base_p)\n assert_differential_allclose(po, po2)\n so3 = po.represent_as(sd_cls, base=base_p)\n 
assert_differential_allclose(so, so3)\n\n s = self.s\n p = s.represent_as(PhysicsSphericalRepresentation)\n cso = so.to_cartesian(s[1])\n cpo = po.to_cartesian(p[1])\n assert_representation_allclose(cso, cpo)\n assert_representation_allclose(s[1] + so, p[1] + po)\n po2 = so.represent_as(PhysicsSphericalDifferential,\n base=None if base_s is None else s)\n assert_representation_allclose(s + so, p + po2)\n\n suo = usd_cls.from_representation(so)\n puo = usd_cls.from_representation(po, base=base_u)\n assert_differential_allclose(suo, puo)\n suo2 = so.represent_as(usd_cls)\n puo2 = po.represent_as(usd_cls, base=base_p)\n assert_differential_allclose(suo2, puo2)\n assert_differential_allclose(puo, puo2)\n\n sro = RadialDifferential.from_representation(so)\n pro = RadialDifferential.from_representation(po)\n assert representation_equal(sro, pro)\n sro2 = so.represent_as(RadialDifferential)\n pro2 = po.represent_as(RadialDifferential)\n assert representation_equal(sro2, pro2)\n assert representation_equal(pro, pro2)\n\n @pytest.mark.parametrize(\n ('sd_cls', 'usd_cls'),\n [(SphericalDifferential, UnitSphericalDifferential),\n (SphericalCosLatDifferential, UnitSphericalCosLatDifferential)])\n def test_convert_unit_spherical_radial(self, sd_cls, usd_cls):\n s = self.s\n us = s.represent_as(UnitSphericalRepresentation)\n rs = s.represent_as(RadialRepresentation)\n assert_representation_allclose(rs * us, s)\n\n uo = usd_cls(2.*u.deg, 1.*u.deg)\n so = uo.represent_as(sd_cls, base=s)\n assert_quantity_allclose(so.d_distance, 0.*u.kpc, atol=1.*u.npc)\n uo2 = so.represent_as(usd_cls)\n assert_representation_allclose(uo.to_cartesian(us),\n uo2.to_cartesian(us))\n so1 = sd_cls(2.*u.deg, 1.*u.deg, 5.*u.pc)\n uo_r = so1.represent_as(usd_cls)\n ro_r = so1.represent_as(RadialDifferential)\n assert np.all(representation_equal(uo_r, uo))\n assert np.all(representation_equal(ro_r, RadialDifferential(5.*u.pc)))\n\n @pytest.mark.parametrize('sd_cls', [SphericalDifferential,\n SphericalCosLatDifferential])\n def test_convert_cylindrial(self, sd_cls):\n s = self.s\n so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)\n cyo = so.represent_as(CylindricalDifferential, base=s)\n cy = s.represent_as(CylindricalRepresentation)\n so1 = cyo.represent_as(sd_cls, base=cy)\n assert_representation_allclose(so.to_cartesian(s),\n so1.to_cartesian(s))\n cyo2 = CylindricalDifferential.from_representation(so, base=cy)\n assert_representation_allclose(cyo2.to_cartesian(base=cy),\n cyo.to_cartesian(base=cy))\n so2 = sd_cls.from_representation(cyo2, base=s)\n assert_representation_allclose(so.to_cartesian(s),\n so2.to_cartesian(s))\n\n @pytest.mark.parametrize('sd_cls', [SphericalDifferential,\n SphericalCosLatDifferential])\n def test_combinations(self, sd_cls):\n if sd_cls is SphericalDifferential:\n uo = UnitSphericalDifferential(2.*u.deg, 1.*u.deg)\n uo_d_lon = uo.d_lon\n else:\n uo = UnitSphericalCosLatDifferential(2.*u.deg, 1.*u.deg)\n uo_d_lon = uo.d_lon_coslat\n ro = RadialDifferential(1.*u.mpc)\n so1 = uo + ro\n so1c = sd_cls(uo_d_lon, uo.d_lat, ro.d_distance)\n assert np.all(representation_equal(so1, so1c))\n\n so2 = uo - ro\n so2c = sd_cls(uo_d_lon, uo.d_lat, -ro.d_distance)\n assert np.all(representation_equal(so2, so2c))\n so3 = so2 + ro\n so3c = sd_cls(uo_d_lon, uo.d_lat, 0.*u.kpc)\n assert np.all(representation_equal(so3, so3c))\n so4 = so1 + ro\n so4c = sd_cls(uo_d_lon, uo.d_lat, 2*ro.d_distance)\n assert np.all(representation_equal(so4, so4c))\n so5 = so1 - uo\n so5c = sd_cls(0*u.deg, 0.*u.deg, ro.d_distance)\n assert 
np.all(representation_equal(so5, so5c))\n assert_representation_allclose(self.s + (uo+ro), self.s+so1)\n\n\[email protected]('rep,dif', [\n [CartesianRepresentation([1, 2, 3]*u.kpc),\n CartesianDifferential([.1, .2, .3]*u.km/u.s)],\n [SphericalRepresentation(90*u.deg, 0.*u.deg, 14.*u.kpc),\n SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc)]\n])\ndef test_arithmetic_with_differentials_fail(rep, dif):\n\n rep = rep.with_differentials(dif)\n\n with pytest.raises(TypeError):\n rep + rep\n\n with pytest.raises(TypeError):\n rep - rep\n\n with pytest.raises(TypeError):\n rep * rep\n\n with pytest.raises(TypeError):\n rep / rep\n\n with pytest.raises(TypeError):\n 10. * rep\n\n with pytest.raises(TypeError):\n rep / 10.\n\n with pytest.raises(TypeError):\n -rep\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pickle\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom astropy.nddata.nduncertainty import (StdDevUncertainty,\n VarianceUncertainty,\n InverseVariance,\n NDUncertainty,\n IncompatibleUncertaintiesException,\n MissingDataAssociationException,\n UnknownUncertainty)\nfrom astropy.nddata.nddata import NDData\nfrom astropy.nddata.compat import NDDataArray\nfrom astropy.nddata.ccddata import CCDData\nfrom astropy import units as u\n\n# Regarding setter tests:\n# No need to test setters since the uncertainty is considered immutable after\n# creation except of the parent_nddata attribute and this accepts just\n# everything.\n# Additionally they should be covered by NDData, NDArithmeticMixin which rely\n# on it\n\n# Regarding propagate, _convert_uncert, _propagate_* tests:\n# They should be covered by NDArithmeticMixin since there is generally no need\n# to test them without this mixin.\n\n# Regarding __getitem__ tests:\n# Should be covered by NDSlicingMixin.\n\n# Regarding StdDevUncertainty tests:\n# This subclass only overrides the methods for propagation so the same\n# they should be covered in NDArithmeticMixin.\n\n# Not really fake but the minimum an uncertainty has to override not to be\n# abstract.\n\n\nclass FakeUncertainty(NDUncertainty):\n\n @property\n def uncertainty_type(self):\n return 'fake'\n\n def _data_unit_to_uncertainty_unit(self, value):\n return None\n\n def _propagate_add(self, data, final_data):\n pass\n\n def _propagate_subtract(self, data, final_data):\n pass\n\n def _propagate_multiply(self, data, final_data):\n pass\n\n def _propagate_divide(self, data, final_data):\n pass\n\n\n# Test the fake (added also StdDevUncertainty which should behave identical)\n\n# the list of classes used for parametrization in tests below\nuncertainty_types_to_be_tested = [\n FakeUncertainty,\n StdDevUncertainty,\n VarianceUncertainty,\n InverseVariance,\n UnknownUncertainty\n]\n\n\[email protected](('UncertClass'), uncertainty_types_to_be_tested)\ndef test_init_fake_with_list(UncertClass):\n fake_uncert = UncertClass([1, 2, 3])\n assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))\n # Copy makes no difference since casting a list to an np.ndarray always\n # makes a copy.\n # But let's give the uncertainty a unit too\n fake_uncert = UncertClass([1, 2, 3], unit=u.adu)\n assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))\n assert fake_uncert.unit is u.adu\n\n\[email protected](('UncertClass'), uncertainty_types_to_be_tested)\ndef test_init_fake_with_ndarray(UncertClass):\n uncert = np.arange(100).reshape(10, 10)\n fake_uncert = UncertClass(uncert)\n # Numpy Arrays are copied by default\n assert_array_equal(fake_uncert.array, uncert)\n assert fake_uncert.array is not uncert\n # Now try it without copy\n fake_uncert = UncertClass(uncert, copy=False)\n assert fake_uncert.array is uncert\n # let's provide a unit\n fake_uncert = UncertClass(uncert, unit=u.adu)\n assert_array_equal(fake_uncert.array, uncert)\n assert fake_uncert.array is not uncert\n assert fake_uncert.unit is u.adu\n\n\[email protected](('UncertClass'), uncertainty_types_to_be_tested)\ndef test_init_fake_with_quantity(UncertClass):\n uncert = np.arange(10).reshape(2, 5) * u.adu\n fake_uncert = UncertClass(uncert)\n # Numpy Arrays are copied by default\n assert_array_equal(fake_uncert.array, uncert.value)\n assert fake_uncert.array is not uncert.value\n assert fake_uncert.unit is u.adu\n # Try without 
copy (should not work, quantity.value always returns a copy)\n fake_uncert = UncertClass(uncert, copy=False)\n assert fake_uncert.array is not uncert.value\n assert fake_uncert.unit is u.adu\n # Now try with an explicit unit parameter too\n fake_uncert = UncertClass(uncert, unit=u.m)\n assert_array_equal(fake_uncert.array, uncert.value) # No conversion done\n assert fake_uncert.array is not uncert.value\n assert fake_uncert.unit is u.m # It took the explicit one\n\n\[email protected](('UncertClass'), uncertainty_types_to_be_tested)\ndef test_init_fake_with_fake(UncertClass):\n uncert = np.arange(5).reshape(5, 1)\n fake_uncert1 = UncertClass(uncert)\n fake_uncert2 = UncertClass(fake_uncert1)\n assert_array_equal(fake_uncert2.array, uncert)\n assert fake_uncert2.array is not uncert\n # Without making copies\n fake_uncert1 = UncertClass(uncert, copy=False)\n fake_uncert2 = UncertClass(fake_uncert1, copy=False)\n assert_array_equal(fake_uncert2.array, fake_uncert1.array)\n assert fake_uncert2.array is fake_uncert1.array\n # With a unit\n uncert = np.arange(5).reshape(5, 1) * u.adu\n fake_uncert1 = UncertClass(uncert)\n fake_uncert2 = UncertClass(fake_uncert1)\n assert_array_equal(fake_uncert2.array, uncert.value)\n assert fake_uncert2.array is not uncert.value\n assert fake_uncert2.unit is u.adu\n # With a unit and an explicit unit-parameter\n fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm)\n assert_array_equal(fake_uncert2.array, uncert.value)\n assert fake_uncert2.array is not uncert.value\n assert fake_uncert2.unit is u.cm\n\n\[email protected](('UncertClass'), uncertainty_types_to_be_tested)\ndef test_init_fake_with_somethingElse(UncertClass):\n # What about a dict?\n uncert = {'rdnoise': 2.9, 'gain': 0.6}\n fake_uncert = UncertClass(uncert)\n assert fake_uncert.array == uncert\n # We can pass a unit too but since we cannot do uncertainty propagation\n # the interpretation is up to the user\n fake_uncert = UncertClass(uncert, unit=u.s)\n assert fake_uncert.array == uncert\n assert fake_uncert.unit is u.s\n # So, now check what happens if copy is False\n fake_uncert = UncertClass(uncert, copy=False)\n assert fake_uncert.array == uncert\n assert id(fake_uncert) != id(uncert)\n # dicts cannot be referenced without copy\n # TODO : Find something that can be referenced without copy :-)\n\n\ndef test_init_fake_with_StdDevUncertainty():\n # Different instances of uncertainties are not directly convertible so this\n # should fail\n uncert = np.arange(5).reshape(5, 1)\n std_uncert = StdDevUncertainty(uncert)\n with pytest.raises(IncompatibleUncertaintiesException):\n FakeUncertainty(std_uncert)\n # Ok try it the other way around\n fake_uncert = FakeUncertainty(uncert)\n with pytest.raises(IncompatibleUncertaintiesException):\n StdDevUncertainty(fake_uncert)\n\n\ndef test_uncertainty_type():\n fake_uncert = FakeUncertainty([10, 2])\n assert fake_uncert.uncertainty_type == 'fake'\n std_uncert = StdDevUncertainty([10, 2])\n assert std_uncert.uncertainty_type == 'std'\n var_uncert = VarianceUncertainty([10, 2])\n assert var_uncert.uncertainty_type == 'var'\n ivar_uncert = InverseVariance([10, 2])\n assert ivar_uncert.uncertainty_type == 'ivar'\n\n\ndef test_uncertainty_correlated():\n fake_uncert = FakeUncertainty([10, 2])\n assert not fake_uncert.supports_correlated\n std_uncert = StdDevUncertainty([10, 2])\n assert std_uncert.supports_correlated\n\n\ndef test_for_leak_with_uncertainty():\n # Regression test for memory leak because of cyclic references between\n # NDData and uncertainty\n from 
collections import defaultdict\n from gc import get_objects\n\n def test_leak(func, specific_objects=None):\n \"\"\"Function based on gc.get_objects to determine if any object or\n a specific object leaks.\n\n It requires a function to be given and if any objects survive the\n function scope it's considered a leak (so don't return anything).\n \"\"\"\n before = defaultdict(int)\n for i in get_objects():\n before[type(i)] += 1\n\n func()\n\n after = defaultdict(int)\n for i in get_objects():\n after[type(i)] += 1\n\n if specific_objects is None:\n assert all(after[k] - before[k] == 0 for k in after)\n else:\n assert after[specific_objects] - before[specific_objects] == 0\n\n def non_leaker_nddata():\n # Without uncertainty there is no reason to assume that there is a\n # memory leak but test it nevertheless.\n NDData(np.ones(100))\n\n def leaker_nddata():\n # With uncertainty there was a memory leak!\n NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))\n\n test_leak(non_leaker_nddata, NDData)\n test_leak(leaker_nddata, NDData)\n\n # Same for NDDataArray:\n\n from astropy.nddata.compat import NDDataArray\n\n def non_leaker_nddataarray():\n NDDataArray(np.ones(100))\n\n def leaker_nddataarray():\n NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))\n\n test_leak(non_leaker_nddataarray, NDDataArray)\n test_leak(leaker_nddataarray, NDDataArray)\n\n\ndef test_for_stolen_uncertainty():\n # Sharing uncertainties should not overwrite the parent_nddata attribute\n ndd1 = NDData(1, uncertainty=1)\n ndd2 = NDData(2, uncertainty=ndd1.uncertainty)\n # uncertainty.parent_nddata.data should be the original data!\n assert ndd1.uncertainty.parent_nddata.data == ndd1.data\n assert ndd2.uncertainty.parent_nddata.data == ndd2.data\n\n\ndef test_stddevuncertainty_pickle():\n uncertainty = StdDevUncertainty(np.ones(3), unit=u.m)\n uncertainty_restored = pickle.loads(pickle.dumps(uncertainty))\n np.testing.assert_array_equal(uncertainty.array, uncertainty_restored.array)\n assert uncertainty.unit == uncertainty_restored.unit\n with pytest.raises(MissingDataAssociationException):\n uncertainty_restored.parent_nddata\n\n\[email protected](('UncertClass'), uncertainty_types_to_be_tested)\ndef test_quantity(UncertClass):\n fake_uncert = UncertClass([1, 2, 3], unit=u.adu)\n assert isinstance(fake_uncert.quantity, u.Quantity)\n assert fake_uncert.quantity.unit.is_equivalent(u.adu)\n\n fake_uncert_nounit = UncertClass([1, 2, 3])\n assert isinstance(fake_uncert_nounit.quantity, u.Quantity)\n assert fake_uncert_nounit.quantity.unit.is_equivalent(u.dimensionless_unscaled)\n\n\[email protected](('UncertClass'),\n [VarianceUncertainty,\n StdDevUncertainty,\n InverseVariance])\ndef test_setting_uncertainty_unit_results_in_unit_object(UncertClass):\n v = UncertClass([1, 1])\n v.unit = 'electron'\n assert isinstance(v.unit, u.UnitBase)\n\n\[email protected]('NDClass', [NDData, NDDataArray, CCDData])\[email protected](('UncertClass'),\n [VarianceUncertainty,\n StdDevUncertainty,\n InverseVariance])\ndef test_changing_unit_to_value_inconsistent_with_parent_fails(NDClass,\n UncertClass):\n ndd1 = NDClass(1, unit='adu')\n v = UncertClass(1)\n # Sets the uncertainty unit to whatever makes sense with this data.\n ndd1.uncertainty = v\n\n with pytest.raises(u.UnitConversionError):\n # Nothing special about 15 except no one would ever use that unit\n v.unit = ndd1.unit ** 15\n\n\[email protected]('NDClass', [NDData, NDDataArray, CCDData])\[email protected](('UncertClass, expected_unit'),\n 
[(VarianceUncertainty, u.adu ** 2),\n (StdDevUncertainty, u.adu),\n (InverseVariance, 1 / u.adu ** 2)])\ndef test_assigning_uncertainty_to_parent_gives_correct_unit(NDClass,\n UncertClass,\n expected_unit):\n # Does assigning a unitless uncertainty to an NDData result in the\n # expected unit?\n ndd = NDClass([1, 1], unit=u.adu)\n v = UncertClass([1, 1])\n ndd.uncertainty = v\n assert v.unit == expected_unit\n\n\[email protected]('NDClass', [NDData, NDDataArray, CCDData])\[email protected](('UncertClass, expected_unit'),\n [(VarianceUncertainty, u.adu ** 2),\n (StdDevUncertainty, u.adu),\n (InverseVariance, 1 / u.adu ** 2)])\ndef test_assigning_uncertainty_with_unit_to_parent_with_unit(NDClass,\n UncertClass,\n expected_unit):\n # Does assigning an uncertainty with an appropriate unit to an NDData\n # with a unit work?\n ndd = NDClass([1, 1], unit=u.adu)\n v = UncertClass([1, 1], unit=expected_unit)\n ndd.uncertainty = v\n assert v.unit == expected_unit\n\n\[email protected]('NDClass', [NDData, NDDataArray, CCDData])\[email protected](('UncertClass'),\n [(VarianceUncertainty),\n (StdDevUncertainty),\n (InverseVariance)])\ndef test_assigning_uncertainty_with_bad_unit_to_parent_fails(NDClass,\n UncertClass):\n # Does assigning an uncertainty with a non-matching unit to an NDData\n # with a unit work?\n ndd = NDClass([1, 1], unit=u.adu)\n # Set the unit to something inconsistent with ndd's unit\n v = UncertClass([1, 1], unit=u.second)\n with pytest.raises(u.UnitConversionError):\n ndd.uncertainty = v\n"
] | [
[
"numpy.array",
"numpy.abs"
],
[
"numpy.arange",
"numpy.all",
"numpy.mean",
"numpy.any",
"numpy.add",
"numpy.array",
"numpy.sum"
],
[
"matplotlib.rcdefaults",
"matplotlib.use",
"matplotlib.rcParams.update"
],
[
"numpy.testing.assert_allclose"
],
[
"numpy.all"
],
[
"numpy.roll",
"numpy.repeat",
"numpy.cumsum"
],
[
"numpy.sqrt",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.all",
"numpy.ones",
"numpy.broadcast_arrays",
"numpy.array",
"numpy.zeros"
],
[
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vigsivan/fastMRI | [
"0f6c4c0176ff74bf2761d20ec62facb01c9038f8"
] | [
"fastmri/models/varnet.py"
] | [
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport math\nfrom typing import List, Tuple, Optional\n\nimport fastmri\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fastmri.data import transforms\n\nfrom .unet import Unet\n\n\nclass NormUnet(nn.Module):\n \"\"\"\n Normalized U-Net model.\n\n This is the same as a regular U-Net, but with normalization applied to the\n input before the U-Net. This keeps the values more numerically stable\n during training.\n \"\"\"\n\n def __init__(\n self,\n chans: int,\n num_pools: int,\n in_chans: int = 2,\n out_chans: int = 2,\n drop_prob: float = 0.0,\n ):\n \"\"\"\n Args:\n chans: Number of output channels of the first convolution layer.\n num_pools: Number of down-sampling and up-sampling layers.\n in_chans: Number of channels in the input to the U-Net model.\n out_chans: Number of channels in the output to the U-Net model.\n drop_prob: Dropout probability.\n \"\"\"\n super().__init__()\n\n self.unet = Unet(\n in_chans=in_chans,\n out_chans=out_chans,\n chans=chans,\n num_pool_layers=num_pools,\n drop_prob=drop_prob,\n )\n\n def complex_to_chan_dim(self, x: torch.Tensor) -> torch.Tensor:\n b, c, h, w, two = x.shape\n assert two == 2\n return x.permute(0, 4, 1, 2, 3).reshape(b, 2 * c, h, w)\n\n def chan_complex_to_last_dim(self, x: torch.Tensor) -> torch.Tensor:\n b, c2, h, w = x.shape\n assert c2 % 2 == 0\n c = c2 // 2\n return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1).contiguous()\n\n def norm(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n # group norm\n b, c, h, w = x.shape\n x = x.view(b, 2, c // 2 * h * w)\n\n mean = x.mean(dim=2).view(b, 2, 1, 1)\n std = x.std(dim=2).view(b, 2, 1, 1)\n\n x = x.view(b, c, h, w)\n\n return (x - mean) / std, mean, std\n\n def unnorm(\n self, x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor\n ) -> torch.Tensor:\n return x * std + mean\n\n def pad(\n self, x: torch.Tensor\n ) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:\n _, _, h, w = x.shape\n w_mult = ((w - 1) | 15) + 1\n h_mult = ((h - 1) | 15) + 1\n w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]\n h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]\n # TODO: fix this type when PyTorch fixes theirs\n # the documentation lies - this actually takes a list\n # https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L3457\n # https://github.com/pytorch/pytorch/pull/16949\n x = F.pad(x, w_pad + h_pad)\n\n return x, (h_pad, w_pad, h_mult, w_mult)\n\n def unpad(\n self,\n x: torch.Tensor,\n h_pad: List[int],\n w_pad: List[int],\n h_mult: int,\n w_mult: int,\n ) -> torch.Tensor:\n return x[..., h_pad[0] : h_mult - h_pad[1], w_pad[0] : w_mult - w_pad[1]]\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if not x.shape[-1] == 2:\n raise ValueError(\"Last dimension must be 2 for complex.\")\n\n # get shapes for unet and normalize\n x = self.complex_to_chan_dim(x)\n x, mean, std = self.norm(x)\n x, pad_sizes = self.pad(x)\n\n x = self.unet(x)\n\n # get shapes back and unnormalize\n x = self.unpad(x, *pad_sizes)\n x = self.unnorm(x, mean, std)\n x = self.chan_complex_to_last_dim(x)\n\n return x\n\n\nclass SensitivityModel(nn.Module):\n \"\"\"\n Model for learning sensitivity estimation from k-space data.\n\n This model applies an IFFT to multichannel k-space data and then a U-Net\n to the 
coil images to estimate coil sensitivities. It can be used with the\n end-to-end variational network.\n \"\"\"\n\n def __init__(\n self,\n chans: int,\n num_pools: int,\n in_chans: int = 2,\n out_chans: int = 2,\n drop_prob: float = 0.0,\n mask_center: bool = True,\n ):\n \"\"\"\n Args:\n chans: Number of output channels of the first convolution layer.\n num_pools: Number of down-sampling and up-sampling layers.\n in_chans: Number of channels in the input to the U-Net model.\n out_chans: Number of channels in the output to the U-Net model.\n drop_prob: Dropout probability.\n mask_center: Whether to mask center of k-space for sensitivity map\n calculation.\n \"\"\"\n super().__init__()\n self.mask_center = mask_center\n self.norm_unet = NormUnet(\n chans,\n num_pools,\n in_chans=in_chans,\n out_chans=out_chans,\n drop_prob=drop_prob,\n )\n\n def chans_to_batch_dim(self, x: torch.Tensor) -> Tuple[torch.Tensor, int]:\n b, c, h, w, comp = x.shape\n\n return x.view(b * c, 1, h, w, comp), b\n\n def batch_chans_to_chan_dim(self, x: torch.Tensor, batch_size: int) -> torch.Tensor:\n bc, _, h, w, comp = x.shape\n c = bc // batch_size\n\n return x.view(batch_size, c, h, w, comp)\n\n def divide_root_sum_of_squares(self, x: torch.Tensor) -> torch.Tensor:\n return x / fastmri.rss_complex(x, dim=1).unsqueeze(-1).unsqueeze(1)\n\n def get_pad_and_num_low_freqs(\n self, mask: torch.Tensor, num_low_frequencies: Optional[int] = None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if num_low_frequencies is None:\n # get low frequency line locations and mask them out\n squeezed_mask = mask[:, 0, 0, :, 0].to(torch.int8)\n cent = squeezed_mask.shape[1] // 2\n # running argmin returns the first non-zero\n left = torch.argmin(squeezed_mask[:, :cent].flip(1), dim=1)\n right = torch.argmin(squeezed_mask[:, cent:], dim=1)\n num_low_frequencies_tensor = torch.max(\n 2 * torch.min(left, right), torch.ones_like(left)\n ) # force a symmetric center unless 1\n else:\n num_low_frequencies_tensor = num_low_frequencies * torch.ones(\n mask.shape[0], dtype=mask.dtype, device=mask.device\n )\n\n pad = (mask.shape[-2] - num_low_frequencies_tensor + 1) // 2\n\n return pad, num_low_frequencies_tensor\n\n def forward(\n self,\n masked_kspace: torch.Tensor,\n mask: torch.Tensor,\n num_low_frequencies: Optional[int] = None,\n ) -> torch.Tensor:\n if self.mask_center:\n pad, num_low_freqs = self.get_pad_and_num_low_freqs(\n mask, num_low_frequencies\n )\n masked_kspace = transforms.batched_mask_center(\n masked_kspace, pad, pad + num_low_freqs\n )\n\n # convert to image space\n images, batches = self.chans_to_batch_dim(fastmri.ifft2c(masked_kspace))\n\n # estimate sensitivities\n return self.divide_root_sum_of_squares(\n self.batch_chans_to_chan_dim(self.norm_unet(images), batches)\n )\n\n\nclass VarNet(nn.Module):\n \"\"\"\n A full variational network model.\n\n This model applies a combination of soft data consistency with a U-Net\n regularizer. 
To use non-U-Net regularizers, use VarNetBlock.\n \"\"\"\n\n def __init__(\n self,\n num_cascades: int = 12,\n sens_chans: int = 8,\n sens_pools: int = 4,\n chans: int = 18,\n pools: int = 4,\n mask_center: bool = True,\n ):\n \"\"\"\n Args:\n num_cascades: Number of cascades (i.e., layers) for variational\n network.\n sens_chans: Number of channels for sensitivity map U-Net.\n sens_pools Number of downsampling and upsampling layers for\n sensitivity map U-Net.\n chans: Number of channels for cascade U-Net.\n pools: Number of downsampling and upsampling layers for cascade\n U-Net.\n mask_center: Whether to mask center of k-space for sensitivity map\n calculation.\n \"\"\"\n super().__init__()\n\n self.sens_net = SensitivityModel(\n chans=sens_chans,\n num_pools=sens_pools,\n mask_center=mask_center,\n )\n self.cascades = nn.ModuleList(\n [VarNetBlock(NormUnet(chans, pools)) for _ in range(num_cascades)]\n )\n\n def forward(\n self,\n masked_kspace: torch.Tensor,\n mask: torch.Tensor,\n num_low_frequencies: Optional[int] = None,\n ) -> torch.Tensor:\n sens_maps = self.sens_net(masked_kspace, mask, num_low_frequencies)\n kspace_pred = masked_kspace.clone()\n\n for cascade in self.cascades:\n kspace_pred = cascade(kspace_pred, masked_kspace, mask, sens_maps)\n\n return fastmri.rss(fastmri.complex_abs(fastmri.ifft2c(kspace_pred)), dim=1)\n\n\nclass VarNetBlock(nn.Module):\n \"\"\"\n Model block for end-to-end variational network.\n\n This model applies a combination of soft data consistency with the input\n model as a regularizer. A series of these blocks can be stacked to form\n the full variational network.\n \"\"\"\n\n def __init__(self, model: nn.Module):\n \"\"\"\n Args:\n model: Module for \"regularization\" component of variational\n network.\n \"\"\"\n super().__init__()\n\n self.model = model\n self.dc_weight = nn.Parameter(torch.ones(1))\n\n def sens_expand(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:\n return fastmri.fft2c(fastmri.complex_mul(x, sens_maps))\n\n def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:\n x = fastmri.ifft2c(x)\n return fastmri.complex_mul(x, fastmri.complex_conj(sens_maps)).sum(\n dim=1, keepdim=True\n )\n\n def forward(\n self,\n current_kspace: torch.Tensor,\n ref_kspace: torch.Tensor,\n mask: torch.Tensor,\n sens_maps: torch.Tensor,\n ) -> torch.Tensor:\n zero = torch.zeros(1, 1, 1, 1, 1).to(current_kspace)\n soft_dc = torch.where(mask, current_kspace - ref_kspace, zero) * self.dc_weight\n model_term = self.sens_expand(\n self.model(self.sens_reduce(current_kspace, sens_maps)), sens_maps\n )\n\n return current_kspace - soft_dc - model_term\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.min",
"torch.argmin",
"torch.where",
"torch.ones_like",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ikewai/hawaii_climate_products_container | [
"05e25faffd814b6c8c9e550897a976db3456a0fc"
] | [
"preliminary/air_temp/daily/code/cross_validate_temp.py"
] | [
"#Daily only\nimport sys\nimport rasterio\nimport numpy as np\nimport pandas as pd\n\nimport Temp_linear as tmpl\nfrom affine import Affine\nfrom pyproj import Transformer\n\n#DEFINE CONSTANTS--------------------------------------------------------------\nMASTER_DIR = r'/home/hawaii_climate_products_container/preliminary/'\nRUN_MASTER_DIR = MASTER_DIR + r'air_temp/data_outputs/'\nDEP_MASTER_DIR = MASTER_DIR + r'air_temp/daily/dependencies/'\nPRED_DIR = DEP_MASTER_DIR + r'predictors/'\nMASK_TIFF_DIR = DEP_MASTER_DIR + r'geoTiffs_250m/masks/'\nCV_OUTPUT_DIR = RUN_MASTER_DIR + r'tables/loocv/daily/county/'\nMETA_OUTPUT_DIR = RUN_MASTER_DIR + r'metadata/daily/county/'\nMETA_MASTER_FILE = r'https://raw.githubusercontent.com/ikewai/hawaii_wx_station_mgmt_container/main/Hawaii_Master_Station_Meta.csv'\n#END CONSTANTS-----------------------------------------------------------------\n\n#DEFINE FUNCTIONS--------------------------------------------------------------\ndef get_coordinates(GeoTiff_name):\n\n # Read raster\n with rasterio.open(GeoTiff_name) as r:\n T0 = r.transform # upper-left pixel corner affine transform\n A = r.read() # pixel values\n\n # All rows and columns\n cols, rows = np.meshgrid(np.arange(A.shape[2]), np.arange(A.shape[1]))\n\n # Get affine transform for pixel centres\n T1 = T0 * Affine.translation(0.5, 0.5)\n # Function to convert pixel row/column index (from 0) to easting/northing\n # at centre\n def rc2en(r, c): return T1 * (c, r)\n\n # All eastings and northings (there is probably a faster way to do this)\n eastings, northings = np.vectorize(\n rc2en, otypes=[\n float, float])(\n rows, cols)\n\n transformer = Transformer.from_proj(\n 'EPSG:4326',\n '+proj=longlat +datum=WGS84 +no_defs +type=crs',\n always_xy=True,\n skip_equivalent=True)\n\n LON, LAT = transformer.transform(eastings, northings)\n return LON, LAT\n\ndef get_isl_dims(iCode,mask_tiff_dir):\n \"\"\"\n Helper function\n Dependencies: input_dir = parent input directory branch\n \"\"\"\n tiffname = mask_tiff_dir + iCode.lower() + '_mask.tif'\n lons,lats = get_coordinates(tiffname)\n lons = np.unique(lons.reshape(-1))\n lats = np.unique(lats.reshape(-1))\n xdiff = lons[1:]-lons[:-1]\n ydiff = lats[1:]-lats[:-1]\n xresolution = np.round(np.min(xdiff),6)\n yresolution = np.round(np.min(ydiff),6)\n xmin = np.min(lons)\n xmax = np.max(lons)\n ymin = np.min(lats)\n ymax = np.max(lats)\n isl_dims = {'XResolution':xresolution,'YResolution':yresolution,'Xmin':xmin,'Xmax':xmax,'Ymin':ymin,'Ymax':ymax}\n\n return isl_dims\n\ndef cross_validation(prediction,predictor,varname,model,iCode,threshold=2.5):\n \"\"\"\n Input requirements:\n prediction: dataframe including, at minimum, Island and [varname] columns, indexed by SKN\n predictor: series including model predictor columns, indexed by SKN\n \"\"\"\n #Get target_isl stations (only validate against target island stations, not supplementary stations)\n if iCode == 'MN':\n isl_list = ['MA','MO','LA','KO']\n else:\n isl_list = [iCode]\n \n target_isl = prediction[prediction['Island'].isin(isl_list)].index.values\n \n cv_data = pd.DataFrame(index=prediction.index)\n for target in target_isl:\n #All stations excluding target station\n train_inds = np.setdiff1d(predictor.index.values,[target])\n X_train = predictor.loc[train_inds]\n X_test = predictor.loc[target].values.reshape(-1,1)\n y_train = prediction.loc[train_inds,varname]\n y_obs = prediction.at[target,varname]\n theta,pcov,X,y = tmpl.makeModel(y_train,X_train,model,threshold)\n y_pred = model(X_test,*theta)\n anom = 
y_obs - y_pred\n cv_data.loc[target,['ObservedTemp','PredictedTemp','Obs-Pred','ValidatedStation']] = [y_obs,y_pred,anom,'TRUE']\n\n #cv_data now populated for all target island stations\n #Include non-validated training data\n non_target_isl = prediction[~prediction['Island'].isin(isl_list)].index.values\n cv_data.loc[non_target_isl,'ObservedTemp'] = prediction.loc[non_target_isl,varname]\n cv_data.loc[non_target_isl,'ValidatedStation'] = 'FALSE'\n \n return cv_data\n\ndef get_metrics(varname,iCode,date_str,param_list,inversion=2150):\n #Needs to makeModel based on the cv_data to get model parameters\n #Also needs to open some island reference file for dimensions\n \"\"\"\n Requirements:\n --\n \"\"\"\n cv_dir = CV_OUTPUT_DIR + iCode.upper() + '/'\n pred_dir = PRED_DIR\n mask_dir = MASK_TIFF_DIR\n if iCode == 'MN':\n isl_list = ['MA','MO','KO','LA']\n else:\n isl_list = [iCode]\n date_tail = ''.join(date_str.split('-'))\n year = date_str.split('-')[0]\n mon = date_str.split('-')[1]\n n_params = len(param_list)\n #File names\n temp_file = '_'.join((varname,'map',iCode,date_tail)) + '.tif'\n se_file = '_'.join((varname,'map',iCode,date_tail,'se')) + '.tif'\n cv_file = cv_dir + '_'.join((date_tail,varname,iCode,'loocv')) + '.csv'\n pred_file = pred_dir + varname.lower() + '_predictors.csv'\n\n if varname == 'Tmean':\n tmin_file = '_'.join(('daily','Tmin',year,mon)) + '.csv'\n tmax_file = '_'.join(('daily','Tmax',year,mon)) + '.csv'\n input_file = ', '.join((tmin_file,tmax_file))\n input_file = '_'.join(('daily',varname,year,mon)) +'.csv'\n \n\n cv_data = pd.read_csv(cv_file)\n cv_data.set_index('SKN',inplace=True)\n #Get actual linear regression info, don't make a dummy one for Tmean\n if varname == 'Tmean':\n theta = np.array([np.nan,np.nan,np.nan])\n else:\n pred_df,pr_series = tmpl.extract_predictors(pred_file,param_list)\n training_temp = cv_data['ObservedTemp']\n training_pred = pr_series.loc[training_temp.index]\n MODEL = tmpl.myModel(inversion=inversion)\n theta,pcov,X,y = tmpl.makeModel(training_temp,training_pred,MODEL,threshold=2.5)\n\n #Get island dims\n isl_dims = get_isl_dims(iCode,mask_dir)\n\n #Number of stations\n non_target_stns = cv_data[~cv_data['Island'].isin(isl_list)]\n non_target_isl_codes = cv_data[~cv_data['Island'].isin(isl_list)]['Island'].unique()\n high_elev_stns = non_target_stns[non_target_stns['ELEV.m.'] > inversion]\n high_elev_isl_codes = high_elev_stns['Island'].unique()\n nstn = cv_data.shape[0]\n nstn_ext = non_target_stns.shape[0]\n nstn_elev = high_elev_stns.shape[0]\n\n #Check the numbers\n observed = cv_data[cv_data['ValidatedStation']==True]['ObservedTemp'].values.flatten()\n predicted = cv_data[cv_data['ValidatedStation']==True]['PredictedTemp'].values.flatten()\n pred_clip,obs_clip = tmpl.sigma_Clip(predicted,observed)\n if ((len(pred_clip) - n_params -1) < 3) | ((len(obs_clip) - n_params - 1) < 3):\n obs_mean = np.nan\n pred_mean = np.nan\n mae = np.nan\n rmse = np.nan\n r2 = np.nan\n aic = np.nan\n aicc = np.nan\n bic = np.nan\n bias = np.nan\n r2_code = 1 #Not enough data to produce R2\n else:\n mae,rmse,r2,aic,aicc,bic = tmpl.metrics(pred_clip,obs_clip,False,n_params)\n obs_mean = np.mean(observed)\n pred_mean = np.mean(predicted)\n bias = obs_mean - pred_mean\n if r2 >= 0:\n r2_code = 0\n else:\n r2_code = 2 #negative R2\n\n meta = {'Island':iCode,'inversion':inversion,'nstn':nstn,'nstn_ext':nstn_ext,'nstn_elev':nstn_elev,'outer_islands':non_target_isl_codes,'high_islands':high_elev_isl_codes,\n 
'obs_mean':obs_mean,'pred_mean':pred_mean,'bias':bias,'MAE':mae,'RMSE':rmse,'R2':r2,'AIC':aic,'AICc':aicc,'BIC':bic,'r2_code':r2_code,'input_file':input_file,'temp_file':temp_file,'se_file':se_file,'lr_coef':theta}\n \n meta = {**meta,**isl_dims}\n return meta\n\ndef write_meta_text(varname,date_str,meta):\n date_tail = ''.join((date_str.split('-')))\n formatted_date = pd.to_datetime(date_str).strftime('%b. %d, %Y')\n temp_mode = {'Tmin':'minimum','Tmax':'maximum','Tmean':'mean'}\n isl_dict = {'BI':'Big Island','MA':'Maui','OA':'Oahu','KA':'Kauai','MN':'Maui, Molokai, Lanai, Kahoolawe'}\n island = meta['Island']\n meta_dir = META_OUTPUT_DIR + island.upper() + '/'\n cv_file = '_'.join((date_tail,varname,island.upper(),'loocv')) + '.csv'\n meta_file = meta_dir + '_'.join((date_tail,varname,island.upper(),'meta')) + '.txt'\n\n\n if island == 'BI':\n county_list = 'Hawaii County'\n elif island == 'MN':\n county_list = 'Maui County (Maui, Lanai, Molokai, Kahoolawe)'\n elif island == 'OA':\n county_list = 'Honolulu County (Oahu)'\n elif island == 'KA':\n county_list = 'Kauai County'\n \n if meta['nstn_elev'] > 0:\n high_isl_list = list(meta['high_islands'])\n high_islands = [isl_dict[icode.upper()] for icode in high_isl_list]\n high_islands = ', '.join(high_islands)\n if meta['nstn_ext'] > 0:\n outer_isl_list = list(meta['outer_islands'])\n outer_islands = [isl_dict[icode.upper()] for icode in outer_isl_list]\n outer_islands = ', '.join(outer_islands)\n\n #Mixed station text case\n if (meta['nstn_ext'] > meta['nstn_elev']) & (meta['nstn_elev'] > 0):\n high_elev_statement = 'The model was trained on {nstn} unique station location(s) within {county} and supplemented at high elevation by {nstn_elev} station(s) from {high_islands}. Due to limited station availability, the model training was also supplemented by {nstn_ext} station(s) drawn from {outer_islands}.'\n high_elev_statement = high_elev_statement.format(nstn=str(meta['nstn']),county=county_list,nstn_elev=str(meta['nstn_elev']),high_islands=high_islands,nstn_ext=str(meta['nstn_ext']),outer_islands=outer_islands)\n elif meta['nstn_ext'] > 0:\n high_elev_statement = 'The model was trained on {nstn} unique station location(s) within {county}. Due to limited station availability, the model training was supplemented by {nstn_ext} station(s) from {outer_islands}.'\n high_elev_statement = high_elev_statement.format(nstn=str(meta['nstn']),county=county_list,nstn_ext=str(meta['nstn_ext']),outer_islands=outer_islands)\n elif (meta['nstn_ext'] == meta['nstn_elev']) & (meta['nstn_ext'] > 0):\n high_elev_statement = 'The model was trained on {nstn} unique station location(s) within {county} and supplemented at high elevation by {nstn_elev} station(s) from {high_islands}.'\n high_elev_statement = high_elev_statement.format(nstn=str(meta['nstn']),county=county_list,nstn_elev=str(meta['nstn_elev']),high_islands=high_islands)\n else:\n high_elev_statement = 'The model was trained on {nstn} unique station location(s) within {county}.'\n high_elev_statement = high_elev_statement.format(nstn=meta['nstn'],county=county_list)\n \n lr_coef = meta['lr_coef']\n regress_const = lr_coef[0]\n regress_slope1 = lr_coef[1]\n if len(lr_coef) > 2:\n regress_slope2 = lr_coef[2]\n else:\n regress_slope2 = np.nan\n \n if meta['r2_code'] == 1:\n r2_statement = 'Insufficient validation stations were available for the target island. 
Leave-one-out cross-validation (LOOCV) could not be performed and R-squared value is nan.'\n elif meta['r2_code'] == 2:\n r2_statement = 'A leave-one-out cross-validation (LOOCV) was performed based on the station data available for the target island. However, the R-squared value is negative. If outer island data were used to supplement the model training, R-squared may not accurately represent goodness of fit. Please consult the cross-validation table or the standard error maps for more information on model error.'\n else:\n r2_statement = 'A leave one out cross validation (LOOCV) of the station data used in this map produced an R-squared of: {rsqr}.'\n r2_statement = r2_statement.format(rsqr=str(np.round(meta['R2'],4)))\n\n #Format data statement\n dataStatement_val = 'This {date} daily temperature {mode} map of {county} is a high spatial resolution gridded prediction of {mode} temperature in degrees Celsius for the date {date}. This was produced using a piece-wise linear regression model regressed on elevation with the junction point at {inversion} meters. ' + high_elev_statement + ' ' + r2_statement + ' All maps are subject to change as new data becomes available or unknown errors are corrected in reoccurring versions. Errors in temperature estimates do vary over space meaning any gridded temperature value, even on higher quality maps, could still produce incorrect estimates. Check standard error (SE) maps to better understand spatial estimates of prediction error'\n dataStatement_val = dataStatement_val.format(date=formatted_date,mode=temp_mode[varname],county=county_list,inversion=str(meta['inversion']))\n \n #Format keywords and credits\n kw_list = ', '.join([county_list,'Hawaii',temp_mode[varname]+' temperature prediction','daily temperature','temperature','climate','linear regression'])\n \n credit_statement = 'All data produced by University of Hawaii at Manoa Dept. of Geography and the Enviroment, Ecohydology Lab in collaboration with the Water Resource Research Center (WRRC). 
Support for the Hawai‘i EPSCoR Program is provided by the Hawaii Emergency Management Agency.'\n contact_list = 'Keri Kodama ([email protected]), Matthew Lucas ([email protected]), Ryan Longman ([email protected]), Sayed Bateni ([email protected]), Thomas Giambelluca ([email protected])'\n \n #Arrange all meta fields and write to file\n field_value_list = {'attribute':'value','dataStatement':dataStatement_val,'keywords':kw_list,\n 'county':island.lower(),'dataDate':formatted_date,'dataVersionType':'preliminary','tempStationFile':meta['input_file'],'tempGridFile':meta['temp_file'],\n 'tempSEGridFile':meta['se_file'],'crossValidationFile':cv_file,'fillValue':'-9999','GeoCoordUnits':'Decimal Degrees',\n 'GeoCoordRefSystem':'+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0','XResolution':str(meta['XResolution']),\n 'YResolution':str(meta['YResolution']),'ExtentXmin':str(meta['Xmin']),\n 'ExtentXmax':str(meta['Xmax']),'ExtentYmin':str(meta['Ymin']),\n 'ExtentYmax':str(meta['Ymax']),'stationCount':str(meta['nstn']),\n 'outerStationCount':str(meta['nstn_ext']),'regressionConst': str(np.round(regress_const,4)),'regressionSlope1':str(np.round(regress_slope1,4)),'regressionSlope2':str(np.round(regress_slope2,4)),'biasTemp':str(np.round(meta['bias'],5)),'rsqTemp':str(np.round(meta['R2'],5)),\n 'rmseTemp':str(np.round(meta['RMSE'],5)),'maeTemp':str(np.round(meta['MAE'],5)),\n 'credits':credit_statement,'contacts':contact_list}\n col1 = list(field_value_list.keys())\n col2 = [field_value_list[key] for key in col1]\n fmeta = open(meta_file,'w')\n for (key,val) in zip(col1,col2):\n line = [key,val]\n fmt_line = \"{:20}{:60}\\n\".format(*line)\n fmeta.write(fmt_line)\n fmeta.close()\n return meta_file\n\ndef main_cv_single(varname,date_str,temp_data,pred_data,iCode,output_dir=None,inversion=2150):\n \"\"\"\n Outputs cross-validation table for single specified county and variable.\n Cannot accommodate Tmean which requires both variables\n \"\"\"\n date_tail = ''.join(date_str.split('-'))\n loocv_name = output_dir + iCode.upper() + '/' + '_'.join((date_tail,varname,iCode,'loocv')) + '.csv'\n MODEL = tmpl.myModel(inversion)\n\n cv_temp = cross_validation(temp_data,pred_data,varname,MODEL,iCode)\n \n temp_meta = pd.read_csv(META_MASTER_FILE)\n temp_meta.set_index('SKN',inplace=True)\n cv_meta = temp_meta.loc[cv_temp.index]\n \n cv_temp = cv_meta.join(cv_temp,how='left')\n cv_temp.reset_index(inplace=True)\n cv_temp.to_csv(loocv_name,index=False)\n \n return cv_temp\n\ndef main_cv_mean(date_str,iCode,cv_dir=CV_OUTPUT_DIR):\n date_tail = ''.join(date_str.split('-'))\n varname = 'Tmean'\n\n #input files\n tmin_file = cv_dir + iCode.upper() + '/' + '_'.join((date_tail,'Tmin',iCode,'loocv')) + '.csv'\n tmax_file = cv_dir + iCode.upper() + '/' + '_'.join((date_tail,'Tmax',iCode,'loocv')) + '.csv'\n\n #Output file\n tmean_loocv_file = cv_dir + iCode.upper() + '/' + '_'.join((date_tail,varname,iCode,'loocv')) + '.csv'\n \n meta_master_table = pd.read_csv(META_MASTER_FILE)\n meta_master_table = meta_master_table.set_index('SKN')\n\n cv_tmin = pd.read_csv(tmin_file)\n cv_tmin = cv_tmin.set_index('SKN')\n cv_tmax = pd.read_csv(tmax_file)\n cv_tmax = cv_tmax.set_index('SKN')\n\n shared_inds = list(set(cv_tmin.index.values) & set(cv_tmax.index.values))\n obs_tmin = cv_tmin.loc[shared_inds,'ObservedTemp']\n obs_tmax = cv_tmax.loc[shared_inds,'ObservedTemp']\n obs_tmean = (obs_tmin + obs_tmax) * 0.5\n pred_tmin = cv_tmin.loc[shared_inds,'PredictedTemp']\n pred_tmax = cv_tmax.loc[shared_inds,'PredictedTemp']\n 
pred_tmean = (pred_tmin + pred_tmax) * 0.5\n\n cv_tmean = pd.DataFrame(index=shared_inds)\n cv_tmean.index.name = 'SKN'\n cv_tmean.loc[shared_inds,'ObservedTemp'] = obs_tmean\n cv_tmean.loc[shared_inds,'PredictedTemp'] = pred_tmean\n cv_tmean.loc[shared_inds,'Obs-Pred'] = obs_tmean - pred_tmean\n valid_inds = cv_tmean['PredictedTemp'].dropna().index.values\n cv_tmean.loc[cv_tmean.index.isin(valid_inds),'ValidatedStation'] = 'TRUE'\n cv_tmean.loc[~cv_tmean.index.isin(valid_inds),'ValidatedStation'] = 'FALSE'\n\n meta_tmean = meta_master_table.loc[shared_inds]\n cv_tmean = meta_tmean.join(cv_tmean,how='left')\n cv_tmean = cv_tmean.sort_values(by='SKN')\n \n cv_tmean = cv_tmean.reset_index()\n cv_tmean.to_csv(tmean_loocv_file,index=False)\n return cv_tmean\n\n\ndef main_cv_all(date_str,tmin_df,tmax_df,param_tmin,param_tmax,iCode,meta_table,output_dir,inversion=2150):\n \"\"\"\n Outputs cross-validation tables for tmin, tmax, tmean and metadata dictionaries\n \"\"\"\n date_tail = ''.join(date_str.split('-'))\n tmin_name = output_dir + iCode.upper() + '/' + '_'.join((date_tail,'Tmin',iCode,'loocv')) + '.csv'\n tmax_name = output_dir + iCode.upper() + '/' + '_'.join((date_tail,'Tmax',iCode,'loocv')) + '.csv'\n tmean_name = output_dir + iCode.upper() + '/' + '_'.join((date_tail,'Tmean',iCode,'loocv')) + '.csv'\n MODEL = tmpl.myModel(inversion)\n cv_tmin = cross_validation(tmin_df,param_tmin,'Tmin',MODEL,iCode)\n cv_tmax = cross_validation(tmax_df,param_tmax,'Tmax',MODEL,iCode)\n\n #For stations with both Tmin and Tmax available, compute predicted and observed Tmean\n #Predicted Tmean_i defined as mean(pred_tmin_i,pred_tmax_i)\n #Observed Tmean_i defined as mean(obs_tmin_i,obs_tmax_i)\n shared_inds = list(set(cv_tmin.index.values) & set(cv_tmax.index.values))\n obs_tmin = cv_tmin.loc[shared_inds,'ObservedTemp']\n obs_tmax = cv_tmax.loc[shared_inds,'ObservedTemp']\n obs_tmean = (obs_tmin + obs_tmax) * 0.5\n pred_tmin = cv_tmin.loc[shared_inds,'PredictedTemp']\n pred_tmax = cv_tmax.loc[shared_inds,'PredictedTemp']\n pred_tmean = (pred_tmin + pred_tmax) * 0.5\n cv_tmean = pd.DataFrame(index=shared_inds)\n cv_tmean.loc[shared_inds,'ObservedTemp'] = obs_tmean\n cv_tmean.loc[shared_inds,'PredictedTemp'] = pred_tmean\n cv_tmean.loc[shared_inds,'Obs-Pred'] = obs_tmean - pred_tmean\n valid_inds = cv_tmean['PredictedTemp'].dropna().index.values\n cv_tmean.loc[cv_tmean.index.isin(valid_inds),'ValidatedStation'] = 'TRUE'\n cv_tmean.loc[~cv_tmean.index.isin(valid_inds),'ValidatedStation'] = 'FALSE'\n\n meta_table = meta_table.set_index('SKN')\n tmin_meta = meta_table.loc[cv_tmin.index]\n tmax_meta = meta_table.loc[cv_tmax.index]\n tmean_meta = meta_table.loc[cv_tmean.index]\n\n cv_tmin = tmin_meta.join(cv_tmin,how='left')\n cv_tmax = tmax_meta.join(cv_tmax,how='left')\n cv_tmean = tmean_meta.join(cv_tmean,how='left')\n\n #Write the files\n #Tmin\n cv_tmin = cv_tmin.reset_index()\n cv_tmin.to_csv(tmin_name,index=False)\n\n #Tmax\n cv_tmax = cv_tmax.reset_index()\n cv_tmax.to_csv(tmax_name,index=False)\n\n #Tmean\n cv_tmean = cv_tmean.reset_index()\n cv_tmean.to_csv(tmean_name,index=False)\n\n return (cv_tmin,cv_tmax,cv_tmean)\n\n \n\n\n \n#END FUNCTIONS-----------------------------------------------------------------\n\n#MAIN\n\nif __name__ == '__main__':\n varname = sys.argv[1]\n iCode = sys.argv[2]\n master_dir = sys.argv[3]\n run_version = sys.argv[4]\n version_type = sys.argv[5]\n date_range = sys.argv[6] #YYYYMMDD_st-YYYYMMDD_en\n date_range = date_range.split('-')\n st_date = 
pd.to_datetime(date_range[0])\n en_date = pd.to_datetime(date_range[-1])\n dt_range = pd.date_range(st_date,en_date)\n\n params = ['dem_250']\n run_master_dir = master_dir + 'finalRunOutputs' + run_version + '/' + version_type + '/'\n temp_input_dir = run_master_dir + 'tables/' + varname + '_daily_raw/'\n proc_input_dir = master_dir + 'input/'\n cv_dir = run_master_dir + 'tables/loocv/county/'\n pred_dir = proc_input_dir + 'predictors/'\n pred_file = pred_dir + varname.lower() + '_predictors.csv'\n \n for dt in dt_range:\n date_str = dt.strftime('%Y-%m-%d')\n print(date_str)\n year_str = date_str.split('-')[0]\n mon_str = date_str.split('-')[1]\n\n temp_file = temp_input_dir + '_'.join(('daily',varname,year_str,mon_str)) + '.csv'\n temp_df,temp_meta,temp_data = tmpl.extract_temp_input(temp_file)\n pred_df,pr_series = tmpl.extract_predictors(pred_file,params)\n\n temp_date = tmpl.get_temperature_date(temp_data,temp_meta,iCode,date_str,varname=varname)\n pred_temp = pr_series.loc[temp_date.index]\n\n cv_temp = main_cv_single(varname,date_str,temp_date,pred_temp,iCode,cv_dir)\n \n temp_meta = get_metrics(varname,iCode,date_str,proc_input_dir,run_master_dir,params)\n temp_text = write_meta_text(varname,date_str,temp_meta,run_master_dir)\n\n\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"numpy.min",
"numpy.arange",
"pandas.DataFrame",
"numpy.setdiff1d",
"numpy.round",
"numpy.max",
"numpy.vectorize",
"numpy.mean",
"pandas.date_range",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pingsutw/flytekit | [
"0a6be27372a3629fe48c6b0369f7aaf2dca5bf46"
] | [
"flytekit/types/structured/basic_dfs.py"
] | [
"import os\nimport typing\nfrom typing import TypeVar\n\nimport pandas\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nfrom flytekit import FlyteContext\nfrom flytekit.core.data_persistence import DataPersistencePlugins\nfrom flytekit.models import literals\nfrom flytekit.models.literals import StructuredDatasetMetadata\nfrom flytekit.models.types import StructuredDatasetType\nfrom flytekit.types.structured.structured_dataset import (\n FLYTE_DATASET_TRANSFORMER,\n LOCAL,\n PARQUET,\n S3,\n StructuredDataset,\n StructuredDatasetDecoder,\n StructuredDatasetEncoder,\n)\n\nT = TypeVar(\"T\")\n\n\nclass PandasToParquetEncodingHandler(StructuredDatasetEncoder):\n def __init__(self, protocol: str):\n super().__init__(pd.DataFrame, protocol, PARQUET)\n # todo: Use this somehow instead of relaying ont he ctx file_access\n self._persistence = DataPersistencePlugins.find_plugin(protocol)()\n\n def encode(\n self,\n ctx: FlyteContext,\n structured_dataset: StructuredDataset,\n structured_dataset_type: StructuredDatasetType,\n ) -> literals.StructuredDataset:\n\n path = typing.cast(str, structured_dataset.uri) or ctx.file_access.get_random_remote_directory()\n df = typing.cast(pd.DataFrame, structured_dataset.dataframe)\n local_dir = ctx.file_access.get_random_local_directory()\n local_path = os.path.join(local_dir, f\"{0:05}\")\n df.to_parquet(local_path, coerce_timestamps=\"us\", allow_truncated_timestamps=False)\n ctx.file_access.upload_directory(local_dir, path)\n structured_dataset_type.format = PARQUET\n return literals.StructuredDataset(uri=path, metadata=StructuredDatasetMetadata(structured_dataset_type))\n\n\nclass ParquetToPandasDecodingHandler(StructuredDatasetDecoder):\n def __init__(self, protocol: str):\n super().__init__(pd.DataFrame, protocol, PARQUET)\n\n def decode(\n self,\n ctx: FlyteContext,\n flyte_value: literals.StructuredDataset,\n ) -> pd.DataFrame:\n path = flyte_value.uri\n local_dir = ctx.file_access.get_random_local_directory()\n ctx.file_access.get_data(path, local_dir, is_multipart=True)\n return pd.read_parquet(local_dir)\n\n\nclass ArrowToParquetEncodingHandler(StructuredDatasetEncoder):\n def __init__(self, protocol: str):\n super().__init__(pa.Table, protocol, PARQUET)\n\n def encode(\n self,\n ctx: FlyteContext,\n structured_dataset: StructuredDataset,\n structured_dataset_type: StructuredDatasetType,\n ) -> literals.StructuredDataset:\n path = typing.cast(str, structured_dataset.uri) or ctx.file_access.get_random_remote_path()\n df = structured_dataset.dataframe\n local_dir = ctx.file_access.get_random_local_directory()\n local_path = os.path.join(local_dir, f\"{0:05}\")\n pq.write_table(df, local_path)\n ctx.file_access.upload_directory(local_dir, path)\n return literals.StructuredDataset(uri=path, metadata=StructuredDatasetMetadata(structured_dataset_type))\n\n\nclass ParquetToArrowDecodingHandler(StructuredDatasetDecoder):\n def __init__(self, protocol: str):\n super().__init__(pa.Table, protocol, PARQUET)\n\n def decode(\n self,\n ctx: FlyteContext,\n flyte_value: literals.StructuredDataset,\n ) -> pa.Table:\n path = flyte_value.uri\n local_dir = ctx.file_access.get_random_local_directory()\n ctx.file_access.get_data(path, local_dir, is_multipart=True)\n return pq.read_table(local_dir)\n\n\nfor protocol in [LOCAL, S3]: # Should we add GCS\n FLYTE_DATASET_TRANSFORMER.register_handler(PandasToParquetEncodingHandler(protocol), default_for_type=True)\n FLYTE_DATASET_TRANSFORMER.register_handler(ParquetToPandasDecodingHandler(protocol), 
default_for_type=True)\n FLYTE_DATASET_TRANSFORMER.register_handler(ArrowToParquetEncodingHandler(protocol), default_for_type=True)\n FLYTE_DATASET_TRANSFORMER.register_handler(ParquetToArrowDecodingHandler(protocol), default_for_type=True)\n"
] | [
[
"pandas.read_parquet"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
konrad/openml-python | [
"5a2830cc494dbffe93fb324aa7eb98b8bf3f0b33"
] | [
"examples/30_extended/tasks_tutorial.py"
] | [
"\"\"\"\nTasks\n=====\n\nA tutorial on how to list and download tasks.\n\"\"\"\n\nimport openml\nimport pandas as pd\n\n############################################################################\n#\n# Tasks are identified by IDs and can be accessed in two different ways:\n#\n# 1. In a list providing basic information on all tasks available on OpenML.\n# This function will not download the actual tasks, but will instead download\n# meta data that can be used to filter the tasks and retrieve a set of IDs.\n# We can filter this list, for example, we can only list tasks having a\n# special tag or only tasks for a specific target such as\n# *supervised classification*.\n# 2. A single task by its ID. It contains all meta information, the target\n# metric, the splits and an iterator which can be used to access the\n# splits in a useful manner.\n\n############################################################################\n# Listing tasks\n# ^^^^^^^^^^^^^\n#\n# We will start by simply listing only *supervised classification* tasks:\n\ntasks = openml.tasks.list_tasks(task_type_id=1)\n\n############################################################################\n# **openml.tasks.list_tasks()** returns a dictionary of dictionaries, we convert it into a\n# `pandas dataframe <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html>`_\n# to have better visualization and easier access:\n\ntasks = pd.DataFrame.from_dict(tasks, orient='index')\nprint(tasks.columns)\nprint(\"First 5 of %s tasks:\" % len(tasks))\nprint(tasks.head())\n\n# The same can be obtained through lesser lines of code\ntasks_df = openml.tasks.list_tasks(task_type_id=1, output_format='dataframe')\nprint(tasks_df.head())\n\n############################################################################\n# We can filter the list of tasks to only contain datasets with more than\n# 500 samples, but less than 1000 samples:\n\nfiltered_tasks = tasks.query('NumberOfInstances > 500 and NumberOfInstances < 1000')\nprint(list(filtered_tasks.index))\n\n############################################################################\n\n# Number of tasks\nprint(len(filtered_tasks))\n\n############################################################################\n# Then, we can further restrict the tasks to all have the same resampling strategy:\n\nfiltered_tasks = filtered_tasks.query('estimation_procedure == \"10-fold Crossvalidation\"')\nprint(list(filtered_tasks.index))\n\n############################################################################\n\n# Number of tasks\nprint(len(filtered_tasks))\n\n############################################################################\n# Resampling strategies can be found on the\n# `OpenML Website <http://www.openml.org/search?type=measure&q=estimation%20procedure>`_.\n#\n# Similar to listing tasks by task type, we can list tasks by tags:\n\ntasks = openml.tasks.list_tasks(tag='OpenML100')\ntasks = pd.DataFrame.from_dict(tasks, orient='index')\nprint(\"First 5 of %s tasks:\" % len(tasks))\nprint(tasks.head())\n\n############################################################################\n# Furthermore, we can list tasks based on the dataset id:\n\ntasks = openml.tasks.list_tasks(data_id=1471)\ntasks = pd.DataFrame.from_dict(tasks, orient='index')\nprint(\"First 5 of %s tasks:\" % len(tasks))\nprint(tasks.head())\n\n############################################################################\n# In addition, a size limit and an offset can be applied both separately and 
simultaneously:\n\ntasks = openml.tasks.list_tasks(size=10, offset=50)\ntasks = pd.DataFrame.from_dict(tasks, orient='index')\nprint(tasks)\n\n############################################################################\n#\n# **OpenML 100**\n# is a curated list of 100 tasks to start using OpenML. They are all\n# supervised classification tasks with more than 500 instances and less than 50000\n# instances per task. To make things easier, the tasks do not contain highly\n# unbalanced data and sparse data. However, the tasks include missing values and\n# categorical features. You can find out more about the *OpenML 100* on\n# `the OpenML benchmarking page <https://www.openml.org/guide/benchmark>`_.\n#\n# Finally, it is also possible to list all tasks on OpenML with:\n\n############################################################################\ntasks = openml.tasks.list_tasks()\ntasks = pd.DataFrame.from_dict(tasks, orient='index')\nprint(len(tasks))\n\n############################################################################\n# Exercise\n# ########\n#\n# Search for the tasks on the 'eeg-eye-state' dataset.\n\ntasks.query('name==\"eeg-eye-state\"')\n\n############################################################################\n# Downloading tasks\n# ^^^^^^^^^^^^^^^^^\n#\n# We provide two functions to download tasks, one which downloads only a\n# single task by its ID, and one which takes a list of IDs and downloads\n# all of these tasks:\n\ntask_id = 31\ntask = openml.tasks.get_task(task_id)\n\n############################################################################\n# Properties of the task are stored as member variables:\n\nprint(task)\n\n############################################################################\n# And:\n\nids = [2, 1891, 31, 9983]\ntasks = openml.tasks.get_tasks(ids)\nprint(tasks[0])\n\n############################################################################\n# Creating tasks\n# ^^^^^^^^^^^^^^\n#\n# You can also create new tasks. Take the following into account:\n#\n# * You can only create tasks on *active* datasets\n# * For now, only the following tasks are supported: classification, regression,\n# clustering, and learning curve analysis.\n# * For now, tasks can only be created on a single dataset.\n# * The exact same task must not already exist.\n#\n# Creating a task requires the following input:\n#\n# * task_type_id: The task type ID, required (see below). Required.\n# * dataset_id: The dataset ID. Required.\n# * target_name: The name of the attribute you aim to predict. Optional.\n# * estimation_procedure_id : The ID of the estimation procedure used to create train-test\n# splits. Optional.\n# * evaluation_measure: The name of the evaluation measure. Optional.\n# * Any additional inputs for specific tasks\n#\n# It is best to leave the evaluation measure open if there is no strong prerequisite for a\n# specific measure. OpenML will always compute all appropriate measures and you can filter\n# or sort results on your favourite measure afterwards. Only add an evaluation measure if\n# necessary (e.g. when other measure make no sense), since it will create a new task, which\n# scatters results across tasks.\n\n\n############################################################################\n# Example\n# #######\n#\n# Let's create a classification task on a dataset. In this example we will do this on the\n# Iris dataset (ID=128 (on test server)). 
We'll use 10-fold cross-validation (ID=1),\n# and *predictive accuracy* as the predefined measure (this can also be left open).\n# If a task with these parameters exist, we will get an appropriate exception.\n# If such a task doesn't exist, a task will be created and the corresponding task_id\n# will be returned.\n\n\n# using test server for example uploads\nopenml.config.start_using_configuration_for_example()\n\ntry:\n tasktypes = openml.tasks.TaskTypeEnum\n my_task = openml.tasks.create_task(\n task_type_id=tasktypes.SUPERVISED_CLASSIFICATION,\n dataset_id=128,\n target_name=\"class\",\n evaluation_measure=\"predictive_accuracy\",\n estimation_procedure_id=1)\n my_task.publish()\nexcept openml.exceptions.OpenMLServerException as e:\n # Error code for 'task already exists'\n if e.code == 614:\n # Lookup task\n tasks = openml.tasks.list_tasks(data_id=128, output_format='dataframe').to_numpy()\n tasks = tasks[tasks[:, 4] == \"Supervised Classification\"]\n tasks = tasks[tasks[:, 6] == \"10-fold Crossvalidation\"]\n tasks = tasks[tasks[:, 19] == \"predictive_accuracy\"]\n task_id = tasks[0][0]\n print(\"Task already exists. Task ID is\", task_id)\n\n# reverting to prod server\nopenml.config.stop_using_configuration_for_example()\n\n\n############################################################################\n# * `Complete list of task types <https://www.openml.org/search?type=task_type>`_.\n# * `Complete list of model estimation procedures <https://www.openml.org/search?q=%2520measure_type%3Aestimation_procedure&type=measure>`_.\n# * `Complete list of evaluation measures <https://www.openml.org/search?q=measure_type%3Aevaluation_measure&type=measure>`_.\n#\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ZhiruiFeng/RL-League | [
"69eba8adb55384e6b65606322fc65d3a68ece54f"
] | [
"agents/modules/approximator.py"
] | [
"import tensorflow as tf\nimport os\nclass Estimator:\n \"\"\" Neural network for value approximator\n Here it is used for both Q-Network and the Target Network.\n \"\"\"\n\n def __init__(self, scope=\"estimator\", summaries_dir=None, valid_actions=[0,1,2,3]):\n self.scope = scope\n self.valid_actions = valid_actions\n # Writes Tensorboard summaries to disk\n self.summary_writer = None\n with tf.variable_scope(scope):\n self._build_model()\n if summaries_dir:\n summary_dir = os.path.join(summaries_dir, \"summaries_{}\".format(scope))\n if not os.path.exists(summary_dir):\n os.makedirs(summary_dir)\n self.summary_writer = tf.summary.FileWriter(summary_dir)\n\n def _build_model(self):\n \"\"\"\n Builds the tensorflow graph.\n \"\"\"\n # Placeholders for our input\n # Our input are 4 RGB frames of shape 160, 160 each\n self.X_pl = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name=\"X\")\n # The TD target value\n self.y_pl = tf.placeholder(shape=[None], dtype=tf.float32, name=\"y\")\n # Integer id of which action was selected\n self.actions_pl = tf.placeholder(shape=[None], dtype=tf.int32, name=\"actions\")\n\n X = tf.to_float(self.X_pl) / 255.0\n batch_size = tf.shape(self.X_pl)[0]\n\n # Three convolutional layers\n conv1 = tf.contrib.layers.conv2d(\n X, 32, 8, 4, activation_fn=tf.nn.relu)\n conv2 = tf.contrib.layers.conv2d(\n conv1, 64, 4, 2, activation_fn=tf.nn.relu)\n conv3 = tf.contrib.layers.conv2d(\n conv2, 64, 3, 1, activation_fn=tf.nn.relu)\n\n # Fully connected layers\n flattened = tf.contrib.layers.flatten(conv3)\n fc1 = tf.contrib.layers.fully_connected(flattened, 512)\n self.predictions = tf.contrib.layers.fully_connected(fc1, len(self.valid_actions))\n\n # Get the predictions for the chosen actions only\n gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n\n # Calcualte the loss\n self.losses = tf.squared_difference(self.y_pl, self.action_predictions)\n self.loss = tf.reduce_mean(self.losses)\n\n # Optimizer Parameters from original paper\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)\n self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())\n\n # Summaries for Tensorboard\n self.summaries = tf.summary.merge([\n tf.summary.scalar(\"loss\", self.loss),\n tf.summary.histogram(\"loss_hist\", self.losses),\n tf.summary.histogram(\"q_values_hist\", self.predictions),\n tf.summary.scalar(\"max_q_value\", tf.reduce_max(self.predictions))\n ])\n\n def predict(self, sess, s):\n \"\"\"\n Predicts action values.\n\n Args:\n sess: Tensorflow session\n s: State input of shape [batch_size, 4, 160, 160, 3]\n\n Returns:\n Tensor of shape [batch_size, NUM_VALID_ACTIONS] containing the estimated\n action values.\n \"\"\"\n return sess.run(self.predictions, { self.X_pl: s })\n\n def update(self, sess, s, a, y):\n \"\"\"\n Updates the estimator towards the given targets.\n\n Args:\n sess: Tensorflow session object\n s: State input of shape [batch_size, 4, 160, 160, 3]\n a: Chosen actions of shape [batch_size]\n y: Targets of shape [batch_size]\n\n Returns:\n The calculated loss on the batch.\n \"\"\"\n feed_dict = { self.X_pl: s, self.y_pl: y, self.actions_pl: a }\n summaries, global_step, _, loss = sess.run(\n [self.summaries, tf.contrib.framework.get_global_step(), self.train_op, self.loss],\n feed_dict)\n if self.summary_writer:\n self.summary_writer.add_summary(summaries, global_step)\n 
return loss\n"
] | [
[
"tensorflow.reduce_max",
"tensorflow.summary.FileWriter",
"tensorflow.summary.scalar",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.placeholder",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.contrib.layers.flatten",
"tensorflow.contrib.layers.conv2d",
"tensorflow.to_float",
"tensorflow.variable_scope",
"tensorflow.squared_difference",
"tensorflow.summary.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
JackInTaiwan/BertSum | [
"5b6f372b13358473d17c49bfc45f1e15c80f9fce"
] | [
"src/models/data_loader.py"
] | [
"import os\nimport gc\nimport glob\nimport random\nimport itertools\nimport torch\n\nfrom src.others.logging import logger\n\n\n\nclass Batch(object):\n def _pad(self, data, pad_id, width=-1):\n if (width == -1):\n width = max(len(d) for d in data)\n rtn_data = [d + [pad_id] * (width - len(d)) for d in data]\n return rtn_data\n\n def __init__(self, data=None, device=None, is_test=False):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n pre_src = [x[0] for x in data]\n pre_labels = [x[1] for x in data]\n pre_segs = [x[2] for x in data]\n pre_clss = [x[3] for x in data]\n\n src = torch.tensor(self._pad(pre_src, 0))\n\n labels = torch.tensor(self._pad(pre_labels, 0))\n segs = torch.tensor(self._pad(pre_segs, 0))\n mask = 1 - (src == 0)\n\n clss = torch.tensor(self._pad(pre_clss, -1))\n mask_cls = 1 - (clss == -1)\n clss[clss == -1] = 0\n\n setattr(self, 'clss', clss.to(device))\n setattr(self, 'mask_cls', mask_cls.to(device))\n setattr(self, 'src', src.to(device))\n setattr(self, 'labels', labels.to(device))\n setattr(self, 'segs', segs.to(device))\n setattr(self, 'mask', mask.to(device))\n\n if (is_test):\n src_str = [x[-2] for x in data]\n setattr(self, 'src_str', src_str)\n tgt_str = [x[-1] for x in data]\n setattr(self, 'tgt_str', tgt_str)\n\n def __len__(self):\n return self.batch_size\n\n\ndef batch(data, batch_size):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = simple_batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n\ndef load_dataset(args, corpus_type, shuffle):\n \"\"\"\n Dataset generator. 
Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.\n \"\"\"\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading {} dataset from {}, number of examples: {}'.format(corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(os.path.join(args.bert_data_path, '*{}.[0-9]*.pt'.format(corpus_type))))\n\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n raise ValueError(\"Cannot find bert data in the form of '{}'\".format(os.path.join(args.bert_data_path, '*{}.[0-9]*.pt'.format(corpus_type))))\n\n\ndef simple_batch_size_fn(new, count):\n src, labels = new[0], new[1]\n global max_n_sents, max_n_tokens, max_size\n if count == 1:\n max_size = 0\n max_n_sents=0\n max_n_tokens=0\n max_n_sents = max(max_n_sents, len(src))\n max_size = max(max_size, max_n_sents)\n src_elements = count * max_size\n return src_elements\n\n\n\nclass Dataloader(object):\n def __init__(self, args, datasets, batch_size, device, shuffle, is_test):\n self.args = args\n self.datasets = datasets\n self.batch_size = batch_size\n self.device = device\n self.shuffle = shuffle\n self.is_test = is_test\n self.cur_iter = self._next_dataset_iterator(datasets)\n\n assert self.cur_iter is not None\n\n\n def __iter__(self):\n dataset_iter = (d for d in self.datasets)\n while self.cur_iter is not None:\n for batch in self.cur_iter:\n yield batch\n self.cur_iter = self._next_dataset_iterator(dataset_iter)\n\n\n def _next_dataset_iterator(self, dataset_iter):\n try:\n # Drop the current dataset for decreasing memory\n if hasattr(self, \"cur_dataset\"):\n self.cur_dataset = None\n gc.collect()\n del self.cur_dataset\n gc.collect()\n\n self.cur_dataset = next(dataset_iter)\n\n except StopIteration:\n return None\n\n return DataIterator(args = self.args,\n dataset=self.cur_dataset, batch_size=self.batch_size,\n device=self.device, shuffle=self.shuffle, is_test=self.is_test)\n\n\n\nclass DataIterator(object):\n def __init__(self, args, dataset, batch_size, device=None, is_test=False,\n shuffle=True):\n self.args = args\n self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset\n self.iterations = 0\n self.device = device\n self.shuffle = shuffle\n\n self.sort_key = lambda x: len(x[1])\n\n self._iterations_this_epoch = 0\n\n\n def data(self):\n if self.shuffle:\n random.shuffle(self.dataset)\n xs = self.dataset\n return xs\n\n\n def preprocess(self, ex, is_test):\n src = ex['src']\n if ('labels' in ex):\n labels = ex['labels']\n else:\n labels = ex['src_sent_labels']\n\n segs = ex['segs']\n if (not self.args.use_interval):\n segs = [0] * len(segs)\n clss = ex['clss']\n src_txt = ex['src_txt']\n tgt_txt = ex['tgt_txt']\n\n if (is_test):\n return src, labels, segs, clss, src_txt, tgt_txt\n else:\n return src, labels, segs, clss\n\n\n def batch_buffer(self, data, batch_size):\n minibatch, size_so_far = [], 0\n\n for ex in data:\n if (len(ex['src'])==0):\n continue\n ex = self.preprocess(ex, self.is_test)\n if (ex is None):\n continue\n minibatch.append(ex)\n size_so_far = simple_batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far 
> batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)\n\n if minibatch:\n yield minibatch\n\n\n def create_batches(self):\n \"\"\" Create batches \"\"\"\n data = self.data()\n for buffer in self.batch_buffer(data, self.batch_size * 50):\n p_batch = sorted(buffer, key=lambda x: len(x[3])) if not self.is_test else buffer\n p_batch = batch(p_batch, self.batch_size)\n p_batch = list(p_batch)\n\n if (self.shuffle): random.shuffle(p_batch)\n\n for b in p_batch:\n yield b\n\n\n def __iter__(self):\n while True:\n self.batches = self.create_batches()\n for idx, minibatch in enumerate(self.batches):\n if len(minibatch) > 0:\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx: continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n batch = Batch(minibatch, self.device, self.is_test)\n yield batch\n return\n"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vchatchai/python201 | [
"783481dbb8b2a641583f1f349f95f22126bfa9ed"
] | [
"numpy_broadcasting.py"
] | [
"import numpy as np \nprint(f'numpy version: {np.__version__}')\n\nnp.random.seed(9)\nx = np.random.randint(1, 11, 5)\n\nprint(f'np.random.randint(1,11,5) => {x}')\n\nprint(f'x * 2 => {x*2}')\n\nb = np.array([2,2,2,2,2])\n\nprint(f'b => {b}')\n\n\nprint(f'x * np.array([2]) => {x * np.array([2])}')\n\nprint(f'repeat(np.array[2], 5) => {np.repeat(np.array([2]), 5) }')\n\n\nprint(f'x * np.repeat(np.array([2]), 5) => {x * np.repeat(np.array([2]), 5)}')\n\n\nmu = np.mean(x)\nstd = np.std(x)\nz = (x - np.mean(x)) / np.std(x)\n\nprint(f'np.mean(x) => {mu}')\nprint(f'np.std(x) => {std}')\nprint(f'np.repeat() = >{np.repeat(mu, 5)}')\n\n\nprint(f'(x - np.mean(x)) / np.std(x) => {z}' )\n\n\n#2-dim\n\na = np.arange(1, 7).reshape(3, -1 ) * 10\n\nprint(f'a => {a}')\nprint(f'a.shape => {a.shape}')\n\n\nprint(5 + a)\n\n# np.full((3,2), 5)\nb = np.full(a.shape, 5)\n\n\nprint(f'b => {b}')\n\nprint(f'b + a => {b + a}')\n\nc = np.array([2,10])\nprint(f'c => {c}')\n\nprint(f'a.shape => {a.shape}')\nprint(f'b.shape => {c.shape}')\n\nprint(f'a + c => {a + c}')\n\n# print(f'np.tile(c, a.shape[0]) => {np.tile(c, a.shape([0])).reshape()}')\n\n\nd=np.array([[2],[3],[4]])\nprint(f'd => {d}')\n \nprint(f'a + d => {a + d}')\n\ne = np.array([[5],[100]])\nprint(f'e => {e}')\n\nprint(f'a.shape => {a.shape}')\nprint(f'e.shape => {e.shape}')"
] | [
[
"numpy.random.seed",
"numpy.arange",
"numpy.full",
"numpy.std",
"numpy.mean",
"numpy.repeat",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
altana-tech/scikit-network | [
"dedc9d3e694c7106e4709aae22dffb5142c15859",
"dedc9d3e694c7106e4709aae22dffb5142c15859",
"dedc9d3e694c7106e4709aae22dffb5142c15859",
"dedc9d3e694c7106e4709aae22dffb5142c15859",
"dedc9d3e694c7106e4709aae22dffb5142c15859"
] | [
"sknetwork/ranking/diffusion.py",
"sknetwork/utils/seeds.py",
"sknetwork/utils/parse.py",
"sknetwork/visualization/dendrograms.py",
"sknetwork/embedding/svd.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on July 17 2019\n@author: Nathan de Lara <[email protected]>\n@author: Thomas Bonald <[email protected]>\n\"\"\"\nfrom typing import Union, Optional\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.sparse.linalg import bicgstab, LinearOperator\n\nfrom sknetwork.linalg.normalization import normalize\nfrom sknetwork.ranking.base import BaseRanking, BaseBiRanking\nfrom sknetwork.utils.check import check_format, check_seeds, check_square, check_is_proba\nfrom sknetwork.utils.format import bipartite2undirected\nfrom sknetwork.utils.seeds import stack_seeds\nfrom sknetwork.utils.verbose import VerboseMixin\n\n\nclass DirichletOperator(LinearOperator):\n \"\"\"Diffusion in discrete time as a LinearOperator.\n\n Parameters\n ----------\n adjacency : sparse.csr_matrix\n Adjacency matrix of the graph.\n damping_factor : float\n Damping factor.\n border : np.ndarray (bool)\n Border nodes. If ``None``, then the diffusion is free.\n\n Attributes\n ----------\n a : sparse.csr_matrix\n Diffusion matrix.\n b : np.ndarray\n Regularization (uniform).\n \"\"\"\n def __init__(self, adjacency: sparse.csr_matrix, damping_factor: float, border: np.ndarray = None):\n super(DirichletOperator, self).__init__(shape=adjacency.shape, dtype=float)\n n = adjacency.shape[0]\n out_nodes = adjacency.dot(np.ones(n)).astype(bool)\n if border is None:\n border = np.zeros(n, dtype=bool)\n interior: sparse.csr_matrix = sparse.diags(~border, shape=(n, n), format='csr', dtype=float)\n self.a = damping_factor * interior.dot(normalize(adjacency))\n self.b = interior.dot(np.ones(n) - damping_factor * out_nodes) / n\n\n def _matvec(self, x: np.ndarray):\n return self.a.dot(x) + self.b * x.sum()\n\n\nclass DeltaDirichletOperator(DirichletOperator):\n \"\"\"Diffusion in discrete time as a LinearOperator (delta of temperature).\n\n Parameters\n ----------\n adjacency : sparse.csr_matrix\n Adjacency matrix of the graph.\n damping_factor : float\n Damping factor.\n border : np.ndarray (bool)\n Border nodes. If ``None``, then the diffusion is free.\n\n Attributes\n ----------\n a : sparse.csr_matrix\n Diffusion matrix.\n b : np.ndarray\n Regularization (uniform).\n \"\"\"\n def __init__(self, adjacency: sparse.csr_matrix, damping_factor: float, border: np.ndarray = None):\n super(DeltaDirichletOperator, self).__init__(adjacency, damping_factor, border)\n\n def _matvec(self, x: np.ndarray):\n return self.a.dot(x) + self.b * x.sum() - x\n\n\nclass Diffusion(BaseRanking):\n \"\"\"Ranking by diffusion along the edges (heat equation).\n\n * Graphs\n * Digraphs\n\n Parameters\n ----------\n n_iter : int\n Number of steps of the diffusion in discrete time (must be positive).\n damping_factor : float (optional)\n Damping factor (default value = 1).\n\n Attributes\n ----------\n scores_ : np.ndarray\n Score of each node (= temperature).\n\n Example\n -------\n >>> from sknetwork.data import house\n >>> diffusion = Diffusion(n_iter=2)\n >>> adjacency = house()\n >>> seeds = {0: 1, 2: 0}\n >>> scores = diffusion.fit_transform(adjacency, seeds)\n >>> np.round(scores, 2)\n array([0.58, 0.56, 0.38, 0.58, 0.42])\n\n References\n ----------\n Chung, F. (2007). The heat kernel as the pagerank of a graph. 
Proceedings of the National Academy of Sciences.\n \"\"\"\n def __init__(self, n_iter: int = 3, damping_factor: Optional[float] = None):\n super(Diffusion, self).__init__()\n\n if n_iter <= 0:\n raise ValueError('The number of iterations must be positive.')\n else:\n self.n_iter = n_iter\n if damping_factor is None:\n damping_factor = 1.\n check_is_proba(damping_factor, 'Damping factor')\n self.damping_factor = damping_factor\n\n def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray],\n seeds: Optional[Union[dict, np.ndarray]] = None, init: Optional[float] = None) \\\n -> 'Diffusion':\n \"\"\"Compute the diffusion (temperatures at equilibrium).\n\n Parameters\n ----------\n adjacency :\n Adjacency matrix of the graph.\n seeds :\n Temperatures of seed nodes in initial state (dictionary or vector). Negative temperatures ignored.\n init :\n Temperature of non-seed nodes in initial state.\n If ``None``, use the average temperature of seed nodes (default).\n\n Returns\n -------\n self: :class:`Diffusion`\n \"\"\"\n adjacency = check_format(adjacency)\n check_square(adjacency)\n n: int = adjacency.shape[0]\n if seeds is None:\n self.scores_ = np.ones(n) / n\n return self\n\n seeds = check_seeds(seeds, n)\n border = (seeds >= 0)\n\n if init is None:\n scores = seeds[border].mean() * np.ones(n)\n else:\n scores = init * np.ones(n)\n scores[border] = seeds[border]\n\n diffusion = DirichletOperator(adjacency, self.damping_factor)\n for i in range(self.n_iter):\n scores = diffusion.dot(scores)\n\n self.scores_ = scores\n\n return self\n\n\nclass BiDiffusion(Diffusion, BaseBiRanking):\n \"\"\"Ranking by diffusion along the edges of a bipartite graph (heat equation).\n\n * Bigraphs\n\n Parameters\n ----------\n n_iter : int\n Number of steps of the diffusion in discrete time (must be positive).\n damping_factor : float (optional)\n Damping factor (default value = 1).\n\n Attributes\n ----------\n scores_ : np.ndarray\n Scores of rows.\n scores_row_ : np.ndarray\n Scores of rows (copy of **scores_**).\n scores_col_ : np.ndarray\n Scores of columns.\n\n Example\n -------\n >>> from sknetwork.ranking import BiDiffusion\n >>> from sknetwork.data import star_wars\n >>> bidiffusion = BiDiffusion(n_iter=2)\n >>> biadjacency = star_wars()\n >>> scores = bidiffusion.fit_transform(biadjacency, seeds_row = {0: 1, 2: 0})\n >>> np.round(scores, 2)\n array([0.5 , 0.5 , 0.46, 0.44])\n \"\"\"\n def __init__(self, n_iter: int = 3, damping_factor: Optional[float] = None):\n super(BiDiffusion, self).__init__(n_iter, damping_factor)\n\n def fit(self, biadjacency: Union[sparse.csr_matrix, np.ndarray],\n seeds_row: Optional[Union[dict, np.ndarray]] = None, seeds_col: Optional[Union[dict, np.ndarray]] = None,\n init: Optional[float] = None) -> 'BiDiffusion':\n \"\"\"Compute the diffusion (temperatures at equilibrium).\n\n Parameters\n ----------\n biadjacency :\n Biadjacency matrix, shape (n_row, n_col).\n seeds_row :\n Temperatures of seed rows in initial state (dictionary or vector of size n_row).\n Negative temperatures ignored.\n seeds_col :\n Temperatures of seed columns in initial state (dictionary or vector of size n_col).\n Negative temperatures ignored.\n init :\n Temperature of non-seed nodes in initial state.\n If ``None``, use the average temperature of seed nodes (default).\n Returns\n -------\n self: :class:`BiDiffusion`\n \"\"\"\n biadjacency = check_format(biadjacency)\n n_row, n_col = biadjacency.shape\n seeds = stack_seeds(n_row, n_col, seeds_row, seeds_col)\n adjacency = 
bipartite2undirected(biadjacency)\n Diffusion.fit(self, adjacency, seeds, init)\n # average over 2 successive iterations because the graph is bipartite\n diffusion = DirichletOperator(adjacency, self.damping_factor)\n self.scores_ += diffusion.dot(self.scores_)\n self.scores_ /= 2\n self._split_vars(n_row)\n\n return self\n\n\nclass Dirichlet(BaseRanking, VerboseMixin):\n \"\"\"Ranking by the Dirichlet problem (heat diffusion with boundary constraints).\n\n * Graphs\n * Digraphs\n\n Parameters\n ----------\n n_iter : int\n If positive, number of steps of the diffusion in discrete time.\n Otherwise, solve the Dirichlet problem by the bi-conjugate gradient stabilized method.\n damping_factor : float (optional)\n Damping factor (default value = 1).\n verbose : bool\n Verbose mode.\n\n Attributes\n ----------\n scores_ : np.ndarray\n Score of each node (= temperature).\n\n Example\n -------\n >>> from sknetwork.ranking import Dirichlet\n >>> from sknetwork.data import house\n >>> dirichlet = Dirichlet()\n >>> adjacency = house()\n >>> seeds = {0: 1, 2: 0}\n >>> scores = dirichlet.fit_transform(adjacency, seeds)\n >>> np.round(scores, 2)\n array([1. , 0.54, 0. , 0.31, 0.62])\n\n References\n ----------\n Chung, F. (2007). The heat kernel as the pagerank of a graph. Proceedings of the National Academy of Sciences.\n \"\"\"\n def __init__(self, n_iter: int = 10, damping_factor: Optional[float] = None, verbose: bool = False):\n super(Dirichlet, self).__init__()\n VerboseMixin.__init__(self, verbose)\n\n self.n_iter = n_iter\n if damping_factor is None:\n damping_factor = 1.\n check_is_proba(damping_factor, 'Damping factor')\n self.damping_factor = damping_factor\n\n def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray],\n seeds: Optional[Union[dict, np.ndarray]] = None, init: Optional[float] = None) -> 'Dirichlet':\n \"\"\"Compute the solution to the Dirichlet problem (temperatures at equilibrium).\n\n Parameters\n ----------\n adjacency :\n Adjacency matrix of the graph.\n seeds :\n Temperatures of seed nodes (dictionary or vector). 
Negative temperatures ignored.\n init :\n Temperature of non-seed nodes in initial state.\n If ``None``, use the average temperature of seed nodes (default).\n\n Returns\n -------\n self: :class:`Dirichlet`\n \"\"\"\n adjacency = check_format(adjacency)\n check_square(adjacency)\n n: int = adjacency.shape[0]\n if seeds is None:\n self.scores_ = np.ones(n) / n\n return self\n\n seeds = check_seeds(seeds, n)\n border = (seeds >= 0)\n\n if init is None:\n scores = seeds[border].mean() * np.ones(n)\n else:\n scores = init * np.ones(n)\n scores[border] = seeds[border]\n\n if self.n_iter > 0:\n diffusion = DirichletOperator(adjacency, self.damping_factor, border)\n for i in range(self.n_iter):\n scores = diffusion.dot(scores)\n scores[border] = seeds[border]\n else:\n a = DeltaDirichletOperator(adjacency, self.damping_factor, border)\n b = -seeds\n b[~border] = 0\n scores, info = bicgstab(a, b, atol=0., x0=scores)\n self._scipy_solver_info(info)\n\n tmin, tmax = seeds[border].min(), seeds[border].max()\n self.scores_ = np.clip(scores, tmin, tmax)\n\n return self\n\n\nclass BiDirichlet(Dirichlet, BaseBiRanking):\n \"\"\"Ranking by the Dirichlet problem in bipartite graphs (heat diffusion with boundary constraints).\n\n * Bigraphs\n\n Parameters\n ----------\n n_iter : int\n If positive, number of steps of the diffusion in discrete time.\n Otherwise, solve the Dirichlet problem by the bi-conjugate gradient stabilized method.\n damping_factor : float (optional)\n Damping factor (default value = 1).\n verbose : bool\n Verbose mode.\n\n Attributes\n ----------\n scores_ : np.ndarray\n Scores of rows.\n scores_row_ : np.ndarray\n Scores of rows (copy of **scores_**).\n scores_col_ : np.ndarray\n Scores of columns.\n\n Example\n -------\n >>> from sknetwork.ranking import BiDirichlet\n >>> from sknetwork.data import star_wars\n >>> bidirichlet = BiDirichlet()\n >>> biadjacency = star_wars()\n >>> scores = bidirichlet.fit_transform(biadjacency, seeds_row = {0: 1, 2: 0})\n >>> np.round(scores, 2)\n array([1. , 0.5 , 0. , 0.29])\n \"\"\"\n\n def __init__(self, n_iter: int = 10, damping_factor: Optional[float] = None, verbose: bool = False):\n super(BiDirichlet, self).__init__(n_iter, damping_factor, verbose)\n\n def fit(self, biadjacency: Union[sparse.csr_matrix, np.ndarray],\n seeds_row: Optional[Union[dict, np.ndarray]] = None, seeds_col: Optional[Union[dict, np.ndarray]] = None,\n init: Optional[float] = None) -> 'BiDirichlet':\n \"\"\"Compute the solution to the Dirichlet problem (temperatures at equilibrium).\n\n Parameters\n ----------\n biadjacency :\n Biadjacency matrix, shape (n_row, n_col).\n seeds_row :\n Temperatures of seed rows (dictionary or vector of size n_row). Negative temperatures ignored.\n seeds_col :\n Temperatures of seed columns (dictionary or vector of size n_col). Negative temperatures ignored.\n init :\n Temperature of non-seed nodes in initial state.\n If ``None``, use the average temperature of seed nodes (default).\n\n Returns\n -------\n self: :class:`BiDirichlet`\n \"\"\"\n biadjacency = check_format(biadjacency)\n n_row, n_col = biadjacency.shape\n seeds = stack_seeds(n_row, n_col, seeds_row, seeds_col)\n adjacency = bipartite2undirected(biadjacency)\n Dirichlet.fit(self, adjacency, seeds, init)\n self._split_vars(n_row)\n\n return self\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Apr, 2019\n@author: Nathan de Lara <[email protected]>\n\"\"\"\nfrom typing import Optional, Union\n\nimport numpy as np\n\nfrom sknetwork.utils.check import check_seeds\n\n\ndef stack_seeds(n_row: int, n_col: int, seeds_row: Optional[Union[np.ndarray, dict]],\n seeds_col: Optional[Union[np.ndarray, dict]] = None, default_value: float = -1) -> np.ndarray:\n \"\"\"Process seeds for rows and columns and stack the results into a single vector.\"\"\"\n if seeds_row is None and seeds_col is None:\n seeds_row = np.ones(n_row)\n seeds_col = default_value * np.ones(n_col)\n elif seeds_row is None:\n seeds_row = default_value * np.ones(n_row)\n elif seeds_col is None:\n seeds_col = default_value * np.ones(n_col)\n seeds_row = check_seeds(seeds_row, n_row)\n seeds_col = check_seeds(seeds_col, n_col)\n return np.hstack((seeds_row, seeds_col))\n\n\ndef seeds2probs(n: int, seeds: Union[dict, np.ndarray] = None) -> np.ndarray:\n \"\"\"Transform seeds into probability vector.\n\n Parameters\n ----------\n n : int\n Total number of samples.\n seeds :\n If ``None``, the uniform distribution is used.\n Otherwise, a non-negative, non-zero vector or a dictionary must be provided.\n\n Returns\n -------\n probs: np.ndarray\n A probability vector.\n \"\"\"\n if seeds is None:\n return np.ones(n) / n\n else:\n seeds = check_seeds(seeds, n)\n probs = np.zeros_like(seeds, dtype=float)\n ix = (seeds > 0)\n probs[ix] = seeds[ix]\n w: float = probs.sum()\n if w > 0:\n return probs / w\n else:\n raise ValueError('At least one seeds must have a positive probability.')\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on May, 2020\nNathan de Lara <[email protected]>\n\"\"\"\nimport numpy as np\nfrom scipy import sparse\n\nfrom sknetwork.utils.format import directed2undirected\n\n\ndef edgelist2adjacency(edgelist: list, undirected: bool = False) -> sparse.csr_matrix:\n \"\"\"Build an adjacency matrix from a list of edges.\n\n Parameters\n ----------\n edgelist : list\n List of edges as pairs (i, j) or triplets (i, j, w) for weighted edges.\n undirected : bool\n If ``True``, return a symmetric adjacency.\n\n Returns\n -------\n adjacency : sparse.csr_matrix\n\n Examples\n --------\n >>> edgelist = [(0, 1), (1, 2), (2, 0)]\n >>> adjacency = edgelist2adjacency(edgelist)\n >>> adjacency.shape, adjacency.nnz\n ((3, 3), 3)\n >>> adjacency = edgelist2adjacency(edgelist, undirected=True)\n >>> adjacency.shape, adjacency.nnz\n ((3, 3), 6)\n >>> weighted_edgelist = [(0, 1, 0.2), (1, 2, 4), (2, 0, 1.3)]\n >>> adjacency = edgelist2adjacency(weighted_edgelist)\n >>> adjacency.dtype\n dtype('float64')\n \"\"\"\n edges = np.array(edgelist)\n row, col = edges[:, 0].astype(np.int32), edges[:, 1].astype(np.int32)\n n = max(row.max(), col.max()) + 1\n if edges.shape[1] > 2:\n data = edges[:, 2]\n else:\n data = np.ones_like(row, dtype=bool)\n adjacency = sparse.csr_matrix((data, (row, col)), shape=(n, n))\n if undirected:\n adjacency = directed2undirected(adjacency)\n return adjacency\n\n\ndef edgelist2biadjacency(edgelist: list) -> sparse.csr_matrix:\n \"\"\"Build a biadjacency matrix from a list of edges.\n\n Parameters\n ----------\n edgelist : list\n List of edges as pairs (i, j) or triplets (i, j, w) for weighted edges.\n\n Returns\n -------\n biadjacency : sparse.csr_matrix\n\n Examples\n --------\n >>> edgelist = [(0, 0), (1, 0), (1, 1), (2, 1)]\n >>> biadjacency = edgelist2biadjacency(edgelist)\n >>> biadjacency.shape, biadjacency.nnz\n ((3, 2), 4)\n >>> weighted_edgelist = [(0, 0, 0.5), (1, 0, 1), (1, 1, 1), (2, 1, 2)]\n >>> biadjacency = edgelist2biadjacency(weighted_edgelist)\n >>> biadjacency.dtype\n dtype('float64')\n \"\"\"\n edges = np.array(edgelist)\n row, col = edges[:, 0].astype(np.int32), edges[:, 1].astype(np.int32)\n n_row, n_col = row.max() + 1, col.max() + 1\n if edges.shape[1] > 2:\n data = edges[:, 2]\n else:\n data = np.ones_like(row, dtype=bool)\n biadjacency = sparse.csr_matrix((data, (row, col)), shape=(n_row, n_col))\n return biadjacency\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on April 2020\n@author: Thomas Bonald <[email protected]>\n\"\"\"\nfrom typing import Iterable, Optional\n\nimport numpy as np\n\nfrom sknetwork.hierarchy.postprocess import cut_straight\nfrom sknetwork.visualization.colors import STANDARD_COLORS\n\n\ndef get_index(dendrogram, reorder=True):\n \"\"\"Index nodes for pretty dendrogram.\"\"\"\n n = dendrogram.shape[0] + 1\n tree = {i: [i] for i in range(n)}\n for t in range(n - 1):\n i = int(dendrogram[t, 0])\n j = int(dendrogram[t, 1])\n left: list = tree.pop(i)\n right: list = tree.pop(j)\n if reorder and len(left) < len(right):\n tree[n + t] = right + left\n else:\n tree[n + t] = left + right\n return list(tree.values())[0]\n\n\ndef svg_dendrogram_top(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,\n color, colors, font_size, reorder, rotate_names):\n \"\"\"Dendrogram as SVG image with root on top.\"\"\"\n\n # scaling\n height *= scale\n width *= scale\n\n # positioning\n labels = cut_straight(dendrogram, n_clusters, return_dendrogram=False)\n index = get_index(dendrogram, reorder)\n n = len(index)\n unit_height = height / dendrogram[-1, 2]\n unit_width = width / n\n height_basis = margin + height\n position = {index[i]: (margin + i * unit_width, height_basis) for i in range(n)}\n label = {i: l for i, l in enumerate(labels)}\n width += 2 * margin\n height += 2 * margin\n if names is not None:\n text_length = np.max(np.array([len(str(name)) for name in names]))\n height += text_length * font_size * .5 + margin_text\n\n svg = \"\"\"<svg width=\"{}\" height=\"{}\" xmlns=\"http://www.w3.org/2000/svg\">\"\"\".format(width, height)\n\n # text\n if names is not None:\n for i in range(n):\n x, y = position[i]\n x -= margin_text\n y += margin_text\n text = str(names[i]).replace('&', ' ')\n if rotate_names:\n svg += \"\"\"<text x=\"{}\" y=\"{}\" transform=\"rotate(60, {}, {})\" font-size=\"{}\">{}</text>\"\"\" \\\n .format(x, y, x, y, font_size, text)\n else:\n y += margin_text\n svg += \"\"\"<text x=\"{}\" y=\"{}\" font-size=\"{}\">{}</text>\"\"\" \\\n .format(x, y, font_size, text)\n\n # tree\n for t in range(n - 1):\n i = int(dendrogram[t, 0])\n j = int(dendrogram[t, 1])\n x1, y1 = position.pop(i)\n x2, y2 = position.pop(j)\n l1 = label.pop(i)\n l2 = label.pop(j)\n if l1 == l2:\n line_color = colors[l1 % len(colors)]\n else:\n line_color = color\n x = .5 * (x1 + x2)\n y = height_basis - dendrogram[t, 2] * unit_height\n svg += \"\"\"<path stroke-width=\"{}\" stroke=\"{}\" d=\"M {} {} {} {}\" />\"\"\"\\\n .format(line_width, line_color, x1, y1, x1, y)\n svg += \"\"\"<path stroke-width=\"{}\" stroke=\"{}\" d=\"M {} {} {} {}\" />\"\"\"\\\n .format(line_width, line_color, x2, y2, x2, y)\n svg += \"\"\"<path stroke-width=\"{}\" stroke=\"{}\" d=\"M {} {} {} {}\" />\"\"\"\\\n .format(line_width, line_color, x1, y, x2, y)\n position[n + t] = (x, y)\n label[n + t] = l1\n\n svg += '</svg>'\n return svg\n\n\ndef svg_dendrogram_left(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,\n color, colors, font_size, reorder):\n \"\"\"Dendrogram as SVG image with root on left side.\"\"\"\n\n # scaling\n height *= scale\n width *= scale\n\n # positioning\n labels = cut_straight(dendrogram, n_clusters, return_dendrogram=False)\n index = get_index(dendrogram, reorder)\n n = len(index)\n unit_height = height / n\n unit_width = width / dendrogram[-1, 2]\n width_basis = width + margin\n position = {index[i]: (width_basis, margin + i * 
unit_height) for i in range(n)}\n label = {i: l for i, l in enumerate(labels)}\n width += 2 * margin\n height += 2 * margin\n if names is not None:\n text_length = np.max(np.array([len(str(name)) for name in names]))\n width += text_length * font_size * .5 + margin_text\n\n svg = \"\"\"<svg width=\"{}\" height=\"{}\" xmlns=\"http://www.w3.org/2000/svg\">\"\"\".format(width, height)\n\n # text\n if names is not None:\n for i in range(n):\n x, y = position[i]\n x += margin_text\n y += unit_height / 3\n text = str(names[i]).replace('&', ' ')\n svg += \"\"\"<text x=\"{}\" y=\"{}\" font-size=\"{}\">{}</text>\"\"\" \\\n .format(x, y, font_size, text)\n\n # tree\n for t in range(n - 1):\n i = int(dendrogram[t, 0])\n j = int(dendrogram[t, 1])\n x1, y1 = position.pop(i)\n x2, y2 = position.pop(j)\n l1 = label.pop(i)\n l2 = label.pop(j)\n if l1 == l2:\n line_color = colors[l1 % len(colors)]\n else:\n line_color = color\n y = .5 * (y1 + y2)\n x = width_basis - dendrogram[t, 2] * unit_width\n svg += \"\"\"<path stroke-width=\"{}\" stroke=\"{}\" d=\"M {} {} {} {}\" />\"\"\"\\\n .format(line_width, line_color, x1, y1, x, y1)\n svg += \"\"\"<path stroke-width=\"{}\" stroke=\"{}\" d=\"M {} {} {} {}\" />\"\"\"\\\n .format(line_width, line_color, x2, y2, x, y2)\n svg += \"\"\"<path stroke-width=\"{}\" stroke=\"{}\" d=\"M {} {} {} {}\" />\"\"\"\\\n .format(line_width, line_color, x, y1, x, y2)\n position[n + t] = (x, y)\n label[n + t] = l1\n\n svg += '</svg>'\n\n return svg\n\n\ndef svg_dendrogram(dendrogram: np.ndarray, names: Optional[np.ndarray] = None, rotate: bool = False, width: float = 400,\n height: float = 300, margin: float = 10, margin_text: float = 5, scale: float = 1,\n line_width: float = 2, n_clusters: int = 2, color: str = 'black', colors: Optional[Iterable] = None,\n font_size: int = 12, reorder: bool = False, rotate_names: bool = True,\n filename: Optional[str] = None):\n \"\"\"Return SVG image of a dendrogram.\n\n Parameters\n ----------\n dendrogram :\n Dendrogram to display.\n names :\n Names of leaves.\n rotate :\n If ``True``, rotate the tree so that the root is on the left.\n width :\n Width of the image (margins excluded).\n height :\n Height of the image (margins excluded).\n margin :\n Margin.\n margin_text :\n Margin between leaves and their names, if any.\n scale :\n Scaling factor.\n line_width :\n Line width.\n n_clusters :\n Number of coloured clusters to display.\n color :\n Default SVG color for the dendrogram.\n colors :\n SVG colors of the clusters of the dendrogram (optional).\n font_size :\n Font size.\n reorder :\n If ``True``, reorder leaves so that left subtree has more leaves than right subtree.\n rotate_names :\n If ``True``, rotate names of leaves (only valid if **rotate** is ``False``).\n filename :\n Filename for saving image (optional).\n\n Example\n -------\n >>> dendrogram = np.array([[0, 1, 1, 2], [2, 3, 2, 3]])\n >>> from sknetwork.visualization import svg_dendrogram\n >>> image = svg_dendrogram(dendrogram)\n >>> image[1:4]\n 'svg'\n \"\"\"\n if colors is None:\n colors = STANDARD_COLORS\n elif isinstance(colors, dict):\n colors = np.array(list(colors.values()))\n elif isinstance(colors, list):\n colors = np.array(colors)\n\n if rotate:\n svg = svg_dendrogram_left(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,\n color, colors, font_size, reorder)\n else:\n svg = svg_dendrogram_top(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters,\n color, colors, font_size, reorder, rotate_names)\n\n 
if filename is not None:\n with open(filename + '.svg', 'w') as f:\n f.write(svg)\n\n return svg\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 17:16:22 2018\n@author: Nathan de Lara <[email protected]>\n@author: Thomas Bonald <[email protected]>\n\"\"\"\nfrom typing import Union\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom sknetwork.embedding.base import BaseBiEmbedding\nfrom sknetwork.linalg import SVDSolver, HalkoSVD, LanczosSVD, auto_solver, safe_sparse_dot, diag_pinv, normalize, \\\n RegularizedAdjacency\nfrom sknetwork.utils.check import check_format, check_adjacency_vector, check_nonnegative, check_n_components\n\n\nclass GSVD(BaseBiEmbedding):\n \"\"\"Graph embedding by Generalized Singular Value Decomposition of the adjacency or biadjacency matrix :math:`A`.\n This is equivalent to the Singular Value Decomposition of the matrix :math:`D_1^{- \\\\alpha_1}AD_2^{- \\\\alpha_2}`\n where :math:`D_1, D_2` are the diagonal matrices of row weights and columns weights, respectively, and\n :math:`\\\\alpha_1, \\\\alpha_2` are parameters.\n\n * Graphs\n * Digraphs\n * Bigraphs\n\n Parameters\n -----------\n n_components : int\n Dimension of the embedding.\n regularization : ``None`` or float (default = ``None``)\n Implicitly add edges of given weight between all pairs of nodes.\n relative_regularization : bool (default = ``True``)\n If ``True``, consider the regularization as relative to the total weight of the graph.\n factor_row : float (default = 0.5)\n Power factor :math:`\\\\alpha_1` applied to the diagonal matrix of row weights.\n factor_col : float (default = 0.5)\n Power factor :math:`\\\\alpha_2` applied to the diagonal matrix of column weights.\n factor_singular : float (default = 0.)\n Parameter :math:`\\\\alpha` applied to the singular values on right singular vectors.\n The embedding of rows and columns are respectively :math:`D_1^{- \\\\alpha_1}U \\\\Sigma^{1-\\\\alpha}` and\n :math:`D_2^{- \\\\alpha_2}V \\\\Sigma^\\\\alpha` where:\n\n * :math:`U` is the matrix of left singular vectors, shape (n_row, n_components)\n * :math:`V` is the matrix of right singular vectors, shape (n_col, n_components)\n * :math:`\\\\Sigma` is the diagonal matrix of singular values, shape (n_components, n_components)\n\n normalized : bool (default = ``True``)\n If ``True``, normalized the embedding so that each vector has norm 1 in the embedding space, i.e.,\n each vector lies on the unit sphere.\n solver : ``'auto'``, ``'halko'``, ``'lanczos'`` or :class:`SVDSolver`\n Which singular value solver to use.\n\n * ``'auto'``: call the auto_solver function.\n * ``'halko'``: randomized method, fast but less accurate than ``'lanczos'`` for ill-conditioned matrices.\n * ``'lanczos'``: power-iteration based method.\n * :class:`SVDSolver`: custom solver.\n\n Attributes\n ----------\n embedding_ : np.ndarray, shape = (n1, n_components)\n Embedding of the rows.\n embedding_row_ : np.ndarray, shape = (n1, n_components)\n Embedding of the rows (copy of **embedding_**).\n embedding_col_ : np.ndarray, shape = (n2, n_components)\n Embedding of the columns.\n singular_values_ : np.ndarray, shape = (n_components)\n Singular values.\n singular_vectors_left_ : np.ndarray, shape = (n_row, n_components)\n Left singular vectors.\n singular_vectors_right_ : np.ndarray, shape = (n_col, n_components)\n Right singular vectors.\n regularization_ : ``None`` or float\n Regularization factor added to all pairs of nodes.\n weights_col_ : np.ndarray, shape = (n2)\n Weights applied to columns.\n\n Example\n -------\n >>> from sknetwork.embedding import GSVD\n >>> from sknetwork.data 
import karate_club\n >>> gsvd = GSVD()\n >>> adjacency = karate_club()\n >>> embedding = gsvd.fit_transform(adjacency)\n >>> embedding.shape\n (34, 2)\n\n References\n ----------\n Abdi, H. (2007).\n `Singular value decomposition (SVD) and generalized singular value decomposition.\n <https://www.cs.cornell.edu/cv/ResearchPDF/Generalizing%20The%20Singular%20Value%20Decomposition.pdf>`_\n Encyclopedia of measurement and statistics, 907-912.\n \"\"\"\n def __init__(self, n_components=2, regularization: Union[None, float] = None, relative_regularization: bool = True,\n factor_row: float = 0.5, factor_col: float = 0.5, factor_singular: float = 0., normalized: bool = True,\n solver: Union[str, SVDSolver] = 'auto'):\n super(GSVD, self).__init__()\n\n self.n_components = n_components\n if regularization == 0:\n self.regularization = None\n else:\n self.regularization = regularization\n self.relative_regularization = relative_regularization\n self.factor_row = factor_row\n self.factor_col = factor_col\n self.factor_singular = factor_singular\n self.normalized = normalized\n if solver == 'halko':\n self.solver: SVDSolver = HalkoSVD()\n elif solver == 'lanczos':\n self.solver: SVDSolver = LanczosSVD()\n else:\n self.solver = solver\n\n self.singular_values_ = None\n self.singular_vectors_left_ = None\n self.singular_vectors_right_ = None\n self.regularization_ = None\n self.weights_col_ = None\n\n def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]) -> 'GSVD':\n \"\"\"Compute the GSVD of the adjacency or biadjacency matrix.\n\n Parameters\n ----------\n adjacency :\n Adjacency or biadjacency matrix of the graph.\n\n Returns\n -------\n self: :class:`GSVD`\n \"\"\"\n adjacency = check_format(adjacency).asfptype()\n n_row, n_col = adjacency.shape\n n_components = check_n_components(self.n_components, min(n_row, n_col) - 1)\n\n if self.solver == 'auto':\n solver = auto_solver(adjacency.nnz)\n if solver == 'lanczos':\n self.solver: SVDSolver = LanczosSVD()\n else: # pragma: no cover\n self.solver: SVDSolver = HalkoSVD()\n\n regularization = self.regularization\n if regularization:\n if self.relative_regularization:\n regularization = regularization * np.sum(adjacency.data) / (n_row * n_col)\n adjacency_reg = RegularizedAdjacency(adjacency, regularization)\n else:\n adjacency_reg = adjacency\n\n weights_row = adjacency_reg.dot(np.ones(n_col))\n weights_col = adjacency_reg.T.dot(np.ones(n_row))\n diag_row = diag_pinv(np.power(weights_row, self.factor_row))\n diag_col = diag_pinv(np.power(weights_col, self.factor_col))\n self.solver.fit(safe_sparse_dot(diag_row, safe_sparse_dot(adjacency_reg, diag_col)), n_components)\n\n singular_values = self.solver.singular_values_\n index = np.argsort(-singular_values)\n singular_values = singular_values[index]\n singular_vectors_left = self.solver.singular_vectors_left_[:, index]\n singular_vectors_right = self.solver.singular_vectors_right_[:, index]\n singular_left_diag = sparse.diags(np.power(singular_values, 1 - self.factor_singular))\n singular_right_diag = sparse.diags(np.power(singular_values, self.factor_singular))\n\n embedding_row = diag_row.dot(singular_vectors_left)\n embedding_col = diag_col.dot(singular_vectors_right)\n embedding_row = singular_left_diag.dot(embedding_row.T).T\n embedding_col = singular_right_diag.dot(embedding_col.T).T\n\n if self.normalized:\n embedding_row = normalize(embedding_row, p=2)\n embedding_col = normalize(embedding_col, p=2)\n\n self.embedding_row_ = embedding_row\n self.embedding_col_ = embedding_col\n 
self.embedding_ = embedding_row\n self.singular_values_ = singular_values\n self.singular_vectors_left_ = singular_vectors_left\n self.singular_vectors_right_ = singular_vectors_right\n self.regularization_ = regularization\n self.weights_col_ = weights_col\n\n return self\n\n @staticmethod\n def _check_adj_vector(adjacency_vectors):\n check_nonnegative(adjacency_vectors)\n\n def predict(self, adjacency_vectors: Union[sparse.csr_matrix, np.ndarray]) -> np.ndarray:\n \"\"\"Predict the embedding of new rows, defined by their adjacency vectors.\n\n Parameters\n ----------\n adjacency_vectors :\n Adjacency vectors of nodes.\n Array of shape (n_col,) (single vector) or (n_vectors, n_col)\n\n Returns\n -------\n embedding_vectors : np.ndarray\n Embedding of the nodes.\n \"\"\"\n self._check_fitted()\n singular_vectors_right = self.singular_vectors_right_\n singular_values = self.singular_values_\n\n n_row, _ = self.embedding_row_.shape\n n_col, _ = self.embedding_col_.shape\n\n adjacency_vectors = check_adjacency_vector(adjacency_vectors, n_col)\n self._check_adj_vector(adjacency_vectors)\n\n # regularization\n if self.regularization_:\n adjacency_vectors = RegularizedAdjacency(adjacency_vectors, self.regularization_)\n\n # weighting\n weights_row = adjacency_vectors.dot(np.ones(n_col))\n diag_row = diag_pinv(np.power(weights_row, self.factor_row))\n diag_col = diag_pinv(np.power(self.weights_col_, self.factor_col))\n adjacency_vectors = safe_sparse_dot(diag_row, safe_sparse_dot(adjacency_vectors, diag_col))\n\n # projection in the embedding space\n averaging = adjacency_vectors\n embedding_vectors = diag_row.dot(averaging.dot(singular_vectors_right))\n\n # scaling\n embedding_vectors /= np.power(singular_values, self.factor_singular)\n\n if self.normalized:\n embedding_vectors = normalize(embedding_vectors, p=2)\n\n if embedding_vectors.shape[0] == 1:\n embedding_vectors = embedding_vectors.ravel()\n\n return embedding_vectors\n\n\nclass SVD(GSVD):\n \"\"\"Graph embedding by Singular Value Decomposition of the adjacency or biadjacency matrix.\n\n * Graphs\n * Digraphs\n * Bigraphs\n\n Parameters\n ----------\n n_components : int\n Dimension of the embedding.\n regularization : ``None`` or float (default = ``None``)\n Implicitly add edges of given weight between all pairs of nodes.\n relative_regularization : bool (default = ``True``)\n If ``True``, consider the regularization as relative to the total weight of the graph.\n factor_singular : float (default = 0.)\n Power factor :math:`\\\\alpha` applied to the singular values on right singular vectors.\n The embedding of rows and columns are respectively :math:`U \\\\Sigma^{1-\\\\alpha}` and\n :math:`V \\\\Sigma^\\\\alpha` where:\n\n * :math:`U` is the matrix of left singular vectors, shape (n_row, n_components)\n * :math:`V` is the matrix of right singular vectors, shape (n_col, n_components)\n * :math:`\\\\Sigma` is the diagonal matrix of singular values, shape (n_components, n_components)\n\n normalized : bool (default = ``False``)\n If ``True``, normalized the embedding so that each vector has norm 1 in the embedding space, i.e.,\n each vector lies on the unit sphere.\n solver : ``'auto'``, ``'halko'``, ``'lanczos'`` or :class:`SVDSolver`\n Which singular value solver to use.\n\n * ``'auto'``: call the auto_solver function.\n * ``'halko'``: randomized method, fast but less accurate than ``'lanczos'`` for ill-conditioned matrices.\n * ``'lanczos'``: power-iteration based method.\n * :class:`SVDSolver`: custom solver.\n\n Attributes\n 
----------\n embedding_ : np.ndarray, shape = (n_row, n_components)\n Embedding of the rows.\n embedding_row_ : np.ndarray, shape = (n_row, n_components)\n Embedding of the rows (copy of **embedding_**).\n embedding_col_ : np.ndarray, shape = (n_col, n_components)\n Embedding of the columns.\n singular_values_ : np.ndarray, shape = (n_components)\n Singular values.\n singular_vectors_left_ : np.ndarray, shape = (n_row, n_components)\n Left singular vectors.\n singular_vectors_right_ : np.ndarray, shape = (n_col, n_components)\n Right singular vectors.\n regularization_ : ``None`` or float\n Regularization factor added to all pairs of nodes.\n\n Example\n -------\n >>> from sknetwork.embedding import SVD\n >>> from sknetwork.data import karate_club\n >>> svd = SVD()\n >>> adjacency = karate_club()\n >>> embedding = svd.fit_transform(adjacency)\n >>> embedding.shape\n (34, 2)\n\n References\n ----------\n Abdi, H. (2007).\n `Singular value decomposition (SVD) and generalized singular value decomposition.\n <https://www.cs.cornell.edu/cv/ResearchPDF/Generalizing%20The%20Singular%20Value%20Decomposition.pdf>`_\n Encyclopedia of measurement and statistics, 907-912.\n \"\"\"\n def __init__(self, n_components=2, regularization: Union[None, float] = None, relative_regularization: bool = True,\n factor_singular: float = 0., normalized: bool = False, solver: Union[str, SVDSolver] = 'auto'):\n super(SVD, self).__init__(n_components=n_components, regularization=regularization,\n relative_regularization=relative_regularization, factor_singular=factor_singular,\n factor_row=0., factor_col=0., normalized=normalized, solver=solver)\n\n @staticmethod\n def _check_adj_vector(adjacency_vectors: np.ndarray):\n return\n"
] | [
[
"scipy.sparse.linalg.bicgstab",
"numpy.clip",
"scipy.sparse.diags",
"numpy.ones",
"numpy.zeros"
],
[
"numpy.hstack",
"numpy.zeros_like",
"numpy.ones"
],
[
"numpy.array",
"numpy.ones_like",
"scipy.sparse.csr_matrix"
],
[
"numpy.array"
],
[
"numpy.argsort",
"numpy.ones",
"numpy.sum",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JackKelly/neuralnilm_prototype | [
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af",
"2119292e7d5c8a137797ad3c9abf9f37e7f749af"
] | [
"scripts/e179.py",
"scripts/e318.py",
"scripts/e303.py",
"scripts/e224.py",
"scripts/e563.py",
"scripts/disag_545c.py",
"scripts/e271.py",
"scripts/e314.py",
"scripts/e131.py",
"scripts/e125.py",
"scripts/e512.py"
] | [
"from __future__ import print_function, division\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer\nfrom lasagne.nonlinearities import sigmoid, rectify\nfrom lasagne.objectives import crossentropy, mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer\nfrom neuralnilm.updates import nesterov_momentum\nfrom functools import partial\nimport os\nfrom neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff\nfrom neuralnilm.experiment import run_experiment\nfrom neuralnilm.net import TrainingError\nimport __main__\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 250\nGRADIENT_STEPS = 100\n\n\"\"\"\ne103\nDiscovered that bottom layer is hardly changing. So will try\njust a single lstm layer\n\ne104\nstandard init\nlower learning rate\n\ne106\nlower learning rate to 0.001\n\ne108\nis e107 but with batch size of 5\n\ne109\nNormal(1) for BLSTM\n\ne110\n* Back to Uniform(5) for BLSTM\n* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f\nRESULTS: Seems to run fine again!\n\ne111\n* Try with nntools head\n* peepholes=False\nRESULTS: appears to be working well. Haven't seen a NaN, \neven with training rate of 0.1\n\ne112\n* n_seq_per_batch = 50\n\ne114\n* Trying looking at layer by layer training again.\n* Start with single BLSTM layer\n\ne115\n* Learning rate = 1\n\ne116\n* Standard inits\n\ne117\n* Uniform(1) init\n\ne119\n* Learning rate 10\n# Result: didn't work well!\n\ne120\n* init: Normal(1)\n* not as good as Uniform(5)\n\ne121\n* Uniform(25)\n\ne122\n* Just 10 cells\n* Uniform(5)\n\ne125\n* Pre-train lower layers\n\ne128\n* Add back all 5 appliances\n* Seq length 1500\n* skip_prob = 0.7\n\ne129\n* max_input_power = None\n* 2nd layer has Uniform(5)\n* pre-train bottom layer for 2000 epochs\n* add third layer at 4000 epochs\n\ne131\n\ne138\n* Trying to replicate e82 and then break it ;)\n\ne140\ndiff\n\ne141\nconv1D layer has Uniform(1), as does 2nd BLSTM layer\n\ne142\ndiff AND power\n\ne144\ndiff and power and max power is 5900\n\ne145\nUniform(25) for first layer\n\ne146\ngradient clip and use peepholes\n\ne147\n* try again with new code\n\ne148\n* learning rate 0.1\n\ne150\n* Same as e149 but without peepholes and using BLSTM not BBLSTM\n\ne151\n* Max pooling\n\n171\nlower learning rate\n\n172\neven lower learning rate\n\n173\nslightly higher learning rate!\n\n175\nsame as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs\n\n176\nnew cost function\n\n177\nanother new cost func (this one avoids NaNs)\nskip prob 0.7\n10x higher learning rate\n\n178\nrefactored cost func (functionally equiv to 177)\n0.1x learning rate\n\"\"\"\n\n\n# def scaled_cost(x, t):\n# raw_cost = (x - t) ** 2\n# energy_per_seq = t.sum(axis=1)\n# energy_per_batch = energy_per_seq.sum(axis=1)\n# energy_per_batch = energy_per_batch.reshape((-1, 1))\n# normaliser = energy_per_seq / energy_per_batch\n# cost = raw_cost.mean(axis=1) * (1 - normaliser)\n# return cost.mean()\n\nfrom theano.ifelse import ifelse\nimport theano.tensor as T\n\nTHRESHOLD = 0\ndef scaled_cost(x, t):\n sq_error = (x - t) ** 2\n def mask_and_mean_sq_error(mask):\n masked_sq_error = sq_error[mask.nonzero()]\n mean = masked_sq_error.mean()\n mean = ifelse(T.isnan(mean), 0.0, mean)\n return mean\n 
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)\n below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)\n return (above_thresh_mean + below_thresh_mean) / 2.0\n\n\ndef exp_a(name):\n source = RealApplianceSource(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television',\n 'dish washer',\n ['washer dryer', 'washing machine']\n ],\n max_appliance_powers=None,#[200, 100, 200, 2500, 2400],\n on_power_thresholds=[5, 5, 5, 5, 5],\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1494,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.7,\n n_seq_per_batch=25,\n subsample_target=9,\n input_padding=8,\n include_diff=True\n )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=250,\n loss_function=scaled_cost,\n updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),\n layers_config=[\n {\n 'type': LSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(25),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # (batch, features, time)\n },\n {\n 'type': Conv1DLayer, # convolve over the time axis\n 'num_filters': 50,\n 'filter_length': 3,\n 'stride': 1,\n 'nonlinearity': sigmoid,\n 'W': Uniform(1)\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # back to (batch, time, features)\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 3, # number of feature maps to be pooled together\n 'axis': 1 # pool over the time axis\n },\n {\n 'type': LSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(1),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # (batch, features, time)\n },\n {\n 'type': Conv1DLayer, # convolve over the time axis\n 'num_filters': 50,\n 'filter_length': 3,\n 'stride': 1,\n 'nonlinearity': sigmoid,\n 'W': Uniform(1)\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # back to (batch, time, features)\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 3, # number of feature maps to be pooled together\n 'axis': 1 # pool over the time axis\n },\n {\n 'type': LSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(1),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': rectify\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': None,\n 'W': Uniform(25)\n }\n ]\n )\n return net\n\ndef init_experiment(experiment):\n full_exp_name = NAME + experiment\n func_call = 'exp_{:s}(full_exp_name)'.format(experiment)\n print(\"***********************************\")\n print(\"Preparing\", full_exp_name, \"...\")\n net = eval(func_call)\n return net\n\n\ndef main():\n for experiment in list('a'):\n full_exp_name = NAME + experiment\n path = os.path.join(PATH, full_exp_name)\n try:\n net = init_experiment(experiment)\n run_experiment(net, path, epochs=None)\n except KeyboardInterrupt:\n break\n except TrainingError as exception:\n print(\"EXCEPTION:\", exception)\n except Exception as exception:\n raise\n print(\"EXCEPTION:\", exception)\n import ipdb; ipdb.set_trace()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nimport logging\nfrom sys import stdout\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import (Net, RealApplianceSource, \n BLSTMLayer, DimshuffleLayer, \n BidirectionalRecurrentLayer)\nfrom neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff\nfrom neuralnilm.experiment import run_experiment, init_experiment\nfrom neuralnilm.net import TrainingError\nfrom neuralnilm.layers import MixtureDensityLayer\nfrom neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive\nfrom neuralnilm.plot import MDNPlotter\n\nfrom lasagne.nonlinearities import sigmoid, rectify, tanh\nfrom lasagne.objectives import mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, \n ReshapeLayer, FeaturePoolLayer, RecurrentLayer)\nfrom lasagne.updates import nesterov_momentum, momentum\nfrom functools import partial\nimport os\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\nimport numpy as np\nimport theano.tensor as T\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 1000\nGRADIENT_STEPS = 100\nSEQ_LENGTH = 512\n\nsource_dict = dict(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television',\n 'dish washer',\n ['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200, 2500, 2400],\n on_power_thresholds=[5] * 5,\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=SEQ_LENGTH,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.5,\n n_seq_per_batch=16,\n subsample_target=4,\n include_diff=False,\n clip_appliance_power=True,\n target_is_prediction=False,\n# independently_center_inputs = True,\n standardise_input=True,\n unit_variance_targets=True,\n input_padding=0,\n lag=0\n# reshape_target_to_2D=True,\n # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),\n # 'std': np.array([ 0.12636775], dtype=np.float32)},\n # target_stats={\n # 'mean': np.array([ 0.04066789, 0.01881946, \n # 0.24639061, 0.17608672, 0.10273963], \n # dtype=np.float32),\n # 'std': np.array([ 0.11449792, 0.07338708, \n # 0.26608968, 0.33463112, 0.21250485], \n # dtype=np.float32)}\n)\n\nN = 50\nnet_dict = dict( \n save_plot_interval=SAVE_PLOT_INTERVAL,\n# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),\n# loss_function=lambda x, t: mdn_nll(x, t).mean(),\n# loss_function=lambda x, t: mse(x, t).mean(),\n# loss_function=partial(scaled_cost, loss_func=mse),\n loss_function=ignore_inactive,\n updates_func=momentum,\n learning_rate=1e-3,\n learning_rate_changes_by_iteration={\n# 500: 5e-06\n # 4000: 1e-03,\n # 6000: 5e-06,\n # 7000: 1e-06\n # 2000: 5e-06\n # 3000: 1e-05\n # 7000: 5e-06,\n # 10000: 1e-06,\n # 15000: 5e-07,\n # 50000: 1e-07\n }, \n do_save_activations=True\n# plotter=MDNPlotter\n)\n\ndef exp_a(name):\n global source\n source_dict_copy = deepcopy(source_dict)\n source = RealApplianceSource(**source_dict_copy)\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(\n experiment_name=name,\n source=source\n ))\n N = 50\n net_dict_copy['layers_config'] = [\n {\n 
'type': BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1.),\n 'nonlinearity': tanh\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 2, # number of feature maps to be pooled together\n 'axis': 1, # pool over the time axis\n 'pool_function': T.max\n },\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1/sqrt(N)),\n 'nonlinearity': tanh\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 2, # number of feature maps to be pooled together\n 'axis': 1, # pool over the time axis\n 'pool_function': T.max\n },\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1/sqrt(N)),\n 'nonlinearity': tanh\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'W': Normal(std=1/sqrt(N)),\n 'nonlinearity': T.nnet.softplus\n }\n\n # {\n # 'type': MixtureDensityLayer,\n # 'num_units': source.n_outputs,\n # 'num_components': 1,\n # 'nonlinearity_mu': T.nnet.softplus\n # }\n ]\n net = Net(**net_dict_copy)\n return net\n\n\ndef main():\n # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')\n EXPERIMENTS = list('a')\n for experiment in EXPERIMENTS:\n full_exp_name = NAME + experiment\n func_call = init_experiment(PATH, experiment, full_exp_name)\n logger = logging.getLogger(full_exp_name)\n try:\n net = eval(func_call)\n run_experiment(net, epochs=None)\n except KeyboardInterrupt:\n logger.info(\"KeyboardInterrupt\")\n break\n except Exception as exception:\n logger.exception(\"Exception\")\n raise\n finally:\n logging.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nimport logging\nfrom sys import stdout\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import (Net, RealApplianceSource, \n BLSTMLayer, DimshuffleLayer, \n BidirectionalRecurrentLayer)\nfrom neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff\nfrom neuralnilm.experiment import run_experiment, init_experiment\nfrom neuralnilm.net import TrainingError\nfrom neuralnilm.layers import MixtureDensityLayer\nfrom neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive\nfrom neuralnilm.plot import MDNPlotter\n\nfrom lasagne.nonlinearities import sigmoid, rectify, tanh\nfrom lasagne.objectives import mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, \n ReshapeLayer, FeaturePoolLayer, RecurrentLayer)\nfrom lasagne.updates import nesterov_momentum, momentum\nfrom functools import partial\nimport os\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\nimport numpy as np\nimport theano.tensor as T\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 1000\nGRADIENT_STEPS = 100\nSEQ_LENGTH = 512\n\nsource_dict = dict(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television'\n # 'dish washer',\n # ['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200, 2500, 2400],\n on_power_thresholds=[5] * 5,\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=SEQ_LENGTH,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.0,\n n_seq_per_batch=16,\n subsample_target=4,\n include_diff=False,\n clip_appliance_power=True,\n target_is_prediction=False,\n standardise_input=True,\n standardise_targets=True,\n input_padding=0,\n lag=0,\n reshape_target_to_2D=False,\n input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),\n 'std': np.array([ 0.12636775], dtype=np.float32)},\n target_stats={\n 'mean': np.array([ 0.04066789, 0.01881946, \n 0.24639061, 0.17608672, 0.10273963], \n dtype=np.float32),\n 'std': np.array([ 0.11449792, 0.07338708, \n 0.26608968, 0.33463112, 0.21250485], \n dtype=np.float32)}\n)\n\nN = 50\nnet_dict = dict( \n save_plot_interval=SAVE_PLOT_INTERVAL,\n# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),\n# loss_function=lambda x, t: mdn_nll(x, t).mean(),\n loss_function=lambda x, t: mse(x, t).mean(),\n updates_func=momentum,\n learning_rate=1e-02,\n learning_rate_changes_by_iteration={\n 500: 5e-03,\n 4000: 1e-03,\n 6000: 5e-06,\n 7000: 1e-06\n # 2000: 5e-06\n # 3000: 1e-05\n # 7000: 5e-06,\n # 10000: 1e-06,\n # 15000: 5e-07,\n # 50000: 1e-07\n }, \n do_save_activations=True\n)\n\n\ndef callback(net, epoch):\n net.source.reshape_target_to_2D = True\n net.plotter = MDNPlotter(net)\n net.generate_validation_data_and_set_shapes()\n net.loss_function = lambda x, t: mdn_nll(x, t).mean()\n net.learning_rate = 1e-05\n\n\ndef exp_a(name):\n # 3 appliances\n global source\n source_dict_copy = deepcopy(source_dict)\n source_dict_copy['reshape_target_to_2D'] = False\n source = RealApplianceSource(**source_dict_copy)\n 
source.reshape_target_to_2D = False\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(\n experiment_name=name,\n source=source\n ))\n N = 50\n net_dict_copy['layers_config'] = [\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1.),\n 'nonlinearity': tanh\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 4, # number of feature maps to be pooled together\n 'axis': 1, # pool over the time axis\n 'pool_function': T.max\n },\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1/sqrt(N)),\n 'nonlinearity': tanh\n },\n {\n 'type': DenseLayer,\n 'W': Normal(std=1/sqrt(N)),\n 'num_units': source.n_outputs,\n 'nonlinearity': None\n }\n ]\n net_dict_copy['layer_changes'] = {\n 5001: {\n 'remove_from': -2,\n 'callback': callback,\n 'new_layers': [\n {\n 'type': MixtureDensityLayer,\n 'num_units': source.n_outputs,\n 'num_components': 2\n }\n ]\n }\n }\n\n net = Net(**net_dict_copy)\n return net\n\n\ndef main():\n # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')\n EXPERIMENTS = list('a')\n for experiment in EXPERIMENTS:\n full_exp_name = NAME + experiment\n func_call = init_experiment(PATH, experiment, full_exp_name)\n logger = logging.getLogger(full_exp_name)\n try:\n net = eval(func_call)\n run_experiment(net, epochs=100000)\n except KeyboardInterrupt:\n logger.info(\"KeyboardInterrupt\")\n break\n except Exception as exception:\n logger.exception(\"Exception\")\n raise\n finally:\n logging.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer\nfrom lasagne.nonlinearities import sigmoid, rectify\nfrom lasagne.objectives import crossentropy, mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer\nfrom lasagne.updates import nesterov_momentum\nfrom functools import partial\nimport os\nfrom neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff\nfrom neuralnilm.experiment import run_experiment\nfrom neuralnilm.net import TrainingError\nimport __main__\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 250\nGRADIENT_STEPS = 100\n\n\"\"\"\ne103\nDiscovered that bottom layer is hardly changing. So will try\njust a single lstm layer\n\ne104\nstandard init\nlower learning rate\n\ne106\nlower learning rate to 0.001\n\ne108\nis e107 but with batch size of 5\n\ne109\nNormal(1) for BLSTM\n\ne110\n* Back to Uniform(5) for BLSTM\n* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f\nRESULTS: Seems to run fine again!\n\ne111\n* Try with nntools head\n* peepholes=False\nRESULTS: appears to be working well. Haven't seen a NaN, \neven with training rate of 0.1\n\ne112\n* n_seq_per_batch = 50\n\ne114\n* Trying looking at layer by layer training again.\n* Start with single BLSTM layer\n\ne115\n* Learning rate = 1\n\ne116\n* Standard inits\n\ne117\n* Uniform(1) init\n\ne119\n* Learning rate 10\n# Result: didn't work well!\n\ne120\n* init: Normal(1)\n* not as good as Uniform(5)\n\ne121\n* Uniform(25)\n\ne122\n* Just 10 cells\n* Uniform(5)\n\ne125\n* Pre-train lower layers\n\ne128\n* Add back all 5 appliances\n* Seq length 1500\n* skip_prob = 0.7\n\ne129\n* max_input_power = None\n* 2nd layer has Uniform(5)\n* pre-train bottom layer for 2000 epochs\n* add third layer at 4000 epochs\n\ne131\n\ne138\n* Trying to replicate e82 and then break it ;)\n\ne140\ndiff\n\ne141\nconv1D layer has Uniform(1), as does 2nd BLSTM layer\n\ne142\ndiff AND power\n\ne144\ndiff and power and max power is 5900\n\ne145\nUniform(25) for first layer\n\ne146\ngradient clip and use peepholes\n\ne147\n* try again with new code\n\ne148\n* learning rate 0.1\n\ne150\n* Same as e149 but without peepholes and using BLSTM not BBLSTM\n\ne151\n* Max pooling\n\n171\nlower learning rate\n\n172\neven lower learning rate\n\n173\nslightly higher learning rate!\n\n175\nsame as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs\n\n176\nnew cost function\n\n177\nanother new cost func (this one avoids NaNs)\nskip prob 0.7\n10x higher learning rate\n\n178\nrefactored cost func (functionally equiv to 177)\n0.1x learning rate\n\ne180\n* mse\n\ne181\n* back to scaled cost\n* different architecture:\n - convd1 at input (2x)\n - then 3 LSTM layers, each with a 2x conv in between\n - no diff input\n\ne189\n* divide dominant appliance power\n* mse\n\n217\nno peepholes\n\n218\ndon't clip gradient\nlag=64\n\n219\nback to lag=32\ntry all 5 appliances (with max input = 500)\n\"\"\"\n\n\n# def scaled_cost(x, t):\n# raw_cost = (x - t) ** 2\n# energy_per_seq = t.sum(axis=1)\n# energy_per_batch = energy_per_seq.sum(axis=1)\n# energy_per_batch = energy_per_batch.reshape((-1, 1))\n# normaliser = energy_per_seq / energy_per_batch\n# cost = raw_cost.mean(axis=1) * (1 - 
normaliser)\n# return cost.mean()\n\nfrom theano.ifelse import ifelse\nimport theano.tensor as T\n\nTHRESHOLD = 0\ndef scaled_cost(x, t):\n sq_error = (x - t) ** 2\n def mask_and_mean_sq_error(mask):\n masked_sq_error = sq_error[mask.nonzero()]\n mean = masked_sq_error.mean()\n mean = ifelse(T.isnan(mean), 0.0, mean)\n return mean\n above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)\n below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)\n return (above_thresh_mean + below_thresh_mean) / 2.0\n\n\ndef exp_a(name):\n # global source\n # source = RealApplianceSource(\n # filename='/data/dk3810/ukdale.h5',\n # appliances=[\n # ['fridge freezer', 'fridge', 'freezer'], \n # 'hair straighteners', \n # 'television',\n # 'dish washer',\n # ['washer dryer', 'washing machine']\n # ],\n # max_appliance_powers=None,#[500] * 5,\n # on_power_thresholds=[5] * 5,\n # max_input_power=2500,\n # min_on_durations=[60, 60, 60, 1800, 1800],\n # min_off_durations=[12, 12, 12, 1800, 600],\n # window=(\"2013-06-01\", \"2014-07-01\"),\n # seq_length=1500,\n # output_one_appliance=False,\n # boolean_targets=False,\n # train_buildings=[1],\n # validation_buildings=[1], \n # skip_probability=0.7,\n # n_seq_per_batch=25,\n # # subsample_target=4,\n # # input_padding=0,\n # include_diff=False,\n # clip_appliance_power=False,\n # lag=0\n # )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=1000,\n loss_function=scaled_cost,\n updates=partial(nesterov_momentum, learning_rate=0.0001),\n layers_config=[\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(25)\n },\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(5) # doesn't work if this is 1\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': None,\n 'W': Uniform(25)\n }\n ]\n )\n return net\n\n\n\ndef init_experiment(experiment):\n full_exp_name = NAME + experiment\n func_call = 'exp_{:s}(full_exp_name)'.format(experiment)\n print(\"***********************************\")\n print(\"Preparing\", full_exp_name, \"...\")\n net = eval(func_call)\n return net\n\n\ndef main():\n for experiment in list('a'):\n full_exp_name = NAME + experiment\n path = os.path.join(PATH, full_exp_name)\n try:\n net = init_experiment(experiment)\n run_experiment(net, path, epochs=None)\n except KeyboardInterrupt:\n break\n except TrainingError as exception:\n print(\"EXCEPTION:\", exception)\n except Exception as exception:\n raise\n print(\"EXCEPTION:\", exception)\n import ipdb; ipdb.set_trace()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nimport logging\nfrom sys import stdout\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import (Net, RealApplianceSource)\nfrom neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,\n RandomSegments, RandomSegmentsInMemory,\n SameLocation, MultiSource)\nfrom neuralnilm.experiment import run_experiment, init_experiment\nfrom neuralnilm.net import TrainingError\nfrom neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,\n SharedWeightsDenseLayer, BLSTMLayer)\nfrom neuralnilm.objectives import (scaled_cost, mdn_nll,\n scaled_cost_ignore_inactive, ignore_inactive,\n scaled_cost3)\nfrom neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter\nfrom neuralnilm.updates import clipped_nesterov_momentum\nfrom neuralnilm.disaggregate import disaggregate\nfrom neuralnilm.rectangulariser import rectangularise\n\nfrom lasagne.nonlinearities import (sigmoid, rectify, tanh, identity, softmax)\nfrom lasagne.objectives import squared_error, binary_crossentropy\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import (DenseLayer, Conv1DLayer,\n ReshapeLayer, FeaturePoolLayer,\n DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)\nfrom lasagne.updates import nesterov_momentum, momentum\nfrom functools import partial\nimport os\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\nimport numpy as np\nimport theano.tensor as T\nimport gc\n\n\n\"\"\"\nMax powers:\nmicrowave = 3000W\n\"\"\"\n\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\n#PATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nPATH = \"/data/dk3810/figures\"\n# PATH = \"/home/jack/experiments/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 1000\n\nUKDALE_FILENAME = '/data/dk3810/ukdale.h5'\n\nMAX_TARGET_POWER = 3000\nON_POWER_THRESHOLD = 200\nMIN_ON_DURATION = 18\nMIN_OFF_DURATION = 30\nTARGET_APPLIANCE = 'microwave'\n\nSEQ_LENGTH = 256\nN_SEQ_PER_BATCH = 64\nTRAIN_BUILDINGS = [1, 2]\nVALIDATION_BUILDINGS = [5]\nSKIP_PROBABILITY_FOR_TARGET = 0.5\nINDEPENDENTLY_CENTER_INPUTS = True\nSUBSAMPLE_TARGET = 2\nINPUT_PADDING = 4\n\nWINDOW_PER_BUILDING = {\n 1: (\"2013-03-17\", \"2014-12-01\"),\n 2: (\"2013-05-22\", \"2013-10-01\"),\n 3: (\"2013-02-27\", \"2013-04-01\"),\n 4: (\"2013-03-09\", \"2013-09-20\"),\n 5: (\"2014-06-29\", \"2014-08-27\")\n}\n\nINPUT_STATS = {\n 'mean': np.array([297.87216187], dtype=np.float32),\n 'std': np.array([374.43884277], dtype=np.float32)\n}\n\n\ndef only_train_on_real_data(net, iteration):\n net.logger.info(\n \"Iteration {}: Now only training on real data.\".format(iteration))\n net.source.sources[0]['train_probability'] = 0.0\n net.source.sources[1]['train_probability'] = 1.0\n\n\nnet_dict = dict(\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=lambda x, t: squared_error(x, t).mean(),\n updates_func=nesterov_momentum,\n learning_rate=1e-1,\n learning_rate_changes_by_iteration={\n 500: 1e-2,\n 10000: 1e-3\n },\n epoch_callbacks={\n 350000: only_train_on_real_data\n },\n do_save_activations=True,\n auto_reshape=True,\n layers_config=[\n {\n 'type': DenseLayer,\n 'num_units': 16,\n 'nonlinearity': tanh\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 32,\n 'merge_mode': 'concatenate'\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': Conv1DLayer,\n 'num_filters': 16,\n 'filter_size': 5,\n 'stride': 2,\n 'nonlinearity': tanh\n },\n {\n 'type': 
DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 128,\n 'merge_mode': 'concatenate'\n },\n {\n 'type': DenseLayer,\n 'num_units': 128,\n 'nonlinearity': tanh\n },\n {\n 'type': DenseLayer,\n 'num_units': 1,\n 'nonlinearity': None\n }\n ]\n)\n\n\ndef exp_a(name):\n logger = logging.getLogger(name)\n global multi_source\n\n # real_appliance_source1 = RealApplianceSource(\n # logger=logger,\n # filename=UKDALE_FILENAME,\n # appliances=[\n # TARGET_APPLIANCE,\n # ['fridge freezer', 'fridge', 'freezer'],\n # 'dish washer',\n # 'kettle',\n # ['washer dryer', 'washing machine']\n # ],\n # max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 2400],\n # on_power_thresholds=[ON_POWER_THRESHOLD] + [10] * 4,\n # min_on_durations=[MIN_ON_DURATION, 60, 1800, 12, 1800],\n # min_off_durations=[MIN_OFF_DURATION, 12, 1800, 12, 600],\n # divide_input_by_max_input_power=False,\n # window_per_building=WINDOW_PER_BUILDING,\n # seq_length=SEQ_LENGTH,\n # output_one_appliance=True,\n # train_buildings=TRAIN_BUILDINGS,\n # validation_buildings=VALIDATION_BUILDINGS,\n # n_seq_per_batch=N_SEQ_PER_BATCH,\n # skip_probability=0.75,\n # skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,\n # standardise_input=True,\n # input_stats=INPUT_STATS,\n # independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,\n # subsample_target=SUBSAMPLE_TARGET,\n # input_padding=INPUT_PADDING\n # )\n\n # same_location_source1 = SameLocation(\n # logger=logger,\n # filename=UKDALE_FILENAME,\n # target_appliance=TARGET_APPLIANCE,\n # window_per_building=WINDOW_PER_BUILDING,\n # seq_length=SEQ_LENGTH,\n # train_buildings=TRAIN_BUILDINGS,\n # validation_buildings=VALIDATION_BUILDINGS,\n # n_seq_per_batch=N_SEQ_PER_BATCH,\n # skip_probability=SKIP_PROBABILITY_FOR_TARGET,\n # standardise_input=True,\n # offset_probability=1,\n # divide_target_by=MAX_TARGET_POWER,\n # input_stats=INPUT_STATS,\n # independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,\n # on_power_threshold=ON_POWER_THRESHOLD,\n # min_on_duration=MIN_ON_DURATION,\n # min_off_duration=MIN_OFF_DURATION,\n # include_all=True,\n # allow_incomplete=True,\n # subsample_target=SUBSAMPLE_TARGET,\n # input_padding=INPUT_PADDING\n # )\n\n # multi_source = MultiSource(\n # sources=[\n # {\n # 'source': real_appliance_source1,\n # 'train_probability': 0.5,\n # 'validation_probability': 0\n # },\n # {\n # 'source': same_location_source1,\n # 'train_probability': 0.5,\n # 'validation_probability': 1\n # }\n # ],\n # standardisation_source=same_location_source1\n # )\n\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(\n experiment_name=name,\n source=multi_source,\n plotter=Plotter(\n n_seq_to_plot=32,\n n_training_examples_to_plot=16)\n ))\n net = Net(**net_dict_copy)\n return net\n\n\ndef main():\n EXPERIMENTS = list('a')\n for experiment in EXPERIMENTS:\n full_exp_name = NAME + experiment\n func_call = init_experiment(PATH, experiment, full_exp_name)\n logger = logging.getLogger(full_exp_name)\n try:\n net = eval(func_call)\n run_experiment(net, epochs=None)\n except KeyboardInterrupt:\n logger.info(\"KeyboardInterrupt\")\n break\n except Exception:\n logger.exception(\"Exception\")\n # raise\n finally:\n logging.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\"\"\"\nEmacs variables\nLocal Variables:\ncompile-command: \"cp /home/jack/workspace/python/neuralnilm/scripts/e563.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/\"\nEnd:\n\"\"\"\n",
"from __future__ import print_function, division\n#import matplotlib\nimport logging\nfrom sys import stdout\n# matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nimport matplotlib.pyplot as plt\nfrom neuralnilm import (Net, RealApplianceSource)\nfrom neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,\n RandomSegments, RandomSegmentsInMemory,\n SameLocation)\nfrom neuralnilm.experiment import run_experiment, init_experiment\nfrom neuralnilm.net import TrainingError\nfrom neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,\n SharedWeightsDenseLayer)\nfrom neuralnilm.objectives import (scaled_cost, mdn_nll,\n scaled_cost_ignore_inactive, ignore_inactive,\n scaled_cost3)\nfrom neuralnilm.plot import (\n StartEndMeanPlotter, plot_disaggregate_start_stop_end)\nfrom neuralnilm.disaggregate import (\n disaggregate_start_stop_end, rectangles_to_matrix,\n rectangles_matrix_to_vector, save_rectangles)\nfrom neuralnilm.rectangulariser import rectangularise\n\nfrom lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax\nfrom lasagne.objectives import squared_error, binary_crossentropy\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import (DenseLayer, Conv1DLayer,\n ReshapeLayer, FeaturePoolLayer,\n DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)\nfrom lasagne.updates import nesterov_momentum, momentum\nfrom functools import partial\nimport os\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\nimport numpy as np\nimport theano.tensor as T\nimport gc\n\n\nNAME = 'e545'\nPATH = \"/data/dk3810/figures\"\nSAVE_PLOT_INTERVAL = 25000\n\nN_SEQ_PER_BATCH = 64\nMAX_TARGET_POWER = 200\n\nfull_exp_name = NAME + 'c'\npath = os.path.join(PATH, full_exp_name)\nprint(\"Changing directory to\", path)\nos.chdir(path)\n\nlogger = logging.getLogger(full_exp_name)\nif not logger.handlers:\n fh = logging.FileHandler(full_exp_name + '.log')\n formatter = logging.Formatter('%(asctime)s %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(logging.StreamHandler(stream=stdout))\n\nlogger.setLevel(logging.DEBUG)\nlogger.info(\"***********************************\")\nlogger.info(\"Preparing \" + full_exp_name + \"...\")\n\n# Load input stats\ninput_stats = {\n 'mean': np.load(\"input_stats_mean.npy\"),\n 'std': np.load(\"input_stats_std.npy\")\n}\n\n\nsource_dict = dict(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'],\n ['washer dryer', 'washing machine'],\n 'kettle',\n 'HTPC',\n 'dish washer'\n ],\n max_appliance_powers=[300, 2400, 2600, 200, 2500],\n on_power_thresholds=[5] * 5,\n min_on_durations=[60, 1800, 30, 60, 1800],\n min_off_durations=[12, 600, 1, 12, 1800],\n # Just load a tiny bit of data. 
Won't be used.\n window=(\"2013-04-12\", \"2013-04-27\"),\n seq_length=2048,\n output_one_appliance=True,\n train_buildings=[1],\n validation_buildings=[1],\n n_seq_per_batch=N_SEQ_PER_BATCH,\n standardise_input=True,\n independently_center_inputs=False,\n skip_probability=0.75,\n target_is_start_and_end_and_mean=True,\n one_target_per_seq=False,\n input_stats=input_stats\n)\n\n\nnet_dict = dict(\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=lambda x, t: squared_error(x, t).mean(),\n updates_func=nesterov_momentum,\n learning_rate=1e-3,\n do_save_activations=True,\n auto_reshape=False,\n plotter=StartEndMeanPlotter(\n n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)\n)\n\n\ndef exp_a(name):\n global source\n source_dict_copy = deepcopy(source_dict)\n source_dict_copy.update(dict(\n logger=logging.getLogger(name)\n ))\n source = RealApplianceSource(**source_dict_copy)\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(\n experiment_name=name,\n source=source\n ))\n net_dict_copy['layers_config'] = [\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # (batch, features, time)\n },\n {\n 'type': PadLayer,\n 'width': 4\n },\n {\n 'type': Conv1DLayer, # convolve over the time axis\n 'num_filters': 16,\n 'filter_size': 4,\n 'stride': 1,\n 'nonlinearity': None,\n 'border_mode': 'valid'\n },\n {\n 'type': Conv1DLayer, # convolve over the time axis\n 'num_filters': 16,\n 'filter_size': 4,\n 'stride': 1,\n 'nonlinearity': None,\n 'border_mode': 'valid'\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1), # back to (batch, time, features)\n 'label': 'dimshuffle3'\n },\n {\n 'type': DenseLayer,\n 'num_units': 512 * 16,\n 'nonlinearity': rectify,\n 'label': 'dense0'\n },\n {\n 'type': DenseLayer,\n 'num_units': 512 * 8,\n 'nonlinearity': rectify,\n 'label': 'dense1'\n },\n {\n 'type': DenseLayer,\n 'num_units': 512 * 4,\n 'nonlinearity': rectify,\n 'label': 'dense2'\n },\n {\n 'type': DenseLayer,\n 'num_units': 512,\n 'nonlinearity': rectify\n },\n {\n 'type': DenseLayer,\n 'num_units': 3,\n 'nonlinearity': None\n }\n ]\n net = Net(**net_dict_copy)\n net.load_params(300000)\n return net\n\n# Load neural net\nnet = exp_a(full_exp_name)\nnet.print_net()\nnet.compile()\n\n# Generate mains data\n# create new source, based on net's source,\n# but with 5 outputs (so each seq includes entire appliance activation,\n# and to make it easier to plot every appliance),\n# and long seq length,\n# then make one long mains by concatenating each seq\nsource_dict_copy = deepcopy(source_dict)\nsource_dict_copy.update(dict(\n logger=logger,\n seq_length=2048,\n border=100,\n output_one_appliance=False,\n input_stats=input_stats,\n target_is_start_and_end_and_mean=False,\n window=(\"2014-12-10\", None)\n))\nmains_source = RealApplianceSource(**source_dict_copy)\nmains_source.start()\n\nN_BATCHES = 1\nlogger.info(\"Preparing synthetic mains data for {} batches.\".format(N_BATCHES))\nmains = None\ntargets = None\nTARGET_I = 3\nfor batch_i in range(N_BATCHES):\n batch = mains_source.queue.get(timeout=30)\n mains_batch, targets_batch = batch.data\n if mains is None:\n mains = mains_batch\n targets = targets_batch[:, :, TARGET_I]\n else:\n mains = np.concatenate((mains, mains_batch))\n targets = np.concatenate((targets, targets_batch[:, :, TARGET_I]))\n\nmains_source.stop()\n\n# Post-process data\nseq_length = net.input_shape[1]\n\n\ndef pad(data):\n return np.pad(data, (seq_length, seq_length), mode='constant',\n constant_values=(data.min().astype(float), ))\n\n\nmains = 
pad(mains.flatten())\ntargets = pad(targets.flatten())\nlogger.info(\"Done preparing synthetic mains data!\")\n\n# Unstandardise for plotting\ntargets *= MAX_TARGET_POWER\nmains_unstandardised = (mains * input_stats['std']) + input_stats['mean']\nmains_unstandardised *= mains_source.max_input_power\n\n# disag\nSTRIDE = 16\nlogger.info(\"Starting disag...\")\nrectangles = disaggregate_start_stop_end(\n mains, net, stride=STRIDE, max_target_power=MAX_TARGET_POWER)\nrectangles_matrix = rectangles_to_matrix(rectangles[0], MAX_TARGET_POWER)\ndisag_vector = rectangles_matrix_to_vector(\n rectangles_matrix, min_on_power=50, overlap_threshold=0.30)\n\n# save data to disk\nlogger.info(\"Saving data to disk...\")\nnp.save('mains', mains_unstandardised)\nnp.save('targets', targets)\nnp.save('disag_vector', disag_vector)\nsave_rectangles(rectangles)\n\n# plot\nlogger.info(\"Plotting...\")\nfig, axes = plt.subplots(4, 1, sharex=True)\nalpha = STRIDE / seq_length\nplot_disaggregate_start_stop_end(rectangles, ax=axes[0], alpha=alpha)\naxes[0].set_title('Network output')\n\naxes[1].plot(disag_vector)\naxes[1].set_title(\"Disaggregated vector\")\n\naxes[2].plot(targets)\naxes[2].set_title(\"Target\")\n\naxes[3].plot(mains_unstandardised)\naxes[3].set_title('Network input')\naxes[3].set_xlim((0, len(mains)))\nplt.show()\nlogger.info(\"DONE!\")\n\n\"\"\"\nEmacs variables\nLocal Variables:\ncompile-command: \"cp /home/jack/workspace/python/neuralnilm/scripts/disag_545c.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/\"\nEnd:\n\"\"\"\n",
"from __future__ import print_function, division\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer\nfrom neuralnilm.net import BidirectionalRecurrentLayer\nfrom lasagne.nonlinearities import sigmoid, rectify, tanh\nfrom lasagne.objectives import crossentropy, mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer\nfrom lasagne.updates import nesterov_momentum\nfrom functools import partial\nimport os\nfrom neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff\nfrom neuralnilm.experiment import run_experiment\nfrom neuralnilm.net import TrainingError\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 500\nGRADIENT_STEPS = 100\n\n\"\"\"\ne233\nbased on e131c but with:\n* lag=32\n* pool\n\ne234\n* init final layer and conv layer\n\n235\nno lag\n\n236\nshould be exactly as 131c: no pool, no lag, no init for final and conv layer\n\n237\nputting the pool back\n\n238\nseems pooling hurts us! disable pooling.\nenable lag = 32\n\n239\nBLSTM\nlag = 20\n\n240\nLSTM not BLSTM\nvarious lags\n\n241\noutput is prediction\n\n260\nstandardise inputs and outputs.\n\n261\ntrying just 3 appliances. Standardisation\n\n263\nconv1d between layers\n\nideas for next TODO:\n* 3 LSTM layers with smaller conv between them\n* why does pooling hurt us?\n\"\"\"\n\nfrom theano.ifelse import ifelse\nimport theano.tensor as T\n\nTHRESHOLD = 0\ndef scaled_cost(x, t):\n sq_error = (x - t) ** 2\n def mask_and_mean_sq_error(mask):\n masked_sq_error = sq_error[mask.nonzero()]\n mean = masked_sq_error.mean()\n mean = ifelse(T.isnan(mean), 0.0, mean)\n return mean\n above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)\n below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)\n return (above_thresh_mean + below_thresh_mean) / 2.0\n\n\nsource_dict = dict(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television'\n #'dish washer',\n #['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200, 2500, 2400],\n on_power_thresholds=[5] * 5,\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1500,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n # skip_probability=0.0,\n n_seq_per_batch=50,\n # subsample_target=5,\n include_diff=False,\n clip_appliance_power=True,\n target_is_prediction=False,\n standardise_input=True,\n standardise_targets=True,\n input_padding=0,\n lag=0\n)\n\n\ndef change_learning_rate(net, epoch):\n net.updates = partial(nesterov_momentum, learning_rate=0.01)\n net.compile()\n\n\ndef change_subsample(net, epoch):\n net.source.subsample_target = 5\n net.generate_validation_data_and_set_shapes()\n\nnet_dict = dict( \n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=scaled_cost,\n updates=partial(nesterov_momentum, learning_rate=0.001),\n do_save_activations=True\n# epoch_callbacks={250: change_learning_rate}\n)\n\n\ndef exp_a(name):\n global source\n # source_dict_copy = deepcopy(source_dict)\n # source = 
RealApplianceSource(**source_dict_copy)\n source.subsample_target = 5\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(experiment_name=name, source=source))\n net_dict_copy['layers_config'] = [\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': 25,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1.),\n 'nonlinearity': tanh\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 5, # number of feature maps to be pooled together\n 'axis': 1, # pool over the time axis\n 'pool_function': T.mean\n },\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': 25,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1/sqrt(25)),\n 'nonlinearity': tanh\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': None,\n 'W': Normal(std=(1/sqrt(25)))\n }\n ]\n net = Net(**net_dict_copy)\n return net\n\n\ndef init_experiment(experiment):\n full_exp_name = NAME + experiment\n func_call = 'exp_{:s}(full_exp_name)'.format(experiment)\n print(\"***********************************\")\n print(\"Preparing\", full_exp_name, \"...\")\n net = eval(func_call)\n return net\n\n\ndef main():\n for experiment in list('a'):\n full_exp_name = NAME + experiment\n path = os.path.join(PATH, full_exp_name)\n try:\n net = init_experiment(experiment)\n run_experiment(net, path, epochs=None)\n except KeyboardInterrupt:\n break\n except TrainingError as exception:\n print(\"EXCEPTION:\", exception)\n except Exception as exception:\n print(\"EXCEPTION:\", exception)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nimport logging\nfrom sys import stdout\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import (Net, RealApplianceSource, \n BLSTMLayer, DimshuffleLayer, \n BidirectionalRecurrentLayer)\nfrom neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff\nfrom neuralnilm.experiment import run_experiment, init_experiment\nfrom neuralnilm.net import TrainingError\nfrom neuralnilm.layers import MixtureDensityLayer\nfrom neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive\nfrom neuralnilm.plot import MDNPlotter\n\nfrom lasagne.nonlinearities import sigmoid, rectify, tanh\nfrom lasagne.objectives import mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, \n ReshapeLayer, FeaturePoolLayer, RecurrentLayer)\nfrom lasagne.updates import nesterov_momentum, momentum\nfrom functools import partial\nimport os\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\nimport numpy as np\nimport theano.tensor as T\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 250\nGRADIENT_STEPS = 100\nSEQ_LENGTH = 512\n\nsource_dict = dict(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television'\n # 'dish washer',\n # ['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200, 2500, 2400],\n on_power_thresholds=[5] * 5,\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=SEQ_LENGTH,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.0,\n n_seq_per_batch=16,\n subsample_target=4,\n include_diff=False,\n clip_appliance_power=True,\n target_is_prediction=False,\n independently_center_inputs = True,\n standardise_input=True,\n unit_variance_targets=True,\n input_padding=0,\n lag=0,\n reshape_target_to_2D=True,\n input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),\n 'std': np.array([ 0.12636775], dtype=np.float32)},\n target_stats={\n 'mean': np.array([ 0.04066789, 0.01881946, \n 0.24639061, 0.17608672, 0.10273963], \n dtype=np.float32),\n 'std': np.array([ 0.11449792, 0.07338708, \n 0.26608968, 0.33463112, 0.21250485], \n dtype=np.float32)}\n)\n\nN = 50\nnet_dict = dict( \n save_plot_interval=SAVE_PLOT_INTERVAL,\n# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),\n loss_function=lambda x, t: mdn_nll(x, t).mean(),\n# loss_function=lambda x, t: mse(x, t).mean(),\n# loss_function=partial(scaled_cost, loss_func=mse),\n updates_func=momentum,\n learning_rate=5e-3,\n learning_rate_changes_by_iteration={\n 50: 1e-3,\n 200: 5e-4,\n 400: 1e-4\n # 4000: 1e-03,\n # 6000: 5e-06,\n # 7000: 1e-06\n # 2000: 5e-06\n # 3000: 1e-05\n # 7000: 5e-06,\n # 10000: 1e-06,\n # 15000: 5e-07,\n # 50000: 1e-07\n }, \n do_save_activations=True,\n plotter=MDNPlotter\n)\n\ndef exp_a(name):\n # 3 appliances\n global source\n # source_dict_copy = deepcopy(source_dict)\n # source = RealApplianceSource(**source_dict_copy)\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(\n experiment_name=name,\n source=source\n ))\n N = 25\n net_dict_copy['layers_config'] = [\n {\n 'type': 
BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1.),\n 'nonlinearity': tanh\n },\n {\n 'type': FeaturePoolLayer,\n 'ds': 4, # number of feature maps to be pooled together\n 'axis': 1, # pool over the time axis\n 'pool_function': T.max\n },\n {\n 'type': BidirectionalRecurrentLayer,\n 'num_units': N,\n 'gradient_steps': GRADIENT_STEPS,\n 'W_in_to_hid': Normal(std=1/sqrt(N)),\n 'nonlinearity': tanh\n },\n {\n 'type': MixtureDensityLayer,\n 'num_units': source.n_outputs,\n 'num_components': 1,\n 'nonlinearity_mu': T.nnet.softplus\n }\n ]\n net = Net(**net_dict_copy)\n return net\n\n\ndef main():\n # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')\n EXPERIMENTS = list('a')\n for experiment in EXPERIMENTS:\n full_exp_name = NAME + experiment\n func_call = init_experiment(PATH, experiment, full_exp_name)\n logger = logging.getLogger(full_exp_name)\n try:\n net = eval(func_call)\n run_experiment(net, epochs=None)\n except KeyboardInterrupt:\n logger.info(\"KeyboardInterrupt\")\n break\n except Exception as exception:\n logger.exception(\"Exception\")\n raise\n finally:\n logging.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer\nfrom lasagne.nonlinearities import sigmoid, rectify\nfrom lasagne.objectives import crossentropy, mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer\nfrom lasagne.updates import adagrad, nesterov_momentum\nfrom functools import partial\nimport os\nfrom neuralnilm.source import standardise\nfrom neuralnilm.experiment import run_experiment\nfrom neuralnilm.net import TrainingError\nimport __main__\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 250\nGRADIENT_STEPS = 100\n\n\"\"\"\ne103\nDiscovered that bottom layer is hardly changing. So will try\njust a single lstm layer\n\ne104\nstandard init\nlower learning rate\n\ne106\nlower learning rate to 0.001\n\ne108\nis e107 but with batch size of 5\n\ne109\nNormal(1) for LSTM\n\ne110\n* Back to Uniform(5) for LSTM\n* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f\nRESULTS: Seems to run fine again!\n\ne111\n* Try with nntools head\n* peepholes=False\nRESULTS: appears to be working well. Haven't seen a NaN, \neven with training rate of 0.1\n\ne112\n* n_seq_per_batch = 50\n\ne114\n* Trying looking at layer by layer training again.\n* Start with single LSTM layer\n\ne115\n* Learning rate = 1\n\ne116\n* Standard inits\n\ne117\n* Uniform(1) init\n\ne119\n* Learning rate 10\n# Result: didn't work well!\n\ne120\n* init: Normal(1)\n* not as good as Uniform(5)\n\ne121\n* Uniform(25)\n\ne122\n* Just 10 cells\n* Uniform(5)\n\ne125\n* Pre-train lower layers\n\ne128\n* Add back all 5 appliances\n* Seq length 1500\n* skip_prob = 0.7\n\ne129\n* max_input_power = None\n* 2nd layer has Uniform(5)\n* pre-train bottom layer for 2000 epochs\n* add third layer at 4000 epochs\n\ne131\n\n\"\"\"\n\ndef exp_a(name):\n # e130a but no pretraining and 3 appliances but max_input_power is 5900\n # Results: learns something. Still confuses TV for fridge. No aweful though. 
Appears to train very quickly though (at 250 epochs it's doing about as well as it did at 1750 epochs)\n source = RealApplianceSource(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television'#,\n #'dish washer',\n #['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200],#, 2500, 2400],\n on_power_thresholds=[5, 5, 5],#, 5, 5],\n max_input_power=5900,\n min_on_durations=[60, 60, 60],#, 1800, 1800],\n min_off_durations=[12, 12, 12],#, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1500,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.7,\n n_seq_per_batch=50\n )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=crossentropy,\n updates=partial(nesterov_momentum, learning_rate=1.0),\n layers_config=[\n {\n 'type': BLSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(25),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': sigmoid\n }\n ]\n )\n return net\n\n\n\ndef exp_b(name):\n # same input as A but e59a's net (plus gradient steps) and batch size of 10\n # hasn't learnt anything useful! Just means.\n source = RealApplianceSource(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television'#,\n #'dish washer',\n #['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200],#, 2500, 2400],\n on_power_thresholds=[5, 5, 5],#, 5, 5],\n max_input_power=5900,\n min_on_durations=[60, 60, 60],#, 1800, 1800],\n min_off_durations=[12, 12, 12],#, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1500,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.7,\n n_seq_per_batch=10,\n subsample_target=5\n )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=crossentropy,\n updates=partial(nesterov_momentum, learning_rate=1.0),\n layers_config=[\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(25),\n 'b': Uniform(25)\n },\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(10),\n 'b': Uniform(10)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 40,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': Conv1DLayer,\n 'num_filters': 20,\n 'filter_length': 5,\n 'stride': 5,\n 'nonlinearity': sigmoid\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 80,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': sigmoid\n }\n ]\n )\n return net\n\n\n\ndef exp_c(name):\n # same as B but all 5 appliances\n # probably the best yet for all 5 appliances ;)\n source = RealApplianceSource(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television',\n 'dish washer',\n ['washer 
dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200, 2500, 2400],\n on_power_thresholds=[5, 5, 5, 5, 5],\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1500,\n output_one_appliance=False,\n boolean_targets=False,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.7,\n n_seq_per_batch=10,\n subsample_target=5\n )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=crossentropy,\n updates=partial(nesterov_momentum, learning_rate=1.0),\n layers_config=[\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(25),\n 'b': Uniform(25)\n },\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(10),\n 'b': Uniform(10)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 40,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': Conv1DLayer,\n 'num_filters': 20,\n 'filter_length': 5,\n 'stride': 5,\n 'nonlinearity': sigmoid\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 80,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': sigmoid\n }\n ]\n )\n return net\n\n\n\ndef exp_d(name):\n # same as C but bool targets\n # NaN after 372\n # Showing some (but little) promise at 250 epochs\n source = RealApplianceSource(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television',\n 'dish washer',\n ['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200, 2500, 2400],\n on_power_thresholds=[5, 5, 5, 5, 5],\n max_input_power=5900,\n min_on_durations=[60, 60, 60, 1800, 1800],\n min_off_durations=[12, 12, 12, 1800, 600],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1500,\n output_one_appliance=False,\n boolean_targets=True,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0.7,\n n_seq_per_batch=10,\n subsample_target=5\n )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=crossentropy,\n updates=partial(nesterov_momentum, learning_rate=1.0),\n layers_config=[\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(25),\n 'b': Uniform(25)\n },\n {\n 'type': DenseLayer,\n 'num_units': 50,\n 'nonlinearity': sigmoid,\n 'W': Uniform(10),\n 'b': Uniform(10)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 40,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': Conv1DLayer,\n 'num_filters': 20,\n 'filter_length': 5,\n 'stride': 5,\n 'nonlinearity': sigmoid\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1)\n },\n {\n 'type': BLSTMLayer,\n 'num_units': 80,\n 'W_in_to_cell': Uniform(5),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': sigmoid\n }\n ]\n )\n return net\n\n\n\n\ndef init_experiment(experiment):\n full_exp_name = NAME + experiment\n func_call = 'exp_{:s}(full_exp_name)'.format(experiment)\n 
print(\"***********************************\")\n print(\"Preparing\", full_exp_name, \"...\")\n net = eval(func_call)\n return net\n\n\ndef main():\n for experiment in list('abcd'):\n full_exp_name = NAME + experiment\n path = os.path.join(PATH, full_exp_name)\n try:\n net = init_experiment(experiment)\n run_experiment(net, path, epochs=2000)\n except KeyboardInterrupt:\n break\n except TrainingError as e:\n print(\"EXCEPTION:\", e)\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer\nfrom lasagne.nonlinearities import sigmoid, rectify\nfrom lasagne.objectives import crossentropy, mse\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer\nfrom lasagne.updates import adagrad, nesterov_momentum\nfrom functools import partial\nimport os\nfrom neuralnilm.source import standardise\nfrom neuralnilm.experiment import run_experiment\nfrom neuralnilm.net import TrainingError\nimport __main__\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\nPATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nSAVE_PLOT_INTERVAL = 250\nGRADIENT_STEPS = 100\n\n\"\"\"\ne103\nDiscovered that bottom layer is hardly changing. So will try\njust a single lstm layer\n\ne104\nstandard init\nlower learning rate\n\ne106\nlower learning rate to 0.001\n\ne108\nis e107 but with batch size of 5\n\ne109\nNormal(1) for LSTM\n\ne110\n* Back to Uniform(5) for LSTM\n* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f\nRESULTS: Seems to run fine again!\n\ne111\n* Try with nntools head\n* peepholes=False\nRESULTS: appears to be working well. Haven't seen a NaN, \neven with training rate of 0.1\n\ne112\n* n_seq_per_batch = 50\n\ne114\n* Trying looking at layer by layer training again.\n* Start with single LSTM layer\n\ne115\n* Learning rate = 1\n\ne116\n* Standard inits\n\ne117\n* Uniform(1) init\n\ne119\n* Learning rate 10\n# Result: didn't work well!\n\ne120\n* init: Normal(1)\n* not as good as Uniform(5)\n\ne121\n* Uniform(25)\n\ne122\n* Just 10 cells\n* Uniform(5)\n\ne125\n* Pre-train lower layers\n\"\"\"\n\ndef exp_a(name):\n source = RealApplianceSource(\n filename='/data/dk3810/ukdale.h5',\n appliances=[\n ['fridge freezer', 'fridge', 'freezer'], \n 'hair straighteners', \n 'television'\n # 'dish washer',\n # ['washer dryer', 'washing machine']\n ],\n max_appliance_powers=[300, 500, 200], #, 2500, 2400],\n on_power_thresholds=[20, 20, 20], #, 20, 20],\n max_input_power=1000,\n min_on_durations=[60, 60, 60], #, 1800, 1800],\n window=(\"2013-06-01\", \"2014-07-01\"),\n seq_length=1000,\n output_one_appliance=False,\n boolean_targets=False,\n min_off_duration=60,\n train_buildings=[1],\n validation_buildings=[1], \n skip_probability=0,\n n_seq_per_batch=50\n )\n\n net = Net(\n experiment_name=name,\n source=source,\n save_plot_interval=SAVE_PLOT_INTERVAL,\n loss_function=crossentropy,\n updates=partial(nesterov_momentum, learning_rate=1.0),\n layers_config=[\n {\n 'type': LSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(25),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': sigmoid\n }\n ],\n layer_changes={\n 251: {\n 'remove_from': -3,\n 'new_layers':\n [\n {\n 'type': LSTMLayer,\n 'num_units': 50,\n 'W_in_to_cell': Uniform(10),\n 'gradient_steps': GRADIENT_STEPS,\n 'peepholes': False\n },\n {\n 'type': DenseLayer,\n 'num_units': source.n_outputs,\n 'nonlinearity': sigmoid\n }\n ]\n }\n }\n )\n return net\n\n\ndef init_experiment(experiment):\n full_exp_name = NAME + experiment\n func_call = 'exp_{:s}(full_exp_name)'.format(experiment)\n print(\"***********************************\")\n print(\"Preparing\", full_exp_name, \"...\")\n net = eval(func_call)\n return net\n\n\ndef main():\n 
for experiment in list('a'):\n full_exp_name = NAME + experiment\n path = os.path.join(PATH, full_exp_name)\n try:\n net = init_experiment(experiment)\n run_experiment(net, path, epochs=5000)\n except KeyboardInterrupt:\n break\n except TrainingError as e:\n print(\"EXCEPTION:\", e)\n\n\nif __name__ == \"__main__\":\n main()\n",
"from __future__ import print_function, division\nimport matplotlib\nimport logging\nfrom sys import stdout\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nfrom neuralnilm import (Net, RealApplianceSource,\n BLSTMLayer, DimshuffleLayer,\n BidirectionalRecurrentLayer)\nfrom neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,\n RandomSegments, RandomSegmentsInMemory,\n SameLocation)\nfrom neuralnilm.experiment import run_experiment, init_experiment\nfrom neuralnilm.net import TrainingError\nfrom neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer, \n SharedWeightsDenseLayer)\nfrom neuralnilm.objectives import (scaled_cost, mdn_nll,\n scaled_cost_ignore_inactive, ignore_inactive,\n scaled_cost3)\nfrom neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter\nfrom neuralnilm.updates import clipped_nesterov_momentum\nfrom neuralnilm.disaggregate import disaggregate\nfrom neuralnilm.rectangulariser import rectangularise\n\nfrom lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax\nfrom lasagne.objectives import mse, binary_crossentropy\nfrom lasagne.init import Uniform, Normal\nfrom lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,\n ReshapeLayer, FeaturePoolLayer, RecurrentLayer)\nfrom lasagne.updates import nesterov_momentum, momentum\nfrom functools import partial\nimport os\nimport __main__\nfrom copy import deepcopy\nfrom math import sqrt\nimport numpy as np\nimport theano.tensor as T\nimport gc\n\n\"\"\"\n447: first attempt at disaggregation\n\"\"\"\n\nNAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]\n#PATH = \"/homes/dk3810/workspace/python/neuralnilm/figures\"\nPATH = \"/data/dk3810/figures\"\nSAVE_PLOT_INTERVAL = 1000\n\nN_SEQ_PER_BATCH = 64\nN_SEGMENTS = 3\nMAX_TARGET_POWER = 300\n\nsource_dict = dict(\n filename='/data/dk3810/ukdale.h5',\n window=(\"2013-03-18\", None),\n train_buildings=[1],\n validation_buildings=[1],\n n_seq_per_batch=N_SEQ_PER_BATCH,\n standardise_input=True,\n independently_center_inputs=True,\n subsample_target=1,\n ignore_incomplete=True,\n allow_incomplete=False,\n include_all=False,\n skip_probability=0.25,\n offset_probability=0.9,\n target_is_start_and_end_and_mean=True,\n y_processing_func=lambda y: y / MAX_TARGET_POWER\n)\n\n\nnet_dict = dict(\n save_plot_interval=SAVE_PLOT_INTERVAL,\n# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),\n# loss_function=lambda x, t: mdn_nll(x, t).mean(),\n# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),\n# loss_function=lambda x, t: mse(x, t).mean(),\n loss_function=lambda x, t: binary_crossentropy(x, t).mean(),\n# loss_function=partial(scaled_cost, loss_func=mse),\n# loss_function=ignore_inactive,\n# loss_function=partial(scaled_cost3, ignore_inactive=False),\n# updates_func=momentum,\n updates_func=clipped_nesterov_momentum,\n updates_kwargs={'clip_range': (0, 10)},\n learning_rate=1e-1,\n learning_rate_changes_by_iteration={\n 1000: 1e-2,\n 5000: 1e-3\n },\n do_save_activations=True,\n auto_reshape=False,\n# plotter=CentralOutputPlotter\n# plotter=Plotter(n_seq_to_plot=32)\n plotter=StartEndMeanPlotter(n_seq_to_plot=16, max_target_power=MAX_TARGET_POWER)\n)\n\n\ndef exp_a(name, target_appliance, seq_length):\n global source\n source_dict_copy = deepcopy(source_dict)\n source_dict_copy.update(dict(\n target_appliance=target_appliance,\n logger=logging.getLogger(name),\n seq_length=seq_length\n ))\n source = 
SameLocation(**source_dict_copy)\n net_dict_copy = deepcopy(net_dict)\n net_dict_copy.update(dict(\n experiment_name=name,\n source=source\n ))\n NUM_FILTERS = 16\n target_seq_length = source.output_shape_after_processing()[1]\n net_dict_copy['layers_config'] = [\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # (batch, features, time)\n },\n {\n 'type': Conv1DLayer, # convolve over the time axis\n 'num_filters': NUM_FILTERS,\n 'filter_size': 4,\n 'stride': 1,\n 'nonlinearity': None,\n 'border_mode': 'valid'\n },\n {\n 'type': DimshuffleLayer,\n 'pattern': (0, 2, 1) # back to (batch, time, features)\n },\n {\n 'type': DenseLayer,\n 'num_units': 512,\n 'nonlinearity': rectify\n },\n {\n 'type': DenseLayer,\n 'num_units': 256,\n 'nonlinearity': rectify\n },\n {\n 'type': DenseLayer,\n 'num_units': 128,\n 'nonlinearity': rectify\n },\n {\n 'type': DenseLayer,\n 'num_units': target_seq_length,\n 'nonlinearity': sigmoid\n }\n ]\n net = Net(**net_dict_copy)\n return net\n\n\ndef main():\n APPLIANCES = [\n ('a', ['fridge freezer', 'fridge'], 512),\n ('b', \"'coffee maker'\", 512),\n ('c', \"'dish washer'\", 2000),\n ('d', \"'hair dryer'\", 256),\n ('e', \"'kettle'\", 256),\n ('f', \"'oven'\", 2000),\n ('g', \"'toaster'\", 256),\n ('h', \"'light'\", 2000),\n ('i', ['washer dryer', 'washing machine'], 800)\n ]\n for experiment, appliance, seq_length in APPLIANCES[:1]:\n full_exp_name = NAME + experiment\n func_call = init_experiment(PATH, 'a', full_exp_name)\n func_call = func_call[:-1] + \", {}, {})\".format(appliance, seq_length)\n logger = logging.getLogger(full_exp_name)\n try:\n net = eval(func_call)\n run_experiment(net, epochs=None)\n except KeyboardInterrupt:\n logger.info(\"KeyboardInterrupt\")\n break\n except Exception as exception:\n logger.exception(\"Exception\")\n # raise\n else:\n del net.source\n del net\n gc.collect()\n finally:\n logging.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\"\"\"\nEmacs variables\nLocal Variables:\ncompile-command: \"cp /home/jack/workspace/python/neuralnilm/scripts/e512.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/\"\nEnd:\n\"\"\"\n"
] | [
[
"matplotlib.use"
],
[
"matplotlib.use"
],
[
"matplotlib.use",
"numpy.array"
],
[
"matplotlib.use"
],
[
"matplotlib.use",
"numpy.array"
],
[
"matplotlib.pyplot.subplots",
"numpy.save",
"numpy.concatenate",
"numpy.load",
"matplotlib.pyplot.show"
],
[
"matplotlib.use"
],
[
"matplotlib.use",
"numpy.array"
],
[
"matplotlib.use"
],
[
"matplotlib.use"
],
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Anshnrag02/ML-Templates | [
"de8c871fabdafe79271685ffb40a6abe64c133b2",
"de8c871fabdafe79271685ffb40a6abe64c133b2"
] | [
"classification/Rabdom forest classifier.py",
"reinforcement learning/thompson sampling.py"
] | [
"#importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\n#importing the dataset\r\ndataset = pd.read_csv('Social_Network_Ads.csv')\r\nX = dataset.iloc[:, :-1].values\r\ny = dataset.iloc[:, -1].values\r\n\r\n\r\n\r\n#splitting the dataset into training set and test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\r\n\r\n\r\n#feature scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.fit_transform(X_test)\r\n\r\n\r\n#training the decision tree classifier model on training set\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nclassifier = RandomForestClassifier(n_estimators=10,criterion='entropy',random_state=0)\r\nclassifier.fit(X_train, y_train)\r\n\r\n\r\n'''#predicting a result\r\nprint(classifier.predict(sc.transform([[30,87000]])))'''\r\n\r\n#predicting test set result\r\ny_pred=classifier.predict(X_test)\r\n\r\n\r\n#making confusion matrix\r\nfrom sklearn.metrics import confusion_matrix,accuracy_score\r\ncm= (confusion_matrix(y_test,y_pred))\r\nacc_sc=accuracy_score(y_test,y_pred)\r\n\r\n\r\n#visualising the training set result\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = sc.inverse_transform(X_train), y_train\r\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 1),\r\n np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 1))\r\nplt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)\r\nplt.title('K-NN (Training set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()\r\n#visualising the test set result\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = sc.inverse_transform(X_test), y_test\r\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 1),\r\n np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 1))\r\nplt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)\r\nplt.title('K-NN (Test set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()\r\n",
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv('Ads_CTR_Optimisation.csv')\r\n\r\n\r\nimport random\r\nN = 10000\r\nd = 10\r\nads_selected = []\r\nnumbers_of_rewards_1 = [0] * d\r\nnumbers_of_rewards_0 = [0] * d\r\ntotal_reward = 0\r\nfor n in range(0, N):\r\n ad = 0\r\n max_random = 0\r\n for i in range(0, d):\r\n random_beta = random.betavariate(numbers_of_rewards_1[i] + 1, numbers_of_rewards_0[i] + 1)\r\n if (random_beta > max_random):\r\n max_random = random_beta\r\n ad = i\r\n ads_selected.append(ad)\r\n reward = dataset.values[n, ad]\r\n if reward == 1:\r\n numbers_of_rewards_1[ad] = numbers_of_rewards_1[ad] + 1\r\n else:\r\n numbers_of_rewards_0[ad] = numbers_of_rewards_0[ad] + 1\r\n total_reward = total_reward + reward\r\nplt.hist(ads_selected)\r\nplt.title('Histogram of ads selections')\r\nplt.xlabel('Ads')\r\nplt.ylabel('Number of times each ad was selected')\r\nplt.show()\r\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.title",
"numpy.unique",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"sklearn.metrics.accuracy_score"
],
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |